author     Joan Touzet <wohali@users.noreply.github.com>  2018-03-26 19:20:42 -0400
committer  GitHub <noreply@github.com>  2018-03-26 19:20:42 -0400
commit     26ddad7a72e0b188048c4eef60eca58699953b00 (patch)
tree       0fd85623523e3c1f60356f71c8d2a49a1b2af4f1
parent     56c8f718cca6890fad8667c5938d41dc15ca1458 (diff)
parent     e7c48b3d2db46e05f651567d611fb51f8ec7db1f (diff)
download   couchdb-fix-custom-compaction-daemon-config.tar.gz
Merge branch 'master' into fix-custom-compaction-daemon-config
-rw-r--r--  .gitignore  5
-rw-r--r--  Jenkinsfile  117
-rw-r--r--  LICENSE  117
-rw-r--r--  Makefile  84
-rw-r--r--  Makefile.win  18
-rw-r--r--  NOTICE  7
-rw-r--r--  README-DEV.rst  24
-rwxr-xr-x  configure  9
-rwxr-xr-x  dev/run  24
-rw-r--r--  rebar.config.script  12
-rw-r--r--  rel/haproxy.cfg  4
-rwxr-xr-x  rel/overlay/bin/couchdb  8
-rw-r--r--  rel/overlay/etc/default.ini  44
-rw-r--r--  rel/plugins/eunit_plugin.erl  20
-rw-r--r--  rel/reltool.config  2
-rw-r--r--  src/chttpd/src/chttpd_auth_request.erl  4
-rw-r--r--  src/chttpd/src/chttpd_db.erl  63
-rw-r--r--  src/chttpd/src/chttpd_httpd_handlers.erl  1
-rw-r--r--  src/chttpd/src/chttpd_misc.erl  35
-rw-r--r--  src/chttpd/src/chttpd_view.erl  16
-rw-r--r--  src/chttpd/test/chttpd_csp_tests.erl (renamed from src/couch/test/couchdb_csp_tests.erl)  11
-rw-r--r--  src/chttpd/test/chttpd_db_test.erl  218
-rw-r--r--  src/chttpd/test/chttpd_dbs_info_test.erl  169
-rw-r--r--  src/chttpd/test/chttpd_security_tests.erl  22
-rw-r--r--  src/chttpd/test/chttpd_view_test.erl  123
-rw-r--r--  src/couch/.gitignore  5
-rw-r--r--  src/couch/include/couch_db.hrl  5
-rw-r--r--  src/couch/include/couch_eunit.hrl  11
-rw-r--r--  src/couch/include/couch_js_functions.hrl  9
-rw-r--r--  src/couch/src/couch.app.src  1
-rw-r--r--  src/couch/src/couch.erl  1
-rw-r--r--  src/couch/src/couch_att.erl  146
-rw-r--r--  src/couch/src/couch_auth_cache.erl  16
-rw-r--r--  src/couch/src/couch_bt_engine.erl  962
-rw-r--r--  src/couch/src/couch_bt_engine.hrl  24
-rw-r--r--  src/couch/src/couch_bt_engine_compactor.erl  496
-rw-r--r--  src/couch/src/couch_bt_engine_header.erl  434
-rw-r--r--  src/couch/src/couch_bt_engine_stream.erl  70
-rw-r--r--  src/couch/src/couch_changes.erl  21
-rw-r--r--  src/couch/src/couch_compaction_daemon.erl  87
-rw-r--r--  src/couch/src/couch_db.erl  720
-rw-r--r--  src/couch/src/couch_db_engine.erl  893
-rw-r--r--  src/couch/src/couch_db_int.hrl  69
-rw-r--r--  src/couch/src/couch_db_updater.erl  1310
-rw-r--r--  src/couch/src/couch_file.erl  40
-rw-r--r--  src/couch/src/couch_httpd.erl  13
-rw-r--r--  src/couch/src/couch_httpd_auth.erl  12
-rw-r--r--  src/couch/src/couch_httpd_db.erl  11
-rw-r--r--  src/couch/src/couch_httpd_misc_handlers.erl  38
-rw-r--r--  src/couch/src/couch_httpd_multipart.erl  77
-rw-r--r--  src/couch/src/couch_key_tree.erl  6
-rw-r--r--  src/couch/src/couch_passwords.erl  39
-rw-r--r--  src/couch/src/couch_query_servers.erl  2
-rw-r--r--  src/couch/src/couch_server.erl  236
-rw-r--r--  src/couch/src/couch_stream.erl  255
-rw-r--r--  src/couch/src/couch_users_db.erl  10
-rw-r--r--  src/couch/src/couch_util.erl  47
-rw-r--r--  src/couch/src/test_engine_attachments.erl  93
-rw-r--r--  src/couch/src/test_engine_compaction.erl  185
-rw-r--r--  src/couch/src/test_engine_fold_changes.erl  190
-rw-r--r--  src/couch/src/test_engine_fold_docs.erl  390
-rw-r--r--  src/couch/src/test_engine_get_set_props.erl  70
-rw-r--r--  src/couch/src/test_engine_open_close_delete.erl  81
-rw-r--r--  src/couch/src/test_engine_purge_docs.erl  158
-rw-r--r--  src/couch/src/test_engine_read_write_docs.erl  317
-rw-r--r--  src/couch/src/test_engine_ref_counting.erl  103
-rw-r--r--  src/couch/src/test_engine_util.erl  607
-rw-r--r--  src/couch/src/test_request.erl  10
-rw-r--r--  src/couch/test/chttpd_endpoints_tests.erl  1
-rw-r--r--  src/couch/test/couch_bt_engine_tests.erl  20
-rw-r--r--  src/couch/test/couch_db_plugin_tests.erl  2
-rw-r--r--  src/couch/test/couch_db_tests.erl  10
-rw-r--r--  src/couch/test/couch_key_tree_tests.erl  18
-rw-r--r--  src/couch/test/couch_passwords_tests.erl  42
-rw-r--r--  src/couch/test/couch_server_tests.erl  16
-rw-r--r--  src/couch/test/couch_stream_tests.erl  32
-rw-r--r--  src/couch/test/couchdb_compaction_daemon_tests.erl  3
-rw-r--r--  src/couch/test/couchdb_vhosts_tests.erl  25
-rw-r--r--  src/couch/test/couchdb_views_tests.erl  42
-rw-r--r--  src/couch_epi/rebar.config  4
-rw-r--r--  src/couch_epi/src/couch_epi_codegen.erl  19
-rw-r--r--  src/couch_event/src/couch_event_listener.erl  28
-rw-r--r--  src/couch_index/src/couch_index.erl  6
-rw-r--r--  src/couch_index/src/couch_index_updater.erl  6
-rw-r--r--  src/couch_log/src/couch_log.erl  1
-rw-r--r--  src/couch_log/test/couch_log_test_util.erl  7
-rw-r--r--  src/couch_mrview/src/couch_mrview.erl  32
-rw-r--r--  src/couch_mrview/src/couch_mrview_util.erl  19
-rw-r--r--  src/couch_mrview/test/couch_mrview_local_docs_tests.erl  6
-rw-r--r--  src/couch_peruser/src/couch_peruser.erl  52
-rw-r--r--  src/couch_peruser/test/couch_peruser_test.erl  140
-rw-r--r--  src/couch_replicator/include/couch_replicator_api_wrap.hrl (renamed from src/couch_replicator/src/couch_replicator_api_wrap.hrl)  13
-rw-r--r--  src/couch_replicator/src/couch_replicator.app.src  1
-rw-r--r--  src/couch_replicator/src/couch_replicator.erl  2
-rw-r--r--  src/couch_replicator/src/couch_replicator_api_wrap.erl  7
-rw-r--r--  src/couch_replicator/src/couch_replicator_auth.erl  100
-rw-r--r--  src/couch_replicator/src/couch_replicator_auth_noop.erl  52
-rw-r--r--  src/couch_replicator/src/couch_replicator_auth_session.erl  693
-rw-r--r--  src/couch_replicator/src/couch_replicator_changes_reader.erl  2
-rw-r--r--  src/couch_replicator/src/couch_replicator_docs.erl  49
-rw-r--r--  src/couch_replicator/src/couch_replicator_httpc.erl  86
-rw-r--r--  src/couch_replicator/src/couch_replicator_ids.erl  51
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler.erl  2
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler_job.erl  2
-rw-r--r--  src/couch_replicator/src/couch_replicator_utils.erl  90
-rw-r--r--  src/couch_replicator/src/couch_replicator_worker.erl  49
-rw-r--r--  src/couch_replicator/test/couch_replicator_compact_tests.erl  4
-rw-r--r--  src/couch_replicator/test/couch_replicator_filtered_tests.erl  4
-rw-r--r--  src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl  4
-rw-r--r--  src/couch_replicator/test/couch_replicator_proxy_tests.erl  2
-rw-r--r--  src/couch_replicator/test/couch_replicator_selector_tests.erl  4
-rw-r--r--  src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl  28
-rw-r--r--  src/couch_replicator/test/couch_replicator_test_helper.erl  4
-rw-r--r--  src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl  4
-rw-r--r--  src/couch_stats/src/couch_stats.app.src  2
-rw-r--r--  src/couch_stats/src/couch_stats.erl  2
-rw-r--r--  src/couch_stats/src/couch_stats_process_tracker.erl  12
-rw-r--r--  src/fabric/src/fabric_db_create.erl  33
-rw-r--r--  src/fabric/src/fabric_rpc.erl  9
-rw-r--r--  src/fabric/src/fabric_view_all_docs.erl  10
-rw-r--r--  src/mango/src/mango_cursor_special.erl  7
-rw-r--r--  src/mango/src/mango_httpd.erl  2
-rw-r--r--  src/mango/src/mango_idx_text.erl  2
-rw-r--r--  src/mango/src/mango_native_proc.erl  51
-rw-r--r--  src/mango/test/03-operator-test.py  9
-rw-r--r--  src/mango/test/16-index-selectors-test.py  10
-rw-r--r--  src/mango/test/mango.py  18
-rw-r--r--  src/mem3/include/mem3.hrl  6
-rw-r--r--  src/mem3/src/mem3.erl  19
-rw-r--r--  src/mem3/src/mem3_nodes.erl  3
-rw-r--r--  src/mem3/src/mem3_rep.erl  13
-rw-r--r--  src/mem3/src/mem3_shards.erl  61
-rw-r--r--  src/mem3/src/mem3_util.erl  17
-rw-r--r--  src/mem3/test/mem3_util_test.erl  16
-rwxr-xr-x  test/javascript/run  16
-rw-r--r--  test/javascript/tests-cluster/with-quorum/db-creation.js  27
-rw-r--r--  test/javascript/tests-cluster/without-quorum/db-creation.js  28
-rw-r--r--  test/javascript/tests/design_docs_query.js  154
-rw-r--r--  test/javascript/tests/users_db_security.js  144
-rw-r--r--  test/javascript/tests/view_errors.js  2
140 files changed, 9409 insertions, 2475 deletions
diff --git a/.gitignore b/.gitignore
index 3e2219239..3f5c4b284 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,9 @@
*.o
-*.pyc
+*.sha256
+*.sha512
*.snap
*.so
+*.pyc
.DS_Store
.rebar/
.eunit/
@@ -26,6 +28,7 @@ share/server/main.js
share/www
src/b64url/
src/bear/
+src/bcrypt/
src/config/
src/couch/priv/couch_js/config.h
src/couch/priv/couchjs
diff --git a/Jenkinsfile b/Jenkinsfile
index 221e96f45..905a85f29 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -118,31 +118,6 @@ pipeline {
deleteDir()
} // node
},
- centos7erlangdefault: {
- node(label: 'ubuntu') {
- timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/centos-7-erlang-default'
- withDockerContainer(image: 'couchdbdev/centos-7-erlang-default', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh '''
- cwd=$(pwd)
- mkdir -p $COUCHDB_IO_LOG_DIR
-
- # Build CouchDB from tarball
- builddir=$(mktemp -d)
- cd $builddir
- tar -xf $cwd/apache-couchdb-*.tar.gz
- cd apache-couchdb-*
- ./configure --with-curl
- make all
- make check || (build-aux/logfile-uploader.py && false)
- '''
- } // withDocker
- } // timeout
- deleteDir()
- } // node
- },
centos7erlang183: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
@@ -183,29 +158,6 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1404erlangdefault: {
- node(label: 'ubuntu') {
- timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-14.04-erlang-default'
- withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-default') {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh '''
- cwd=$(pwd)
- mkdir -p $COUCHDB_IO_LOG_DIR
- builddir=$(mktemp -d)
- cd $builddir
- tar -xf $cwd/apache-couchdb-*.tar.gz
- cd apache-couchdb-*
- ./configure --with-curl
- make all
- make check || (build-aux/logfile-uploader.py && false)
- '''
- } // withDocker
- } // timeout
- deleteDir()
- } // node
- },
ubuntu1404erlang183: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
@@ -246,29 +198,6 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1604erlangdefault: {
- node(label: 'ubuntu') {
- timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-16.04-erlang-default'
- withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-default') {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh '''
- cwd=$(pwd)
- mkdir -p $COUCHDB_IO_LOG_DIR
- builddir=$(mktemp -d)
- cd $builddir
- tar -xf $cwd/apache-couchdb-*.tar.gz
- cd apache-couchdb-*
- ./configure --with-curl
- make all
- make check || (build-aux/logfile-uploader.py && false)
- '''
- } // withDocker
- } // timeout
- deleteDir()
- } // node
- },
ubuntu1604erlang183: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
@@ -309,29 +238,6 @@ pipeline {
deleteDir()
} // node
},
- debian8erlangdefault: {
- node(label: 'ubuntu') {
- timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/debian-8-erlang-default'
- withDockerContainer(image: 'couchdbdev/debian-8-erlang-default') {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh '''
- cwd=$(pwd)
- mkdir -p $COUCHDB_IO_LOG_DIR
- builddir=$(mktemp -d)
- cd $builddir
- tar -xf $cwd/apache-couchdb-*.tar.gz
- cd apache-couchdb-*
- ./configure --with-curl
- make all
- make check || (build-aux/logfile-uploader.py && false)
- '''
- } // withDocker
- } // timeout
- deleteDir()
- } // node
- },
debian8erlang183: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
@@ -372,29 +278,6 @@ pipeline {
deleteDir()
} // node
},
- debian9erlangdefault: {
- node(label: 'ubuntu') {
- timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/debian-9-erlang-default'
- withDockerContainer(image: 'couchdbdev/debian-9-erlang-default') {
- sh 'rm -f apache-couchdb-*.tar.gz'
- unstash 'tarball'
- sh '''
- cwd=$(pwd)
- mkdir -p $COUCHDB_IO_LOG_DIR
- builddir=$(mktemp -d)
- cd $builddir
- tar -xf $cwd/apache-couchdb-*.tar.gz
- cd apache-couchdb-*
- ./configure --with-curl
- make all
- make check || (build-aux/logfile-uploader.py && false)
- '''
- } // withDocker
- } // timeout
- deleteDir()
- } // node
- },
debian9erlang183: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
diff --git a/LICENSE b/LICENSE
index e9a9c81e8..a209352a0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -2157,3 +2157,120 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
+
+The Erlang code is subject to this license:
+
+%% Copyright (c) 2011 Hunter Morris <hunter.morris@smarkets.com>
+
+%% Permission to use, copy, modify, and distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+The underlying blowfish code is derived from OpenBSD libc and is
+subject to the following license:
+
+/*
+ * Blowfish block cipher for OpenBSD
+ * Copyright 1997 Niels Provos <provos@physnet.uni-hamburg.de>
+ * All rights reserved.
+ *
+ * Implementation advice by David Mazieres <dm@lcs.mit.edu>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Niels Provos.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+The underlying bcrypt (hashing) code is derived from OpenBSD libc and is
+subject to the following license:
+
+/*
+ * Copyright 1997 Niels Provos <provos@physnet.uni-hamburg.de>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Niels Provos.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+The asynchronous queue code (c_src/async_queue.c and
+c_src/async_queue.h) is from the esnappy project, copyright 2011
+Konstantin V. Sorokin. It is subject to the following license:
+
+Copyright (c) 2011 Konstantin V. Sorokin
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of the copyright holder nor the names of contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
diff --git a/Makefile b/Makefile
index 248dddc58..006ad5496 100644
--- a/Makefile
+++ b/Makefile
@@ -14,8 +14,17 @@ include version.mk
REBAR?=$(shell echo `pwd`/bin/rebar)
IN_RELEASE = $(shell if [ ! -d .git ]; then echo true; fi)
-COUCHDB_VERSION_SUFFIX = $(shell if [ ! -z "$(COUCH_RC)" ]; then echo '-RC$(COUCH_RC)'; else if [ -d .git ]; then echo '-`git rev-parse --short --verify HEAD`'; fi; fi)
-COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)$(COUCHDB_VERSION_SUFFIX)
+ifeq ($(IN_RELEASE), true)
+COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)
+else
+RELTAG = $(shell git describe | grep -E '^[0-9]+\.[0-9]\.[0-9]+(-RC[0-9]+)?$$')
+ifeq ($(RELTAG),)
+COUCHDB_VERSION_SUFFIX = $(shell git rev-parse --short --verify HEAD)
+COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)-$(COUCHDB_VERSION_SUFFIX)
+else
+COUCHDB_VERSION = $(RELTAG)
+endif
+endif
DESTDIR=
@@ -91,6 +100,8 @@ fauxton: share/www
.PHONY: check
# target: check - Test everything
check: all
+ @$(MAKE) test-cluster-with-quorum
+ @$(MAKE) test-cluster-without-quorum
@$(MAKE) eunit
@$(MAKE) javascript
@$(MAKE) mango-test
@@ -105,6 +116,17 @@ eunit: couch
@$(REBAR) setup_eunit 2> /dev/null
@$(REBAR) -r eunit $(EUNIT_OPTS)
+
+setup-eunit: export BUILDDIR = $(shell pwd)
+setup-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
+setup-eunit:
+ @$(REBAR) setup_eunit 2> /dev/null
+
+just-eunit: export BUILDDIR = $(shell pwd)
+just-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
+just-eunit:
+ @$(REBAR) -r eunit $(EUNIT_OPTS)
+
.PHONY: soak-eunit
soak-eunit: export BUILDDIR = $(shell pwd)
soak-eunit: export ERL_AFLAGS = -config $(shell pwd)/rel/files/eunit.config
@@ -114,7 +136,7 @@ soak-eunit: couch
.PHONY: javascript
# target: javascript - Run JavaScript test suites or specific ones defined by suites option
-javascript:
+javascript: devclean
@mkdir -p share/www/script/test
ifeq ($(IN_RELEASE), true)
@cp test/javascript/tests/lorem*.txt share/www/script/test/
@@ -122,13 +144,46 @@ else
@mkdir -p src/fauxton/dist/release/test
@cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/
endif
- @rm -rf dev/lib
@dev/run -n 1 -q --with-admin-party-please \
--enable-erlang-views \
-c 'startup_jitter=0' \
'test/javascript/run --suites "$(suites)" \
--ignore "$(ignore_js_suites)"'
+# TODO: port to Makefile.win
+.PHONY: test-cluster-with-quorum
+test-cluster-with-quorum: devclean
+ @mkdir -p share/www/script/test
+ifeq ($(IN_RELEASE), true)
+ @cp test/javascript/tests/lorem*.txt share/www/script/test/
+else
+ @mkdir -p src/fauxton/dist/release/test
+ @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/
+endif
+ @dev/run -n 3 -q --with-admin-party-please \
+ --enable-erlang-views --degrade-cluster 1 \
+ -c 'startup_jitter=0' \
+ 'test/javascript/run --suites "$(suites)" \
+ --ignore "$(ignore_js_suites)" \
+ --path test/javascript/tests-cluster/with-quorum'
+
+# TODO: port to Makefile.win
+.PHONY: test-cluster-without-quorum
+test-cluster-without-quorum: devclean
+ @mkdir -p share/www/script/test
+ifeq ($(IN_RELEASE), true)
+ @cp test/javascript/tests/lorem*.txt share/www/script/test/
+else
+ @mkdir -p src/fauxton/dist/release/test
+ @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/
+endif
+ @dev/run -n 3 -q --with-admin-party-please \
+ --enable-erlang-views --degrade-cluster 2 \
+ -c 'startup_jitter=0' \
+ 'test/javascript/run --suites "$(suites)" \
+ --ignore "$(ignore_js_suites)" \
+ --path test/javascript/tests-cluster/without-quorum'
+
.PHONY: soak-javascript
soak-javascript:
@mkdir -p share/www/script/test
@@ -184,7 +239,7 @@ build-test:
.PHONY: mango-test
# target: mango-test - Run Mango tests
-mango-test: all
+mango-test: devclean all
./test/build/test-run-couch-for-mango.sh \
@@ -211,24 +266,6 @@ dialyze: .rebar
@$(REBAR) -r dialyze $(DIALYZE_OPTS)
-.PHONY: docker-image
-# target: docker-image - Build Docker image
-docker-image:
- @docker build --rm -t couchdb/dev-cluster .
-
-
-.PHONY: docker-start
-# target: docker-start - Start CouchDB in Docker container
-docker-start:
- @docker run -d -P -t couchdb/dev-cluster > .docker-id
-
-
-.PHONY: docker-stop
-# target: docker-stop - Stop Docker container
-docker-stop:
- @docker stop `cat .docker-id`
-
-
.PHONY: introspect
# target: introspect - Check for commits difference between rebar.config and repository
introspect:
@@ -276,6 +313,7 @@ ifeq ($(IN_RELEASE), true)
@cp -R share/docs/html/* rel/couchdb/share/www/docs/
@cp share/docs/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
else
+ @mkdir -p rel/couchdb/share/www/docs/
@mkdir -p rel/couchdb/share/docs/
@cp -R src/docs/build/html/ rel/couchdb/share/www/docs
@cp src/docs/build/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
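For reference, a sketch of how the new Makefile targets above would be invoked from a built source tree (the target names come straight from this diff; a standard dev checkout is assumed):

    # run the new clustered JavaScript suites on their own
    make test-cluster-with-quorum
    make test-cluster-without-quorum

    # prepare the eunit environment once, then re-run suites without repeating setup
    make setup-eunit
    make just-eunit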
diff --git a/Makefile.win b/Makefile.win
index 874ddf411..7ff0ab5c5 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -137,24 +137,6 @@ dialyze: .rebar
@$(REBAR) -r dialyze $(DIALYZE_OPTS)
-.PHONY: docker-image
-# target: docker-image - Build Docker image
-docker-image:
- @docker build --rm -t couchdb\dev-cluster .
-
-
-.PHONY: docker-start
-# target: docker-start - Start CouchDB in Docker container
-docker-start:
- @docker run -d -P -t couchdb\dev-cluster > .docker-id
-
-
-.PHONY: docker-stop
-# target: docker-stop - Stop Docker container
-docker-stop:
- @docker stop `cat .docker-id`
-
-
.PHONY: introspect
# target: introspect - Check for commits difference between rebar.config and repository
introspect:
diff --git a/NOTICE b/NOTICE
index a1f06ae7b..c04033897 100644
--- a/NOTICE
+++ b/NOTICE
@@ -177,3 +177,10 @@ This product also includes the following third-party components:
* velocity-react
Copyright (c) 2015 Twitter, Inc.
+
+* erlang-bcrypt
+ - Erlang code: Copyright (c) 2011 Hunter Morris <hunter.morris@smarkets.com>
+ - Blowfish block cipher & bcrypt (hashing) code for OpenBSD, Copyright
+ 1997 Niels Provos <provos@physnet.uni-hamburg.de>
+ - The asynchronous queue code (c_src/async_queue.c and c_src/async_queue.h)
+ is from the esnappy project, copyright 2011 Konstantin V. Sorokin.
diff --git a/README-DEV.rst b/README-DEV.rst
index 3587e8586..9cfa1f2ef 100644
--- a/README-DEV.rst
+++ b/README-DEV.rst
@@ -198,30 +198,6 @@ See ``make help`` for more info and useful commands.
Please report any problems to the developer's mailing list.
-Testing a cluster
------------------
-
-We use `Docker <https://docker.io>`_ to safely run a local three node
-cluster all inside a single docker container.
-
-Assuming you have Docker installed and running::
-
- make docker-image
-
-This will create a docker image (tagged 'couchdb/dev-cluster') capable
-of running a joined three node cluster.
-
-To start it up::
-
- make docker-start
-
-A three node cluster should now be running (you can now use ``docker ps``
-to find the exposed ports of the nodes).
-
-To stop it::
-
- make docker-stop
-
Releasing
---------
diff --git a/configure b/configure
index 412341208..fa0dfed6a 100755
--- a/configure
+++ b/configure
@@ -46,6 +46,7 @@ Options:
-c | --with-curl request that couchjs is linked to cURL (default false)
--disable-fauxton do not build Fauxton
--disable-docs do not build any documentation or manpages
+ --dev alias for --with-curl --disable-docs --disable-fauxton
--skip-deps do not update erlang dependencies
--rebar=PATH use rebar by specified path (version >=2.6.0 && <3.0 required)
EOF
@@ -77,6 +78,14 @@ parse_opts() {
continue
;;
+ --dev)
+ WITH_DOCS=0
+ WITH_FAUXTON=0
+ WITH_CURL="true"
+ shift
+ continue
+ ;;
+
--skip-deps)
SKIP_DEPS=1
shift
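As a usage note, the new --dev flag is only shorthand; per the help text added above, these two invocations would be equivalent:

    ./configure --dev
    ./configure --with-curl --disable-docs --disable-fauxton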
diff --git a/dev/run b/dev/run
index 4924de1f6..a5d8fde8c 100755
--- a/dev/run
+++ b/dev/run
@@ -130,6 +130,8 @@ def setup_argparse():
help='The node number to seed them when creating the node(s)')
parser.add_option('-c', '--config-overrides', action="append", default=[],
help='Optional key=val config overrides. Can be repeated')
+ parser.add_option('--degrade-cluster', dest="degrade_cluster",type=int, default=0,
+ help='The number of nodes that should be stopped after cluster config')
return parser.parse_args()
@@ -142,6 +144,7 @@ def setup_context(opts, args):
'admin': opts.admin.split(':', 1) if opts.admin else None,
'nodes': ['node%d' % (i + opts.node_number) for i in range(opts.nodes)],
'node_number': opts.node_number,
+ 'degrade_cluster': opts.degrade_cluster,
'devdir': os.path.dirname(fpath),
'rootdir': os.path.dirname(os.path.dirname(fpath)),
'cmd': ' '.join(args),
@@ -337,18 +340,35 @@ def startup(ctx):
cluster_setup_with_admin_party(ctx)
else:
cluster_setup(ctx)
-
+ if ctx['degrade_cluster'] > 0:
+ degrade_cluster(ctx)
def kill_processes(ctx):
for proc in ctx['procs']:
if proc and proc.returncode is None:
proc.kill()
+def degrade_cluster(ctx):
+ if ctx['with_haproxy']:
+ haproxy_proc = ctx['procs'].pop()
+ for i in range(0,ctx['degrade_cluster']):
+ proc = ctx['procs'].pop()
+ if proc is not None:
+ kill_process(proc)
+ if ctx['with_haproxy']:
+ ctx['procs'].append(haproxy_proc)
+
+@log('Stopping proc {proc.pid}')
+def kill_process(proc):
+ if proc and proc.returncode is None:
+ proc.kill()
def boot_nodes(ctx):
for node in ctx['nodes']:
ctx['procs'].append(boot_node(ctx, node))
- ctx['procs'].append(boot_haproxy(ctx))
+ haproxy_proc = boot_haproxy(ctx)
+ if haproxy_proc is not None:
+ ctx['procs'].append(haproxy_proc)
def ensure_all_nodes_alive(ctx):
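The new --degrade-cluster option is driven by the Makefile targets above; run by hand it would look roughly like this (the flag values mirror the test-cluster-with-quorum target, nothing else is implied):

    # boot a 3-node dev cluster, then stop one node once cluster setup is done
    dev/run -n 3 -q --with-admin-party-please --degrade-cluster 1 \
        -c 'startup_jitter=0' \
        'test/javascript/run --path test/javascript/tests-cluster/with-quorum'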
diff --git a/rebar.config.script b/rebar.config.script
index 60d2e3124..4f6f82def 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -46,7 +46,7 @@ SubDirs = [
DepDescs = [
%% Independent Apps
-{config, "config", {tag, "1.0.1"}},
+{config, "config", {tag, "1.0.3"}},
{b64url, "b64url", {tag, "1.0.1"}},
{ets_lru, "ets-lru", {tag, "1.0.0"}},
{khash, "khash", {tag, "1.0.1"}},
@@ -63,8 +63,10 @@ DepDescs = [
{folsom, "folsom", {tag, "CouchDB-0.8.2"}},
{ibrowse, "ibrowse", {tag, "CouchDB-4.0.1"}},
{jiffy, "jiffy", {tag, "CouchDB-0.14.11-2"}},
-{mochiweb, "mochiweb", {tag, "CouchDB-2.12.0-1"}},
-{meck, "meck", {tag, "0.8.8"}}
+{mochiweb, "mochiweb", {tag, "v2.17.0"}},
+{meck, "meck", {tag, "0.8.8"}},
+{bcrypt, {url, "https://github.com/apache/couchdb-erlang-bcrypt"},
+ {tag, "1.0.2"}}
],
@@ -90,14 +92,14 @@ AddConfig = [
{deps, lists:map(MakeDep, DepDescs)},
{sub_dirs, SubDirs},
{lib_dirs, ["src"]},
- {erl_opts, [debug_info, {i, "../"}]},
+ {erl_opts, [bin_opt_info, debug_info, {i, "../"}]},
{eunit_opts, [verbose]},
{plugins, [eunit_plugin]},
{dialyzer, [
{plt_location, local},
{plt_location, COUCHDB_ROOT},
{plt_extra_apps, [
- asn1, compiler, crypto, inets, kernel, os_mon, runtime_tools,
+ asn1, bcrypt, compiler, crypto, inets, kernel, os_mon, runtime_tools,
sasl, ssl, stdlib, syntax_tools, xmerl]},
{warnings, [unmatched_returns, error_handling, race_conditions]}]},
{post_hooks, [{compile, "escript support/build_js.escript"}]}
diff --git a/rel/haproxy.cfg b/rel/haproxy.cfg
index 73ec987f4..45affaffe 100644
--- a/rel/haproxy.cfg
+++ b/rel/haproxy.cfg
@@ -30,8 +30,8 @@ defaults
timeout connect 500
stats enable
- stats scope .
- stats uri /_stats
+ stats uri /_haproxy_stats
+ # stats auth admin:admin # Uncomment for basic auth
frontend http-in
# This requires HAProxy 1.5.x
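With this change the HAProxy statistics page is served from /_haproxy_stats instead of /_stats (presumably to keep it clear of CouchDB's own /_stats handler). A minimal check against a dev cluster fronted by HAProxy; the address and port are assumptions for a default dev setup, not values from this diff:

    curl http://127.0.0.1:5984/_haproxy_stats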
diff --git a/rel/overlay/bin/couchdb b/rel/overlay/bin/couchdb
index c82f581f4..a9e6e9bea 100755
--- a/rel/overlay/bin/couchdb
+++ b/rel/overlay/bin/couchdb
@@ -26,6 +26,10 @@ export BINDIR="$ROOTDIR/erts-$ERTS_VSN/bin"
export EMU=beam
export PROGNAME=`echo $0 | sed 's/.*\///'`
+ARGS_FILE="${COUCHDB_ARGS_FILE:-$ROOTDIR/etc/vm.args}"
+SYSCONFIG_FILE="${COUCHDB_SYSCONFIG_FILE:-$ROOTDIR/releases/$APP_VSN/sys.config}"
+
exec "$BINDIR/erlexec" -boot "$ROOTDIR/releases/$APP_VSN/couchdb" \
- -args_file "$ROOTDIR/etc/vm.args" \
- -config "$ROOTDIR/releases/$APP_VSN/sys.config" "$@"
+ -args_file "${ARGS_FILE}" \
+ -config "${SYSCONFIG_FILE}" "$@"
+
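The two new environment variables make the vm.args and sys.config locations overridable without editing the script. A hypothetical invocation (the paths are placeholders, not defaults shipped by this diff):

    COUCHDB_ARGS_FILE=/etc/couchdb/vm.args \
    COUCHDB_SYSCONFIG_FILE=/etc/couchdb/sys.config \
    bin/couchdb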
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index c473495fe..df4387735 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -52,6 +52,16 @@ changes_doc_ids_optimization_threshold = 100
; Maximum attachment size.
; max_attachment_size = infinity
+; The default storage engine to use when creating databases
+; is set as a key into the [couchdb_engines] section.
+default_engine = couch
+
+[couchdb_engines]
+; The keys in this section are the filename extension that
+; the specified engine module will use. This is important so
+; that couch_server is able to find an existing database without
+; having to ask every configured engine.
+couch = couch_bt_engine
[cluster]
q=8
@@ -69,6 +79,10 @@ require_valid_user = false
; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
; If Server header is left out, Mochiweb will add its own one in.
prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
+;
; Limit the maximum number of databases when trying to get detailed information using
+; _dbs_info in a request
+max_db_number_for_dbs_info_req = 100
[database_compaction]
; larger buffer sizes can originate smaller files
@@ -91,6 +105,10 @@ delete_dbs = false
; Set a default q value for peruser-created databases that is different from
; cluster / q
;q = 1
; prefix for user databases. If you change this after user dbs have been
; created, the existing databases won't be deleted if the associated user
; is deleted, because of the resulting prefix mismatch.
+database_prefix = userdb-
[httpd]
port = {{backend_port}}
@@ -171,7 +189,8 @@ require_valid_user = false
timeout = 600 ; number of seconds before automatic logout
auth_cache_size = 50 ; size is number of cache entries
allow_persistent_cookies = false ; set to true to allow persistent cookies
-iterations = 10 ; iterations for password hashing
+iterations = 10 ; iterations for PBKDF2 password hashing
+log_rounds = 10 ; 2^log_rounds iterations for Bcrypt password hashing
; min_iterations = 1
; max_iterations = 1000000000
; password_scheme = pbkdf2
@@ -412,14 +431,35 @@ ssl_certificate_max_depth = 3
; Re-check cluster state at least every cluster_quiet_period seconds
; cluster_quiet_period = 60
+; List of replicator client authentication plugins to try. Plugins will be
+; tried in order. The first to initialize successfully will be used for that
+; particular endpoint (source or target). Normally couch_replicator_auth_noop
+; would be used at the end of the list as a "catch-all". It doesn't do anything
+; and effectively implements the previous behavior of using basic auth.
+; There are currently two plugins available:
+; couch_replicator_auth_session - use _session cookie authentication
+; couch_replicator_auth_noop - use basic authentication (previous default)
+; Currently previous default behavior is still the default. To start using
+; session auth, use this as the list of plugins:
+; `couch_replicator_auth_session,couch_replicator_auth_noop`.
+; In a future release the session plugin might be used by default.
+;auth_plugins = couch_replicator_auth_noop
+
+
[compaction_daemon]
; The delay, in seconds, between each check for which database and view indexes
; need to be compacted.
-check_interval = 300
+check_interval = 3600
; If a database or view index file is smaller then this value (in bytes),
; compaction will not happen. Very small files always have a very high
; fragmentation therefore it's not worth to compact them.
min_file_size = 131072
+; With lots of databases and/or with lots of design docs in one or more
+; databases, the compaction_daemon can create significant CPU load when
+; checking whether databases and view indexes need compacting. The
+; snooze_period setting ensures a smoother CPU load. Defaults to
+; 3 seconds wait.
+; snooze_period = 3
[compactions]
; List of compaction rules for the compaction daemon.
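Pulling the new settings together, a local.ini sketch built only from keys added in this diff; the values shown are the defaults above (except auth_plugins, which shows the session-auth list suggested in the comments), so in practice you would only override what you want to change:

    [couchdb]
    default_engine = couch

    [couchdb_engines]
    couch = couch_bt_engine

    [chttpd]
    max_db_number_for_dbs_info_req = 100

    [couch_httpd_auth]
    log_rounds = 10

    [couch_peruser]
    database_prefix = userdb-

    [replicator]
    auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop

    [compaction_daemon]
    check_interval = 3600
    snooze_period = 3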
diff --git a/rel/plugins/eunit_plugin.erl b/rel/plugins/eunit_plugin.erl
index bbf83d2ec..1de20b394 100644
--- a/rel/plugins/eunit_plugin.erl
+++ b/rel/plugins/eunit_plugin.erl
@@ -32,8 +32,28 @@ build_eunit_config(Config0, AppFile) ->
Cwd = filename:absname(rebar_utils:get_cwd()),
DataDir = Cwd ++ "/tmp/data",
ViewIndexDir = Cwd ++ "/tmp/data",
+ TmpDataDir = Cwd ++ "/tmp/tmp_data",
+ cleanup_dirs([DataDir, TmpDataDir]),
Config1 = rebar_config:set_global(Config0, template, "setup_eunit"),
Config2 = rebar_config:set_global(Config1, prefix, Cwd),
Config3 = rebar_config:set_global(Config2, data_dir, DataDir),
Config = rebar_config:set_global(Config3, view_index_dir, ViewIndexDir),
rebar_templater:create(Config, AppFile).
+
+
+cleanup_dirs(Dirs) ->
+ lists:foreach(fun(Dir) ->
+ case filelib:is_dir(Dir) of
+ true -> del_dir(Dir);
+ false -> ok
+ end
+ end, Dirs).
+
+
+del_dir(Dir) ->
+ All = filelib:wildcard(Dir ++ "/**"),
+ {Dirs, Files} = lists:partition(fun filelib:is_dir/1, All),
+ ok = lists:foreach(fun file:delete/1, Files),
+ SortedDirs = lists:sort(fun(A, B) -> length(A) > length(B) end, Dirs),
+ ok = lists:foreach(fun file:del_dir/1, SortedDirs),
+ ok = file:del_dir(Dir).
diff --git a/rel/reltool.config b/rel/reltool.config
index 464f5f4fe..aa3100647 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -15,6 +15,7 @@
{rel, "couchdb", "2.2.0", [
%% stdlib
asn1,
+ bcrypt,
compiler,
crypto,
inets,
@@ -66,6 +67,7 @@
%% stdlib
{app, asn1, [{incl_cond, include}]},
+ {app, bcrypt, [{incl_cond, include}]},
{app, compiler, [{incl_cond, include}]},
{app, crypto, [{incl_cond, include}]},
{app, inets, [{incl_cond, include}]},
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index 4e2e0dbf2..05c5e8e35 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -35,6 +35,8 @@ authorize_request_int(#httpd{path_parts=[<<"favicon.ico">>|_]}=Req) ->
Req;
authorize_request_int(#httpd{path_parts=[<<"_all_dbs">>|_]}=Req) ->
Req;
+authorize_request_int(#httpd{path_parts=[<<"_dbs_info">>|_]}=Req) ->
+ Req;
authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='PUT'}=Req) ->
require_admin(Req);
authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='DELETE'}=Req) ->
@@ -81,6 +83,8 @@ server_authorization_check(#httpd{path_parts=[<<"_stats">>]}=Req) ->
Req;
server_authorization_check(#httpd{path_parts=[<<"_active_tasks">>]}=Req) ->
Req;
+server_authorization_check(#httpd{path_parts=[<<"_dbs_info">>]}=Req) ->
+ Req;
server_authorization_check(#httpd{method=Method, path_parts=[<<"_utils">>|_]}=Req)
when Method =:= 'HEAD' orelse Method =:= 'GET' ->
Req;
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index dbbb454cb..ed0adead9 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -284,8 +284,14 @@ create_db_req(#httpd{}=Req, DbName) ->
N = chttpd:qs_value(Req, "n", config:get("cluster", "n", "3")),
Q = chttpd:qs_value(Req, "q", config:get("cluster", "q", "8")),
P = chttpd:qs_value(Req, "placement", config:get("cluster", "placement")),
+ EngineOpt = parse_engine_opt(Req),
+ Options = [
+ {n, N},
+ {q, Q},
+ {placement, P}
+ ] ++ EngineOpt,
DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
- case fabric:create_db(DbName, [{n,N}, {q,Q}, {placement,P}]) of
+ case fabric:create_db(DbName, Options) of
ok ->
send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
accepted ->
@@ -518,6 +524,21 @@ db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
throw({bad_request, "`keys` parameter must be an array."})
end;
+db_req(#httpd{method='POST',
+ path_parts=[_, OP, <<"queries">>]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
+ Props = chttpd:json_body_obj(Req),
+ case couch_mrview_util:get_view_queries(Props) of
+ undefined ->
+ throw({bad_request,
+ <<"POST body must include `queries` parameter.">>});
+ Queries ->
+ multi_all_docs_view(Req, Db, OP, Queries)
+ end;
+
+db_req(#httpd{path_parts=[_, OP, <<"queries">>]}=Req,
+ _Db) when ?IS_ALL_DOCS(OP) ->
+ send_method_not_allowed(Req, "POST");
+
db_req(#httpd{method='POST',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
chttpd:validate_ctype(Req, "application/json"),
{Fields} = chttpd:json_body_obj(Req),
@@ -636,6 +657,29 @@ db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
db_attachment_req(Req, Db, DocId, FileNameParts).
+multi_all_docs_view(Req, Db, OP, Queries) ->
+ Args0 = couch_mrview_http:parse_params(Req, undefined),
+ Args1 = Args0#mrargs{view_type=map},
+ ArgQueries = lists:map(fun({Query}) ->
+ QueryArg1 = couch_mrview_http:parse_params(Query, undefined,
+ Args1, [decoded]),
+ QueryArgs2 = couch_mrview_util:validate_args(QueryArg1),
+ set_namespace(OP, QueryArgs2)
+ end, Queries),
+ Options = [{user_ctx, Req#httpd.user_ctx}],
+ VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
+ FirstChunk = "{\"results\":[",
+ {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req,
+ 200, [], FirstChunk),
+ VAcc1 = VAcc0#vacc{resp=Resp0},
+ VAcc2 = lists:foldl(fun(Args, Acc0) ->
+ {ok, Acc1} = fabric:all_docs(Db, Options,
+ fun couch_mrview_http:view_cb/2, Acc0, Args),
+ Acc1
+ end, VAcc1, ArgQueries),
+ {ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
+ chttpd:end_delayed_json_response(Resp1).
+
all_docs_view(Req, Db, Keys, OP) ->
Args0 = couch_mrview_http:parse_params(Req, Keys),
Args1 = Args0#mrargs{view_type=map},
@@ -825,7 +869,8 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
missing_rev -> nil;
Rev -> Rev
end,
- {TargetDocId, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
+ {TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
+ TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
@@ -1354,6 +1399,20 @@ get_md5_header(Req) ->
parse_doc_query(Req) ->
lists:foldl(fun parse_doc_query/2, #doc_query_args{}, chttpd:qs(Req)).
+parse_engine_opt(Req) ->
+ case chttpd:qs_value(Req, "engine") of
+ undefined ->
+ [];
+ Extension ->
+ Available = couch_server:get_engine_extensions(),
+ case lists:member(Extension, Available) of
+ true ->
+ [{engine, iolist_to_binary(Extension)}];
+ false ->
+ throw({bad_request, invalid_engine_extension})
+ end
+ end.
+
parse_doc_query({Key, Value}, Args) ->
case {Key, Value} of
{"attachments", "true"} ->
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 9c3044126..cb52e2c40 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -18,6 +18,7 @@ url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
+url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
url_handler(<<"_node">>) -> fun chttpd_misc:handle_node_req/1;
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 15eabbfbd..253da233e 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -14,6 +14,7 @@
-export([
handle_all_dbs_req/1,
+ handle_dbs_info_req/1,
handle_node_req/1,
handle_favicon_req/1,
handle_favicon_req/2,
@@ -37,6 +38,8 @@
[send_json/2,send_json/3,send_method_not_allowed/2,
send_chunk/2,start_chunked_response/3]).
+-define(MAX_DB_NUM_FOR_DBS_INFO, 100).
+
% httpd global handlers
handle_welcome_req(Req) ->
@@ -141,6 +144,38 @@ all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
{ok, Acc#vacc{resp=Resp1}}.
+handle_dbs_info_req(#httpd{method='POST'}=Req) ->
+ chttpd:validate_ctype(Req, "application/json"),
+ Props = chttpd:json_body_obj(Req),
+ Keys = couch_mrview_util:get_view_keys(Props),
+ case Keys of
+ undefined -> throw({bad_request, "`keys` member must exist."});
+ _ -> ok
+ end,
+ MaxNumber = config:get_integer("chttpd",
+ "max_db_number_for_dbs_info_req", ?MAX_DB_NUM_FOR_DBS_INFO),
+ case length(Keys) =< MaxNumber of
+ true -> ok;
+ false -> throw({bad_request, too_many_keys})
+ end,
+ {ok, Resp} = chttpd:start_json_response(Req, 200),
+ send_chunk(Resp, "["),
+ lists:foldl(fun(DbName, AccSeparator) ->
+ case catch fabric:get_db_info(DbName) of
+ {ok, Result} ->
+ Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ _ ->
+ Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ "," % AccSeparator now has a comma
+ end, "", Keys),
+ send_chunk(Resp, "]"),
+ chttpd:end_json_response(Resp);
+handle_dbs_info_req(Req) ->
+ send_method_not_allowed(Req, "POST").
+
handle_task_status_req(#httpd{method='GET'}=Req) ->
{Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
Response = lists:flatmap(fun({Node, Tasks}) ->
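The new handler takes a POST with a `keys` array (capped by max_db_number_for_dbs_info_req) and streams one info or not_found entry per database. A sketch of the request shape used by the tests further down; address and credentials are placeholders:

    curl -X POST 'http://adm:pass@127.0.0.1:5984/_dbs_info' \
        -H 'Content-Type: application/json' \
        -d '{"keys": ["db1", "db2"]}'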
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 0aaa849a5..3c05c64ca 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -49,6 +49,22 @@ design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
fun couch_mrview_http:view_cb/2, VAcc, Args),
{ok, Resp#vacc.resp}.
+handle_view_req(#httpd{method='POST',
+ path_parts=[_, _, _, _, ViewName, <<"queries">>]}=Req, Db, DDoc) ->
+ chttpd:validate_ctype(Req, "application/json"),
+ Props = couch_httpd:json_body_obj(Req),
+ case couch_mrview_util:get_view_queries(Props) of
+ undefined ->
+ throw({bad_request,
+ <<"POST body must include `queries` parameter.">>});
+ Queries ->
+ multi_query_view(Req, Db, DDoc, ViewName, Queries)
+ end;
+
+handle_view_req(#httpd{path_parts=[_, _, _, _, _, <<"queries">>]}=Req,
+ _Db, _DDoc) ->
+ chttpd:send_method_not_allowed(Req, "POST");
+
handle_view_req(#httpd{method='GET',
path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
couch_stats:increment_counter([couchdb, httpd, view_reads]),
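The same `queries` treatment is added for views. A sketch with a hypothetical design document and view name:

    curl -X POST 'http://adm:pass@127.0.0.1:5984/db1/_design/ddoc1/_view/by_id/queries' \
        -H 'Content-Type: application/json' \
        -d '{"queries": [{"keys": ["doc3", "doc8"]}, {"limit": 5, "skip": 2}]}'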
diff --git a/src/couch/test/couchdb_csp_tests.erl b/src/chttpd/test/chttpd_csp_tests.erl
index 5eb33f909..e86436254 100644
--- a/src/couch/test/couchdb_csp_tests.erl
+++ b/src/chttpd/test/chttpd_csp_tests.erl
@@ -10,29 +10,28 @@
% License for the specific language governing permissions and limitations under
% the License.
--module(couchdb_csp_tests).
+-module(chttpd_csp_tests).
-include_lib("couch/include/couch_eunit.hrl").
--define(TIMEOUT, 1000).
-
setup() ->
ok = config:set("csp", "enable", "true", false),
- Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
lists:concat(["http://", Addr, ":", Port, "/_utils/"]).
teardown(_) ->
ok.
+
csp_test_() ->
{
"Content Security Policy tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
{
foreach,
fun setup/0, fun teardown/1,
diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/chttpd_db_test.erl
index f3c779bd3..636603710 100644
--- a/src/chttpd/test/chttpd_db_test.erl
+++ b/src/chttpd/test/chttpd_db_test.erl
@@ -19,7 +19,10 @@
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+-define(DESTHEADER1, {"Destination", "foo%E5%95%8Abar"}).
+-define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}).
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
+-define(i2l(I), integer_to_list(I)).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
@@ -62,7 +65,20 @@ all_test_() ->
fun should_return_404_for_delete_att_on_notadoc/1,
fun should_return_409_for_del_att_without_rev/1,
fun should_return_200_for_del_att_with_rev/1,
- fun should_return_409_for_put_att_nonexistent_rev/1
+ fun should_return_409_for_put_att_nonexistent_rev/1,
+ fun should_return_update_seq_when_set_on_all_docs/1,
+ fun should_not_return_update_seq_when_unset_on_all_docs/1,
+ fun should_return_correct_id_on_doc_copy/1,
+ fun should_return_400_for_bad_engine/1,
+ fun should_succeed_on_all_docs_with_queries_keys/1,
+ fun should_succeed_on_all_docs_with_queries_limit_skip/1,
+ fun should_succeed_on_all_docs_with_multiple_queries/1,
+ fun should_succeed_on_design_docs_with_queries_keys/1,
+ fun should_succeed_on_design_docs_with_queries_limit_skip/1,
+ fun should_succeed_on_design_docs_with_multiple_queries/1,
+ fun should_succeed_on_local_docs_with_queries_keys/1,
+ fun should_succeed_on_local_docs_with_queries_limit_skip/1,
+ fun should_succeed_on_local_docs_with_multiple_queries/1
]
}
}
@@ -187,6 +203,53 @@ should_return_409_for_put_att_nonexistent_rev(Url) ->
end).
+should_return_update_seq_when_set_on_all_docs(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
+ {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
+ ++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"update_seq">>, ResultJson)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"offset">>, ResultJson))
+ end).
+
+
+should_not_return_update_seq_when_unset_on_all_docs(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
+ {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
+ ++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ?assertEqual(undefined,
+ couch_util:get_value(<<"update_seq">>, ResultJson)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"offset">>, ResultJson))
+ end).
+
+
+should_return_correct_id_on_doc_copy(Url) ->
+ ?_test(begin
+ {ok, _, _, _} = create_doc(Url, "testdoc"),
+ {_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]),
+ {ResultJson1} = ?JSON_DECODE(ResultBody1),
+ Id1 = couch_util:get_value(<<"id">>, ResultJson1),
+
+ {_, _, _, ResultBody2} = test_request:copy(Url ++ "/testdoc/",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]),
+ {ResultJson2} = ?JSON_DECODE(ResultBody2),
+ Id2 = couch_util:get_value(<<"id">>, ResultJson2),
+ [
+ ?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1),
+ ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2)
+ ]
+ end).
+
+
attachment_doc() ->
{ok, Data} = file:read_file(?FIXTURE_TXT),
{[
@@ -197,3 +260,156 @@ attachment_doc() ->
]}
}]}}
]}.
+
+
+should_return_400_for_bad_engine(_) ->
+ ?_test(begin
+ TmpDb = ?tempdb(),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+ Url = BaseUrl ++ "?engine=cowabunga",
+ {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+ ?assertEqual(400, Status)
+ end).
+
+
+should_succeed_on_all_docs_with_queries_keys(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
+ [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_all_docs_with_queries_limit_skip(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
+ [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_all_docs_with_multiple_queries(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]},
+ {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
+ [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end).
+
+
+should_succeed_on_design_docs_with_queries_keys(Url) ->
+ ?_test(begin
+ [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",
+ \"_design/ddoc8\"]}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++
+ "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_design_docs_with_queries_limit_skip(Url) ->
+ ?_test(begin
+ [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++
+ "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_design_docs_with_multiple_queries(Url) ->
+ ?_test(begin
+ [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",
+ \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++
+ "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end).
+
+
+should_succeed_on_local_docs_with_queries_keys(Url) ->
+ ?_test(begin
+ [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"keys\":
+ [ \"_local/doc3\", \"_local/doc8\"]}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++ "/_local_docs/queries/",
+ [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_local_docs_with_queries_limit_skip(Url) ->
+ ?_test(begin
+ [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++
+ "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_local_docs_with_multiple_queries(Url) ->
+ ?_test(begin
+ [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"_local/doc3\",
+ \"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++
+ "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end).
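The COPY tests above exercise the other chttpd_db change: the Destination header is now URL-decoded before being used as the target document id. A sketch mirroring the second test header (address and credentials are placeholders):

    curl -X COPY 'http://adm:pass@127.0.0.1:5984/db1/testdoc' \
        -H 'Destination: foo%2Fbar%23baz%3Fpow%3Afiz'
    # the returned id should decode to foo/bar#baz?pow:fiz, as asserted above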
diff --git a/src/chttpd/test/chttpd_dbs_info_test.erl b/src/chttpd/test/chttpd_dbs_info_test.erl
new file mode 100644
index 000000000..5b61d8831
--- /dev/null
+++ b/src/chttpd/test/chttpd_dbs_info_test.erl
@@ -0,0 +1,169 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_dbs_info_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_db_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ Url = lists:concat(["http://", Addr, ":", Port, "/"]),
+ Db1Url = lists:concat([Url, "db1"]),
+ create_db(Db1Url),
+ Db2Url = lists:concat([Url, "db2"]),
+ create_db(Db2Url),
+ Url.
+
+teardown(Url) ->
+ Db1Url = lists:concat([Url, "db1"]),
+ Db2Url = lists:concat([Url, "db2"]),
+ delete_db(Db1Url),
+ delete_db(Db2Url),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+create_db(Url) ->
+ {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+ ?assert(Status =:= 201 orelse Status =:= 202).
+
+delete_db(Url) ->
+ {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
+
+dbs_info_test_() ->
+ {
+ "chttpd dbs info tests",
+ {
+ setup,
+ fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_return_error_for_get_db_info/1,
+ fun should_return_dbs_info_for_single_db/1,
+ fun should_return_dbs_info_for_multiple_dbs/1,
+ fun should_return_error_for_exceeded_keys/1,
+ fun should_return_error_for_missing_keys/1,
+ fun should_return_dbs_info_for_dbs_with_mixed_state/1
+ ]
+ }
+ }
+ }.
+
+
+should_return_error_for_get_db_info(Url) ->
+ ?_test(begin
+ {ok, Code, _, ResultBody} = test_request:get(Url ++ "/_dbs_info?"
+ ++ "keys=[\"db1\"]", [?CONTENT_JSON, ?AUTH]),
+ {Body} = jiffy:decode(ResultBody),
+ [
+ ?assertEqual(<<"method_not_allowed">>,
+ couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(405, Code)
+ ]
+ end).
+
+
+should_return_dbs_info_for_single_db(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\"]}",
+ {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ BodyJson = jiffy:decode(ResultBody),
+ {Db1Data} = lists:nth(1, BodyJson),
+ [
+ ?assertEqual(<<"db1">>,
+ couch_util:get_value(<<"key">>, Db1Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db1Data))
+ ]
+ end).
+
+
+should_return_dbs_info_for_multiple_dbs(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\", \"db2\"]}",
+ {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ BodyJson = jiffy:decode(ResultBody),
+ {Db1Data} = lists:nth(1, BodyJson),
+ {Db2Data} = lists:nth(2, BodyJson),
+ [
+ ?assertEqual(<<"db1">>,
+ couch_util:get_value(<<"key">>, Db1Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db1Data)),
+ ?assertEqual(<<"db2">>,
+ couch_util:get_value(<<"key">>, Db2Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db2Data))
+ ]
+ end).
+
+
+should_return_error_for_exceeded_keys(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\", \"db2\"]}",
+ ok = config:set("chttpd", "max_db_number_for_dbs_info_req", "1"),
+ {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ {Body} = jiffy:decode(ResultBody),
+ ok = config:delete("chttpd", "max_db_number_for_dbs_info_req"),
+ [
+ ?assertEqual(<<"bad_request">>,
+ couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(400, Code)
+ ]
+ end).
+
+
+should_return_error_for_missing_keys(Url) ->
+ ?_test(begin
+ NewDoc = "{\"missingkeys\": [\"db1\", \"db2\"]}",
+ {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ {Body} = jiffy:decode(ResultBody),
+ [
+ ?assertEqual(<<"bad_request">>,
+ couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(400, Code)
+ ]
+ end).
+
+
+should_return_dbs_info_for_dbs_with_mixed_state(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\", \"noexisteddb\"]}",
+ {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ Json = jiffy:decode(ResultBody),
+ {Db1Data} = lists:nth(1, Json),
+ {Db2Data} = lists:nth(2, Json),
+ [
+ ?assertEqual(
+ <<"db1">>, couch_util:get_value(<<"key">>, Db1Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db1Data)),
+ ?assertEqual(
+ <<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)),
+ ?assertEqual(undefined, couch_util:get_value(<<"info">>, Db2Data))
+ ]
+ end).
diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/chttpd_security_tests.erl
index b80238c78..737a32e11 100644
--- a/src/chttpd/test/chttpd_security_tests.erl
+++ b/src/chttpd/test/chttpd_security_tests.erl
@@ -102,6 +102,8 @@ all_test_() ->
fun setup/0, fun teardown/1,
[
fun should_allow_admin_db_compaction/1,
+ fun should_allow_valid_password_to_create_user/1,
+ fun should_disallow_invalid_password_to_create_user/1,
fun should_disallow_anonymous_db_compaction/1,
fun should_disallow_db_member_db_compaction/1,
fun should_allow_db_admin_db_compaction/1,
@@ -124,6 +126,26 @@ should_allow_admin_db_compaction([Url,_UsersUrl]) ->
couch_util:get_value(<<"ok">>, InnerJson, undefined)
end).
+
+should_allow_valid_password_to_create_user([_Url, UsersUrl]) ->
+ UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
+ \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}",
+ {ok, _, _, ResultBody} = test_request:post(UsersUrl,
+ [?CONTENT_JSON, ?AUTH], UserDoc),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ {InnerJson} = ResultJson,
+ ?_assertEqual(true, couch_util:get_value(<<"ok">>, InnerJson)).
+
+should_disallow_invalid_password_to_create_user([_Url, UsersUrl]) ->
+ UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
+ \"type\": \"user\", \"roles\": [], \"password\": 123}",
+ {ok, _, _, ResultBody} = test_request:post(UsersUrl,
+ [?CONTENT_JSON, ?AUTH], UserDoc),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ {InnerJson} = ResultJson,
+ ErrType = couch_util:get_value(<<"error">>, InnerJson),
+ ?_assertEqual(<<"forbidden">>, ErrType).
+
should_disallow_anonymous_db_compaction([Url,_UsersUrl]) ->
{ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact",
[?CONTENT_JSON], ""),
diff --git a/src/chttpd/test/chttpd_view_test.erl b/src/chttpd/test/chttpd_view_test.erl
new file mode 100644
index 000000000..3457c6f30
--- /dev/null
+++ b/src/chttpd/test/chttpd_view_test.erl
@@ -0,0 +1,123 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_view_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_view_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+-define(DDOC, "{\"_id\": \"_design/bar\", \"views\": {\"baz\":
+ {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}").
+
+-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
+-define(i2l(I), integer_to_list(I)).
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ TmpDb = ?tempdb(),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+ create_db(Url),
+ Url.
+
+teardown(Url) ->
+ delete_db(Url),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+create_db(Url) ->
+ {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+ ?assert(Status =:= 201 orelse Status =:= 202).
+
+
+create_doc(Url, Id) ->
+ test_request:put(Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
+
+delete_db(Url) ->
+ {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
+
+all_view_test_() ->
+ {
+ "chttpd view tests",
+ {
+ setup,
+ fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_succeed_on_view_with_queries_keys/1,
+ fun should_succeed_on_view_with_queries_limit_skip/1,
+ fun should_succeed_on_view_with_multiple_queries/1
+ ]
+ }
+ }
+ }.
+
+
+should_succeed_on_view_with_queries_keys(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ {ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
+ [?CONTENT_JSON, ?AUTH], ?DDOC),
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\",
+ \"testdoc8\"]}]}",
+ {ok, _, _, RespBody} = test_request:post(Url ++ "/_design/bar/"
+ ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_view_with_queries_limit_skip(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ {ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
+ [?CONTENT_JSON, ?AUTH], ?DDOC),
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/"
+ ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end).
+
+
+should_succeed_on_view_with_multiple_queries(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ {ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
+ [?CONTENT_JSON, ?AUTH], ?DDOC),
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\",
+ \"testdoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/"
+ ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end).
diff --git a/src/couch/.gitignore b/src/couch/.gitignore
index 30aa17359..73fb0b684 100644
--- a/src/couch/.gitignore
+++ b/src/couch/.gitignore
@@ -11,5 +11,10 @@ priv/*.dll
priv/*.exe
vc120.pdb
+test/engines/coverage/
+test/engines/data/
+test/engines/etc/
+test/engines/log/
+
.rebar/
.eunit
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
index 37f5792c3..feaa64069 100644
--- a/src/couch/include/couch_db.hrl
+++ b/src/couch/include/couch_db.hrl
@@ -108,7 +108,10 @@
% the json body object.
body = {[]},
- atts = [] :: [couch_att:att()], % attachments
+ % Atts can be a binary when a storage engine
+ % returns the attachment info blob in compressed
+ % returns the attachment info blob in compressed
+ % form.
+ atts = [] :: [couch_att:att()] | binary(), % attachments
deleted = false,
diff --git a/src/couch/include/couch_eunit.hrl b/src/couch/include/couch_eunit.hrl
index 8eb763aff..f4617e1d3 100644
--- a/src/couch/include/couch_eunit.hrl
+++ b/src/couch/include/couch_eunit.hrl
@@ -40,17 +40,14 @@
-define(tempfile,
fun() ->
- A = integer_to_list(couch_util:unique_monotonic_integer()),
- N = node(),
- FileName = lists:flatten(io_lib:format("~p-~s", [N, A])),
+ Suffix = couch_uuids:random(),
+ FileName = io_lib:format("~p-~s", [node(), Suffix]),
filename:join([?TEMPDIR, FileName])
end).
-define(tempdb,
fun() ->
- Nums = integer_to_list(couch_util:unique_monotonic_integer()),
- Prefix = "eunit-test-db",
- Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
- list_to_binary(Prefix ++ "-" ++ Suffix)
+ Suffix = couch_uuids:random(),
+ iolist_to_binary(["eunit-test-db-", Suffix])
end).
-define(docid,
fun() ->
diff --git a/src/couch/include/couch_js_functions.hrl b/src/couch/include/couch_js_functions.hrl
index 6851718ff..d969416d6 100644
--- a/src/couch/include/couch_js_functions.hrl
+++ b/src/couch/include/couch_js_functions.hrl
@@ -64,6 +64,15 @@
});
}
+ var available_schemes = [\"simple\", \"pbkdf2\", \"bcrypt\"];
+ if (newDoc.password_scheme
+ && available_schemes.indexOf(newDoc.password_scheme) == -1) {
+ throw({
+ forbidden: 'Password scheme `' + newDoc.password_scheme
+ + '` not supported.'
+ });
+ }
+
if (newDoc.password_scheme === \"pbkdf2\") {
if (typeof(newDoc.iterations) !== \"number\") {
throw({forbidden: \"iterations must be a number.\"});
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 8fc0d89ff..524b72868 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -31,6 +31,7 @@
kernel,
stdlib,
crypto,
+ bcrypt,
sasl,
inets,
ssl,
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl
index fd5c9e101..f956b4b3d 100644
--- a/src/couch/src/couch.erl
+++ b/src/couch/src/couch.erl
@@ -21,6 +21,7 @@ deps() ->
inets,
os_mon,
crypto,
+ bcrypt,
public_key,
ssl,
ibrowse,
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index 3380f5739..16edd66ce 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -152,9 +152,31 @@
data_prop() | encoding_prop()
].
+-type disk_att_v1() :: {
+ Name :: binary(),
+ Type :: binary(),
+ Sp :: any(),
+ AttLen :: non_neg_integer(),
+ RevPos :: non_neg_integer(),
+ Md5 :: binary()
+}.
+
+-type disk_att_v2() :: {
+ Name :: binary(),
+ Type :: binary(),
+ Sp :: any(),
+ AttLen :: non_neg_integer(),
+ DiskLen :: non_neg_integer(),
+ RevPos :: non_neg_integer(),
+ Md5 :: binary(),
+ Enc :: identity | gzip
+}.
+
+-type disk_att_v3() :: {Base :: tuple(), Extended :: list()}.
--type att() :: #att{} | attachment().
+-type disk_att() :: disk_att_v1() | disk_att_v2() | disk_att_v3().
+-type att() :: #att{} | attachment() | disk_att().
new() ->
%% We construct a record by default for compatibility. This will be
@@ -297,11 +319,12 @@ size_info(Atts) ->
%% as safe as possible, avoiding the need for complicated disk versioning
%% schemes.
to_disk_term(#att{} = Att) ->
- {_, StreamIndex} = fetch(data, Att),
+ {stream, StreamEngine} = fetch(data, Att),
+ {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
{
fetch(name, Att),
fetch(type, Att),
- StreamIndex,
+ Sp,
fetch(att_len, Att),
fetch(disk_len, Att),
fetch(revpos, Att),
@@ -314,9 +337,13 @@ to_disk_term(Att) ->
fun
(data, {Props, Values}) ->
case lists:keytake(data, 1, Props) of
- {value, {_, {_Fd, Sp}}, Other} -> {Other, [Sp | Values]};
- {value, {_, Value}, Other} -> {Other, [Value | Values]};
- false -> {Props, [undefined |Values ]}
+ {value, {_, {stream, StreamEngine}}, Other} ->
+ {ok, Sp} = couch_stream:to_disk_term(StreamEngine),
+ {Other, [Sp | Values]};
+ {value, {_, Value}, Other} ->
+ {Other, [Value | Values]};
+ false ->
+ {Props, [undefined | Values]}
end;
(Key, {Props, Values}) ->
case lists:keytake(Key, 1, Props) of
@@ -337,9 +364,11 @@ to_disk_term(Att) ->
%% compression to remove these sorts of common bits (block level compression
%% with something like a shared dictionary that is checkpointed every now and
%% then).
-from_disk_term(Fd, {Base, Extended}) when is_tuple(Base), is_list(Extended) ->
- store(Extended, from_disk_term(Fd, Base));
-from_disk_term(Fd, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
+from_disk_term(StreamSrc, {Base, Extended})
+ when is_tuple(Base), is_list(Extended) ->
+ store(Extended, from_disk_term(StreamSrc, Base));
+from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
+ {ok, Stream} = open_stream(StreamSrc, Sp),
#att{
name=Name,
type=Type,
@@ -347,10 +376,11 @@ from_disk_term(Fd, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
disk_len=DiskLen,
md5=Md5,
revpos=RevPos,
- data={Fd,Sp},
+ data={stream, Stream},
encoding=upgrade_encoding(Enc)
};
-from_disk_term(Fd, {Name,Type,Sp,AttLen,RevPos,Md5}) ->
+from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,RevPos,Md5}) ->
+ {ok, Stream} = open_stream(StreamSrc, Sp),
#att{
name=Name,
type=Type,
@@ -358,9 +388,10 @@ from_disk_term(Fd, {Name,Type,Sp,AttLen,RevPos,Md5}) ->
disk_len=AttLen,
md5=Md5,
revpos=RevPos,
- data={Fd,Sp}
+ data={stream, Stream}
};
-from_disk_term(Fd, {Name,{Type,Sp,AttLen}}) ->
+from_disk_term(StreamSrc, {Name,{Type,Sp,AttLen}}) ->
+ {ok, Stream} = open_stream(StreamSrc, Sp),
#att{
name=Name,
type=Type,
@@ -368,7 +399,7 @@ from_disk_term(Fd, {Name,{Type,Sp,AttLen}}) ->
disk_len=AttLen,
md5= <<>>,
revpos=0,
- data={Fd,Sp}
+ data={stream, Stream}
}.
@@ -490,37 +521,20 @@ to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
{Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps ++ HeadersProp}}.
-flush(Fd, Att) ->
- flush_data(Fd, fetch(data, Att), Att).
+flush(Db, Att) ->
+ flush_data(Db, fetch(data, Att), Att).
-flush_data(Fd, {stream, {couch_bt_engine_stream, {OtherFd, StreamPointer}}},
- Att) ->
- flush_data(Fd, {OtherFd, StreamPointer}, Att);
-flush_data(Fd, {Fd0, _}, Att) when Fd0 == Fd ->
- % already written to our file, nothing to write
- Att;
-flush_data(Fd, {OtherFd, StreamPointer}, Att) ->
- [InMd5, InDiskLen] = fetch([md5, disk_len], Att),
- {NewStreamData, Len, _IdentityLen, Md5, IdentityMd5} =
- couch_stream:copy_to_new_stream(OtherFd, StreamPointer, Fd),
- couch_db:check_md5(IdentityMd5, InMd5),
- store([
- {data, {Fd, NewStreamData}},
- {md5, Md5},
- {att_len, Len},
- {disk_len, InDiskLen}
- ], Att);
-flush_data(Fd, Data, Att) when is_binary(Data) ->
- couch_db:with_stream(Fd, Att, fun(OutputStream) ->
+flush_data(Db, Data, Att) when is_binary(Data) ->
+ couch_db:with_stream(Db, Att, fun(OutputStream) ->
couch_stream:write(OutputStream, Data)
end);
-flush_data(Fd, Fun, Att) when is_function(Fun) ->
+flush_data(Db, Fun, Att) when is_function(Fun) ->
AttName = fetch(name, Att),
MaxAttSize = max_attachment_size(),
case fetch(att_len, Att) of
undefined ->
- couch_db:with_stream(Fd, Att, fun(OutputStream) ->
+ couch_db:with_stream(Db, Att, fun(OutputStream) ->
% Fun(MaxChunkSize, WriterFun) must call WriterFun
% once for each chunk of the attachment,
Fun(4096,
@@ -545,11 +559,11 @@ flush_data(Fd, Fun, Att) when is_function(Fun) ->
end);
AttLen ->
validate_attachment_size(AttName, AttLen, MaxAttSize),
- couch_db:with_stream(Fd, Att, fun(OutputStream) ->
+ couch_db:with_stream(Db, Att, fun(OutputStream) ->
write_streamed_attachment(OutputStream, Fun, AttLen)
end)
end;
-flush_data(Fd, {follows, Parser, Ref}, Att) ->
+flush_data(Db, {follows, Parser, Ref}, Att) ->
ParserRef = erlang:monitor(process, Parser),
Fun = fun() ->
Parser ! {get_bytes, Ref, self()},
@@ -563,9 +577,23 @@ flush_data(Fd, {follows, Parser, Ref}, Att) ->
end
end,
try
- flush_data(Fd, Fun, store(data, Fun, Att))
+ flush_data(Db, Fun, store(data, Fun, Att))
after
erlang:demonitor(ParserRef, [flush])
+ end;
+flush_data(Db, {stream, StreamEngine}, Att) ->
+ case couch_db:is_active_stream(Db, StreamEngine) of
+ true ->
+ % Already written
+ Att;
+ false ->
+ NewAtt = couch_db:with_stream(Db, Att, fun(OutputStream) ->
+ couch_stream:copy(StreamEngine, OutputStream)
+ end),
+ InMd5 = fetch(md5, Att),
+ OutMd5 = fetch(md5, NewAtt),
+ couch_util:check_md5(OutMd5, InMd5),
+ NewAtt
end.
@@ -594,9 +622,9 @@ foldl(Att, Fun, Acc) ->
foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) ->
Fun(Bin, Acc);
-foldl({Fd, Sp}, Att, Fun, Acc) ->
+foldl({stream, StreamEngine}, Att, Fun, Acc) ->
Md5 = fetch(md5, Att),
- couch_stream:foldl(Fd, Sp, Md5, Fun, Acc);
+ couch_stream:foldl(StreamEngine, Md5, Fun, Acc);
foldl(DataFun, Att, Fun, Acc) when is_function(DataFun) ->
Len = fetch(att_len, Att),
fold_streamed_data(DataFun, Len, Fun, Acc);
@@ -621,14 +649,15 @@ foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
range_foldl(Att, From, To, Fun, Acc) ->
- {Fd, Sp} = fetch(data, Att),
- couch_stream:range_foldl(Fd, Sp, From, To, Fun, Acc).
+ {stream, StreamEngine} = fetch(data, Att),
+ couch_stream:range_foldl(StreamEngine, From, To, Fun, Acc).
foldl_decode(Att, Fun, Acc) ->
case fetch([data, encoding], Att) of
- [{Fd, Sp}, Enc] ->
- couch_stream:foldl_decode(Fd, Sp, fetch(md5, Att), Enc, Fun, Acc);
+ [{stream, StreamEngine}, Enc] ->
+ couch_stream:foldl_decode(
+ StreamEngine, fetch(md5, Att), Enc, Fun, Acc);
[Fun2, identity] ->
fold_streamed_data(Fun2, fetch(att_len, Att), Fun, Acc)
end.
@@ -642,7 +671,7 @@ to_binary(Bin, _Att) when is_binary(Bin) ->
Bin;
to_binary(Iolist, _Att) when is_list(Iolist) ->
iolist_to_binary(Iolist);
-to_binary({_Fd,_Sp}, Att) ->
+to_binary({stream, _StreamEngine}, Att) ->
iolist_to_binary(
lists:reverse(foldl(Att, fun(Bin,Acc) -> [Bin|Acc] end, []))
);
@@ -718,9 +747,25 @@ validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
ok.
+open_stream(StreamSrc, Data) ->
+ case couch_db:is_db(StreamSrc) of
+ true ->
+ couch_db:open_read_stream(StreamSrc, Data);
+ false ->
+ case is_function(StreamSrc, 1) of
+ true ->
+ StreamSrc(Data);
+ false ->
+ erlang:error({invalid_stream_source, StreamSrc})
+ end
+ end.
+
+
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
+% Eww...
+-include("couch_bt_engine.hrl").
%% Test utilities
@@ -775,7 +820,7 @@ attachment_disk_term_test_() ->
{disk_len, 0},
{md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
{revpos, 4},
- {data, {fake_fd, fake_sp}},
+ {data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
{encoding, identity}
]),
BaseDiskTerm = {
@@ -789,11 +834,12 @@ attachment_disk_term_test_() ->
Headers = [{<<"X-Foo">>, <<"bar">>}],
ExtendedAttachment = store(headers, Headers, BaseAttachment),
ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
+ FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
{"Disk term tests", [
?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
- ?_assertEqual(BaseAttachment, from_disk_term(fake_fd, BaseDiskTerm)),
+ ?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
?_assertEqual(ExtendedDiskTerm, to_disk_term(ExtendedAttachment)),
- ?_assertEqual(ExtendedAttachment, from_disk_term(fake_fd, ExtendedDiskTerm))
+ ?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
]}.
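As the new open_stream/2 clause above shows, from_disk_term/2 now accepts either a db handle or a 1-arity fun as its stream source; a minimal sketch of the fun form (assumed usage with hypothetical names, not taken from the patch):

    %% Hypothetical helper: rebuild an attachment record from its disk term,
    %% resolving stream pointers directly against a couch_bt_engine state.
    att_from_disk(EngineSt, DiskTerm) ->
        StreamSrc = fun(Sp) -> couch_bt_engine:open_read_stream(EngineSt, Sp) end,
        couch_att:from_disk_term(StreamSrc, DiskTerm).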
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
index 45b34e1bd..425cce010 100644
--- a/src/couch/src/couch_auth_cache.erl
+++ b/src/couch/src/couch_auth_cache.erl
@@ -92,6 +92,8 @@ get_admin(UserName) when is_list(UserName) ->
"-pbkdf2-" ++ HashedPwdSaltAndIterations ->
[HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
make_admin_doc(HashedPwd, Salt, Iterations);
+ "-bcrypt-" ++ HashedPwd ->
+ make_admin_doc(HashedPwd);
_Else ->
nil
end.
@@ -109,6 +111,11 @@ make_admin_doc(DerivedKey, Salt, Iterations) ->
{<<"password_scheme">>, <<"pbkdf2">>},
{<<"derived_key">>, ?l2b(DerivedKey)}].
+make_admin_doc(DerivedKey) ->
+ [{<<"roles">>, [<<"_admin">>]},
+ {<<"password_scheme">>, <<"bcrypt">>},
+ {<<"derived_key">>, ?l2b(DerivedKey)}].
+
get_from_cache(UserName) ->
exec_if_auth_db(
fun(_AuthDb) ->
@@ -327,13 +334,8 @@ refresh_entries(AuthDb) ->
AuthDb2Seq = couch_db:get_update_seq(AuthDb2),
case AuthDb2Seq > AuthDbSeq of
true ->
- {ok, _, _} = couch_db:enum_docs_since(
- AuthDb2,
- AuthDbSeq,
- fun(DocInfo, _, _) -> refresh_entry(AuthDb2, DocInfo) end,
- AuthDbSeq,
- []
- ),
+ Fun = fun(DocInfo, _) -> refresh_entry(AuthDb2, DocInfo) end,
+ {ok, _} = couch_db:fold_changes(AuthDb2, AuthDbSeq, Fun, nil),
true = ets:insert(?STATE, {auth_db, AuthDb2});
false ->
ok
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
new file mode 100644
index 000000000..940689708
--- /dev/null
+++ b/src/couch/src/couch_bt_engine.erl
@@ -0,0 +1,962 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_bt_engine).
+-behavior(couch_db_engine).
+
+-export([
+ exists/1,
+
+ delete/3,
+ delete_compaction_files/3,
+
+ init/2,
+ terminate/2,
+ handle_db_updater_call/2,
+ handle_db_updater_info/2,
+
+ incref/1,
+ decref/1,
+ monitored_by/1,
+
+ last_activity/1,
+
+ get_compacted_seq/1,
+ get_del_doc_count/1,
+ get_disk_version/1,
+ get_doc_count/1,
+ get_epochs/1,
+ get_last_purged/1,
+ get_purge_seq/1,
+ get_revs_limit/1,
+ get_security/1,
+ get_size_info/1,
+ get_update_seq/1,
+ get_uuid/1,
+
+ set_revs_limit/2,
+ set_security/2,
+
+ open_docs/2,
+ open_local_docs/2,
+ read_doc_body/2,
+
+ serialize_doc/2,
+ write_doc_body/2,
+ write_doc_infos/4,
+
+ commit_data/1,
+
+ open_write_stream/2,
+ open_read_stream/2,
+ is_active_stream/2,
+
+ fold_docs/4,
+ fold_local_docs/4,
+ fold_changes/5,
+ count_changes_since/2,
+
+ start_compaction/4,
+ finish_compaction/4
+]).
+
+
+-export([
+ init_state/4
+]).
+
+
+-export([
+ id_tree_split/1,
+ id_tree_join/2,
+ id_tree_reduce/2,
+
+ seq_tree_split/1,
+ seq_tree_join/2,
+ seq_tree_reduce/2,
+
+ local_tree_split/1,
+ local_tree_join/2
+]).
+
+
+% Used by the compactor
+-export([
+ set_update_seq/2,
+ copy_security/2
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("couch_bt_engine.hrl").
+
+
+exists(FilePath) ->
+ case filelib:is_file(FilePath) of
+ true ->
+ true;
+ false ->
+ filelib:is_file(FilePath ++ ".compact")
+ end.
+
+
+delete(RootDir, FilePath, Async) ->
+ %% Delete any leftover compaction files. If we don't do this a
+ %% subsequent request for this DB will try to open them to use
+ %% as a recovery.
+ delete_compaction_files(RootDir, FilePath, [{context, delete}]),
+
+ % Delete the actual database file
+ couch_file:delete(RootDir, FilePath, Async).
+
+
+delete_compaction_files(RootDir, FilePath, DelOpts) ->
+ lists:foreach(fun(Ext) ->
+ couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
+ end, [".compact", ".compact.data", ".compact.meta"]).
+
+
+init(FilePath, Options) ->
+ {ok, Fd} = open_db_file(FilePath, Options),
+ Header = case lists:member(create, Options) of
+ true ->
+ delete_compaction_files(FilePath),
+ Header0 = couch_bt_engine_header:new(),
+ ok = couch_file:write_header(Fd, Header0),
+ Header0;
+ false ->
+ case couch_file:read_header(Fd) of
+ {ok, Header0} ->
+ Header0;
+ no_valid_header ->
+ delete_compaction_files(FilePath),
+ Header0 = couch_bt_engine_header:new(),
+ ok = couch_file:write_header(Fd, Header0),
+ Header0
+ end
+ end,
+ {ok, init_state(FilePath, Fd, Header, Options)}.
+
+
+terminate(_Reason, St) ->
+ % If the reason we died is because our fd disappeared
+ % then we don't need to try closing it again.
+ Ref = St#st.fd_monitor,
+ if Ref == closed -> ok; true ->
+ ok = couch_file:close(St#st.fd),
+ receive
+ {'DOWN', Ref, _, _, _} ->
+ ok
+ after 500 ->
+ ok
+ end
+ end,
+ couch_util:shutdown_sync(St#st.fd),
+ ok.
+
+
+handle_db_updater_call(Msg, St) ->
+ {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
+
+
+handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor=Ref} = St) ->
+ {stop, normal, St#st{fd=undefined, fd_monitor=closed}}.
+
+
+incref(St) ->
+ {ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}.
+
+
+decref(St) ->
+ true = erlang:demonitor(St#st.fd_monitor, [flush]),
+ ok.
+
+
+monitored_by(St) ->
+ case erlang:process_info(St#st.fd, monitored_by) of
+ {monitored_by, Pids} ->
+ Pids;
+ _ ->
+ []
+ end.
+
+
+last_activity(#st{fd = Fd}) ->
+ couch_file:last_read(Fd).
+
+
+get_compacted_seq(#st{header = Header}) ->
+ couch_bt_engine_header:get(Header, compacted_seq).
+
+
+get_del_doc_count(#st{} = St) ->
+ {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
+ element(2, Reds).
+
+
+get_disk_version(#st{header = Header}) ->
+ couch_bt_engine_header:get(Header, disk_version).
+
+
+get_doc_count(#st{} = St) ->
+ {ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
+ element(1, Reds).
+
+
+get_epochs(#st{header = Header}) ->
+ couch_bt_engine_header:get(Header, epochs).
+
+
+get_last_purged(#st{header = Header} = St) ->
+ case couch_bt_engine_header:get(Header, purged_docs) of
+ nil ->
+ [];
+ Pointer ->
+ {ok, PurgeInfo} = couch_file:pread_term(St#st.fd, Pointer),
+ PurgeInfo
+ end.
+
+
+get_purge_seq(#st{header = Header}) ->
+ couch_bt_engine_header:get(Header, purge_seq).
+
+
+get_revs_limit(#st{header = Header}) ->
+ couch_bt_engine_header:get(Header, revs_limit).
+
+
+get_size_info(#st{} = St) ->
+ {ok, FileSize} = couch_file:bytes(St#st.fd),
+ {ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree),
+ SizeInfo0 = element(3, DbReduction),
+ SizeInfo = case SizeInfo0 of
+ SI when is_record(SI, size_info) ->
+ SI;
+ {AS, ES} ->
+ #size_info{active=AS, external=ES};
+ AS ->
+ #size_info{active=AS}
+ end,
+ ActiveSize = active_size(St, SizeInfo),
+ ExternalSize = SizeInfo#size_info.external,
+ [
+ {active, ActiveSize},
+ {external, ExternalSize},
+ {file, FileSize}
+ ].
+
+
+get_security(#st{header = Header} = St) ->
+ case couch_bt_engine_header:get(Header, security_ptr) of
+ undefined ->
+ [];
+ Pointer ->
+ {ok, SecProps} = couch_file:pread_term(St#st.fd, Pointer),
+ SecProps
+ end.
+
+
+get_update_seq(#st{header = Header}) ->
+ couch_bt_engine_header:get(Header, update_seq).
+
+
+get_uuid(#st{header = Header}) ->
+ couch_bt_engine_header:get(Header, uuid).
+
+
+set_revs_limit(#st{header = Header} = St, RevsLimit) ->
+ NewSt = St#st{
+ header = couch_bt_engine_header:set(Header, [
+ {revs_limit, RevsLimit}
+ ]),
+ needs_commit = true
+ },
+ {ok, increment_update_seq(NewSt)}.
+
+
+set_security(#st{header = Header} = St, NewSecurity) ->
+ Options = [{compression, St#st.compression}],
+ {ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options),
+ NewSt = St#st{
+ header = couch_bt_engine_header:set(Header, [
+ {security_ptr, Ptr}
+ ]),
+ needs_commit = true
+ },
+ {ok, increment_update_seq(NewSt)}.
+
+
+open_docs(#st{} = St, DocIds) ->
+ Results = couch_btree:lookup(St#st.id_tree, DocIds),
+ lists:map(fun
+ ({ok, FDI}) -> FDI;
+ (not_found) -> not_found
+ end, Results).
+
+
+open_local_docs(#st{} = St, DocIds) ->
+ Results = couch_btree:lookup(St#st.local_tree, DocIds),
+ lists:map(fun
+ ({ok, Doc}) -> Doc;
+ (not_found) -> not_found
+ end, Results).
+
+
+read_doc_body(#st{} = St, #doc{} = Doc) ->
+ {ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body),
+ Doc#doc{
+ body = Body,
+ atts = Atts
+ }.
+
+
+serialize_doc(#st{} = St, #doc{} = Doc) ->
+ Compress = fun(Term) ->
+ case couch_compress:is_compressed(Term, St#st.compression) of
+ true -> Term;
+ false -> couch_compress:compress(Term, St#st.compression)
+ end
+ end,
+ Body = Compress(Doc#doc.body),
+ Atts = Compress(Doc#doc.atts),
+ SummaryBin = ?term_to_bin({Body, Atts}),
+ Md5 = crypto:hash(md5, SummaryBin),
+ Data = couch_file:assemble_file_chunk(SummaryBin, Md5),
+ % TODO: This is a terrible hack to get around the issues
+ % in COUCHDB-3255. We'll need to come back and figure
+ % out a better approach to handling the case when we
+ % need to generate a new revision id after the doc
+ % has been serialized.
+ Doc#doc{
+ body = Data,
+ meta = [{comp_body, Body} | Doc#doc.meta]
+ }.
+
+
+write_doc_body(St, #doc{} = Doc) ->
+ #st{
+ fd = Fd
+ } = St,
+ {ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body),
+ {ok, Doc#doc{body = Ptr}, Written}.
+
+
+write_doc_infos(#st{} = St, Pairs, LocalDocs, PurgedIdRevs) ->
+ #st{
+ id_tree = IdTree,
+ seq_tree = SeqTree,
+ local_tree = LocalTree
+ } = St,
+ FinalAcc = lists:foldl(fun({OldFDI, NewFDI}, Acc) ->
+ {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
+ case {OldFDI, NewFDI} of
+ {not_found, #full_doc_info{}} ->
+ {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
+ {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
+ NewAddAcc = [NewFDI | AddAcc],
+ NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
+ {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
+ {#full_doc_info{id = Id}, not_found} ->
+ NewRemIdsAcc = [Id | RemIdsAcc],
+ NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
+ {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
+ end
+ end, {[], [], []}, Pairs),
+
+ {Add, RemIds, RemSeqs} = FinalAcc,
+ {ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds),
+ {ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs),
+
+ {AddLDocs, RemLDocIds} = lists:foldl(fun(Doc, {AddAcc, RemAcc}) ->
+ case Doc#doc.deleted of
+ true ->
+ {AddAcc, [Doc#doc.id | RemAcc]};
+ false ->
+ {[Doc | AddAcc], RemAcc}
+ end
+ end, {[], []}, LocalDocs),
+ {ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds),
+
+ NewUpdateSeq = lists:foldl(fun(#full_doc_info{update_seq=Seq}, Acc) ->
+ erlang:max(Seq, Acc)
+ end, get_update_seq(St), Add),
+
+ NewHeader = case PurgedIdRevs of
+ [] ->
+ couch_bt_engine_header:set(St#st.header, [
+ {update_seq, NewUpdateSeq}
+ ]);
+ _ ->
+ {ok, Ptr, _} = couch_file:append_term(St#st.fd, PurgedIdRevs),
+ OldPurgeSeq = couch_bt_engine_header:get(St#st.header, purge_seq),
+ % We bump NewUpdateSeq because we have to ensure that
+ % indexers see that they need to process the new purge
+ % information.
+ couch_bt_engine_header:set(St#st.header, [
+ {update_seq, NewUpdateSeq + 1},
+ {purge_seq, OldPurgeSeq + 1},
+ {purged_docs, Ptr}
+ ])
+ end,
+
+ {ok, St#st{
+ header = NewHeader,
+ id_tree = IdTree2,
+ seq_tree = SeqTree2,
+ local_tree = LocalTree2,
+ needs_commit = true
+ }}.
+
+
+commit_data(St) ->
+ #st{
+ fd = Fd,
+ fsync_options = FsyncOptions,
+ header = OldHeader,
+ needs_commit = NeedsCommit
+ } = St,
+
+ NewHeader = update_header(St, OldHeader),
+
+ case NewHeader /= OldHeader orelse NeedsCommit of
+ true ->
+ Before = lists:member(before_header, FsyncOptions),
+ After = lists:member(after_header, FsyncOptions),
+
+ if Before -> couch_file:sync(Fd); true -> ok end,
+ ok = couch_file:write_header(Fd, NewHeader),
+ if After -> couch_file:sync(Fd); true -> ok end,
+
+ {ok, St#st{
+ header = NewHeader,
+ needs_commit = false
+ }};
+ false ->
+ {ok, St}
+ end.
+
+
+open_write_stream(#st{} = St, Options) ->
+ couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options).
+
+
+open_read_stream(#st{} = St, StreamSt) ->
+ {ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}.
+
+
+is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) ->
+ St#st.fd == Fd;
+is_active_stream(_, _) ->
+ false.
+
+
+fold_docs(St, UserFun, UserAcc, Options) ->
+ fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options).
+
+
+fold_local_docs(St, UserFun, UserAcc, Options) ->
+ case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of
+ {ok, _Reds, FinalAcc} -> {ok, null, FinalAcc};
+ {ok, FinalAcc} -> {ok, FinalAcc}
+ end.
+
+
+fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
+ Fun = fun drop_reductions/4,
+ InAcc = {UserFun, UserAcc},
+ Opts = [{start_key, SinceSeq + 1}] ++ Options,
+ {ok, _, OutAcc} = couch_btree:fold(St#st.seq_tree, Fun, InAcc, Opts),
+ {_, FinalUserAcc} = OutAcc,
+ {ok, FinalUserAcc}.
+
+
+count_changes_since(St, SinceSeq) ->
+ BTree = St#st.seq_tree,
+ FoldFun = fun(_SeqStart, PartialReds, 0) ->
+ {ok, couch_btree:final_reduce(BTree, PartialReds)}
+ end,
+ Opts = [{start_key, SinceSeq + 1}],
+ {ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts),
+ Changes.
+
+
+start_compaction(St, DbName, Options, Parent) ->
+ Args = [St, DbName, Options, Parent],
+ Pid = spawn_link(couch_bt_engine_compactor, start, Args),
+ {ok, St, Pid}.
+
+
+finish_compaction(OldState, DbName, Options, CompactFilePath) ->
+ {ok, NewState1} = ?MODULE:init(CompactFilePath, Options),
+ OldSeq = get_update_seq(OldState),
+ NewSeq = get_update_seq(NewState1),
+ case OldSeq == NewSeq of
+ true ->
+ finish_compaction_int(OldState, NewState1);
+ false ->
+ couch_log:info("Compaction file still behind main file "
+ "(update seq=~p. compact update seq=~p). Retrying.",
+ [OldSeq, NewSeq]),
+ ok = decref(NewState1),
+ start_compaction(OldState, DbName, Options, self())
+ end.
+
+
+id_tree_split(#full_doc_info{}=Info) ->
+ #full_doc_info{
+ id = Id,
+ update_seq = Seq,
+ deleted = Deleted,
+ sizes = SizeInfo,
+ rev_tree = Tree
+ } = Info,
+ {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
+
+
+id_tree_join(Id, {HighSeq, Deleted, DiskTree}) ->
+ % Handle old formats before data_size was added
+ id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
+
+id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
+ #full_doc_info{
+ id = Id,
+ update_seq = HighSeq,
+ deleted = ?i2b(Deleted),
+ sizes = couch_db_updater:upgrade_sizes(Sizes),
+ rev_tree = rev_tree(DiskTree)
+ }.
+
+
+id_tree_reduce(reduce, FullDocInfos) ->
+ lists:foldl(fun(Info, {NotDeleted, Deleted, Sizes}) ->
+ Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
+ case Info#full_doc_info.deleted of
+ true ->
+ {NotDeleted, Deleted + 1, Sizes2};
+ false ->
+ {NotDeleted + 1, Deleted, Sizes2}
+ end
+ end, {0, 0, #size_info{}}, FullDocInfos);
+id_tree_reduce(rereduce, Reds) ->
+ lists:foldl(fun
+ ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
+ % pre 1.2 format, will be upgraded on compaction
+ {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
+ ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
+ AccSizes2 = reduce_sizes(AccSizes, Sizes),
+ {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
+ end, {0, 0, #size_info{}}, Reds).
+
+
+seq_tree_split(#full_doc_info{}=Info) ->
+ #full_doc_info{
+ id = Id,
+ update_seq = Seq,
+ deleted = Del,
+ sizes = SizeInfo,
+ rev_tree = Tree
+ } = Info,
+ {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
+
+
+seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
+ seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree});
+
+seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
+ #full_doc_info{
+ id = Id,
+ update_seq = Seq,
+ deleted = ?i2b(Del),
+ sizes = join_sizes(Sizes),
+ rev_tree = rev_tree(DiskTree)
+ };
+
+seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
+ % Older versions stored #doc_info records in the seq_tree.
+ % Compact to upgrade.
+ Revs = lists:map(fun({Rev, Seq, Bp}) ->
+ #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
+ end, RevInfos),
+ DeletedRevs = lists:map(fun({Rev, Seq, Bp}) ->
+ #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
+ end, DeletedRevInfos),
+ #doc_info{
+ id = Id,
+ high_seq = KeySeq,
+ revs = Revs ++ DeletedRevs
+ }.
+
+
+seq_tree_reduce(reduce, DocInfos) ->
+ % count the number of documents
+ length(DocInfos);
+seq_tree_reduce(rereduce, Reds) ->
+ lists:sum(Reds).
+
+
+local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) ->
+ #doc{
+ id = Id,
+ body = BodyData
+ } = Doc,
+ {Id, {binary_to_integer(Rev), BodyData}};
+
+local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
+ #doc{
+ id = Id,
+ body = BodyData
+ } = Doc,
+ {Id, {Rev, BodyData}}.
+
+
+local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
+ #doc{
+ id = Id,
+ revs = {0, [integer_to_binary(Rev)]},
+ body = BodyData
+ }.
+
+
+set_update_seq(#st{header = Header} = St, UpdateSeq) ->
+ {ok, St#st{
+ header = couch_bt_engine_header:set(Header, [
+ {update_seq, UpdateSeq}
+ ]),
+ needs_commit = true
+ }}.
+
+
+copy_security(#st{header = Header} = St, SecProps) ->
+ Options = [{compression, St#st.compression}],
+ {ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options),
+ {ok, St#st{
+ header = couch_bt_engine_header:set(Header, [
+ {security_ptr, Ptr}
+ ]),
+ needs_commit = true
+ }}.
+
+
+open_db_file(FilePath, Options) ->
+ case couch_file:open(FilePath, Options) of
+ {ok, Fd} ->
+ {ok, Fd};
+ {error, enoent} ->
+ % Couldn't find file. Is there a compact version? This can
+ % happen (rarely) if we crashed during the file switch.
+ case couch_file:open(FilePath ++ ".compact", [nologifmissing]) of
+ {ok, Fd} ->
+ Fmt = "Recovering from compaction file: ~s~s",
+ couch_log:info(Fmt, [FilePath, ".compact"]),
+ ok = file:rename(FilePath ++ ".compact", FilePath),
+ ok = couch_file:sync(Fd),
+ {ok, Fd};
+ {error, enoent} ->
+ throw({not_found, no_db_file})
+ end;
+ Error ->
+ throw(Error)
+ end.
+
+
+init_state(FilePath, Fd, Header0, Options) ->
+ DefaultFSync = "[before_header, after_header, on_file_open]",
+ FsyncStr = config:get("couchdb", "fsync_options", DefaultFSync),
+ {ok, FsyncOptions} = couch_util:parse_term(FsyncStr),
+
+ case lists:member(on_file_open, FsyncOptions) of
+ true -> ok = couch_file:sync(Fd);
+ _ -> ok
+ end,
+
+ Compression = couch_compress:get_compression_method(),
+
+ Header1 = couch_bt_engine_header:upgrade(Header0),
+ Header = set_default_security_object(Fd, Header1, Compression, Options),
+
+ IdTreeState = couch_bt_engine_header:id_tree_state(Header),
+ {ok, IdTree} = couch_btree:open(IdTreeState, Fd, [
+ {split, fun ?MODULE:id_tree_split/1},
+ {join, fun ?MODULE:id_tree_join/2},
+ {reduce, fun ?MODULE:id_tree_reduce/2},
+ {compression, Compression}
+ ]),
+
+ SeqTreeState = couch_bt_engine_header:seq_tree_state(Header),
+ {ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [
+ {split, fun ?MODULE:seq_tree_split/1},
+ {join, fun ?MODULE:seq_tree_join/2},
+ {reduce, fun ?MODULE:seq_tree_reduce/2},
+ {compression, Compression}
+ ]),
+
+ LocalTreeState = couch_bt_engine_header:local_tree_state(Header),
+ {ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [
+ {split, fun ?MODULE:local_tree_split/1},
+ {join, fun ?MODULE:local_tree_join/2},
+ {compression, Compression}
+ ]),
+
+ ok = couch_file:set_db_pid(Fd, self()),
+
+ St = #st{
+ filepath = FilePath,
+ fd = Fd,
+ fd_monitor = erlang:monitor(process, Fd),
+ fsync_options = FsyncOptions,
+ header = Header,
+ needs_commit = false,
+ id_tree = IdTree,
+ seq_tree = SeqTree,
+ local_tree = LocalTree,
+ compression = Compression
+ },
+
+ % If this is a new database we've just created a
+ % new UUID and default security object which need
+ % to be written to disk.
+ case Header /= Header0 of
+ true ->
+ {ok, NewSt} = commit_data(St),
+ NewSt;
+ false ->
+ St
+ end.
+
+
+update_header(St, Header) ->
+ couch_bt_engine_header:set(Header, [
+ {seq_tree_state, couch_btree:get_state(St#st.seq_tree)},
+ {id_tree_state, couch_btree:get_state(St#st.id_tree)},
+ {local_tree_state, couch_btree:get_state(St#st.local_tree)}
+ ]).
+
+
+increment_update_seq(#st{header = Header} = St) ->
+ UpdateSeq = couch_bt_engine_header:get(Header, update_seq),
+ St#st{
+ header = couch_bt_engine_header:set(Header, [
+ {update_seq, UpdateSeq + 1}
+ ])
+ }.
+
+
+set_default_security_object(Fd, Header, Compression, Options) ->
+ case couch_bt_engine_header:get(Header, security_ptr) of
+ Pointer when is_integer(Pointer) ->
+ Header;
+ _ ->
+ Default = couch_util:get_value(default_security_object, Options),
+ AppendOpts = [{compression, Compression}],
+ {ok, Ptr, _} = couch_file:append_term(Fd, Default, AppendOpts),
+ couch_bt_engine_header:set(Header, security_ptr, Ptr)
+ end.
+
+
+delete_compaction_files(FilePath) ->
+ RootDir = config:get("couchdb", "database_dir", "."),
+ DelOpts = [{context, delete}],
+ delete_compaction_files(RootDir, FilePath, DelOpts).
+
+
+rev_tree(DiskTree) ->
+ couch_key_tree:map(fun
+ (_RevId, {Del, Ptr, Seq}) ->
+ #leaf{
+ deleted = ?i2b(Del),
+ ptr = Ptr,
+ seq = Seq
+ };
+ (_RevId, {Del, Ptr, Seq, Size}) ->
+ #leaf{
+ deleted = ?i2b(Del),
+ ptr = Ptr,
+ seq = Seq,
+ sizes = couch_db_updater:upgrade_sizes(Size)
+ };
+ (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
+ #leaf{
+ deleted = ?i2b(Del),
+ ptr = Ptr,
+ seq = Seq,
+ sizes = couch_db_updater:upgrade_sizes(Sizes),
+ atts = Atts
+ };
+ (_RevId, ?REV_MISSING) ->
+ ?REV_MISSING
+ end, DiskTree).
+
+
+disk_tree(RevTree) ->
+ couch_key_tree:map(fun
+ (_RevId, ?REV_MISSING) ->
+ ?REV_MISSING;
+ (_RevId, #leaf{} = Leaf) ->
+ #leaf{
+ deleted = Del,
+ ptr = Ptr,
+ seq = Seq,
+ sizes = Sizes,
+ atts = Atts
+ } = Leaf,
+ {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
+ end, RevTree).
+
+
+split_sizes(#size_info{}=SI) ->
+ {SI#size_info.active, SI#size_info.external}.
+
+
+join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
+ #size_info{active=Active, external=External}.
+
+
+reduce_sizes(nil, _) ->
+ nil;
+reduce_sizes(_, nil) ->
+ nil;
+reduce_sizes(#size_info{}=S1, #size_info{}=S2) ->
+ #size_info{
+ active = S1#size_info.active + S2#size_info.active,
+ external = S1#size_info.external + S2#size_info.external
+ };
+reduce_sizes(S1, S2) ->
+ US1 = couch_db_updater:upgrade_sizes(S1),
+ US2 = couch_db_updater:upgrade_sizes(S2),
+ reduce_sizes(US1, US2).
+
+
+active_size(#st{} = St, #size_info{} = SI) ->
+ Trees = [
+ St#st.id_tree,
+ St#st.seq_tree,
+ St#st.local_tree
+ ],
+ lists:foldl(fun(T, Acc) ->
+ case couch_btree:size(T) of
+ _ when Acc == null ->
+ null;
+ nil ->
+ null;
+ Size ->
+ Acc + Size
+ end
+ end, SI#size_info.active, Trees).
+
+
+fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
+ Fun = case lists:member(include_deleted, Options) of
+ true -> fun include_deleted/4;
+ false -> fun skip_deleted/4
+ end,
+ RedFun = case lists:member(include_reductions, Options) of
+ true -> fun include_reductions/4;
+ false -> fun drop_reductions/4
+ end,
+ InAcc = {RedFun, {UserFun, UserAcc}},
+ {ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options),
+ {_, {_, FinalUserAcc}} = OutAcc,
+ case lists:member(include_reductions, Options) of
+ true when Tree == St#st.id_tree ->
+ {ok, fold_docs_reduce_to_count(Reds), FinalUserAcc};
+ true when Tree == St#st.local_tree ->
+ {ok, 0, FinalUserAcc};
+ false ->
+ {ok, FinalUserAcc}
+ end.
+
+
+include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
+ {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
+ {Go, {UserFun, NewUserAcc}}.
+
+
+% First element of the reductions is the total
+% number of undeleted documents.
+skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) ->
+ {skip, Acc};
+skip_deleted(visit, #full_doc_info{deleted = true}, _, Acc) ->
+ {ok, Acc};
+skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
+ {Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
+ {Go, {UserFun, NewUserAcc}}.
+
+
+include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) ->
+ {Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc),
+ {Go, {UserFun, NewUserAcc}};
+include_reductions(_, _, _, Acc) ->
+ {ok, Acc}.
+
+
+drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) ->
+ {Go, NewUserAcc} = UserFun(FDI, UserAcc),
+ {Go, {UserFun, NewUserAcc}};
+drop_reductions(_, _, _, Acc) ->
+ {ok, Acc}.
+
+
+fold_docs_reduce_to_count(Reds) ->
+ RedFun = fun id_tree_reduce/2,
+ FinalRed = couch_btree:final_reduce(RedFun, Reds),
+ element(1, FinalRed).
+
+
+finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
+ #st{
+ filepath = FilePath,
+ local_tree = OldLocal
+ } = OldSt,
+ #st{
+ filepath = CompactDataPath,
+ header = Header,
+ local_tree = NewLocal1
+ } = NewSt1,
+
+ % suck up all the local docs into memory and write them to the new db
+ LoadFun = fun(Value, _Offset, Acc) ->
+ {ok, [Value | Acc]}
+ end,
+ {ok, _, LocalDocs} = couch_btree:foldl(OldLocal, LoadFun, []),
+ {ok, NewLocal2} = couch_btree:add(NewLocal1, LocalDocs),
+
+ {ok, NewSt2} = commit_data(NewSt1#st{
+ header = couch_bt_engine_header:set(Header, [
+ {compacted_seq, get_update_seq(OldSt)},
+ {revs_limit, get_revs_limit(OldSt)}
+ ]),
+ local_tree = NewLocal2
+ }),
+
+ % Rename our *.compact.data file to *.compact so that if we
+ % die between deleting the old file and renaming *.compact
+ % we can recover correctly.
+ ok = file:rename(CompactDataPath, FilePath ++ ".compact"),
+
+ % Remove the uncompacted database file
+ RootDir = config:get("couchdb", "database_dir", "."),
+ couch_file:delete(RootDir, FilePath),
+
+ % Move our compacted file into its final location
+ ok = file:rename(FilePath ++ ".compact", FilePath),
+
+ % Delete the old meta compaction file after promoting
+ % the compaction file.
+ couch_file:delete(RootDir, FilePath ++ ".compact.meta"),
+
+ % We're finished with our old state
+ decref(OldSt),
+
+ % And return our finished new state
+ {ok, NewSt2#st{
+ filepath = FilePath
+ }, undefined}.
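fold_changes/5 above adapts couch_btree:fold to a two-argument user fun that returns {Go, NewAcc}; a minimal caller sketch under that contract (assumed usage, hypothetical function name, not part of the patch):

    %% Hypothetical: collect the full_doc_infos changed since sequence 0.
    collect_changes(St) ->
        Fun = fun(FullDocInfo, Acc) -> {ok, [FullDocInfo | Acc]} end,
        {ok, Acc} = couch_bt_engine:fold_changes(St, 0, Fun, [], []),
        lists:reverse(Acc).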
diff --git a/src/couch/src/couch_bt_engine.hrl b/src/couch/src/couch_bt_engine.hrl
new file mode 100644
index 000000000..7f52d8fdd
--- /dev/null
+++ b/src/couch/src/couch_bt_engine.hrl
@@ -0,0 +1,24 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(st, {
+ filepath,
+ fd,
+ fd_monitor,
+ fsync_options,
+ header,
+ needs_commit,
+ id_tree,
+ seq_tree,
+ local_tree,
+ compression
+}).
diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl
new file mode 100644
index 000000000..2f7a3513a
--- /dev/null
+++ b/src/couch/src/couch_bt_engine_compactor.erl
@@ -0,0 +1,496 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_bt_engine_compactor).
+
+
+-export([
+ start/4
+]).
+
+
+-include_lib("couch/include/couch_db.hrl").
+-include("couch_bt_engine.hrl").
+
+
+-record(comp_header, {
+ db_header,
+ meta_state
+}).
+
+-record(merge_st, {
+ id_tree,
+ seq_tree,
+ curr,
+ rem_seqs,
+ infos
+}).
+
+
+start(#st{} = St, DbName, Options, Parent) ->
+ erlang:put(io_priority, {db_compact, DbName}),
+ #st{
+ filepath = FilePath,
+ header = Header
+ } = St,
+ couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]),
+
+ {ok, NewSt, DName, DFd, MFd, Retry} =
+ open_compaction_files(Header, FilePath, Options),
+ erlang:monitor(process, MFd),
+
+ % This is a bit worrisome. init_db/4 will monitor the data fd
+ % but it doesn't know about the meta fd. For now I'll maintain
+ % that the data fd is the old normal fd and meta fd is special
+ % and hope everything works out for the best.
+ unlink(DFd),
+
+ NewSt1 = copy_purge_info(St, NewSt),
+ NewSt2 = copy_compact(DbName, St, NewSt1, Retry),
+ NewSt3 = sort_meta_data(NewSt2),
+ NewSt4 = commit_compaction_data(NewSt3),
+ NewSt5 = copy_meta_data(NewSt4),
+ {ok, NewSt6} = couch_bt_engine:commit_data(NewSt5),
+ ok = couch_bt_engine:decref(NewSt6),
+ ok = couch_file:close(MFd),
+
+ % Done
+ gen_server:cast(Parent, {compact_done, couch_bt_engine, DName}).
+
+
+open_compaction_files(SrcHdr, DbFilePath, Options) ->
+ DataFile = DbFilePath ++ ".compact.data",
+ MetaFile = DbFilePath ++ ".compact.meta",
+ {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
+ {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
+ DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr),
+ case {DataHdr, MetaHdr} of
+ {#comp_header{}=A, #comp_header{}=A} ->
+ DbHeader = A#comp_header.db_header,
+ St0 = couch_bt_engine:init_state(
+ DataFile, DataFd, DbHeader, Options),
+ St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_state),
+ {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
+ _ when DataHdrIsDbHdr ->
+ Header = couch_bt_engine_header:from(SrcHdr),
+ ok = reset_compaction_file(MetaFd, Header),
+ St0 = couch_bt_engine:init_state(
+ DataFile, DataFd, DataHdr, Options),
+ St1 = bind_emsort(St0, MetaFd, nil),
+ {ok, St1, DataFile, DataFd, MetaFd, St0#st.id_tree};
+ _ ->
+ Header = couch_bt_engine_header:from(SrcHdr),
+ ok = reset_compaction_file(DataFd, Header),
+ ok = reset_compaction_file(MetaFd, Header),
+ St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
+ St1 = bind_emsort(St0, MetaFd, nil),
+ {ok, St1, DataFile, DataFd, MetaFd, nil}
+ end.
+
+
+copy_purge_info(OldSt, NewSt) ->
+ OldHdr = OldSt#st.header,
+ NewHdr = NewSt#st.header,
+ OldPurgeSeq = couch_bt_engine_header:purge_seq(OldHdr),
+ case OldPurgeSeq > 0 of
+ true ->
+ Purged = couch_bt_engine:get_last_purged(OldSt),
+ Opts = [{compression, NewSt#st.compression}],
+ {ok, Ptr, _} = couch_file:append_term(NewSt#st.fd, Purged, Opts),
+ NewNewHdr = couch_bt_engine_header:set(NewHdr, [
+ {purge_seq, OldPurgeSeq},
+ {purged_docs, Ptr}
+ ]),
+ NewSt#st{header = NewNewHdr};
+ false ->
+ NewSt
+ end.
+
+
+copy_compact(DbName, St, NewSt0, Retry) ->
+ Compression = couch_compress:get_compression_method(),
+ NewSt = NewSt0#st{compression = Compression},
+ NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0),
+ TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq),
+ BufferSize = list_to_integer(
+ config:get("database_compaction", "doc_buffer_size", "524288")),
+ CheckpointAfter = couch_util:to_integer(
+ config:get("database_compaction", "checkpoint_after",
+ BufferSize * 10)),
+
+ EnumBySeqFun =
+ fun(DocInfo, _Offset,
+ {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
+
+ Seq = case DocInfo of
+ #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
+ #doc_info{} -> DocInfo#doc_info.high_seq
+ end,
+
+ AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
+ if AccUncopiedSize2 >= BufferSize ->
+ NewSt2 = copy_docs(
+ St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry),
+ AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
+ if AccCopiedSize2 >= CheckpointAfter ->
+ {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
+ CommNewSt3 = commit_compaction_data(NewSt3),
+ {ok, {CommNewSt3, [], 0, 0}};
+ true ->
+ {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
+ {ok, {NewSt3, [], 0, AccCopiedSize2}}
+ end;
+ true ->
+ {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2,
+ AccCopiedSize}}
+ end
+ end,
+
+ TaskProps0 = [
+ {type, database_compaction},
+ {database, DbName},
+ {progress, 0},
+ {changes_done, 0},
+ {total_changes, TotalChanges}
+ ],
+ case (Retry =/= nil) and couch_task_status:is_task_added() of
+ true ->
+ couch_task_status:update([
+ {retry, true},
+ {progress, 0},
+ {changes_done, 0},
+ {total_changes, TotalChanges}
+ ]);
+ false ->
+ couch_task_status:add_task(TaskProps0),
+ couch_task_status:set_update_frequency(500)
+ end,
+
+ {ok, _, {NewSt2, Uncopied, _, _}} =
+ couch_btree:foldl(St#st.seq_tree, EnumBySeqFun,
+ {NewSt, [], 0, 0},
+ [{start_key, NewUpdateSeq + 1}]),
+
+ NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry),
+
+ % Copy the security information over
+ SecProps = couch_bt_engine:get_security(St),
+ {ok, NewSt4} = couch_bt_engine:copy_security(NewSt3, SecProps),
+
+ FinalUpdateSeq = couch_bt_engine:get_update_seq(St),
+ {ok, NewSt5} = couch_bt_engine:set_update_seq(NewSt4, FinalUpdateSeq),
+ commit_compaction_data(NewSt5).
+
+
+copy_docs(St, #st{} = NewSt, MixedInfos, Retry) ->
+ DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
+ LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds),
+ % COUCHDB-968, make sure we prune duplicates during compaction
+ NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
+ A =< B
+ end, merge_lookups(MixedInfos, LookupResults)),
+
+ NewInfos1 = lists:map(fun(Info) ->
+ {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun
+ ({RevPos, RevId}, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) ->
+ {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
+ % In the future, we should figure out how to do this for
+ % upgrade purposes.
+ ExternalSize = case is_binary(Body) of
+ true ->
+ couch_compress:uncompressed_size(Body);
+ false ->
+ ?term_size(Body)
+ end,
+ Doc0 = #doc{
+ id = Info#full_doc_info.id,
+ revs = {RevPos, [RevId]},
+ deleted = Leaf#leaf.deleted,
+ body = Body,
+ atts = AttInfos
+ },
+ Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
+ {ok, Doc2, ActiveSize} =
+ couch_bt_engine:write_doc_body(NewSt, Doc1),
+ AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos],
+ NewLeaf = Leaf#leaf{
+ ptr = Doc2#doc.body,
+ sizes = #size_info{
+ active = ActiveSize,
+ external = ExternalSize
+ },
+ atts = AttSizes
+ },
+ {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
+ (_Rev, _Leaf, branch, SizesAcc) ->
+ {?REV_MISSING, SizesAcc}
+ end, {0, 0, []}, Info#full_doc_info.rev_tree),
+ {FinalAS, FinalES, FinalAtts} = FinalAcc,
+ TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
+ NewActiveSize = FinalAS + TotalAttSize,
+ NewExternalSize = FinalES + TotalAttSize,
+ Info#full_doc_info{
+ rev_tree = NewRevTree,
+ sizes = #size_info{
+ active = NewActiveSize,
+ external = NewExternalSize
+ }
+ }
+ end, NewInfos0),
+
+ Limit = couch_bt_engine:get_revs_limit(St),
+ NewInfos = lists:map(fun(FDI) ->
+ FDI#full_doc_info{
+ rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
+ }
+ end, NewInfos1),
+
+ RemoveSeqs =
+ case Retry of
+ nil ->
+ [];
+ OldDocIdTree ->
+            % Compaction is being rerun to catch up to writes during the
+            % first pass. This means we may have docs that already exist
+            % in the seq_tree in the .data file. Here we look up any old
+            % update_seqs so that they can be removed.
+ Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
+ Existing = couch_btree:lookup(OldDocIdTree, Ids),
+ [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
+ end,
+
+ {ok, SeqTree} = couch_btree:add_remove(
+ NewSt#st.seq_tree, NewInfos, RemoveSeqs),
+
+ FDIKVs = lists:map(fun(#full_doc_info{id=Id, update_seq=Seq}=FDI) ->
+ {{Id, Seq}, FDI}
+ end, NewInfos),
+ {ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, FDIKVs),
+ update_compact_task(length(NewInfos)),
+ NewSt#st{id_tree=IdEms, seq_tree=SeqTree}.
+
+
+copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) ->
+ {ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp),
+ BinInfos = case BinInfos0 of
+ _ when is_binary(BinInfos0) ->
+ couch_compress:decompress(BinInfos0);
+ _ when is_list(BinInfos0) ->
+ % pre 1.2 file format
+ BinInfos0
+ end,
+ % copy the bin values
+ NewBinInfos = lists:map(
+ fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
+ % 010 UPGRADE CODE
+ {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
+ {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
+ ok = couch_stream:copy(SrcStream, DstStream),
+ {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
+ couch_stream:close(DstStream),
+ {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
+ couch_util:check_md5(ExpectedMd5, ActualMd5),
+ {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
+ ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
+ {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
+ {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
+ ok = couch_stream:copy(SrcStream, DstStream),
+ {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
+ couch_stream:close(DstStream),
+ {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
+ couch_util:check_md5(ExpectedMd5, ActualMd5),
+ Enc = case Enc1 of
+ true ->
+ % 0110 UPGRADE CODE
+ gzip;
+ false ->
+ % 0110 UPGRADE CODE
+ identity;
+ _ ->
+ Enc1
+ end,
+ {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
+ end, BinInfos),
+ {BodyData, NewBinInfos}.
+
+
+sort_meta_data(St0) ->
+ {ok, Ems} = couch_emsort:merge(St0#st.id_tree),
+ St0#st{id_tree=Ems}.
+
+
+copy_meta_data(#st{} = St) ->
+ #st{
+ fd = Fd,
+ header = Header,
+ id_tree = Src
+ } = St,
+ DstState = couch_bt_engine_header:id_tree_state(Header),
+ {ok, IdTree0} = couch_btree:open(DstState, Fd, [
+ {split, fun couch_bt_engine:id_tree_split/1},
+ {join, fun couch_bt_engine:id_tree_join/2},
+ {reduce, fun couch_bt_engine:id_tree_reduce/2}
+ ]),
+ {ok, Iter} = couch_emsort:iter(Src),
+ Acc0 = #merge_st{
+ id_tree=IdTree0,
+ seq_tree=St#st.seq_tree,
+ rem_seqs=[],
+ infos=[]
+ },
+ Acc = merge_docids(Iter, Acc0),
+ {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Acc#merge_st.infos),
+ {ok, SeqTree} = couch_btree:add_remove(
+ Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
+ ),
+ St#st{id_tree=IdTree, seq_tree=SeqTree}.
+
+
+open_compaction_file(FilePath) ->
+ case couch_file:open(FilePath, [nologifmissing]) of
+ {ok, Fd} ->
+ case couch_file:read_header(Fd) of
+ {ok, Header} -> {ok, Fd, Header};
+ no_valid_header -> {ok, Fd, nil}
+ end;
+ {error, enoent} ->
+ {ok, Fd} = couch_file:open(FilePath, [create]),
+ {ok, Fd, nil}
+ end.
+
+
+reset_compaction_file(Fd, Header) ->
+ ok = couch_file:truncate(Fd, 0),
+ ok = couch_file:write_header(Fd, Header).
+
+
+commit_compaction_data(#st{}=St) ->
+ % Compaction needs to write headers to both the data file
+ % and the meta file so if we need to restart we can pick
+ % back up from where we left off.
+ commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)),
+ commit_compaction_data(St, St#st.fd).
+
+
+commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
+ DataState = couch_bt_engine_header:id_tree_state(OldHeader),
+ MetaFd = couch_emsort:get_fd(St0#st.id_tree),
+ MetaState = couch_emsort:get_state(St0#st.id_tree),
+ St1 = bind_id_tree(St0, St0#st.fd, DataState),
+ Header = St1#st.header,
+ CompHeader = #comp_header{
+ db_header = Header,
+ meta_state = MetaState
+ },
+ ok = couch_file:sync(Fd),
+ ok = couch_file:write_header(Fd, CompHeader),
+ St2 = St1#st{
+ header = Header
+ },
+ bind_emsort(St2, MetaFd, MetaState).
+
+
+bind_emsort(St, Fd, nil) ->
+ {ok, Ems} = couch_emsort:open(Fd),
+ St#st{id_tree=Ems};
+bind_emsort(St, Fd, State) ->
+ {ok, Ems} = couch_emsort:open(Fd, [{root, State}]),
+ St#st{id_tree=Ems}.
+
+
+bind_id_tree(St, Fd, State) ->
+ {ok, IdBtree} = couch_btree:open(State, Fd, [
+ {split, fun couch_bt_engine:id_tree_split/1},
+ {join, fun couch_bt_engine:id_tree_join/2},
+ {reduce, fun couch_bt_engine:id_tree_reduce/2}
+ ]),
+ St#st{id_tree=IdBtree}.
+
+
+merge_lookups(Infos, []) ->
+ Infos;
+merge_lookups([], _) ->
+ [];
+merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
+ % Assert we've matched our lookups
+ if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
+ erlang:error({mismatched_doc_infos, DI#doc_info.id})
+ end,
+ [FDI | merge_lookups(RestInfos, RestLookups)];
+merge_lookups([FDI | RestInfos], Lookups) ->
+ [FDI | merge_lookups(RestInfos, Lookups)].
+
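+% For illustration (DI_a and FDI_* below are placeholder bindings): the seq
+% fold may emit either record type, and merge_lookups/2 swaps each
+% #doc_info{} for its looked-up #full_doc_info{} while passing
+% #full_doc_info{} entries through untouched, e.g.
+%
+%     merge_lookups([DI_a, FDI_b], [{ok, FDI_a}])
+%
+% evaluates to [FDI_a, FDI_b].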
+
+merge_docids(Iter, #merge_st{infos=Infos}=Acc) when length(Infos) > 1000 ->
+ #merge_st{
+ id_tree=IdTree0,
+ seq_tree=SeqTree0,
+ rem_seqs=RemSeqs
+ } = Acc,
+ {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
+ {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
+ Acc1 = Acc#merge_st{
+ id_tree=IdTree1,
+ seq_tree=SeqTree1,
+ rem_seqs=[],
+ infos=[]
+ },
+ merge_docids(Iter, Acc1);
+merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
+ case next_info(Iter, Curr, []) of
+ {NextIter, NewCurr, FDI, Seqs} ->
+ Acc1 = Acc#merge_st{
+ infos = [FDI | Acc#merge_st.infos],
+ rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
+ curr = NewCurr
+ },
+ merge_docids(NextIter, Acc1);
+ {finished, FDI, Seqs} ->
+ Acc#merge_st{
+ infos = [FDI | Acc#merge_st.infos],
+ rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
+ curr = undefined
+ };
+ empty ->
+ Acc
+ end.
+
+
+next_info(Iter, undefined, []) ->
+ case couch_emsort:next(Iter) of
+ {ok, {{Id, Seq}, FDI}, NextIter} ->
+ next_info(NextIter, {Id, Seq, FDI}, []);
+ finished ->
+ empty
+ end;
+next_info(Iter, {Id, Seq, FDI}, Seqs) ->
+ case couch_emsort:next(Iter) of
+ {ok, {{Id, NSeq}, NFDI}, NextIter} ->
+ next_info(NextIter, {Id, NSeq, NFDI}, [Seq | Seqs]);
+ {ok, {{NId, NSeq}, NFDI}, NextIter} ->
+ {NextIter, {NId, NSeq, NFDI}, FDI, Seqs};
+ finished ->
+ {finished, FDI, Seqs}
+ end.
+
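+% For illustration (ids, seqs and Iter2 below are placeholders): the emsort
+% stream is sorted by {Id, Seq}, so consecutive entries for the same id
+% collapse to the one with the highest update_seq and the superseded seqs are
+% handed back for removal from the seq tree. Given the stream
+%
+%     {{<<"a">>, 1}, FDI_a1}, {{<<"a">>, 3}, FDI_a3}, {{<<"b">>, 2}, FDI_b2}
+%
+% the first call returns {Iter2, {<<"b">>, 2, FDI_b2}, FDI_a3, [1]}.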
+
+update_compact_task(NumChanges) ->
+ [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
+ Changes2 = Changes + NumChanges,
+ Progress = case Total of
+ 0 ->
+ 0;
+ _ ->
+ (Changes2 * 100) div Total
+ end,
+ couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
+
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
new file mode 100644
index 000000000..3d24f3189
--- /dev/null
+++ b/src/couch/src/couch_bt_engine_header.erl
@@ -0,0 +1,434 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_bt_engine_header).
+
+
+-export([
+ new/0,
+ from/1,
+ is_header/1,
+ upgrade/1,
+ get/2,
+ get/3,
+ set/2,
+ set/3
+]).
+
+-export([
+ disk_version/1,
+ update_seq/1,
+ id_tree_state/1,
+ seq_tree_state/1,
+ latest/1,
+ local_tree_state/1,
+ purge_seq/1,
+ purged_docs/1,
+ security_ptr/1,
+ revs_limit/1,
+ uuid/1,
+ epochs/1,
+ compacted_seq/1
+]).
+
+
+% This should be updated anytime a header change happens that requires more
+% than filling in new defaults.
+%
+% As long as the changes are limited to new header fields (with inline
+% defaults) added to the end of the record, there is no need to increment
+% the disk revision number.
+%
+% If the disk revision is incremented, then new upgrade logic will need to be
+% added to couch_db_updater:init_db.
+
+-define(LATEST_DISK_VERSION, 6).
+
+-record(db_header, {
+ disk_version = ?LATEST_DISK_VERSION,
+ update_seq = 0,
+ unused = 0,
+ id_tree_state = nil,
+ seq_tree_state = nil,
+ local_tree_state = nil,
+ purge_seq = 0,
+ purged_docs = nil,
+ security_ptr = nil,
+ revs_limit = 1000,
+ uuid,
+ epochs,
+ compacted_seq
+}).
+
+
+new() ->
+ #db_header{
+ uuid = couch_uuids:random(),
+ epochs = [{node(), 0}]
+ }.
+
+
+from(Header0) ->
+ Header = upgrade(Header0),
+ #db_header{
+ uuid = Header#db_header.uuid,
+ epochs = Header#db_header.epochs,
+ compacted_seq = Header#db_header.compacted_seq
+ }.
+
+
+is_header(Header) ->
+ try
+ upgrade(Header),
+ true
+ catch _:_ ->
+ false
+ end.
+
+
+upgrade(Header) ->
+ Funs = [
+ fun upgrade_tuple/1,
+ fun upgrade_disk_version/1,
+ fun upgrade_uuid/1,
+ fun upgrade_epochs/1,
+ fun upgrade_compacted_seq/1
+ ],
+ lists:foldl(fun(F, HdrAcc) ->
+ F(HdrAcc)
+ end, Header, Funs).
+
+
+get(Header, Key) ->
+ ?MODULE:get(Header, Key, undefined).
+
+
+get(Header, Key, Default) ->
+ get_field(Header, Key, Default).
+
+
+set(Header, Key, Value) ->
+ ?MODULE:set(Header, [{Key, Value}]).
+
+
+set(Header0, Fields) ->
+    % A subtlety here is that if a database was open during the release
+    % upgrade that adds uuids and epochs, then this dynamic upgrade also
+    % assigns it a uuid and epoch.
+ Header = upgrade(Header0),
+ lists:foldl(fun({Field, Value}, HdrAcc) ->
+ set_field(HdrAcc, Field, Value)
+ end, Header, Fields).
+
+
+disk_version(Header) ->
+ get_field(Header, disk_version).
+
+
+update_seq(Header) ->
+ get_field(Header, update_seq).
+
+
+id_tree_state(Header) ->
+ get_field(Header, id_tree_state).
+
+
+seq_tree_state(Header) ->
+ get_field(Header, seq_tree_state).
+
+
+local_tree_state(Header) ->
+ get_field(Header, local_tree_state).
+
+
+purge_seq(Header) ->
+ get_field(Header, purge_seq).
+
+
+purged_docs(Header) ->
+ get_field(Header, purged_docs).
+
+
+security_ptr(Header) ->
+ get_field(Header, security_ptr).
+
+
+revs_limit(Header) ->
+ get_field(Header, revs_limit).
+
+
+uuid(Header) ->
+ get_field(Header, uuid).
+
+
+epochs(Header) ->
+ get_field(Header, epochs).
+
+
+compacted_seq(Header) ->
+ get_field(Header, compacted_seq).
+
+
+get_field(Header, Field) ->
+ get_field(Header, Field, undefined).
+
+
+get_field(Header, Field, Default) ->
+ Idx = index(Field),
+ case Idx > tuple_size(Header) of
+ true -> Default;
+        false -> element(Idx, Header)
+ end.
+
+
+set_field(Header, Field, Value) ->
+ setelement(index(Field), Header, Value).
+
+
+index(Field) ->
+ couch_util:get_value(Field, indexes()).
+
+
+indexes() ->
+ Fields = record_info(fields, db_header),
+ Indexes = lists:seq(2, record_info(size, db_header)),
+ lists:zip(Fields, Indexes).
+
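+% For illustration: indexes() evaluates to [{disk_version, 2},
+% {update_seq, 3}, {unused, 4}, ...], so get_field(Header, update_seq) reads
+% element(3, Header), and asking a shorter (older) header tuple for a field
+% it predates falls back to the default via the tuple_size/1 check above.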
+
+upgrade_tuple(Old) when is_record(Old, db_header) ->
+ Old;
+upgrade_tuple(Old) when is_tuple(Old) ->
+ NewSize = record_info(size, db_header),
+ if tuple_size(Old) < NewSize -> ok; true ->
+ erlang:error({invalid_header_size, Old})
+ end,
+ {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
+ {Idx+1, setelement(Idx, Hdr, Val)}
+ end, {1, #db_header{}}, tuple_to_list(Old)),
+ if is_record(New, db_header) -> ok; true ->
+ erlang:error({invalid_header_extension, {Old, New}})
+ end,
+ New.
+
+-define(OLD_DISK_VERSION_ERROR,
+ "Database files from versions smaller than 0.10.0 are no longer supported").
+
+upgrade_disk_version(#db_header{}=Header) ->
+ case element(2, Header) of
+ 1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
+ 5 -> Header; % pre 1.2
+ ?LATEST_DISK_VERSION -> Header;
+ _ ->
+ Reason = "Incorrect disk header version",
+ throw({database_disk_version_error, Reason})
+ end.
+
+
+upgrade_uuid(#db_header{}=Header) ->
+ case Header#db_header.uuid of
+ undefined ->
+ % Upgrading this old db file to a newer
+            % on-disk format that includes a UUID.
+ Header#db_header{uuid=couch_uuids:random()};
+ _ ->
+ Header
+ end.
+
+
+upgrade_epochs(#db_header{}=Header) ->
+ NewEpochs = case Header#db_header.epochs of
+ undefined ->
+            % This node is taking over ownership of a shard with
+            % an old version of couch file. Before epochs there
+ % was always an implicit assumption that a file was
+ % owned since eternity by the node it was on. This
+ % just codifies that assumption.
+ [{node(), 0}];
+ [{Node, _} | _] = Epochs0 when Node == node() ->
+ % Current node is the current owner of this db
+ Epochs0;
+ Epochs1 ->
+ % This node is taking over ownership of this db
+ % and marking the update sequence where it happened.
+ [{node(), Header#db_header.update_seq} | Epochs1]
+ end,
+    % It's possible for a node to open a db and claim
+ % ownership but never make a write to the db. This
+ % removes nodes that claimed ownership but never
+ % changed the database.
+ DedupedEpochs = remove_dup_epochs(NewEpochs),
+ Header#db_header{epochs=DedupedEpochs}.
+
+
+% This relies slightly on the update_seqs in epochs being sorted, which holds
+% because we only ever push entries onto the front. If we ever had a case
+% where the update_seq is not monotonically increasing, it is not clear that
+% we would want to remove dupes (by sorting the input to this function), so
+% for now we don't sort and rely on epochs always being sorted. See the
+% illustrative sketch in the test section at the end of this module.
+remove_dup_epochs([_]=Epochs) ->
+ Epochs;
+remove_dup_epochs([{N1, S}, {_N2, S}]) ->
+ % Seqs match, keep the most recent owner
+ [{N1, S}];
+remove_dup_epochs([_, _]=Epochs) ->
+ % Seqs don't match.
+ Epochs;
+remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
+ % Seqs match, keep the most recent owner
+ remove_dup_epochs([{N1, S} | Rest]);
+remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
+ % Seqs don't match, recurse to check others
+ [{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
+
+
+upgrade_compacted_seq(#db_header{}=Header) ->
+ case Header#db_header.compacted_seq of
+ undefined ->
+ Header#db_header{compacted_seq=0};
+ _ ->
+ Header
+ end.
+
+latest(?LATEST_DISK_VERSION) ->
+ true;
+latest(N) when is_integer(N), N < ?LATEST_DISK_VERSION ->
+ false;
+latest(_Else) ->
+ undefined.
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+mk_header(Vsn) ->
+ {
+ db_header, % record name
+ Vsn, % disk version
+ 100, % update_seq
+ 0, % unused
+ foo, % id_tree_state
+ bar, % seq_tree_state
+ bam, % local_tree_state
+ 1, % purge_seq
+ baz, % purged_docs
+ bang, % security_ptr
+ 999 % revs_limit
+ }.
+
+
+-ifdef(run_broken_tests).
+
+upgrade_v3_test() ->
+ Vsn3Header = mk_header(3),
+ NewHeader = upgrade_tuple(Vsn3Header),
+
+ % Tuple upgrades don't change
+ ?assert(is_record(NewHeader, db_header)),
+ ?assertEqual(3, disk_version(NewHeader)),
+ ?assertEqual(100, update_seq(NewHeader)),
+ ?assertEqual(foo, id_tree_state(NewHeader)),
+ ?assertEqual(bar, seq_tree_state(NewHeader)),
+ ?assertEqual(bam, local_tree_state(NewHeader)),
+ ?assertEqual(1, purge_seq(NewHeader)),
+ ?assertEqual(baz, purged_docs(NewHeader)),
+ ?assertEqual(bang, security_ptr(NewHeader)),
+ ?assertEqual(999, revs_limit(NewHeader)),
+ ?assertEqual(undefined, uuid(NewHeader)),
+ ?assertEqual(undefined, epochs(NewHeader)),
+
+ % Security ptr isn't changed until upgrade_disk_version/1
+ NewNewHeader = upgrade_disk_version(NewHeader),
+ ?assert(is_record(NewNewHeader, db_header)),
+ ?assertEqual(nil, security_ptr(NewNewHeader)),
+
+ % Assert upgrade works on really old headers
+ NewestHeader = upgrade(Vsn3Header),
+ ?assertMatch(<<_:32/binary>>, uuid(NewestHeader)),
+ ?assertEqual([{node(), 0}], epochs(NewestHeader)).
+
+-endif.
+
+upgrade_v5_test() ->
+ Vsn5Header = mk_header(5),
+ NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
+
+ ?assert(is_record(NewHeader, db_header)),
+ ?assertEqual(5, disk_version(NewHeader)),
+
+ % Security ptr isn't changed for v5 headers
+ ?assertEqual(bang, security_ptr(NewHeader)).
+
+
+upgrade_uuid_test() ->
+ Vsn5Header = mk_header(5),
+
+ % Upgraded headers get a new UUID
+ NewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(Vsn5Header))),
+ ?assertMatch(<<_:32/binary>>, uuid(NewHeader)),
+
+ % Headers with a UUID don't have their UUID changed
+ NewNewHeader = upgrade_uuid(upgrade_disk_version(upgrade_tuple(NewHeader))),
+ ?assertEqual(uuid(NewHeader), uuid(NewNewHeader)),
+
+ % Derived empty headers maintain the same UUID
+ ResetHeader = from(NewNewHeader),
+ ?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
+
+
+upgrade_epochs_test() ->
+ Vsn5Header = mk_header(5),
+
+ % Upgraded headers get a default epochs set
+ NewHeader = upgrade(Vsn5Header),
+ ?assertEqual([{node(), 0}], epochs(NewHeader)),
+
+ % Fake an old entry in epochs
+ FakeFields = [
+ {update_seq, 20},
+ {epochs, [{'someothernode@someotherhost', 0}]}
+ ],
+ NotOwnedHeader = set(NewHeader, FakeFields),
+
+ OwnedEpochs = [
+ {node(), 20},
+ {'someothernode@someotherhost', 0}
+ ],
+
+ % Upgrading a header not owned by the local node updates
+ % the epochs appropriately.
+ NowOwnedHeader = upgrade(NotOwnedHeader),
+ ?assertEqual(OwnedEpochs, epochs(NowOwnedHeader)),
+
+ % Headers with epochs stay the same after upgrades
+ NewNewHeader = upgrade(NowOwnedHeader),
+ ?assertEqual(OwnedEpochs, epochs(NewNewHeader)),
+
+ % Getting a reset header maintains the epoch data
+ ResetHeader = from(NewNewHeader),
+ ?assertEqual(OwnedEpochs, epochs(ResetHeader)).
+
+
+get_uuid_from_old_header_test() ->
+ Vsn5Header = mk_header(5),
+ ?assertEqual(undefined, uuid(Vsn5Header)).
+
+
+get_epochs_from_old_header_test() ->
+ Vsn5Header = mk_header(5),
+ ?assertEqual(undefined, epochs(Vsn5Header)).
+
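+
+% A small illustrative test (a sketch based on the remove_dup_epochs/1
+% clauses above; node1, node2 and node3 are placeholder atoms): epochs that
+% share an update_seq collapse to the most recent owner, while distinct seqs
+% are kept.
+remove_dup_epochs_example_test() ->
+    Epochs = [{node1, 5}, {node2, 5}, {node3, 0}],
+    ?assertEqual([{node1, 5}, {node3, 0}], remove_dup_epochs(Epochs)).
+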
+
+-endif.
diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl
new file mode 100644
index 000000000..431894a50
--- /dev/null
+++ b/src/couch/src/couch_bt_engine_stream.erl
@@ -0,0 +1,70 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_bt_engine_stream).
+
+-export([
+ foldl/3,
+ seek/2,
+ write/2,
+ finalize/1,
+ to_disk_term/1
+]).
+
+
+foldl({_Fd, []}, _Fun, Acc) ->
+ Acc;
+
+foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) ->
+ foldl({Fd, [Pos | Rest]}, Fun, Acc);
+
+foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) ->
+ % We're processing the first bit of data
+ % after we did a seek for a range fold.
+ foldl({Fd, Rest}, Fun, Fun(Bin, Acc));
+
+foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) ->
+ {ok, Bin} = couch_file:pread_binary(Fd, Pos),
+ foldl({Fd, Rest}, Fun, Fun(Bin, Acc)).
+
+
+seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
+ case Length =< Offset of
+ true ->
+ seek({Fd, Rest}, Offset - Length);
+ false ->
+ seek({Fd, [Pos | Rest]}, Offset)
+ end;
+
+seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
+ {ok, Bin} = couch_file:pread_binary(Fd, Pos),
+ case iolist_size(Bin) =< Offset of
+ true ->
+ seek({Fd, Rest}, Offset - size(Bin));
+ false ->
+ <<_:Offset/binary, Tail/binary>> = Bin,
+ {ok, {Fd, [Tail | Rest]}}
+ end.
+
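+% For illustration (Pos1 and Pos2 are placeholder file positions): seeking 15
+% bytes into a stream stored as two 10-byte chunks skips the first chunk and
+% splits the second, i.e.
+%
+%     seek({Fd, [{Pos1, 10}, {Pos2, 10}]}, 15)
+%
+% reads the chunk at Pos2 and returns {ok, {Fd, [Tail]}} where Tail is its
+% last 5 bytes, so a following foldl/3 resumes from byte offset 15.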
+
+write({Fd, Written}, Data) when is_pid(Fd) ->
+ {ok, Pos, _} = couch_file:append_binary(Fd, Data),
+ {ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}.
+
+
+finalize({Fd, Written}) ->
+ {ok, {Fd, lists:reverse(Written)}}.
+
+
+to_disk_term({_Fd, Written}) ->
+ {ok, Written}.
+
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
index 3e4175014..db465ee98 100644
--- a/src/couch/src/couch_changes.erl
+++ b/src/couch/src/couch_changes.erl
@@ -537,7 +537,8 @@ send_changes(Acc, Dir, FirstRound) ->
{#mrview{}, {fast_view, _, _, _}} ->
couch_mrview:view_changes_since(View, StartSeq, DbEnumFun, [{dir, Dir}], Acc);
{undefined, _} ->
- couch_db:changes_since(Db, StartSeq, DbEnumFun, [{dir, Dir}], Acc);
+ Opts = [{dir, Dir}],
+ couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts);
{#mrview{}, _} ->
ViewEnumFun = fun view_changes_enumerator/2,
{Go, Acc0} = couch_mrview:view_changes_since(View, StartSeq, ViewEnumFun, [{dir, Dir}], Acc),
@@ -577,18 +578,22 @@ can_optimize(_, _) ->
send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
- Lookups = couch_db:get_full_doc_infos(Db, DocIds),
+ Results = couch_db:get_full_doc_infos(Db, DocIds),
FullInfos = lists:foldl(fun
- ({ok, FDI}, Acc) -> [FDI | Acc];
+ (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
(not_found, Acc) -> Acc
- end, [], Lookups),
+ end, [], Results),
send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
- KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
- {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], KeyOpts),
+ Opts = [
+ include_deleted,
+ {start_key, <<"_design/">>},
+ {end_key_gt, <<"_design0">>}
+ ],
+ {ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
@@ -763,6 +768,8 @@ changes_enumerator(Value0, Acc) ->
end,
Results = [Result || Result <- Results0, Result /= null],
Seq = case Value of
+ #full_doc_info{} ->
+ Value#full_doc_info.update_seq;
#doc_info{} ->
Value#doc_info.high_seq;
{{Seq0, _}, _} ->
@@ -822,6 +829,8 @@ view_changes_row(Results, KVs, Acc) ->
] ++ maybe_get_changes_doc({Id, Rev}, Acc)}.
+changes_row(Results, #full_doc_info{} = FDI, Acc) ->
+ changes_row(Results, couch_doc:to_doc_info(FDI), Acc);
changes_row(Results, DocInfo, Acc) ->
#doc_info{
id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
diff --git a/src/couch/src/couch_compaction_daemon.erl b/src/couch/src/couch_compaction_daemon.erl
index 7827dd5e2..72d5a17e2 100644
--- a/src/couch/src/couch_compaction_daemon.erl
+++ b/src/couch/src/couch_compaction_daemon.erl
@@ -126,6 +126,7 @@ handle_config_terminate(_Server, _Reason, _State) ->
erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
compact_loop(Parent) ->
+ SnoozePeriod = config:get_integer("compaction_daemon", "snooze_period", 3),
{ok, _} = couch_server:all_databases(
fun(DbName, Acc) ->
case ets:info(?CONFIG_ETS, size) =:= 0 of
@@ -138,7 +139,8 @@ compact_loop(Parent) ->
{ok, Config} ->
case check_period(Config) of
true ->
- maybe_compact_db(Parent, DbName, Config);
+ maybe_compact_db(Parent, DbName, Config),
+ ok = timer:sleep(SnoozePeriod * 1000);
false ->
ok
end
@@ -150,8 +152,7 @@ compact_loop(Parent) ->
true ->
receive {Parent, have_config} -> ok end;
false ->
- PausePeriod = list_to_integer(
- config:get("compaction_daemon", "check_interval", "300")),
+ PausePeriod = config:get_integer("compaction_daemon", "check_interval", 3600),
ok = timer:sleep(PausePeriod * 1000)
end,
compact_loop(Parent).
@@ -229,24 +230,27 @@ maybe_compact_views(DbName, [DDocName | Rest], Config) ->
maybe_compact_views(DbName, Rest, Config);
timeout ->
ok
- end;
+ end,
+ SnoozePeriod = config:get_integer("compaction_daemon", "snooze_period", 3),
+ ok = timer:sleep(SnoozePeriod * 1000);
false ->
ok
end.
db_ddoc_names(Db) ->
- {ok, _, DDocNames} = couch_db:enum_docs(
- Db,
- fun(#full_doc_info{id = <<"_design/", _/binary>>, deleted = true}, _, Acc) ->
- {ok, Acc};
- (#full_doc_info{id = <<"_design/", Id/binary>>}, _, Acc) ->
- {ok, [Id | Acc]};
- (_, _, Acc) ->
- {stop, Acc}
- end, [], [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}]),
+ FoldFun = fun ddoc_name/2,
+ Opts = [{start_key, <<"_design/">>}],
+ {ok, DDocNames} = couch_db:fold_docs(Db, FoldFun, [], Opts),
DDocNames.
+ddoc_name(#full_doc_info{id = <<"_design/", _/binary>>, deleted = true}, Acc) ->
+ {ok, Acc};
+ddoc_name(#full_doc_info{id = <<"_design/", Id/binary>>}, Acc) ->
+ {ok, [Id | Acc]};
+ddoc_name(_, Acc) ->
+ {stop, Acc}.
+
maybe_compact_view(DbName, GroupId, Config) ->
DDocId = <<"_design/", GroupId/binary>>,
@@ -397,21 +401,22 @@ check_frag(Threshold, Frag) ->
frag(Props) ->
- FileSize = couch_util:get_value(disk_size, Props),
+ {Sizes} = couch_util:get_value(sizes, Props),
+ FileSize = couch_util:get_value(file, Sizes),
MinFileSize = list_to_integer(
config:get("compaction_daemon", "min_file_size", "131072")),
case FileSize < MinFileSize of
true ->
{0, FileSize};
false ->
- case couch_util:get_value(data_size, Props) of
- null ->
- {100, FileSize};
+ case couch_util:get_value(active, Sizes) of
0 ->
{0, FileSize};
- DataSize ->
+ DataSize when is_integer(DataSize), DataSize > 0 ->
Frag = round(((FileSize - DataSize) / FileSize * 100)),
- {Frag, space_required(DataSize)}
+ {Frag, space_required(DataSize)};
+ _ ->
+ {100, FileSize}
end
end.
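+% For illustration: a 1000000 byte file whose sizes proplist reports 400000
+% bytes of active data is 60% fragmented, since
+% round((1000000 - 400000) / 1000000 * 100) =:= 60, provided the file is at
+% least min_file_size bytes.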
@@ -520,9 +525,9 @@ free_space(Path) ->
free_space_rec(_Path, []) ->
undefined;
-free_space_rec(Path, [{MountPoint0, Total, Usage} | Rest]) ->
- case abs_path(MountPoint0) of
- {ok, MountPoint} ->
+free_space_rec(Path0, [{MountPoint, Total, Usage} | Rest]) ->
+ case abs_path(Path0) of
+ {ok, Path} ->
case MountPoint =:= string:substr(Path, 1, length(MountPoint)) of
false ->
free_space_rec(Path, Rest);
@@ -530,10 +535,10 @@ free_space_rec(Path, [{MountPoint0, Total, Usage} | Rest]) ->
trunc(Total - (Total * (Usage / 100))) * 1024
end;
{error, Reason} ->
- couch_log:warning("Compaction daemon - unable to calculate free space"
- " for `~s`: `~s`",
- [MountPoint0, Reason]),
- free_space_rec(Path, Rest)
+ couch_log:debug("Compaction daemon - unable to calculate free space"
+                " for `~s`: `~s` for mount point `~p`",
+ [Path0, Reason, MountPoint]),
+ free_space_rec(Path0, Rest)
end.
abs_path(Path0) ->
@@ -558,3 +563,33 @@ abs_path2(Path0) ->
_ ->
{ok, Path ++ "/"}
end.
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+free_space_rec_test() ->
+ ?assertEqual(undefined, free_space_rec("", [])),
+ ?assertEqual(51200, free_space_rec("/tmp/", [{"/", 100, 50}])),
+ ?assertEqual(51200, free_space_rec("/tmp/", [
+ {"/floop", 200, 25},
+ {"/run", 0, 0},
+ {"/", 100, 50}
+ ])),
+ ?assertEqual(undefined, free_space_rec("/flopp/", [{"/", 300, 75}])),
+ ?assertEqual(undefined, free_space_rec("/flopp/", [
+ {"/floop", 200, 25},
+ {"/run", 0, 0},
+ {"/", 100, 50}
+ ])),
+ ok.
+
+abs_path2_test() ->
+ ?assertEqual({ok, "/a/"}, abs_path2("/a")),
+ ?assertEqual({ok, "/a/"}, abs_path2("/a/")),
+
+ ?assertEqual({ok, "/a/b/"}, abs_path2("/a/b")),
+    ?assertEqual({ok, "/a/b/"}, abs_path2("/a/b/")),
+ ok.
+
+-endif.
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 5e720c284..93ea07e65 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -32,13 +32,13 @@
check_is_member/1,
name/1,
- compression/1,
get_after_doc_read_fun/1,
get_before_doc_update_fun/1,
get_committed_update_seq/1,
get_compacted_seq/1,
get_compactor_pid/1,
get_db_info/1,
+ get_del_doc_count/1,
get_doc_count/1,
get_epochs/1,
get_filepath/1,
@@ -56,7 +56,6 @@
is_system_db/1,
is_clustered/1,
- increment_update_seq/1,
set_revs_limit/2,
set_security/2,
set_user_ctx/2,
@@ -65,12 +64,12 @@
ensure_full_commit/2,
load_validation_funs/1,
+ reload_validation_funs/1,
open_doc/2,
open_doc/3,
open_doc_revs/4,
open_doc_int/3,
- read_doc/2,
get_doc_info/2,
get_full_doc_info/2,
get_full_doc_infos/2,
@@ -87,16 +86,16 @@
purge_docs/2,
with_stream/3,
+ open_write_stream/2,
+ open_read_stream/2,
+ is_active_stream/2,
+ fold_docs/3,
fold_docs/4,
fold_local_docs/4,
- enum_docs/4,
- enum_docs_reduce_to_count/1,
-
- enum_docs_since/5,
- enum_docs_since_reduce_to_count/1,
- changes_since/4,
- changes_since/5,
+ fold_design_docs/4,
+ fold_changes/4,
+ fold_changes/5,
count_changes_since/2,
calculate_start_seq/3,
@@ -111,14 +110,13 @@
normalize_dbname/1,
validate_dbname/1,
- check_md5/2,
make_doc/5,
new_revid/1
]).
-export([
- start_link/3
+ start_link/4
]).
@@ -130,38 +128,9 @@
"(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
).
-start_link(DbName, Filepath, Options) ->
- case open_db_file(Filepath, Options) of
- {ok, Fd} ->
- {ok, UpdaterPid} = gen_server:start_link(couch_db_updater, {DbName,
- Filepath, Fd, Options}, []),
- unlink(Fd),
- gen_server:call(UpdaterPid, get_db);
- Else ->
- Else
- end.
-
-open_db_file(Filepath, Options) ->
- case couch_file:open(Filepath, Options) of
- {ok, Fd} ->
- {ok, Fd};
- {error, enoent} ->
- % couldn't find file. is there a compact version? This can happen if
- % crashed during the file switch.
- case couch_file:open(Filepath ++ ".compact", [nologifmissing]) of
- {ok, Fd} ->
- couch_log:info("Found ~s~s compaction file, using as primary"
- " storage.", [Filepath, ".compact"]),
- ok = file:rename(Filepath ++ ".compact", Filepath),
- ok = couch_file:sync(Fd),
- {ok, Fd};
- {error, enoent} ->
- {not_found, no_db_file}
- end;
- Error ->
- Error
- end.
-
+start_link(Engine, DbName, Filepath, Options) ->
+ Arg = {Engine, DbName, Filepath, Options},
+ proc_lib:start_link(couch_db_updater, init, [Arg]).
create(DbName, Options) ->
couch_server:create(DbName, Options).
@@ -187,20 +156,19 @@ open(DbName, Options) ->
Else -> Else
end.
-reopen(#db{main_pid = Pid, fd = Fd, fd_monitor = OldRef, user_ctx = UserCtx}) ->
- {ok, #db{fd = NewFd} = NewDb} = gen_server:call(Pid, get_db, infinity),
- case NewFd =:= Fd of
- true ->
- {ok, NewDb#db{user_ctx = UserCtx}};
- false ->
- erlang:demonitor(OldRef, [flush]),
- NewRef = erlang:monitor(process, NewFd),
- {ok, NewDb#db{user_ctx = UserCtx, fd_monitor = NewRef}}
- end.
-incref(#db{fd = Fd} = Db) ->
- Ref = erlang:monitor(process, Fd),
- {ok, Db#db{fd_monitor = Ref}}.
+reopen(#db{} = Db) ->
+ % We could have just swapped out the storage engine
+ % for this database during a compaction so we just
+ % reimplement this as a close/open pair now.
+ close(Db),
+ open(Db#db.name, [{user_ctx, Db#db.user_ctx} | Db#db.options]).
+
+
+% You shouldn't call this. It's part of the ref counting between
+% couch_server and couch_db instances.
+incref(#db{} = Db) ->
+ couch_db_engine:incref(Db).
clustered_db(DbName, UserCtx) ->
clustered_db(DbName, UserCtx, []).
@@ -220,8 +188,8 @@ is_clustered(#db{main_pid = nil}) ->
true;
is_clustered(#db{}) ->
false;
-is_clustered(?NEW_PSE_DB = Db) ->
- ?PSE_DB_MAIN_PID(Db) == undefined.
+is_clustered(?OLD_DB_REC = Db) ->
+ ?OLD_DB_MAIN_PID(Db) == undefined.
ensure_full_commit(#db{main_pid=Pid, instance_start_time=StartTime}) ->
ok = gen_server:call(Pid, full_commit, infinity),
@@ -232,10 +200,9 @@ ensure_full_commit(Db, RequiredSeq) ->
ok = gen_server:call(Pid, {full_commit, RequiredSeq}, infinity),
{ok, StartTime}.
-close(#db{fd_monitor=Ref}) ->
- erlang:demonitor(Ref, [flush]),
- ok;
-close(?NEW_PSE_DB) ->
+close(#db{} = Db) ->
+ ok = couch_db_engine:decref(Db);
+close(?OLD_DB_REC) ->
ok.
is_idle(#db{compactor_pid=nil, waiting_delayed_commit=nil} = Db) ->
@@ -244,20 +211,20 @@ is_idle(_Db) ->
false.
monitored_by(Db) ->
- case erlang:process_info(Db#db.fd, monitored_by) of
- undefined ->
- [];
- {monitored_by, Pids} ->
- PidTracker = whereis(couch_stats_process_tracker),
- Pids -- [Db#db.main_pid, PidTracker]
+ case couch_db_engine:monitored_by(Db) of
+ Pids when is_list(Pids) ->
+ PidTracker = whereis(couch_stats_process_tracker),
+ Pids -- [Db#db.main_pid, PidTracker];
+ undefined ->
+ []
end.
monitor(#db{main_pid=MainPid}) ->
erlang:monitor(process, MainPid).
-start_compact(#db{main_pid=Pid}) ->
- gen_server:call(Pid, start_compact).
+start_compact(#db{} = Db) ->
+ gen_server:call(Db#db.main_pid, start_compact).
cancel_compact(#db{main_pid=Pid}) ->
gen_server:call(Pid, cancel_compact).
@@ -357,7 +324,8 @@ get_missing_revs(Db, IdRevsList) ->
find_missing([], []) ->
[];
-find_missing([{Id, Revs}|RestIdRevs], [{ok, FullInfo} | RestLookupInfo]) ->
+find_missing([{Id, Revs}|RestIdRevs], [FullInfo | RestLookupInfo])
+ when is_record(FullInfo, full_doc_info) ->
case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
[] ->
find_missing(RestIdRevs, RestLookupInfo);
@@ -385,22 +353,18 @@ find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
get_doc_info(Db, Id) ->
case get_full_doc_info(Db, Id) of
- {ok, DocInfo} ->
- {ok, couch_doc:to_doc_info(DocInfo)};
+ #full_doc_info{} = FDI ->
+ {ok, couch_doc:to_doc_info(FDI)};
Else ->
Else
end.
-% returns {ok, DocInfo} or not_found
get_full_doc_info(Db, Id) ->
[Result] = get_full_doc_infos(Db, [Id]),
Result.
get_full_doc_infos(Db, Ids) ->
- couch_btree:lookup(Db#db.id_tree, Ids).
-
-increment_update_seq(#db{main_pid=Pid}) ->
- gen_server:call(Pid, increment_update_seq).
+ couch_db_engine:open_docs(Db, Ids).
purge_docs(#db{main_pid=Pid}, IdsRevs) ->
gen_server:call(Pid, {purge_docs, IdsRevs}).
@@ -414,37 +378,34 @@ get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
get_committed_update_seq(#db{committed_update_seq=Seq}) ->
Seq.
-get_update_seq(#db{update_seq=Seq})->
- Seq.
+get_update_seq(#db{} = Db)->
+ couch_db_engine:get_update_seq(Db).
get_user_ctx(#db{user_ctx = UserCtx}) ->
UserCtx;
-get_user_ctx(?NEW_PSE_DB = Db) ->
- ?PSE_DB_USER_CTX(Db).
+get_user_ctx(?OLD_DB_REC = Db) ->
+ ?OLD_DB_USER_CTX(Db).
get_purge_seq(#db{}=Db) ->
- couch_db_header:purge_seq(Db#db.header).
+ {ok, couch_db_engine:get_purge_seq(Db)}.
get_last_purged(#db{}=Db) ->
- case couch_db_header:purged_docs(Db#db.header) of
- nil ->
- {ok, []};
- Pointer ->
- couch_file:pread_term(Db#db.fd, Pointer)
- end.
+ {ok, couch_db_engine:get_last_purged(Db)}.
get_pid(#db{main_pid = Pid}) ->
Pid.
+get_del_doc_count(Db) ->
+ {ok, couch_db_engine:get_del_doc_count(Db)}.
+
get_doc_count(Db) ->
- {ok, Reds} = couch_btree:full_reduce(Db#db.id_tree),
- {ok, element(1, Reds)}.
+ {ok, couch_db_engine:get_doc_count(Db)}.
get_uuid(#db{}=Db) ->
- couch_db_header:uuid(Db#db.header).
+ couch_db_engine:get_uuid(Db).
get_epochs(#db{}=Db) ->
- Epochs = couch_db_header:epochs(Db#db.header),
+ Epochs = couch_db_engine:get_epochs(Db),
validate_epochs(Epochs),
Epochs.
@@ -455,34 +416,25 @@ get_instance_start_time(#db{instance_start_time = IST}) ->
IST.
get_compacted_seq(#db{}=Db) ->
- couch_db_header:compacted_seq(Db#db.header).
+ couch_db_engine:get_compacted_seq(Db).
get_compactor_pid(#db{compactor_pid = Pid}) ->
Pid.
get_db_info(Db) ->
- #db{fd=Fd,
- header=Header,
- compactor_pid=Compactor,
- update_seq=SeqNum,
- name=Name,
- instance_start_time=StartTime,
- committed_update_seq=CommittedUpdateSeq,
- id_tree = IdBtree
+ #db{
+ name = Name,
+ compactor_pid = Compactor,
+ instance_start_time = StartTime,
+ committed_update_seq = CommittedUpdateSeq
} = Db,
- {ok, FileSize} = couch_file:bytes(Fd),
- {ok, DbReduction} = couch_btree:full_reduce(IdBtree),
- SizeInfo0 = element(3, DbReduction),
- SizeInfo = case SizeInfo0 of
- SI when is_record(SI, size_info) ->
- SI;
- {AS, ES} ->
- #size_info{active=AS, external=ES};
- AS ->
- #size_info{active=AS}
- end,
- ActiveSize = active_size(Db, SizeInfo),
- DiskVersion = couch_db_header:disk_version(Header),
+ {ok, DocCount} = get_doc_count(Db),
+ {ok, DelDocCount} = get_del_doc_count(Db),
+ SizeInfo = couch_db_engine:get_size_info(Db),
+ FileSize = couch_util:get_value(file, SizeInfo, null),
+ ActiveSize = couch_util:get_value(active, SizeInfo, null),
+ ExternalSize = couch_util:get_value(external, SizeInfo, null),
+ DiskVersion = couch_db_engine:get_disk_version(Db),
Uuid = case get_uuid(Db) of
undefined -> null;
Uuid0 -> Uuid0
@@ -493,63 +445,38 @@ get_db_info(Db) ->
end,
InfoList = [
{db_name, Name},
- {doc_count, element(1, DbReduction)},
- {doc_del_count, element(2, DbReduction)},
- {update_seq, SeqNum},
- {purge_seq, couch_db:get_purge_seq(Db)},
- {compact_running, Compactor/=nil},
+ {engine, couch_db_engine:get_engine(Db)},
+ {doc_count, DocCount},
+ {doc_del_count, DelDocCount},
+ {update_seq, get_update_seq(Db)},
+ {purge_seq, couch_db_engine:get_purge_seq(Db)},
+ {compact_running, Compactor /= nil},
+ {sizes, {SizeInfo}},
+ % TODO: Remove this in 3.0
+ % These are legacy and have been duplicated under
+ % the sizes key since 2.0. We should make a note
+ % in our release notes that we'll remove these
+ % old versions in 3.0
{disk_size, FileSize}, % legacy
- {other, {[{data_size, SizeInfo#size_info.external}]}}, % legacy
- {data_size, ActiveSize}, % legacy
- {sizes, {[
- {file, FileSize},
- {active, ActiveSize},
- {external, SizeInfo#size_info.external}
- ]}},
+ {data_size, ActiveSize},
+ {other, {[{data_size, ExternalSize}]}},
{instance_start_time, StartTime},
{disk_format_version, DiskVersion},
{committed_update_seq, CommittedUpdateSeq},
{compacted_seq, CompactedSeq},
{uuid, Uuid}
- ],
- {ok, InfoList}.
-
-active_size(#db{}=Db, Size) when is_integer(Size) ->
- active_size(Db, #size_info{active=Size});
-active_size(#db{}=Db, #size_info{}=SI) ->
- Trees = [
- Db#db.id_tree,
- Db#db.seq_tree,
- Db#db.local_tree
],
- lists:foldl(fun(T, Acc) ->
- case couch_btree:size(T) of
- _ when Acc == null ->
- null;
- nil ->
- null;
- Size ->
- Acc + Size
- end
- end, SI#size_info.active, Trees).
+ {ok, InfoList}.
get_design_docs(#db{name = <<"shards/", _:18/binary, DbName/binary>>}) ->
{_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
receive {'DOWN', Ref, _, _, Response} ->
Response
end;
-get_design_docs(#db{id_tree = IdBtree}) ->
- FoldFun = pipe([fun skip_deleted/4], fun
- (#full_doc_info{deleted = true}, _Reds, Acc) ->
- {ok, Acc};
- (#full_doc_info{id= <<"_design/",_/binary>>}=FullDocInfo, _Reds, Acc) ->
- {ok, [FullDocInfo | Acc]};
- (_, _Reds, Acc) ->
- {stop, Acc}
- end),
- KeyOpts = [{start_key, <<"_design/">>}, {end_key_gt, <<"_design0">>}],
- {ok, _, Docs} = couch_btree:fold(IdBtree, FoldFun, [], KeyOpts),
- {ok, Docs}.
+get_design_docs(#db{} = Db) ->
+ FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+ {ok, Docs} = fold_design_docs(Db, FoldFun, [], []),
+ {ok, lists:reverse(Docs)}.
check_is_admin(#db{user_ctx=UserCtx}=Db) ->
@@ -639,8 +566,8 @@ get_members(#db{security=SecProps}) ->
get_security(#db{security=SecProps}) ->
{SecProps};
-get_security(?NEW_PSE_DB = Db) ->
- {?PSE_DB_SECURITY(Db)}.
+get_security(?OLD_DB_REC = Db) ->
+ {?OLD_DB_SECURITY(Db)}.
set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
check_is_admin(Db),
@@ -679,8 +606,8 @@ validate_names_and_roles({Props}) when is_list(Props) ->
end,
ok.
-get_revs_limit(#db{revs_limit=Limit}) ->
- Limit.
+get_revs_limit(#db{} = Db) ->
+ couch_db_engine:get_revs_limit(Db).
set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
check_is_admin(Db),
@@ -690,11 +617,8 @@ set_revs_limit(_Db, _Limit) ->
name(#db{name=Name}) ->
Name;
-name(?NEW_PSE_DB = Db) ->
- ?PSE_DB_NAME(Db).
-
-compression(#db{compression=Compression}) ->
- Compression.
+name(?OLD_DB_REC = Db) ->
+ ?OLD_DB_NAME(Db).
update_doc(Db, Doc, Options) ->
update_doc(Db, Doc, Options, interactive_edit).
@@ -825,6 +749,9 @@ load_validation_funs(#db{main_pid=Pid}=Db) ->
gen_server:cast(Pid, {load_validation_funs, Funs}),
Funs.
+reload_validation_funs(#db{} = Db) ->
+ gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}).
+
prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
OldFullDocInfo, LeafRevsDict, AllowConflict) ->
case Revs of
@@ -891,7 +818,7 @@ prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
[PreppedBucket | AccPrepped], AccErrors3);
prep_and_validate_updates(Db, [DocBucket|RestBuckets],
- [{ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}|RestLookups],
+ [#full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo|RestLookups],
AllowConflict, AccPrepped, AccErrors) ->
Leafs = couch_key_tree:get_all_leafs(OldRevTree),
LeafRevsDict = dict:from_list([
@@ -942,13 +869,14 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI
end,
{[], AccErrors}, Bucket),
prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
- {ok, #full_doc_info{rev_tree=OldTree}} ->
+ #full_doc_info{rev_tree=OldTree} ->
+ RevsLimit = get_revs_limit(Db),
OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
NewRevTree = lists:foldl(
fun(NewDoc, AccTree) ->
{NewTree, _} = couch_key_tree:merge(AccTree,
- couch_doc:to_path(NewDoc), Db#db.revs_limit),
+ couch_doc:to_path(NewDoc), RevsLimit),
NewTree
end,
OldTree, Bucket),
@@ -1002,7 +930,7 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI
-new_revid(#doc{body=Body0, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) ->
+new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) ->
DigestedAtts = lists:foldl(fun(Att, Acc) ->
[N, T, M] = couch_att:fetch([name, type, md5], Att),
case M == <<>> of
@@ -1010,16 +938,6 @@ new_revid(#doc{body=Body0, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted})
false -> [{N, T, M} | Acc]
end
end, [], Atts),
- Body = case Body0 of
- {summary, [_Len, _Md5, BodyAtts], _SizeInfo, _AttsFd} ->
- {CompBody, _CompAtts} = binary_to_term(BodyAtts),
- couch_compress:decompress(CompBody);
- {summary, [_Len, BodyAtts], _SizeInfo, _AttsFd} ->
- {CompBody, _CompAtts} = binary_to_term(BodyAtts),
- couch_compress:decompress(CompBody);
- Else ->
- Else
- end,
case DigestedAtts of
Atts2 when length(Atts) =/= length(Atts2) ->
% We must have old style non-md5 attachments
@@ -1084,7 +1002,7 @@ update_docs(Db, Docs0, Options, replicated_changes) ->
DocErrors = [],
DocBuckets3 = DocBuckets
end,
- DocBuckets4 = [[doc_flush_atts(check_dup_atts(Doc), Db#db.fd)
+ DocBuckets4 = [[doc_flush_atts(Db, check_dup_atts(Doc))
|| Doc <- Bucket] || Bucket <- DocBuckets3],
{ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]),
{ok, DocErrors};
@@ -1138,8 +1056,8 @@ update_docs(Db, Docs0, Options, interactive_edit) ->
Options2 = if AllOrNothing -> [merge_conflicts];
true -> [] end ++ Options,
DocBuckets3 = [[
- doc_flush_atts(set_new_att_revpos(
- check_dup_atts(Doc)), Db#db.fd)
+ doc_flush_atts(Db, set_new_att_revpos(
+ check_dup_atts(Doc)))
|| Doc <- B] || B <- DocBuckets2],
{DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []),
@@ -1223,7 +1141,7 @@ write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
% compaction. Retry by reopening the db and writing to the current file
{ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
DocBuckets2 = [
- [doc_flush_atts(Doc, Db2#db.fd) || Doc <- Bucket] ||
+ [doc_flush_atts(Db2, Doc) || Doc <- Bucket] ||
Bucket <- DocBuckets1
],
% We only retry once
@@ -1242,22 +1160,25 @@ write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
prepare_doc_summaries(Db, BucketList) ->
[lists:map(
- fun(#doc{body = Body, atts = Atts} = Doc) ->
+ fun(#doc{body = Body, atts = Atts} = Doc0) ->
DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
{ok, SizeInfo} = couch_att:size_info(Atts),
- AttsFd = case Atts of
- [Att | _] ->
- {Fd, _} = couch_att:fetch(data, Att),
- Fd;
- [] ->
- nil
+ AttsStream = case Atts of
+ [Att | _] ->
+ {stream, StreamEngine} = couch_att:fetch(data, Att),
+ StreamEngine;
+ [] ->
+ nil
end,
- SummaryChunk = couch_db_updater:make_doc_summary(Db, {Body, DiskAtts}),
- Meta = Doc#doc.meta,
- Doc#doc{
- body = {summary, SummaryChunk, SizeInfo, AttsFd},
- meta = [{ejson_size, ?term_size(Body)} | Meta]
- }
+ Doc1 = Doc0#doc{
+ atts = DiskAtts,
+ meta = [
+ {size_info, SizeInfo},
+ {atts_stream, AttsStream},
+ {ejson_size, ?term_size(Body)}
+ ] ++ Doc0#doc.meta
+ },
+ couch_db_engine:serialize_doc(Db, Doc1)
end,
Bucket) || Bucket <- BucketList].
@@ -1275,19 +1196,18 @@ set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
Atts = lists:map(
fun(Att) ->
case couch_att:fetch(data, Att) of
- {_Fd, _Sp} -> Att; % already commited to disk, don't set new rev
+            % already committed to disk, don't set new rev
+ {stream, _} -> Att;
+ {Fd, _} when is_pid(Fd) -> Att;
+            % write required, so update RevPos
_ -> couch_att:store(revpos, RevPos+1, Att)
end
end, Atts0),
Doc#doc{atts = Atts}.
-doc_flush_atts(Doc, Fd) ->
- Doc#doc{atts=[couch_att:flush(Fd, Att) || Att <- Doc#doc.atts]}.
-
-check_md5(_NewSig, <<>>) -> ok;
-check_md5(Sig, Sig) -> ok;
-check_md5(_, _) -> throw(md5_mismatch).
+doc_flush_atts(Db, Doc) ->
+ Doc#doc{atts=[couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
compressible_att_type(MimeType) when is_binary(MimeType) ->
@@ -1317,21 +1237,24 @@ compressible_att_type(MimeType) ->
% is present in the request, but there is no Content-MD5
% trailer, we're free to ignore this inconsistency and
% pretend that no Content-MD5 exists.
-with_stream(Fd, Att, Fun) ->
+with_stream(Db, Att, Fun) ->
[InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
BufferSize = list_to_integer(
config:get("couchdb", "attachment_stream_buffer_size", "4096")),
- {ok, OutputStream} = case (Enc =:= identity) andalso
- compressible_att_type(Type) of
- true ->
- CompLevel = list_to_integer(
- config:get("attachments", "compression_level", "0")
- ),
- couch_stream:open(Fd, [{buffer_size, BufferSize},
- {encoding, gzip}, {compression_level, CompLevel}]);
- _ ->
- couch_stream:open(Fd, [{buffer_size, BufferSize}])
+ Options = case (Enc =:= identity) andalso compressible_att_type(Type) of
+ true ->
+ CompLevel = list_to_integer(
+ config:get("attachments", "compression_level", "0")
+ ),
+ [
+ {buffer_size, BufferSize},
+ {encoding, gzip},
+ {compression_level, CompLevel}
+ ];
+ _ ->
+ [{buffer_size, BufferSize}]
end,
+ {ok, OutputStream} = open_write_stream(Db, Options),
ReqMd5 = case Fun(OutputStream) of
{md5, FooterMd5} ->
case InMd5 of
@@ -1341,9 +1264,9 @@ with_stream(Fd, Att, Fun) ->
_ ->
InMd5
end,
- {StreamInfo, Len, IdentityLen, Md5, IdentityMd5} =
+ {StreamEngine, Len, IdentityLen, Md5, IdentityMd5} =
couch_stream:close(OutputStream),
- check_md5(IdentityMd5, ReqMd5),
+ couch_util:check_md5(IdentityMd5, ReqMd5),
{AttLen, DiskLen, NewEnc} = case Enc of
identity ->
case {Md5, IdentityMd5} of
@@ -1365,7 +1288,7 @@ with_stream(Fd, Att, Fun) ->
end
end,
couch_att:store([
- {data, {Fd,StreamInfo}},
+ {data, {stream, StreamEngine}},
{att_len, AttLen},
{disk_len, DiskLen},
{md5, Md5},
@@ -1373,83 +1296,16 @@ with_stream(Fd, Att, Fun) ->
], Att).
-enum_docs_since_reduce_to_count(Reds) ->
- couch_btree:final_reduce(
- fun couch_db_updater:btree_by_seq_reduce/2, Reds).
+open_write_stream(Db, Options) ->
+ couch_db_engine:open_write_stream(Db, Options).
-enum_docs_reduce_to_count(Reds) ->
- FinalRed = couch_btree:final_reduce(
- fun couch_db_updater:btree_by_id_reduce/2, Reds),
- element(1, FinalRed).
-changes_since(Db, StartSeq, Fun, Acc) ->
- changes_since(Db, StartSeq, Fun, [], Acc).
+open_read_stream(Db, AttState) ->
+ couch_db_engine:open_read_stream(Db, AttState).
-changes_since(Db, StartSeq, Fun, Options, Acc) when is_record(Db, db) ->
- changes_since(Db#db.seq_tree, StartSeq, Fun, Options, Acc);
-changes_since(SeqTree, StartSeq, Fun, Options, Acc) ->
- Wrapper = fun(FullDocInfo, _Offset, Acc2) ->
- DocInfo = case FullDocInfo of
- #full_doc_info{} ->
- couch_doc:to_doc_info(FullDocInfo);
- #doc_info{} ->
- FullDocInfo
- end,
- Fun(DocInfo, Acc2)
- end,
- {ok, _LastReduction, AccOut} = couch_btree:fold(SeqTree,
- Wrapper, Acc, [{start_key, StartSeq + 1}] ++ Options),
- {ok, AccOut}.
-count_changes_since(Db, SinceSeq) ->
- BTree = Db#db.seq_tree,
- {ok, Changes} =
- couch_btree:fold_reduce(BTree,
- fun(_SeqStart, PartialReds, 0) ->
- {ok, couch_btree:final_reduce(BTree, PartialReds)}
- end,
- 0, [{start_key, SinceSeq + 1}]),
- Changes.
-
-enum_docs_since(Db, SinceSeq, InFun, Acc, Options) ->
- {ok, LastReduction, AccOut} = couch_btree:fold(
- Db#db.seq_tree, InFun, Acc,
- [{start_key, SinceSeq + 1} | Options]),
- {ok, enum_docs_since_reduce_to_count(LastReduction), AccOut}.
-
-
-fold_docs(Db, InFun, InAcc, Opts) ->
- Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end,
- {ok, _, AccOut} = couch_btree:fold(Db#db.id_tree, Wrapper, InAcc, Opts),
- {ok, AccOut}.
-
-fold_local_docs(Db, InFun, InAcc, Opts) ->
- Wrapper = fun(FDI, _, Acc) -> InFun(FDI, Acc) end,
- {ok, _, AccOut} = couch_btree:fold(Db#db.local_tree, Wrapper, InAcc, Opts),
- {ok, AccOut}.
-
-enum_docs(Db, InFun, InAcc, Options0) ->
- {NS, Options} = extract_namespace(Options0),
- enum_docs(Db, NS, InFun, InAcc, Options).
-
-enum_docs(Db, undefined, InFun, InAcc, Options) ->
- FoldFun = pipe([fun skip_deleted/4], InFun),
- {ok, LastReduce, OutAcc} = couch_btree:fold(
- Db#db.id_tree, FoldFun, InAcc, Options),
- {ok, enum_docs_reduce_to_count(LastReduce), OutAcc};
-enum_docs(Db, <<"_local">>, InFun, InAcc, Options) ->
- FoldFun = pipe([fun skip_deleted/4], InFun),
- {ok, _LastReduce, OutAcc} = couch_btree:fold(
- Db#db.local_tree, FoldFun, InAcc, Options),
- {ok, null, OutAcc};
-enum_docs(Db, NS, InFun, InAcc, Options0) ->
- FoldFun = pipe([
- fun skip_deleted/4,
- stop_on_leaving_namespace(NS)], InFun),
- Options = set_namespace_range(Options0, NS),
- {ok, LastReduce, OutAcc} = couch_btree:fold(
- Db#db.id_tree, FoldFun, InAcc, Options),
- {ok, enum_docs_reduce_to_count(LastReduce), OutAcc}.
+is_active_stream(Db, StreamEngine) ->
+ couch_db_engine:is_active_stream(Db, StreamEngine).
calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
@@ -1523,13 +1379,33 @@ start_seq([], OrigNode, Seq) ->
erlang:error({epoch_mismatch, OrigNode, Seq}).
-extract_namespace(Options0) ->
- case proplists:split(Options0, [namespace]) of
- {[[{namespace, NS}]], Options} ->
- {NS, Options};
- {_, Options} ->
- {undefined, Options}
- end.
+fold_docs(Db, UserFun, UserAcc) ->
+ fold_docs(Db, UserFun, UserAcc, []).
+
+fold_docs(Db, UserFun, UserAcc, Options) ->
+ couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options).
+
+
+fold_local_docs(Db, UserFun, UserAcc, Options) ->
+ couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options).
+
+
+fold_design_docs(Db, UserFun, UserAcc, Options1) ->
+ Options2 = set_design_doc_keys(Options1),
+ couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2).
+
+
+fold_changes(Db, StartSeq, UserFun, UserAcc) ->
+ fold_changes(Db, StartSeq, UserFun, UserAcc, []).
+
+
+fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
+ couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts).
+
+
+count_changes_since(Db, SinceSeq) ->
+ couch_db_engine:count_changes_since(Db, SinceSeq).
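+
+% A minimal usage sketch (assuming, as couch_changes above does, that the
+% engine passes #full_doc_info{} records to the fold fun and returns
+% {ok, Acc}):
+%
+%     CollectFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+%     {ok, Infos} = couch_db:fold_changes(Db, SinceSeq, CollectFun, []),
+%
+% which folds every change after SinceSeq through the storage engine.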
+
%%% Internal function %%%
open_doc_revs_int(Db, IdRevs, Options) ->
@@ -1538,7 +1414,7 @@ open_doc_revs_int(Db, IdRevs, Options) ->
lists:zipwith(
fun({Id, Revs}, Lookup) ->
case Lookup of
- {ok, #full_doc_info{rev_tree=RevTree}} ->
+ #full_doc_info{rev_tree=RevTree} ->
{FoundRevs, MissingRevs} =
case Revs of
all ->
@@ -1572,9 +1448,8 @@ open_doc_revs_int(Db, IdRevs, Options) ->
IdRevs, LookupResults).
open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
- case couch_btree:lookup(Db#db.local_tree, [Id]) of
- [{ok, {_, {Rev, BodyData}}}] ->
- Doc = #doc{id=Id, revs={0, [?l2b(integer_to_list(Rev))]}, body=BodyData},
+ case couch_db_engine:open_local_docs(Db, [Id]) of
+ [#doc{} = Doc] ->
apply_open_options({ok, Doc}, Options);
[not_found] ->
{not_found, missing}
@@ -1593,7 +1468,7 @@ open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
{ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
open_doc_int(Db, Id, Options) ->
case get_full_doc_info(Db, Id) of
- {ok, FullDocInfo} ->
+ #full_doc_info{} = FullDocInfo ->
open_doc_int(Db, FullDocInfo, Options);
not_found ->
{not_found, missing}
@@ -1639,9 +1514,6 @@ doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTre
true -> [{local_seq, Seq}]
end.
-read_doc(#db{fd=Fd}, Pos) ->
- couch_file:pread_term(Fd, Pos).
-
make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
#doc{
@@ -1651,28 +1523,25 @@ make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
atts = [],
deleted = Deleted
};
-make_doc(#db{fd=Fd, revs_limit=RevsLimit}=Db, Id, Deleted, Bp, {Pos, Revs}) ->
- {BodyData, Atts0} = case Bp of
- nil ->
- {[], []};
- _ ->
- case read_doc(Db, Bp) of
- {ok, {BodyData0, Atts1}} when is_binary(Atts1) ->
- {BodyData0, couch_compress:decompress(Atts1)};
- {ok, {BodyData0, Atts1}} when is_list(Atts1) ->
- % pre 1.2 format
- {BodyData0, Atts1}
- end
- end,
- Atts = [couch_att:from_disk_term(Fd, T) || T <- Atts0],
- Doc = #doc{
+make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) ->
+ RevsLimit = get_revs_limit(Db),
+ Doc0 = couch_db_engine:read_doc_body(Db, #doc{
id = Id,
revs = {Pos, lists:sublist(Revs, 1, RevsLimit)},
- body = BodyData,
- atts = Atts,
+ body = Bp,
deleted = Deleted
- },
- after_doc_read(Db, Doc).
+ }),
+ Doc1 = case Doc0#doc.atts of
+ BinAtts when is_binary(BinAtts) ->
+ Doc0#doc{
+ atts = couch_compress:decompress(BinAtts)
+ };
+ ListAtts when is_list(ListAtts) ->
+ Doc0
+ end,
+ after_doc_read(Db, Doc1#doc{
+ atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts]
+ }).
after_doc_read(#db{} = Db, Doc) ->
@@ -1687,72 +1556,6 @@ increment_stat(#db{options = Options}, Stat) ->
couch_stats:increment_counter(Stat)
end.
-skip_deleted(traverse, LK, {Undeleted, _, _} = Reds, Acc) when Undeleted == 0 ->
- {skip, LK, Reds, Acc};
-skip_deleted(Case, A, B, C) ->
- {Case, A, B, C}.
-
-stop_on_leaving_namespace(NS) ->
- fun
- (visit, #full_doc_info{id = Key} = FullInfo, Reds, Acc) ->
- case has_prefix(Key, NS) of
- true ->
- {visit, FullInfo, Reds, Acc};
- false ->
- {stop, FullInfo, Reds, Acc}
- end;
- (Case, KV, Reds, Acc) ->
- {Case, KV, Reds, Acc}
- end.
-
-has_prefix(Bin, Prefix) ->
- S = byte_size(Prefix),
- case Bin of
- <<Prefix:S/binary, "/", _/binary>> ->
- true;
- _Else ->
- false
- end.
-
-pipe(Filters, Final) ->
- Wrap =
- fun
- (visit, KV, Reds, Acc) ->
- Final(KV, Reds, Acc);
- (skip, _KV, _Reds, Acc) ->
- {skip, Acc};
- (stop, _KV, _Reds, Acc) ->
- {stop, Acc};
- (traverse, _, _, Acc) ->
- {ok, Acc}
- end,
- do_pipe(Filters, Wrap).
-
-do_pipe([], Fun) -> Fun;
-do_pipe([Filter|Rest], F0) ->
- F1 = fun(C0, KV0, Reds0, Acc0) ->
- {C, KV, Reds, Acc} = Filter(C0, KV0, Reds0, Acc0),
- F0(C, KV, Reds, Acc)
- end,
- do_pipe(Rest, F1).
-
-set_namespace_range(Options, undefined) -> Options;
-set_namespace_range(Options, NS) ->
- %% FIXME depending on order we might need to swap keys
- SK = select_gt(
- proplists:get_value(start_key, Options, <<"">>),
- <<NS/binary, "/">>),
- EK = select_lt(
- proplists:get_value(end_key, Options, <<NS/binary, "0">>),
- <<NS/binary, "0">>),
- [{start_key, SK}, {end_key_gt, EK}].
-
-select_gt(V1, V2) when V1 < V2 -> V2;
-select_gt(V1, _V2) -> V1.
-
-select_lt(V1, V2) when V1 > V2 -> V2;
-select_lt(V1, _V2) -> V1.
-
-spec normalize_dbname(list() | binary()) -> binary().
normalize_dbname(DbName) when is_list(DbName) ->
@@ -1791,6 +1594,70 @@ is_systemdb(DbName) when is_list(DbName) ->
is_systemdb(DbName) when is_binary(DbName) ->
lists:member(dbname_suffix(DbName), ?SYSTEM_DATABASES).
+
+set_design_doc_keys(Options1) ->
+ Dir = case lists:keyfind(dir, 1, Options1) of
+ {dir, D0} -> D0;
+ _ -> fwd
+ end,
+ Options2 = set_design_doc_start_key(Options1, Dir),
+ set_design_doc_end_key(Options2, Dir).
+
+
+-define(FIRST_DDOC_KEY, <<"_design/">>).
+-define(LAST_DDOC_KEY, <<"_design0">>).
+
+
+set_design_doc_start_key(Options, fwd) ->
+ Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
+ Key2 = case Key1 < ?FIRST_DDOC_KEY of
+ true -> ?FIRST_DDOC_KEY;
+ false -> Key1
+ end,
+ lists:keystore(start_key, 1, Options, {start_key, Key2});
+set_design_doc_start_key(Options, rev) ->
+ Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
+ Key2 = case Key1 > ?LAST_DDOC_KEY of
+ true -> ?LAST_DDOC_KEY;
+ false -> Key1
+ end,
+ lists:keystore(start_key, 1, Options, {start_key, Key2}).
+
+
+set_design_doc_end_key(Options, fwd) ->
+ case couch_util:get_value(end_key_gt, Options) of
+ undefined ->
+ Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
+ Key2 = case Key1 > ?LAST_DDOC_KEY of
+ true -> ?LAST_DDOC_KEY;
+ false -> Key1
+ end,
+ lists:keystore(end_key, 1, Options, {end_key, Key2});
+ EKeyGT ->
+ Key2 = case EKeyGT > ?LAST_DDOC_KEY of
+ true -> ?LAST_DDOC_KEY;
+ false -> EKeyGT
+ end,
+ lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
+ end;
+set_design_doc_end_key(Options, rev) ->
+ case couch_util:get_value(end_key_gt, Options) of
+ undefined ->
+ Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
+ Key2 = case Key1 < ?FIRST_DDOC_KEY of
+ true -> ?FIRST_DDOC_KEY;
+ false -> Key1
+ end,
+ lists:keystore(end_key, 1, Options, {end_key, Key2});
+ EKeyGT ->
+ Key2 = case EKeyGT < ?FIRST_DDOC_KEY of
+ true -> ?FIRST_DDOC_KEY;
+ false -> EKeyGT
+ end,
+ lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
+ end.
+
+
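For illustration (hypothetical call, since these helpers are internal to
couch_db), clamping an empty option list pins the fold to the design document
key range; a minimal sketch of the expected result:

    [{start_key, <<"_design/">>}, {end_key, <<"_design0">>}] =
        set_design_doc_keys([]).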
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -1861,19 +1728,58 @@ should_fail_validate_dbname(DbName) ->
ok
end)}.
-calculate_start_seq_test() ->
- %% uuid mismatch is always a rewind.
- Hdr1 = couch_db_header:new(),
- Hdr2 = couch_db_header:set(Hdr1, [{epochs, [{node1, 1}]}, {uuid, <<"uuid1">>}]),
- ?assertEqual(0, calculate_start_seq(#db{header=Hdr2}, node1, {1, <<"uuid2">>})),
- %% uuid matches and seq is owned by node.
- Hdr3 = couch_db_header:set(Hdr2, [{epochs, [{node1, 1}]}]),
- ?assertEqual(2, calculate_start_seq(#db{header=Hdr3}, node1, {2, <<"uuid1">>})),
- %% uuids match but seq is not owned by node.
- Hdr4 = couch_db_header:set(Hdr2, [{epochs, [{node2, 2}, {node1, 1}]}]),
- ?assertEqual(0, calculate_start_seq(#db{header=Hdr4}, node1, {3, <<"uuid1">>})),
- %% return integer if we didn't get a vector.
- ?assertEqual(4, calculate_start_seq(#db{}, foo, 4)).
+calculate_start_seq_test_() ->
+ {
+ foreach,
+ fun setup_start_seq/0,
+ fun teardown_start_seq/1,
+ [
+ t_calculate_start_seq_uuid_mismatch(),
+ t_calculate_start_seq_is_owner(),
+ t_calculate_start_seq_not_owner(),
+ t_calculate_start_seq_raw()
+ ]
+ }.
+
+setup_start_seq() ->
+ meck:new(couch_db_engine, [passthrough]),
+ meck:expect(couch_db_engine, get_uuid, fun(_) -> <<"foo">> end),
+ Epochs = [
+ {node2, 10},
+ {node1, 1}
+ ],
+ meck:expect(couch_db_engine, get_epochs, fun(_) -> Epochs end).
+
+teardown_start_seq(_) ->
+ meck:unload().
+
+t_calculate_start_seq_uuid_mismatch() ->
+ ?_test(begin
+ Db = test_util:fake_db([]),
+ Seq = calculate_start_seq(Db, node2, {15, <<"baz">>}),
+ ?assertEqual(0, Seq)
+ end).
+
+t_calculate_start_seq_is_owner() ->
+ ?_test(begin
+ Db = test_util:fake_db([]),
+ Seq = calculate_start_seq(Db, node2, {15, <<"foo">>}),
+ ?assertEqual(15, Seq)
+ end).
+
+t_calculate_start_seq_not_owner() ->
+ ?_test(begin
+ Db = test_util:fake_db([]),
+ Seq = calculate_start_seq(Db, node1, {15, <<"foo">>}),
+ ?assertEqual(0, Seq)
+ end).
+
+t_calculate_start_seq_raw() ->
+ ?_test(begin
+ Db = test_util:fake_db([]),
+ Seq = calculate_start_seq(Db, node1, 13),
+ ?assertEqual(13, Seq)
+ end).
is_owner_test() ->
?assertNot(is_owner(foo, 1, [])),
diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl
new file mode 100644
index 000000000..502faa7ee
--- /dev/null
+++ b/src/couch/src/couch_db_engine.erl
@@ -0,0 +1,893 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db_engine).
+
+
+-include("couch_db.hrl").
+-include("couch_db_int.hrl").
+
+
+-type filepath() :: iolist().
+-type docid() :: binary().
+-type rev() :: {non_neg_integer(), binary()}.
+-type revs() :: [rev()].
+-type json() :: any().
+
+-type doc_pair() :: {
+ #full_doc_info{} | not_found,
+ #full_doc_info{} | not_found
+ }.
+
+-type doc_pairs() :: [doc_pair()].
+
+-type db_open_options() :: [
+ create
+ ].
+
+-type delete_options() :: [
+ {context, delete | compaction} |
+ sync
+ ].
+
+-type purge_info() :: [{docid(), revs()}].
+-type epochs() :: [{Node::atom(), UpdateSeq::non_neg_integer()}].
+-type size_info() :: [{Name::atom(), Size::non_neg_integer()}].
+
+-type write_stream_options() :: [
+ {buffer_size, Size::pos_integer()} |
+ {encoding, atom()} |
+ {compression_level, non_neg_integer()}
+ ].
+
+-type doc_fold_options() :: [
+ {start_key, Key::any()} |
+ {end_key, Key::any()} |
+ {end_key_gt, Key::any()} |
+ {dir, fwd | rev} |
+ include_reductions |
+ include_deleted
+ ].
+
+-type changes_fold_options() :: [
+ {dir, fwd | rev}
+ ].
+
+-type db_handle() :: any().
+
+-type doc_fold_fun() :: fun((#full_doc_info{}, UserAcc::any()) ->
+ {ok, NewUserAcc::any()} |
+ {stop, NewUserAcc::any()}).
+
+-type local_doc_fold_fun() :: fun((#doc{}, UserAcc::any()) ->
+ {ok, NewUserAcc::any()} |
+ {stop, NewUserAcc::any()}).
+
+-type changes_fold_fun() :: fun((#doc_info{}, UserAcc::any()) ->
+ {ok, NewUserAcc::any()} |
+ {stop, NewUserAcc::any()}).
+
+
+% This is called by couch_server to determine which
+% engine should be used for the given database. DbPath
+% is calculated based on the DbName and the configured
+% extension for a given engine. The first engine to
+% return true is the engine that will be used for the
+% database.
+-callback exists(DbPath::filepath()) -> boolean().
+
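As an illustrative sketch (not part of this patch), an engine that keeps each
database in a single file could implement this callback as a plain filesystem
check:

    exists(DbPath) ->
        filelib:is_file(DbPath).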
+
+% This is called by couch_server to delete a database. It
+% is called from inside the couch_server process which
+% means that the storage engine does not have to guarantee
+% its own consistency checks when executing in this
+% context. However, since this is executed in the context
+% of couch_server it should return relatively quickly.
+-callback delete(
+ RootDir::filepath(),
+ DbPath::filepath(),
+ DelOpts::delete_options()) ->
+ ok | {error, Reason::atom()}.
+
+
+% This function can be called from multiple contexts. It
+% will either be called just before a call to delete/3 above
+% or when a compaction is cancelled which executes in the
+% context of a couch_db_updater process. It is intended to
+% remove any temporary files used during compaction that
+% may be used to recover from a failed compaction swap.
+-callback delete_compaction_files(
+ RootDir::filepath(),
+ DbPath::filepath(),
+ DelOpts::delete_options()) ->
+ ok.
+
+
+% This is called from the couch_db_updater:init/1 context. As
+% such this means that it is guaranteed to only have one process
+% executing for a given DbPath argument (ie, opening a given
+% database is guaranteed to only happen in a single process).
+% However, multiple processes may be trying to open different
+% databases concurrently, so if a database requires a shared
+% resource, that resource will require concurrency control at
+% the storage engine layer.
+%
+% The returned DbHandle should be a term that can be freely
+% copied between processes and accessed concurrently. However
+% it's guaranteed that the handle will only ever be mutated
+% in a single threaded context (ie, within the couch_db_updater
+% process).
+-callback init(DbPath::filepath(), db_open_options()) ->
+ {ok, DbHandle::db_handle()}.
+
+
+% This is called in the context of couch_db_updater:terminate/2
+% and as such has the same properties as init/2. It's guaranteed
+% to be consistent for a given database but may be called by many
+% databases concurrently.
+-callback terminate(Reason::any(), DbHandle::db_handle()) -> Ignored::any().
+
+
+% This is called in the context of couch_db_updater:handle_call/3
+% for any message that is unknown. It can be used to handle messages
+% from asynchronous processes like the engine's compactor if it has one.
+-callback handle_db_updater_call(Msg::any(), DbHandle::db_handle()) ->
+ {reply, Resp::any(), NewDbHandle::db_handle()} |
+ {stop, Reason::any(), Resp::any(), NewDbHandle::db_handle()}.
+
+
+% This is called in the context of couch_db_updater:handle_info/2
+% and has the same properties as handle_call/3.
+-callback handle_db_updater_info(Msg::any(), DbHandle::db_handle()) ->
+ {noreply, NewDbHandle::db_handle()} |
+ {noreply, NewDbHandle::db_handle(), Timeout::timeout()} |
+ {stop, Reason::any(), NewDbHandle::db_handle()}.
+
+
+% These functions are called by any process opening or closing
+% a database. As such they need to be able to handle being
+% called concurrently. For example, the legacy engine uses these
+% to add monitors to the main engine process.
+-callback incref(DbHandle::db_handle()) -> {ok, NewDbHandle::db_handle()}.
+-callback decref(DbHandle::db_handle()) -> ok.
+-callback monitored_by(DbHandle::db_handle()) -> [pid()].
+
+
+% This is called in the context of couch_db_updater:handle_info/2
+% and should return the timestamp of the last activity of
+% the database. If a storage engine has no notion of activity or the
+% value would be hard to report, it's ok to just return the
+% result of os:timestamp/0, as this will just prevent idle
+% databases from closing automatically.
+-callback last_activity(DbHandle::db_handle()) -> erlang:timestamp().
+
+
+% All of the get_* functions may be called from many
+% processes concurrently.
+
+% The database should make a note of the update sequence when it
+% was last compacted. If the database doesn't need compacting it
+% can just hard code a return value of 0.
+-callback get_compacted_seq(DbHandle::db_handle()) ->
+ CompactedSeq::non_neg_integer().
+
+
+% The number of documents in the database which have all leaf
+% revisions marked as deleted.
+-callback get_del_doc_count(DbHandle::db_handle()) ->
+ DelDocCount::non_neg_integer().
+
+
+% This number is reported in the database info properties and
+% as such can be any JSON value.
+-callback get_disk_version(DbHandle::db_handle()) -> Version::json().
+
+
+% The number of documents in the database that have one or more
+% leaf revisions not marked as deleted.
+-callback get_doc_count(DbHandle::db_handle()) -> DocCount::non_neg_integer().
+
+
+% The epochs track which node owned the database starting at
+% a given update sequence. Each time a database is opened it
+% should look at the epochs. If the most recent entry is not
+% for the current node it should add an entry that will be
+% written the next time a write is performed. An entry is
+% simply a {node(), CurrentUpdateSeq} tuple.
+-callback get_epochs(DbHandle::db_handle()) -> Epochs::epochs().
+
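As a hypothetical example (invented nodes and sequences), a shard first owned
by node1 and taken over by node2 at update sequence 10 would report its epochs
with the most recent entry first:

    get_epochs(_DbHandle) ->
        [{node2, 10}, {node1, 1}].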
+
+% Get the last purge request performed.
+-callback get_last_purged(DbHandle::db_handle()) -> LastPurged::purge_info().
+
+
+% Get the current purge sequence. This should be incremented
+% for every purge operation.
+-callback get_purge_seq(DbHandle::db_handle()) -> PurgeSeq::non_neg_integer().
+
+
+% Get the revision limit. This should just return the last
+% value that was passed to set_revs_limit/2.
+-callback get_revs_limit(DbHandle::db_handle()) -> RevsLimit::pos_integer().
+
+
+% Get the current security properties. This should just return
+% the last value that was passed to set_security/2.
+-callback get_security(DbHandle::db_handle()) -> SecProps::any().
+
+
+% This information is displayed in the database info properties. It
+% should just be a list of {Name::atom(), Size::non_neg_integer()}
+% tuples that will then be combined across shards. Currently,
+% various modules expect there to at least be values for:
+%
+% file - Number of bytes on disk
+%
+% active - Theoretical minimum number of bytes to store this db on disk
+% which is used to guide decisions on compaction
+%
+% external - Number of bytes that would be required to represent the
+% contents outside of the database (for capacity and backup
+% planning)
+-callback get_size_info(DbHandle::db_handle()) -> SizeInfo::size_info().
+
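A sketch of a plausible return value covering the three keys listed above
(the numbers are invented):

    get_size_info(_DbHandle) ->
        [{file, 1048576}, {active, 65536}, {external, 32768}].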
+
+% The current update sequence of the database. The update
+% sequence should be incremented for every revision added to
+% the database.
+-callback get_update_seq(DbHandle::db_handle()) -> UpdateSeq::non_neg_integer().
+
+
+% Whenever a database is created it should generate a
+% persistent UUID for identification in case the shard should
+% ever need to be moved between nodes in a cluster.
+-callback get_uuid(DbHandle::db_handle()) -> UUID::binary().
+
+
+% These functions are only called by couch_db_updater and
+% as such are guaranteed to be single threaded calls. The
+% database should simply store these values somewhere so
+% they can be returned by the corresponding get_* calls.
+
+-callback set_revs_limit(DbHandle::db_handle(), RevsLimit::pos_integer()) ->
+ {ok, NewDbHandle::db_handle()}.
+
+-callback set_security(DbHandle::db_handle(), SecProps::any()) ->
+ {ok, NewDbHandle::db_handle()}.
+
+
+% This function will be called by many processes concurrently.
+% It should return a #full_doc_info{} record or not_found for
+% every provided DocId in the order those DocIds appear in
+% the input.
+%
+% Traditionally this function will only return documents that
+% were present in the database when the DbHandle was retrieved
+% from couch_server. It is currently unknown what would break
+% if a storage engine deviated from that property.
+-callback open_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
+ [#full_doc_info{} | not_found].
+
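For example (hypothetical ids, Db being an open database handle), results come
back in request order with not_found holes preserved:

    [#full_doc_info{id = <<"exists">>}, not_found] =
        couch_db_engine:open_docs(Db, [<<"exists">>, <<"missing">>]).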
+
+% This function will be called by many processes concurrently.
+% It should return a #doc{} record or not_found for every
+% provided DocId in the order they appear in the input.
+%
+% The same caveats around database snapshots from open_docs
+% apply to this function (although this function is called
+% rather less frequently so it may not be as big of an
+% issue).
+-callback open_local_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
+ [#doc{} | not_found].
+
+
+% This function will be called from many contexts concurrently.
+% The provided RawDoc is a #doc{} record that has its body
+% value set to the body value returned from write_doc_body/2.
+%
+% This API exists so that storage engines can store document
+% bodies externally from the #full_doc_info{} record (which
+% is the traditional approach and is recommended).
+-callback read_doc_body(DbHandle::db_handle(), RawDoc::doc()) ->
+ doc().
+
+
+% This function is called concurrently by any client process
+% that is writing a document. It should accept a #doc{}
+% record and return a #doc{} record with a mutated body it
+% wishes to have written to disk by write_doc_body/2.
+%
+% This API exists so that storage engines can compress
+% document bodies in parallel by client processes rather
+% than forcing all compression to occur single threaded
+% in the context of the couch_db_updater process.
+-callback serialize_doc(DbHandle::db_handle(), Doc::doc()) ->
+ doc().
+
+
+% This function is called in the context of a couch_db_updater
+% which means its single threaded for the given DbHandle.
+%
+% The returned #doc{} record should have its Body set to a value
+% that will be stored in the #full_doc_info{} record's revision
+% tree leaves which is passed to read_doc_body/2 above when
+% a client wishes to read a document.
+%
+% The BytesWritten return value is used to determine the number
+% of active bytes in the database, which in turn is used to
+% decide when to compact this database.
+-callback write_doc_body(DbHandle::db_handle(), Doc::doc()) ->
+ {ok, FlushedDoc::doc(), BytesWritten::non_neg_integer()}.
+
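To illustrate the intended round trip between serialize_doc/2, write_doc_body/2
and read_doc_body/2, here is a deliberately naive sketch (not part of this
patch) in which the engine state is assumed to be an ETS table and the stored
"pointer" is just a reference:

    serialize_doc(_Tab, #doc{body = Body} = Doc) ->
        Doc#doc{body = term_to_binary(Body)}.

    write_doc_body(Tab, #doc{body = Bin} = Doc) when is_binary(Bin) ->
        Ptr = erlang:make_ref(),
        true = ets:insert(Tab, {Ptr, Bin}),
        {ok, Doc#doc{body = Ptr}, byte_size(Bin)}.

    read_doc_body(Tab, #doc{body = Ptr} = Doc) ->
        [{Ptr, Bin}] = ets:lookup(Tab, Ptr),
        Doc#doc{body = binary_to_term(Bin)}.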
+
+% This function is called from the context of couch_db_updater
+% and as such is guaranteed single threaded for the given
+% DbHandle.
+%
+% This is probably the most complicated function in the entire
+% API due to a few subtle behavior requirements required by
+% CouchDB's storage model.
+%
+% The Pairs argument is a list of pairs (2-tuples) of
+% #full_doc_info{} records. The first element of the pair is
+% the #full_doc_info{} that exists on disk. The second element
+% is the new version that should be written to disk. There are
+% three basic cases that should be followed:
+%
+% 1. {not_found, #full_doc_info{}} - A new document was created
+% 2. {#full_doc_info{}, #full_doc_info{}} - A document was updated
+% 3. {#full_doc_info{}, not_found} - A document was purged completely
+%
+% Numbers one and two are fairly straightforward as long as entries
+% that move in the update sequence are properly accounted
+% for. However, case 3 you'll notice is "purged completely" which
+% means it needs to be removed from the database including the
+% update sequence. Also, for engines that are not using append
+% only storage like the legacy engine, case 2 can be the result of
+% a purge so special care will be needed to see which revisions
+% should be removed.
+%
+% The LocalDocs variable is applied separately. It's important to
+% note for new storage engine authors that these documents are
+% separate because they should *not* be included as part of the
+% changes index for the database.
+%
+% The PurgedDocIdRevs is the list of Ids and Revisions that were
+% purged during this update. While it's not guaranteed by the API,
+% currently there will never be purge changes commingled with
+% standard updates.
+%
+% Traditionally an invocation of write_doc_infos should be all
+% or nothing, in that if an error occurs (or the VM dies)
+% then the database doesn't retain any of the changes. However
+% as long as a storage engine maintains consistency this should
+% not be an issue as it has never been a guarantee and the
+% batches are non-deterministic (from the point of view of the
+% client).
+-callback write_doc_infos(
+ DbHandle::db_handle(),
+ Pairs::doc_pairs(),
+ LocalDocs::[#doc{}],
+ PurgedDocIdRevs::[{docid(), revs()}]) ->
+ {ok, NewDbHandle::db_handle()}.
+
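The three pair shapes can be told apart by simple pattern matching; a minimal
sketch (assuming couch_db.hrl is included for #full_doc_info{}):

    classify_pair({not_found, #full_doc_info{}}) -> created;
    classify_pair({#full_doc_info{}, #full_doc_info{}}) -> updated;
    classify_pair({#full_doc_info{}, not_found}) -> purged.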
+
+% This function is called in the context of couch_db_updater and
+% as such is single threaded for any given DbHandle.
+%
+% This call is made periodically to ensure that the database has
+% stored all updates on stable storage. (ie, here is where you fsync).
+-callback commit_data(DbHandle::db_handle()) ->
+    {ok, NewDbHandle::db_handle()}.
+
+
+% This function is called by multiple processes concurrently.
+%
+% This function, along with open_read_stream, is part of the
+% attachments API. For the time being I'm leaving these mostly
+% undocumented. There are implementations of this in both the
+% legacy btree engine and the alternative engine
+% implementations for the curious; however, this is a part of the
+% API for which I'd like feedback.
+%
+% Currently an engine can elect to not implement these APIs
+% by throwing the atom not_supported.
+-callback open_write_stream(
+ DbHandle::db_handle(),
+ Options::write_stream_options()) ->
+ {ok, pid()}.
+
+
+% See the documentation for open_write_stream
+-callback open_read_stream(DbHandle::db_handle(), StreamDiskInfo::any()) ->
+ {ok, {Module::atom(), ReadStreamState::any()}}.
+
+
+% See the documentation for open_write_stream
+-callback is_active_stream(DbHandle::db_handle(), ReadStreamState::any()) ->
+ boolean().
+
+
+% This function is called by many processes concurrently.
+%
+% This function is called to fold over the documents in
+% the database sorted by the raw byte collation order of
+% the document id. For each document id, the supplied user
+% function should be invoked with the first argument set
+% to the #full_doc_info{} record and the second argument
+% set to the current user supplied accumulator. The return
+% value of the user function is a 2-tuple of {Go, NewUserAcc}.
+% The NewUserAcc value should then replace the current
+% user accumulator. If Go is the atom ok, iteration over
+% documents should continue. If Go is the atom stop, then
+% iteration should halt and the return value should be
+% {ok, NewUserAcc}.
+%
+% Possible options to this function include:
+%
+% 1. start_key - Start iteration at the provided key, or
+% just after it if the key doesn't exist
+% 2. end_key - Stop iteration prior to visiting the provided
+% key
+% 3. end_key_gt - Stop iteration just after the provided key
+% 4. dir - The atom fwd or rev. This is to be able to iterate
+% over documents in reverse order. The logic for comparing
+% start_key, end_key, and end_key_gt is then reversed (ie,
+% when rev, start_key should be greater than end_key if the
+% user wishes to see results)
+% 5. include_reductions - This is a hack for _all_docs since
+% it currently relies on reductions to count an offset. This
+% is a terrible hack that will need to be addressed by the
+% API in the future. If this option is present the supplied
+% user function expects three arguments, where the first
+% argument is a #full_doc_info{} record, the second argument
+% is the current list of reductions to the left of the current
+% document, and the third argument is the current user
+% accumulator. The return value from the user function is
+% unaffected. However the final return value of the function
+% should include the final total reductions as the second
+% element of a 3-tuple. Like I said, this is a hack.
+% 6. include_deleted - By default deleted documents are not
+% included in fold_docs calls. However in some special
+% cases we do want to see them (as of now, just in couch_changes
+% during the design document changes optimization)
+%
+% Historically, if a process calls this function repeatedly it
+% would see the same results returned even if there were concurrent
+% updates happening. However there doesn't seem to be any instance of
+% that actually happening so a storage engine that includes new results
+% between invocations shouldn't have any issues.
+-callback fold_docs(
+ DbHandle::db_handle(),
+ UserFold::doc_fold_fun(),
+ UserAcc::any(),
+ doc_fold_options()) ->
+ {ok, LastUserAcc::any()}.
+
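A sketch of a caller counting documents with this API (Db is an open #db{}
handle; the fold fun returns {ok, Acc} to keep iterating):

    count_docs(Db) ->
        FoldFun = fun(#full_doc_info{}, Acc) -> {ok, Acc + 1} end,
        {ok, Count} = couch_db_engine:fold_docs(Db, FoldFun, 0, []),
        Count.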
+
+% This function may be called by many processes concurrently.
+%
+% This should behave exactly the same as fold_docs/4 except that it
+% should only return local documents and the first argument to the
+% user function is a #doc{} record, not a #full_doc_info{}.
+-callback fold_local_docs(
+ DbHandle::db_handle(),
+ UserFold::local_doc_fold_fun(),
+ UserAcc::any(),
+ doc_fold_options()) ->
+ {ok, LastUserAcc::any()}.
+
+
+% This function may be called by many processes concurrently.
+%
+% This function is called to fold over the documents (not local
+% documents) in order of their most recent update. Each document
+% in the database should have exactly one entry in this sequence.
+% If a document is updated during a call to this function it should
+% not be included twice as that will probably lead to Very Bad Things.
+%
+% This should behave similarly to fold_docs/4 in that the supplied
+% user function should be invoked with a #full_doc_info{} record
+% as the first argument and the current user accumulator as the
+% second argument. The same semantics for the return value from the
+% user function should be handled as in fold_docs/4.
+%
+% The StartSeq parameter indicates where the fold should start
+% *after*. As in, if a change with a value of StartSeq exists in the
+% database it should not be included in the fold.
+%
+% The only option currently supported by the API is the `dir`
+% option that should behave the same as for fold_docs.
+-callback fold_changes(
+ DbHandle::db_handle(),
+ StartSeq::non_neg_integer(),
+ UserFold::changes_fold_fun(),
+ UserAcc::any(),
+ changes_fold_options()) ->
+ {ok, LastUserAcc::any()}.
+
+
+% This function may be called by many processes concurrently.
+%
+% This function is called to count the number of documents changed
+% since the given UpdateSeq (ie, not including the possible change
+% at exactly UpdateSeq). It is currently only used internally to
+% provide a status update in a replication's _active_tasks entry
+% to indicate how many documents are left to be processed.
+%
+% This is a fairly difficult thing to support in engines that don't
+% behave exactly like a tree with efficient support for counting rows
+% between keys. As such, returning 0 or even just the difference between
+% the current update sequence and the given UpdateSeq is possibly the
+% best some storage engines can provide. This may lead to some
+% confusion when interpreting the
+% _active_tasks entry if the storage engine isn't accounted for by the
+% client.
+-callback count_changes_since(
+ DbHandle::db_handle(),
+ UpdateSeq::non_neg_integer()) ->
+ TotalChanges::non_neg_integer().
+
+
+% This function is called in the context of couch_db_updater and as
+% such is guaranteed to be single threaded for the given DbHandle.
+%
+% If a storage engine requires compaction this is a trigger to start
+% it off. However a storage engine can do whatever it wants here. As
+% this is fairly engine specific there's not a lot guidance that is
+% generally applicable.
+%
+% When compaction is finished the compactor should use
+% gen_server:cast/2 to send a {compact_done, CompactEngine, CompactInfo}
+% message to the Parent pid provided. Currently CompactEngine
+% must be the same engine that started the compaction and CompactInfo
+% is an arbitrary term that's passed to finish_compaction/4.
+-callback start_compaction(
+ DbHandle::db_handle(),
+ DbName::binary(),
+ Options::db_open_options(),
+ Parent::pid()) ->
+ {ok, NewDbHandle::db_handle(), CompactorPid::pid()}.
+
+
+% This function is called in the context of couch_db_updater and as
+% such is guaranteed to be single threaded for the given DbHandle.
+%
+% Same as for start_compaction, this will be extremely specific to
+% any given storage engine.
+%
+% The split in the API here is so that if the storage engine needs
+% to update the DbHandle state of the couch_db_updater it can as
+% finish_compaction/4 is called in the context of the couch_db_updater.
+-callback finish_compaction(
+ OldDbHandle::db_handle(),
+ DbName::binary(),
+ Options::db_open_options(),
+ CompactInfo::any()) ->
+ {ok, CompactedDbHandle::db_handle(), CompactorPid::pid() | undefined}.
+
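The handshake described above amounts to the compactor sending one cast back to
the couch_db_updater; a sketch from inside a hypothetical engine's compactor,
where Parent is the pid passed to start_compaction/4 and CompactInfo is any
term the engine wants handed to finish_compaction/4:

    gen_server:cast(Parent, {compact_done, ?MODULE, CompactInfo}).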
+
+-export([
+ exists/2,
+ delete/4,
+ delete_compaction_files/4,
+
+ init/3,
+ terminate/2,
+ handle_db_updater_call/3,
+ handle_db_updater_info/2,
+
+ incref/1,
+ decref/1,
+ monitored_by/1,
+
+ last_activity/1,
+
+ get_engine/1,
+ get_compacted_seq/1,
+ get_del_doc_count/1,
+ get_disk_version/1,
+ get_doc_count/1,
+ get_epochs/1,
+ get_last_purged/1,
+ get_purge_seq/1,
+ get_revs_limit/1,
+ get_security/1,
+ get_size_info/1,
+ get_update_seq/1,
+ get_uuid/1,
+
+ set_revs_limit/2,
+ set_security/2,
+
+ open_docs/2,
+ open_local_docs/2,
+ read_doc_body/2,
+
+ serialize_doc/2,
+ write_doc_body/2,
+ write_doc_infos/4,
+ commit_data/1,
+
+ open_write_stream/2,
+ open_read_stream/2,
+ is_active_stream/2,
+
+ fold_docs/4,
+ fold_local_docs/4,
+ fold_changes/5,
+ count_changes_since/2,
+
+ start_compaction/1,
+ finish_compaction/2
+]).
+
+
+exists(Engine, DbPath) ->
+ Engine:exists(DbPath).
+
+
+delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) ->
+ Engine:delete(RootDir, DbPath, DelOpts).
+
+
+delete_compaction_files(Engine, RootDir, DbPath, DelOpts)
+ when is_list(DelOpts) ->
+ Engine:delete_compaction_files(RootDir, DbPath, DelOpts).
+
+
+init(Engine, DbPath, Options) ->
+ case Engine:init(DbPath, Options) of
+ {ok, EngineState} ->
+ {ok, {Engine, EngineState}};
+ Error ->
+ throw(Error)
+ end.
+
+
+terminate(Reason, #db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:terminate(Reason, EngineState).
+
+
+handle_db_updater_call(Msg, _From, #db{} = Db) ->
+ #db{
+ engine = {Engine, EngineState}
+ } = Db,
+ case Engine:handle_db_updater_call(Msg, EngineState) of
+ {reply, Resp, NewState} ->
+ {reply, Resp, Db#db{engine = {Engine, NewState}}};
+ {stop, Reason, Resp, NewState} ->
+ {stop, Reason, Resp, Db#db{engine = {Engine, NewState}}}
+ end.
+
+
+handle_db_updater_info(Msg, #db{} = Db) ->
+ #db{
+ name = Name,
+ engine = {Engine, EngineState}
+ } = Db,
+ case Engine:handle_db_updater_info(Msg, EngineState) of
+ {noreply, NewState} ->
+ {noreply, Db#db{engine = {Engine, NewState}}};
+ {noreply, NewState, Timeout} ->
+ {noreply, Db#db{engine = {Engine, NewState}}, Timeout};
+ {stop, Reason, NewState} ->
+ couch_log:error("DB ~s shutting down: ~p", [Name, Msg]),
+ {stop, Reason, Db#db{engine = {Engine, NewState}}}
+ end.
+
+
+incref(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ {ok, NewState} = Engine:incref(EngineState),
+ {ok, Db#db{engine = {Engine, NewState}}}.
+
+
+decref(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:decref(EngineState).
+
+
+monitored_by(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:monitored_by(EngineState).
+
+
+last_activity(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:last_activity(EngineState).
+
+
+get_engine(#db{} = Db) ->
+ #db{engine = {Engine, _}} = Db,
+ Engine.
+
+
+get_compacted_seq(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_compacted_seq(EngineState).
+
+
+get_del_doc_count(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_del_doc_count(EngineState).
+
+
+get_disk_version(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_disk_version(EngineState).
+
+
+get_doc_count(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_doc_count(EngineState).
+
+
+get_epochs(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_epochs(EngineState).
+
+
+get_last_purged(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_last_purged(EngineState).
+
+
+get_purge_seq(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_purge_seq(EngineState).
+
+
+get_revs_limit(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_revs_limit(EngineState).
+
+
+get_security(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_security(EngineState).
+
+
+get_size_info(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_size_info(EngineState).
+
+
+get_update_seq(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_update_seq(EngineState).
+
+get_uuid(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:get_uuid(EngineState).
+
+
+set_revs_limit(#db{} = Db, RevsLimit) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ {ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit),
+ {ok, Db#db{engine = {Engine, NewSt}}}.
+
+
+set_security(#db{} = Db, SecProps) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ {ok, NewSt} = Engine:set_security(EngineState, SecProps),
+ {ok, Db#db{engine = {Engine, NewSt}}}.
+
+
+open_docs(#db{} = Db, DocIds) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:open_docs(EngineState, DocIds).
+
+
+open_local_docs(#db{} = Db, DocIds) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:open_local_docs(EngineState, DocIds).
+
+
+read_doc_body(#db{} = Db, RawDoc) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:read_doc_body(EngineState, RawDoc).
+
+
+serialize_doc(#db{} = Db, #doc{} = Doc) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:serialize_doc(EngineState, Doc).
+
+
+write_doc_body(#db{} = Db, #doc{} = Doc) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:write_doc_body(EngineState, Doc).
+
+
+write_doc_infos(#db{} = Db, DocUpdates, LocalDocs, PurgedDocIdRevs) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ {ok, NewSt} = Engine:write_doc_infos(
+ EngineState, DocUpdates, LocalDocs, PurgedDocIdRevs),
+ {ok, Db#db{engine = {Engine, NewSt}}}.
+
+
+commit_data(#db{} = Db) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ {ok, NewSt} = Engine:commit_data(EngineState),
+ {ok, Db#db{engine = {Engine, NewSt}}}.
+
+
+open_write_stream(#db{} = Db, Options) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:open_write_stream(EngineState, Options).
+
+
+open_read_stream(#db{} = Db, StreamDiskInfo) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:open_read_stream(EngineState, StreamDiskInfo).
+
+
+is_active_stream(#db{} = Db, ReadStreamState) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:is_active_stream(EngineState, ReadStreamState).
+
+
+fold_docs(#db{} = Db, UserFun, UserAcc, Options) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:fold_docs(EngineState, UserFun, UserAcc, Options).
+
+
+fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options).
+
+
+fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options).
+
+
+count_changes_since(#db{} = Db, StartSeq) ->
+ #db{engine = {Engine, EngineState}} = Db,
+ Engine:count_changes_since(EngineState, StartSeq).
+
+
+start_compaction(#db{} = Db) ->
+ #db{
+ engine = {Engine, EngineState},
+ name = DbName,
+ options = Options
+ } = Db,
+ {ok, NewEngineState, Pid} = Engine:start_compaction(
+ EngineState, DbName, Options, self()),
+ {ok, Db#db{
+ engine = {Engine, NewEngineState},
+ compactor_pid = Pid
+ }}.
+
+
+finish_compaction(Db, CompactInfo) ->
+ #db{
+ engine = {Engine, St},
+ name = DbName,
+ options = Options
+ } = Db,
+ NewDb = case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
+ {ok, NewState, undefined} ->
+ couch_event:notify(DbName, compacted),
+ Db#db{
+ engine = {Engine, NewState},
+ compactor_pid = nil
+ };
+ {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
+ Db#db{
+ engine = {Engine, NewState},
+ compactor_pid = CompactorPid
+ }
+ end,
+ ok = gen_server:call(couch_server, {db_updated, NewDb}, infinity),
+ {ok, NewDb}.
diff --git a/src/couch/src/couch_db_int.hrl b/src/couch/src/couch_db_int.hrl
index da1e45d75..a412b338b 100644
--- a/src/couch/src/couch_db_int.hrl
+++ b/src/couch/src/couch_db_int.hrl
@@ -10,36 +10,9 @@
% License for the specific language governing permissions and limitations under
% the License.
--record(db, {
- main_pid = nil,
- compactor_pid = nil,
- instance_start_time, % number of microsecs since jan 1 1970 as a binary string
- fd,
- fd_monitor,
- header = couch_db_header:new(),
- committed_update_seq,
- id_tree,
- seq_tree,
- local_tree,
- update_seq,
- name,
- filepath,
- validate_doc_funs = undefined,
- security = [],
- security_ptr = nil,
- user_ctx = #user_ctx{},
- waiting_delayed_commit = nil,
- revs_limit = 1000,
- fsync_options = [],
- options = [],
- compression,
- before_doc_update = nil, % nil | fun(Doc, Db) -> NewDoc
- after_doc_read = nil % nil | fun(Doc, Db) -> NewDoc
-}).
-
--record(new_pse_db, {
- vsn,
+-record(db, {
+ vsn = 1,
name,
filepath,
@@ -66,28 +39,36 @@
}).
--define(NEW_PSE_DB, {
+-define(OLD_DB_REC, {
db,
- _, % Version
- _, % Name
- _, % FilePath
- _, % Engine
_, % MainPid
_, % CompactorPid
- _, % CommittedUpdateSeq
_, % InstanceStartTime
- _, % UserCtx
- _, % Security
+ _, % Fd
+ _, % FdMonitor
+ _, % Header
+ _, % CommittedUpdateSeq
+ _, % IdTree
+ _, % SeqTree
+ _, % LocalTree
+ _, % UpdateSeq
+ _, % Name
+ _, % FilePath
_, % ValidateDocFuns
- _, % BeforeDocUpdate
- _, % AfterDocRead
+ _, % Security
+ _, % SecurityPtr
+ _, % UserCtx
_, % WaitingDelayedCommit
+ _, % RevsLimit
+ _, % FsyncOptions
_, % Options
- _ % Compression
+ _, % Compression
+ _, % BeforeDocUpdate
+ _ % AfterDocRead
}).
--define(PSE_DB_NAME(Db), element(3, Db)).
--define(PSE_DB_MAIN_PID(Db), element(6, Db)).
--define(PSE_DB_USER_CTX(Db), element(10, Db)).
--define(PSE_DB_SECURITY(Db), element(11, Db)).
+-define(OLD_DB_NAME(Db), element(2, Db)).
+-define(OLD_DB_MAIN_PID(Db), element(13, Db)).
+-define(OLD_DB_USER_CTX(Db), element(18, Db)).
+-define(OLD_DB_SECURITY(Db), element(16, Db)).
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index ca61e04c6..79567e91b 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -14,9 +14,7 @@
-behaviour(gen_server).
-vsn(1).
--export([btree_by_id_split/1, btree_by_id_join/2, btree_by_id_reduce/2]).
--export([btree_by_seq_split/1, btree_by_seq_join/2, btree_by_seq_reduce/2]).
--export([make_doc_summary/2]).
+-export([add_sizes/3, upgrade_sizes/1]).
-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
-include_lib("couch/include/couch_db.hrl").
@@ -24,67 +22,36 @@
-define(IDLE_LIMIT_DEFAULT, 61000).
--record(comp_header, {
- db_header,
- meta_state
-}).
--record(merge_st, {
- id_tree,
- seq_tree,
- curr,
- rem_seqs,
- infos
-}).
-
-init({DbName, Filepath, Fd, Options}) ->
+init({Engine, DbName, FilePath, Options0}) ->
erlang:put(io_priority, {db_update, DbName}),
update_idle_limit_from_config(),
- case lists:member(create, Options) of
- true ->
- % create a new header and writes it to the file
- Header = couch_db_header:new(),
- ok = couch_file:write_header(Fd, Header),
- % delete any old compaction files that might be hanging around
- RootDir = config:get("couchdb", "database_dir", "."),
- couch_file:delete(RootDir, Filepath ++ ".compact"),
- couch_file:delete(RootDir, Filepath ++ ".compact.data"),
- couch_file:delete(RootDir, Filepath ++ ".compact.meta");
- false ->
- case couch_file:read_header(Fd) of
- {ok, Header} ->
- ok;
- no_valid_header ->
- % create a new header and writes it to the file
- Header = couch_db_header:new(),
- ok = couch_file:write_header(Fd, Header),
- % delete any old compaction files that might be hanging around
- file:delete(Filepath ++ ".compact"),
- file:delete(Filepath ++ ".compact.data"),
- file:delete(Filepath ++ ".compact.meta")
- end
- end,
- Db = init_db(DbName, Filepath, Fd, Header, Options),
- case lists:member(sys_db, Options) of
- false ->
- couch_stats_process_tracker:track([couchdb, open_databases]);
- true ->
- ok
- end,
- % we don't load validation funs here because the fabric query is liable to
- % race conditions. Instead see couch_db:validate_doc_update, which loads
- % them lazily
- {ok, Db#db{main_pid = self()}, idle_limit()}.
+ DefaultSecObj = default_security_object(DbName),
+ Options = [{default_security_object, DefaultSecObj} | Options0],
+ try
+ {ok, EngineState} = couch_db_engine:init(Engine, FilePath, Options),
+ Db = init_db(DbName, FilePath, EngineState, Options),
+ case lists:member(sys_db, Options) of
+ false ->
+ couch_stats_process_tracker:track([couchdb, open_databases]);
+ true ->
+ ok
+ end,
+ % Don't load validation funs here because the fabric query is
+ % liable to race conditions. Instead see
+ % couch_db:validate_doc_update, which loads them lazily.
+ NewDb = Db#db{main_pid = self()},
+ proc_lib:init_ack({ok, NewDb}),
+ gen_server:enter_loop(?MODULE, [], NewDb, idle_limit())
+ catch
+ throw:InitError ->
+ proc_lib:init_ack(InitError)
+ end.
-terminate(_Reason, Db) ->
- % If the reason we died is because our fd disappeared
- % then we don't need to try closing it again.
- if Db#db.fd_monitor == closed -> ok; true ->
- ok = couch_file:close(Db#db.fd)
- end,
+terminate(Reason, Db) ->
couch_util:shutdown_sync(Db#db.compactor_pid),
- couch_util:shutdown_sync(Db#db.fd),
+ couch_db_engine:terminate(Reason, Db),
ok.
handle_call(get_db, _From, Db) ->
@@ -108,28 +75,21 @@ handle_call(cancel_compact, _From, #db{compactor_pid = nil} = Db) ->
handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
unlink(Pid),
exit(Pid, kill),
- RootDir = config:get("couchdb", "database_dir", "."),
- ok = couch_file:delete(RootDir, Db#db.filepath ++ ".compact"),
+ couch_server:delete_compaction_files(Db#db.name),
Db2 = Db#db{compactor_pid = nil},
ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
{reply, ok, Db2, idle_limit()};
-handle_call(increment_update_seq, _From, Db) ->
- Db2 = commit_data(Db#db{update_seq=Db#db.update_seq+1}),
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- couch_event:notify(Db#db.name, updated),
- {reply, {ok, Db2#db.update_seq}, Db2, idle_limit()};
-handle_call({set_security, NewSec}, _From, #db{compression = Comp} = Db) ->
- {ok, Ptr, _} = couch_file:append_term(
- Db#db.fd, NewSec, [{compression, Comp}]),
- Db2 = commit_data(Db#db{security=NewSec, security_ptr=Ptr,
- update_seq=Db#db.update_seq+1}),
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- {reply, ok, Db2, idle_limit()};
+handle_call({set_security, NewSec}, _From, #db{} = Db) ->
+ {ok, NewDb} = couch_db_engine:set_security(Db, NewSec),
+ NewSecDb = NewDb#db{
+ security = NewSec
+ },
+ ok = gen_server:call(couch_server, {db_updated, NewSecDb}, infinity),
+ {reply, ok, NewSecDb, idle_limit()};
handle_call({set_revs_limit, Limit}, _From, Db) ->
- Db2 = commit_data(Db#db{revs_limit=Limit,
- update_seq=Db#db.update_seq+1}),
+ {ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit),
ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
{reply, ok, Db2, idle_limit()};
@@ -137,74 +97,83 @@ handle_call({purge_docs, _IdRevs}, _From,
#db{compactor_pid=Pid}=Db) when Pid /= nil ->
{reply, {error, purge_during_compaction}, Db, idle_limit()};
handle_call({purge_docs, IdRevs}, _From, Db) ->
- #db{
- fd = Fd,
- id_tree = DocInfoByIdBTree,
- seq_tree = DocInfoBySeqBTree,
- update_seq = LastSeq,
- header = Header,
- compression = Comp
- } = Db,
- DocLookups = couch_btree:lookup(DocInfoByIdBTree,
- [Id || {Id, _Revs} <- IdRevs]),
-
- NewDocInfos = lists:zipwith(
- fun({_Id, Revs}, {ok, #full_doc_info{rev_tree=Tree}=FullDocInfo}) ->
+ DocIds = [Id || {Id, _Revs} <- IdRevs],
+ OldDocInfos = couch_db_engine:open_docs(Db, DocIds),
+
+ NewDocInfos = lists:flatmap(fun
+ ({{Id, Revs}, #full_doc_info{id = Id, rev_tree = Tree} = FDI}) ->
case couch_key_tree:remove_leafs(Tree, Revs) of
- {_, []=_RemovedRevs} -> % no change
- nil;
- {NewTree, RemovedRevs} ->
- {FullDocInfo#full_doc_info{rev_tree=NewTree},RemovedRevs}
+ {_, [] = _RemovedRevs} -> % no change
+ [];
+ {NewTree, RemovedRevs} ->
+ NewFDI = FDI#full_doc_info{rev_tree = NewTree},
+ [{FDI, NewFDI, RemovedRevs}]
end;
- (_, not_found) ->
- nil
+ ({_, not_found}) ->
+ []
+ end, lists:zip(IdRevs, OldDocInfos)),
+
+ InitUpdateSeq = couch_db_engine:get_update_seq(Db),
+ InitAcc = {InitUpdateSeq, [], []},
+ FinalAcc = lists:foldl(fun({_, #full_doc_info{} = OldFDI, RemRevs}, Acc) ->
+ #full_doc_info{
+ id = Id,
+ rev_tree = OldTree
+ } = OldFDI,
+ {SeqAcc0, FDIAcc, IdRevsAcc} = Acc,
+
+ {NewFDIAcc, NewSeqAcc} = case OldTree of
+ [] ->
+ % If we purged every #leaf{} in the doc record
+ % then we're removing it completely from the
+ % database.
+                {FDIAcc, SeqAcc0};
+ _ ->
+                % It's possible to purge the #leaf{} that contains
+ % the update_seq where this doc sits in the update_seq
+ % sequence. Rather than do a bunch of complicated checks
+ % we just re-label every #leaf{} and reinsert it into
+ % the update_seq sequence.
+ {NewTree, SeqAcc1} = couch_key_tree:mapfold(fun
+ (_RevId, Leaf, leaf, InnerSeqAcc) ->
+ {Leaf#leaf{seq = InnerSeqAcc + 1}, InnerSeqAcc + 1};
+ (_RevId, Value, _Type, InnerSeqAcc) ->
+ {Value, InnerSeqAcc}
+ end, SeqAcc0, OldTree),
+
+ NewFDI = OldFDI#full_doc_info{
+ update_seq = SeqAcc1,
+ rev_tree = NewTree
+ },
+
+ {[NewFDI | FDIAcc], SeqAcc1}
end,
- IdRevs, DocLookups),
-
- SeqsToRemove = [Seq
- || {#full_doc_info{update_seq=Seq},_} <- NewDocInfos],
-
- FullDocInfoToUpdate = [FullInfo
- || {#full_doc_info{rev_tree=Tree}=FullInfo,_}
- <- NewDocInfos, Tree /= []],
-
- IdRevsPurged = [{Id, Revs}
- || {#full_doc_info{id=Id}, Revs} <- NewDocInfos],
-
- {DocInfoToUpdate, NewSeq} = lists:mapfoldl(
- fun(#full_doc_info{rev_tree=Tree}=FullInfo, SeqAcc) ->
- Tree2 = couch_key_tree:map_leafs(
- fun(_RevId, Leaf) ->
- Leaf#leaf{seq=SeqAcc+1}
- end, Tree),
- {FullInfo#full_doc_info{rev_tree=Tree2}, SeqAcc + 1}
- end, LastSeq, FullDocInfoToUpdate),
-
- IdsToRemove = [Id || {#full_doc_info{id=Id,rev_tree=[]},_}
- <- NewDocInfos],
-
- {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree,
- DocInfoToUpdate, SeqsToRemove),
- {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree,
- FullDocInfoToUpdate, IdsToRemove),
- {ok, Pointer, _} = couch_file:append_term(
- Fd, IdRevsPurged, [{compression, Comp}]),
-
- NewHeader = couch_db_header:set(Header, [
- {purge_seq, couch_db_header:purge_seq(Header) + 1},
- {purged_docs, Pointer}
- ]),
- Db2 = commit_data(
- Db#db{
- id_tree = DocInfoByIdBTree2,
- seq_tree = DocInfoBySeqBTree2,
- update_seq = NewSeq + 1,
- header=NewHeader}),
+ NewIdRevsAcc = [{Id, RemRevs} | IdRevsAcc],
+ {NewSeqAcc, NewFDIAcc, NewIdRevsAcc}
+ end, InitAcc, NewDocInfos),
+
+ {_FinalSeq, FDIs, PurgedIdRevs} = FinalAcc,
+
+ % We need to only use the list of #full_doc_info{} records
+ % that we have actually changed due to a purge.
+ PreviousFDIs = [PrevFDI || {PrevFDI, _, _} <- NewDocInfos],
+ Pairs = pair_purge_info(PreviousFDIs, FDIs),
+
+ {ok, Db2} = couch_db_engine:write_doc_infos(Db, Pairs, [], PurgedIdRevs),
ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
couch_event:notify(Db#db.name, updated),
- {reply, {ok, couch_db_header:purge_seq(NewHeader), IdRevsPurged}, Db2,
- idle_limit()}.
+
+ PurgeSeq = couch_db_engine:get_purge_seq(Db2),
+ {reply, {ok, PurgeSeq, PurgedIdRevs}, Db2, idle_limit()};
+
+handle_call(Msg, From, Db) ->
+ case couch_db_engine:handle_db_updater_call(Msg, From, Db) of
+ {reply, Resp, NewDb} ->
+ {reply, Resp, NewDb, idle_limit()};
+ Else ->
+ Else
+ end.
handle_cast({load_validation_funs, ValidationFuns}, Db) ->
@@ -213,68 +182,24 @@ handle_cast({load_validation_funs, ValidationFuns}, Db) ->
{noreply, Db2, idle_limit()};
handle_cast(start_compact, Db) ->
case Db#db.compactor_pid of
- nil ->
- couch_log:info("Starting compaction for db \"~s\"", [Db#db.name]),
- Pid = spawn_link(fun() -> start_copy_compact(Db) end),
- Db2 = Db#db{compactor_pid=Pid},
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- {noreply, Db2, idle_limit()};
- _ ->
- % compact currently running, this is a no-op
- {noreply, Db, idle_limit()}
- end;
-handle_cast({compact_done, CompactFilepath}, #db{filepath=Filepath,fd=Fd}=Db) ->
- {ok, NewFd} = couch_file:open(CompactFilepath),
- {ok, NewHeader0} = couch_file:read_header(NewFd),
- NewHeader = couch_db_header:set(NewHeader0, [
- {compacted_seq, Db#db.update_seq}
- ]),
- #db{update_seq=NewSeq} = NewDb =
- init_db(Db#db.name, Filepath, NewFd, NewHeader, Db#db.options),
- unlink(NewFd),
- case Db#db.update_seq == NewSeq of
- true ->
- % suck up all the local docs into memory and write them to the new db
- {ok, _, LocalDocs} = couch_btree:foldl(Db#db.local_tree,
- fun(Value, _Offset, Acc) -> {ok, [Value | Acc]} end, []),
- {ok, NewLocalBtree} = couch_btree:add(NewDb#db.local_tree, LocalDocs),
-
- NewDb2 = commit_data(NewDb#db{
- local_tree = NewLocalBtree,
- main_pid = self(),
- filepath = Filepath,
- instance_start_time = Db#db.instance_start_time,
- revs_limit = Db#db.revs_limit
- }),
-
- {ok, Pre} = couch_file:bytes(Fd),
- {ok, Post} = couch_file:bytes(NewFd),
-
- couch_log:notice("Compaction swap for db: ~s ~p ~p", [Filepath,
- Pre, Post]),
- ok = file:rename(CompactFilepath, Filepath ++ ".compact"),
- RootDir = config:get("couchdb", "database_dir", "."),
- couch_file:delete(RootDir, Filepath),
- ok = file:rename(Filepath ++ ".compact", Filepath),
- % Delete the old meta compaction file after promoting
- % the compaction file.
- couch_file:delete(RootDir, Filepath ++ ".compact.meta"),
- close_db(Db),
- NewDb3 = refresh_validate_doc_funs(NewDb2),
- ok = gen_server:call(couch_server, {db_updated, NewDb3}, infinity),
- couch_event:notify(NewDb3#db.name, compacted),
- couch_log:info("Compaction for db \"~s\" completed.", [Db#db.name]),
- {noreply, NewDb3#db{compactor_pid=nil}, idle_limit()};
- false ->
- couch_log:info("Compaction file still behind main file "
- "(update seq=~p. compact update seq=~p). Retrying.",
- [Db#db.update_seq, NewSeq]),
- close_db(NewDb),
- Pid = spawn_link(fun() -> start_copy_compact(Db) end),
- Db2 = Db#db{compactor_pid=Pid},
- ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- {noreply, Db2, idle_limit()}
+ nil ->
+ % For now we only support compacting to the same
+ % storage engine. After the first round of patches
+ % we'll add a field that sets the target engine
+ % type to compact to with a new copy compactor.
+ UpdateSeq = couch_db_engine:get_update_seq(Db),
+ Args = [Db#db.name, UpdateSeq],
+ couch_log:info("Starting compaction for db \"~s\" at ~p", Args),
+ {ok, Db2} = couch_db_engine:start_compaction(Db),
+ ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
+ {noreply, Db2, idle_limit()};
+ _ ->
+ % compact currently running, this is a no-op
+ {noreply, Db, idle_limit()}
end;
+handle_cast({compact_done, _Engine, CompactInfo}, #db{} = OldDb) ->
+ {ok, NewDb} = couch_db_engine:finish_compaction(OldDb, CompactInfo),
+ {noreply, NewDb};
handle_cast(wakeup, Db) ->
{noreply, Db, idle_limit()};
@@ -301,9 +226,9 @@ handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts,
FullCommit2) of
{ok, Db2, UpdatedDDocIds} ->
ok = gen_server:call(couch_server, {db_updated, Db2}, infinity),
- if Db2#db.update_seq /= Db#db.update_seq ->
- couch_event:notify(Db2#db.name, updated);
- true -> ok
+ case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of
+ {Seq, Seq} -> ok;
+ _ -> couch_event:notify(Db2#db.name, updated)
end,
if NonRepDocs2 /= [] ->
couch_event:notify(Db2#db.name, local_updated);
@@ -346,15 +271,14 @@ handle_info({'EXIT', _Pid, normal}, Db) ->
{noreply, Db, idle_limit()};
handle_info({'EXIT', _Pid, Reason}, Db) ->
{stop, Reason, Db};
-handle_info({'DOWN', Ref, _, _, Reason}, #db{fd_monitor=Ref, name=Name} = Db) ->
- couch_log:error("DB ~s shutting down - Fd ~p", [Name, Reason]),
- {stop, normal, Db#db{fd=undefined, fd_monitor=closed}};
-handle_info(timeout, #db{fd=Fd, name=DbName} = Db) ->
+handle_info(timeout, #db{name=DbName} = Db) ->
IdleLimitMSec = update_idle_limit_from_config(),
case couch_db:is_idle(Db) of
true ->
- MSecSinceLastRead = couch_file:msec_since_last_read(Fd),
- case MSecSinceLastRead > IdleLimitMSec of
+ LastActivity = couch_db_engine:last_activity(Db),
+ DtMSec = timer:now_diff(os:timestamp(), LastActivity) div 1000,
+ MSecSinceLastActivity = max(0, DtMSec),
+ case MSecSinceLastActivity > IdleLimitMSec of
true ->
ok = couch_server:close_db_if_idle(DbName);
false ->
@@ -366,7 +290,15 @@ handle_info(timeout, #db{fd=Fd, name=DbName} = Db) ->
% Send a message to wake up and then hibernate. Hibernation here is done to
% force a thorough garbage collection.
gen_server:cast(self(), wakeup),
- {noreply, Db, hibernate}.
+ {noreply, Db, hibernate};
+
+handle_info(Msg, Db) ->
+ case couch_db_engine:handle_db_updater_info(Msg, Db) of
+ {noreply, NewDb} ->
+ {noreply, NewDb, idle_limit()};
+ Else ->
+ Else
+ end.
code_change(_OldVsn, State, _Extra) ->
@@ -418,235 +350,32 @@ collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts, FullCommit) ->
{GroupedDocsAcc, ClientsAcc, FullCommit}
end.
-rev_tree(DiskTree) ->
- couch_key_tree:map(fun
- (_RevId, {Del, Ptr, Seq}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq
- };
- (_RevId, {Del, Ptr, Seq, Size}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = upgrade_sizes(Size)
- };
- (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = upgrade_sizes(Sizes),
- atts = Atts
- };
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING
- end, DiskTree).
-
-disk_tree(RevTree) ->
- couch_key_tree:map(fun
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING;
- (_RevId, #leaf{} = Leaf) ->
- #leaf{
- deleted = Del,
- ptr = Ptr,
- seq = Seq,
- sizes = Sizes,
- atts = Atts
- } = Leaf,
- {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
- end, RevTree).
-upgrade_sizes(#size_info{}=SI) ->
- SI;
-upgrade_sizes({D, E}) ->
- #size_info{active=D, external=E};
-upgrade_sizes(S) when is_integer(S) ->
- #size_info{active=S, external=0}.
-
-split_sizes(#size_info{}=SI) ->
- {SI#size_info.active, SI#size_info.external}.
-
-join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
- #size_info{active=Active, external=External}.
-
-btree_by_seq_split(#full_doc_info{}=Info) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = Del,
- sizes = SizeInfo,
- rev_tree = Tree
- } = Info,
- {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-btree_by_seq_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
- btree_by_seq_join(Seq, {Id, Del, {0, 0}, DiskTree});
-btree_by_seq_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = ?i2b(Del),
- sizes = join_sizes(Sizes),
- rev_tree = rev_tree(DiskTree)
- };
-btree_by_seq_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
- % Older versions stored #doc_info records in the seq_tree.
- % Compact to upgrade.
- #doc_info{
- id = Id,
- high_seq=KeySeq,
- revs =
- [#rev_info{rev=Rev,seq=Seq,deleted=false,body_sp = Bp} ||
- {Rev, Seq, Bp} <- RevInfos] ++
- [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} ||
- {Rev, Seq, Bp} <- DeletedRevInfos]}.
-
-btree_by_id_split(#full_doc_info{}=Info) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq,
- deleted = Deleted,
- sizes = SizeInfo,
- rev_tree = Tree
- } = Info,
- {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
-% Handle old formats before data_size was added
-btree_by_id_join(Id, {HighSeq, Deleted, DiskTree}) ->
- btree_by_id_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
-
-btree_by_id_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
- #full_doc_info{
- id = Id,
- update_seq = HighSeq,
- deleted = ?i2b(Deleted),
- sizes = upgrade_sizes(Sizes),
- rev_tree = rev_tree(DiskTree)
- }.
-
-btree_by_id_reduce(reduce, FullDocInfos) ->
- lists:foldl(
- fun(Info, {NotDeleted, Deleted, Sizes}) ->
- Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
- case Info#full_doc_info.deleted of
- true ->
- {NotDeleted, Deleted + 1, Sizes2};
- false ->
- {NotDeleted + 1, Deleted, Sizes2}
- end
- end,
- {0, 0, #size_info{}}, FullDocInfos);
-btree_by_id_reduce(rereduce, Reds) ->
- lists:foldl(
- fun({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
- % pre 1.2 format, will be upgraded on compaction
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
- ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
- AccSizes2 = reduce_sizes(AccSizes, Sizes),
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
- end,
- {0, 0, #size_info{}}, Reds).
-
-reduce_sizes(nil, _) ->
- nil;
-reduce_sizes(_, nil) ->
- nil;
-reduce_sizes(#size_info{}=S1, #size_info{}=S2) ->
- #size_info{
- active = S1#size_info.active + S2#size_info.active,
- external = S1#size_info.external + S2#size_info.external
- };
-reduce_sizes(S1, S2) ->
- reduce_sizes(upgrade_sizes(S1), upgrade_sizes(S2)).
-
-btree_by_seq_reduce(reduce, DocInfos) ->
- % count the number of documents
- length(DocInfos);
-btree_by_seq_reduce(rereduce, Reds) ->
- lists:sum(Reds).
-
-init_db(DbName, Filepath, Fd, Header0, Options) ->
- Header = couch_db_header:upgrade(Header0),
-
- {ok, FsyncOptions} = couch_util:parse_term(
- config:get("couchdb", "fsync_options",
- "[before_header, after_header, on_file_open]")),
-
- case lists:member(on_file_open, FsyncOptions) of
- true -> ok = couch_file:sync(Fd);
- _ -> ok
- end,
-
- Compression = couch_compress:get_compression_method(),
-
- IdTreeState = couch_db_header:id_tree_state(Header),
- SeqTreeState = couch_db_header:seq_tree_state(Header),
- LocalTreeState = couch_db_header:local_tree_state(Header),
- {ok, IdBtree} = couch_btree:open(IdTreeState, Fd,
- [{split, fun ?MODULE:btree_by_id_split/1},
- {join, fun ?MODULE:btree_by_id_join/2},
- {reduce, fun ?MODULE:btree_by_id_reduce/2},
- {compression, Compression}]),
- {ok, SeqBtree} = couch_btree:open(SeqTreeState, Fd,
- [{split, fun ?MODULE:btree_by_seq_split/1},
- {join, fun ?MODULE:btree_by_seq_join/2},
- {reduce, fun ?MODULE:btree_by_seq_reduce/2},
- {compression, Compression}]),
- {ok, LocalDocsBtree} = couch_btree:open(LocalTreeState, Fd,
- [{compression, Compression}]),
- case couch_db_header:security_ptr(Header) of
- nil ->
- Security = default_security_object(DbName),
- SecurityPtr = nil;
- SecurityPtr ->
- {ok, Security} = couch_file:pread_term(Fd, SecurityPtr)
- end,
+init_db(DbName, FilePath, EngineState, Options) ->
% convert start time tuple to microsecs and store as a binary string
{MegaSecs, Secs, MicroSecs} = os:timestamp(),
StartTime = ?l2b(io_lib:format("~p",
[(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
- ok = couch_file:set_db_pid(Fd, self()),
- Db = #db{
- fd=Fd,
- fd_monitor = erlang:monitor(process, Fd),
- header=Header,
- id_tree = IdBtree,
- seq_tree = SeqBtree,
- local_tree = LocalDocsBtree,
- committed_update_seq = couch_db_header:update_seq(Header),
- update_seq = couch_db_header:update_seq(Header),
+
+ BDU = couch_util:get_value(before_doc_update, Options, nil),
+ ADR = couch_util:get_value(after_doc_read, Options, nil),
+
+ CleanedOpts = [Opt || Opt <- Options, Opt /= create],
+
+ InitDb = #db{
name = DbName,
- filepath = Filepath,
- security = Security,
- security_ptr = SecurityPtr,
+ filepath = FilePath,
+ engine = EngineState,
instance_start_time = StartTime,
- revs_limit = couch_db_header:revs_limit(Header),
- fsync_options = FsyncOptions,
- options = Options,
- compression = Compression,
- before_doc_update = couch_util:get_value(before_doc_update, Options, nil),
- after_doc_read = couch_util:get_value(after_doc_read, Options, nil)
+ options = CleanedOpts,
+ before_doc_update = BDU,
+ after_doc_read = ADR
},
- % If we just created a new UUID while upgrading a
- % database then we want to flush that to disk or
- % we risk sending out the uuid and having the db
- % crash which would result in it generating a new
- % uuid each time it was reopened.
- case Header /= Header0 of
- true ->
- sync_header(Db, Header);
- false ->
- Db
- end.
-
-
-close_db(#db{fd_monitor = Ref}) ->
- erlang:demonitor(Ref).
+ InitDb#db{
+ committed_update_seq = couch_db_engine:get_update_seq(InitDb),
+ security = couch_db_engine:get_security(InitDb)
+ }.
refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) ->
@@ -670,50 +399,36 @@ refresh_validate_doc_funs(Db0) ->
flush_trees(_Db, [], AccFlushedTrees) ->
{ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{fd = Fd} = Db,
+flush_trees(#db{} = Db,
[InfoUnflushed | RestUnflushed], AccFlushed) ->
#full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
{Flushed, FinalAcc} = couch_key_tree:mapfold(
fun(_Rev, Value, Type, SizesAcc) ->
case Value of
- #doc{deleted = IsDeleted, body = {summary, _, _, _} = DocSummary} ->
- {summary, Summary, AttSizeInfo, AttsFd} = DocSummary,
- ExternalSize = get_meta_body_size(Value#doc.meta, Summary),
- % this node value is actually an unwritten document summary,
- % write to disk.
- % make sure the Fd in the written bins is the same Fd we are
- % and convert bins, removing the FD.
- % All bins should have been written to disk already.
- case {AttsFd, Fd} of
- {nil, _} ->
- ok;
- {SameFd, SameFd} ->
- ok;
- _ ->
- % Fd where the attachments were written to is not the same
- % as our Fd. This can happen when a database is being
- % switched out during a compaction.
- couch_log:debug("File where the attachments are written has"
- " changed. Possibly retrying.", []),
- throw(retry)
- end,
- {ok, NewSummaryPointer, SummarySize} =
- couch_file:append_raw_chunk(Fd, Summary),
- Leaf = #leaf{
- deleted = IsDeleted,
- ptr = NewSummaryPointer,
- seq = UpdateSeq,
- sizes = #size_info{
- active = SummarySize,
- external = ExternalSize
+ % This node is a document summary that needs to be
+ % flushed to disk.
+ #doc{} = Doc ->
+ check_doc_atts(Db, Doc),
+ ExternalSize = get_meta_body_size(Value#doc.meta),
+ {size_info, AttSizeInfo} =
+ lists:keyfind(size_info, 1, Doc#doc.meta),
+ {ok, NewDoc, WrittenSize} =
+ couch_db_engine:write_doc_body(Db, Doc),
+ Leaf = #leaf{
+ deleted = Doc#doc.deleted,
+ ptr = NewDoc#doc.body,
+ seq = UpdateSeq,
+ sizes = #size_info{
+ active = WrittenSize,
+ external = ExternalSize
+ },
+ atts = AttSizeInfo
},
- atts = AttSizeInfo
- },
- {Leaf, add_sizes(Type, Leaf, SizesAcc)};
- #leaf{} ->
- {Value, add_sizes(Type, Value, SizesAcc)};
- _ ->
- {Value, SizesAcc}
+ {Leaf, add_sizes(Type, Leaf, SizesAcc)};
+ #leaf{} ->
+ {Value, add_sizes(Type, Value, SizesAcc)};
+ _ ->
+ {Value, SizesAcc}
end
end, {0, 0, []}, Unflushed),
{FinalAS, FinalES, FinalAtts} = FinalAcc,
@@ -727,6 +442,29 @@ flush_trees(#db{fd = Fd} = Db,
},
flush_trees(Db, RestUnflushed, [NewInfo | AccFlushed]).
+
+check_doc_atts(Db, Doc) ->
+ {atts_stream, Stream} = lists:keyfind(atts_stream, 1, Doc#doc.meta),
+ % Make sure that the attachments were written to the currently
+ % active attachment stream. If compaction swaps during a write
+ % request, we may have to rewrite our attachment bodies.
+ if Stream == nil -> ok; true ->
+ case couch_db:is_active_stream(Db, Stream) of
+ true ->
+ ok;
+ false ->
+ % Stream where the attachments were written to is
+ % no longer the current attachment stream. This
+ % can happen when a database is switched at
+ % compaction time.
+ couch_log:debug("Stream where the attachments were"
+ " written has changed."
+ " Possibly retrying.", []),
+ throw(retry)
+ end
+ end.
+
+
add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) ->
% Maybe upgrade from disk_size only
#size_info{
@@ -739,6 +477,15 @@ add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) ->
NewAttsAcc = lists:umerge(AttSizes, AttsAcc),
{NewASAcc, NewESAcc, NewAttsAcc}.
+
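+% Normalize the historical on-disk size formats: a full #size_info{}
+% record is passed through, an {Active, External} pair is wrapped into a
+% record, and a bare integer (old disk_size-only format) becomes the
+% active size with an external size of 0.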
+upgrade_sizes(#size_info{}=SI) ->
+ SI;
+upgrade_sizes({D, E}) ->
+ #size_info{active=D, external=E};
+upgrade_sizes(S) when is_integer(S) ->
+ #size_info{active=S, external=0}.
+
+
send_result(Client, Doc, NewResult) ->
% used to send a result to the client
catch(Client ! {result, self(), {doc_tag(Doc), NewResult}}).
@@ -810,7 +557,17 @@ merge_rev_tree(OldInfo, NewDoc, Client, Limit, false)
% Update the new doc based on revisions in OldInfo
#doc_info{revs=[WinningRev | _]} = couch_doc:to_doc_info(OldInfo),
#rev_info{rev={OldPos, OldRev}} = WinningRev,
- NewRevId = couch_db:new_revid(NewDoc#doc{revs={OldPos, [OldRev]}}),
+ Body = case couch_util:get_value(comp_body, NewDoc#doc.meta) of
+ CompBody when is_binary(CompBody) ->
+ couch_compress:decompress(CompBody);
+ _ ->
+ NewDoc#doc.body
+ end,
+ RevIdDoc = NewDoc#doc{
+ revs = {OldPos, [OldRev]},
+ body = Body
+ },
+ NewRevId = couch_db:new_revid(RevIdDoc),
NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
% Merge our modified new doc into the tree
@@ -865,58 +622,40 @@ merge_rev_tree(OldInfo, NewDoc, _Client, Limit, true) ->
{NewTree, _} = couch_key_tree:merge(OldTree, NewTree0, Limit),
OldInfo#full_doc_info{rev_tree = NewTree}.
-stem_full_doc_infos(#db{revs_limit=Limit}, DocInfos) ->
- [Info#full_doc_info{rev_tree=couch_key_tree:stem(Tree, Limit)} ||
- #full_doc_info{rev_tree=Tree}=Info <- DocInfos].
+update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, FullCommit) ->
+ UpdateSeq = couch_db_engine:get_update_seq(Db),
+ RevsLimit = couch_db_engine:get_revs_limit(Db),
-update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
- #db{
- id_tree = DocInfoByIdBTree,
- seq_tree = DocInfoBySeqBTree,
- update_seq = LastSeq,
- revs_limit = RevsLimit
- } = Db,
Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
% look up the old documents, if they exist.
- OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
- OldDocInfos = lists:zipwith(
- fun(_Id, {ok, FullDocInfo}) ->
- FullDocInfo;
+ OldDocLookups = couch_db_engine:open_docs(Db, Ids),
+ OldDocInfos = lists:zipwith(fun
+ (_Id, #full_doc_info{} = FDI) ->
+ FDI;
(Id, not_found) ->
#full_doc_info{id=Id}
- end,
- Ids, OldDocLookups),
+ end, Ids, OldDocLookups),
% Merge the new docs into the revision trees.
- {ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit,
- MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),
-
- % All documents are now ready to write.
-
- {ok, Db2} = update_local_docs(Db, NonRepDocs),
+ {ok, NewFullDocInfos, RemSeqs, _} = merge_rev_trees(RevsLimit,
+ MergeConflicts, DocsList, OldDocInfos, [], [], UpdateSeq),
% Write out the document summaries (the bodies are stored in the nodes of
% the trees, the attachments are already written to disk)
- {ok, IndexFullDocInfos} = flush_trees(Db2, NewFullDocInfos, []),
-
- % and the indexes
- {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, IndexFullDocInfos, []),
- {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, IndexFullDocInfos, RemoveSeqs),
+ {ok, IndexFDIs} = flush_trees(Db, NewFullDocInfos, []),
+ Pairs = pair_write_info(OldDocLookups, IndexFDIs),
+ LocalDocs2 = update_local_doc_revs(LocalDocs),
+ {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2, []),
- WriteCount = length(IndexFullDocInfos),
+ WriteCount = length(IndexFDIs),
couch_stats:increment_counter([couchdb, document_inserts],
- WriteCount - length(RemoveSeqs)),
+ WriteCount - length(RemSeqs)),
couch_stats:increment_counter([couchdb, document_writes], WriteCount),
couch_stats:increment_counter(
[couchdb, local_document_writes],
- length(NonRepDocs)
+ length(LocalDocs2)
),
- Db3 = Db2#db{
- id_tree = DocInfoByIdBTree2,
- seq_tree = DocInfoBySeqBTree2,
- update_seq = NewSeq},
-
% Check if we just updated any design documents, and update the validation
% funs if we did.
UpdatedDDocIds = lists:flatmap(fun
@@ -924,54 +663,33 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
(_) -> []
end, Ids),
- {ok, commit_data(Db3, not FullCommit), UpdatedDDocIds}.
-
-update_local_docs(Db, []) ->
- {ok, Db};
-update_local_docs(#db{local_tree=Btree}=Db, Docs) ->
- BtreeEntries = lists:map(
- fun({Client, NewDoc}) ->
- #doc{
- id = Id,
- deleted = Delete,
- revs = {0, PrevRevs},
- body = Body
- } = NewDoc,
- case PrevRevs of
- [RevStr|_] ->
- PrevRev = list_to_integer(?b2l(RevStr));
- [] ->
- PrevRev = 0
- end,
- case Delete of
- false ->
- send_result(Client, NewDoc, {ok,
- {0, ?l2b(integer_to_list(PrevRev + 1))}}),
- {update, {Id, {PrevRev + 1, Body}}};
- true ->
- send_result(Client, NewDoc,
- {ok, {0, <<"0">>}}),
- {remove, Id}
- end
- end, Docs),
+ {ok, commit_data(Db1, not FullCommit), UpdatedDDocIds}.
- BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
- BtreeIdsUpdate = [{Key, Val} || {update, {Key, Val}} <- BtreeEntries],
- {ok, Btree2} =
- couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
-
- {ok, Db#db{local_tree = Btree2}}.
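+% Local (non-replicated) documents use a plain integer revision counter
+% rather than a rev tree: an update bumps the previous rev by one, a
+% deletion resets it to 0, and the resulting rev is sent back to the
+% waiting client here.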
+update_local_doc_revs(Docs) ->
+ lists:map(fun({Client, NewDoc}) ->
+ #doc{
+ deleted = Delete,
+ revs = {0, PrevRevs}
+ } = NewDoc,
+ case PrevRevs of
+ [RevStr | _] ->
+ PrevRev = binary_to_integer(RevStr);
+ [] ->
+ PrevRev = 0
+ end,
+ NewRev = case Delete of
+ false ->
+ PrevRev + 1;
+ true ->
+ 0
+ end,
+ send_result(Client, NewDoc, {ok, {0, integer_to_binary(NewRev)}}),
+ NewDoc#doc{
+ revs = {0, [NewRev]}
+ }
+ end, Docs).
-db_to_header(Db, Header) ->
- couch_db_header:set(Header, [
- {update_seq, Db#db.update_seq},
- {seq_tree_state, couch_btree:get_state(Db#db.seq_tree)},
- {id_tree_state, couch_btree:get_state(Db#db.id_tree)},
- {local_tree_state, couch_btree:get_state(Db#db.local_tree)},
- {security_ptr, Db#db.security_ptr},
- {revs_limit, Db#db.revs_limit}
- ]).
commit_data(Db) ->
commit_data(Db, false).
@@ -983,497 +701,37 @@ commit_data(Db, true) ->
Db;
commit_data(Db, _) ->
#db{
- header = OldHeader,
- waiting_delayed_commit = Timer
- } = Db,
- if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
- case db_to_header(Db, OldHeader) of
- OldHeader -> Db#db{waiting_delayed_commit=nil};
- NewHeader -> sync_header(Db, NewHeader)
- end.
-
-sync_header(Db, NewHeader) ->
- #db{
- fd = Fd,
- filepath = FilePath,
- fsync_options = FsyncOptions,
waiting_delayed_commit = Timer
} = Db,
-
if is_reference(Timer) -> erlang:cancel_timer(Timer); true -> ok end,
-
- Before = lists:member(before_header, FsyncOptions),
- After = lists:member(after_header, FsyncOptions),
-
- if Before -> couch_file:sync(FilePath); true -> ok end,
- ok = couch_file:write_header(Fd, NewHeader),
- if After -> couch_file:sync(FilePath); true -> ok end,
-
- Db#db{
- header=NewHeader,
- committed_update_seq=Db#db.update_seq,
- waiting_delayed_commit=nil
+ {ok, Db1} = couch_db_engine:commit_data(Db),
+ Db1#db{
+ waiting_delayed_commit = nil,
+ committed_update_seq = couch_db_engine:get_update_seq(Db)
}.
-copy_doc_attachments(#db{fd = SrcFd} = SrcDb, SrcSp, DestFd) ->
- {ok, {BodyData, BinInfos0}} = couch_db:read_doc(SrcDb, SrcSp),
- BinInfos = case BinInfos0 of
- _ when is_binary(BinInfos0) ->
- couch_compress:decompress(BinInfos0);
- _ when is_list(BinInfos0) ->
- % pre 1.2 file format
- BinInfos0
- end,
- % copy the bin values
- NewBinInfos = lists:map(
- fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
- % 010 UPGRADE CODE
- {NewBinSp, AttLen, AttLen, ActualMd5, _IdentityMd5} =
- couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
- check_md5(ExpectedMd5, ActualMd5),
- {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
- ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
- {NewBinSp, AttLen, _, ActualMd5, _IdentityMd5} =
- couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
- check_md5(ExpectedMd5, ActualMd5),
- Enc = case Enc1 of
- true ->
- % 0110 UPGRADE CODE
- gzip;
- false ->
- % 0110 UPGRADE CODE
- identity;
- _ ->
- Enc1
- end,
- {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
- end, BinInfos),
- {BodyData, NewBinInfos}.
-
-merge_lookups(Infos, []) ->
- Infos;
-merge_lookups([], _) ->
- [];
-merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
- % Assert we've matched our lookups
- if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
- erlang:error({mismatched_doc_infos, DI#doc_info.id})
- end,
- [FDI | merge_lookups(RestInfos, RestLookups)];
-merge_lookups([FDI | RestInfos], Lookups) ->
- [FDI | merge_lookups(RestInfos, Lookups)].
-
-check_md5(Md5, Md5) -> ok;
-check_md5(_, _) -> throw(md5_mismatch).
-
-copy_docs(Db, #db{fd = DestFd} = NewDb, MixedInfos, Retry) ->
- DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
- LookupResults = couch_btree:lookup(Db#db.id_tree, DocInfoIds),
- % COUCHDB-968, make sure we prune duplicates during compaction
- NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
- A =< B
- end, merge_lookups(MixedInfos, LookupResults)),
-
- NewInfos1 = lists:map(fun(Info) ->
- {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun
- (_Rev, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) ->
- {Body, AttInfos} = copy_doc_attachments(Db, Sp, DestFd),
- % In the future, we should figure out how to do this for
- % upgrade purposes.
- ExternalSize = case is_binary(Body) of
- true ->
- couch_compress:uncompressed_size(Body);
- false ->
- ?term_size(Body)
- end,
- SummaryChunk = make_doc_summary(NewDb, {Body, AttInfos}),
- {ok, Pos, SummarySize} = couch_file:append_raw_chunk(
- DestFd, SummaryChunk),
- AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos],
- NewLeaf = Leaf#leaf{
- ptr = Pos,
- sizes = #size_info{
- active = SummarySize,
- external = ExternalSize
- },
- atts = AttSizes
- },
- {NewLeaf, add_sizes(leaf, NewLeaf, SizesAcc)};
- (_Rev, _Leaf, branch, SizesAcc) ->
- {?REV_MISSING, SizesAcc}
- end, {0, 0, []}, Info#full_doc_info.rev_tree),
- {FinalAS, FinalES, FinalAtts} = FinalAcc,
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- NewActiveSize = FinalAS + TotalAttSize,
- NewExternalSize = FinalES + TotalAttSize,
- Info#full_doc_info{
- rev_tree = NewRevTree,
- sizes = #size_info{
- active = NewActiveSize,
- external = NewExternalSize
- }
- }
- end, NewInfos0),
-
- NewInfos = stem_full_doc_infos(Db, NewInfos1),
- RemoveSeqs =
- case Retry of
- nil ->
- [];
- OldDocIdTree ->
- % Compaction is being rerun to catch up to writes during the
- % first pass. This means we may have docs that already exist
- % in the seq_tree in the .data file. Here we lookup any old
- % update_seqs so that they can be removed.
- Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
- Existing = couch_btree:lookup(OldDocIdTree, Ids),
- [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
- end,
- {ok, SeqTree} = couch_btree:add_remove(
- NewDb#db.seq_tree, NewInfos, RemoveSeqs),
-
- FDIKVs = lists:map(fun(#full_doc_info{id=Id, update_seq=Seq}=FDI) ->
- {{Id, Seq}, FDI}
- end, NewInfos),
- {ok, IdEms} = couch_emsort:add(NewDb#db.id_tree, FDIKVs),
- update_compact_task(length(NewInfos)),
- NewDb#db{id_tree=IdEms, seq_tree=SeqTree}.
-
-
-copy_compact(Db, NewDb0, Retry) ->
- Compression = couch_compress:get_compression_method(),
- NewDb = NewDb0#db{compression=Compression},
- TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
- BufferSize = list_to_integer(
- config:get("database_compaction", "doc_buffer_size", "524288")),
- CheckpointAfter = couch_util:to_integer(
- config:get("database_compaction", "checkpoint_after",
- BufferSize * 10)),
-
- EnumBySeqFun =
- fun(DocInfo, _Offset,
- {AccNewDb, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
-
- Seq = case DocInfo of
- #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
- #doc_info{} -> DocInfo#doc_info.high_seq
- end,
-
- AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
- if AccUncopiedSize2 >= BufferSize ->
- NewDb2 = copy_docs(
- Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
- AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
- if AccCopiedSize2 >= CheckpointAfter ->
- CommNewDb2 = commit_compaction_data(NewDb2#db{update_seq=Seq}),
- {ok, {CommNewDb2, [], 0, 0}};
- true ->
- {ok, {NewDb2#db{update_seq = Seq}, [], 0, AccCopiedSize2}}
- end;
- true ->
- {ok, {AccNewDb, [DocInfo | AccUncopied], AccUncopiedSize2,
- AccCopiedSize}}
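+% Pair each flushed #full_doc_info{} with the previously stored version of
+% that document (or not_found for first writes) so the engine can update
+% its id and seq indexes in couch_db_engine:write_doc_infos/4.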
+pair_write_info(Old, New) ->
+ lists:map(fun(FDI) ->
+ case lists:keyfind(FDI#full_doc_info.id, #full_doc_info.id, Old) of
+ #full_doc_info{} = OldFDI -> {OldFDI, FDI};
+ false -> {not_found, FDI}
end
- end,
-
- TaskProps0 = [
- {type, database_compaction},
- {database, Db#db.name},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ],
- case (Retry =/= nil) and couch_task_status:is_task_added() of
- true ->
- couch_task_status:update([
- {retry, true},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]);
- false ->
- couch_task_status:add_task(TaskProps0),
- couch_task_status:set_update_frequency(500)
- end,
-
- {ok, _, {NewDb2, Uncopied, _, _}} =
- couch_btree:foldl(Db#db.seq_tree, EnumBySeqFun,
- {NewDb, [], 0, 0},
- [{start_key, NewDb#db.update_seq + 1}]),
-
- NewDb3 = copy_docs(Db, NewDb2, lists:reverse(Uncopied), Retry),
-
- % copy misc header values
- if NewDb3#db.security /= Db#db.security ->
- {ok, Ptr, _} = couch_file:append_term(
- NewDb3#db.fd, Db#db.security,
- [{compression, NewDb3#db.compression}]),
- NewDb4 = NewDb3#db{security=Db#db.security, security_ptr=Ptr};
- true ->
- NewDb4 = NewDb3
- end,
-
- commit_compaction_data(NewDb4#db{update_seq=Db#db.update_seq}).
-
-
-start_copy_compact(#db{}=Db) ->
- erlang:put(io_priority, {db_compact, Db#db.name}),
- #db{name=Name, filepath=Filepath, options=Options, header=Header} = Db,
- couch_log:debug("Compaction process spawned for db \"~s\"", [Name]),
-
- {ok, NewDb, DName, DFd, MFd, Retry} =
- open_compaction_files(Name, Header, Filepath, Options),
- erlang:monitor(process, MFd),
-
- % This is a bit worrisome. init_db/4 will monitor the data fd
- % but it doesn't know about the meta fd. For now I'll maintain
- % that the data fd is the old normal fd and meta fd is special
- % and hope everything works out for the best.
- unlink(DFd),
-
- NewDb1 = copy_purge_info(Db, NewDb),
- NewDb2 = copy_compact(Db, NewDb1, Retry),
- NewDb3 = sort_meta_data(NewDb2),
- NewDb4 = commit_compaction_data(NewDb3),
- NewDb5 = copy_meta_data(NewDb4),
- NewDb6 = sync_header(NewDb5, db_to_header(NewDb5, NewDb5#db.header)),
- close_db(NewDb6),
-
- ok = couch_file:close(MFd),
- gen_server:cast(Db#db.main_pid, {compact_done, DName}).
-
-
-open_compaction_files(DbName, SrcHdr, DbFilePath, Options) ->
- DataFile = DbFilePath ++ ".compact.data",
- MetaFile = DbFilePath ++ ".compact.meta",
- {ok, DataFd, DataHdr} = open_compaction_file(DataFile),
- {ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
- DataHdrIsDbHdr = couch_db_header:is_header(DataHdr),
- case {DataHdr, MetaHdr} of
- {#comp_header{}=A, #comp_header{}=A} ->
- DbHeader = A#comp_header.db_header,
- Db0 = init_db(DbName, DataFile, DataFd, DbHeader, Options),
- Db1 = bind_emsort(Db0, MetaFd, A#comp_header.meta_state),
- {ok, Db1, DataFile, DataFd, MetaFd, Db0#db.id_tree};
- _ when DataHdrIsDbHdr ->
- ok = reset_compaction_file(MetaFd, couch_db_header:from(SrcHdr)),
- Db0 = init_db(DbName, DataFile, DataFd, DataHdr, Options),
- Db1 = bind_emsort(Db0, MetaFd, nil),
- {ok, Db1, DataFile, DataFd, MetaFd, Db0#db.id_tree};
- _ ->
- Header = couch_db_header:from(SrcHdr),
- ok = reset_compaction_file(DataFd, Header),
- ok = reset_compaction_file(MetaFd, Header),
- Db0 = init_db(DbName, DataFile, DataFd, Header, Options),
- Db1 = bind_emsort(Db0, MetaFd, nil),
- {ok, Db1, DataFile, DataFd, MetaFd, nil}
- end.
-
-
-open_compaction_file(FilePath) ->
- case couch_file:open(FilePath, [nologifmissing]) of
- {ok, Fd} ->
- case couch_file:read_header(Fd) of
- {ok, Header} -> {ok, Fd, Header};
- no_valid_header -> {ok, Fd, nil}
- end;
- {error, enoent} ->
- {ok, Fd} = couch_file:open(FilePath, [create]),
- {ok, Fd, nil}
- end.
-
-
-reset_compaction_file(Fd, Header) ->
- ok = couch_file:truncate(Fd, 0),
- ok = couch_file:write_header(Fd, Header).
-
-
-copy_purge_info(OldDb, NewDb) ->
- OldHdr = OldDb#db.header,
- NewHdr = NewDb#db.header,
- OldPurgeSeq = couch_db_header:purge_seq(OldHdr),
- if OldPurgeSeq > 0 ->
- {ok, PurgedIdsRevs} = couch_db:get_last_purged(OldDb),
- Opts = [{compression, NewDb#db.compression}],
- {ok, Ptr, _} = couch_file:append_term(NewDb#db.fd, PurgedIdsRevs, Opts),
- NewNewHdr = couch_db_header:set(NewHdr, [
- {purge_seq, OldPurgeSeq},
- {purged_docs, Ptr}
- ]),
- NewDb#db{header = NewNewHdr};
- true ->
- NewDb
- end.
-
+ end, New).
-commit_compaction_data(#db{}=Db) ->
- % Compaction needs to write headers to both the data file
- % and the meta file so if we need to restart we can pick
- % back up from where we left off.
- commit_compaction_data(Db, couch_emsort:get_fd(Db#db.id_tree)),
- commit_compaction_data(Db, Db#db.fd).
-
-
-commit_compaction_data(#db{header=OldHeader}=Db0, Fd) ->
- % Mostly copied from commit_data/2 but I have to
- % replace the logic to commit and fsync to a specific
- % fd instead of the Filepath stuff that commit_data/2
- % does.
- DataState = couch_db_header:id_tree_state(OldHeader),
- MetaFd = couch_emsort:get_fd(Db0#db.id_tree),
- MetaState = couch_emsort:get_state(Db0#db.id_tree),
- Db1 = bind_id_tree(Db0, Db0#db.fd, DataState),
- Header = db_to_header(Db1, OldHeader),
- CompHeader = #comp_header{
- db_header = Header,
- meta_state = MetaState
- },
- ok = couch_file:sync(Fd),
- ok = couch_file:write_header(Fd, CompHeader),
- Db2 = Db1#db{
- waiting_delayed_commit=nil,
- header=Header,
- committed_update_seq=Db1#db.update_seq
- },
- bind_emsort(Db2, MetaFd, MetaState).
-
-
-bind_emsort(Db, Fd, nil) ->
- {ok, Ems} = couch_emsort:open(Fd),
- Db#db{id_tree=Ems};
-bind_emsort(Db, Fd, State) ->
- {ok, Ems} = couch_emsort:open(Fd, [{root, State}]),
- Db#db{id_tree=Ems}.
-
-
-bind_id_tree(Db, Fd, State) ->
- {ok, IdBtree} = couch_btree:open(State, Fd, [
- {split, fun ?MODULE:btree_by_id_split/1},
- {join, fun ?MODULE:btree_by_id_join/2},
- {reduce, fun ?MODULE:btree_by_id_reduce/2}
- ]),
- Db#db{id_tree=IdBtree}.
-
-
-sort_meta_data(Db0) ->
- {ok, Ems} = couch_emsort:merge(Db0#db.id_tree),
- Db0#db{id_tree=Ems}.
-
-
-copy_meta_data(#db{fd=Fd, header=Header}=Db) ->
- Src = Db#db.id_tree,
- DstState = couch_db_header:id_tree_state(Header),
- {ok, IdTree0} = couch_btree:open(DstState, Fd, [
- {split, fun ?MODULE:btree_by_id_split/1},
- {join, fun ?MODULE:btree_by_id_join/2},
- {reduce, fun ?MODULE:btree_by_id_reduce/2}
- ]),
- {ok, Iter} = couch_emsort:iter(Src),
- Acc0 = #merge_st{
- id_tree=IdTree0,
- seq_tree=Db#db.seq_tree,
- rem_seqs=[],
- infos=[]
- },
- Acc = merge_docids(Iter, Acc0),
- {ok, IdTree} = couch_btree:add(Acc#merge_st.id_tree, Acc#merge_st.infos),
- {ok, SeqTree} = couch_btree:add_remove(
- Acc#merge_st.seq_tree, [], Acc#merge_st.rem_seqs
- ),
- Db#db{id_tree=IdTree, seq_tree=SeqTree}.
-
-
-merge_docids(Iter, #merge_st{infos=Infos}=Acc) when length(Infos) > 1000 ->
- #merge_st{
- id_tree=IdTree0,
- seq_tree=SeqTree0,
- rem_seqs=RemSeqs
- } = Acc,
- {ok, IdTree1} = couch_btree:add(IdTree0, Infos),
- {ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
- Acc1 = Acc#merge_st{
- id_tree=IdTree1,
- seq_tree=SeqTree1,
- rem_seqs=[],
- infos=[]
- },
- merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
- case next_info(Iter, Curr, []) of
- {NextIter, NewCurr, FDI, Seqs} ->
- Acc1 = Acc#merge_st{
- infos = [FDI | Acc#merge_st.infos],
- rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
- curr = NewCurr
- },
- merge_docids(NextIter, Acc1);
- {finished, FDI, Seqs} ->
- Acc#merge_st{
- infos = [FDI | Acc#merge_st.infos],
- rem_seqs = Seqs ++ Acc#merge_st.rem_seqs,
- curr = undefined
- };
- empty ->
- Acc
- end.
-
-
-next_info(Iter, undefined, []) ->
- case couch_emsort:next(Iter) of
- {ok, {{Id, Seq}, FDI}, NextIter} ->
- next_info(NextIter, {Id, Seq, FDI}, []);
- finished ->
- empty
- end;
-next_info(Iter, {Id, Seq, FDI}, Seqs) ->
- case couch_emsort:next(Iter) of
- {ok, {{Id, NSeq}, NFDI}, NextIter} ->
- next_info(NextIter, {Id, NSeq, NFDI}, [Seq | Seqs]);
- {ok, {{NId, NSeq}, NFDI}, NextIter} ->
- {NextIter, {NId, NSeq, NFDI}, FDI, Seqs};
- finished ->
- {finished, FDI, Seqs}
- end.
-
-update_compact_task(NumChanges) ->
- [Changes, Total] = couch_task_status:get([changes_done, total_changes]),
- Changes2 = Changes + NumChanges,
- Progress = case Total of
- 0 ->
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
- couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
-
-make_doc_summary(#db{compression = Comp}, {Body0, Atts0}) ->
- Body = case couch_compress:is_compressed(Body0, Comp) of
- true ->
- Body0;
- false ->
- % pre 1.2 database file format
- couch_compress:compress(Body0, Comp)
- end,
- Atts = case couch_compress:is_compressed(Atts0, Comp) of
- true ->
- Atts0;
- false ->
- couch_compress:compress(Atts0, Comp)
- end,
- SummaryBin = ?term_to_bin({Body, Atts}),
- couch_file:assemble_file_chunk(SummaryBin, crypto:hash(md5, SummaryBin)).
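+% Counterpart of pair_write_info/2 used for purges: each old
+% #full_doc_info{} is paired with its updated version, or with not_found
+% when no replacement info exists for that document id.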
+pair_purge_info(Old, New) ->
+ lists:map(fun(OldFDI) ->
+ case lists:keyfind(OldFDI#full_doc_info.id, #full_doc_info.id, New) of
+ #full_doc_info{} = NewFDI -> {OldFDI, NewFDI};
+ false -> {OldFDI, not_found}
+ end
+ end, Old).
-get_meta_body_size(Meta, Summary) ->
- case lists:keyfind(ejson_size, 1, Meta) of
- {ejson_size, ExternalSize} ->
- ExternalSize;
- false ->
- couch_compress:uncompressed_size(Summary)
- end.
+get_meta_body_size(Meta) ->
+ {ejson_size, ExternalSize} = lists:keyfind(ejson_size, 1, Meta),
+ ExternalSize.
default_security_object(<<"shards/", _/binary>>) ->
diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
index acd4fda78..9f668ea69 100644
--- a/src/couch/src/couch_file.erl
+++ b/src/couch/src/couch_file.erl
@@ -44,7 +44,7 @@
-export([append_term/2, append_term/3, append_term_md5/2, append_term_md5/3]).
-export([write_header/2, read_header/1]).
-export([delete/2, delete/3, nuke_dir/2, init_delete_dir/1]).
--export([msec_since_last_read/1]).
+-export([last_read/1]).
% gen_server callbacks
-export([init/1, terminate/2, code_change/3]).
@@ -213,8 +213,12 @@ truncate(Fd, Pos) ->
%%----------------------------------------------------------------------
sync(Filepath) when is_list(Filepath) ->
- {ok, Fd} = file:open(Filepath, [append, raw]),
- try ok = file:sync(Fd) after ok = file:close(Fd) end;
+ case file:open(Filepath, [append, raw]) of
+ {ok, Fd} ->
+ try ok = file:sync(Fd) after ok = file:close(Fd) end;
+ {error, Error} ->
+ erlang:error(Error)
+ end;
sync(Fd) ->
gen_server:call(Fd, sync, infinity).
@@ -340,15 +344,9 @@ init_status_error(ReturnPid, Ref, Error) ->
ignore.
-% Return time since last read. The return value is conservative in the
-% sense that if no read timestamp has been found, it would return 0. This
-% result is used to decide if reader is idle so returning 0 will avoid marking
-% it idle by accident when process is starting up.
-msec_since_last_read(Fd) when is_pid(Fd) ->
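+% Return the timestamp of the last read on this file, falling back to the
+% current time when no read timestamp has been recorded yet.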
+last_read(Fd) when is_pid(Fd) ->
Now = os:timestamp(),
- LastRead = couch_util:process_dict_get(Fd, read_timestamp, Now),
- DtMSec = timer:now_diff(Now, LastRead) div 1000,
- max(0, DtMSec).
+ couch_util:process_dict_get(Fd, read_timestamp, Now).
% server functions
@@ -395,14 +393,18 @@ init({Filepath, Options, ReturnPid, Ref}) ->
% open in read mode first, so we don't create the file if it doesn't exist.
case file:open(Filepath, [read, raw]) of
{ok, Fd_Read} ->
- {ok, Fd} = file:open(Filepath, OpenOptions),
- %% Save Fd in process dictionary for debugging purposes
- put(couch_file_fd, {Fd, Filepath}),
- ok = file:close(Fd_Read),
- maybe_track_open_os_files(Options),
- {ok, Eof} = file:position(Fd, eof),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd=Fd, eof=Eof, is_sys=IsSys, pread_limit=Limit}};
+ case file:open(Filepath, OpenOptions) of
+ {ok, Fd} ->
+ %% Save Fd in process dictionary for debugging purposes
+ put(couch_file_fd, {Fd, Filepath}),
+ ok = file:close(Fd_Read),
+ maybe_track_open_os_files(Options),
+ {ok, Eof} = file:position(Fd, eof),
+ erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
+ {ok, #file{fd=Fd, eof=Eof, is_sys=IsSys, pread_limit=Limit}};
+ Error ->
+ init_status_error(ReturnPid, Ref, Error)
+ end;
Error ->
init_status_error(ReturnPid, Ref, Error)
end
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 1694ac87f..050282a0c 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -1170,6 +1170,19 @@ before_response(Req0, Code0, Headers0, Args0) ->
respond_(#httpd{mochi_req = MochiReq}, Code, Headers, _Args, start_response) ->
MochiReq:start_response({Code, Headers});
+respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
+ % Special handling for the 413 response. Make sure the socket is closed as
+ % we don't know how much data was read before the error was thrown. Also
+ % drain all the data in the receive buffer to avoid the connection being reset
+ % before the 413 response is parsed by the client. This is still racy; it
+ % just increases the chances of 413 being detected correctly by the client
+ % (rather than getting a brutal TCP reset).
+ erlang:put(mochiweb_request_force_close, true),
+ Socket = MochiReq:get(socket),
+ mochiweb_socket:recv(Socket, 0, 0),
+ Result = MochiReq:Type({413, Headers, Args}),
+ mochiweb_socket:recv(Socket, 0, 0),
+ Result;
respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
MochiReq:Type({Code, Headers, Args}).
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 6ac7b75af..74cbe5a08 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -309,7 +309,12 @@ handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
Secret = ?l2b(ensure_cookie_auth_secret()),
UserSalt = couch_util:get_value(<<"salt">>, UserProps),
CurrentTime = make_cookie_time(),
- Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
+ Cookie = case UserSalt of
+ undefined ->
+ cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary>>, CurrentTime);
+ _ ->
+ cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime)
+ end,
% TODO document the "next" feature in Futon
{Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
nil ->
@@ -401,7 +406,10 @@ authenticate(Pass, UserProps) ->
Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
verify_iterations(Iterations),
{couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
- couch_util:get_value(<<"derived_key">>, UserProps, nil)}
+ couch_util:get_value(<<"derived_key">>, UserProps, nil)};
+ <<"bcrypt">> ->
+ UserHash = couch_util:get_value(<<"derived_key">>, UserProps, nil),
+ {couch_passwords:bcrypt(Pass, UserHash), UserHash}
end,
couch_passwords:verify(PasswordHash, ExpectedHash).
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
index 05e63ba97..99b1192a9 100644
--- a/src/couch/src/couch_httpd_db.erl
+++ b/src/couch/src/couch_httpd_db.erl
@@ -217,7 +217,13 @@ handle_design_info_req(#httpd{
create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
ok = couch_httpd:verify_is_server_admin(Req),
- case couch_server:create(DbName, [{user_ctx, UserCtx}]) of
+ Engine = case couch_httpd:qs_value(Req, "engine") of
+ EngineStr when is_list(EngineStr) ->
+ [{engine, iolist_to_binary(EngineStr)}];
+ _ ->
+ []
+ end,
+ case couch_server:create(DbName, [{user_ctx, UserCtx}] ++ Engine) of
{ok, Db} ->
couch_db:close(Db),
DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
@@ -616,7 +622,8 @@ db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
missing_rev -> nil;
Rev -> Rev
end,
- {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
+ {TargetDocId0, TargetRevs} = parse_copy_destination_header(Req),
+ TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
index 1def94853..e2fc9f2fc 100644
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ b/src/couch/src/couch_httpd_misc_handlers.erl
@@ -17,8 +17,6 @@
handle_uuids_req/1,handle_config_req/1,
handle_task_status_req/1, handle_file_req/2]).
--export([increment_update_seq_req/2]).
-
-include_lib("couch/include/couch_db.hrl").
@@ -63,30 +61,9 @@ handle_file_req(#httpd{method='GET'}=Req, Document) ->
handle_file_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
- "/" ++ UrlPath = couch_httpd:path(Req),
- case couch_httpd:partition(UrlPath) of
- {_ActionKey, "/", RelativePath} ->
- % GET /_utils/path or GET /_utils/
- CachingHeaders = [{"Cache-Control", "private, must-revalidate"}],
- EnableCsp = config:get("csp", "enable", "false"),
- Headers = maybe_add_csp_headers(CachingHeaders, EnableCsp),
- couch_httpd:serve_file(Req, RelativePath, DocumentRoot, Headers);
- {_ActionKey, "", _RelativePath} ->
- % GET /_utils
- RedirectPath = couch_httpd:path(Req) ++ "/",
- couch_httpd:send_redirect(Req, RedirectPath)
- end;
handle_utils_dir_req(Req, _) ->
- send_method_not_allowed(Req, "GET,HEAD").
-
-maybe_add_csp_headers(Headers, "true") ->
- DefaultValues = "default-src 'self'; img-src 'self' data:; font-src 'self'; "
- "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
- Value = config:get("csp", "header_value", DefaultValues),
- [{"Content-Security-Policy", Value} | Headers];
-maybe_add_csp_headers(Headers, _) ->
- Headers.
+ send_error(Req, 410, <<"no_node_local_fauxton">>,
+ ?l2b("The web interface is no longer available on the node-local port.")).
handle_all_dbs_req(#httpd{method='GET'}=Req) ->
@@ -311,14 +288,3 @@ handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Re
send_json(Req, 200, list_to_binary(OldValue))
end.
-
-% httpd db handlers
-
-increment_update_seq_req(#httpd{method='POST'}=Req, Db) ->
- couch_httpd:validate_ctype(Req, "application/json"),
- {ok, NewSeq} = couch_db:increment_update_seq(Db),
- send_json(Req, {[{ok, true},
- {update_seq, NewSeq}
- ]});
-increment_update_seq_req(Req, _Db) ->
- send_method_not_allowed(Req, "POST").
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
index 6ce3c76fe..e556b283f 100644
--- a/src/couch/src/couch_httpd_multipart.erl
+++ b/src/couch/src/couch_httpd_multipart.erl
@@ -99,12 +99,24 @@ mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) ->
abort_parsing ->
ok;
{get_bytes, Ref, From} ->
- C2 = orddict:update_counter(From, 1, Counters),
- NewAcc = maybe_send_data({Ref, Chunks, Offset, C2, [From|Waiting]}),
- mp_parse_atts(eof, NewAcc);
+ C2 = update_writer(From, Counters),
+ case maybe_send_data({Ref, Chunks, Offset, C2, [From|Waiting]}) of
+ abort_parsing ->
+ ok;
+ NewAcc ->
+ mp_parse_atts(eof, NewAcc)
+ end;
{'DOWN', ParentRef, _, _, _} ->
- exit(mp_reader_coordinator_died)
- after 3600000 ->
+ exit(mp_reader_coordinator_died);
+ {'DOWN', WriterRef, _, WriterPid, _} ->
+ case remove_writer(WriterPid, WriterRef, Counters) of
+ abort_parsing ->
+ ok;
+ C2 ->
+ NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
+ mp_parse_atts(eof, NewAcc)
+ end
+ after 300000 ->
ok
end
end.
@@ -116,12 +128,12 @@ mp_abort_parse_atts(_, _) ->
maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
receive {get_bytes, Ref, From} ->
- NewCounters = orddict:update_counter(From, 1, Counters),
+ NewCounters = update_writer(From, Counters),
maybe_send_data({Ref, Chunks, Offset, NewCounters, [From|Waiting]})
after 0 ->
% reply to as many writers as possible
NewWaiting = lists:filter(fun(Writer) ->
- WhichChunk = orddict:fetch(Writer, Counters),
+ {_, WhichChunk} = orddict:fetch(Writer, Counters),
ListIndex = WhichChunk - Offset,
if ListIndex =< length(Chunks) ->
Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)},
@@ -132,11 +144,11 @@ maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
end, Waiting),
% check if we can drop a chunk from the head of the list
- case Counters of
+ SmallestIndex = case Counters of
[] ->
- SmallestIndex = 0;
+ 0;
_ ->
- SmallestIndex = lists:min(element(2, lists:unzip(Counters)))
+ lists:min([C || {_WPid, {_WRef, C}} <- Counters])
end,
Size = length(Counters),
N = num_mp_writers(),
@@ -149,7 +161,7 @@ maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
end,
% we should wait for a writer if no one has written the last chunk
- LargestIndex = lists:max([0|element(2, lists:unzip(Counters))]),
+ LargestIndex = lists:max([0] ++ [C || {_WPid, {_WRef, C}} <- Counters]),
if LargestIndex >= (Offset + length(Chunks)) ->
% someone has written all possible chunks, keep moving
{Ref, NewChunks, NewOffset, Counters, NewWaiting};
@@ -160,14 +172,55 @@ maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
abort_parsing;
{'DOWN', ParentRef, _, _, _} ->
exit(mp_reader_coordinator_died);
+ {'DOWN', WriterRef, _, WriterPid, _} ->
+ case remove_writer(WriterPid, WriterRef, Counters) of
+ abort_parsing ->
+ abort_parsing;
+ C2 ->
+ RestWaiting = NewWaiting -- [WriterPid],
+ NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting},
+ maybe_send_data(NewAcc)
+ end;
{get_bytes, Ref, X} ->
- C2 = orddict:update_counter(X, 1, Counters),
+ C2 = update_writer(X, Counters),
maybe_send_data({Ref, NewChunks, NewOffset, C2, [X|NewWaiting]})
+ after 300000 ->
+ abort_parsing
end
end
end.
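+% Bump the chunk counter for a writer process, monitoring the process the
+% first time it is seen so that a crashed writer can later be detected and
+% removed by the 'DOWN' handling above.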
+update_writer(WriterPid, Counters) ->
+ UpdateFun = fun({WriterRef, Count}) -> {WriterRef, Count + 1} end,
+ InitialValue = case orddict:find(WriterPid, Counters) of
+ {ok, IV} ->
+ IV;
+ error ->
+ WriterRef = erlang:monitor(process, WriterPid),
+ {WriterRef, 1}
+ end,
+ orddict:update(WriterPid, UpdateFun, InitialValue, Counters).
+
+
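+% Handle a 'DOWN' message for a writer: when the monitor ref matches a
+% known writer, decrement the writer count; otherwise, or when it was the
+% last remaining writer, abort parsing.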
+remove_writer(WriterPid, WriterRef, Counters) ->
+ case orddict:find(WriterPid, Counters) of
+ {ok, {WriterRef, _}} ->
+ case num_mp_writers() of
+ N when N > 1 ->
+ num_mp_writers(N - 1);
+ _ ->
+ abort_parsing
+ end;
+ {ok, _} ->
+ % A 'DOWN' message arrived with an unexpected monitor ref for a known writer
+ abort_parsing;
+ error ->
+ % Unknown worker pid?
+ abort_parsing
+ end.
+
+
num_mp_writers(N) ->
erlang:put(mp_att_writers, N).
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
index e2e187eb1..cd661e29a 100644
--- a/src/couch/src/couch_key_tree.erl
+++ b/src/couch/src/couch_key_tree.erl
@@ -73,7 +73,7 @@ stem/2
%% @doc Merge a path into the given tree and then stem the result.
%% Although Tree is of type tree(), it must not contain any branches.
--spec merge(revtree(), tree(), pos_integer()) ->
+-spec merge(revtree(), tree() | path(), pos_integer()) ->
{revtree(), new_leaf | new_branch | internal_node}.
merge(RevTree, Tree, StemDepth) ->
{Merged, Result} = merge(RevTree, Tree),
@@ -84,7 +84,7 @@ merge(RevTree, Tree, StemDepth) ->
%% @doc Merge a path into a tree.
--spec merge(revtree(), tree()) ->
+-spec merge(revtree(), tree() | path()) ->
{revtree(), new_leaf | new_branch | internal_node}.
merge(RevTree, Tree) ->
{Merged, Result} = merge_tree(RevTree, Tree, []),
@@ -94,7 +94,7 @@ merge(RevTree, Tree) ->
%% @doc Attempt to merge Tree into each branch of the RevTree.
%% If it can't find a branch that the new tree merges into, add it as a
%% new branch in the RevTree.
--spec merge_tree(revtree(), tree(), revtree()) ->
+-spec merge_tree(revtree(), tree() | path(), revtree()) ->
{revtree(), new_leaf | new_branch | internal_node}.
merge_tree([], Tree, []) ->
{[Tree], new_leaf};
diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl
index 677ef6559..77e136144 100644
--- a/src/couch/src/couch_passwords.erl
+++ b/src/couch/src/couch_passwords.erl
@@ -12,7 +12,7 @@
-module(couch_passwords).
--export([simple/2, pbkdf2/3, pbkdf2/4, verify/2]).
+-export([simple/2, pbkdf2/3, pbkdf2/4, bcrypt/2, verify/2]).
-export([hash_admin_password/1, get_unhashed_admins/0]).
-include_lib("couch/include/couch_db.hrl").
@@ -23,7 +23,13 @@
%% legacy scheme, not used for new passwords.
-spec simple(binary(), binary()) -> binary().
simple(Password, Salt) when is_binary(Password), is_binary(Salt) ->
- ?l2b(couch_util:to_hex(crypto:hash(sha, <<Password/binary, Salt/binary>>))).
+ ?l2b(couch_util:to_hex(crypto:hash(sha, <<Password/binary, Salt/binary>>)));
+simple(Password, Salt) when is_binary(Salt) ->
+ Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
+ throw({forbidden, Msg});
+simple(Password, Salt) when is_binary(Password) ->
+ Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
+ throw({forbidden, Msg}).
%% CouchDB utility functions
-spec hash_admin_password(binary() | list()) -> binary().
@@ -45,7 +51,10 @@ hash_admin_password("pbkdf2", ClearPassword) ->
Salt ,list_to_integer(Iterations)),
?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ ","
++ ?b2l(Salt) ++ ","
- ++ Iterations).
+ ++ Iterations);
+hash_admin_password("bcrypt", ClearPassword) ->
+ LogRounds = list_to_integer(config:get("couch_httpd_auth", "log_rounds", "10")),
+ ?l2b("-bcrypt-" ++ couch_passwords:bcrypt(couch_util:to_binary(ClearPassword), LogRounds)).
-spec get_unhashed_admins() -> list().
get_unhashed_admins() ->
@@ -54,6 +63,8 @@ get_unhashed_admins() ->
false; % already hashed
({_User, "-pbkdf2-" ++ _}) ->
false; % already hashed
+ ({_User, "-bcrypt-" ++ _}) ->
+ false; % already hashed
({_User, _ClearPassword}) ->
true
end,
@@ -66,7 +77,17 @@ pbkdf2(Password, Salt, Iterations) when is_binary(Password),
is_integer(Iterations),
Iterations > 0 ->
{ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
- Result.
+ Result;
+pbkdf2(Password, Salt, Iterations) when is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0 ->
+ Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
+ throw({forbidden, Msg});
+pbkdf2(Password, Salt, Iterations) when is_binary(Password),
+ is_integer(Iterations),
+ Iterations > 0 ->
+ Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
+ throw({forbidden, Msg}).
-spec pbkdf2(binary(), binary(), integer(), integer())
-> {ok, binary()} | {error, derived_key_too_long}.
@@ -107,6 +128,16 @@ pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1,
Next, crypto:exor(Next, Acc)).
+%% Define the bcrypt functions to hash a password
+-spec bcrypt(binary(), binary()) -> binary();
+ (binary(), integer()) -> binary().
+bcrypt(Password, Salt) when is_binary(Salt) ->
+ {ok, Hash} = bcrypt:hashpw(Password, Salt),
+ list_to_binary(Hash);
+bcrypt(Password, LogRounds) when is_integer(LogRounds) ->
+ {ok, Salt} = bcrypt:gen_salt(LogRounds),
+ bcrypt(Password, list_to_binary(Salt)).
+
%% verify two lists for equality without short-circuits to avoid timing attacks.
-spec verify(string(), string(), integer()) -> boolean().
verify([X|RestX], [Y|RestY], Result) ->
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index 4928eea32..f31d24c6c 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -324,7 +324,7 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
end,
case Resp of
- 1 ->
+ RespCode when RespCode =:= 1; RespCode =:= ok; RespCode =:= true ->
ok;
{[{<<"forbidden">>, Message}]} ->
throw({forbidden, Message});
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index efcef714e..ff1bf9eb6 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -22,6 +22,9 @@
-export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
-export([close_lru/0]).
-export([close_db_if_idle/1]).
+-export([delete_compaction_files/1]).
+-export([exists/1]).
+-export([get_engine_extensions/0]).
% config_listener api
-export([handle_config_change/5, handle_config_terminate/3]).
@@ -34,6 +37,7 @@
-record(server,{
root_dir = [],
+ engines = [],
max_dbs_open=?MAX_DBS_OPEN,
dbs_open=0,
start_time="",
@@ -118,6 +122,27 @@ create(DbName, Options0) ->
delete(DbName, Options) ->
gen_server:call(couch_server, {delete, DbName, Options}, infinity).
+
+exists(DbName) ->
+ RootDir = config:get("couchdb", "database_dir", "."),
+ Engines = get_configured_engines(),
+ Possible = get_possible_engines(DbName, RootDir, Engines),
+ Possible /= [].
+
+
+delete_compaction_files(DbName) ->
+ delete_compaction_files(DbName, []).
+
+delete_compaction_files(DbName, DelOpts) when is_list(DbName) ->
+ RootDir = config:get("couchdb", "database_dir", "."),
+ lists:foreach(fun({Ext, Engine}) ->
+ FPath = make_filepath(RootDir, DbName, Ext),
+ couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts)
+ end, get_configured_engines()),
+ ok;
+delete_compaction_files(DbName, DelOpts) when is_binary(DbName) ->
+ delete_compaction_files(?b2l(DbName), DelOpts).
+
maybe_add_sys_db_callbacks(DbName, Options) when is_binary(DbName) ->
maybe_add_sys_db_callbacks(?b2l(DbName), Options);
maybe_add_sys_db_callbacks(DbName, Options) ->
@@ -165,9 +190,6 @@ is_admin(User, ClearPwd) ->
has_admins() ->
config:get("admins") /= [].
-get_full_filename(Server, DbName) ->
- filename:join([Server#server.root_dir, "./" ++ DbName ++ ".couch"]).
-
hash_admin_passwords() ->
hash_admin_passwords(true).
@@ -188,12 +210,16 @@ close_db_if_idle(DbName) ->
init([]) ->
+ % Mark pluggable storage engines as a supported feature
+ config:enable_feature('pluggable-storage-engines'),
+
% read config and register for configuration changes
% just stop if one of the config settings change. couch_server_sup
% will restart us and then we will pick up the new settings.
RootDir = config:get("couchdb", "database_dir", "."),
+ Engines = get_configured_engines(),
MaxDbsOpen = list_to_integer(
config:get("couchdb", "max_dbs_open", integer_to_list(?MAX_DBS_OPEN))),
UpdateLruOnRead =
@@ -205,6 +231,7 @@ init([]) ->
ets:new(couch_dbs_pid_to_name, [set, protected, named_table]),
process_flag(trap_exit, true),
{ok, #server{root_dir=RootDir,
+ engines = Engines,
max_dbs_open=MaxDbsOpen,
update_lru_on_read=UpdateLruOnRead,
start_time=couch_util:rfc1123_date()}}.
@@ -229,6 +256,8 @@ handle_config_change("couchdb", "max_dbs_open", Max, _, _) when is_list(Max) ->
{ok, gen_server:call(couch_server,{set_max_dbs_open,list_to_integer(Max)})};
handle_config_change("couchdb", "max_dbs_open", _, _, _) ->
{ok, gen_server:call(couch_server,{set_max_dbs_open,?MAX_DBS_OPEN})};
+handle_config_change("couchdb_engines", _, _, _, _) ->
+ {ok, gen_server:call(couch_server, reload_engines)};
handle_config_change("admins", _, _, Persist, _) ->
% spawn here so couch event manager doesn't deadlock
{ok, spawn(fun() -> hash_admin_passwords(Persist) end)};
@@ -263,11 +292,15 @@ all_databases() ->
all_databases(Fun, Acc0) ->
{ok, #server{root_dir=Root}} = gen_server:call(couch_server, get_server),
NormRoot = couch_util:normpath(Root),
- FinalAcc = try
- filelib:fold_files(Root,
+ Extensions = get_engine_extensions(),
+ ExtRegExp = "(" ++ string:join(Extensions, "|") ++ ")",
+ RegExp =
"^[a-z0-9\\_\\$()\\+\\-]*" % stock CouchDB name regex
"(\\.[0-9]{10,})?" % optional shard timestamp
- "\\.couch$", % filename extension
+ "\\." ++ ExtRegExp ++ "$", % filename extension
+ FinalAcc = try
+ couch_util:fold_files(Root,
+ RegExp,
true,
fun(Filename, AccIn) ->
NormFilename = couch_util:normpath(Filename),
@@ -275,7 +308,8 @@ all_databases(Fun, Acc0) ->
[$/ | RelativeFilename] -> ok;
RelativeFilename -> ok
end,
- case Fun(couch_util:drop_dot_couch_ext(?l2b(RelativeFilename)), AccIn) of
+ Ext = filename:extension(RelativeFilename),
+ case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of
{ok, NewAcc} -> NewAcc;
{stop, NewAcc} -> throw({stop, Fun, NewAcc})
end
@@ -303,11 +337,11 @@ maybe_close_lru_db(#server{lru=Lru}=Server) ->
{error, all_dbs_active}
end.
-open_async(Server, From, DbName, Filepath, Options) ->
+open_async(Server, From, DbName, {Module, Filepath}, Options) ->
Parent = self(),
T0 = os:timestamp(),
Opener = spawn_link(fun() ->
- Res = couch_db:start_link(DbName, Filepath, Options),
+ Res = couch_db:start_link(Module, DbName, Filepath, Options),
case {Res, lists:member(create, Options)} of
{{ok, _Db}, true} ->
couch_event:notify(DbName, created);
@@ -345,6 +379,8 @@ handle_call({set_update_lru_on_read, UpdateOnRead}, _From, Server) ->
{reply, ok, Server#server{update_lru_on_read=UpdateOnRead}};
handle_call({set_max_dbs_open, Max}, _From, Server) ->
{reply, ok, Server#server{max_dbs_open=Max}};
+handle_call(reload_engines, _From, Server) ->
+ {reply, ok, Server#server{engines = get_configured_engines()}};
handle_call(get_server, _From, Server) ->
{reply, {ok, Server}, Server};
handle_call({open_result, T0, DbName, {ok, Db}}, {FromPid, _Tag}, Server) ->
@@ -362,7 +398,7 @@ handle_call({open_result, T0, DbName, {ok, Db}}, {FromPid, _Tag}, Server) ->
[gen_server:reply(Waiter, {ok, Db}) || Waiter <- Waiters],
% Cancel the creation request if it exists.
case ReqType of
- {create, DbName, _Filepath, _Options, CrFrom} ->
+ {create, DbName, _Engine, _Options, CrFrom} ->
gen_server:reply(CrFrom, file_exists);
_ ->
ok
@@ -397,8 +433,8 @@ handle_call({open_result, _T0, DbName, Error}, {FromPid, _Tag}, Server) ->
true = ets:delete(couch_dbs, DbName),
true = ets:delete(couch_dbs_pid_to_name, FromPid),
NewServer = case ReqType of
- {create, DbName, Filepath, Options, CrFrom} ->
- open_async(Server, CrFrom, DbName, Filepath, Options);
+ {create, DbName, Engine, Options, CrFrom} ->
+ open_async(Server, CrFrom, DbName, Engine, Options);
_ ->
Server
end,
@@ -412,8 +448,8 @@ handle_call({open, DbName, Options}, From, Server) ->
ok ->
case make_room(Server, Options) of
{ok, Server2} ->
- Filepath = get_full_filename(Server, DbNameList),
- {noreply, open_async(Server2, From, DbName, Filepath, Options)};
+ {ok, Engine} = get_engine(Server2, DbNameList),
+ {noreply, open_async(Server2, From, DbName, Engine, Options)};
CloseError ->
{reply, CloseError, Server}
end;
@@ -432,28 +468,32 @@ handle_call({open, DbName, Options}, From, Server) ->
end;
handle_call({create, DbName, Options}, From, Server) ->
DbNameList = binary_to_list(DbName),
- Filepath = get_full_filename(Server, DbNameList),
- case check_dbname(Server, DbNameList) of
- ok ->
- case ets:lookup(couch_dbs, DbName) of
- [] ->
- case make_room(Server, Options) of
- {ok, Server2} ->
- {noreply, open_async(Server2, From, DbName, Filepath,
- [create | Options])};
- CloseError ->
- {reply, CloseError, Server}
+ case get_engine(Server, DbNameList, Options) of
+ {ok, Engine} ->
+ case check_dbname(Server, DbNameList) of
+ ok ->
+ case ets:lookup(couch_dbs, DbName) of
+ [] ->
+ case make_room(Server, Options) of
+ {ok, Server2} ->
+ {noreply, open_async(Server2, From, DbName, Engine,
+ [create | Options])};
+ CloseError ->
+ {reply, CloseError, Server}
+ end;
+ [#entry{req_type = open} = Entry] ->
+ % We're trying to create a database while someone is in
+ % the middle of trying to open it. We allow one creator
+ % to wait while we figure out if it'll succeed.
+ CrOptions = [create | Options],
+ Req = {create, DbName, Engine, CrOptions, From},
+ true = ets:insert(couch_dbs, Entry#entry{req_type = Req}),
+ {noreply, Server};
+ [_AlreadyRunningDb] ->
+ {reply, file_exists, Server}
end;
- [#entry{req_type = open} = Entry] ->
- % We're trying to create a database while someone is in
- % the middle of trying to open it. We allow one creator
- % to wait while we figure out if it'll succeed.
- CrOptions = [create | Options],
- Req = {create, DbName, Filepath, CrOptions, From},
- true = ets:insert(couch_dbs, Entry#entry{req_type = Req}),
- {noreply, Server};
- [_AlreadyRunningDb] ->
- {reply, file_exists, Server}
+ Error ->
+ {reply, Error, Server}
end;
Error ->
{reply, Error, Server}
@@ -462,7 +502,6 @@ handle_call({delete, DbName, Options}, _From, Server) ->
DbNameList = binary_to_list(DbName),
case check_dbname(Server, DbNameList) of
ok ->
- FullFilepath = get_full_filename(Server, DbNameList),
Server2 =
case ets:lookup(couch_dbs, DbName) of
[] -> Server;
@@ -479,18 +518,16 @@ handle_call({delete, DbName, Options}, _From, Server) ->
db_closed(Server, Entry#entry.db_options)
end,
- %% Delete any leftover compaction files. If we don't do this a
- %% subsequent request for this DB will try to open them to use
- %% as a recovery.
- lists:foreach(fun(Ext) ->
- couch_file:delete(Server#server.root_dir, FullFilepath ++ Ext)
- end, [".compact", ".compact.data", ".compact.meta"]),
- couch_file:delete(Server#server.root_dir, FullFilepath ++ ".compact"),
-
couch_db_plugin:on_delete(DbName, Options),
DelOpt = [{context, delete} | Options],
- case couch_file:delete(Server#server.root_dir, FullFilepath, DelOpt) of
+
+ % Make sure to remove all compaction data
+ delete_compaction_files(DbNameList, DelOpt),
+
+ {ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
+ RootDir = Server#server.root_dir,
+ case couch_db_engine:delete(Engine, RootDir, FilePath, DelOpt) of
ok ->
couch_event:notify(DbName, deleted),
{reply, ok, Server2};
@@ -589,6 +626,115 @@ db_closed(Server, Options) ->
true -> Server
end.
+
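+% Read the [couchdb_engines] config section, which maps filename
+% extensions to storage engine modules; when nothing usable is configured,
+% fall back to the default .couch / couch_bt_engine pairing.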
+get_configured_engines() ->
+ ConfigEntries = config:get("couchdb_engines"),
+ Engines = lists:flatmap(fun({Extension, ModuleStr}) ->
+ try
+ [{Extension, list_to_atom(ModuleStr)}]
+ catch _T:_R ->
+ []
+ end
+ end, ConfigEntries),
+ case Engines of
+ [] ->
+ [{"couch", couch_bt_engine}];
+ Else ->
+ Else
+ end.
+
+
+get_engine(Server, DbName, Options) ->
+ #server{
+ root_dir = RootDir,
+ engines = Engines
+ } = Server,
+ case couch_util:get_value(engine, Options) of
+ Ext when is_binary(Ext) ->
+ ExtStr = binary_to_list(Ext),
+ case lists:keyfind(ExtStr, 1, Engines) of
+ {ExtStr, Engine} ->
+ Path = make_filepath(RootDir, DbName, ExtStr),
+ {ok, {Engine, Path}};
+ false ->
+ {error, {invalid_engine_extension, Ext}}
+ end;
+ _ ->
+ get_engine(Server, DbName)
+ end.
+
+
+get_engine(Server, DbName) ->
+ #server{
+ root_dir = RootDir,
+ engines = Engines
+ } = Server,
+ Possible = get_possible_engines(DbName, RootDir, Engines),
+ case Possible of
+ [] ->
+ get_default_engine(Server, DbName);
+ [Engine] ->
+ {ok, Engine};
+ _ ->
+ erlang:error(engine_conflict)
+ end.
+
+
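+% Find every configured engine that already has a database file on disk
+% for this name; used when opening existing databases and to detect
+% conflicts between multiple engines claiming the same database.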
+get_possible_engines(DbName, RootDir, Engines) ->
+ lists:foldl(fun({Extension, Engine}, Acc) ->
+ Path = make_filepath(RootDir, DbName, Extension),
+ case couch_db_engine:exists(Engine, Path) of
+ true ->
+ [{Engine, Path} | Acc];
+ false ->
+ Acc
+ end
+ end, [], Engines).
+
+
+get_default_engine(Server, DbName) ->
+ #server{
+ root_dir = RootDir,
+ engines = Engines
+ } = Server,
+ Default = {couch_bt_engine, make_filepath(RootDir, DbName, "couch")},
+ case config:get("couchdb", "default_engine") of
+ Extension when is_list(Extension) ->
+ case lists:keyfind(Extension, 1, Engines) of
+ {Extension, Module} ->
+ {ok, {Module, make_filepath(RootDir, DbName, Extension)}};
+ false ->
+ Fmt = "Invalid storage engine extension ~s,"
+ " configured engine extensions are: ~s",
+ Exts = [E || {E, _} <- Engines],
+ Args = [Extension, string:join(Exts, ", ")],
+ couch_log:error(Fmt, Args),
+ {ok, Default}
+ end;
+ _ ->
+ {ok, Default}
+ end.
+
+
+make_filepath(RootDir, DbName, Extension) when is_binary(RootDir) ->
+ make_filepath(binary_to_list(RootDir), DbName, Extension);
+make_filepath(RootDir, DbName, Extension) when is_binary(DbName) ->
+ make_filepath(RootDir, binary_to_list(DbName), Extension);
+make_filepath(RootDir, DbName, Extension) when is_binary(Extension) ->
+ make_filepath(RootDir, DbName, binary_to_list(Extension));
+make_filepath(RootDir, DbName, Extension) ->
+ filename:join([RootDir, "./" ++ DbName ++ "." ++ Extension]).
+
+
+get_engine_extensions() ->
+ case config:get("couchdb_engines") of
+ [] ->
+ ["couch"];
+ Entries ->
+ [Ext || {Ext, _Mod} <- Entries]
+ end.
+
+
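For reference, the engine lookup above is driven entirely by configuration: config:get("couchdb_engines") yields extension/module pairs, and config:get("couchdb", "default_engine") names the extension used when a database is created without an explicit engine option. A minimal ini sketch of that wiring (the couch = couch_bt_engine pair mirrors the built-in fallback above; any other entry would be a hypothetical third-party engine):

    [couchdb_engines]
    ; file extension = module providing the couch_db_engine callbacks
    couch = couch_bt_engine

    [couchdb]
    ; extension (not module name) used for newly created databases
    default_engine = couch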
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl
index eb64484df..83b0611eb 100644
--- a/src/couch/src/couch_stream.erl
+++ b/src/couch/src/couch_stream.erl
@@ -14,21 +14,39 @@
-behaviour(gen_server).
-vsn(1).
-% public API
--export([open/1, open/2, close/1]).
--export([foldl/4, foldl/5, foldl_decode/6, range_foldl/6]).
--export([copy_to_new_stream/3, write/2]).
-% gen_server callbacks
--export([init/1, terminate/2, code_change/3]).
--export([handle_cast/2, handle_call/3, handle_info/2]).
+-export([
+ open/1,
+ open/2,
+ close/1,
+
+ copy/2,
+ write/2,
+ to_disk_term/1,
+
+ foldl/3,
+ foldl/4,
+ foldl_decode/5,
+ range_foldl/5
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
+]).
+
-include_lib("couch/include/couch_db.hrl").
-define(DEFAULT_BUFFER_SIZE, 4096).
--record(stream,
- {fd = 0,
+
+-record(stream, {
+ engine,
opener_monitor,
written_pointers=[],
buffer_list = [],
@@ -42,114 +60,94 @@
identity_len = 0,
encoding_fun,
end_encoding_fun
- }).
+}).
+
+open({_StreamEngine, _StreamEngineState} = Engine) ->
+ open(Engine, []).
-%%% Interface functions %%%
-open(Fd) ->
- open(Fd, []).
+open({_StreamEngine, _StreamEngineState} = Engine, Options) ->
+ gen_server:start_link(?MODULE, {Engine, self(), erlang:get(io_priority), Options}, []).
-open(Fd, Options) ->
- gen_server:start_link(couch_stream, {Fd, self(), erlang:get(io_priority), Options}, []).
close(Pid) ->
gen_server:call(Pid, close, infinity).
-copy_to_new_stream(Fd, PosList, DestFd) ->
- {ok, Dest} = open(DestFd),
- foldl(Fd, PosList,
- fun(Bin, _) ->
- ok = write(Dest, Bin)
- end, ok),
- close(Dest).
-
-foldl(_Fd, [], _Fun, Acc) ->
- Acc;
-foldl(Fd, [Pos|Rest], Fun, Acc) ->
- {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
- foldl(Fd, Rest, Fun, Fun(Bin, Acc)).
-
-foldl(Fd, PosList, <<>>, Fun, Acc) ->
- foldl(Fd, PosList, Fun, Acc);
-foldl(Fd, PosList, Md5, Fun, Acc) ->
- foldl(Fd, PosList, Md5, crypto:hash_init(md5), Fun, Acc).
-
-foldl_decode(Fd, PosList, Md5, Enc, Fun, Acc) ->
+
+copy(Src, Dst) ->
+ foldl(Src, fun(Bin, _) ->
+ ok = write(Dst, Bin)
+ end, ok).
+
+
+write(_Pid, <<>>) ->
+ ok;
+write(Pid, Bin) ->
+ gen_server:call(Pid, {write, Bin}, infinity).
+
+
+to_disk_term({Engine, EngineState}) ->
+ Engine:to_disk_term(EngineState).
+
+
+foldl({Engine, EngineState}, Fun, Acc) ->
+ Engine:foldl(EngineState, Fun, Acc).
+
+
+foldl(Engine, <<>>, Fun, Acc) ->
+ foldl(Engine, Fun, Acc);
+foldl(Engine, Md5, UserFun, UserAcc) ->
+ InitAcc = {crypto:hash_init(md5), UserFun, UserAcc},
+ {Md5Acc, _, OutAcc} = foldl(Engine, fun foldl_md5/2, InitAcc),
+ Md5 = crypto:hash_final(Md5Acc),
+ OutAcc.
+
+
+foldl_decode(Engine, Md5, Enc, UserFun, UserAcc1) ->
{DecDataFun, DecEndFun} = case Enc of
- gzip ->
- ungzip_init();
- identity ->
- identity_enc_dec_funs()
+ gzip -> ungzip_init();
+ identity -> identity_enc_dec_funs()
end,
- Result = foldl_decode(
- DecDataFun, Fd, PosList, Md5, crypto:hash_init(md5), Fun, Acc
- ),
+ InitAcc = {DecDataFun, UserFun, UserAcc1},
+ {_, _, UserAcc2} = foldl(Engine, Md5, fun foldl_decode/2, InitAcc),
DecEndFun(),
- Result.
+ UserAcc2.
+
+
+range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From ->
+ NewEngine = do_seek(Engine, From),
+ InitAcc = {To - From, UserFun, UserAcc},
+ try
+ {_, _, UserAcc2} = foldl(NewEngine, fun foldl_length/2, InitAcc),
+ UserAcc2
+ catch
+ throw:{finished, UserAcc3} ->
+ UserAcc3
+ end.
-foldl(_Fd, [], Md5, Md5Acc, _Fun, Acc) ->
- Md5 = crypto:hash_final(Md5Acc),
- Acc;
-foldl(Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) -> % 0110 UPGRADE CODE
- foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc);
-foldl(Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
- {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
- Md5 = crypto:hash_final(crypto:hash_update(Md5Acc, Bin)),
- Fun(Bin, Acc);
-foldl(Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
- foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
-foldl(Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
- {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
- foldl(Fd, Rest, Md5, crypto:hash_update(Md5Acc, Bin), Fun, Fun(Bin, Acc)).
-
-range_foldl(Fd, PosList, From, To, Fun, Acc) ->
- range_foldl(Fd, PosList, From, To, 0, Fun, Acc).
-
-range_foldl(_Fd, _PosList, _From, To, Off, _Fun, Acc) when Off >= To ->
- Acc;
-range_foldl(Fd, [Pos|Rest], From, To, Off, Fun, Acc) when is_integer(Pos) -> % old-style attachment
- {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
- range_foldl(Fd, [{Pos, iolist_size(Bin)}] ++ Rest, From, To, Off, Fun, Acc);
-range_foldl(Fd, [{_Pos, Size}|Rest], From, To, Off, Fun, Acc) when From > Off + Size ->
- range_foldl(Fd, Rest, From, To, Off + Size, Fun, Acc);
-range_foldl(Fd, [{Pos, Size}|Rest], From, To, Off, Fun, Acc) ->
- {ok, Bin} = couch_file:pread_iolist(Fd, Pos),
- Bin1 = if
- From =< Off andalso To >= Off + Size -> Bin; %% the whole block is covered
- true ->
- PrefixLen = clip(From - Off, 0, Size),
- PostfixLen = clip(Off + Size - To, 0, Size),
- MatchLen = Size - PrefixLen - PostfixLen,
- <<_Prefix:PrefixLen/binary,Match:MatchLen/binary,_Postfix:PostfixLen/binary>> = iolist_to_binary(Bin),
- Match
- end,
- range_foldl(Fd, Rest, From, To, Off + Size, Fun, Fun(Bin1, Acc)).
-clip(Value, Lo, Hi) ->
- if
- Value < Lo -> Lo;
- Value > Hi -> Hi;
- true -> Value
+foldl_md5(Bin, {Md5Acc, UserFun, UserAcc}) ->
+ NewMd5Acc = crypto:hash_update(Md5Acc, Bin),
+ {NewMd5Acc, UserFun, UserFun(Bin, UserAcc)}.
+
+
+foldl_decode(EncBin, {DecFun, UserFun, UserAcc}) ->
+ case DecFun(EncBin) of
+ <<>> -> {DecFun, UserFun, UserAcc};
+ Dec -> {DecFun, UserFun, UserFun(Dec, UserAcc)}
end.
-foldl_decode(_DecFun, _Fd, [], Md5, Md5Acc, _Fun, Acc) ->
- Md5 = crypto:hash_final(Md5Acc),
- Acc;
-foldl_decode(DecFun, Fd, [{Pos, _Size}], Md5, Md5Acc, Fun, Acc) ->
- foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc);
-foldl_decode(DecFun, Fd, [Pos], Md5, Md5Acc, Fun, Acc) ->
- {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
- Md5 = crypto:hash_final(crypto:hash_update(Md5Acc, EncBin)),
- Bin = DecFun(EncBin),
- Fun(Bin, Acc);
-foldl_decode(DecFun, Fd, [{Pos, _Size}|Rest], Md5, Md5Acc, Fun, Acc) ->
- foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc);
-foldl_decode(DecFun, Fd, [Pos|Rest], Md5, Md5Acc, Fun, Acc) ->
- {ok, EncBin} = couch_file:pread_iolist(Fd, Pos),
- Bin = DecFun(EncBin),
- Md5Acc2 = crypto:hash_update(Md5Acc, EncBin),
- foldl_decode(DecFun, Fd, Rest, Md5, Md5Acc2, Fun, Fun(Bin, Acc)).
+
+foldl_length(Bin, {Length, UserFun, UserAcc}) ->
+ BinSize = size(Bin),
+ case BinSize =< Length of
+ true ->
+ {Length - BinSize, UserFun, UserFun(Bin, UserAcc)};
+ false ->
+ <<Trunc:BinSize/binary, _/binary>> = Bin,
+ throw({finished, UserFun(Trunc, UserAcc)})
+ end.
gzip_init(Options) ->
case couch_util:get_value(compression_level, Options, 0) of
@@ -192,23 +190,16 @@ identity_enc_dec_funs() ->
fun() -> [] end
}.
-write(_Pid, <<>>) ->
- ok;
-write(Pid, Bin) ->
- gen_server:call(Pid, {write, Bin}, infinity).
-
-init({Fd, OpenerPid, OpenerPriority, Options}) ->
+init({Engine, OpenerPid, OpenerPriority, Options}) ->
erlang:put(io_priority, OpenerPriority),
{EncodingFun, EndEncodingFun} =
case couch_util:get_value(encoding, Options, identity) of
- identity ->
- identity_enc_dec_funs();
- gzip ->
- gzip_init(Options)
+ identity -> identity_enc_dec_funs();
+ gzip -> gzip_init(Options)
end,
{ok, #stream{
- fd=Fd,
+ engine=Engine,
opener_monitor=erlang:monitor(process, OpenerPid),
md5=crypto:hash_init(md5),
identity_md5=crypto:hash_init(md5),
@@ -225,9 +216,8 @@ terminate(_Reason, _Stream) ->
handle_call({write, Bin}, _From, Stream) ->
BinSize = iolist_size(Bin),
#stream{
- fd = Fd,
+ engine = Engine,
written_len = WrittenLen,
- written_pointers = Written,
buffer_len = BufferLen,
buffer_list = Buffer,
max_buffer = Max,
@@ -242,19 +232,18 @@ handle_call({write, Bin}, _From, Stream) ->
[] ->
% case where the encoder did some internal buffering
% (zlib does it for example)
+ NewEngine = Engine,
WrittenLen2 = WrittenLen,
- Md5_2 = Md5,
- Written2 = Written;
+ Md5_2 = Md5;
WriteBin2 ->
- {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
+ NewEngine = do_write(Engine, WriteBin2),
WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
- Md5_2 = crypto:hash_update(Md5, WriteBin2),
- Written2 = [{Pos, iolist_size(WriteBin2)}|Written]
+ Md5_2 = crypto:hash_update(Md5, WriteBin2)
end,
{reply, ok, Stream#stream{
+ engine = NewEngine,
written_len=WrittenLen2,
- written_pointers=Written2,
buffer_list=[],
buffer_len=0,
md5=Md5_2,
@@ -268,10 +257,9 @@ handle_call({write, Bin}, _From, Stream) ->
end;
handle_call(close, _From, Stream) ->
#stream{
- fd = Fd,
+ engine = Engine,
opener_monitor = MonRef,
written_len = WrittenLen,
- written_pointers = Written,
buffer_list = Buffer,
md5 = Md5,
identity_md5 = IdenMd5,
@@ -285,12 +273,11 @@ handle_call(close, _From, Stream) ->
Md5Final = crypto:hash_final(crypto:hash_update(Md5, WriteBin2)),
Result = case WriteBin2 of
[] ->
- {lists:reverse(Written), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
+ {do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
_ ->
- {ok, Pos, _} = couch_file:append_binary(Fd, WriteBin2),
- StreamInfo = lists:reverse(Written, [{Pos, iolist_size(WriteBin2)}]),
+ NewEngine = do_write(Engine, WriteBin2),
StreamLen = WrittenLen + iolist_size(WriteBin2),
- {StreamInfo, StreamLen, IdenLen, Md5Final, IdenMd5Final}
+ {do_finalize(NewEngine), StreamLen, IdenLen, Md5Final, IdenMd5Final}
end,
erlang:demonitor(MonRef),
{stop, normal, Result, Stream}.
@@ -305,3 +292,17 @@ handle_info({'DOWN', Ref, _, _, _}, #stream{opener_monitor=Ref} = State) ->
{stop, normal, State};
handle_info(_Info, State) ->
{noreply, State}.
+
+
+do_seek({Engine, EngineState}, Offset) ->
+ {ok, NewState} = Engine:seek(EngineState, Offset),
+ {Engine, NewState}.
+
+do_write({Engine, EngineState}, Data) ->
+ {ok, NewState} = Engine:write(EngineState, Data),
+ {Engine, NewState}.
+
+do_finalize({Engine, EngineState}) ->
+ {ok, NewState} = Engine:finalize(EngineState),
+ {Engine, NewState}.
+
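To make the new stream abstraction concrete: couch_stream now treats a stream as an {Engine, EngineState} pair and only ever calls Engine:write/2, Engine:finalize/1, Engine:to_disk_term/1, Engine:foldl/3 and Engine:seek/2 on it, as the wrappers above show. The module below is a minimal, hypothetical in-memory engine sketched for illustration only; it is not part of this change.

    -module(mem_stream_engine).
    -export([write/2, finalize/1, to_disk_term/1, foldl/3, seek/2]).

    % State is simply the list of binaries written so far, newest first.
    write(Bins, Data) ->
        {ok, [iolist_to_binary(Data) | Bins]}.

    % Called once when the stream is closed; returns the readable state.
    finalize(Bins) ->
        {ok, lists:reverse(Bins)}.

    % Term persisted alongside the attachment. Its exact shape is up to the
    % owning database engine; this sketch just keeps the binaries.
    to_disk_term(Bins) ->
        {ok, Bins}.

    % couch_stream:foldl/3 expects the final accumulator back directly.
    foldl(Bins, Fun, Acc) ->
        lists:foldl(Fun, Acc, Bins).

    % Drop Offset bytes from the front of the stream (used by range_foldl/5).
    seek(Bins, Offset) ->
        {ok, drop(Bins, Offset)}.

    drop(Bins, 0) ->
        Bins;
    drop([], _Offset) ->
        [];
    drop([Bin | Rest], N) when byte_size(Bin) =< N ->
        drop(Rest, N - byte_size(Bin));
    drop([Bin | Rest], N) ->
        <<_:N/binary, Tail/binary>> = Bin,
        [Tail | Rest].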
diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl
index c7b41f1fc..dd6d3208c 100644
--- a/src/couch/src/couch_users_db.erl
+++ b/src/couch/src/couch_users_db.erl
@@ -23,6 +23,7 @@
-define(SIMPLE, <<"simple">>).
-define(PASSWORD_SHA, <<"password_sha">>).
-define(PBKDF2, <<"pbkdf2">>).
+-define(BCRYPT, <<"bcrypt">>).
-define(ITERATIONS, <<"iterations">>).
-define(SALT, <<"salt">>).
-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
@@ -59,7 +60,7 @@ before_doc_update(Doc, Db) ->
% newDoc.salt = salt
% newDoc.password = null
save_doc(#doc{body={Body}} = Doc) ->
- %% Support both schemes to smooth migration from legacy scheme
+ %% Support all schemes to smooth migration from legacy scheme
Scheme = config:get("couch_httpd_auth", "password_scheme", "pbkdf2"),
case {couch_util:get_value(?PASSWORD, Body), Scheme} of
{null, _} -> % server admins don't have a user-db password entry
@@ -84,6 +85,13 @@ save_doc(#doc{body={Body}} = Doc) ->
Body3 = ?replace(Body2, ?SALT, Salt),
Body4 = proplists:delete(?PASSWORD, Body3),
Doc#doc{body={Body4}};
+ {ClearPassword, "bcrypt"} ->
+ LogRounds = list_to_integer(config:get("couch_httpd_auth", "log_rounds", "10")),
+ DerivedKey = couch_passwords:bcrypt(ClearPassword, LogRounds),
+ Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?BCRYPT),
+ Body1 = ?replace(Body0, ?DERIVED_KEY, DerivedKey),
+ Body2 = proplists:delete(?PASSWORD, Body1),
+ Doc#doc{body={Body2}};
{_ClearPassword, Scheme} ->
couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
throw({forbidden, "Server cannot hash passwords at this time."})
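The new clause above is opt-in: couch_users_db only bcrypt-hashes a password when password_scheme selects it, with the work factor taken from log_rounds; the other clauses keep accepting the existing schemes, as the comment about smoothing migration notes. A configuration sketch matching the values the code reads:

    [couch_httpd_auth]
    ; hash newly saved user passwords with bcrypt instead of pbkdf2
    password_scheme = bcrypt
    ; bcrypt cost parameter, i.e. 2^log_rounds rounds of work
    log_rounds = 10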
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index b8a0b623b..f3a9249f7 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -12,7 +12,7 @@
-module(couch_util).
--export([priv_dir/0, normpath/1]).
+-export([priv_dir/0, normpath/1, fold_files/5]).
-export([should_flush/0, should_flush/1, to_existing_atom/1]).
-export([rand32/0, implode/2, collate/2, collate/3]).
-export([abs_pathname/1,abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
@@ -36,6 +36,7 @@
-export([process_dict_get/2, process_dict_get/3]).
-export([unique_monotonic_integer/0]).
-export([check_config_blacklist/1]).
+-export([check_md5/2]).
-include_lib("couch/include/couch_db.hrl").
@@ -77,6 +78,44 @@ normparts(["." | RestParts], Acc) ->
normparts([Part | RestParts], Acc) ->
normparts(RestParts, [Part | Acc]).
+
+% This implementation is similar to the builtin filelib:fold_files/5
+% except that this version will run the user supplied function
+% on directories that match the regular expression as well.
+%
+% This is motivated by the case when couch_server is searching
+% for pluggable storage engines. This change allows a
+% database to be either a file or a directory.
+fold_files(Dir, RegExp, Recursive, Fun, Acc) ->
+ {ok, Re} = re:compile(RegExp, [unicode]),
+ fold_files1(Dir, Re, Recursive, Fun, Acc).
+
+fold_files1(Dir, RegExp, Recursive, Fun, Acc) ->
+ case file:list_dir(Dir) of
+ {ok, Files} ->
+ fold_files2(Files, Dir, RegExp, Recursive, Fun, Acc);
+ {error, _} ->
+ Acc
+ end.
+
+fold_files2([], _Dir, _RegExp, _Recursive, _Fun, Acc) ->
+ Acc;
+fold_files2([File | Rest], Dir, RegExp, Recursive, Fun, Acc0) ->
+ FullName = filename:join(Dir, File),
+ case (catch re:run(File, RegExp, [{capture, none}])) of
+ match ->
+ Acc1 = Fun(FullName, Acc0),
+ fold_files2(Rest, Dir, RegExp, Recursive, Fun, Acc1);
+ _ ->
+ case Recursive andalso filelib:is_dir(FullName) of
+ true ->
+ Acc1 = fold_files1(FullName, RegExp, Recursive, Fun, Acc0),
+ fold_files2(Rest, Dir, RegExp, Recursive, Fun, Acc1);
+ false ->
+ fold_files2(Rest, Dir, RegExp, Recursive, Fun, Acc0)
+ end
+ end.
+
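A usage sketch for the helper above (the function name, directory and regular expression here are illustrative, not taken from couch_server): collect every entry under a root directory whose name ends in .couch, whether it is a plain file or a directory-based database.

    find_couch_paths(RootDir) ->
        % Unlike filelib:fold_files/5, a directory whose name matches the
        % pattern is passed to the fun itself and is not descended into;
        % non-matching directories are still searched recursively.
        couch_util:fold_files(RootDir, "\\.couch$", true,
            fun(Path, Acc) -> [Path | Acc] end, []).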
% works like list_to_existing_atom, except can be list or binary and it
% gives you the original value instead of an error if no existing atom.
to_existing_atom(V) when is_list(V) ->
@@ -594,6 +633,12 @@ validate_callback_exists(Module, Function, Arity) ->
{undefined_callback, CallbackStr, {Module, Function, Arity}}})
end.
+
+check_md5(_NewSig, <<>>) -> ok;
+check_md5(Sig, Sig) -> ok;
+check_md5(_, _) -> throw(md5_mismatch).
+
+
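A quick shell-style illustration of the check_md5/2 contract above: an empty expected signature skips verification, a matching signature passes, and any other combination throws md5_mismatch.

    Sig = crypto:hash(md5, <<"payload">>),
    ok = couch_util:check_md5(Sig, <<>>),  % nothing recorded to check against
    ok = couch_util:check_md5(Sig, Sig),   % signatures agree
    md5_mismatch = (catch couch_util:check_md5(Sig, <<"bogus">>)).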
ensure_loaded(Module) when is_atom(Module) ->
case code:ensure_loaded(Module) of
{module, Module} ->
diff --git a/src/couch/src/test_engine_attachments.erl b/src/couch/src/test_engine_attachments.erl
new file mode 100644
index 000000000..691d4bd3c
--- /dev/null
+++ b/src/couch/src/test_engine_attachments.erl
@@ -0,0 +1,93 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_attachments).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+cet_write_attachment() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ AttBin = crypto:strong_rand_bytes(32768),
+
+ try
+ [Att0] = test_engine_util:prep_atts(Engine, St1, [
+ {<<"ohai.txt">>, AttBin}
+ ]),
+
+ {stream, Stream} = couch_att:fetch(data, Att0),
+ ?assertEqual(true, Engine:is_active_stream(St1, Stream)),
+
+ Actions = [{create, {<<"first">>, [], [Att0]}}],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+
+ {ok, St4} = Engine:init(DbPath, []),
+ [FDI] = Engine:open_docs(St4, [<<"first">>]),
+
+ #rev_info{
+ rev = {RevPos, PrevRevId},
+ deleted = Deleted,
+ body_sp = DocPtr
+ } = test_engine_util:prev_rev(FDI),
+
+ Doc0 = #doc{
+ id = <<"foo">>,
+ revs = {RevPos, [PrevRevId]},
+ deleted = Deleted,
+ body = DocPtr
+ },
+
+ Doc1 = Engine:read_doc_body(St4, Doc0),
+ Atts1 = if not is_binary(Doc1#doc.atts) -> Doc1#doc.atts; true ->
+ couch_compress:decompress(Doc1#doc.atts)
+ end,
+
+ StreamSrc = fun(Sp) -> Engine:open_read_stream(St4, Sp) end,
+ [Att1] = [couch_att:from_disk_term(StreamSrc, T) || T <- Atts1],
+ ReadBin = couch_att:to_binary(Att1),
+ ?assertEqual(AttBin, ReadBin)
+ catch throw:not_supported ->
+ ok
+ end.
+
+
+% N.B. This test may be overly specific for some theoretical
+% storage engines that don't re-initialize their
+% attachment streams when restarting (for instance if
+% we ever have something that stores attachments in
+% an external object store)
+cet_inactive_stream() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ AttBin = crypto:strong_rand_bytes(32768),
+
+ try
+ [Att0] = test_engine_util:prep_atts(Engine, St1, [
+ {<<"ohai.txt">>, AttBin}
+ ]),
+
+ {stream, Stream} = couch_att:fetch(data, Att0),
+ ?assertEqual(true, Engine:is_active_stream(St1, Stream)),
+
+ Engine:terminate(normal, St1),
+ {ok, St2} = Engine:init(DbPath, []),
+
+ ?assertEqual(false, Engine:is_active_stream(St2, Stream))
+ catch throw:not_supported ->
+ ok
+ end.
diff --git a/src/couch/src/test_engine_compaction.erl b/src/couch/src/test_engine_compaction.erl
new file mode 100644
index 000000000..09a1e4ea5
--- /dev/null
+++ b/src/couch/src/test_engine_compaction.erl
@@ -0,0 +1,185 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_compaction).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+cet_compact_empty() ->
+ {ok, Engine, Path, St1} = test_engine_util:init_engine(dbpath),
+ Db1 = test_engine_util:db_as_term(Engine, St1),
+ {ok, St2, DbName, _, Term} = test_engine_util:compact(Engine, St1, Path),
+ {ok, St3, undefined} = Engine:finish_compaction(St2, DbName, [], Term),
+ Db2 = test_engine_util:db_as_term(Engine, St3),
+ Diff = test_engine_util:term_diff(Db1, Db2),
+ ?assertEqual(nodiff, Diff).
+
+
+cet_compact_doc() ->
+ {ok, Engine, Path, St1} = test_engine_util:init_engine(dbpath),
+ Actions = [{create, {<<"foo">>, []}}],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ Db1 = test_engine_util:db_as_term(Engine, St2),
+ {ok, St3, DbName, _, Term} = test_engine_util:compact(Engine, St2, Path),
+ {ok, St4, undefined} = Engine:finish_compaction(St3, DbName, [], Term),
+ Db2 = test_engine_util:db_as_term(Engine, St4),
+ Diff = test_engine_util:term_diff(Db1, Db2),
+ ?assertEqual(nodiff, Diff).
+
+
+cet_compact_local_doc() ->
+ {ok, Engine, Path, St1} = test_engine_util:init_engine(dbpath),
+ Actions = [{create, {<<"_local/foo">>, []}}],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ Db1 = test_engine_util:db_as_term(Engine, St2),
+ {ok, St3, DbName, _, Term} = test_engine_util:compact(Engine, St2, Path),
+ {ok, St4, undefined} = Engine:finish_compaction(St3, DbName, [], Term),
+ Db2 = test_engine_util:db_as_term(Engine, St4),
+ Diff = test_engine_util:term_diff(Db1, Db2),
+ ?assertEqual(nodiff, Diff).
+
+
+cet_compact_with_everything() ->
+ {ok, Engine, Path, St1} = test_engine_util:init_engine(dbpath),
+
+ % Add a whole bunch of docs
+ DocActions = lists:map(fun(Seq) ->
+ {create, {docid(Seq), [{<<"int">>, Seq}]}}
+ end, lists:seq(1, 1000)),
+
+ LocalActions = lists:map(fun(I) ->
+ {create, {local_docid(I), [{<<"int">>, I}]}}
+ end, lists:seq(1, 25)),
+
+ Actions1 = DocActions ++ LocalActions,
+
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions1),
+ {ok, St3} = Engine:set_security(St2, [{<<"readers">>, <<"ohai">>}]),
+ {ok, St4} = Engine:set_revs_limit(St3, 500),
+
+ Actions2 = [
+ {create, {<<"foo">>, []}},
+ {create, {<<"bar">>, [{<<"hooray">>, <<"purple">>}]}},
+ {conflict, {<<"bar">>, [{<<"booo">>, false}]}}
+ ],
+
+ {ok, St5} = test_engine_util:apply_actions(Engine, St4, Actions2),
+
+ [FooFDI, BarFDI] = Engine:open_docs(St5, [<<"foo">>, <<"bar">>]),
+
+ FooRev = test_engine_util:prev_rev(FooFDI),
+ BarRev = test_engine_util:prev_rev(BarFDI),
+
+ Actions3 = [
+ {batch, [
+ {purge, {<<"foo">>, FooRev#rev_info.rev}},
+ {purge, {<<"bar">>, BarRev#rev_info.rev}}
+ ]}
+ ],
+
+ {ok, St6} = test_engine_util:apply_actions(Engine, St5, Actions3),
+
+ PurgedIdRevs = [
+ {<<"bar">>, [BarRev#rev_info.rev]},
+ {<<"foo">>, [FooRev#rev_info.rev]}
+ ],
+
+ ?assertEqual(PurgedIdRevs, lists:sort(Engine:get_last_purged(St6))),
+
+ {ok, St7} = try
+ [Att0, Att1, Att2, Att3, Att4] = test_engine_util:prep_atts(Engine, St6, [
+ {<<"ohai.txt">>, crypto:strong_rand_bytes(2048)},
+ {<<"stuff.py">>, crypto:strong_rand_bytes(32768)},
+ {<<"a.erl">>, crypto:strong_rand_bytes(29)},
+ {<<"a.hrl">>, crypto:strong_rand_bytes(5000)},
+ {<<"a.app">>, crypto:strong_rand_bytes(400)}
+ ]),
+
+ Actions4 = [
+ {create, {<<"small_att">>, [], [Att0]}},
+ {create, {<<"large_att">>, [], [Att1]}},
+ {create, {<<"multi_att">>, [], [Att2, Att3, Att4]}}
+ ],
+ test_engine_util:apply_actions(Engine, St6, Actions4)
+ catch throw:not_supported ->
+ {ok, St6}
+ end,
+ {ok, St8} = Engine:commit_data(St7),
+
+ Db1 = test_engine_util:db_as_term(Engine, St8),
+
+ Config = [
+ {"database_compaction", "doc_buffer_size", "1024"},
+ {"database_compaction", "checkpoint_after", "2048"}
+ ],
+
+ {ok, St9, DbName, _, Term} = test_engine_util:with_config(Config, fun() ->
+ test_engine_util:compact(Engine, St8, Path)
+ end),
+
+ {ok, St10, undefined} = Engine:finish_compaction(St9, DbName, [], Term),
+ Db2 = test_engine_util:db_as_term(Engine, St10),
+ Diff = test_engine_util:term_diff(Db1, Db2),
+ ?assertEqual(nodiff, Diff).
+
+
+cet_recompact_updates() ->
+ {ok, Engine, Path, St1} = test_engine_util:init_engine(dbpath),
+
+ Actions1 = [
+ {create, {<<"foo">>, []}},
+ {create, {<<"bar">>, []}}
+ ],
+
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions1),
+ {ok, St3, DbName, _, Term} = test_engine_util:compact(Engine, St2, Path),
+
+ Actions2 = [
+ {update, {<<"foo">>, [{<<"updated">>, true}]}},
+ {create, {<<"baz">>, []}}
+ ],
+
+ {ok, St4} = test_engine_util:apply_actions(Engine, St3, Actions2),
+ Db1 = test_engine_util:db_as_term(Engine, St4),
+
+ {ok, St5, NewPid} = Engine:finish_compaction(St4, DbName, [], Term),
+
+ ?assertEqual(true, is_pid(NewPid)),
+ Ref = erlang:monitor(process, NewPid),
+
+ NewTerm = receive
+ {'$gen_cast', {compact_done, Engine, Term0}} ->
+ Term0;
+ {'DOWN', Ref, _, _, Reason} ->
+ erlang:error({compactor_died, Reason})
+ after 10000 ->
+ erlang:error(compactor_timed_out)
+ end,
+
+ {ok, St6, undefined} = Engine:finish_compaction(St5, DbName, [], NewTerm),
+ Db2 = test_engine_util:db_as_term(Engine, St6),
+ Diff = test_engine_util:term_diff(Db1, Db2),
+ ?assertEqual(nodiff, Diff).
+
+
+docid(I) ->
+ Str = io_lib:format("~4..0b", [I]),
+ iolist_to_binary(Str).
+
+
+local_docid(I) ->
+ Str = io_lib:format("_local/~4..0b", [I]),
+ iolist_to_binary(Str).
diff --git a/src/couch/src/test_engine_fold_changes.erl b/src/couch/src/test_engine_fold_changes.erl
new file mode 100644
index 000000000..6e97fda9b
--- /dev/null
+++ b/src/couch/src/test_engine_fold_changes.erl
@@ -0,0 +1,190 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_fold_changes).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(NUM_DOCS, 100).
+
+
+cet_empty_changes() ->
+ {ok, Engine, St} = test_engine_util:init_engine(),
+
+ ?assertEqual(0, Engine:count_changes_since(St, 0)),
+ ?assertEqual({ok, []}, Engine:fold_changes(St, 0, fun fold_fun/2, [], [])).
+
+
+cet_single_change() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ Actions = [{create, {<<"a">>, []}}],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+
+ ?assertEqual(1, Engine:count_changes_since(St2, 0)),
+ ?assertEqual({ok, [{<<"a">>, 1}]},
+ Engine:fold_changes(St2, 0, fun fold_fun/2, [], [])).
+
+
+cet_two_changes() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ Actions = [
+ {create, {<<"a">>, []}},
+ {create, {<<"b">>, []}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+
+ ?assertEqual(2, Engine:count_changes_since(St2, 0)),
+ {ok, Changes} = Engine:fold_changes(St2, 0, fun fold_fun/2, [], []),
+ ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
+
+
+cet_two_changes_batch() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ Actions1 = [
+ {batch, [
+ {create, {<<"a">>, []}},
+ {create, {<<"b">>, []}}
+ ]}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions1),
+
+ ?assertEqual(2, Engine:count_changes_since(St2, 0)),
+ {ok, Changes1} = Engine:fold_changes(St2, 0, fun fold_fun/2, [], []),
+ ?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes1)),
+
+ {ok, Engine, St3} = test_engine_util:init_engine(),
+ Actions2 = [
+ {batch, [
+ {create, {<<"b">>, []}},
+ {create, {<<"a">>, []}}
+ ]}
+ ],
+ {ok, St4} = test_engine_util:apply_actions(Engine, St3, Actions2),
+
+ ?assertEqual(2, Engine:count_changes_since(St4, 0)),
+ {ok, Changes2} = Engine:fold_changes(St4, 0, fun fold_fun/2, [], []),
+ ?assertEqual([{<<"b">>, 1}, {<<"a">>, 2}], lists:reverse(Changes2)).
+
+
+cet_update_one() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ Actions = [
+ {create, {<<"a">>, []}},
+ {update, {<<"a">>, []}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+
+ ?assertEqual(1, Engine:count_changes_since(St2, 0)),
+ ?assertEqual({ok, [{<<"a">>, 2}]},
+ Engine:fold_changes(St2, 0, fun fold_fun/2, [], [])).
+
+
+cet_update_first_of_two() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ Actions = [
+ {create, {<<"a">>, []}},
+ {create, {<<"b">>, []}},
+ {update, {<<"a">>, []}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+
+ ?assertEqual(2, Engine:count_changes_since(St2, 0)),
+ {ok, Changes} = Engine:fold_changes(St2, 0, fun fold_fun/2, [], []),
+ ?assertEqual([{<<"b">>, 2}, {<<"a">>, 3}], lists:reverse(Changes)).
+
+
+cet_update_second_of_two() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ Actions = [
+ {create, {<<"a">>, []}},
+ {create, {<<"b">>, []}},
+ {update, {<<"b">>, []}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+
+ ?assertEqual(2, Engine:count_changes_since(St2, 0)),
+ {ok, Changes} = Engine:fold_changes(St2, 0, fun fold_fun/2, [], []),
+ ?assertEqual([{<<"a">>, 1}, {<<"b">>, 3}], lists:reverse(Changes)).
+
+
+cet_check_mutation_ordering() ->
+ Actions = shuffle(lists:map(fun(Seq) ->
+ {create, {docid(Seq), []}}
+ end, lists:seq(1, ?NUM_DOCS))),
+
+ DocIdOrder = [DocId || {_, {DocId, _}} <- Actions],
+ DocSeqs = lists:zip(DocIdOrder, lists:seq(1, ?NUM_DOCS)),
+
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+
+ % First let's see that we can get the correct
+ % suffix/prefix starting at every update sequence
+ lists:foreach(fun(Seq) ->
+ {ok, Suffix} = Engine:fold_changes(St2, Seq, fun fold_fun/2, [], []),
+ ?assertEqual(lists:nthtail(Seq, DocSeqs), lists:reverse(Suffix)),
+
+ {ok, Prefix} = Engine:fold_changes(St2, Seq, fun fold_fun/2, [], [
+ {dir, rev}
+ ]),
+ ?assertEqual(lists:sublist(DocSeqs, Seq + 1), Prefix)
+ end, lists:seq(0, ?NUM_DOCS)),
+
+ ok = do_mutation_ordering(Engine, St2, ?NUM_DOCS + 1, DocSeqs, []).
+
+
+do_mutation_ordering(Engine, St, _Seq, [], FinalDocSeqs) ->
+ {ok, RevOrder} = Engine:fold_changes(St, 0, fun fold_fun/2, [], []),
+ ?assertEqual(FinalDocSeqs, lists:reverse(RevOrder)),
+ ok;
+
+do_mutation_ordering(Engine, St, Seq, [{DocId, _OldSeq} | Rest], DocSeqAcc) ->
+ Actions = [{update, {DocId, []}}],
+ {ok, NewSt} = test_engine_util:apply_actions(Engine, St, Actions),
+ NewAcc = DocSeqAcc ++ [{DocId, Seq}],
+ Expected = Rest ++ NewAcc,
+ {ok, RevOrder} = Engine:fold_changes(NewSt, 0, fun fold_fun/2, [], []),
+ ?assertEqual(Expected, lists:reverse(RevOrder)),
+ do_mutation_ordering(Engine, NewSt, Seq + 1, Rest, NewAcc).
+
+
+shuffle(List) ->
+ random:seed(os:timestamp()),
+ Paired = [{random:uniform(), I} || I <- List],
+ Sorted = lists:sort(Paired),
+ [I || {_, I} <- Sorted].
+
+
+remove_random(List) ->
+ Pos = random:uniform(length(List)),
+ remove_random(Pos, List).
+
+
+remove_random(1, [Item | Rest]) ->
+ {Item, Rest};
+
+remove_random(N, [Skip | Rest]) when N > 1 ->
+ {Item, Tail} = remove_random(N - 1, Rest),
+ {Item, [Skip | Tail]}.
+
+
+fold_fun(#full_doc_info{id=Id, update_seq=Seq}, Acc) ->
+ {ok, [{Id, Seq} | Acc]}.
+
+
+docid(I) ->
+ Str = io_lib:format("~4..0b", [I]),
+ iolist_to_binary(Str).
diff --git a/src/couch/src/test_engine_fold_docs.erl b/src/couch/src/test_engine_fold_docs.erl
new file mode 100644
index 000000000..458878d97
--- /dev/null
+++ b/src/couch/src/test_engine_fold_docs.erl
@@ -0,0 +1,390 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_fold_docs).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(NUM_DOCS, 100).
+
+
+cet_fold_all() ->
+ fold_all(fold_docs, fun docid/1).
+
+
+cet_fold_all_local() ->
+ fold_all(fold_local_docs, fun local_docid/1).
+
+
+cet_fold_start_key() ->
+ fold_start_key(fold_docs, fun docid/1).
+
+
+cet_fold_start_key_local() ->
+ fold_start_key(fold_local_docs, fun local_docid/1).
+
+
+cet_fold_end_key() ->
+ fold_end_key(fold_docs, fun docid/1).
+
+
+cet_fold_end_key_local() ->
+ fold_end_key(fold_local_docs, fun local_docid/1).
+
+
+cet_fold_end_key_gt() ->
+ fold_end_key_gt(fold_docs, fun docid/1).
+
+
+cet_fold_end_key_gt_local() ->
+ fold_end_key_gt(fold_local_docs, fun local_docid/1).
+
+
+cet_fold_range() ->
+ fold_range(fold_docs, fun docid/1).
+
+
+cet_fold_range_local() ->
+ fold_range(fold_local_docs, fun local_docid/1).
+
+
+cet_fold_stop() ->
+ fold_stop(fold_docs, fun docid/1).
+
+
+cet_fold_stop_local() ->
+ fold_stop(fold_local_docs, fun local_docid/1).
+
+
+% This is a loose test but we have to have this until
+% I figure out what to do about the total_rows/offset
+% metadata included in _all_docs
+cet_fold_include_reductions() ->
+ {ok, Engine, St} = init_st(fun docid/1),
+ FoldFun = fun(_, _, nil) -> {ok, nil} end,
+ {ok, Count, nil} = Engine:fold_docs(St, FoldFun, nil, [include_reductions]),
+ ?assert(is_integer(Count)),
+ ?assert(Count >= 0).
+
+
+fold_all(FoldFun, DocIdFun) ->
+ DocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
+ {ok, Engine, St} = init_st(DocIdFun),
+
+ {ok, DocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], []),
+ ?assertEqual(?NUM_DOCS, length(DocIdAccFwd)),
+ ?assertEqual(DocIds, lists:reverse(DocIdAccFwd)),
+
+ {ok, DocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [{dir, rev}]),
+ ?assertEqual(?NUM_DOCS, length(DocIdAccRev)),
+ ?assertEqual(DocIds, DocIdAccRev).
+
+
+fold_start_key(FoldFun, DocIdFun) ->
+ {ok, Engine, St} = init_st(DocIdFun),
+
+ StartKeyNum = ?NUM_DOCS div 4,
+ StartKey = DocIdFun(StartKeyNum),
+
+ AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
+ DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, ?NUM_DOCS)],
+ DocIdsRev = [DocIdFun(I) || I <- lists:seq(1, StartKeyNum)],
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {start_key, <<255>>}
+ ])),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {start_key, <<"">>}
+ ])),
+
+ {ok, AllDocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {start_key, <<"">>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
+ ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
+
+ {ok, AllDocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {start_key, <<255>>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
+ ?assertEqual(AllDocIds, AllDocIdAccRev),
+
+ {ok, DocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {start_key, StartKey}
+ ]),
+ ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
+ ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
+
+ {ok, DocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {start_key, StartKey}
+ ]),
+ ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
+ ?assertEqual(DocIdsRev, DocIdAccRev).
+
+
+fold_end_key(FoldFun, DocIdFun) ->
+ {ok, Engine, St} = init_st(DocIdFun),
+
+ EndKeyNum = ?NUM_DOCS div 4,
+ EndKey = DocIdFun(EndKeyNum),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {end_key, <<"">>}
+ ])),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {end_key, <<255>>}
+ ])),
+
+ AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
+
+ {ok, AllDocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {end_key, <<255>>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
+ ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
+
+ {ok, AllDocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {end_key, <<"">>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
+ ?assertEqual(AllDocIds, AllDocIdAccRev),
+
+ DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum)],
+
+ {ok, DocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {end_key, EndKey}
+ ]),
+ ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
+ ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
+
+ DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum, ?NUM_DOCS)],
+
+ {ok, DocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {end_key, EndKey}
+ ]),
+ ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
+ ?assertEqual(DocIdsRev, DocIdAccRev).
+
+
+fold_end_key_gt(FoldFun, DocIdFun) ->
+ {ok, Engine, St} = init_st(DocIdFun),
+
+ EndKeyNum = ?NUM_DOCS div 4,
+ EndKey = DocIdFun(EndKeyNum),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {end_key_gt, <<"">>}
+ ])),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {end_key_gt, <<255>>}
+ ])),
+
+ AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
+
+ {ok, AllDocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {end_key_gt, <<255>>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
+ ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
+
+ {ok, AllDocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {end_key_gt, <<"">>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
+ ?assertEqual(AllDocIds, AllDocIdAccRev),
+
+ DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum - 1)],
+
+ {ok, DocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {end_key_gt, EndKey}
+ ]),
+ ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
+ ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
+
+ DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum + 1, ?NUM_DOCS)],
+
+ {ok, DocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {end_key_gt, EndKey}
+ ]),
+ ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
+ ?assertEqual(DocIdsRev, DocIdAccRev).
+
+
+fold_range(FoldFun, DocIdFun) ->
+ {ok, Engine, St} = init_st(DocIdFun),
+
+ StartKeyNum = ?NUM_DOCS div 4,
+ EndKeyNum = StartKeyNum * 3,
+
+ StartKey = DocIdFun(StartKeyNum),
+ EndKey = DocIdFun(EndKeyNum),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {start_key, <<"">>},
+ {end_key, <<"">>}
+ ])),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {start_key, <<"">>},
+ {end_key, <<255>>}
+ ])),
+
+ AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
+
+ {ok, AllDocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {start_key, <<"">>},
+ {end_key, <<255>>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
+ ?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
+
+ {ok, AllDocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {start_key, <<255>>},
+ {end_key_gt, <<"">>}
+ ]),
+ ?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
+ ?assertEqual(AllDocIds, AllDocIdAccRev),
+
+ DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
+
+ {ok, DocIdAccFwd} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {start_key, StartKey},
+ {end_key, EndKey}
+ ]),
+ ?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
+ ?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
+
+ DocIdsRev = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {start_key, StartKey},
+ {end_key, EndKey}
+ ])),
+
+ {ok, DocIdAccRev} = Engine:FoldFun(St, fun fold_fun/2, [], [
+ {dir, rev},
+ {start_key, EndKey},
+ {end_key, StartKey}
+ ]),
+ ?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
+ ?assertEqual(DocIdsRev, DocIdAccRev).
+
+
+fold_stop(FoldFun, DocIdFun) ->
+ {ok, Engine, St} = init_st(DocIdFun),
+
+ StartKeyNum = ?NUM_DOCS div 4,
+ StartKey = DocIdFun(StartKeyNum),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun_stop/2, [], [
+ {start_key, <<255>>}
+ ])),
+
+ ?assertEqual({ok, []}, Engine:FoldFun(St, fun fold_fun_stop/2, [], [
+ {dir, rev},
+ {start_key, <<"">>}
+ ])),
+
+ SuffixDocIds = [DocIdFun(I) || I <- lists:seq(?NUM_DOCS - 3, ?NUM_DOCS)],
+
+ {ok, SuffixDocIdAcc} = Engine:FoldFun(St, fun fold_fun_stop/2, [], [
+ {start_key, DocIdFun(?NUM_DOCS - 3)}
+ ]),
+ ?assertEqual(length(SuffixDocIds), length(SuffixDocIdAcc)),
+ ?assertEqual(SuffixDocIds, lists:reverse(SuffixDocIdAcc)),
+
+ PrefixDocIds = [DocIdFun(I) || I <- lists:seq(1, 3)],
+
+ {ok, PrefixDocIdAcc} = Engine:FoldFun(St, fun fold_fun_stop/2, [], [
+ {dir, rev},
+ {start_key, DocIdFun(3)}
+ ]),
+ ?assertEqual(3, length(PrefixDocIdAcc)),
+ ?assertEqual(PrefixDocIds, PrefixDocIdAcc),
+
+ FiveDocIdsFwd = [DocIdFun(I)
+ || I <- lists:seq(StartKeyNum, StartKeyNum + 5)],
+
+ {ok, FiveDocIdAccFwd} = Engine:FoldFun(St, fun fold_fun_stop/2, [], [
+ {start_key, StartKey}
+ ]),
+ ?assertEqual(length(FiveDocIdsFwd), length(FiveDocIdAccFwd)),
+ ?assertEqual(FiveDocIdsFwd, lists:reverse(FiveDocIdAccFwd)),
+
+ FiveDocIdsRev = [DocIdFun(I)
+ || I <- lists:seq(StartKeyNum - 5, StartKeyNum)],
+
+ {ok, FiveDocIdAccRev} = Engine:FoldFun(St, fun fold_fun_stop/2, [], [
+ {dir, rev},
+ {start_key, StartKey}
+ ]),
+ ?assertEqual(length(FiveDocIdsRev), length(FiveDocIdAccRev)),
+ ?assertEqual(FiveDocIdsRev, FiveDocIdAccRev).
+
+
+init_st(DocIdFun) ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+ Actions = lists:map(fun(Id) ->
+ {create, {DocIdFun(Id), [{<<"int">>, Id}]}}
+ end, lists:seq(1, ?NUM_DOCS)),
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, Engine, St2}.
+
+
+fold_fun(Doc, Acc) ->
+ Id = case Doc of
+ #doc{id = Id0} -> Id0;
+ #full_doc_info{id = Id0} -> Id0
+ end,
+ {ok, [Id | Acc]}.
+
+
+fold_fun_stop(Doc, Acc) ->
+ Id = case Doc of
+ #doc{id = Id0} -> Id0;
+ #full_doc_info{id = Id0} -> Id0
+ end,
+ case length(Acc) of
+ N when N =< 4 ->
+ {ok, [Id | Acc]};
+ _ ->
+ {stop, [Id | Acc]}
+ end.
+
+
+docid(I) ->
+ Str = io_lib:format("~4..0b", [I]),
+ iolist_to_binary(Str).
+
+
+local_docid(I) ->
+ Str = io_lib:format("_local/~4..0b", [I]),
+ iolist_to_binary(Str).
diff --git a/src/couch/src/test_engine_get_set_props.erl b/src/couch/src/test_engine_get_set_props.erl
new file mode 100644
index 000000000..6d2a44779
--- /dev/null
+++ b/src/couch/src/test_engine_get_set_props.erl
@@ -0,0 +1,70 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_get_set_props).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+cet_default_props() ->
+ Engine = test_engine_util:get_engine(),
+ DbPath = test_engine_util:dbpath(),
+
+ {ok, St} = Engine:init(DbPath, [
+ create,
+ {default_security_object, dso}
+ ]),
+
+ Node = node(),
+
+ ?assertEqual(0, Engine:get_doc_count(St)),
+ ?assertEqual(0, Engine:get_del_doc_count(St)),
+ ?assertEqual(true, is_list(Engine:get_size_info(St))),
+ ?assertEqual(true, is_integer(Engine:get_disk_version(St))),
+ ?assertEqual(0, Engine:get_update_seq(St)),
+ ?assertEqual(0, Engine:get_purge_seq(St)),
+ ?assertEqual([], Engine:get_last_purged(St)),
+ ?assertEqual(dso, Engine:get_security(St)),
+ ?assertEqual(1000, Engine:get_revs_limit(St)),
+ ?assertMatch(<<_:32/binary>>, Engine:get_uuid(St)),
+ ?assertEqual([{Node, 0}], Engine:get_epochs(St)),
+ ?assertEqual(0, Engine:get_compacted_seq(St)).
+
+
+cet_set_security() ->
+ check_prop_set(get_security, set_security, dso, [{<<"readers">>, []}]).
+
+
+cet_set_revs_limit() ->
+ check_prop_set(get_revs_limit, set_revs_limit, 1000, 50).
+
+
+check_prop_set(GetFun, SetFun, Default, Value) ->
+ Engine = test_engine_util:get_engine(),
+ DbPath = test_engine_util:dbpath(),
+
+ {ok, St0} = Engine:init(DbPath, [
+ create,
+ {default_security_object, dso}
+ ]),
+ ?assertEqual(Default, Engine:GetFun(St0)),
+
+ {ok, St1} = Engine:SetFun(St0, Value),
+ ?assertEqual(Value, Engine:GetFun(St1)),
+
+ {ok, St2} = Engine:commit_data(St1),
+ Engine:terminate(normal, St2),
+
+ {ok, St3} = Engine:init(DbPath, []),
+ ?assertEqual(Value, Engine:GetFun(St3)).
diff --git a/src/couch/src/test_engine_open_close_delete.erl b/src/couch/src/test_engine_open_close_delete.erl
new file mode 100644
index 000000000..b099d9fb0
--- /dev/null
+++ b/src/couch/src/test_engine_open_close_delete.erl
@@ -0,0 +1,81 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_open_close_delete).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+cet_open_non_existent() ->
+ Engine = test_engine_util:get_engine(),
+ DbPath = test_engine_util:dbpath(),
+
+ ?assertEqual(false, Engine:exists(DbPath)),
+ ?assertThrow({not_found, no_db_file}, Engine:init(DbPath, [])),
+ ?assertEqual(false, Engine:exists(DbPath)).
+
+
+cet_open_create() ->
+ process_flag(trap_exit, true),
+ Engine = test_engine_util:get_engine(),
+ DbPath = test_engine_util:dbpath(),
+
+ ?assertEqual(false, Engine:exists(DbPath)),
+ ?assertMatch({ok, _}, Engine:init(DbPath, [create])),
+ ?assertEqual(true, Engine:exists(DbPath)).
+
+
+cet_open_when_exists() ->
+ Engine = test_engine_util:get_engine(),
+ DbPath = test_engine_util:dbpath(),
+
+ ?assertEqual(false, Engine:exists(DbPath)),
+ ?assertMatch({ok, _}, Engine:init(DbPath, [create])),
+ ?assertThrow({error, eexist}, Engine:init(DbPath, [create])).
+
+
+cet_terminate() ->
+ Engine = test_engine_util:get_engine(),
+ DbPath = test_engine_util:dbpath(),
+
+ ?assertEqual(false, Engine:exists(DbPath)),
+ {ok, St} = Engine:init(DbPath, [create]),
+ Engine:terminate(normal, St),
+ ?assertEqual(true, Engine:exists(DbPath)).
+
+
+cet_rapid_recycle() ->
+ Engine = test_engine_util:get_engine(),
+ DbPath = test_engine_util:dbpath(),
+
+ {ok, St0} = Engine:init(DbPath, [create]),
+ Engine:terminate(normal, St0),
+
+ lists:foreach(fun(_) ->
+ {ok, St1} = Engine:init(DbPath, []),
+ Engine:terminate(normal, St1)
+ end, lists:seq(1, 100)).
+
+
+cet_delete() ->
+ Engine = test_engine_util:get_engine(),
+ RootDir = test_engine_util:rootdir(),
+ DbPath = test_engine_util:dbpath(),
+
+ ?assertEqual(false, Engine:exists(DbPath)),
+ {ok, St} = Engine:init(DbPath, [create]),
+ Engine:terminate(normal, St),
+ ?assertEqual(true, Engine:exists(DbPath)),
+ ?assertEqual(ok, Engine:delete(RootDir, DbPath, [async])),
+ ?assertEqual(false, Engine:exists(DbPath)).
diff --git a/src/couch/src/test_engine_purge_docs.erl b/src/couch/src/test_engine_purge_docs.erl
new file mode 100644
index 000000000..e5bf249eb
--- /dev/null
+++ b/src/couch/src/test_engine_purge_docs.erl
@@ -0,0 +1,158 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_purge_docs).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+cet_purge_simple() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+
+ Actions1 = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions1),
+
+ ?assertEqual(1, Engine:get_doc_count(St2)),
+ ?assertEqual(0, Engine:get_del_doc_count(St2)),
+ ?assertEqual(1, Engine:get_update_seq(St2)),
+ ?assertEqual(0, Engine:get_purge_seq(St2)),
+ ?assertEqual([], Engine:get_last_purged(St2)),
+
+ [FDI] = Engine:open_docs(St2, [<<"foo">>]),
+ PrevRev = test_engine_util:prev_rev(FDI),
+ Rev = PrevRev#rev_info.rev,
+
+ Actions2 = [
+ {purge, {<<"foo">>, Rev}}
+ ],
+ {ok, St3} = test_engine_util:apply_actions(Engine, St2, Actions2),
+
+ ?assertEqual(0, Engine:get_doc_count(St3)),
+ ?assertEqual(0, Engine:get_del_doc_count(St3)),
+ ?assertEqual(2, Engine:get_update_seq(St3)),
+ ?assertEqual(1, Engine:get_purge_seq(St3)),
+ ?assertEqual([{<<"foo">>, [Rev]}], Engine:get_last_purged(St3)).
+
+
+cet_purge_conflicts() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+
+ Actions1 = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}},
+ {conflict, {<<"foo">>, [{<<"vsn">>, 2}]}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions1),
+
+ ?assertEqual(1, Engine:get_doc_count(St2)),
+ ?assertEqual(0, Engine:get_del_doc_count(St2)),
+ ?assertEqual(2, Engine:get_update_seq(St2)),
+ ?assertEqual(0, Engine:get_purge_seq(St2)),
+ ?assertEqual([], Engine:get_last_purged(St2)),
+
+ [FDI1] = Engine:open_docs(St2, [<<"foo">>]),
+ PrevRev1 = test_engine_util:prev_rev(FDI1),
+ Rev1 = PrevRev1#rev_info.rev,
+
+ Actions2 = [
+ {purge, {<<"foo">>, Rev1}}
+ ],
+ {ok, St3} = test_engine_util:apply_actions(Engine, St2, Actions2),
+
+ ?assertEqual(1, Engine:get_doc_count(St3)),
+ ?assertEqual(0, Engine:get_del_doc_count(St3)),
+ ?assertEqual(4, Engine:get_update_seq(St3)),
+ ?assertEqual(1, Engine:get_purge_seq(St3)),
+ ?assertEqual([{<<"foo">>, [Rev1]}], Engine:get_last_purged(St3)),
+
+ [FDI2] = Engine:open_docs(St3, [<<"foo">>]),
+ PrevRev2 = test_engine_util:prev_rev(FDI2),
+ Rev2 = PrevRev2#rev_info.rev,
+
+ Actions3 = [
+ {purge, {<<"foo">>, Rev2}}
+ ],
+ {ok, St4} = test_engine_util:apply_actions(Engine, St3, Actions3),
+
+ ?assertEqual(0, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(5, Engine:get_update_seq(St4)),
+ ?assertEqual(2, Engine:get_purge_seq(St4)),
+ ?assertEqual([{<<"foo">>, [Rev2]}], Engine:get_last_purged(St4)).
+
+
+cet_add_delete_purge() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+
+ Actions1 = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}},
+ {delete, {<<"foo">>, [{<<"vsn">>, 2}]}}
+ ],
+
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions1),
+
+ ?assertEqual(0, Engine:get_doc_count(St2)),
+ ?assertEqual(1, Engine:get_del_doc_count(St2)),
+ ?assertEqual(2, Engine:get_update_seq(St2)),
+ ?assertEqual(0, Engine:get_purge_seq(St2)),
+ ?assertEqual([], Engine:get_last_purged(St2)),
+
+ [FDI] = Engine:open_docs(St2, [<<"foo">>]),
+ PrevRev = test_engine_util:prev_rev(FDI),
+ Rev = PrevRev#rev_info.rev,
+
+ Actions2 = [
+ {purge, {<<"foo">>, Rev}}
+ ],
+ {ok, St3} = test_engine_util:apply_actions(Engine, St2, Actions2),
+
+ ?assertEqual(0, Engine:get_doc_count(St3)),
+ ?assertEqual(0, Engine:get_del_doc_count(St3)),
+ ?assertEqual(3, Engine:get_update_seq(St3)),
+ ?assertEqual(1, Engine:get_purge_seq(St3)),
+ ?assertEqual([{<<"foo">>, [Rev]}], Engine:get_last_purged(St3)).
+
+
+cet_add_two_purge_one() ->
+ {ok, Engine, St1} = test_engine_util:init_engine(),
+
+ Actions1 = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}},
+ {create, {<<"bar">>, []}}
+ ],
+
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions1),
+
+ ?assertEqual(2, Engine:get_doc_count(St2)),
+ ?assertEqual(0, Engine:get_del_doc_count(St2)),
+ ?assertEqual(2, Engine:get_update_seq(St2)),
+ ?assertEqual(0, Engine:get_purge_seq(St2)),
+ ?assertEqual([], Engine:get_last_purged(St2)),
+
+ [FDI] = Engine:open_docs(St2, [<<"foo">>]),
+ PrevRev = test_engine_util:prev_rev(FDI),
+ Rev = PrevRev#rev_info.rev,
+
+ Actions2 = [
+ {purge, {<<"foo">>, Rev}}
+ ],
+ {ok, St3} = test_engine_util:apply_actions(Engine, St2, Actions2),
+
+ ?assertEqual(1, Engine:get_doc_count(St3)),
+ ?assertEqual(0, Engine:get_del_doc_count(St3)),
+ ?assertEqual(3, Engine:get_update_seq(St3)),
+ ?assertEqual(1, Engine:get_purge_seq(St3)),
+ ?assertEqual([{<<"foo">>, [Rev]}], Engine:get_last_purged(St3)).
diff --git a/src/couch/src/test_engine_read_write_docs.erl b/src/couch/src/test_engine_read_write_docs.erl
new file mode 100644
index 000000000..4307702d4
--- /dev/null
+++ b/src/couch/src/test_engine_read_write_docs.erl
@@ -0,0 +1,317 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_read_write_docs).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+cet_read_empty_docs() ->
+ {ok, Engine, St} = test_engine_util:init_engine(),
+
+ ?assertEqual([not_found], Engine:open_docs(St, [<<"foo">>])),
+ ?assertEqual(
+ [not_found, not_found],
+ Engine:open_docs(St, [<<"a">>, <<"b">>])
+ ).
+
+
+cet_read_empty_local_docs() ->
+ {ok, Engine, St} = test_engine_util:init_engine(),
+
+ ?assertEqual([not_found], Engine:open_local_docs(St, [<<"_local/foo">>])),
+ ?assertEqual(
+ [not_found, not_found],
+ Engine:open_local_docs(St, [<<"_local/a">>, <<"_local/b">>])
+ ).
+
+
+cet_write_one_doc() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(1, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(1, Engine:get_update_seq(St4)),
+
+ [FDI] = Engine:open_docs(St4, [<<"foo">>]),
+ #rev_info{
+ rev = {RevPos, PrevRevId},
+ deleted = Deleted,
+ body_sp = DocPtr
+ } = test_engine_util:prev_rev(FDI),
+
+ Doc0 = #doc{
+ id = <<"foo">>,
+ revs = {RevPos, [PrevRevId]},
+ deleted = Deleted,
+ body = DocPtr
+ },
+
+ Doc1 = Engine:read_doc_body(St4, Doc0),
+ Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
+ couch_compress:decompress(Doc1#doc.body)
+ end,
+ ?assertEqual([{<<"vsn">>, 1}], Body1).
+
+
+cet_write_two_docs() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}},
+ {create, {<<"bar">>, [{<<"stuff">>, true}]}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(2, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(2, Engine:get_update_seq(St4)),
+
+ Resps = Engine:open_docs(St4, [<<"foo">>, <<"bar">>]),
+ ?assertEqual(false, lists:member(not_found, Resps)).
+
+
+cet_write_three_doc_batch() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {batch, [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}},
+ {create, {<<"bar">>, [{<<"stuff">>, true}]}},
+ {create, {<<"baz">>, []}}
+ ]}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(3, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(3, Engine:get_update_seq(St4)),
+
+ Resps = Engine:open_docs(St4, [<<"foo">>, <<"bar">>, <<"baz">>]),
+ ?assertEqual(false, lists:member(not_found, Resps)).
+
+
+cet_update_doc() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}},
+ {update, {<<"foo">>, [{<<"vsn">>, 2}]}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(1, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(2, Engine:get_update_seq(St4)),
+
+ [FDI] = Engine:open_docs(St4, [<<"foo">>]),
+
+ #rev_info{
+ rev = {RevPos, PrevRevId},
+ deleted = Deleted,
+ body_sp = DocPtr
+ } = test_engine_util:prev_rev(FDI),
+
+ Doc0 = #doc{
+ id = <<"foo">>,
+ revs = {RevPos, [PrevRevId]},
+ deleted = Deleted,
+ body = DocPtr
+ },
+
+ Doc1 = Engine:read_doc_body(St4, Doc0),
+ Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
+ couch_compress:decompress(Doc1#doc.body)
+ end,
+
+ ?assertEqual([{<<"vsn">>, 2}], Body1).
+
+
+cet_delete_doc() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {create, {<<"foo">>, [{<<"vsn">>, 1}]}},
+ {delete, {<<"foo">>, []}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(0, Engine:get_doc_count(St4)),
+ ?assertEqual(1, Engine:get_del_doc_count(St4)),
+ ?assertEqual(2, Engine:get_update_seq(St4)),
+
+ [FDI] = Engine:open_docs(St4, [<<"foo">>]),
+
+ #rev_info{
+ rev = {RevPos, PrevRevId},
+ deleted = Deleted,
+ body_sp = DocPtr
+ } = test_engine_util:prev_rev(FDI),
+
+ Doc0 = #doc{
+ id = <<"foo">>,
+ revs = {RevPos, [PrevRevId]},
+ deleted = Deleted,
+ body = DocPtr
+ },
+
+ Doc1 = Engine:read_doc_body(St4, Doc0),
+ Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
+ couch_compress:decompress(Doc1#doc.body)
+ end,
+
+ ?assertEqual([], Body1).
+
+
+cet_write_local_doc() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {create, {<<"_local/foo">>, [{<<"yay">>, false}]}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(0, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(0, Engine:get_update_seq(St4)),
+
+ [not_found] = Engine:open_docs(St4, [<<"_local/foo">>]),
+ [#doc{} = Doc] = Engine:open_local_docs(St4, [<<"_local/foo">>]),
+ ?assertEqual([{<<"yay">>, false}], Doc#doc.body).
+
+
+cet_write_mixed_batch() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {batch, [
+ {create, {<<"bar">>, []}},
+ {create, {<<"_local/foo">>, [{<<"yay">>, false}]}}
+ ]}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(1, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(1, Engine:get_update_seq(St4)),
+
+ [#full_doc_info{}] = Engine:open_docs(St4, [<<"bar">>]),
+ [not_found] = Engine:open_docs(St4, [<<"_local/foo">>]),
+
+ [not_found] = Engine:open_local_docs(St4, [<<"bar">>]),
+ [#doc{}] = Engine:open_local_docs(St4, [<<"_local/foo">>]).
+
+
+cet_update_local_doc() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {create, {<<"_local/foo">>, []}},
+ {update, {<<"_local/foo">>, [{<<"stuff">>, null}]}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(0, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(0, Engine:get_update_seq(St4)),
+
+ [not_found] = Engine:open_docs(St4, [<<"_local/foo">>]),
+ [#doc{} = Doc] = Engine:open_local_docs(St4, [<<"_local/foo">>]),
+ ?assertEqual([{<<"stuff">>, null}], Doc#doc.body).
+
+
+cet_delete_local_doc() ->
+ {ok, Engine, DbPath, St1} = test_engine_util:init_engine(dbpath),
+
+ ?assertEqual(0, Engine:get_doc_count(St1)),
+ ?assertEqual(0, Engine:get_del_doc_count(St1)),
+ ?assertEqual(0, Engine:get_update_seq(St1)),
+
+ Actions = [
+ {create, {<<"_local/foo">>, []}},
+ {delete, {<<"_local/foo">>, []}}
+ ],
+ {ok, St2} = test_engine_util:apply_actions(Engine, St1, Actions),
+ {ok, St3} = Engine:commit_data(St2),
+ Engine:terminate(normal, St3),
+ {ok, St4} = Engine:init(DbPath, []),
+
+ ?assertEqual(0, Engine:get_doc_count(St4)),
+ ?assertEqual(0, Engine:get_del_doc_count(St4)),
+ ?assertEqual(0, Engine:get_update_seq(St4)),
+
+ [not_found] = Engine:open_docs(St4, [<<"_local/foo">>]),
+ ?assertEqual([not_found], Engine:open_local_docs(St4, [<<"_local/foo">>])).
diff --git a/src/couch/src/test_engine_ref_counting.erl b/src/couch/src/test_engine_ref_counting.erl
new file mode 100644
index 000000000..18e75fb5a
--- /dev/null
+++ b/src/couch/src/test_engine_ref_counting.erl
@@ -0,0 +1,103 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_ref_counting).
+-compile(export_all).
+
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(NUM_CLIENTS, 1000).
+
+
+cet_empty_monitors() ->
+ {ok, Engine, St} = test_engine_util:init_engine(),
+ Pids = Engine:monitored_by(St),
+ ?assert(is_list(Pids)),
+ ?assertEqual([], Pids -- [self(), whereis(couch_stats_process_tracker)]).
+
+
+cet_incref_decref() ->
+ {ok, Engine, St} = test_engine_util:init_engine(),
+
+ {Pid, _} = Client = start_client(Engine, St),
+ wait_client(Client),
+
+ Pids1 = Engine:monitored_by(St),
+ ?assert(lists:member(Pid, Pids1)),
+
+ close_client(Client),
+
+ Pids2 = Engine:monitored_by(St),
+ ?assert(not lists:member(Pid, Pids2)).
+
+
+cet_incref_decref_many() ->
+ {ok, Engine, St} = test_engine_util:init_engine(),
+ Clients = lists:map(fun(_) ->
+ start_client(Engine, St)
+ end, lists:seq(1, ?NUM_CLIENTS)),
+
+ lists:foreach(fun(C) -> wait_client(C) end, Clients),
+
+ Pids1 = Engine:monitored_by(St),
+ % +2 for db pid and process tracker
+ ?assertEqual(?NUM_CLIENTS + 2, length(Pids1)),
+
+ lists:foreach(fun(C) -> close_client(C) end, Clients),
+
+ Pids2 = Engine:monitored_by(St),
+ ?assertEqual(2, length(Pids2)).
+
+
+start_client(Engine, St1) ->
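+    % The client process increfs the engine state, acknowledges wait_client/1,
+    % then waits for close_client/1 before decrefing and exiting.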
+ spawn_monitor(fun() ->
+ {ok, St2} = Engine:incref(St1),
+
+ receive
+ {waiting, Pid} ->
+ Pid ! go
+ after 1000 ->
+ erlang:error(timeout)
+ end,
+
+ receive
+ close ->
+ ok
+ after 1000 ->
+ erlang:error(timeout)
+ end,
+
+ Engine:decref(St2)
+ end).
+
+
+wait_client({Pid, _Ref}) ->
+ Pid ! {waiting, self()},
+ receive
+ go -> ok
+ after 1000 ->
+ erlang:error(timeout)
+ end.
+
+
+close_client({Pid, Ref}) ->
+ Pid ! close,
+ receive
+ {'DOWN', Ref, _, _, _} ->
+ ok
+ after 1000 ->
+ erlang:error(timeout)
+ end.
+
diff --git a/src/couch/src/test_engine_util.erl b/src/couch/src/test_engine_util.erl
new file mode 100644
index 000000000..89997538d
--- /dev/null
+++ b/src/couch/src/test_engine_util.erl
@@ -0,0 +1,607 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(test_engine_util).
+-compile(export_all).
+
+
+-include_lib("couch/include/couch_db.hrl").
+
+
+-define(TEST_MODULES, [
+ test_engine_open_close_delete,
+ test_engine_get_set_props,
+ test_engine_read_write_docs,
+ test_engine_attachments,
+ test_engine_fold_docs,
+ test_engine_fold_changes,
+ test_engine_purge_docs,
+ test_engine_compaction,
+ test_engine_ref_counting
+]).
+
+-define(COMPACTOR_TIMEOUT, 50000).
+-define(ATTACHMENT_WRITE_TIMEOUT, 10000).
+-define(MAKE_DOC_SUMMARY_TIMEOUT, 5000).
+
+create_tests(EngineApp) ->
+ create_tests(EngineApp, EngineApp).
+
+
+create_tests(EngineApp, EngineModule) ->
+ application:set_env(couch, test_engine, {EngineApp, EngineModule}),
+ Tests = lists:map(fun(TestMod) ->
+ {atom_to_list(TestMod), gather(TestMod)}
+ end, ?TEST_MODULES),
+ Setup = fun() ->
+ Ctx = test_util:start_couch(),
+ config:set("log", "include_sasl", "false", false),
+ Ctx
+ end,
+ {
+ setup,
+ Setup,
+ fun test_util:stop_couch/1,
+ fun(_) -> Tests end
+ }.
+
+
+gather(Module) ->
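+    % Collect every exported zero-arity cet_* function as a timed eunit test.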
+ Exports = Module:module_info(exports),
+ Tests = lists:foldl(fun({Fun, Arity}, Acc) ->
+ case {atom_to_list(Fun), Arity} of
+ {[$c, $e, $t, $_ | _], 0} ->
+ TestFun = make_test_fun(Module, Fun),
+ [{timeout, 60, {spawn, TestFun}} | Acc];
+ _ ->
+ Acc
+ end
+ end, [], Exports),
+ lists:reverse(Tests).
+
+
+make_test_fun(Module, Fun) ->
+ Name = lists:flatten(io_lib:format("~s:~s", [Module, Fun])),
+ Wrapper = fun() ->
+ process_flag(trap_exit, true),
+ Module:Fun()
+ end,
+ {Name, Wrapper}.
+
+
+rootdir() ->
+ config:get("couchdb", "database_dir", ".").
+
+
+dbpath() ->
+ binary_to_list(filename:join(rootdir(), couch_uuids:random())).
+
+
+get_engine() ->
+ case application:get_env(couch, test_engine) of
+ {ok, {_, Engine}} ->
+ Engine;
+ _ ->
+ couch_bt_engine
+ end.
+
+
+init_engine() ->
+ init_engine(default).
+
+
+init_engine(default) ->
+ Engine = get_engine(),
+ DbPath = dbpath(),
+ {ok, St} = Engine:init(DbPath, [
+ create,
+ {default_security_object, []}
+ ]),
+ {ok, Engine, St};
+
+init_engine(dbpath) ->
+ Engine = get_engine(),
+ DbPath = dbpath(),
+ {ok, St} = Engine:init(DbPath, [
+ create,
+ {default_security_object, []}
+ ]),
+ {ok, Engine, DbPath, St}.
+
+
+apply_actions(_Engine, St, []) ->
+ {ok, St};
+
+apply_actions(Engine, St, [Action | Rest]) ->
+ NewSt = apply_action(Engine, St, Action),
+ apply_actions(Engine, NewSt, Rest).
+
+
+apply_action(Engine, St, {batch, BatchActions}) ->
+ apply_batch(Engine, St, BatchActions);
+
+apply_action(Engine, St, Action) ->
+ apply_batch(Engine, St, [Action]).
+
+
+apply_batch(Engine, St, Actions) ->
+ UpdateSeq = Engine:get_update_seq(St) + 1,
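+    % Accumulator: {next update seq, doc info pairs, local docs, purge infos}.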
+ AccIn = {UpdateSeq, [], [], []},
+ AccOut = lists:foldl(fun(Action, Acc) ->
+ {SeqAcc, DocAcc, LDocAcc, PurgeAcc} = Acc,
+ case Action of
+ {_, {<<"_local/", _/binary>>, _}} ->
+ LDoc = gen_local_write(Engine, St, Action),
+ {SeqAcc, DocAcc, [LDoc | LDocAcc], PurgeAcc};
+ _ ->
+ case gen_write(Engine, St, Action, SeqAcc) of
+ {_OldFDI, _NewFDI} = Pair ->
+ {SeqAcc + 1, [Pair | DocAcc], LDocAcc, PurgeAcc};
+ {Pair, NewSeqAcc, NewPurgeInfo} ->
+ NewPurgeAcc = [NewPurgeInfo | PurgeAcc],
+ {NewSeqAcc, [Pair | DocAcc], LDocAcc, NewPurgeAcc}
+ end
+ end
+ end, AccIn, Actions),
+ {_, Docs0, LDocs, PurgeIdRevs} = AccOut,
+ Docs = lists:reverse(Docs0),
+ {ok, NewSt} = Engine:write_doc_infos(St, Docs, LDocs, PurgeIdRevs),
+ NewSt.
+
+
+gen_local_write(Engine, St, {Action, {DocId, Body}}) ->
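+    % Local docs use a plain integer revision counter instead of a rev tree.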
+ PrevRev = case Engine:open_local_docs(St, [DocId]) of
+ [not_found] ->
+ 0;
+ [#doc{revs = {0, []}}] ->
+ 0;
+ [#doc{revs = {0, [RevStr | _]}}] ->
+ binary_to_integer(RevStr)
+ end,
+ {RevId, Deleted} = case Action of
+ Action when Action == create; Action == update ->
+ {PrevRev + 1, false};
+ delete ->
+ {0, true}
+ end,
+ #doc{
+ id = DocId,
+ revs = {0, [RevId]},
+ body = Body,
+ deleted = Deleted
+ }.
+
+
+gen_write(Engine, St, {Action, {DocId, Body}}, UpdateSeq) ->
+ gen_write(Engine, St, {Action, {DocId, Body, []}}, UpdateSeq);
+
+gen_write(Engine, St, {create, {DocId, Body, Atts0}}, UpdateSeq) ->
+ [not_found] = Engine:open_docs(St, [DocId]),
+ Atts = [couch_att:to_disk_term(Att) || Att <- Atts0],
+
+ Rev = crypto:hash(md5, term_to_binary({DocId, Body, Atts})),
+
+ Doc0 = #doc{
+ id = DocId,
+ revs = {0, [Rev]},
+ deleted = false,
+ body = Body,
+ atts = Atts
+ },
+
+ Doc1 = make_doc_summary(Engine, St, Doc0),
+ {ok, Doc2, Len} = Engine:write_doc_body(St, Doc1),
+
+ Sizes = #size_info{
+ active = Len,
+ external = erlang:external_size(Doc1#doc.body)
+ },
+
+ Leaf = #leaf{
+ deleted = false,
+ ptr = Doc2#doc.body,
+ seq = UpdateSeq,
+ sizes = Sizes,
+ atts = Atts
+ },
+
+ {not_found, #full_doc_info{
+ id = DocId,
+ deleted = false,
+ update_seq = UpdateSeq,
+ rev_tree = [{0, {Rev, Leaf, []}}],
+ sizes = Sizes
+ }};
+
+gen_write(Engine, St, {purge, {DocId, PrevRevs0, _}}, UpdateSeq) ->
+ [#full_doc_info{} = PrevFDI] = Engine:open_docs(St, [DocId]),
+ PrevRevs = if is_list(PrevRevs0) -> PrevRevs0; true -> [PrevRevs0] end,
+
+ #full_doc_info{
+ rev_tree = PrevTree
+ } = PrevFDI,
+
+ {NewTree, RemRevs} = couch_key_tree:remove_leafs(PrevTree, PrevRevs),
+ RemovedAll = lists:sort(RemRevs) == lists:sort(PrevRevs),
+ if RemovedAll -> ok; true ->
+ % If we didn't purge all the requested revisions
+        % then it's a bug in the test.
+ erlang:error({invalid_purge_test_revs, PrevRevs})
+ end,
+
+ case NewTree of
+ [] ->
+ % We've completely purged the document
+ {{PrevFDI, not_found}, UpdateSeq, {DocId, RemRevs}};
+ _ ->
+ % We have to relabel the update_seq of all
+ % leaves. See couch_db_updater for details.
+ {NewNewTree, NewUpdateSeq} = couch_key_tree:mapfold(fun
+ (_RevId, Leaf, leaf, InnerSeqAcc) ->
+ {Leaf#leaf{seq = InnerSeqAcc}, InnerSeqAcc + 1};
+ (_RevId, Value, _Type, InnerSeqAcc) ->
+ {Value, InnerSeqAcc}
+ end, UpdateSeq, NewTree),
+ NewFDI = PrevFDI#full_doc_info{
+ update_seq = NewUpdateSeq - 1,
+ rev_tree = NewNewTree
+ },
+ {{PrevFDI, NewFDI}, NewUpdateSeq, {DocId, RemRevs}}
+ end;
+
+gen_write(Engine, St, {Action, {DocId, Body, Atts0}}, UpdateSeq) ->
+ [#full_doc_info{} = PrevFDI] = Engine:open_docs(St, [DocId]),
+ Atts = [couch_att:to_disk_term(Att) || Att <- Atts0],
+
+ #full_doc_info{
+ id = DocId,
+ rev_tree = PrevRevTree
+ } = PrevFDI,
+
+ #rev_info{
+ rev = PrevRev
+ } = prev_rev(PrevFDI),
+
+ {RevPos, PrevRevId} = PrevRev,
+
+ Rev = gen_revision(Action, DocId, PrevRev, Body, Atts),
+
+ Doc0 = #doc{
+ id = DocId,
+ revs = {RevPos + 1, [Rev, PrevRevId]},
+ deleted = false,
+ body = Body,
+ atts = Atts
+ },
+
+ Doc1 = make_doc_summary(Engine, St, Doc0),
+ {ok, Doc2, Len} = Engine:write_doc_body(St, Doc1),
+
+ Deleted = case Action of
+ update -> false;
+ conflict -> false;
+ delete -> true
+ end,
+
+ Sizes = #size_info{
+ active = Len,
+ external = erlang:external_size(Doc1#doc.body)
+ },
+
+ Leaf = #leaf{
+ deleted = Deleted,
+ ptr = Doc2#doc.body,
+ seq = UpdateSeq,
+ sizes = Sizes,
+ atts = Atts
+ },
+
+ Path = gen_path(Action, RevPos, PrevRevId, Rev, Leaf),
+ RevsLimit = Engine:get_revs_limit(St),
+ NodeType = case Action of
+ conflict -> new_branch;
+ _ -> new_leaf
+ end,
+ {NewTree, NodeType} = couch_key_tree:merge(PrevRevTree, Path, RevsLimit),
+
+ NewFDI = PrevFDI#full_doc_info{
+ deleted = couch_doc:is_deleted(NewTree),
+ update_seq = UpdateSeq,
+ rev_tree = NewTree,
+ sizes = Sizes
+ },
+
+ {PrevFDI, NewFDI}.
+
+
+gen_revision(conflict, DocId, _PrevRev, Body, Atts) ->
+ crypto:hash(md5, term_to_binary({DocId, Body, Atts}));
+gen_revision(delete, DocId, PrevRev, Body, Atts) ->
+ gen_revision(update, DocId, PrevRev, Body, Atts);
+gen_revision(update, DocId, PrevRev, Body, Atts) ->
+ crypto:hash(md5, term_to_binary({DocId, PrevRev, Body, Atts})).
+
+
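+% Build the rev tree path for couch_key_tree:merge/3: a conflict starts a new
+% branch at the root, while updates and deletes extend the existing leaf.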
+gen_path(conflict, _RevPos, _PrevRevId, Rev, Leaf) ->
+ {0, {Rev, Leaf, []}};
+gen_path(delete, RevPos, PrevRevId, Rev, Leaf) ->
+ gen_path(update, RevPos, PrevRevId, Rev, Leaf);
+gen_path(update, RevPos, PrevRevId, Rev, Leaf) ->
+ {RevPos, {PrevRevId, ?REV_MISSING, [{Rev, Leaf, []}]}}.
+
+
+make_doc_summary(Engine, St, DocData) ->
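+    % Serialize in a separate monitored process so that a crash or hang in the
+    % engine's serialize_doc/2 callback fails the test instead of the runner.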
+ {_, Ref} = spawn_monitor(fun() ->
+ exit({result, Engine:serialize_doc(St, DocData)})
+ end),
+ receive
+ {'DOWN', Ref, _, _, {result, Summary}} ->
+ Summary;
+ {'DOWN', Ref, _, _, Error} ->
+ erlang:error({make_doc_summary_error, Error})
+ after ?MAKE_DOC_SUMMARY_TIMEOUT ->
+ erlang:error(make_doc_summary_timeout)
+ end.
+
+
+prep_atts(_Engine, _St, []) ->
+ [];
+
+prep_atts(Engine, St, [{FileName, Data} | Rest]) ->
+ {_, Ref} = spawn_monitor(fun() ->
+ {ok, Stream} = Engine:open_write_stream(St, []),
+ exit(write_att(Stream, FileName, Data, Data))
+ end),
+ Att = receive
+        {'DOWN', Ref, _, _, {{nocatch, not_supported}, _}} ->
+ throw(not_supported);
+ {'DOWN', Ref, _, _, Resp} ->
+ Resp
+ after ?ATTACHMENT_WRITE_TIMEOUT ->
+ erlang:error(attachment_write_timeout)
+ end,
+ [Att | prep_atts(Engine, St, Rest)].
+
+
+write_att(Stream, FileName, OrigData, <<>>) ->
+ {StreamEngine, Len, Len, Md5, Md5} = couch_stream:close(Stream),
+ couch_util:check_md5(Md5, crypto:hash(md5, OrigData)),
+ Len = size(OrigData),
+ couch_att:new([
+ {name, FileName},
+ {type, <<"application/octet-stream">>},
+ {data, {stream, StreamEngine}},
+ {att_len, Len},
+ {disk_len, Len},
+ {md5, Md5},
+ {encoding, identity}
+ ]);
+
+write_att(Stream, FileName, OrigData, Data) ->
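+    % Stream the attachment data to the engine in chunks of at most 4096 bytes.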
+ {Chunk, Rest} = case size(Data) > 4096 of
+ true ->
+ <<Head:4096/binary, Tail/binary>> = Data,
+ {Head, Tail};
+ false ->
+ {Data, <<>>}
+ end,
+ ok = couch_stream:write(Stream, Chunk),
+ write_att(Stream, FileName, OrigData, Rest).
+
+
+prev_rev(#full_doc_info{} = FDI) ->
+ #doc_info{
+ revs = [#rev_info{} = PrevRev | _]
+ } = couch_doc:to_doc_info(FDI),
+ PrevRev.
+
+
+db_as_term(Engine, St) ->
+ [
+ {props, db_props_as_term(Engine, St)},
+ {docs, db_docs_as_term(Engine, St)},
+ {local_docs, db_local_docs_as_term(Engine, St)},
+ {changes, db_changes_as_term(Engine, St)}
+ ].
+
+
+db_props_as_term(Engine, St) ->
+ Props = [
+ get_doc_count,
+ get_del_doc_count,
+ get_disk_version,
+ get_update_seq,
+ get_purge_seq,
+ get_last_purged,
+ get_security,
+ get_revs_limit,
+ get_uuid,
+ get_epochs
+ ],
+ lists:map(fun(Fun) ->
+ {Fun, Engine:Fun(St)}
+ end, Props).
+
+
+db_docs_as_term(Engine, St) ->
+ FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+ {ok, FDIs} = Engine:fold_docs(St, FoldFun, [], []),
+ lists:reverse(lists:map(fun(FDI) ->
+ fdi_to_term(Engine, St, FDI)
+ end, FDIs)).
+
+
+db_local_docs_as_term(Engine, St) ->
+ FoldFun = fun(Doc, Acc) -> {ok, [Doc | Acc]} end,
+ {ok, LDocs} = Engine:fold_local_docs(St, FoldFun, [], []),
+ lists:reverse(LDocs).
+
+
+db_changes_as_term(Engine, St) ->
+ FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
+ {ok, Changes} = Engine:fold_changes(St, 0, FoldFun, [], []),
+ lists:reverse(lists:map(fun(FDI) ->
+ fdi_to_term(Engine, St, FDI)
+ end, Changes)).
+
+
+fdi_to_term(Engine, St, FDI) ->
+ #full_doc_info{
+ id = DocId,
+ rev_tree = OldTree
+ } = FDI,
+ {NewRevTree, _} = couch_key_tree:mapfold(fun(Rev, Node, Type, Acc) ->
+ tree_to_term(Rev, Node, Type, Acc, DocId)
+ end, {Engine, St}, OldTree),
+ FDI#full_doc_info{
+ rev_tree = NewRevTree,
+ % Blank out sizes because we allow storage
+ % engines to handle this with their own
+ % definition until further notice.
+ sizes = #size_info{
+ active = -1,
+ external = -1
+ }
+ }.
+
+
+tree_to_term(_Rev, _Leaf, branch, Acc, _DocId) ->
+ {?REV_MISSING, Acc};
+
+tree_to_term({Pos, RevId}, #leaf{} = Leaf, leaf, {Engine, St}, DocId) ->
+ #leaf{
+ deleted = Deleted,
+ ptr = Ptr
+ } = Leaf,
+
+ Doc0 = #doc{
+ id = DocId,
+ revs = {Pos, [RevId]},
+ deleted = Deleted,
+ body = Ptr
+ },
+
+ Doc1 = Engine:read_doc_body(St, Doc0),
+
+ Body = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
+ couch_compress:decompress(Doc1#doc.body)
+ end,
+
+ Atts1 = if not is_binary(Doc1#doc.atts) -> Doc1#doc.atts; true ->
+ couch_compress:decompress(Doc1#doc.atts)
+ end,
+
+ StreamSrc = fun(Sp) -> Engine:open_read_stream(St, Sp) end,
+ Atts2 = [couch_att:from_disk_term(StreamSrc, Att) || Att <- Atts1],
+ Atts = [att_to_term(Att) || Att <- Atts2],
+
+ NewLeaf = Leaf#leaf{
+ ptr = Body,
+ sizes = #size_info{active = -1, external = -1},
+ atts = Atts
+ },
+ {NewLeaf, {Engine, St}}.
+
+
+att_to_term(Att) ->
+ Bin = couch_att:to_binary(Att),
+ couch_att:store(data, Bin, Att).
+
+
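+% Recursively diff two terms, returning nodiff or the first mismatch found.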
+term_diff(T1, T2) when is_tuple(T1), is_tuple(T2) ->
+ tuple_diff(tuple_to_list(T1), tuple_to_list(T2));
+
+term_diff(L1, L2) when is_list(L1), is_list(L2) ->
+ list_diff(L1, L2);
+
+term_diff(V1, V2) when V1 == V2 ->
+ nodiff;
+
+term_diff(V1, V2) ->
+ {V1, V2}.
+
+
+tuple_diff([], []) ->
+ nodiff;
+
+tuple_diff([T1 | _], []) ->
+ {longer, T1};
+
+tuple_diff([], [T2 | _]) ->
+ {shorter, T2};
+
+tuple_diff([T1 | R1], [T2 | R2]) ->
+ case term_diff(T1, T2) of
+ nodiff ->
+ tuple_diff(R1, R2);
+ Else ->
+ {T1, Else}
+ end.
+
+
+list_diff([], []) ->
+ nodiff;
+
+list_diff([T1 | _], []) ->
+ {longer, T1};
+
+list_diff([], [T2 | _]) ->
+ {shorter, T2};
+
+list_diff([T1 | R1], [T2 | R2]) ->
+ case term_diff(T1, T2) of
+ nodiff ->
+ list_diff(R1, R2);
+ Else ->
+ {T1, Else}
+ end.
+
+
+compact(Engine, St1, DbPath) ->
+ DbName = filename:basename(DbPath),
+ {ok, St2, Pid} = Engine:start_compaction(St1, DbName, [], self()),
+ Ref = erlang:monitor(process, Pid),
+
+ % Ideally I'd assert that Pid is linked to us
+    % at this point but it's technically possible
+    % that it could have finished compacting by
+    % the time we check... Quite the quandary.
+
+ Term = receive
+ {'$gen_cast', {compact_done, Engine, Term0}} ->
+ Term0;
+ {'DOWN', Ref, _, _, Reason} ->
+ erlang:error({compactor_died, Reason})
+ after ?COMPACTOR_TIMEOUT ->
+ erlang:error(compactor_timed_out)
+ end,
+
+ {ok, St2, DbName, Pid, Term}.
+
+
+with_config(Config, Fun) ->
+ OldConfig = apply_config(Config),
+ try
+ Fun()
+ after
+ apply_config(OldConfig)
+ end.
+
+
+apply_config([]) ->
+ [];
+
+apply_config([{Section, Key, Value} | Rest]) ->
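+    % Capture the previous value so with_config/2 can restore it afterwards.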
+ Orig = config:get(Section, Key),
+ case Value of
+ undefined -> config:delete(Section, Key);
+ _ -> config:set(Section, Key, Value)
+ end,
+ [{Section, Key, Orig} | apply_config(Rest)].
diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl
index a1b8b57c5..4dfde1a33 100644
--- a/src/couch/src/test_request.erl
+++ b/src/couch/src/test_request.erl
@@ -12,6 +12,7 @@
-module(test_request).
+-export([copy/1, copy/2, copy/3]).
-export([get/1, get/2, get/3]).
-export([post/2, post/3, post/4]).
-export([put/2, put/3, put/4]).
@@ -19,6 +20,15 @@
-export([options/1, options/2, options/3]).
-export([request/3, request/4, request/5]).
+copy(Url) ->
+ copy(Url, []).
+
+copy(Url, Headers) ->
+ copy(Url, Headers, []).
+
+copy(Url, Headers, Opts) ->
+ request(copy, Url, Headers, [], Opts).
+
get(Url) ->
get(Url, []).
diff --git a/src/couch/test/chttpd_endpoints_tests.erl b/src/couch/test/chttpd_endpoints_tests.erl
index 715576713..9b7430823 100644
--- a/src/couch/test/chttpd_endpoints_tests.erl
+++ b/src/couch/test/chttpd_endpoints_tests.erl
@@ -41,6 +41,7 @@ handlers(url_handler) ->
{<<"favicon.ico">>, chttpd_misc, handle_favicon_req},
{<<"_utils">>, chttpd_misc, handle_utils_dir_req},
{<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req},
+ {<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req},
{<<"_active_tasks">>, chttpd_misc, handle_task_status_req},
{<<"_node">>, chttpd_misc, handle_node_req},
{<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req},
diff --git a/src/couch/test/couch_bt_engine_tests.erl b/src/couch/test/couch_bt_engine_tests.erl
new file mode 100644
index 000000000..df200df47
--- /dev/null
+++ b/src/couch/test/couch_bt_engine_tests.erl
@@ -0,0 +1,20 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_bt_engine_tests).
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+couch_bt_engine_test_() ->
+ test_engine_util:create_tests(couch, couch_bt_engine).
diff --git a/src/couch/test/couch_db_plugin_tests.erl b/src/couch/test/couch_db_plugin_tests.erl
index 94dd3dfa5..52533fec2 100644
--- a/src/couch/test/couch_db_plugin_tests.erl
+++ b/src/couch/test/couch_db_plugin_tests.erl
@@ -43,7 +43,7 @@ data_providers() -> [].
data_subscriptions() -> [].
processes() -> [].
notify(_, _, _) -> ok.
-fake_db() -> element(2, couch_db:clustered_db(fake, totes_fake)).
+fake_db() -> test_util:fake_db([]).
setup() ->
couch_tests:setup([
diff --git a/src/couch/test/couch_db_tests.erl b/src/couch/test/couch_db_tests.erl
index 62e059070..d64f7c640 100644
--- a/src/couch/test/couch_db_tests.erl
+++ b/src/couch/test/couch_db_tests.erl
@@ -128,15 +128,13 @@ should_delete_multiple_dbs(DbNames) ->
should_create_delete_database_continuously(Times, DbName) ->
{lists:flatten(io_lib:format("~b times", [Times])),
- ?_test(begin
+ {timeout, ?TIMEOUT, ?_test(begin
?assert(create_db(DbName)),
lists:foreach(fun(_) ->
- {timeout, ?TIMEOUT, [
- ?assert(delete_db(DbName)),
- ?assert(create_db(DbName))
- ]}
+ ?assert(delete_db(DbName)),
+ ?assert(create_db(DbName))
end, lists:seq(1, Times))
- end)}.
+ end)}}.
should_create_db_if_missing(DbName) ->
?_test(begin
diff --git a/src/couch/test/couch_key_tree_tests.erl b/src/couch/test/couch_key_tree_tests.erl
index 8aa886fc8..88d920363 100644
--- a/src/couch/test/couch_key_tree_tests.erl
+++ b/src/couch/test/couch_key_tree_tests.erl
@@ -17,14 +17,18 @@
-define(DEPTH, 10).
setup() ->
- test_util:start(?MODULE, [], [{dont_mock, [config]}]).
+ meck:new(config),
+ meck:expect(config, get, fun(_, _, Default) -> Default end).
+
+teardown(_) ->
+ meck:unload(config).
key_tree_merge_test_()->
{
"Key tree merge",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_merge_with_empty_tree(),
should_merge_reflexive(),
@@ -51,7 +55,7 @@ key_tree_missing_leaves_test_()->
"Missing tree leaves",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_not_find_missing_leaves(),
should_find_missing_leaves()
@@ -64,7 +68,7 @@ key_tree_remove_leaves_test_()->
"Remove tree leaves",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_have_no_effect_on_removing_no_leaves(),
should_have_no_effect_on_removing_non_existant_branch(),
@@ -81,7 +85,7 @@ key_tree_get_leaves_test_()->
"Leaves retrieving",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_extract_subtree(),
should_extract_subsubtree(),
@@ -103,7 +107,7 @@ key_tree_leaf_counting_test_()->
"Leaf counting",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_have_no_leaves_for_empty_tree(),
should_have_single_leaf_for_tree_with_single_node(),
@@ -118,7 +122,7 @@ key_tree_stemming_test_()->
"Stemming",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_have_no_effect_for_stemming_more_levels_than_exists(),
should_return_one_deepest_node(),
diff --git a/src/couch/test/couch_passwords_tests.erl b/src/couch/test/couch_passwords_tests.erl
index dea6d6b7b..a56627361 100644
--- a/src/couch/test/couch_passwords_tests.erl
+++ b/src/couch/test/couch_passwords_tests.erl
@@ -14,7 +14,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-
pbkdf2_test_()->
{"PBKDF2",
[
@@ -52,3 +51,44 @@ pbkdf2_test_()->
{ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
)}}]}.
+
+
+bcrypt_test_() ->
+ {
+ "Bcrypt",
+ {
+ setup,
+ fun() ->
+ test_util:start_applications([bcrypt])
+ end,
+ fun test_util:stop_applications/1,
+ [
+ {"Log rounds: 4",
+ {timeout, 1, fun bcrypt_logRounds_4/0}},
+ {"Log rounds: 5",
+ {timeout, 1, fun bcrypt_logRounds_5/0}},
+ {"Log rounds: 12",
+ {timeout, 5, fun bcrypt_logRounds_12/0}},
+ {"Null byte",
+ {timeout, 5, fun bcrypt_null_byte/0}}
+            ]
+ }
+ }.
+
+bcrypt_logRounds_4() ->
+ bcrypt_assert_equal(<<"password">>, 4).
+
+bcrypt_logRounds_5() ->
+ bcrypt_assert_equal(<<"password">>, 5).
+
+bcrypt_logRounds_12() ->
+ bcrypt_assert_equal(<<"password">>, 12).
+
+bcrypt_null_byte() ->
+ bcrypt_assert_equal(<<"passw\0rd">>, 12).
+
+bcrypt_assert_equal(Password, Rounds) when is_integer(Rounds) ->
+ HashPass = couch_passwords:bcrypt(Password, Rounds),
+ ReHashPass = couch_passwords:bcrypt(Password, HashPass),
+ ?assertEqual(HashPass, ReHashPass).
diff --git a/src/couch/test/couch_server_tests.erl b/src/couch/test/couch_server_tests.erl
index c52b3f6b0..a79f20f9b 100644
--- a/src/couch/test/couch_server_tests.erl
+++ b/src/couch/test/couch_server_tests.erl
@@ -89,3 +89,19 @@ should_delete(_, Db) ->
deleted_files(ViewFile) ->
filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*").
+
+
+bad_engine_option_test_() ->
+ {
+ setup,
+ fun start/0,
+ fun test_util:stop/1,
+ [
+ fun t_bad_engine_option/0
+ ]
+ }.
+
+
+t_bad_engine_option() ->
+ Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]),
+ ?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}).
diff --git a/src/couch/test/couch_stream_tests.erl b/src/couch/test/couch_stream_tests.erl
index 3d7bf097f..a7fedf0af 100644
--- a/src/couch/test/couch_stream_tests.erl
+++ b/src/couch/test/couch_stream_tests.erl
@@ -14,10 +14,11 @@
-include_lib("couch/include/couch_eunit.hrl").
+-define(ENGINE(FdVar), {couch_bt_engine_stream, {FdVar, []}}).
setup() ->
{ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Stream} = couch_stream:open(Fd),
+ {ok, Stream} = couch_stream:open(?ENGINE(Fd), []),
{Fd, Stream}.
teardown({Fd, _}) ->
@@ -61,7 +62,8 @@ should_write_empty_binary({_, Stream}) ->
should_return_file_pointers_on_close({_, Stream}) ->
couch_stream:write(Stream, <<"foodfoob">>),
- {Ptrs, _, _, _, _} = couch_stream:close(Stream),
+ {NewEngine, _, _, _, _} = couch_stream:close(Stream),
+ {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
?_assertEqual([{0, 8}], Ptrs).
should_return_stream_size_on_close({_, Stream}) ->
@@ -69,41 +71,43 @@ should_return_stream_size_on_close({_, Stream}) ->
{_, Length, _, _, _} = couch_stream:close(Stream),
?_assertEqual(8, Length).
-should_return_valid_pointers({Fd, Stream}) ->
+should_return_valid_pointers({_Fd, Stream}) ->
couch_stream:write(Stream, <<"foodfoob">>),
- {Ptrs, _, _, _, _} = couch_stream:close(Stream),
- ?_assertEqual(<<"foodfoob">>, read_all(Fd, Ptrs)).
+ {NewEngine, _, _, _, _} = couch_stream:close(Stream),
+ ?_assertEqual(<<"foodfoob">>, read_all(NewEngine)).
should_recall_last_pointer_position({Fd, Stream}) ->
couch_stream:write(Stream, <<"foodfoob">>),
{_, _, _, _, _} = couch_stream:close(Stream),
{ok, ExpPtr} = couch_file:bytes(Fd),
- {ok, Stream2} = couch_stream:open(Fd),
+ {ok, Stream2} = couch_stream:open(?ENGINE(Fd)),
ZeroBits = <<0:(8 * 10)>>,
OneBits = <<1:(8 * 10)>>,
ok = couch_stream:write(Stream2, OneBits),
ok = couch_stream:write(Stream2, ZeroBits),
- {Ptrs, 20, _, _, _} = couch_stream:close(Stream2),
+ {NewEngine, 20, _, _, _} = couch_stream:close(Stream2),
+ {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
[{ExpPtr, 20}] = Ptrs,
AllBits = iolist_to_binary([OneBits, ZeroBits]),
- ?_assertEqual(AllBits, read_all(Fd, Ptrs)).
+ ?_assertEqual(AllBits, read_all(NewEngine)).
should_stream_more_with_4K_chunk_size({Fd, _}) ->
- {ok, Stream} = couch_stream:open(Fd, [{buffer_size, 4096}]),
+ {ok, Stream} = couch_stream:open(?ENGINE(Fd), [{buffer_size, 4096}]),
lists:foldl(
fun(_, Acc) ->
Data = <<"a1b2c">>,
couch_stream:write(Stream, Data),
[Data | Acc]
end, [], lists:seq(1, 1024)),
- ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120, _, _, _},
- couch_stream:close(Stream)).
+ {NewEngine, Length, _, _, _} = couch_stream:close(Stream),
+ {ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
+ ?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}).
should_stop_on_normal_exit_of_stream_opener({Fd, _}) ->
RunnerPid = self(),
OpenerPid = spawn(
fun() ->
- {ok, StreamPid} = couch_stream:open(Fd),
+ {ok, StreamPid} = couch_stream:open(?ENGINE(Fd)),
RunnerPid ! {pid, StreamPid}
end),
StreamPid = receive
@@ -115,6 +119,6 @@ should_stop_on_normal_exit_of_stream_opener({Fd, _}) ->
?_assertNot(is_process_alive(StreamPid)).
-read_all(Fd, PosList) ->
- Data = couch_stream:foldl(Fd, PosList, fun(Bin, Acc) -> [Bin, Acc] end, []),
+read_all(Engine) ->
+ Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []),
iolist_to_binary(Data).
diff --git a/src/couch/test/couchdb_compaction_daemon_tests.erl b/src/couch/test/couchdb_compaction_daemon_tests.erl
index 0d7a46862..c10ddee12 100644
--- a/src/couch/test/couchdb_compaction_daemon_tests.erl
+++ b/src/couch/test/couchdb_compaction_daemon_tests.erl
@@ -24,6 +24,7 @@
start() ->
Ctx = test_util:start_couch(),
ok = config:set("compaction_daemon", "check_interval", "3", false),
+ ok = config:set("compaction_daemon", "snooze_period", "0", false),
ok = config:set("compaction_daemon", "min_file_size", "100000", false),
ok = config:delete("compactions", "_default", false),
ok = meck:new(?MODS_TO_MOCK, [passthrough]),
@@ -236,7 +237,7 @@ spawn_compaction_monitor(DbName) ->
1,
couch_db_updater,
handle_cast,
- [{compact_done, '_'}, '_'],
+ [{compact_done, '_', '_'}, '_'],
DbPid,
?TIMEOUT
),
diff --git a/src/couch/test/couchdb_vhosts_tests.erl b/src/couch/test/couchdb_vhosts_tests.erl
index dfac73cb3..2562a0653 100644
--- a/src/couch/test/couchdb_vhosts_tests.erl
+++ b/src/couch/test/couchdb_vhosts_tests.erl
@@ -46,14 +46,6 @@ setup() ->
couch_db:ensure_full_commit(Db),
couch_db:close(Db),
- test_util:with_process_restart(couch_httpd, fun() ->
- config:set("httpd_global_handlers", "_utils",
- "{couch_httpd_misc_handlers, handle_utils_dir_req, <<\""
- ++ ?TEMPDIR
- ++ "\">>}"
- )
- end),
-
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
Url = "http://" ++ Addr ++ ":" ++ Port,
@@ -76,7 +68,6 @@ vhosts_test_() ->
[
fun should_return_database_info/1,
fun should_return_revs_info/1,
- fun should_serve_utils_for_vhost/1,
fun should_return_virtual_request_path_field_in_request/1,
fun should_return_real_request_path_field_in_request/1,
fun should_match_wildcard_vhost/1,
@@ -122,22 +113,6 @@ should_return_revs_info({Url, DbName}) ->
end
end).
-should_serve_utils_for_vhost({Url, DbName}) ->
- ?_test(begin
- ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- ensure_index_file(),
- case test_request:get(Url ++ "/_utils/index.html", [],
- [{host_header, "example.com"}]) of
- {ok, _, _, Body} ->
- ?assertMatch(<<"<!DOCTYPE html>", _/binary>>, Body);
- Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
- end
- end).
-
should_return_virtual_request_path_field_in_request({Url, DbName}) ->
?_test(begin
ok = config:set("vhosts", "example1.com",
diff --git a/src/couch/test/couchdb_views_tests.erl b/src/couch/test/couchdb_views_tests.erl
index c0505f3db..1b1a8e56b 100644
--- a/src/couch/test/couchdb_views_tests.erl
+++ b/src/couch/test/couchdb_views_tests.erl
@@ -539,22 +539,25 @@ has_doc(DocId1, Rows) ->
lists:any(fun({R}) -> lists:member({<<"id">>, DocId}, R) end, Rows).
backup_db_file(DbName) ->
- DbDir = config:get("couchdb", "database_dir"),
- DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
- {ok, _} = file:copy(DbFile, DbFile ++ ".backup"),
- ok.
+ {ok, Db} = couch_db:open_int(DbName, []),
+ try
+ SrcPath = couch_db:get_filepath(Db),
+ Src = if
+ is_list(SrcPath) -> SrcPath;
+ true -> binary_to_list(SrcPath)
+ end,
+ ok = copy_tree(Src, Src ++ ".backup")
+ after
+ couch_db:close(Db)
+ end.
restore_backup_db_file(DbName) ->
- DbDir = config:get("couchdb", "database_dir"),
-
{ok, Db} = couch_db:open_int(DbName, []),
+ Src = couch_db:get_filepath(Db),
ok = couch_db:close(Db),
DbPid = couch_db:get_pid(Db),
exit(DbPid, shutdown),
-
- DbFile = filename:join([DbDir, ?b2l(DbName) ++ ".couch"]),
- ok = file:delete(DbFile),
- ok = file:rename(DbFile ++ ".backup", DbFile),
+ ok = copy_tree(Src ++ ".backup", Src),
test_util:wait(fun() ->
case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
@@ -644,3 +647,22 @@ wait_indexer(IndexerPid) ->
ok
end
end).
+
+copy_tree(Src, Dst) ->
+ case filelib:is_dir(Src) of
+ true ->
+ {ok, Files} = file:list_dir(Src),
+ copy_tree(Files, Src, Dst);
+ false ->
+ ok = filelib:ensure_dir(Dst),
+ {ok, _} = file:copy(Src, Dst),
+ ok
+ end.
+
+copy_tree([], _Src, _Dst) ->
+ ok;
+copy_tree([File | Rest], Src, Dst) ->
+ FullSrc = filename:join(Src, File),
+ FullDst = filename:join(Dst, File),
+ ok = copy_tree(FullSrc, FullDst),
+ copy_tree(Rest, Src, Dst).
diff --git a/src/couch_epi/rebar.config b/src/couch_epi/rebar.config
index 82db830a2..3c7f8af73 100644
--- a/src/couch_epi/rebar.config
+++ b/src/couch_epi/rebar.config
@@ -1,3 +1,7 @@
{cover_enabled, true}.
{cover_print_enabled, true}.
+
+{erl_opts, [
+ {platform_define, "^R16", 'pre18'},
+ {platform_define, "^17", 'pre18'}]}.
diff --git a/src/couch_epi/src/couch_epi_codegen.erl b/src/couch_epi/src/couch_epi_codegen.erl
index 978f0bb58..89b82a1f9 100644
--- a/src/couch_epi/src/couch_epi_codegen.erl
+++ b/src/couch_epi/src/couch_epi_codegen.erl
@@ -70,11 +70,16 @@ fixup_terminator(Tokens) ->
Tokens ++ [{dot, Line}]
end.
+
+-ifdef(pre18).
+
line(Token) ->
- case erlang:function_exported(erl_scan, line, 1) of
- true ->
- erl_scan:line(Token);
- false ->
- {line, Line} = erl_scan:token_info(Token, line),
- Line
- end.
+ {line, Line} = erl_scan:token_info(Token, line),
+ Line.
+
+-else.
+
+line(Token) ->
+ erl_scan:line(Token).
+
+-endif.
diff --git a/src/couch_event/src/couch_event_listener.erl b/src/couch_event/src/couch_event_listener.erl
index 9d4c8da3a..a9ed33199 100644
--- a/src/couch_event/src/couch_event_listener.erl
+++ b/src/couch_event/src/couch_event_listener.erl
@@ -23,10 +23,6 @@
]).
-export([
- behaviour_info/1
-]).
-
--export([
do_init/3,
loop/2
]).
@@ -38,16 +34,20 @@
}).
-behaviour_info(callbacks) ->
- [
- {init,1},
- {terminate,2},
- {handle_cast,2},
- {handle_event,3},
- {handle_info,2}
- ];
-behaviour_info(_) ->
- undefined.
+-callback init(Arg :: term()) ->
+ term().
+
+-callback terminate(Reason :: term(), State :: term()) ->
+ term().
+
+-callback handle_cast(Message :: term(), State :: term()) ->
+ term().
+
+-callback handle_event(DbName :: term(), Event :: term(), State :: term()) ->
+ term().
+
+-callback handle_info(Message :: term(), State :: term()) ->
+ term().
start(Mod, Arg, Options) ->
diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl
index b3a800fc1..8fba0a23f 100644
--- a/src/couch_index/src/couch_index.erl
+++ b/src/couch_index/src/couch_index.erl
@@ -104,7 +104,7 @@ init({Mod, IdxState}) ->
Mod:get(idx_name, IdxState),
couch_index_util:hexsig(Mod:get(signature, IdxState))
],
- couch_log:info("Opening index for db: ~s idx: ~s sig: ~p", Args),
+ couch_log:debug("Opening index for db: ~s idx: ~s sig: ~p", Args),
proc_lib:init_ack({ok, self()}),
gen_server:enter_loop(?MODULE, [], State);
Other ->
@@ -131,7 +131,7 @@ terminate(Reason0, State) ->
couch_index_util:hexsig(Mod:get(signature, IdxState)),
Reason
],
- couch_log:info("Closing index for db: ~s idx: ~s sig: ~p because ~r", Args),
+ couch_log:debug("Closing index for db: ~s idx: ~s sig: ~p because ~r", Args),
ok.
@@ -366,7 +366,7 @@ handle_info(maybe_close, State) ->
end;
handle_info({'DOWN', _, _, _Pid, _}, #st{mod=Mod, idx_state=IdxState}=State) ->
Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
- couch_log:info("Index shutdown by monitor notice for db: ~s idx: ~s", Args),
+ couch_log:debug("Index shutdown by monitor notice for db: ~s idx: ~s", Args),
catch send_all(State#st.waiters, shutdown),
{stop, normal, State#st{waiters=[]}}.
diff --git a/src/couch_index/src/couch_index_updater.erl b/src/couch_index/src/couch_index_updater.erl
index 4f63e9f10..5ab9ea809 100644
--- a/src/couch_index/src/couch_index_updater.erl
+++ b/src/couch_index/src/couch_index_updater.erl
@@ -174,7 +174,7 @@ update(Idx, Mod, IdxState) ->
end
end,
- Proc = fun(DocInfo, _, {IdxStateAcc, _}) ->
+ Proc = fun(DocInfo, {IdxStateAcc, _}) ->
case CommittedOnly and (GetSeq(DocInfo) > DbCommittedSeq) of
true ->
{stop, {IdxStateAcc, false}};
@@ -188,7 +188,7 @@ update(Idx, Mod, IdxState) ->
{ok, InitIdxState} = Mod:start_update(Idx, PurgedIdxState, NumChanges),
Acc0 = {InitIdxState, true},
- {ok, _, Acc} = couch_db:enum_docs_since(Db, CurrSeq, Proc, Acc0, []),
+ {ok, Acc} = couch_db:fold_changes(Db, CurrSeq, Proc, Acc0, []),
{ProcIdxSt, SendLast} = Acc,
% If we didn't bail due to hitting the last committed seq we need
@@ -206,7 +206,7 @@ update(Idx, Mod, IdxState) ->
purge_index(Db, Mod, IdxState) ->
- DbPurgeSeq = couch_db:get_purge_seq(Db),
+ {ok, DbPurgeSeq} = couch_db:get_purge_seq(Db),
IdxPurgeSeq = Mod:get(purge_seq, IdxState),
if
DbPurgeSeq == IdxPurgeSeq ->
diff --git a/src/couch_log/src/couch_log.erl b/src/couch_log/src/couch_log.erl
index 0ce4739a4..a8dc5d48d 100644
--- a/src/couch_log/src/couch_log.erl
+++ b/src/couch_log/src/couch_log.erl
@@ -68,6 +68,7 @@ set_level(Level) ->
log(Level, Fmt, Args) ->
case couch_log_util:should_log(Level) of
true ->
+ couch_stats:increment_counter([couch_log, level, Level]),
Entry = couch_log_formatter:format(Level, self(), Fmt, Args),
ok = couch_log_server:log(Entry);
false ->
diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/couch_log_test_util.erl
index 250366982..05d64d8a9 100644
--- a/src/couch_log/test/couch_log_test_util.erl
+++ b/src/couch_log/test/couch_log_test_util.erl
@@ -22,12 +22,15 @@ start() ->
application:set_env(config, ini_files, config_files()),
application:start(config),
ignore_common_loggers(),
- application:start(couch_log).
+ application:start(couch_log),
+ meck:new(couch_stats),
+ ok = meck:expect(couch_stats, increment_counter, ['_'], ok).
stop(_) ->
application:stop(config),
- application:stop(couch_log).
+ application:stop(couch_log),
+ meck:unload(couch_stats).
with_level(Name, Fun) ->
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index 83a03ffa1..a099f377e 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -423,8 +423,18 @@ all_docs_fold(Db, #mrargs{keys=undefined}=Args, Callback, UAcc) ->
update_seq=UpdateSeq,
args=Args
},
- [Opts] = couch_mrview_util:all_docs_key_opts(Args),
- {ok, Offset, FinalAcc} = couch_db:enum_docs(Db, fun map_fold/3, Acc, Opts),
+ [Opts1] = couch_mrview_util:all_docs_key_opts(Args),
+ % TODO: This is a terrible hack for now. We'll probably have
+ % to rewrite _all_docs to not be part of mrview and not expect
+    % a btree. For now non-btree engines will just have to pass 0 or
+ % some fake reductions to get an offset.
+ Opts2 = [include_reductions | Opts1],
+ FunName = case couch_util:get_value(namespace, Args#mrargs.extra) of
+ <<"_design">> -> fold_design_docs;
+ <<"_local">> -> fold_local_docs;
+ _ -> fold_docs
+ end,
+ {ok, Offset, FinalAcc} = couch_db:FunName(Db, fun map_fold/3, Acc, Opts2),
finish_fold(FinalAcc, [{total, Total}, {offset, Offset}]);
all_docs_fold(Db, #mrargs{direction=Dir, keys=Keys0}=Args, Callback, UAcc) ->
ReduceFun = get_reduce_fun(Args),
@@ -539,17 +549,25 @@ map_fold({{Key, Id}, Val}, _Offset, Acc) ->
user_acc=UAcc1,
last_go=Go
}};
-map_fold({<<"_local/",_/binary>> = DocId, {Rev0, Body}}, _Offset, #mracc{} = Acc) ->
+map_fold(#doc{id = <<"_local/", _/binary>>} = Doc, _Offset, #mracc{} = Acc) ->
#mracc{
limit=Limit,
callback=Callback,
user_acc=UAcc0,
args=Args
} = Acc,
- Rev = {0, list_to_binary(integer_to_list(Rev0))},
- Value = {[{rev, couch_doc:rev_to_str(Rev)}]},
- Doc = if Args#mrargs.include_docs -> [{doc, Body}]; true -> [] end,
- Row = [{id, DocId}, {key, DocId}, {value, Value}] ++ Doc,
+ #doc{
+ id = DocId,
+ revs = {Pos, [RevId | _]}
+ } = Doc,
+ Rev = {Pos, RevId},
+ Row = [
+ {id, DocId},
+ {key, DocId},
+ {value, {[{rev, couch_doc:rev_to_str(Rev)}]}}
+ ] ++ if not Args#mrargs.include_docs -> []; true ->
+ [{doc, couch_doc:to_json_obj(Doc, Args#mrargs.doc_options)}]
+ end,
{Go, UAcc1} = Callback({row, Row}, UAcc0),
{Go, Acc#mracc{
limit=Limit-1,
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index d26df94f2..0c6e5fc88 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -196,7 +196,7 @@ extract_view(Lang, #mrargs{view_type=red}=Args, Name, [View | Rest]) ->
view_sig(Db, State, View, #mrargs{include_docs=true}=Args) ->
BaseSig = view_sig(Db, State, View, Args#mrargs{include_docs=false}),
UpdateSeq = couch_db:get_update_seq(Db),
- PurgeSeq = couch_db:get_purge_seq(Db),
+ {ok, PurgeSeq} = couch_db:get_purge_seq(Db),
#mrst{
seq_indexed=SeqIndexed,
keyseq_indexed=KeySeqIndexed
@@ -230,9 +230,10 @@ view_sig_term(BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Args) ->
init_state(Db, Fd, #mrst{views=Views}=State, nil) ->
+ {ok, PurgeSeq} = couch_db:get_purge_seq(Db),
Header = #mrheader{
seq=0,
- purge_seq=couch_db:get_purge_seq(Db),
+ purge_seq=PurgeSeq,
id_btree_state=nil,
log_btree_state=nil,
view_states=[make_view_state(#mrview{}) || _ <- Views]
@@ -267,7 +268,9 @@ init_state(Db, Fd, State, Header) ->
view_states=ViewStates
} = Header,
- IdBtOpts = [{compression, couch_db:compression(Db)}],
+ IdBtOpts = [
+ {compression, couch_compress:get_compression_method()}
+ ],
{ok, IdBtree} = couch_btree:open(IdBtreeState, Fd, IdBtOpts),
{ok, LogBtree} = case SeqIndexed orelse KeySeqIndexed of
true -> couch_btree:open(LogBtreeState, Fd, IdBtOpts);
@@ -287,10 +290,10 @@ init_state(Db, Fd, State, Header) ->
views=Views2
}.
-open_view(Db, Fd, Lang, ViewState, View) ->
+open_view(_Db, Fd, Lang, ViewState, View) ->
ReduceFun = make_reduce_fun(Lang, View#mrview.reduce_funs),
LessFun = maybe_define_less_fun(View),
- Compression = couch_db:compression(Db),
+ Compression = couch_compress:get_compression_method(),
BTState = get_key_btree_state(ViewState),
ViewBtOpts = [
{less, LessFun},
@@ -354,7 +357,7 @@ get_row_count(#mrview{btree=Bt}) ->
all_docs_reduce_to_count(Reductions) ->
- Reduce = fun couch_db_updater:btree_by_id_reduce/2,
+ Reduce = fun couch_bt_engine:id_tree_reduce/2,
{Count, _, _} = couch_btree:final_reduce(Reduce, Reductions),
Count.
@@ -1161,7 +1164,7 @@ get_view_keys({Props}) ->
Keys when is_list(Keys) ->
Keys;
_ ->
- throw({bad_request, "`keys` member must be a array."})
+ throw({bad_request, "`keys` member must be an array."})
end.
@@ -1172,7 +1175,7 @@ get_view_queries({Props}) ->
Queries when is_list(Queries) ->
Queries;
_ ->
- throw({bad_request, "`queries` member must be a array."})
+ throw({bad_request, "`queries` member must be an array."})
end.
diff --git a/src/couch_mrview/test/couch_mrview_local_docs_tests.erl b/src/couch_mrview/test/couch_mrview_local_docs_tests.erl
index c96b98875..b0d25469a 100644
--- a/src/couch_mrview/test/couch_mrview_local_docs_tests.erl
+++ b/src/couch_mrview/test/couch_mrview_local_docs_tests.erl
@@ -116,7 +116,11 @@ should_query_with_include_docs(Db) ->
{include_docs, true}
]),
{row, Doc0} = mk_row(8),
- Doc = Doc0 ++ [{doc, {[{<<"val">>, 8}]}}],
+ Doc = Doc0 ++ [{doc, {[
+ {<<"_id">>, <<"_local/8">>},
+ {<<"_rev">>, <<"0-1">>},
+ {<<"val">>, 8}
+ ]}}],
Expect = {ok, [
{meta, [{total, null}, {offset, null}]},
{row, Doc}
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index bbf40126c..886fb4f6e 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -35,7 +35,8 @@
delete_dbs :: boolean(),
changes_pid :: pid(),
changes_ref :: reference(),
- q_for_peruser_db :: integer()
+ q_for_peruser_db :: integer(),
+ peruser_dbname_prefix :: binary()
}).
-record(state, {
@@ -45,10 +46,11 @@
states :: list(),
mem3_cluster_pid :: pid(),
cluster_stable :: boolean(),
- q_for_peruser_db :: integer()
+ q_for_peruser_db :: integer(),
+ peruser_dbname_prefix :: binary()
}).
--define(USERDB_PREFIX, "userdb-").
+-define(DEFAULT_USERDB_PREFIX, "userdb-").
-define(RELISTEN_DELAY, 5000).
-define(DEFAULT_QUIET_PERIOD, 60). % seconds
-define(DEFAULT_START_PERIOD, 5). % seconds
@@ -73,6 +75,14 @@ init_state() ->
"couch_httpd_auth", "authentication_db", "_users")),
DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
Q = config:get_integer("couch_peruser", "q", 1),
+ Prefix = config:get("couch_peruser", "database_prefix", ?DEFAULT_USERDB_PREFIX),
+ case couch_db:validate_dbname(Prefix) of
+ ok -> ok;
+ Error ->
+            couch_log:error("couch_peruser can't proceed with illegal "
+                "database prefix ~p. Error: ~p", [Prefix, Error]),
+ throw(Error)
+ end,
% set up cluster-stable listener
@@ -90,7 +100,8 @@ init_state() ->
delete_dbs = DeleteDbs,
mem3_cluster_pid = Mem3Cluster,
cluster_stable = false,
- q_for_peruser_db = Q
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = ?l2b(Prefix)
}
end.
@@ -100,7 +111,8 @@ start_listening(#state{states=ChangesStates}=State)
when length(ChangesStates) > 0 ->
% couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]),
State;
-start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs, q_for_peruser_db = Q} = State) ->
+start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs,
+ q_for_peruser_db = Q, peruser_dbname_prefix = Prefix} = State) ->
% couch_log:debug("peruser: start_listening() on node ~p", [node()]),
try
States = lists:map(fun (A) ->
@@ -108,7 +120,8 @@ start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs, q_for_peruser_db =
parent = State#state.parent,
db_name = A#shard.name,
delete_dbs = DeleteDbs,
- q_for_peruser_db = Q
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = Prefix
},
{Pid, Ref} = spawn_opt(
?MODULE, init_changes_handler, [S], [link, monitor]),
@@ -144,7 +157,8 @@ init_changes_handler(#changes_state{db_name=DbName} = ChangesState) ->
changes_handler(
{change, {Doc}, _Prepend},
_ResType,
- ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q}) ->
+ ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q,
+ peruser_dbname_prefix = Prefix}) ->
% couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]),
case couch_util:get_value(<<"id">>, Doc) of
@@ -153,16 +167,16 @@ changes_handler(
true ->
case couch_util:get_value(<<"deleted">>, Doc, false) of
false ->
- UserDb = ensure_user_db(User, Q),
+ UserDb = ensure_user_db(Prefix, User, Q),
ok = ensure_security(User, UserDb, fun add_user/3),
ChangesState;
true ->
case ChangesState#changes_state.delete_dbs of
true ->
- _UserDb = delete_user_db(User),
+ _UserDb = delete_user_db(Prefix, User),
ChangesState;
false ->
- UserDb = user_db_name(User),
+ UserDb = user_db_name(Prefix, User),
ok = ensure_security(User, UserDb, fun remove_user/3),
ChangesState
end
@@ -207,9 +221,9 @@ should_handle_doc_int(ShardName, DocId) ->
false
end.
--spec delete_user_db(User :: binary()) -> binary().
-delete_user_db(User) ->
- UserDb = user_db_name(User),
+-spec delete_user_db(Prefix :: binary(), User :: binary()) -> binary().
+delete_user_db(Prefix, User) ->
+ UserDb = user_db_name(Prefix, User),
try
case fabric:delete_db(UserDb, [?ADMIN_CTX]) of
ok -> ok;
@@ -220,9 +234,9 @@ delete_user_db(User) ->
end,
UserDb.
--spec ensure_user_db(User :: binary(), Q :: integer()) -> binary().
-ensure_user_db(User, Q) ->
- UserDb = user_db_name(User),
+-spec ensure_user_db(Prefix :: binary(), User :: binary(), Q :: integer()) -> binary().
+ensure_user_db(Prefix, User, Q) ->
+ UserDb = user_db_name(Prefix, User),
try
{ok, _DbInfo} = fabric:get_db_info(UserDb)
catch error:database_does_not_exist ->
@@ -300,11 +314,11 @@ ensure_security(User, UserDb, TransformFun) ->
end
end.
--spec user_db_name(User :: binary()) -> binary().
-user_db_name(User) ->
+-spec user_db_name(Prefix :: binary(), User :: binary()) -> binary().
+user_db_name(Prefix, User) ->
HexUser = list_to_binary(
[string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]),
- <<?USERDB_PREFIX,HexUser/binary>>.
+ <<Prefix/binary,HexUser/binary>>.
-spec exit_changes(State :: #state{}) -> ok.
exit_changes(State) ->
diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/couch_peruser_test.erl
index 04ef2ea90..ea71f113c 100644
--- a/src/couch_peruser/test/couch_peruser_test.erl
+++ b/src/couch_peruser/test/couch_peruser_test.erl
@@ -42,7 +42,6 @@ setup() ->
set_config("couch_peruser", "cluster_start_period", "1"),
set_config("couch_peruser", "enable", "true"),
set_config("cluster", "n", "1"),
- timer:sleep(1000),
TestAuthDb.
teardown(TestAuthDb) ->
@@ -62,9 +61,7 @@ teardown(TestAuthDb) ->
end, all_dbs()).
set_config(Section, Key, Value) ->
- Url = lists:concat([
- get_base_url(), "/_config/", Section, "/", Key]),
- do_request(put, Url, "\"" ++ Value ++ "\"").
+ ok = config:set(Section, Key, Value, _Persist=false).
delete_config(Section, Key, Value) ->
Url = lists:concat([
@@ -156,6 +153,20 @@ should_create_user_db_with_default(TestAuthDb) ->
?_assertEqual(1, couch_util:get_value(q, ClusterInfo))
].
+should_create_user_db_with_custom_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "newuserdb-"),
+ create_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"newuserdb-666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "newuserdb-"),
+ ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())).
+
+should_create_user_db_with_custom_special_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ create_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())).
+
should_create_anon_user_db_with_default(TestAuthDb) ->
create_anon_user(TestAuthDb, "fooo"),
wait_for_db_create(<<"userdb-666f6f6f">>),
@@ -166,6 +177,20 @@ should_create_anon_user_db_with_default(TestAuthDb) ->
?_assertEqual(1, couch_util:get_value(q, ClusterInfo))
].
+should_create_anon_user_db_with_custom_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "newuserdb-"),
+ create_anon_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"newuserdb-666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "newuserdb-"),
+ ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())).
+
+should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ create_anon_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())).
+
should_create_user_db_with_q4(TestAuthDb) ->
set_config("couch_peruser", "q", "4"),
create_user(TestAuthDb, "foo"),
@@ -196,10 +221,11 @@ should_not_delete_user_db(TestAuthDb) ->
UserDbName = <<"userdb-666f6f">>,
create_user(TestAuthDb, User),
wait_for_db_create(<<"userdb-666f6f">>),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
delete_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- ?_assert(lists:member(UserDbName, all_dbs())).
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [?_assert(AfterCreate), ?_assert(AfterDelete)].
should_delete_user_db(TestAuthDb) ->
User = "bar",
@@ -207,10 +233,45 @@ should_delete_user_db(TestAuthDb) ->
set_config("couch_peruser", "delete_dbs", "true"),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
delete_user(TestAuthDb, User),
wait_for_db_delete(UserDbName),
- ?_assert(not lists:member(UserDbName, all_dbs())).
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [?_assert(AfterCreate), ?_assertNot(AfterDelete)].
+
+should_delete_user_db_with_custom_prefix(TestAuthDb) ->
+ User = "bar",
+ UserDbName = <<"newuserdb-626172">>,
+ set_config("couch_peruser", "delete_dbs", "true"),
+ set_config("couch_peruser", "database_prefix", "newuserdb-"),
+ create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
+ delete_user(TestAuthDb, User),
+ wait_for_db_delete(UserDbName),
+ delete_config("couch_peruser", "database_prefix", "newuserdb-"),
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [
+ ?_assert(AfterCreate),
+ ?_assertNot(AfterDelete)
+ ].
+
+should_delete_user_db_with_custom_special_prefix(TestAuthDb) ->
+ User = "bar",
+ UserDbName = <<"userdb_$()+--/626172">>,
+ set_config("couch_peruser", "delete_dbs", "true"),
+ set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
+ delete_user(TestAuthDb, User),
+ wait_for_db_delete(UserDbName),
+ delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [
+ ?_assert(AfterCreate),
+ ?_assertNot(AfterDelete)
+ ].
should_reflect_config_changes(TestAuthDb) ->
User = "baz",
@@ -218,28 +279,37 @@ should_reflect_config_changes(TestAuthDb) ->
set_config("couch_peruser", "delete_dbs", "true"),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate1 = lists:member(UserDbName, all_dbs()),
delete_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
wait_for_db_delete(UserDbName),
- ?assert(not lists:member(UserDbName, all_dbs())),
+ AfterDelete1 = lists:member(UserDbName, all_dbs()),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate2 = lists:member(UserDbName, all_dbs()),
set_config("couch_peruser", "delete_dbs", "false"),
delete_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterDelete2 = lists:member(UserDbName, all_dbs()),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
set_config("couch_peruser", "delete_dbs", "true"),
delete_user(TestAuthDb, User),
wait_for_db_delete(UserDbName),
- ?assert(not lists:member(UserDbName, all_dbs())),
+ AfterDelete3 = lists:member(UserDbName, all_dbs()),
set_config("couch_peruser", "enable", "false"),
create_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- ?_assert(not lists:member(UserDbName, all_dbs())).
+ AfterCreate3 = lists:member(UserDbName, all_dbs()),
+ [
+ ?_assert(AfterCreate1),
+ ?_assertNot(AfterDelete1),
+ ?_assert(AfterCreate2),
+ ?_assert(AfterDelete2),
+ ?_assertNot(AfterDelete3),
+ ?_assertNot(AfterCreate3)
+ ].
+
should_add_user_to_db_admins(TestAuthDb) ->
User = "qux",
@@ -313,18 +383,24 @@ should_remove_user_from_db_admins(TestAuthDb) ->
{AdminProperties} = proplists:get_value(<<"admins">>,
get_security(UserDbName)),
AdminNames = proplists:get_value(<<"names">>, AdminProperties),
- ?assert(lists:member(<<"foo">>, AdminNames)),
- ?assert(lists:member(<<"bar">>, AdminNames)),
- ?assert(lists:member(<<"qux">>, AdminNames)),
+ FooBefore = lists:member(<<"foo">>, AdminNames),
+ BarBefore = lists:member(<<"bar">>, AdminNames),
+ QuxBefore = lists:member(<<"qux">>, AdminNames),
delete_user(TestAuthDb, User),
wait_for_security_delete(<<"admins">>, User, UserDbName),
{NewAdminProperties} = proplists:get_value(<<"admins">>,
get_security(UserDbName)),
NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
+ FooAfter = lists:member(<<"foo">>, NewAdminNames),
+ BarAfter = lists:member(<<"bar">>, NewAdminNames),
+ QuxAfter = lists:member(<<"qux">>, NewAdminNames),
[
- ?_assert(lists:member(<<"foo">>, NewAdminNames)),
- ?_assert(lists:member(<<"bar">>, NewAdminNames)),
- ?_assert(not lists:member(<<"qux">>, NewAdminNames))
+ ?_assert(FooBefore),
+ ?_assert(BarBefore),
+ ?_assert(QuxBefore),
+ ?_assert(FooAfter),
+ ?_assert(BarAfter),
+ ?_assertNot(QuxAfter)
].
should_remove_user_from_db_members(TestAuthDb) ->
@@ -341,18 +417,24 @@ should_remove_user_from_db_members(TestAuthDb) ->
{MemberProperties} = proplists:get_value(<<"members">>,
get_security(UserDbName)),
MemberNames = proplists:get_value(<<"names">>, MemberProperties),
- ?assert(lists:member(<<"pow">>, MemberNames)),
- ?assert(lists:member(<<"wow">>, MemberNames)),
- ?assert(lists:member(<<"qux">>, MemberNames)),
+ PowBefore = lists:member(<<"pow">>, MemberNames),
+ WowBefore = lists:member(<<"wow">>, MemberNames),
+ QuxBefore = lists:member(<<"qux">>, MemberNames),
delete_user(TestAuthDb, User),
wait_for_security_delete(<<"members">>, User, UserDbName),
{NewMemberProperties} = proplists:get_value(<<"members">>,
get_security(UserDbName)),
NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
+ PowAfter = lists:member(<<"pow">>, NewMemberNames),
+ WowAfter = lists:member(<<"wow">>, NewMemberNames),
+ QuxAfter = lists:member(<<"qux">>, NewMemberNames),
[
- ?_assert(lists:member(<<"pow">>, NewMemberNames)),
- ?_assert(lists:member(<<"wow">>, NewMemberNames)),
- ?_assert(not lists:member(<<"qux">>, NewMemberNames))
+ ?_assert(PowBefore),
+ ?_assert(WowBefore),
+ ?_assert(QuxBefore),
+ ?_assert(PowAfter),
+ ?_assert(WowAfter),
+ ?_assertNot(QuxAfter)
].
% infinite loop waiting for a db to be created, either this returns true
@@ -422,11 +504,17 @@ couch_peruser_test_() ->
fun setup/0, fun teardown/1,
[
fun should_create_anon_user_db_with_default/1,
+ fun should_create_anon_user_db_with_custom_prefix/1,
+ fun should_create_anon_user_db_with_custom_special_prefix/1,
fun should_create_user_db_with_default/1,
+ fun should_create_user_db_with_custom_prefix/1,
+ fun should_create_user_db_with_custom_special_prefix/1,
fun should_create_user_db_with_q4/1,
fun should_create_anon_user_db_with_q4/1,
fun should_not_delete_user_db/1,
fun should_delete_user_db/1,
+ fun should_delete_user_db_with_custom_prefix/1,
+ fun should_delete_user_db_with_custom_special_prefix/1,
fun should_reflect_config_changes/1,
fun should_add_user_to_db_admins/1,
fun should_add_user_to_db_members/1,
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.hrl b/src/couch_replicator/include/couch_replicator_api_wrap.hrl
index d2e0fdff5..0f8213c51 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.hrl
+++ b/src/couch_replicator/include/couch_replicator_api_wrap.hrl
@@ -14,7 +14,7 @@
-record(httpdb, {
url,
- oauth = nil,
+ auth_props = [],
headers = [
{"Accept", "application/json"},
{"User-Agent", "CouchDB-Replicator/" ++ couch_server:get_version()}
@@ -26,13 +26,6 @@
httpc_pool = nil,
http_connections,
first_error_timestamp = nil,
- proxy_url
-}).
-
--record(oauth, {
- consumer_key,
- token,
- token_secret,
- consumer_secret,
- signature_method
+ proxy_url,
+ auth_context = nil
}).
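+
+% `auth_props` carries the endpoint's auth properties from the replication
+% document (previously only its nested "oauth" section was used), and
+% `auth_context` is set by couch_replicator_auth:initialize/1 to a
+% {PluginModule, PluginContext} tuple once an auth plugin takes ownership.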
diff --git a/src/couch_replicator/src/couch_replicator.app.src b/src/couch_replicator/src/couch_replicator.app.src
index 18dde37d3..2e0e191d3 100644
--- a/src/couch_replicator/src/couch_replicator.app.src
+++ b/src/couch_replicator/src/couch_replicator.app.src
@@ -29,6 +29,7 @@
stdlib,
couch_log,
mem3,
+ config,
couch,
couch_event,
couch_stats
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index 8b7cd5cb1..39141c301 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -25,7 +25,7 @@
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("mem3/include/mem3.hrl").
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index b5ea57c3c..44c290d33 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -142,7 +142,8 @@ db_open(DbName, Options, Create, _CreateParams) ->
throw({unauthorized, DbName})
end.
-db_close(#httpdb{httpc_pool = Pool}) ->
+db_close(#httpdb{httpc_pool = Pool} = HttpDb) ->
+ couch_replicator_auth:cleanup(HttpDb),
unlink(Pool),
ok = couch_replicator_httpc_pool:stop(Pool);
db_close(DbName) ->
@@ -1009,7 +1010,7 @@ header_value(Key, Headers, Default) ->
normalize_db(#httpdb{} = HttpDb) ->
#httpdb{
url = HttpDb#httpdb.url,
- oauth = HttpDb#httpdb.oauth,
+ auth_props = lists:sort(HttpDb#httpdb.auth_props),
headers = lists:keysort(1, HttpDb#httpdb.headers),
timeout = HttpDb#httpdb.timeout,
ibrowse_options = lists:keysort(1, HttpDb#httpdb.ibrowse_options),
@@ -1037,7 +1038,7 @@ maybe_append_create_query_params(Db, CreateParams) ->
normalize_http_db_test() ->
HttpDb = #httpdb{
url = "http://host/db",
- oauth = #oauth{},
+ auth_props = [{"key", "val"}],
headers = [{"k2","v2"}, {"k1","v1"}],
timeout = 30000,
ibrowse_options = [{k2, v2}, {k1, v1}],
diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl
new file mode 100644
index 000000000..60273fc32
--- /dev/null
+++ b/src/couch_replicator/src/couch_replicator_auth.erl
@@ -0,0 +1,100 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_auth).
+
+
+-export([
+ initialize/1,
+ update_headers/2,
+ handle_response/3,
+ cleanup/1
+]).
+
+
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
+
+
+-type headers() :: [{string(), string()}].
+-type code() :: non_neg_integer().
+
+
+-define(DEFAULT_PLUGINS, "couch_replicator_auth_noop").
+
+
+% Behavior API
+
+-callback initialize(#httpdb{}) ->
+ {ok, #httpdb{}, term()} | {error, term()} | ignore.
+
+-callback update_headers(term(), headers()) -> {headers(), term()}.
+
+-callback handle_response(term(), code(), headers()) ->
+ {continue | retry, term()}.
+
+-callback cleanup(term()) -> ok.
+
+
+% Main API
+
+-spec initialize(#httpdb{}) -> {ok, #httpdb{}} | {error, term()}.
+initialize(#httpdb{auth_context = nil} = HttpDb) ->
+ case try_initialize(get_plugin_modules(), HttpDb) of
+ {ok, Mod, HttpDb1, Context} ->
+ {ok, HttpDb1#httpdb{auth_context = {Mod, Context}}};
+ {error, Error} ->
+ {error, Error}
+ end.
+
+
+-spec update_headers(#httpdb{}, headers()) -> {headers(), #httpdb{}}.
+update_headers(#httpdb{auth_context = {Mod, Context}} = HttpDb, Headers) ->
+ {Headers1, Context1} = Mod:update_headers(Context, Headers),
+ {Headers1, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
+
+
+-spec handle_response(#httpdb{}, code(), headers()) ->
+ {continue | retry, term()}.
+handle_response(#httpdb{} = HttpDb, Code, Headers) ->
+ {Mod, Context} = HttpDb#httpdb.auth_context,
+ {Res, Context1} = Mod:handle_response(Context, Code, Headers),
+ {Res, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
+
+
+-spec cleanup(#httpdb{}) -> #httpdb{}.
+cleanup(#httpdb{auth_context = {Module, Context}} = HttpDb) ->
+ ok = Module:cleanup(Context),
+ HttpDb#httpdb{auth_context = nil}.
+
+
+% Private helper functions
+
+-spec get_plugin_modules() -> [atom()].
+get_plugin_modules() ->
+ Plugins1 = config:get("replicator", "auth_plugins", ?DEFAULT_PLUGINS),
+ [list_to_atom(Plugin) || Plugin <- string:tokens(Plugins1, ",")].
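+
+% Plugins listed in the `auth_plugins` setting are tried in order until one
+% returns {ok, ...} or {error, ...}; a plugin returning `ignore` is skipped
+% (see try_initialize/2 below). As an illustrative example only, a local.ini
+% entry preferring session auth with a fallback to the no-op plugin could
+% look like:
+%
+%   [replicator]
+%   auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop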
+
+
+try_initialize([], _HttpDb) ->
+ {error, no_more_auth_plugins_left_to_try};
+try_initialize([Mod | Modules], HttpDb) ->
+ try Mod:initialize(HttpDb) of
+ {ok, HttpDb1, Context} ->
+ {ok, Mod, HttpDb1, Context};
+ ignore ->
+ try_initialize(Modules, HttpDb);
+ {error, Error} ->
+ {error, Error}
+ catch
+ error:undef ->
+ {error, {could_not_load_plugin_module, Mod}}
+ end.
diff --git a/src/couch_replicator/src/couch_replicator_auth_noop.erl b/src/couch_replicator/src/couch_replicator_auth_noop.erl
new file mode 100644
index 000000000..5dbf13335
--- /dev/null
+++ b/src/couch_replicator/src/couch_replicator_auth_noop.erl
@@ -0,0 +1,52 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_auth_noop).
+
+
+-behavior(couch_replicator_auth).
+
+
+-export([
+ initialize/1,
+ update_headers/2,
+ handle_response/3,
+ cleanup/1
+]).
+
+
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
+
+
+-type headers() :: [{string(), string()}].
+-type code() :: non_neg_integer().
+
+
+-spec initialize(#httpdb{}) -> {ok, #httpdb{}, term()} | ignore.
+initialize(#httpdb{} = HttpDb) ->
+ {ok, HttpDb, nil}.
+
+
+-spec update_headers(term(), headers()) -> {headers(), term()}.
+update_headers(Context, Headers) ->
+ {Headers, Context}.
+
+
+-spec handle_response(term(), code(), headers()) ->
+ {continue | retry, term()}.
+handle_response(Context, _Code, _Headers) ->
+ {continue, Context}.
+
+
+-spec cleanup(term()) -> ok.
+cleanup(_Context) ->
+ ok.
diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
new file mode 100644
index 000000000..fedc4c668
--- /dev/null
+++ b/src/couch_replicator/src/couch_replicator_auth_session.erl
@@ -0,0 +1,693 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+
+% This is the replicator session auth plugin. It implements session-based
+% authentication for the replicator. The only public API is the set of
+% functions from the couch_replicator_auth behaviour. Most of the logic and
+% state lives in the gen_server; an instance of it may be spawned for each of
+% the source and target endpoints of a replication job.
+%
+% The workflow is roughly this:
+%
+% * On initialization, try to get a cookie in `refresh/1`. If an error occurs,
+%   crash. If the `_session` endpoint fails with a 404 (not found), return
+%   `ignore`, assuming session authentication is not supported or we simply
+%   hit a non-CouchDB server.
+%
+% * Before each request, the auth framework calls the `update_headers` API
+%   function. Before updating the headers and returning, check whether the
+%   cookie needs to be refreshed. The check looks at the `next_refresh` time;
+%   if that time is set (not `infinity`) and has just expired, obtain a new
+%   cookie, then update the headers and return.
+%
+% * After each request, the auth framework calls the `handle_response`
+%   function. If the request was successful, check whether the server sent a
+%   new cookie in the `Set-Cookie` header. If it did, that becomes the
+%   current cookie.
+%
+% * If the last request had an auth failure, check whether it used a stale
+%   cookie. If it did, nothing is done and the client is told to retry; the
+%   next time it updates its headers before a request, it should pick up the
+%   latest cookie.
+%
+% * If the last request failed and the cookie was the latest known cookie,
+%   schedule a refresh and tell the client to retry. However, if the cookie
+%   was just updated, tell the client to continue so that it handles the auth
+%   failure on its own via a set of retries with exponential backoffs. This
+%   ensures that if something goes wrong and one of the endpoints issues
+%   invalid cookies, the replicator won't be stuck in a busy loop refreshing
+%   them.
+
+
+-module(couch_replicator_auth_session).
+
+
+-behaviour(couch_replicator_auth).
+-behaviour(gen_server).
+
+
+-export([
+ initialize/1,
+ update_headers/2,
+ handle_response/3,
+ cleanup/1
+]).
+
+-export([
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ format_status/2
+]).
+
+
+-include_lib("ibrowse/include/ibrowse.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
+
+
+-type headers() :: [{string(), string()}].
+-type code() :: non_neg_integer().
+-type creds() :: {string() | undefined, string() | undefined}.
+
+
+-define(MIN_UPDATE_INTERVAL, 5).
+
+
+-record(state, {
+ epoch = 0 :: non_neg_integer(),
+ cookie :: string() | undefined,
+ user :: string() | undefined,
+ pass :: string() | undefined,
+ httpdb_timeout :: integer(),
+ httpdb_pool :: pid(),
+ httpdb_ibrowse_options = [] :: list(),
+ session_url :: string(),
+ next_refresh = infinity :: infinity | non_neg_integer(),
+ refresh_tstamp = 0 :: non_neg_integer()
+}).
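+
+% Note on `epoch`: it is bumped in update_cookie/3 whenever a new cookie is
+% stored. Auth failures are reported together with the epoch the request was
+% sent with, so a failure seen with an older epoch is simply retried, as a
+% newer cookie is already available (see process_auth_failure/2).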
+
+
+% Behavior API callbacks
+
+-spec initialize(#httpdb{}) ->
+ {ok, #httpdb{}, term()} | {error, term()} | ignore.
+initialize(#httpdb{} = HttpDb) ->
+ case init_state(HttpDb) of
+ {ok, HttpDb1, State} ->
+ {ok, Pid} = gen_server:start_link(?MODULE, [State], []),
+ Epoch = State#state.epoch,
+ Timeout = State#state.httpdb_timeout,
+ {ok, HttpDb1, {Pid, Epoch, Timeout}};
+ {error, Error} ->
+ {error, Error};
+ ignore ->
+ ignore
+ end.
+
+
+-spec update_headers(term(), headers()) -> {headers(), term()}.
+update_headers({Pid, Epoch, Timeout}, Headers) ->
+ Args = {update_headers, Headers, Epoch},
+ {Headers1, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
+ {Headers1, {Pid, Epoch1, Timeout}}.
+
+
+-spec handle_response(term(), code(), headers()) ->
+ {continue | retry, term()}.
+handle_response({Pid, Epoch, Timeout}, Code, Headers) ->
+ Args = {handle_response, Code, Headers, Epoch},
+ {Retry, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
+ {Retry, {Pid, Epoch1, Timeout}}.
+
+
+-spec cleanup(term()) -> ok.
+cleanup({Pid, _Epoch, Timeout}) ->
+ gen_server:call(Pid, stop, Timeout * 10).
+
+
+%% gen_server functions
+
+init([#state{} = State]) ->
+ {ok, State}.
+
+
+terminate(_Reason, _State) ->
+ ok.
+
+
+handle_call({update_headers, Headers, _Epoch}, _From, State) ->
+ case maybe_refresh(State) of
+ {ok, State1} ->
+ Cookie = "AuthSession=" ++ State1#state.cookie,
+ Headers1 = [{"Cookie", Cookie} | Headers],
+ {reply, {Headers1, State1#state.epoch}, State1};
+ {error, Error} ->
+ LogMsg = "~p: Stopping session auth plugin because of error ~p",
+ couch_log:error(LogMsg, [?MODULE, Error]),
+ {stop, Error, State}
+ end;
+
+handle_call({handle_response, Code, Headers, Epoch}, _From, State) ->
+ {Retry, State1} = process_response(Code, Headers, Epoch, State),
+ {reply, {Retry, State1#state.epoch}, State1};
+
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State}.
+
+
+handle_cast(Msg, State) ->
+ couch_log:error("~p: Received un-expected cast ~p", [?MODULE, Msg]),
+ {noreply, State}.
+
+
+handle_info(Msg, State) ->
+ couch_log:error("~p : Received un-expected message ~p", [?MODULE, Msg]),
+ {noreply, State}.
+
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+format_status(_Opt, [_PDict, State]) ->
+ [
+ {epoch, State#state.epoch},
+ {user, State#state.user},
+ {session_url, State#state.session_url},
+ {refresh_tstamp, State#state.refresh_tstamp}
+ ].
+
+
+%% Private helper functions
+
+
+-spec init_state(#httpdb{}) ->
+ {ok, #httpdb{}, #state{}} | {error, term()} | ignore.
+init_state(#httpdb{} = HttpDb) ->
+ case extract_creds(HttpDb) of
+ {ok, User, Pass, HttpDb1} ->
+ State = #state{
+ user = User,
+ pass = Pass,
+ session_url = get_session_url(HttpDb1#httpdb.url),
+ httpdb_pool = HttpDb1#httpdb.httpc_pool,
+ httpdb_timeout = HttpDb1#httpdb.timeout,
+ httpdb_ibrowse_options = HttpDb1#httpdb.ibrowse_options
+ },
+ case refresh(State) of
+ {ok, State1} ->
+ {ok, HttpDb1, State1};
+ {error, {session_not_supported, _, _}} ->
+ ignore;
+ {error, Error} ->
+ {error, Error}
+ end;
+ {error, missing_credentials} ->
+ ignore;
+ {error, Error} ->
+ {error, Error}
+ end.
+
+
+-spec extract_creds(#httpdb{}) ->
+ {ok, string(), string(), #httpdb{}} | {error, term()}.
+extract_creds(#httpdb{url = Url, headers = Headers} = HttpDb) ->
+ {{HeadersUser, HeadersPass}, HeadersNoCreds} =
+ couch_replicator_utils:remove_basic_auth_from_headers(Headers),
+ case extract_creds_from_url(Url) of
+ {ok, UrlUser, UrlPass, UrlNoCreds} ->
+ case pick_creds({UrlUser, UrlPass}, {HeadersUser, HeadersPass}) of
+ {ok, User, Pass} ->
+ HttpDb1 = HttpDb#httpdb{
+ url = UrlNoCreds,
+ headers = HeadersNoCreds
+ },
+ {ok, User, Pass, HttpDb1};
+ {error, Error} ->
+ {error, Error}
+ end;
+ {error, Error} ->
+ {error, Error}
+ end.
+
+
+% Credentials may be specified in the url and/or in the headers.
+% * If no credentials are specified, return an error.
+% * If they are specified in the url but not in the headers, pick the url
+%   credentials.
+% * Otherwise pick the headers credentials.
+%
+-spec pick_creds(creds(), creds()) ->
+ {ok, string(), string()} | {error, missing_credentials}.
+pick_creds({undefined, _}, {undefined, _}) ->
+ {error, missing_credentials};
+pick_creds({UrlUser, UrlPass}, {undefined, _}) ->
+ {ok, UrlUser, UrlPass};
+pick_creds({_, _}, {HeadersUser, HeadersPass}) ->
+ {ok, HeadersUser, HeadersPass}.
+
+
+-spec extract_creds_from_url(string()) ->
+ {ok, string() | undefined, string() | undefined, string()} |
+ {error, term()}.
+extract_creds_from_url(Url) ->
+ case ibrowse_lib:parse_url(Url) of
+ {error, Error} ->
+ {error, Error};
+ #url{username = undefined, password = undefined} ->
+ {ok, undefined, undefined, Url};
+ #url{protocol = Proto, username = User, password = Pass} ->
+ % Excise user and pass parts from the url. Try to keep the host,
+ % port and path as they were in the original.
+ Prefix = lists:concat([Proto, "://", User, ":", Pass, "@"]),
+ Suffix = lists:sublist(Url, length(Prefix) + 1, length(Url) + 1),
+ NoCreds = lists:concat([Proto, "://", Suffix]),
+ {ok, User, Pass, NoCreds}
+ end.
+
+
+-spec process_response(non_neg_integer(), headers(),
+ non_neg_integer(), #state{}) -> {retry | continue, #state{}}.
+process_response(403, _Headers, Epoch, State) ->
+ process_auth_failure(Epoch, State);
+process_response(401, _Headers, Epoch, State) ->
+ process_auth_failure(Epoch, State);
+process_response(Code, Headers, _Epoch, State) when Code >= 200, Code < 300 ->
+ % If the server notices the cookie is about to time out, it can send a new
+ % cookie in the response headers. Take advantage of that and refresh the
+ % cookie.
+ State1 = case maybe_update_cookie(Headers, State) of
+ {ok, UpdatedState} ->
+ UpdatedState;
+ {error, cookie_not_found} ->
+ State;
+ {error, Other} ->
+ LogMsg = "~p : Could not parse cookie from response headers ~p",
+ couch_log:error(LogMsg, [?MODULE, Other]),
+ State
+ end,
+ {continue, State1};
+process_response(_Code, _Headers, _Epoch, State) ->
+ {continue, State}.
+
+
+-spec process_auth_failure(non_neg_integer(), #state{}) ->
+ {retry | continue, #state{}}.
+process_auth_failure(Epoch, #state{epoch = StateEpoch} = State)
+ when StateEpoch > Epoch ->
+ % This request used an outdated cookie; tell it to retry immediately.
+ % It will pick up the current cookie when its headers are updated.
+ {retry, State};
+process_auth_failure(Epoch, #state{epoch = Epoch} = State) ->
+ MinInterval = min_update_interval(),
+ case cookie_age_sec(State, now_sec()) of
+ AgeSec when AgeSec < MinInterval ->
+ % A recently acquired cookie failed. Schedule a refresh and
+ % return `continue` to let httpc's retry apply a backoff
+ {continue, schedule_refresh(now_sec() + MinInterval, State)};
+ _AgeSec ->
+ % Current cookie failed auth. Schedule refresh and ask
+ % httpc to retry the request.
+ {retry, schedule_refresh(now_sec(), State)}
+ end.
+
+
+-spec get_session_url(string()) -> string().
+get_session_url(Url) ->
+ #url{
+ protocol = Proto,
+ host = Host,
+ port = Port
+ } = ibrowse_lib:parse_url(Url),
+ WithPort = lists:concat([Proto, "://", Host, ":", Port]),
+ case lists:prefix(WithPort, Url) of
+ true ->
+ % Explicit port specified in the original url
+ WithPort ++ "/_session";
+ false ->
+ % Implicit proto default port was used
+ lists:concat([Proto, "://", Host, "/_session"])
+ end.
+
+
+-spec schedule_refresh(non_neg_integer(), #state{}) -> #state{}.
+schedule_refresh(T, #state{next_refresh = Tc} = State) when T < Tc ->
+ State#state{next_refresh = T};
+schedule_refresh(_, #state{} = State) ->
+ State.
+
+
+-spec maybe_refresh(#state{}) -> {ok, #state{}} | {error, term()}.
+maybe_refresh(#state{next_refresh = T} = State) ->
+ case now_sec() >= T of
+ true ->
+ refresh(State#state{next_refresh = infinity});
+ false ->
+ {ok, State}
+ end.
+
+
+-spec refresh(#state{}) -> {ok, #state{}} | {error, term()}.
+refresh(#state{session_url = Url, user = User, pass = Pass} = State) ->
+ Body = mochiweb_util:urlencode([{name, User}, {password, Pass}]),
+ Headers = [{"Content-Type", "application/x-www-form-urlencoded"}],
+ Result = http_request(State, Url, Headers, post, Body),
+ http_response(Result, State).
+
+
+-spec http_request(#state{}, string(), headers(), atom(), iolist()) ->
+ {ok, string(), headers(), binary()} | {error, term()}.
+http_request(#state{httpdb_pool = Pool} = State, Url, Headers, Method, Body) ->
+ Timeout = State#state.httpdb_timeout,
+ Opts = [
+ {response_format, binary},
+ {inactivity_timeout, Timeout}
+ | State#state.httpdb_ibrowse_options
+ ],
+ {ok, Wrk} = couch_replicator_httpc_pool:get_worker(Pool),
+ try
+ ibrowse:send_req_direct(Wrk, Url, Headers, Method, Body, Opts, Timeout)
+ after
+ ok = couch_replicator_httpc_pool:release_worker(Pool, Wrk)
+ end.
+
+
+-spec http_response({ok, string(), headers(), binary()} | {error, term()},
+ #state{}) -> {ok, #state{}} | {error, term()}.
+http_response({ok, "200", Headers, _}, State) ->
+ maybe_update_cookie(Headers, State);
+http_response({ok, "401", _, _}, #state{session_url = Url, user = User}) ->
+ {error, {session_request_unauthorized, Url, User}};
+http_response({ok, "403", _, _}, #state{session_url = Url, user = User}) ->
+ {error, {session_request_forbidden, Url, User}};
+http_response({ok, "404", _, _}, #state{session_url = Url, user = User}) ->
+ {error, {session_not_supported, Url, User}};
+http_response({ok, Code, _, _}, #state{session_url = Url, user = User}) ->
+ {error, {session_unexpected_result, Code, Url, User}};
+http_response({error, Error}, #state{session_url = Url, user = User}) ->
+ {error, {session_request_failed, Url, User, Error}}.
+
+
+-spec parse_cookie(list()) -> {ok, string()} | {error, term()}.
+parse_cookie(Headers0) ->
+ Headers = mochiweb_headers:make(Headers0),
+ case mochiweb_headers:get_value("Set-Cookie", Headers) of
+ undefined ->
+ {error, cookie_not_found};
+ CookieHeader ->
+ CookieKVs = mochiweb_cookies:parse_cookie(CookieHeader),
+ CaseInsKVs = mochiweb_headers:make(CookieKVs),
+ case mochiweb_headers:get_value("AuthSession", CaseInsKVs) of
+ undefined ->
+ {error, cookie_format_invalid};
+ Cookie ->
+ {ok, Cookie}
+ end
+ end.
+
+
+-spec maybe_update_cookie(headers(), #state{}) ->
+ {ok, string()} | {error, term()}.
+maybe_update_cookie(ResponseHeaders, State) ->
+ case parse_cookie(ResponseHeaders) of
+ {ok, Cookie} ->
+ {ok, update_cookie(State, Cookie, now_sec())};
+ {error, Error} ->
+ {error, Error}
+ end.
+
+
+-spec update_cookie(#state{}, string(), non_neg_integer()) -> #state{}.
+update_cookie(#state{cookie = Cookie} = State, Cookie, _) ->
+ State;
+update_cookie(#state{epoch = Epoch} = State, Cookie, NowSec) ->
+ State#state{
+ epoch = Epoch + 1,
+ cookie = Cookie,
+ refresh_tstamp = NowSec
+ }.
+
+
+-spec cookie_age_sec(#state{}, non_neg_integer()) -> non_neg_integer().
+cookie_age_sec(#state{refresh_tstamp = RefreshTs}, Now) ->
+ max(0, Now - RefreshTs).
+
+
+-spec now_sec() -> non_neg_integer().
+now_sec() ->
+ {Mega, Sec, _Micro} = os:timestamp(),
+ Mega * 1000000 + Sec.
+
+
+-spec min_update_interval() -> non_neg_integer().
+min_update_interval() ->
+ config:get_integer("replicator", "session_min_update_interval",
+ ?MIN_UPDATE_INTERVAL).
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+get_session_url_test_() ->
+ [?_assertEqual(SessionUrl, get_session_url(Url)) || {Url, SessionUrl} <- [
+ {"http://host/db", "http://host/_session"},
+ {"http://127.0.0.1/db", "http://127.0.0.1/_session"},
+ {"http://host/x/y/z", "http://host/_session"},
+ {"http://host:5984/db", "http://host:5984/_session"},
+ {"https://host/db?q=1", "https://host/_session"}
+ ]].
+
+
+extract_creds_success_test_() ->
+ DefaultHeaders = (#httpdb{})#httpdb.headers,
+ [?_assertEqual({ok, User, Pass, HttpDb2}, extract_creds(HttpDb1)) ||
+ {HttpDb1, {User, Pass, HttpDb2}} <- [
+ {
+ #httpdb{url = "http://u:p@x.y/db"},
+ {"u", "p", #httpdb{url = "http://x.y/db"}}
+ },
+ {
+ #httpdb{url = "http://u:p@h:80/db"},
+ {"u", "p", #httpdb{url = "http://h:80/db"}}
+ },
+ {
+ #httpdb{url = "https://u:p@h/db"},
+ {"u", "p", #httpdb{url = "https://h/db"}}
+ },
+ {
+ #httpdb{url = "http://u:p@127.0.0.1:5984/db"},
+ {"u", "p", #httpdb{url = "http://127.0.0.1:5984/db"}}
+ },
+ {
+ #httpdb{url = "http://u:p@[2001:db8:a1b:12f9::1]/db"},
+ {"u", "p", #httpdb{url = "http://[2001:db8:a1b:12f9::1]/db"}}
+ },
+ {
+ #httpdb{url = "http://u:p@[2001:db8:a1b:12f9::1]:81/db"},
+ {"u", "p", #httpdb{url = "http://[2001:db8:a1b:12f9::1]:81/db"}}
+ },
+ {
+ #httpdb{url = "http://u:p@x.y/db/other?query=Z&query=w"},
+ {"u", "p", #httpdb{url = "http://x.y/db/other?query=Z&query=w"}}
+ },
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers = DefaultHeaders ++ [
+ {"Authorization", "Basic " ++ b64creds("u", "p")}
+ ]
+ },
+ {"u", "p", #httpdb{url = "http://h/db"}}
+ },
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers = DefaultHeaders ++ [
+ {"aUthoriZation", "bASIC " ++ b64creds("U", "p")}
+ ]
+ },
+ {"U", "p", #httpdb{url = "http://h/db"}}
+ },
+ {
+ #httpdb{
+ url = "http://u1:p1@h/db",
+ headers = DefaultHeaders ++ [
+ {"Authorization", "Basic " ++ b64creds("u2", "p2")}
+ ]
+ },
+ {"u2", "p2", #httpdb{url = "http://h/db"}}
+ }
+ ]].
+
+
+cookie_update_test_() ->
+ {
+ foreach,
+ fun setup/0,
+ fun teardown/1,
+ [
+ t_do_refresh(),
+ t_dont_refresh(),
+ t_process_auth_failure(),
+ t_process_auth_failure_stale_epoch(),
+ t_process_auth_failure_too_frequent(),
+ t_process_ok_update_cookie(),
+ t_process_ok_no_cookie(),
+ t_init_state_fails_on_401(),
+ t_init_state_404(),
+ t_init_state_no_creds(),
+ t_init_state_http_error()
+ ]
+ }.
+
+
+t_do_refresh() ->
+ ?_test(begin
+ State = #state{next_refresh = 0},
+ {ok, State1} = maybe_refresh(State),
+ ?assertMatch(#state{
+ next_refresh = infinity,
+ epoch = 1,
+ cookie = "Abc"
+ }, State1)
+ end).
+
+
+t_dont_refresh() ->
+ ?_test(begin
+ State = #state{next_refresh = now_sec() + 100},
+ {ok, State1} = maybe_refresh(State),
+ ?assertMatch(State, State1),
+ State2 = #state{next_refresh = infinity},
+ {ok, State3} = maybe_refresh(State2),
+ ?assertMatch(State2, State3)
+ end).
+
+
+t_process_auth_failure() ->
+ ?_test(begin
+ State = #state{epoch = 1, refresh_tstamp = 0},
+ {retry, State1} = process_auth_failure(1, State),
+ NextRefresh = State1#state.next_refresh,
+ ?assert(NextRefresh =< now_sec())
+ end).
+
+
+t_process_auth_failure_stale_epoch() ->
+ ?_test(begin
+ State = #state{epoch = 3},
+ ?assertMatch({retry, State}, process_auth_failure(2, State))
+ end).
+
+
+t_process_auth_failure_too_frequent() ->
+ ?_test(begin
+ State = #state{epoch = 4, refresh_tstamp = now_sec()},
+ ?assertMatch({continue, _}, process_auth_failure(4, State))
+ end).
+
+
+t_process_ok_update_cookie() ->
+ ?_test(begin
+ Headers = [{"set-CookiE", "AuthSession=xyz; Path=/;"}, {"X", "y"}],
+ Res = process_response(200, Headers, 1, #state{}),
+ ?assertMatch({continue, #state{cookie = "xyz", epoch = 1}}, Res),
+ State = #state{cookie = "xyz", refresh_tstamp = 42, epoch = 2},
+ Res2 = process_response(200, Headers, 1, State),
+ ?assertMatch({continue, #state{cookie = "xyz", epoch = 2}}, Res2)
+ end).
+
+
+t_process_ok_no_cookie() ->
+ ?_test(begin
+ Headers = [{"X", "y"}],
+ State = #state{cookie = "old", epoch = 3, refresh_tstamp = 42},
+ Res = process_response(200, Headers, 1, State),
+ ?assertMatch({continue, State}, Res)
+ end).
+
+
+t_init_state_fails_on_401() ->
+ ?_test(begin
+ mock_http_401_response(),
+ {error, Error} = init_state(#httpdb{url = "http://u:p@h"}),
+ SessionUrl = "http://h/_session",
+ ?assertEqual({session_request_unauthorized, SessionUrl, "u"}, Error)
+ end).
+
+
+t_init_state_404() ->
+ ?_test(begin
+ mock_http_404_response(),
+ ?assertEqual(ignore, init_state(#httpdb{url = "http://u:p@h"}))
+ end).
+
+
+t_init_state_no_creds() ->
+ ?_test(begin
+ ?assertEqual(ignore, init_state(#httpdb{url = "http://h"}))
+ end).
+
+
+t_init_state_http_error() ->
+ ?_test(begin
+ mock_http_error_response(),
+ {error, Error} = init_state(#httpdb{url = "http://u:p@h"}),
+ SessionUrl = "http://h/_session",
+ ?assertEqual({session_request_failed, SessionUrl, "u", x}, Error)
+ end).
+
+
+setup() ->
+ meck:expect(couch_replicator_httpc_pool, get_worker, 1, {ok, worker}),
+ meck:expect(couch_replicator_httpc_pool, release_worker, 2, ok),
+ meck:expect(config, get, fun(_, _, Default) -> Default end),
+ mock_http_cookie_response("Abc"),
+ ok.
+
+
+teardown(_) ->
+ meck:unload().
+
+
+mock_http_cookie_response(Cookie) ->
+ Resp = {ok, "200", [{"Set-Cookie", "AuthSession=" ++ Cookie}], []},
+ meck:expect(ibrowse, send_req_direct, 7, Resp).
+
+
+mock_http_401_response() ->
+ meck:expect(ibrowse, send_req_direct, 7, {ok, "401", [], []}).
+
+
+mock_http_404_response() ->
+ meck:expect(ibrowse, send_req_direct, 7, {ok, "404", [], []}).
+
+
+mock_http_error_response() ->
+ meck:expect(ibrowse, send_req_direct, 7, {error, x}).
+
+
+extract_creds_error_test_() ->
+ [?_assertMatch({error, Error}, extract_creds(HttpDb)) ||
+ {HttpDb, Error} <- [
+ {#httpdb{url = "some_junk"}, invalid_uri},
+ {#httpdb{url = "http://h/db"}, missing_credentials}
+ ]].
+
+
+b64creds(User, Pass) ->
+ base64:encode_to_string(User ++ ":" ++ Pass).
+
+
+-endif.
diff --git a/src/couch_replicator/src/couch_replicator_changes_reader.erl b/src/couch_replicator/src/couch_replicator_changes_reader.erl
index 3659d9592..2e4df5365 100644
--- a/src/couch_replicator/src/couch_replicator_changes_reader.erl
+++ b/src/couch_replicator/src/couch_replicator_changes_reader.erl
@@ -19,7 +19,7 @@
-export([read_changes/5]).
-include_lib("couch/include/couch_db.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
-import(couch_util, [
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 6666cba53..62d21fe12 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -35,7 +35,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("mem3/include/mem3.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
-include("couch_replicator_js_functions.hrl").
@@ -396,28 +396,9 @@ parse_rep_db({Props}, Proxy, Options) ->
{BinHeaders} = get_value(<<"headers">>, Props, {[]}),
Headers = lists:ukeysort(1, [{?b2l(K), ?b2l(V)} || {K, V} <- BinHeaders]),
DefaultHeaders = (#httpdb{})#httpdb.headers,
- OAuth = case get_value(<<"oauth">>, AuthProps) of
- undefined ->
- nil;
- {OauthProps} ->
- #oauth{
- consumer_key = ?b2l(get_value(<<"consumer_key">>, OauthProps)),
- token = ?b2l(get_value(<<"token">>, OauthProps)),
- token_secret = ?b2l(get_value(<<"token_secret">>, OauthProps)),
- consumer_secret = ?b2l(get_value(<<"consumer_secret">>,
- OauthProps)),
- signature_method =
- case get_value(<<"signature_method">>, OauthProps) of
- undefined -> hmac_sha1;
- <<"PLAINTEXT">> -> plaintext;
- <<"HMAC-SHA1">> -> hmac_sha1;
- <<"RSA-SHA1">> -> rsa_sha1
- end
- }
- end,
#httpdb{
url = Url,
- oauth = OAuth,
+ auth_props = AuthProps,
headers = lists:ukeymerge(1, Headers, DefaultHeaders),
ibrowse_options = lists:keysort(1,
[{socket_options, get_value(socket_options, Options)} |
@@ -695,7 +676,7 @@ strip_credentials(Url) when is_binary(Url) ->
"http\\1://\\2",
[{return, binary}]);
strip_credentials({Props}) ->
- {lists:keydelete(<<"oauth">>, 1, Props)}.
+ {lists:keydelete(<<"headers">>, 1, Props)}.
error_reason({shutdown, Error}) ->
@@ -761,4 +742,28 @@ check_convert_options_fail_test() ->
?assertThrow({bad_request, _},
convert_options([{<<"selector">>, [{key, value}]}])).
+check_strip_credentials_test() ->
+ [?assertEqual(Expected, strip_credentials(Body)) || {Expected, Body} <- [
+ {
+ undefined,
+ undefined
+ },
+ {
+ <<"https://remote_server/database">>,
+ <<"https://foo:bar@remote_server/database">>
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"bar">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"baz">>}]}
+ }
+ ]].
+
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index d3c6b0179..2f865c6d2 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -14,7 +14,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("ibrowse/include/ibrowse.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-export([setup/1]).
-export([send_req/3]).
@@ -28,7 +28,7 @@
-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-define(MAX_WAIT, 5 * 60 * 1000).
-define(STREAM_STATUS, ibrowse_stream_status).
-
+-define(STOP_HTTP_WORKER, stop_http_worker).
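+% When ?STOP_HTTP_WORKER is set in the process dictionary, the `after` clause
+% in send_req/3 stops the worker and releases it synchronously instead of
+% returning it to the pool for reuse.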
% This limit is for the number of messages we're willing to discard
% from an HTTP stream in clean_mailbox/1 before killing the worker
@@ -51,8 +51,17 @@ setup(Db) ->
undefined -> Url;
_ when is_list(ProxyURL) -> ProxyURL
end,
- {ok, Pid} = couch_replicator_httpc_pool:start_link(HttpcURL, [{max_connections, MaxConns}]),
- {ok, Db#httpdb{httpc_pool = Pid}}.
+ {ok, Pid} = couch_replicator_httpc_pool:start_link(HttpcURL,
+ [{max_connections, MaxConns}]),
+ case couch_replicator_auth:initialize(Db#httpdb{httpc_pool = Pid}) of
+ {ok, Db1} ->
+ {ok, Db1};
+ {error, Error} ->
+ LogMsg = "~p: auth plugin initialization failed ~p ~p",
+ LogUrl = couch_util:url_strip_password(Url),
+ couch_log:error(LogMsg, [?MODULE, LogUrl, Error]),
+ throw({replication_auth_error, Error})
+ end.
send_req(HttpDb, Params1, Callback) ->
@@ -69,10 +78,14 @@ send_req(HttpDb, Params1, Callback) ->
throw:{retry, NewHttpDb0, NewParams0} ->
{retry, NewHttpDb0, NewParams0}
after
- ok = couch_replicator_httpc_pool:release_worker(
- HttpDb#httpdb.httpc_pool,
- Worker
- ),
+ Pool = HttpDb#httpdb.httpc_pool,
+ case get(?STOP_HTTP_WORKER) of
+ stop ->
+ ok = stop_and_release_worker(Pool, Worker),
+ erase(?STOP_HTTP_WORKER);
+ undefined ->
+ ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
+ end,
clean_mailbox(Response)
end,
% This is necessary to keep this tail-recursive. Calling
@@ -86,11 +99,11 @@ send_req(HttpDb, Params1, Callback) ->
end.
-send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb, Params) ->
+send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb0, Params) ->
Method = get_value(method, Params, get),
UserHeaders = lists:keysort(1, get_value(headers, Params, [])),
Headers1 = lists:ukeymerge(1, UserHeaders, BaseHeaders),
- Headers2 = oauth_header(HttpDb, Params) ++ Headers1,
+ {Headers2, HttpDb} = couch_replicator_auth:update_headers(HttpDb0, Headers1),
Url = full_url(HttpDb, Params),
Body = get_value(body, Params, []),
case get_value(path, Params) == "_changes" of
@@ -129,7 +142,7 @@ stop_and_release_worker(Pool, Worker) ->
ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Worker).
process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
- stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker),
+ put(?STOP_HTTP_WORKER, stop),
maybe_retry(sel_conn_closed, Worker, HttpDb, Params);
@@ -138,13 +151,8 @@ process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
%% and closes the socket, ibrowse will detect that error when it sends
%% next request.
process_response({error, connection_closing}, Worker, HttpDb, Params, _Cb) ->
- stop_and_release_worker(HttpDb#httpdb.httpc_pool, Worker),
- throw({retry, HttpDb, Params});
-
-process_response({error, req_timedout}, _Worker, HttpDb, Params, _Cb) ->
- % ibrowse worker terminated because remote peer closed the socket
- % -> not an error
- throw({retry, HttpDb, Params});
+ put(?STOP_HTTP_WORKER, stop),
+ maybe_retry({error, connection_closing}, Worker, HttpDb, Params);
process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
process_stream_response(ReqId, Worker, HttpDb, Params, Callback);
@@ -162,6 +170,8 @@ process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
Json ->
?JSON_DECODE(Json)
end,
+ process_auth_response(HttpDb, Ok, Headers, Params),
+ if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
Callback(Ok, Headers, EJson);
R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
backoff_success(HttpDb, Params),
@@ -184,10 +194,12 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
backoff(HttpDb#httpdb{timeout = Timeout}, Params);
Ok when (Ok >= 200 andalso Ok < 300) ; (Ok >= 400 andalso Ok < 500) ->
backoff_success(HttpDb, Params),
+ HttpDb1 = process_auth_response(HttpDb, Ok, Headers, Params),
StreamDataFun = fun() ->
- stream_data_self(HttpDb, Params, Worker, ReqId, Callback)
+ stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
end,
put(?STREAM_STATUS, {streaming, Worker}),
+ if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
ibrowse:stream_next(ReqId),
try
Ret = Callback(Ok, Headers, StreamDataFun),
@@ -195,9 +207,9 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
catch
throw:{maybe_retry_req, connection_closed} ->
maybe_retry({connection_closed, mid_stream},
- Worker, HttpDb, Params);
+ Worker, HttpDb1, Params);
throw:{maybe_retry_req, Err} ->
- maybe_retry(Err, Worker, HttpDb, Params)
+ maybe_retry(Err, Worker, HttpDb1, Params)
end;
R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
backoff_success(HttpDb, Params),
@@ -221,6 +233,16 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
end.
+process_auth_response(HttpDb, Code, Headers, Params) ->
+ case couch_replicator_auth:handle_response(HttpDb, Code, Headers) of
+ {continue, HttpDb1} ->
+ HttpDb1;
+ {retry, HttpDb1} ->
+ log_retry_error(Params, HttpDb1, 0, Code),
+ throw({retry, HttpDb1, Params})
+ end.
+
+
% Only streaming HTTP requests send messages back from
% the ibrowse worker process. We can detect that based
% on the ibrowse_req_id format. This just drops all
@@ -402,28 +424,6 @@ query_args_to_string([{K, V} | Rest], Acc) ->
query_args_to_string(Rest, [K ++ "=" ++ couch_httpd:quote(V) | Acc]).
-oauth_header(#httpdb{oauth = nil}, _ConnParams) ->
- [];
-oauth_header(#httpdb{url = BaseUrl, oauth = OAuth}, ConnParams) ->
- Consumer = {
- OAuth#oauth.consumer_key,
- OAuth#oauth.consumer_secret,
- OAuth#oauth.signature_method
- },
- Method = case get_value(method, ConnParams, get) of
- get -> "GET";
- post -> "POST";
- put -> "PUT";
- head -> "HEAD"
- end,
- QSL = get_value(qs, ConnParams, []),
- OAuthParams = oauth:sign(Method,
- BaseUrl ++ get_value(path, ConnParams, []),
- QSL, Consumer, OAuth#oauth.token, OAuth#oauth.token_secret) -- QSL,
- [{"Authorization",
- "OAuth " ++ oauth:header_params_encode(OAuthParams)}].
-
-
do_redirect(_Worker, Code, Headers, #httpdb{url = Url} = HttpDb, Params, _Cb) ->
RedirectUrl = redirect_url(Headers, Url),
{HttpDb2, Params2} = after_redirect(RedirectUrl, Code, HttpDb, Params),
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index e7067622b..e8faf8ea3 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -21,7 +21,7 @@
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch/include/couch_db.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
% replication_id/1 and replication_id/2 will attempt to fetch
@@ -127,62 +127,25 @@ maybe_append_options(Options, RepOptions) ->
end, [], Options).
-get_rep_endpoint(_UserCtx, #httpdb{url=Url, headers=Headers, oauth=OAuth}) ->
+get_rep_endpoint(_UserCtx, #httpdb{url=Url, headers=Headers}) ->
DefaultHeaders = (#httpdb{})#httpdb.headers,
- case OAuth of
- nil ->
- {remote, Url, Headers -- DefaultHeaders};
- #oauth{} ->
- {remote, Url, Headers -- DefaultHeaders, OAuth}
- end;
+ {remote, Url, Headers -- DefaultHeaders};
get_rep_endpoint(UserCtx, <<DbName/binary>>) ->
{local, DbName, UserCtx}.
get_v4_endpoint(UserCtx, #httpdb{} = HttpDb) ->
- {Url, Headers, OAuth} = case get_rep_endpoint(UserCtx, HttpDb) of
- {remote, U, Hds} ->
- {U, Hds, undefined};
- {remote, U, Hds, OA} ->
- {U, Hds, OA}
- end,
- {UserFromHeaders, HeadersWithoutBasicAuth} = remove_basic_auth(Headers),
+ {remote, Url, Headers} = get_rep_endpoint(UserCtx, HttpDb),
+ {{UserFromHeaders, _}, HeadersWithoutBasicAuth} =
+ couch_replicator_utils:remove_basic_auth_from_headers(Headers),
{UserFromUrl, Host, NonDefaultPort, Path} = get_v4_url_info(Url),
User = pick_defined_value([UserFromUrl, UserFromHeaders]),
+ OAuth = undefined, % Keep this to ensure checkpoints don't change
{remote, User, Host, NonDefaultPort, Path, HeadersWithoutBasicAuth, OAuth};
get_v4_endpoint(UserCtx, <<DbName/binary>>) ->
{local, DbName, UserCtx}.
-remove_basic_auth(Headers) ->
- case lists:partition(fun is_basic_auth/1, Headers) of
- {[], HeadersWithoutBasicAuth} ->
- {undefined, HeadersWithoutBasicAuth};
- {[{_, "Basic " ++ Base64} | _], HeadersWithoutBasicAuth} ->
- User = get_basic_auth_user(Base64),
- {User, HeadersWithoutBasicAuth}
- end.
-
-
-is_basic_auth({"Authorization", "Basic " ++ _Base64}) ->
- true;
-is_basic_auth(_) ->
- false.
-
-
-get_basic_auth_user(Base64) ->
- try re:split(base64:decode(Base64), ":", [{return, list}, {parts, 2}]) of
- [User, _Pass] ->
- User;
- _ ->
- undefined
- catch
- % Tolerate invalid B64 values here to avoid crashing replicator
- error:function_clause ->
- undefined
- end.
-
-
pick_defined_value(Values) ->
case [V || V <- Values, V /= undefined] of
[] ->
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index be956b6a7..0b396346a 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -56,7 +56,7 @@
-include("couch_replicator_scheduler.hrl").
-include("couch_replicator.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include_lib("couch/include/couch_db.hrl").
%% types
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index 0438249be..1467d9f30 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -29,7 +29,7 @@
]).
-include_lib("couch/include/couch_db.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator_scheduler.hrl").
-include("couch_replicator.hrl").
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
index 01881e423..218fcf501 100644
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ b/src/couch_replicator/src/couch_replicator_utils.erl
@@ -27,7 +27,8 @@
get_json_value/3,
pp_rep_id/1,
iso8601/1,
- filter_state/3
+ filter_state/3,
+ remove_basic_auth_from_headers/1
]).
-export([
@@ -36,7 +37,7 @@
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-import(couch_util, [
get_value/2,
@@ -174,3 +175,88 @@ filter_state(State, States, Info) ->
false ->
skip
end.
+
+
+remove_basic_auth_from_headers(Headers) ->
+ Headers1 = mochiweb_headers:make(Headers),
+ case mochiweb_headers:get_value("Authorization", Headers1) of
+ undefined ->
+ {{undefined, undefined}, Headers};
+ Auth ->
+ {Basic, Base64} = lists:splitwith(fun(X) -> X =/= $\s end, Auth),
+ maybe_remove_basic_auth(string:to_lower(Basic), Base64, Headers1)
+ end.
+
+
+maybe_remove_basic_auth("basic", " " ++ Base64, Headers) ->
+ Headers1 = mochiweb_headers:delete_any("Authorization", Headers),
+ {decode_basic_creds(Base64), mochiweb_headers:to_list(Headers1)};
+maybe_remove_basic_auth(_, _, Headers) ->
+ {{undefined, undefined}, mochiweb_headers:to_list(Headers)}.
+
+
+decode_basic_creds(Base64) ->
+ try re:split(base64:decode(Base64), ":", [{return, list}, {parts, 2}]) of
+ [User, Pass] ->
+ {User, Pass};
+ _ ->
+ {undefined, undefined}
+ catch
+ % Tolerate invalid B64 values here to avoid crashing replicator
+ error:function_clause ->
+ {undefined, undefined}
+ end.
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+remove_basic_auth_from_headers_test_() ->
+ [?_assertMatch({{User, Pass}, NoAuthHeaders},
+ remove_basic_auth_from_headers(Headers)) ||
+ {{User, Pass, NoAuthHeaders}, Headers} <- [
+ {
+ {undefined, undefined, []},
+ []
+ },
+ {
+ {undefined, undefined, [{"h", "v"}]},
+ [{"h", "v"}]
+ },
+ {
+ {undefined, undefined, [{"Authorization", "junk"}]},
+ [{"Authorization", "junk"}]
+ },
+ {
+ {undefined, undefined, []},
+ [{"Authorization", "basic X"}]
+ },
+ {
+ {"user", "pass", []},
+ [{"Authorization", "Basic " ++ b64creds("user", "pass")}]
+ },
+ {
+ {"user", "pass", []},
+ [{"AuThorization", "Basic " ++ b64creds("user", "pass")}]
+ },
+ {
+ {"user", "pass", []},
+ [{"Authorization", "bAsIc " ++ b64creds("user", "pass")}]
+ },
+ {
+ {"user", "pass", [{"h", "v"}]},
+ [
+ {"Authorization", "Basic " ++ b64creds("user", "pass")},
+ {"h", "v"}
+ ]
+ }
+ ]
+ ].
+
+
+b64creds(User, Pass) ->
+ base64:encode_to_string(User ++ ":" ++ Pass).
+
+
+-endif.
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index db6b72b2e..ec98fa0f3 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -20,9 +20,10 @@
% gen_server callbacks
-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
+-export([format_status/2]).
-include_lib("couch/include/couch_db.hrl").
--include("couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
% TODO: maybe make both buffer max sizes configurable
@@ -218,6 +219,25 @@ terminate(_Reason, State) ->
stop_db_compaction_notifier(State#state.source_db_compaction_notifier),
stop_db_compaction_notifier(State#state.target_db_compaction_notifier).
+format_status(_Opt, [_PDict, State]) ->
+ #state{
+ cp = MainJobPid,
+ loop = LoopPid,
+ source = Source,
+ target = Target,
+ readers = Readers,
+ pending_fetch = PendingFetch,
+ batch = #batch{size = BatchSize}
+ } = State,
+ [
+ {main_pid, MainJobPid},
+ {loop, LoopPid},
+ {source, couch_replicator_api_wrap:db_uri(Source)},
+ {target, couch_replicator_api_wrap:db_uri(Target)},
+ {num_readers, length(Readers)},
+ {pending_fetch, PendingFetch},
+ {batch_size, BatchSize}
+ ].
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
@@ -559,3 +579,30 @@ maybe_report_stats(Cp, Stats) ->
false ->
Stats
end.
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+replication_worker_format_status_test() ->
+ State = #state{
+ cp = self(),
+ loop = self(),
+ source = #httpdb{url = "http://u:p@h/d1"},
+ target = #httpdb{url = "http://u:p@h/d2"},
+ readers = [r1, r2, r3],
+ pending_fetch = nil,
+ batch = #batch{size = 5}
+ },
+ Format = format_status(opts_ignored, [pdict, State]),
+ ?assertEqual(self(), proplists:get_value(main_pid, Format)),
+ ?assertEqual(self(), proplists:get_value(loop, Format)),
+ ?assertEqual("http://u:*****@h/d1", proplists:get_value(source, Format)),
+ ?assertEqual("http://u:*****@h/d2", proplists:get_value(target, Format)),
+ ?assertEqual(3, proplists:get_value(num_readers, Format)),
+ ?assertEqual(nil, proplists:get_value(pending_fetch, Format)),
+ ?assertEqual(5, proplists:get_value(batch_size, Format)).
+
+-endif.
diff --git a/src/couch_replicator/test/couch_replicator_compact_tests.erl b/src/couch_replicator/test/couch_replicator_compact_tests.erl
index f06a684b5..89e9295d4 100644
--- a/src/couch_replicator/test/couch_replicator_compact_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_compact_tests.erl
@@ -236,7 +236,7 @@ should_compare_databases(Source, Target) ->
{timeout, 35, ?_test(begin
{ok, SourceDb} = couch_db:open_int(Source, []),
{ok, TargetDb} = couch_db:open_int(Target, []),
- Fun = fun(FullDocInfo, _, Acc) ->
+ Fun = fun(FullDocInfo, Acc) ->
{ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
{Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
DocId = couch_util:get_value(<<"_id">>, Props),
@@ -255,7 +255,7 @@ should_compare_databases(Source, Target) ->
?assertEqual(DocJson, DocTargetJson),
{ok, Acc}
end,
- {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
ok = couch_db:close(SourceDb),
ok = couch_db:close(TargetDb)
end)}.
diff --git a/src/couch_replicator/test/couch_replicator_filtered_tests.erl b/src/couch_replicator/test/couch_replicator_filtered_tests.erl
index 03cf44c53..d34e9f020 100644
--- a/src/couch_replicator/test/couch_replicator_filtered_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_filtered_tests.erl
@@ -169,7 +169,7 @@ compare_dbs(Source, Target, FilterFun) ->
{ok, SourceDb} = couch_db:open_int(Source, []),
{ok, TargetDb} = couch_db:open_int(Target, []),
{ok, TargetDbInfo} = couch_db:get_db_info(TargetDb),
- Fun = fun(FullDocInfo, _, Acc) ->
+ Fun = fun(FullDocInfo, Acc) ->
{ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo),
TargetReply = read_doc(TargetDb, DocId),
case FilterFun(DocId, SourceDoc) of
@@ -181,7 +181,7 @@ compare_dbs(Source, Target, FilterFun) ->
{ok, [ValidReply|Acc]}
end
end,
- {ok, _, AllReplies} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []),
ok = couch_db:close(SourceDb),
ok = couch_db:close(TargetDb),
{ok, TargetDbInfo, AllReplies}.
diff --git a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
index 9e92f1c31..c1681781f 100644
--- a/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_missing_stubs_tests.erl
@@ -131,9 +131,9 @@ populate_db(DbName) ->
update_db_docs(DbName, Times) ->
{ok, Db} = couch_db:open_int(DbName, []),
- {ok, _, _} = couch_db:enum_docs(
+ {ok, _} = couch_db:fold_docs(
Db,
- fun(FDI, _, Acc) -> db_fold_fun(FDI, Acc) end,
+ fun(FDI, Acc) -> db_fold_fun(FDI, Acc) end,
{DbName, Times},
[]),
ok = couch_db:close(Db).
diff --git a/src/couch_replicator/test/couch_replicator_proxy_tests.erl b/src/couch_replicator/test/couch_replicator_proxy_tests.erl
index a40e5b166..4f545bcb5 100644
--- a/src/couch_replicator/test/couch_replicator_proxy_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_proxy_tests.erl
@@ -14,7 +14,7 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch_replicator/src/couch_replicator.hrl").
--include_lib("couch_replicator/src/couch_replicator_api_wrap.hrl").
+-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
setup() ->
diff --git a/src/couch_replicator/test/couch_replicator_selector_tests.erl b/src/couch_replicator/test/couch_replicator_selector_tests.erl
index 98c609984..a7f4c5df3 100644
--- a/src/couch_replicator/test/couch_replicator_selector_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_selector_tests.erl
@@ -65,7 +65,7 @@ compare_dbs(Source, Target, FilterFun) ->
{ok, SourceDb} = couch_db:open_int(Source, []),
{ok, TargetDb} = couch_db:open_int(Target, []),
{ok, TargetDbInfo} = couch_db:get_db_info(TargetDb),
- Fun = fun(FullDocInfo, _, Acc) ->
+ Fun = fun(FullDocInfo, Acc) ->
{ok, DocId, SourceDoc} = read_doc(SourceDb, FullDocInfo),
TargetReply = read_doc(TargetDb, DocId),
case FilterFun(DocId, SourceDoc) of
@@ -77,7 +77,7 @@ compare_dbs(Source, Target, FilterFun) ->
{ok, [ValidReply|Acc]}
end
end,
- {ok, _, AllReplies} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ {ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []),
ok = couch_db:close(SourceDb),
ok = couch_db:close(TargetDb),
{ok, TargetDbInfo, AllReplies}.
diff --git a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
index 6f3308c39..af3a285f5 100644
--- a/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
+++ b/src/couch_replicator/test/couch_replicator_small_max_request_size_target.erl
@@ -61,8 +61,8 @@ reduce_max_request_size_test_() ->
% attachments which exceed maximum request size are simply
% closed instead of returning a 413 response. That makes these
% tests flaky.
- % ++ [{Pair, fun should_replicate_one_with_attachment/2}
- % || Pair <- Pairs]
+ ++ [{Pair, fun should_replicate_one_with_attachment/2}
+ || Pair <- Pairs]
}
}.
@@ -90,12 +90,12 @@ should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
% POST-ing individual documents directly and skip bulk_docs. Test that case
% separately
% See note in main test function why this was disabled.
-% should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
-% {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
-% {inorder, [should_populate_source_one_large_attachment(Source),
-% should_populate_source(Source),
-% should_replicate(Source, Target),
-% should_compare_databases(Source, Target, [<<"doc0">>])]}}.
+should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
+ {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [should_populate_source_one_large_attachment(Source),
+ should_populate_source(Source),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target, [<<"doc0">>])]}}.
should_populate_source({remote, Source}) ->
@@ -112,11 +112,11 @@ should_populate_source_one_large_one_small(Source) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.
-% should_populate_source_one_large_attachment({remote, Source}) ->
-% should_populate_source_one_large_attachment(Source);
+should_populate_source_one_large_attachment({remote, Source}) ->
+ should_populate_source_one_large_attachment(Source);
-% should_populate_source_one_large_attachment(Source) ->
-% {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
+should_populate_source_one_large_attachment(Source) ->
+ {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
should_replicate({remote, Source}, Target) ->
@@ -156,8 +156,8 @@ one_large_one_small(DbName, Large, Small) ->
add_doc(DbName, <<"doc1">>, Small, 0).
-% one_large_attachment(DbName, Size, AttSize) ->
-% add_doc(DbName, <<"doc0">>, Size, AttSize).
+one_large_attachment(DbName, Size, AttSize) ->
+ add_doc(DbName, <<"doc0">>, Size, AttSize).
add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
diff --git a/src/couch_replicator/test/couch_replicator_test_helper.erl b/src/couch_replicator/test/couch_replicator_test_helper.erl
index f87c7636b..8ee2114f0 100644
--- a/src/couch_replicator/test/couch_replicator_test_helper.erl
+++ b/src/couch_replicator/test/couch_replicator_test_helper.erl
@@ -22,7 +22,7 @@ compare_dbs(Source, Target, ExceptIds) ->
{ok, SourceDb} = couch_db:open_int(Source, []),
{ok, TargetDb} = couch_db:open_int(Target, []),
- Fun = fun(FullDocInfo, _, Acc) ->
+ Fun = fun(FullDocInfo, Acc) ->
{ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
Id = DocSource#doc.id,
case lists:member(Id, ExceptIds) of
@@ -35,7 +35,7 @@ compare_dbs(Source, Target, ExceptIds) ->
{ok, Acc}
end,
- {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
ok = couch_db:close(SourceDb),
ok = couch_db:close(TargetDb).
diff --git a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
index 73ea7f1f4..c2fcf8bf1 100644
--- a/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_use_checkpoints_tests.erl
@@ -149,7 +149,7 @@ populate_db(DbName, DocCount) ->
compare_dbs(Source, Target) ->
{ok, SourceDb} = couch_db:open_int(Source, []),
{ok, TargetDb} = couch_db:open_int(Target, []),
- Fun = fun(FullDocInfo, _, Acc) ->
+ Fun = fun(FullDocInfo, Acc) ->
{ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
{Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
DocId = couch_util:get_value(<<"_id">>, Props),
@@ -168,7 +168,7 @@ compare_dbs(Source, Target) ->
?assertEqual(DocJson, DocTargetJson),
{ok, Acc}
end,
- {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+ {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
ok = couch_db:close(SourceDb),
ok = couch_db:close(TargetDb).
diff --git a/src/couch_stats/src/couch_stats.app.src b/src/couch_stats/src/couch_stats.app.src
index 6339a0f1d..990f8de62 100644
--- a/src/couch_stats/src/couch_stats.app.src
+++ b/src/couch_stats/src/couch_stats.app.src
@@ -14,7 +14,7 @@
{description, "Simple statistics collection"},
{vsn, git},
{registered, [couch_stats_aggregator, couch_stats_process_tracker]},
- {applications, [kernel, stdlib, folsom, couch_log]},
+ {applications, [kernel, stdlib, folsom]},
{mod, {couch_stats_app, []}},
{env, []}
]}.
diff --git a/src/couch_stats/src/couch_stats.erl b/src/couch_stats/src/couch_stats.erl
index 59175f7a8..4fde14acb 100644
--- a/src/couch_stats/src/couch_stats.erl
+++ b/src/couch_stats/src/couch_stats.erl
@@ -119,7 +119,7 @@ notify_existing_metric(Name, Op, Type) ->
try
ok = folsom_metrics:notify_existing_metric(Name, Op, Type)
catch _:_ ->
- couch_log:notice("unknown metric: ~p", [Name]),
+ error_logger:error_msg("unknown metric: ~p", [Name]),
{error, unknown_metric}
end.
diff --git a/src/couch_stats/src/couch_stats_process_tracker.erl b/src/couch_stats/src/couch_stats_process_tracker.erl
index 4765734e9..fef955efd 100644
--- a/src/couch_stats/src/couch_stats_process_tracker.erl
+++ b/src/couch_stats/src/couch_stats_process_tracker.erl
@@ -37,8 +37,8 @@ track(Name) ->
track(self(), Name).
-spec track(pid(), any()) -> ok.
-track(Name, Pid) ->
- gen_server:cast(?MODULE, {track, Name, Pid}).
+track(Pid, Name) ->
+ gen_server:cast(?MODULE, {track, Pid, Name}).
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
@@ -48,7 +48,7 @@ init([]) ->
{ok, #st{}}.
handle_call(Msg, _From, State) ->
- couch_log:notice("~p received unknown call ~p", [?MODULE, Msg]),
+ error_logger:error_msg("~p received unknown call ~p", [?MODULE, Msg]),
{noreply, State}.
handle_cast({track, Pid, Name}, State) ->
@@ -57,13 +57,13 @@ handle_cast({track, Pid, Name}, State) ->
ets:insert(?MODULE, {Ref, Name}),
{noreply, State};
handle_cast(Msg, State) ->
- couch_log:notice("~p received unknown cast ~p", [?MODULE, Msg]),
+ error_logger:error_msg("~p received unknown cast ~p", [?MODULE, Msg]),
{noreply, State}.
handle_info({'DOWN', Ref, _, _, _}=Msg, State) ->
case ets:lookup(?MODULE, Ref) of
[] ->
- couch_log:notice(
+ error_logger:error_msg(
"~p received unknown exit; message was ~p", [?MODULE, Msg]
);
[{Ref, Name}] ->
@@ -72,7 +72,7 @@ handle_info({'DOWN', Ref, _, _, _}=Msg, State) ->
end,
{noreply, State};
handle_info(Msg, State) ->
- couch_log:notice("~p received unknown message ~p", [?MODULE, Msg]),
+ error_logger:error_msg("~p received unknown message ~p", [?MODULE, Msg]),
{noreply, State}.
terminate(_Reason, _State) ->
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index d793f4f13..94ffd5643 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -28,7 +28,7 @@ go(DbName, Options) ->
{error, file_exists};
false ->
{Shards, Doc} = generate_shard_map(DbName, Options),
- CreateShardResult = create_shard_files(Shards),
+ CreateShardResult = create_shard_files(Shards, Options),
case CreateShardResult of
enametoolong ->
{error, {database_name_too_long, DbName}};
@@ -64,12 +64,12 @@ generate_shard_map(DbName, Options) ->
% the DB already exists, and may have a different Suffix
ok;
{not_found, _} ->
- Doc = make_document(Shards, Suffix)
+ Doc = make_document(Shards, Suffix, Options)
end,
{Shards, Doc}.
-create_shard_files(Shards) ->
- Workers = fabric_util:submit_jobs(Shards, create_db, []),
+create_shard_files(Shards, Options) ->
+ Workers = fabric_util:submit_jobs(Shards, create_db, [Options]),
RexiMon = fabric_util:create_monitors(Shards),
try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Workers) of
{error, file_exists} ->
@@ -146,16 +146,16 @@ maybe_stop(W, Counters) ->
{ok, {W, Counters}};
false ->
case lists:sum([1 || {_, ok} <- Counters]) of
- W ->
+ NumOk when NumOk >= (W div 2 + 1) ->
{stop, ok};
- NumOk when NumOk >= (W div 2 + 1) ->
+ NumOk when NumOk > 0 ->
{stop, accepted};
_ ->
{error, internal_server_error}
end
end.
-make_document([#shard{dbname=DbName}|_] = Shards, Suffix) ->
+make_document([#shard{dbname=DbName}|_] = Shards, Suffix, Options) ->
{RawOut, ByNodeOut, ByRangeOut} =
lists:foldl(fun(#shard{node=N, range=[B,E]}, {Raw, ByNode, ByRange}) ->
Range = ?l2b([couch_util:to_hex(<<B:32/integer>>), "-",
@@ -164,12 +164,19 @@ make_document([#shard{dbname=DbName}|_] = Shards, Suffix) ->
{[[<<"add">>, Range, Node] | Raw], orddict:append(Node, Range, ByNode),
orddict:append(Range, Node, ByRange)}
end, {[], [], []}, Shards),
- #doc{id=DbName, body = {[
- {<<"shard_suffix">>, Suffix},
- {<<"changelog">>, lists:sort(RawOut)},
- {<<"by_node">>, {[{K,lists:sort(V)} || {K,V} <- ByNodeOut]}},
- {<<"by_range">>, {[{K,lists:sort(V)} || {K,V} <- ByRangeOut]}}
- ]}}.
+ EngineProp = case couch_util:get_value(engine, Options) of
+ E when is_binary(E) -> [{<<"engine">>, E}];
+ _ -> []
+ end,
+ #doc{
+ id = DbName,
+ body = {[
+ {<<"shard_suffix">>, Suffix},
+ {<<"changelog">>, lists:sort(RawOut)},
+ {<<"by_node">>, {[{K,lists:sort(V)} || {K,V} <- ByNodeOut]}},
+ {<<"by_range">>, {[{K,lists:sort(V)} || {K,V} <- ByRangeOut]}}
+ ] ++ EngineProp}
+ }.
db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
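For illustration only, a sketch of the shard-map document body that make_document/3 now emits when the create options carry a binary engine name; every value below is invented, and without the engine option the <<"engine">> member is simply absent.

Body = {[
    {<<"shard_suffix">>, <<".1520000000">>},
    {<<"changelog">>, [[<<"add">>, <<"00000000-ffffffff">>, <<"node1@127.0.0.1">>]]},
    {<<"by_node">>, {[{<<"node1@127.0.0.1">>, [<<"00000000-ffffffff">>]}]}},
    {<<"by_range">>, {[{<<"00000000-ffffffff">>, [<<"node1@127.0.0.1">>]}]}},
    %% appended only when couch_util:get_value(engine, Options) is a binary
    {<<"engine">>, <<"couch_bt_engine">>}
]}.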
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 9cf653c59..4a69e7ea1 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -82,7 +82,7 @@ changes(DbName, Options, StartVector, DbOptions) ->
},
try
{ok, #cacc{seq=LastSeq, pending=Pending, epochs=Epochs}} =
- couch_db:changes_since(Db, StartSeq, Enum, Opts, Acc0),
+ couch_db:fold_changes(Db, StartSeq, Enum, Acc0, Opts),
rexi:stream_last({complete, [
{seq, {LastSeq, uuid(Db), couch_db:owner_of(Epochs, LastSeq)}},
{pending, Pending}
@@ -224,7 +224,7 @@ get_missing_revs(DbName, IdRevsList, Options) ->
Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
{ok, lists:zipwith(fun({Id, Revs}, FullDocInfoResult) ->
case FullDocInfoResult of
- {ok, #full_doc_info{rev_tree=RevisionTree} = FullInfo} ->
+ #full_doc_info{rev_tree=RevisionTree} = FullInfo ->
MissingRevs = couch_key_tree:find_missing(RevisionTree, Revs),
{Id, MissingRevs, possible_ancestors(FullInfo, MissingRevs)};
not_found ->
@@ -255,8 +255,7 @@ group_info(DbName, DDocId, DbOptions) ->
reset_validation_funs(DbName) ->
case get_or_create_db(DbName, []) of
{ok, Db} ->
- DbPid = couch_db:get_pid(Db),
- gen_server:cast(DbPid, {load_validation_funs, undefined});
+ couch_db:reload_validation_funs(Db);
_ ->
ok
end.
@@ -344,6 +343,8 @@ reduce_cb(ok, ddoc_updated) ->
rexi:reply({ok, ddoc_updated}).
+changes_enumerator(#full_doc_info{} = FDI, Acc) ->
+ changes_enumerator(couch_doc:to_doc_info(FDI), Acc);
changes_enumerator(#doc_info{id= <<"_local/", _/binary>>, high_seq=Seq}, Acc) ->
{ok, Acc#cacc{seq = Seq, pending = Acc#cacc.pending-1}};
changes_enumerator(DocInfo, Acc) ->
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index de21dde08..ac16dac52 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -59,7 +59,8 @@ go(DbName, Options, QueryArgs, Callback, Acc0) ->
conflicts = Conflicts,
skip = Skip,
keys = Keys0,
- extra = Extra
+ extra = Extra,
+ update_seq = UpdateSeq
} = QueryArgs,
DocOptions1 = case Conflicts of
true -> [conflicts|DocOptions0];
@@ -97,7 +98,12 @@ go(DbName, Options, QueryArgs, Callback, Acc0) ->
end,
case Resp of
{ok, TotalRows} ->
- {ok, Acc1} = Callback({meta, [{total, TotalRows}]}, Acc0),
+ Meta = case UpdateSeq of
+ false -> [{total, TotalRows}, {offset, null}];
+ true ->
+ [{total, TotalRows}, {offset, null}, {update_seq, null}]
+ end,
+ {ok, Acc1} = Callback({meta, Meta}, Acc0),
{ok, Acc2} = doc_receive_loop(
Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1
),
diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl
index 78cac7f5d..f4a760d1c 100644
--- a/src/mango/src/mango_cursor_special.erl
+++ b/src/mango/src/mango_cursor_special.erl
@@ -31,10 +31,13 @@
create(Db, Indexes, Selector, Opts) ->
InitialRange = mango_idx_view:field_ranges(Selector),
CatchAll = [{<<"_id">>, {'$gt', null, '$lt', mango_json_max}}],
- FieldRanges = lists:append(CatchAll, InitialRange),
+ % order matters here - we only want to use the catchall index
+ % if no other range can fulfill the query, because we know the
+ % catchall is the most expensive range
+ FieldRanges = InitialRange ++ CatchAll,
Composited = mango_cursor_view:composite_indexes(Indexes, FieldRanges),
{Index, IndexRanges} = mango_cursor_view:choose_best_index(Db, Composited),
-
+
Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()),
Skip = couch_util:get_value(skip, Opts, 0),
Fields = couch_util:get_value(fields, Opts, all_fields),
diff --git a/src/mango/src/mango_httpd.erl b/src/mango/src/mango_httpd.erl
index 3ed51e2f0..2e8777135 100644
--- a/src/mango/src/mango_httpd.erl
+++ b/src/mango/src/mango_httpd.erl
@@ -41,7 +41,7 @@ handle_req(#httpd{} = Req, Db0) ->
case mango_error:info(Module, Reason) of
{500, ErrorStr, ReasonStr} ->
Stack = erlang:get_stacktrace(),
- chttpd:send_error(Req, 500, [], ErrorStr, ReasonStr, Stack);
+ chttpd:send_error(Req, {ErrorStr, ReasonStr, Stack});
{Code, ErrorStr, ReasonStr} ->
chttpd:send_error(Req, Code, ErrorStr, ReasonStr)
end
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
index 369e2cd08..29b4441a1 100644
--- a/src/mango/src/mango_idx_text.erl
+++ b/src/mango/src/mango_idx_text.erl
@@ -330,7 +330,7 @@ indexable_fields(Fields, {op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
Fields0 = indexable_fields(Fields, ExistsQuery),
indexable_fields(Fields0, Arg);
% forces "$exists" : false to use _all_docs
-indexable_fields(Fields, {op_not, {ExistsQuery, false}}) ->
+indexable_fields(_, {op_not, {_, false}}) ->
[];
indexable_fields(Fields, {op_insert, Arg}) when is_binary(Arg) ->
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl
index 61d79b7ec..6150e1d19 100644
--- a/src/mango/src/mango_native_proc.erl
+++ b/src/mango/src/mango_native_proc.erl
@@ -113,6 +113,9 @@ handle_cast(garbage_collect, St) ->
erlang:garbage_collect(),
{noreply, St};
+handle_cast(stop, St) ->
+ {stop, normal, St};
+
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
@@ -169,19 +172,12 @@ get_text_entries({IdxProps}, Doc) ->
get_index_partial_filter_selector(IdxProps) ->
- case couch_util:get_value(<<"partial_filter_selector">>, IdxProps) of
- undefined ->
+ case couch_util:get_value(<<"partial_filter_selector">>, IdxProps, {[]}) of
+ {[]} ->
% this is to support legacy text indexes that had the partial_filter_selector
% set as selector
- case couch_util:get_value(<<"selector">>, IdxProps, []) of
- [] ->
- {[]};
- Else ->
- Else
- end;
- [] ->
- {[]};
- Else ->
+ couch_util:get_value(<<"selector">>, IdxProps, {[]});
+ Else ->
Else
end.
@@ -363,4 +359,35 @@ validate_index_info(IndexInfo) ->
[invalid_index | Results0]
end
end, [], IdxTypes),
- lists:member(valid_index, Results).
\ No newline at end of file
+ lists:member(valid_index, Results).
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+handle_garbage_collect_cast_test() ->
+ Pid = self(),
+ {_, TracerRef} = spawn_monitor(fun() ->
+ erlang:trace(Pid, true, [garbage_collection]),
+ receive {trace, Pid, gc_start, _} ->
+ erlang:trace(Pid, false, [garbage_collection]),
+ exit(gc_start)
+ end
+ end),
+ erlang:yield(),
+ ?assertEqual({noreply, []}, handle_cast(garbage_collect, [])),
+ receive
+ {'DOWN', TracerRef, _, _, Msg} -> ?assertEqual(gc_start, Msg)
+ after 1000 ->
+ erlang:error({assertion_failed, [{module, ?MODULE}, {line, ?LINE},
+ {expected, gc_start}, {reason, timeout}]})
+ end.
+
+handle_stop_cast_test() ->
+ ?assertEqual({stop, normal, []}, handle_cast(stop, [])).
+
+handle_invalid_cast_test() ->
+ ?assertEqual({stop, {invalid_cast, random}, []}, handle_cast(random, [])).
+
+-endif.
diff --git a/src/mango/test/03-operator-test.py b/src/mango/test/03-operator-test.py
index 239cc7d41..4650c7e84 100644
--- a/src/mango/test/03-operator-test.py
+++ b/src/mango/test/03-operator-test.py
@@ -264,6 +264,13 @@ class OperatorTextTests(mango.UserDocsTextTests, OperatorTests):
class OperatorAllDocsTests(mango.UserDocsTestsNoIndexes, OperatorTests):
- pass
+ def test_range_id_eq(self):
+ doc_id = "8e1c90c0-ac18-4832-8081-40d14325bde0"
+ r = self.db.find({
+ "_id": doc_id
+ }, explain=True, return_raw=True)
+
+ self.assertEqual(r["mrargs"]["end_key"], doc_id)
+ self.assertEqual(r["mrargs"]["start_key"], doc_id)
diff --git a/src/mango/test/16-index-selectors-test.py b/src/mango/test/16-index-selectors-test.py
index 389f5f41e..a876dc68f 100644
--- a/src/mango/test/16-index-selectors-test.py
+++ b/src/mango/test/16-index-selectors-test.py
@@ -272,6 +272,16 @@ class IndexSelectorJson(mango.DbPerClass):
self.assertEqual(len(docs), 3)
@unittest.skipUnless(mango.has_text_service(), "requires text service")
+ def test_text_old_selector_still_supported_via_api(self):
+ selector = {"location": {"$gte": "FRA"}}
+ self.db.create_text_index(fields=[{"name":"location", "type":"string"}],
+ selector=selector,
+ ddoc="Selected",
+ name="Selected")
+ docs = self.db.find({"location": {"$exists":True}}, use_index='Selected')
+ self.assertEqual(len(docs), 3)
+
+ @unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_text_partial_filter_only_in_return_if_not_default(self):
self.db.create_text_index(fields=[{"name":"location", "type":"string"}])
index = self.db.list_indexes()[1]
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index 560914b8c..9b6b998cd 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -82,13 +82,15 @@ class Database(object):
def recreate(self):
r = self.sess.get(self.url)
- db_info = r.json()
- docs = db_info["doc_count"] + db_info["doc_del_count"]
- if docs == 0:
- # db never used - create unnecessary
- return
- self.delete()
+ if r.status_code == 200:
+ db_info = r.json()
+ docs = db_info["doc_count"] + db_info["doc_del_count"]
+ if docs == 0:
+                # db never used - no need to recreate
+ return
+ self.delete()
self.create()
+ self.recreate()
def save_doc(self, doc):
self.save_docs([doc])
@@ -143,7 +145,7 @@ class Database(object):
return created
def create_text_index(self, analyzer=None, idx_type="text",
- partial_filter_selector=None, default_field=None, fields=None,
+ partial_filter_selector=None, selector=None, default_field=None, fields=None,
name=None, ddoc=None,index_array_lengths=None):
body = {
"index": {
@@ -159,6 +161,8 @@ class Database(object):
body["index"]["default_field"] = default_field
if index_array_lengths is not None:
body["index"]["index_array_lengths"] = index_array_lengths
+ if selector is not None:
+ body["index"]["selector"] = selector
if partial_filter_selector is not None:
body["index"]["partial_filter_selector"] = partial_filter_selector
if fields is not None:
diff --git a/src/mem3/include/mem3.hrl b/src/mem3/include/mem3.hrl
index d6ac0bed2..6579210ed 100644
--- a/src/mem3/include/mem3.hrl
+++ b/src/mem3/include/mem3.hrl
@@ -16,7 +16,8 @@
node :: node() | '_',
dbname :: binary(),
range :: [non_neg_integer() | '$1' | '$2'] | '_',
- ref :: reference() | 'undefined' | '_'
+ ref :: reference() | 'undefined' | '_',
+ opts :: list()
}).
%% Do not reference outside of mem3.
@@ -26,7 +27,8 @@
dbname :: binary(),
range :: [non_neg_integer() | '$1' | '$2'] | '_',
ref :: reference() | 'undefined' | '_',
- order :: non_neg_integer() | 'undefined' | '_'
+ order :: non_neg_integer() | 'undefined' | '_',
+ opts :: list()
}).
%% types
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 047154af8..0e5eabfe3 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -23,7 +23,7 @@
-export([get_placement/1]).
%% For mem3 use only.
--export([name/1, node/1, range/1]).
+-export([name/1, node/1, range/1, engine/1]).
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -99,7 +99,8 @@ shards_int(DbName, Options) ->
name = ShardDbName,
dbname = ShardDbName,
range = [0, (2 bsl 31)-1],
- order = undefined}];
+ order = undefined,
+ opts = []}];
ShardDbName ->
%% shard_db is treated as a single sharded db to support calls to db_info
%% and view_all_docs
@@ -107,7 +108,8 @@ shards_int(DbName, Options) ->
node = node(),
name = ShardDbName,
dbname = ShardDbName,
- range = [0, (2 bsl 31)-1]}];
+ range = [0, (2 bsl 31)-1],
+ opts = []}];
_ ->
mem3_shards:for_db(DbName, Options)
end.
@@ -317,6 +319,17 @@ name(#ordered_shard{name=Name}) ->
owner(DbName, DocId, Nodes) ->
hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
+engine(#shard{opts=Opts}) ->
+ engine(Opts);
+engine(#ordered_shard{opts=Opts}) ->
+ engine(Opts);
+engine(Opts) when is_list(Opts) ->
+ case couch_util:get_value(engine, Opts) of
+ Engine when is_binary(Engine) ->
+ [{engine, Engine}];
+ _ ->
+ []
+ end.
-ifdef(TEST).
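A small sketch of how the new opts field and mem3:engine/1 fit together; the record values are invented and the snippet assumes mem3/include/mem3.hrl is included. The engine option recorded in the shard map is recovered from a shard record so it can be forwarded to couch_server:create/2, as create_if_missing/2 does later in this patch.

Shard = #shard{
    name = <<"shards/00000000-ffffffff/db1.1520000000">>,
    node = node(),
    dbname = <<"db1">>,
    range = [0, 4294967295],
    opts = [{engine, <<"couch_bt_engine">>}]
},
[{engine, <<"couch_bt_engine">>}] = mem3:engine(Shard),
%% with opts = [], mem3:engine/1 returns [] and the default engine applies
[] = mem3:engine([]).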
diff --git a/src/mem3/src/mem3_nodes.erl b/src/mem3/src/mem3_nodes.erl
index 555389b90..019ceaf32 100644
--- a/src/mem3/src/mem3_nodes.erl
+++ b/src/mem3/src/mem3_nodes.erl
@@ -102,8 +102,9 @@ initialize_nodelist() ->
Doc = #doc{id = couch_util:to_binary(node())},
{ok, _} = couch_db:update_doc(Db, Doc, [])
end,
+ Seq = couch_db:get_update_seq(Db),
couch_db:close(Db),
- couch_db:get_update_seq(Db).
+ Seq.
first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, Acc) ->
{ok, Acc};
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
index e178fad6d..942f8a8e0 100644
--- a/src/mem3/src/mem3_rep.erl
+++ b/src/mem3/src/mem3_rep.erl
@@ -21,7 +21,7 @@
]).
-export([
- changes_enumerator/3
+ changes_enumerator/2
]).
@@ -177,8 +177,8 @@ repl(Db, Acc0) ->
true ->
{ok, 0};
false ->
- Fun = fun ?MODULE:changes_enumerator/3,
- {ok, _, Acc2} = couch_db:enum_docs_since(Db, Seq, Fun, Acc1, []),
+ Fun = fun ?MODULE:changes_enumerator/2,
+ {ok, Acc2} = couch_db:fold_changes(Db, Seq, Fun, Acc1),
{ok, #acc{seq = LastSeq}} = replicate_batch(Acc2),
{ok, couch_db:count_changes_since(Db, LastSeq)}
end.
@@ -230,11 +230,10 @@ compare_epochs(Acc) ->
Seq = mem3_rpc:find_common_seq(Node, Name, UUID, Epochs),
Acc#acc{seq = Seq, history = {[]}}.
-changes_enumerator(#doc_info{id=DocId}, Reds, #acc{db=Db}=Acc) ->
+changes_enumerator(#doc_info{id=DocId}, #acc{db=Db}=Acc) ->
{ok, FDI} = couch_db:get_full_doc_info(Db, DocId),
- changes_enumerator(FDI, Reds, Acc);
-changes_enumerator(#full_doc_info{}=FDI, _,
- #acc{revcount=C, infos=Infos}=Acc0) ->
+ changes_enumerator(FDI, Acc);
+changes_enumerator(#full_doc_info{}=FDI, #acc{revcount=C, infos=Infos}=Acc0) ->
#doc_info{
high_seq=Seq,
revs=Revs
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index 5e215e201..0975d2f8f 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -69,19 +69,14 @@ for_docid(DbName, DocId) ->
for_docid(DbName, DocId, Options) ->
HashKey = mem3_util:hash(DocId),
ShardHead = #shard{
- name = '_',
- node = '_',
dbname = DbName,
- range = ['$1','$2'],
- ref = '_'
+ range = ['$1', '$2'],
+ _ = '_'
},
OrderedShardHead = #ordered_shard{
- name = '_',
- node = '_',
dbname = DbName,
- range = ['$1','$2'],
- ref = '_',
- order = '_'
+ range = ['$1', '$2'],
+ _ = '_'
},
Conditions = [{'=<', '$1', HashKey}, {'=<', HashKey, '$2'}],
ShardSpec = {ShardHead, Conditions, ['$_']},
@@ -107,18 +102,13 @@ for_shard_name(ShardName, Options) ->
DbName = mem3:dbname(ShardName),
ShardHead = #shard{
name = ShardName,
- node = '_',
dbname = DbName,
- range = '_',
- ref = '_'
+ _ = '_'
},
OrderedShardHead = #ordered_shard{
name = ShardName,
- node = '_',
dbname = DbName,
- range = '_',
- ref = '_',
- order = '_'
+ _ = '_'
},
ShardSpec = {ShardHead, [], ['$_']},
OrderedShardSpec = {OrderedShardHead, [], ['$_']},
@@ -160,7 +150,7 @@ fold(Fun, Acc) ->
{ok, Db} = mem3_util:ensure_exists(DbName),
FAcc = {Db, Fun, Acc},
try
- {ok, _, LastAcc} = couch_db:enum_docs(Db, fun fold_fun/3, FAcc, []),
+ {ok, LastAcc} = couch_db:fold_docs(Db, fun fold_fun/2, FAcc),
{_Db, _UFun, UAcc} = LastAcc,
UAcc
after
@@ -305,10 +295,10 @@ start_changes_listener(SinceSeq) ->
end),
Pid.
-fold_fun(#full_doc_info{}=FDI, _, Acc) ->
+fold_fun(#full_doc_info{}=FDI, Acc) ->
DI = couch_doc:to_doc_info(FDI),
- fold_fun(DI, nil, Acc);
-fold_fun(#doc_info{}=DI, _, {Db, UFun, UAcc}) ->
+ fold_fun(DI, Acc);
+fold_fun(#doc_info{}=DI, {Db, UFun, UAcc}) ->
case couch_db:open_doc(Db, DI, [ejson_body, conflicts]) of
{ok, Doc} ->
{Props} = Doc#doc.body,
@@ -322,8 +312,9 @@ fold_fun(#doc_info{}=DI, _, {Db, UFun, UAcc}) ->
get_update_seq() ->
DbName = config:get("mem3", "shards_db", "_dbs"),
{ok, Db} = mem3_util:ensure_exists(DbName),
+ Seq = couch_db:get_update_seq(Db),
couch_db:close(Db),
- couch_db:get_update_seq(Db).
+ Seq.
listen_for_changes(Since) ->
DbName = config:get("mem3", "shards_db", "_dbs"),
@@ -361,7 +352,7 @@ changes_callback({change, {Change}, _}, _) ->
ets:insert(?OPENERS, {DbName, Writer}),
Msg = {cache_insert_change, DbName, Writer, Seq},
gen_server:cast(?MODULE, Msg),
- [create_if_missing(mem3:name(S)) || S
+ [create_if_missing(mem3:name(S), mem3:engine(S)) || S
<- Shards, mem3:node(S) =:= node()]
end
end
@@ -412,20 +403,18 @@ in_range(Shard, HashKey) ->
[B, E] = mem3:range(Shard),
B =< HashKey andalso HashKey =< E.
-create_if_missing(Name) ->
- DbDir = config:get("couchdb", "database_dir"),
- Filename = filename:join(DbDir, ?b2l(Name) ++ ".couch"),
- case filelib:is_regular(Filename) of
- true ->
- ok;
- false ->
- case couch_server:create(Name, [?ADMIN_CTX]) of
- {ok, Db} ->
- couch_db:close(Db);
- Error ->
- couch_log:error("~p tried to create ~s, got ~p",
- [?MODULE, Name, Error])
- end
+create_if_missing(Name, Options) ->
+ case couch_server:exists(Name) of
+ true ->
+ ok;
+ false ->
+ case couch_server:create(Name, [?ADMIN_CTX] ++ Options) of
+ {ok, Db} ->
+ couch_db:close(Db);
+ Error ->
+ couch_log:error("~p tried to create ~s, got ~p",
+ [?MODULE, Name, Error])
+ end
end.
cache_insert(#st{cur_size=Cur}=St, DbName, Writer, Timeout) ->
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index 71ef5b6c9..0b69d790d 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -161,7 +161,8 @@ build_shards_by_node(DbName, DocProps) ->
name_shard(#shard{
dbname = DbName,
node = to_atom(Node),
- range = [Beg, End]
+ range = [Beg, End],
+ opts = get_engine_opt(DocProps)
}, Suffix)
end, Ranges)
end, ByNode).
@@ -178,7 +179,8 @@ build_shards_by_range(DbName, DocProps) ->
dbname = DbName,
node = to_atom(Node),
range = [Beg, End],
- order = Order
+ order = Order,
+ opts = get_engine_opt(DocProps)
}, Suffix)
end, lists:zip(Nodes, lists:seq(1, length(Nodes))))
end, ByRange).
@@ -195,6 +197,14 @@ to_integer(N) when is_binary(N) ->
to_integer(N) when is_list(N) ->
list_to_integer(N).
+get_engine_opt(DocProps) ->
+ case couch_util:get_value(<<"engine">>, DocProps) of
+ Engine when is_binary(Engine) ->
+ [{engine, Engine}];
+ _ ->
+ []
+ end.
+
n_val(undefined, NodeCount) ->
n_val(config:get("cluster", "n", "3"), NodeCount);
n_val(N, NodeCount) when is_list(N) ->
@@ -248,7 +258,8 @@ downcast(#ordered_shard{}=S) ->
node = S#ordered_shard.node,
dbname = S#ordered_shard.dbname,
range = S#ordered_shard.range,
- ref = S#ordered_shard.ref
+ ref = S#ordered_shard.ref,
+ opts = S#ordered_shard.opts
};
downcast(Shards) when is_list(Shards) ->
[downcast(Shard) || Shard <- Shards].
diff --git a/src/mem3/test/mem3_util_test.erl b/src/mem3/test/mem3_util_test.erl
index 38a35309b..163580cc2 100644
--- a/src/mem3/test/mem3_util_test.erl
+++ b/src/mem3/test/mem3_util_test.erl
@@ -85,35 +85,35 @@ build_shards_test() ->
[{shard,<<"shards/00000000-1fffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[0,536870911],
- undefined},
+ undefined,[]},
{shard,<<"shards/20000000-3fffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[536870912,1073741823],
- undefined},
+ undefined,[]},
{shard,<<"shards/40000000-5fffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[1073741824,1610612735],
- undefined},
+ undefined,[]},
{shard,<<"shards/60000000-7fffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[1610612736,2147483647],
- undefined},
+ undefined,[]},
{shard,<<"shards/80000000-9fffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[2147483648,2684354559],
- undefined},
+ undefined,[]},
{shard,<<"shards/a0000000-bfffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[2684354560,3221225471],
- undefined},
+ undefined,[]},
{shard,<<"shards/c0000000-dfffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[3221225472,3758096383],
- undefined},
+ undefined,[]},
{shard,<<"shards/e0000000-ffffffff/testdb1">>,
'bigcouch@node.local',<<"testdb1">>,
[3758096384,4294967295],
- undefined}],
+ undefined,[]}],
?assertEqual(ExpectedShards1, Shards1),
ok.
diff --git a/test/javascript/run b/test/javascript/run
index c611be51e..8ae424467 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -107,7 +107,10 @@ def options():
dest="ignore", help="Ignore test suites"),
op.make_option("-u", "--suites", type="string", action="callback",
default=None, callback=get_delimited_list,
- dest="suites", help="Run specific suites")
+ dest="suites", help="Run specific suites"),
+ op.make_option("-p", "--path", type="string",
+ default="test/javascript/tests",
+ dest="test_path", help="Path where the tests are located")
]
@@ -118,10 +121,9 @@ def main():
run_list = []
ignore_list = []
tests = []
-
- run_list = ["test/javascript/tests"] if not opts.suites else opts.suites
- run_list = build_test_case_paths(run_list)
- ignore_list = build_test_case_paths(opts.ignore)
+ run_list = [opts.test_path] if not opts.suites else opts.suites
+ run_list = build_test_case_paths(opts.test_path, run_list)
+ ignore_list = build_test_case_paths(opts.test_path, opts.ignore)
# sort is needed because certain tests fail if executed out of order
tests = sorted(list(set(run_list)-set(ignore_list)))
@@ -151,7 +153,7 @@ def main():
failed, passed) + os.linesep)
exit(failed > 0)
-def build_test_case_paths(args=None):
+def build_test_case_paths(path, args=None):
tests = []
if args is None:
args = []
@@ -161,7 +163,7 @@ def build_test_case_paths(args=None):
elif os.path.isfile(name):
check = tests.append(name)
else:
- pname = os.path.join("test/javascript/tests", name)
+ pname = os.path.join(path, name)
if os.path.isfile(pname):
tests.append(pname)
elif os.path.isfile(pname + ".js"):
diff --git a/test/javascript/tests-cluster/with-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/db-creation.js
new file mode 100644
index 000000000..f8efd6e68
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/db-creation.js
@@ -0,0 +1,27 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Test DB creation in a cluster where quorum conditions are met.
+couchTests.db_creation = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name()
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+
+ // DB Creation should return 201 - Created
+ xhr = CouchDB.request("PUT", "/" + db_name + "/");
+ T(xhr.status == 201);
+
+ // cleanup
+ db.deleteDb();
+};
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db-creation.js
new file mode 100644
index 000000000..0d8ff8367
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/db-creation.js
@@ -0,0 +1,28 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Test DB creation in a cluster where quorum conditions are not met.
+couchTests.db_creation = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name()
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+
+  // DB Creation should return 202 - Accepted
+ xhr = CouchDB.request("PUT", "/" + db_name + "/");
+ T(xhr.status == 202);
+
+ // cleanup
+  // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+};
diff --git a/test/javascript/tests/design_docs_query.js b/test/javascript/tests/design_docs_query.js
new file mode 100644
index 000000000..8fc8da5f8
--- /dev/null
+++ b/test/javascript/tests/design_docs_query.js
@@ -0,0 +1,154 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.design_docs_query = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(5);
+
+ // create the docs
+ var results = db.bulkSave(docs);
+ T(results.length == 5);
+ for (var i = 0; i < 5; i++) {
+ T(results[i].id == docs[i]._id);
+ }
+
+ // create the ddocs
+ for (var i = 0; i < 5; i++) {
+ T(db.save({
+ _id : "_design/ddoc0" + (i+1).toString(),
+ views : {
+ "testing" : {
+ "map" : "function(){emit(1,1)}"
+ }
+ }
+ }).ok);
+ }
+
+ // test design_docs
+ var path = "/" + db_name + "/_design_docs?";
+ var xhr_AllDDocs = CouchDB.request("GET", path);
+ T(xhr_AllDDocs.status == 200, "standard get should be 200");
+ var allDDocs = JSON.parse(xhr_AllDDocs.responseText);
+ TEquals(10, allDDocs.total_rows, "total_rows mismatch");
+ TEquals(5, allDDocs.rows.length, "amount of rows mismatch");
+
+ // test key="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "key=\"_design/ddoc03\"");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(1, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "key test");
+
+ // test descending=true
+ var xhr = CouchDB.request("GET", path + "descending=true");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(5, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc05", result.rows[0].key, "descending test");
+
+ // test descending=false
+ var xhr = CouchDB.request("GET", path + "descending=false");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(5, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc01", result.rows[0].key, "descending test");
+
+ // test end_key="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[2].key, "end_key test");
+
+ // test endkey="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "endkey=\"_design/ddoc03\"");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[2].key, "endkey test");
+
+ // test start_key="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "start_key=\"_design/ddoc03\"");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "start_key test");
+
+ // test startkey="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "startkey=\"_design/ddoc03\"");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "startkey test");
+
+ // test end_key="_design/ddoc03"&inclusive_end=true
+ var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"&inclusive_end=true");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[2].key, "end_key and inclusive_end test");
+
+ // test end_key="_design/ddoc03"&inclusive_end=false
+ var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"&inclusive_end=false");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc02", result.rows[1].key, "end_key and inclusive_end test");
+
+ // test end_key="_design/ddoc03"&inclusive_end=false&descending=true
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc03\"&inclusive_end=false&descending=true");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc04", result.rows[1].key, "end_key, inclusive_end and descending test");
+
+ // test end_key="_design/ddoc05"&limit=2
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc05\"&limit=2");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc02", result.rows[1].key, "end_key and limit test");
+
+ // test end_key="_design/ddoc05"&skip=2
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc05\"&skip=2");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "end_key and skip test");
+ TEquals("_design/ddoc05", result.rows[2].key, "end_key and skip test");
+
+ // test end_key="_design/ddoc05"&update_seq=true
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc05\"&update_seq=true");
+  T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ T(result.update_seq);
+
+ // test POST with keys
+ var xhr = CouchDB.request("POST", path, {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({"keys" : ["_design/ddoc02", "_design/ddoc03"]})
+ });
+  T(xhr.status == 200, "standard POST should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[1].key, "POST test");
+
+ db.deleteDb();
+};
diff --git a/test/javascript/tests/users_db_security.js b/test/javascript/tests/users_db_security.js
index ad516a0c4..536585acf 100644
--- a/test/javascript/tests/users_db_security.js
+++ b/test/javascript/tests/users_db_security.js
@@ -15,6 +15,8 @@ couchTests.users_db_security = function(debug) {
var usersDb = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
try { usersDb.createDb(); } catch (e) { /* ignore if exists*/ }
+ var passwordSchemes = ['pbkdf2', 'bcrypt'];
+
if (debug) debugger;
var loginUser = function(username) {
@@ -86,7 +88,7 @@ couchTests.users_db_security = function(debug) {
}
};
- var testFun = function()
+ var testFun = function(scheme, derivedKeyTest, saltTest)
{
// _users db
@@ -105,11 +107,13 @@ couchTests.users_db_security = function(debug) {
// jan's gonna be admin as he's the first user
TEquals(true, usersDb.save(userDoc).ok, "should save document");
- wait(5000)
+ wait(5000);
userDoc = open_as(usersDb, "org.couchdb.user:jchris", "jchris");
TEquals(undefined, userDoc.password, "password field should be null 1");
- TEquals(40, userDoc.derived_key.length, "derived_key should exist");
- TEquals(32, userDoc.salt.length, "salt should exist");
+ TEquals(scheme, userDoc.password_scheme, "password_scheme should be " + scheme);
+ derivedKeyTest(userDoc.derived_key);
+ saltTest(userDoc.salt);
+
// create server admin
@@ -141,10 +145,13 @@ couchTests.users_db_security = function(debug) {
var jchrisDoc = open_as(usersDb, "org.couchdb.user:jchris", "jan");
TEquals(undefined, jchrisDoc.password, "password field should be null 2");
- TEquals(40, jchrisDoc.derived_key.length, "derived_key should exist");
- TEquals(32, jchrisDoc.salt.length, "salt should exist");
+ TEquals(scheme, jchrisDoc.password_scheme, "password_scheme should be " + scheme);
+ derivedKeyTest(jchrisDoc.derived_key);
+ saltTest(jchrisDoc.salt);
- TEquals(true, userDoc.salt != jchrisDoc.salt, "should have new salt");
+ if(userDoc.salt || jchrisDoc.salt) {
+ TEquals(true, userDoc.salt != jchrisDoc.salt, "should have new salt");
+ }
TEquals(true, userDoc.derived_key != jchrisDoc.derived_key,
"should have new derived_key");
@@ -227,7 +234,7 @@ couchTests.users_db_security = function(debug) {
TEquals("forbidden", e.error, "non-admins can't read design docs");
}
- // admin shold be able to read _list
+ // admin should be able to read _list
var listPath = ddoc["_id"] + "/_list/names/test";
var result = request_as(usersDb, listPath, "jan");
var lines = result.responseText.split("\n");
@@ -373,14 +380,124 @@ couchTests.users_db_security = function(debug) {
});
};
+ var derivedKeyTests = {
+ pbkdf2: function(derived_key) {
+ TEquals(40, derived_key.length, "derived_key should exist");
+ },
+ bcrypt: function(derived_key) {
+ TEquals(60, derived_key.length, "derived_key should exist");
+ }
+ };
+ var saltTests = {
+ pbkdf2: function(salt) {
+ TEquals(32, salt.length, "salt should exist");
+ },
+ bcrypt: function(salt) {
+ TEquals(undefined, salt, "salt should not exist");
+ }
+ };
+ passwordSchemes.forEach(function(scheme){
+ run_on_modified_server(
+ [{
+ section: "couch_httpd_auth",
+ key: "iterations", value: "1"
+ }, {
+ section: "couch_httpd_auth",
+ key: "password_scheme", value: scheme
+ }, {
+ section: "admins",
+ key: "jan", value: "apple"
+ }],
+ function() {
+ try {
+ testFun(scheme, derivedKeyTests[scheme], saltTests[scheme]);
+ } finally {
+ CouchDB.login("jan", "apple");
+ usersDb.deleteDb(); // cleanup
+ sleep(5000);
+ usersDb.createDb();
+ }
+ }
+ );
+ });
+
+ var testFunUpdatePasswordScheme = function() {
+ var userDocs = {
+ jchris: {
+ _id: "org.couchdb.user:jchris",
+ type: "user",
+ name: "jchris",
+ password: "mp3",
+ roles: []
+ },
+ fdmanana: {
+ _id: "org.couchdb.user:fdmanana",
+ type: "user",
+ name: "fdmanana",
+ password: "foobar",
+ roles: []
+ }
+ };
+
+ // create new user (has pbkdf2 hash)
+ TEquals(true, usersDb.save(userDocs.jchris).ok, "should save document");
+ wait(5000);
+ var userDoc = open_as(usersDb, "org.couchdb.user:jchris", "jchris");
+ TEquals(undefined, userDoc.password, "password field should be null 1");
+ TEquals("pbkdf2", userDoc.password_scheme, "password_scheme should be pbkdf2");
+ derivedKeyTests.pbkdf2(userDoc.derived_key);
+ saltTests.pbkdf2(userDoc.salt);
+
+ // change scheme to bcrypt
+ CouchDB.login("jan", "apple");
+ var xhr = CouchDB.request("PUT", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme", {
+ body : JSON.stringify("bcrypt"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status);
+ xhr = CouchDB.request("GET", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme");
+ var scheme = JSON.parse(xhr.responseText);
+ TEquals("bcrypt", scheme);
+
+ // create new user (has bcrypt hash)
+ TEquals(true, usersDb.save(userDocs.fdmanana).ok, "should save document");
+ wait(5000);
+ userDoc = open_as(usersDb, "org.couchdb.user:fdmanana", "fdmanana");
+ TEquals(undefined, userDoc.password, "password field should be null 1");
+ TEquals("bcrypt", userDoc.password_scheme, "password_scheme should be bcrypt");
+ derivedKeyTests.bcrypt(userDoc.derived_key);
+ saltTests.bcrypt(userDoc.salt);
+
+ // test that both users can still log in
+ TEquals(true, CouchDB.login(userDocs.jchris.name, userDocs.jchris.password).ok);
+ TEquals(true, CouchDB.login(userDocs.fdmanana.name, userDocs.fdmanana.password).ok);
+
+ // change scheme back to pbkdf2
+ CouchDB.login("jan", "apple");
+ var xhr = CouchDB.request("PUT", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme", {
+ body : JSON.stringify("pbkdf2"),
+ headers: {"X-Couch-Persist": "false"}
+ });
+ TEquals(200, xhr.status);
+ xhr = CouchDB.request("GET", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme");
+ var scheme = JSON.parse(xhr.responseText);
+ TEquals("pbkdf2", scheme);
+
+ // test that both users can still log in
+ TEquals(true, CouchDB.login(userDocs.jchris.name, userDocs.jchris.password).ok);
+ TEquals(true, CouchDB.login(userDocs.fdmanana.name, userDocs.fdmanana.password).ok);
+ };
run_on_modified_server(
- [{section: "couch_httpd_auth",
- key: "iterations", value: "1"},
- {section: "admins",
- key: "jan", value: "apple"}],
+ [{
+ section: "couch_httpd_auth",
+ key: "iterations", value: "1"
+ }, {
+ section: "admins",
+ key: "jan", value: "apple"
+ }],
function() {
try {
- testFun();
+ testFunUpdatePasswordScheme();
} finally {
CouchDB.login("jan", "apple");
usersDb.deleteDb(); // cleanup
@@ -389,5 +506,6 @@ couchTests.users_db_security = function(debug) {
}
}
);
+
CouchDB.logout();
};
diff --git a/test/javascript/tests/view_errors.js b/test/javascript/tests/view_errors.js
index dd60292a3..f135b749a 100644
--- a/test/javascript/tests/view_errors.js
+++ b/test/javascript/tests/view_errors.js
@@ -169,7 +169,7 @@ couchTests.view_errors = function(debug) {
T(xhr.status == 400);
result = JSON.parse(xhr.responseText);
T(result.error == "bad_request");
- T(result.reason == "`keys` member must be a array.");
+ T(result.reason == "`keys` member must be an array.");
// if the reduce grows to fast, throw an overflow error
var path = "/" + db_name + "/_design/testbig/_view/reduce_too_big";