author     Tony Sun <tony.sun427@gmail.com>  2017-10-31 17:11:44 -0700
committer  GitHub <noreply@github.com>       2017-10-31 17:11:44 -0700
commit     d5ef16e4a10975f4d1d25c72bbaeade4716164b5 (patch)
tree       3e0a9e4f8b1c1820cb21c863d340f09932927473
parent     1b79cd4d98f8c283f578262b92bf2cf002dc8a0b (diff)
parent     c4ac5c04d051f4e1336f90ebdca82706b2b9633b (diff)
download   couchdb-seqt-external-size-bug.tar.gz
Merge branch 'master' into seqt-external-size-bug
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 11
-rw-r--r--  .travis.yml | 1
-rw-r--r--  INSTALL.Unix.md | 7
-rw-r--r--  Jenkinsfile | 83
-rw-r--r--  Makefile | 26
-rw-r--r--  NOTICE | 24
-rw-r--r--  README-DEV.rst | 15
-rwxr-xr-x  dev/run | 11
-rw-r--r--  rebar.config.script | 14
-rwxr-xr-x  rel/overlay/bin/remsh | 6
-rw-r--r--  rel/overlay/etc/default.ini | 15
-rw-r--r--  rel/reltool.config | 2
-rw-r--r--  src/chttpd/src/chttpd_auth_cache.erl | 18
-rw-r--r--  src/chttpd/src/chttpd_db.erl | 8
-rw-r--r--  src/chttpd/src/chttpd_misc.erl | 7
-rw-r--r--  src/chttpd/src/chttpd_view.erl | 2
-rw-r--r--  src/couch/include/couch_eunit.hrl | 9
-rw-r--r--  src/couch/priv/couch_js/help.h | 2
-rw-r--r--  src/couch/priv/couch_js/main.c | 6
-rw-r--r--  src/couch/priv/couch_js/util.c | 4
-rw-r--r--  src/couch/priv/couch_js/util.h | 2
-rw-r--r--  src/couch/rebar.config.script | 13
-rw-r--r--  src/couch/src/couch_auth_cache.erl | 5
-rw-r--r--  src/couch/src/couch_debug.erl | 2
-rw-r--r--  src/couch/src/couch_doc.erl | 96
-rw-r--r--  src/couch/src/couch_httpd.erl | 6
-rw-r--r--  src/couch/src/couch_httpd_auth.erl | 9
-rw-r--r--  src/couch/src/couch_httpd_db.erl | 36
-rw-r--r--  src/couch/src/couch_httpd_misc_handlers.erl | 1
-rw-r--r--  src/couch/src/couch_lru.erl | 4
-rw-r--r--  src/couch/src/couch_multidb_changes.erl | 2
-rw-r--r--  src/couch/src/couch_query_servers.erl | 29
-rw-r--r--  src/couch/src/couch_rand.erl | 57
-rw-r--r--  src/couch/src/couch_util.erl | 42
-rw-r--r--  src/couch/src/couch_uuids.erl | 105
-rw-r--r--  src/couch/src/test_util.erl | 2
-rw-r--r--  src/couch/test/couch_btree_tests.erl | 8
-rw-r--r--  src/couch/test/couch_doc_json_tests.erl | 71
-rw-r--r--  src/couch/test/couch_doc_tests.erl | 16
-rw-r--r--  src/couch/test/couch_file_tests.erl | 4
-rw-r--r--  src/couch/test/couchdb_attachments_tests.erl | 4
-rwxr-xr-x  src/couch/test/couchdb_cookie_domain_tests.erl | 77
-rw-r--r--  src/couch_epi/test/couch_epi_tests.erl | 2
-rw-r--r--  src/couch_log/test/couch_log_test.erl | 3
-rw-r--r--  src/couch_mrview/src/couch_mrview_http.erl | 6
-rw-r--r--  src/couch_peruser/README.md | 19
-rw-r--r--  src/couch_peruser/src/couch_peruser.app.src | 6
-rw-r--r--  src/couch_peruser/src/couch_peruser.erl | 312
-rw-r--r--  src/couch_peruser/src/couch_peruser_app.erl | 26
-rw-r--r--  src/couch_peruser/src/couch_peruser_sup.erl | 29
-rw-r--r--  src/couch_peruser/test/couch_peruser_test.erl | 107
-rw-r--r--  src/couch_replicator/src/couch_replicator.erl | 2
-rw-r--r--  src/couch_replicator/src/couch_replicator_clustering.erl | 10
-rw-r--r--  src/couch_replicator/src/couch_replicator_doc_processor.erl | 14
-rw-r--r--  src/couch_replicator/src/couch_replicator_docs.erl | 2
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler_job.erl | 170
-rw-r--r--  src/couch_replicator/src/couch_replicator_worker.erl | 17
-rw-r--r--  src/ddoc_cache/src/ddoc_cache_lru.erl | 7
-rw-r--r--  src/ddoc_cache/test/ddoc_cache_lru_test.erl | 2
-rw-r--r--  src/ddoc_cache/test/ddoc_cache_refresh_test.erl | 4
-rw-r--r--  src/fabric/rebar.config | 2
-rw-r--r--  src/fabric/src/fabric_db_create.erl | 2
-rw-r--r--  src/fabric/src/fabric_util.erl | 2
-rw-r--r--  src/fabric/src/fabric_view.erl | 2
-rw-r--r--  src/mango/src/mango_cursor.erl | 2
-rw-r--r--  src/mango/src/mango_cursor_view.erl | 33
-rw-r--r--  src/mango/src/mango_error.erl | 2
-rw-r--r--  src/mango/src/mango_execution_stats.erl | 6
-rw-r--r--  src/mango/src/mango_idx.erl | 55
-rw-r--r--  src/mango/src/mango_idx_special.erl | 4
-rw-r--r--  src/mango/src/mango_idx_text.erl | 4
-rw-r--r--  src/mango/src/mango_idx_view.erl | 18
-rw-r--r--  src/mango/src/mango_idx_view.hrl | 13
-rw-r--r--  src/mango/src/mango_selector.erl | 2
-rw-r--r--  src/mango/test/01-index-crud-test.py | 126
-rw-r--r--  src/mango/test/02-basic-find-test.py | 48
-rw-r--r--  src/mango/test/03-operator-test.py | 5
-rw-r--r--  src/mango/test/05-index-selection-test.py | 137
-rw-r--r--  src/mango/test/09-text-sort-test.py | 50
-rw-r--r--  src/mango/test/10-disable-array-length-field-test.py | 14
-rw-r--r--  src/mango/test/11-ignore-design-docs-test.py (renamed from src/mango/test/11-ignore-design-docs.py) | 0
-rw-r--r--  src/mango/test/12-use-correct-index-test.py (renamed from src/mango/test/12-use-correct-index.py) | 17
-rw-r--r--  src/mango/test/14-json-pagination-test.py (renamed from src/mango/test/14-json-pagination.py) | 0
-rw-r--r--  src/mango/test/15-execution-stats-test.py | 4
-rw-r--r--  src/mango/test/16-index-selectors-test.py (renamed from src/mango/test/16-index-selectors.py) | 30
-rw-r--r--  src/mango/test/17-multi-type-value-test.py | 90
-rw-r--r--  src/mango/test/README.md | 23
-rw-r--r--  src/mango/test/mango.py | 51
-rw-r--r--  src/mem3/src/mem3.erl | 8
-rw-r--r--  src/mem3/src/mem3_shards.erl | 4
-rw-r--r--  src/rexi/src/rexi_server.erl | 2
-rwxr-xr-x  test/build/test-run-couch-for-mango.sh | 13
-rw-r--r--  test/javascript/couch.js | 2
-rwxr-xr-x  test/javascript/run | 59
-rw-r--r--  test/javascript/tests/config.js | 8
-rw-r--r--  test/javascript/tests/erlang_views.js | 4
-rw-r--r--  test/javascript/tests/view_errors.js | 16
-rw-r--r--  test/javascript/tests/view_sandboxing.js | 38
-rw-r--r--  version.mk | 2
99 files changed, 1857 insertions, 641 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index f364446c3..a64a17e7b 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -22,17 +22,10 @@
Does it provides any behaviour that the end users
could notice? -->
-## GitHub issue number
-
-<!-- If this is a significant change, please file a separate issue at:
- https://github.com/apache/couchdb/issues
- and include the number here and in commit message(s) using
- syntax like "Fixes #472" or "Fixes apache/couchdb#472". -->
-
-## Related Pull Requests
+## Related Issues or Pull Requests
<!-- If your changes affects multiple components in different
- repositories please put links to those pull requests here. -->
+ repositories please put links to those issues or pull requests here. -->
## Checklist
diff --git a/.travis.yml b/.travis.yml
index 8aebaabc6..fe84f87c4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,6 +3,7 @@ sudo: false
os: linux
otp_release:
+ - 20.1
- 19.3
- 18.3
- 17.5
diff --git a/INSTALL.Unix.md b/INSTALL.Unix.md
index 3ed2091f0..b2d4fbdbc 100644
--- a/INSTALL.Unix.md
+++ b/INSTALL.Unix.md
@@ -135,6 +135,11 @@ You can install this by running:
pkg install gmake
+You can install the remaining dependencies by running:
+
+ pkg install npm4 help2man openssl icu curl git \
+ autoconf automake libtool node spidermonkey185
+
## Installing
Once you have satisfied the dependencies you should run:
@@ -231,7 +236,7 @@ To check that everything has worked, point your web browser to:
From here you should verify your installation by pointing your web browser to:
- http://localhost:5984/_utils/verify_install.html
+ http://localhost:5984/_utils/#/verifyinstall
## Running as a Daemon
diff --git a/Jenkinsfile b/Jenkinsfile
index d5212279e..fed976afc 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -182,11 +182,11 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1204erlang183: {
+ ubuntu1404erlangdefault: {
node(label: 'couchdbtest') {
timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-12.04-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/ubuntu-12.04-erlang-18.3') {
+ sh 'docker pull couchdbdev/ubuntu-14.04-erlang-default'
+ withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-default') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
@@ -205,11 +205,51 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1404erlangdefault: {
+ ubuntu1404erlang183: {
+ node(label: 'couchdbtest') {
+ timeout(time: 60, unit: "MINUTES") {
+ sh 'docker pull couchdbdev/ubuntu-14.04-erlang-18.3'
+ withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-18.3') {
+ sh 'rm -f apache-couchdb-*.tar.gz'
+ unstash 'tarball'
+ sh '''
+ cwd=$(pwd)
+ mkdir -p $COUCHDB_IO_LOG_DIR
+
+ # Build CouchDB from tarball
+ builddir=$(mktemp -d)
+ cd $builddir
+ tar -xf $cwd/apache-couchdb-*.tar.gz
+ cd apache-couchdb-*
+ ./configure --with-curl
+ make all
+ make check || (build-aux/logfile-uploader.py && false)
+
+ # Build CouchDB packages
+ cd $builddir
+ git clone https://github.com/apache/couchdb-pkg
+ mkdir couchdb
+ cp $cwd/apache-couchdb-*.tar.gz couchdb
+ tar -xf $cwd/apache-couchdb-*.tar.gz -C couchdb
+ cd couchdb-pkg
+ platform=$(lsb_release -cs)
+ make $platform PLATFORM=$platform
+
+ # Cleanup & save for posterity
+ rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
+ mv ../couchdb/*deb $cwd/pkgs/$platform || true
+ '''
+ } // withDocker
+ } // timeout
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ deleteDir()
+ } // node
+ },
+ ubuntu1604erlangdefault: {
node(label: 'couchdbtest') {
timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-14.04-erlang-default'
- withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-default') {
+ sh 'docker pull couchdbdev/ubuntu-16.04-erlang-default'
+ withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-default') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
@@ -228,11 +268,11 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1404erlang183: {
+ ubuntu1604erlang183: {
node(label: 'couchdbtest') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-14.04-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-18.3') {
+ sh 'docker pull couchdbdev/ubuntu-16.04-erlang-18.3'
+ withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-18.3') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
@@ -268,11 +308,11 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1604erlangdefault: {
+ debian8erlangdefault: {
node(label: 'couchdbtest') {
timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-16.04-erlang-default'
- withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-default') {
+ sh 'docker pull couchdbdev/debian-8-erlang-default'
+ withDockerContainer(image: 'couchdbdev/debian-8-erlang-default') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
@@ -291,11 +331,11 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1604erlang183: {
+ debian8erlang183: {
node(label: 'couchdbtest') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-16.04-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-18.3') {
+ sh 'docker pull couchdbdev/debian-8-erlang-18.3'
+ withDockerContainer(image: 'couchdbdev/debian-8-erlang-18.3') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
@@ -331,11 +371,11 @@ pipeline {
deleteDir()
} // node
},
- debian8erlangdefault: {
+ debian9erlangdefault: {
node(label: 'couchdbtest') {
timeout(time: 45, unit: "MINUTES") {
- sh 'docker pull couchdbdev/debian-8-erlang-default'
- withDockerContainer(image: 'couchdbdev/debian-8-erlang-default') {
+ sh 'docker pull couchdbdev/debian-9-erlang-default'
+ withDockerContainer(image: 'couchdbdev/debian-9-erlang-default') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
@@ -354,11 +394,11 @@ pipeline {
deleteDir()
} // node
},
- debian8erlang183: {
+ debian9erlang183: {
node(label: 'couchdbtest') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/debian-8-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/debian-8-erlang-18.3') {
+ sh 'docker pull couchdbdev/debian-9-erlang-18.3'
+ withDockerContainer(image: 'couchdbdev/debian-9-erlang-18.3') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
@@ -429,6 +469,7 @@ pipeline {
reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*deb
reprepro -b couchdb-pkg/repo includedeb trusty pkgs/trusty/*deb
reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*deb
+ reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*deb
'''
echo 'Building CentOS repos...'
sh '''
diff --git a/Makefile b/Makefile
index 239a2db1c..248dddc58 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ include version.mk
REBAR?=$(shell echo `pwd`/bin/rebar)
IN_RELEASE = $(shell if [ ! -d .git ]; then echo true; fi)
-COUCHDB_VERSION_SUFFIX = $(shell if [ -d .git ]; then echo '-`git rev-parse --short --verify HEAD`'; fi)
+COUCHDB_VERSION_SUFFIX = $(shell if [ ! -z "$(COUCH_RC)" ]; then echo '-RC$(COUCH_RC)'; else if [ -d .git ]; then echo '-`git rev-parse --short --verify HEAD`'; fi; fi)
COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)$(COUCHDB_VERSION_SUFFIX)
DESTDIR=
@@ -36,6 +36,8 @@ DIALYZE_OPTS=$(shell echo "\
skip_deps=$(skip_deps) \
" | sed -e 's/[a-z]\+= / /g')
+#ignore javascript tests
+ignore_js_suites=
################################################################################
# Main commands
@@ -122,8 +124,10 @@ else
endif
@rm -rf dev/lib
@dev/run -n 1 -q --with-admin-party-please \
+ --enable-erlang-views \
-c 'startup_jitter=0' \
- test/javascript/run $(suites)
+ 'test/javascript/run --suites "$(suites)" \
+ --ignore "$(ignore_js_suites)"'
.PHONY: soak-javascript
soak-javascript:
@@ -138,7 +142,8 @@ endif
while [ $$? -eq 0 ]; do \
dev/run -n 1 -q --with-admin-party-please \
-c 'startup_jitter=0' \
- test/javascript/run $(suites); \
+ 'test/javascript/run --suites "$(suites)" \
+ --ignore "$(ignore_js_suites)"' \
done
.PHONY: check-qs
@@ -349,21 +354,6 @@ uninstall:
@rm -rf $(DESTDIR)/$(html_dir)
@rm -rf $(DESTDIR)/$(man_dir)
-.PHONY: rc
-rc:
-ifeq ($(strip $(COUCH_RC)),)
- @echo "COUCH_RC environment variable not set. Run as 'COUCH_RC=X make rc'"
-else
- @rm -rf apache-couchdb-*
- @$(MAKE) dist 2>&1 > /dev/null
- @rm apache-couchdb-*.tar.gz
- @mv apache-couchdb-* apache-couchdb-2.1.0-RC$(COUCH_RC)
- @tar czf apache-couchdb-2.1.0-RC$(COUCH_RC).tar.gz apache-couchdb-2.1.0-RC$(COUCH_RC)
- @echo "Done apache-couchdb-2.1.0-RC$(COUCH_RC).tar.gz"
- @echo "Here is the list of commits since the last RC"
- @git log --left-right --graph --cherry-pick --oneline 2.1.0-RC$(shell echo $(COUCH_RC)-1 | bc)...master
- @echo "Done!"
-endif
################################################################################
# Misc
diff --git a/NOTICE b/NOTICE
index 97523cc52..5fddffb3e 100644
--- a/NOTICE
+++ b/NOTICE
@@ -6,25 +6,9 @@ The Apache Software Foundation (http://www.apache.org/).
This product also includes the following third-party components:
- * ac_check_icu.m4 (http://autoconf-archive.cryp.to/ac_check_icu.html)
-
- Copyright 2008, Akos Maroy <darkeye@tyrell.hu>
-
- * ac_check_curl.m4 (http://autoconf-archive.cryp.to/ac_check_curl.html)
-
- Copyright 2008, Akos Maroy <darkeye@tyrell.hu>
-
- * pkg.m4 (http://http://pkg-config.freedesktop.org/wiki/)
-
- Copyright 2004, Scott James Remnant <scott@netsplit.com>
-
- * jQuery (http://jquery.org/)
-
+* jQuery (http://jquery.org/)
+
Copyright 2012 jQuery Foundation and other contributors
-
- * jQuery UI (http://jqueryui.com)
-
- Copyright 2011, Paul Bakaus
* json2.js (http://www.json.org/)
@@ -172,9 +156,7 @@ This product also includes the following third-party components:
* React.js
- Copyright (c) 2013-2015, Facebook, Inc.
- NOTE: This is a Facebook "BSD+patents" licensed artifact. For more
- information, see https://issues.apache.org/jira/browse/LEGAL-303
+ Copyright (c) 2013-2017, Facebook, Inc.
* Flux.js
diff --git a/README-DEV.rst b/README-DEV.rst
index 0f12fa44c..f8d80ac41 100644
--- a/README-DEV.rst
+++ b/README-DEV.rst
@@ -89,7 +89,7 @@ Unless you want to install the optional dependencies, skip to the next section.
Install what else we can with Homebrew::
- brew install help2man gnupg md5sha1sum node
+ brew install help2man gnupg md5sha1sum node spidermonkey
If you don't already have pip installed, install it::
@@ -97,7 +97,7 @@ If you don't already have pip installed, install it::
Now, install the required Python packages::
- sudo pip install sphinx docutils pygments nose requests hypothesis
+ sudo pip install sphinx docutils pygments nose requests hypothesis sphinx_rtd_theme
FreeBSD
~~~~~~~
@@ -173,8 +173,15 @@ JavaScript tests accepts only `suites` option, but in the same way::
# Run only basic and design_options tests
make javascript suites="basic design_options"
-Note that tests are delimited here by whitespace, not by comma. You can get list
-of all possible test targets with the following command::
+ # Ignore specific test suites via command line
+ make javascript ignore_js_suites="all_docs bulk_docs"
+
+ # Ignore specific test suites in makefile
+ ignore_js_suites=all_docs,bulk_docs
+
+Note that tests on the command line are delimited here by whitespace,
+not by comma.You can get list of all possible test targets with the
+following command::
make list-js-suites
diff --git a/dev/run b/dev/run
index 5693e1273..4924de1f6 100755
--- a/dev/run
+++ b/dev/run
@@ -113,6 +113,9 @@ def setup_argparse():
dest='with_admin_party', default=False,
action='store_true',
help='Runs a dev cluster with admin party mode on')
+ parser.add_option('--enable-erlang-views',
+ action='store_true',
+ help='Enables the Erlang view server')
parser.add_option('--no-join',
dest='no_join', default=False,
action='store_true',
@@ -135,6 +138,7 @@ def setup_context(opts, args):
return {'N': opts.nodes,
'no_join': opts.no_join,
'with_admin_party': opts.with_admin_party,
+ 'enable_erlang_views': opts.enable_erlang_views,
'admin': opts.admin.split(':', 1) if opts.admin else None,
'nodes': ['node%d' % (i + opts.node_number) for i in range(opts.nodes)],
'node_number': opts.node_number,
@@ -274,6 +278,13 @@ def hack_default_ini(ctx, node, contents):
repl = toposixpath("coffeescript = %s %s" % (couchjs, coffeejs))
contents = re.sub("(?m)^coffeescript.*$", repl, contents)
+ if ctx['enable_erlang_views']:
+ contents = re.sub(
+ "^\[native_query_servers\]$",
+ "[native_query_servers]\nerlang = {couch_native_process, start_link, []}",
+ contents,
+ flags=re.MULTILINE)
+
return contents
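[Editor's note: for illustration, this is the section the new --enable-erlang-views flag injects into each dev node's default.ini, taken verbatim from the substitution above:]

    [native_query_servers]
    erlang = {couch_native_process, start_link, []}
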
diff --git a/rebar.config.script b/rebar.config.script
index 654fb2f12..60d2e3124 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -47,12 +47,12 @@ SubDirs = [
DepDescs = [
%% Independent Apps
{config, "config", {tag, "1.0.1"}},
-{b64url, "b64url", {tag, "1.0.0"}},
+{b64url, "b64url", {tag, "1.0.1"}},
{ets_lru, "ets-lru", {tag, "1.0.0"}},
-{khash, "khash", {tag, "1.0.0"}},
+{khash, "khash", {tag, "1.0.1"}},
{snappy, "snappy", {tag, "CouchDB-1.0.0"}},
{setup, "setup", {tag, "1.0.1"}},
-{ioq, "ioq", {tag, "1.0.0"}},
+{ioq, "ioq", {tag, "1.0.1"}},
%% Non-Erlang deps
{docs, {url, "https://github.com/apache/couchdb-documentation"},
@@ -60,11 +60,11 @@ DepDescs = [
{fauxton, {url, "https://github.com/apache/couchdb-fauxton"},
{tag, "v1.1.13"}, [raw]},
%% Third party deps
-{folsom, "folsom", {tag, "CouchDB-0.8.1"}},
+{folsom, "folsom", {tag, "CouchDB-0.8.2"}},
{ibrowse, "ibrowse", {tag, "CouchDB-4.0.1"}},
-{jiffy, "jiffy", {tag, "CouchDB-0.14.11-1"}},
+{jiffy, "jiffy", {tag, "CouchDB-0.14.11-2"}},
{mochiweb, "mochiweb", {tag, "CouchDB-2.12.0-1"}},
-{meck, "meck", {tag, "0.8.2"}}
+{meck, "meck", {tag, "0.8.8"}}
],
@@ -85,7 +85,7 @@ MakeDep = fun
end,
AddConfig = [
- {require_otp_vsn, "R16B03|R16B03-1|17|18|19"},
+ {require_otp_vsn, "R16B03|R16B03-1|17|18|19|20"},
{deps_dir, "src"},
{deps, lists:map(MakeDep, DepDescs)},
{sub_dirs, SubDirs},
diff --git a/rel/overlay/bin/remsh b/rel/overlay/bin/remsh
index b409ceaa3..963c16a10 100755
--- a/rel/overlay/bin/remsh
+++ b/rel/overlay/bin/remsh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -66,11 +66,11 @@ done
shift $((OPTIND - 1))
-if [[ ! -z "$VERBOSE" ]]; then
+if [ ! -z "$VERBOSE" ]; then
# cheap but it works
set -x
fi
exec "$BINDIR/erl" -boot "$ROOTDIR/releases/$APP_VSN/start_clean" \
- -name remsh$$@$LHOST -remsh $NAME -hidden -setcookie $COOKIE \
+ -name remsh$$@$LHOST -remsh $NODE -hidden -setcookie $COOKIE \
"$@"
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 7cb805e94..745e5a8e4 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -88,6 +88,10 @@ enable = false
; If set to true and a user is deleted, the respective database gets
; deleted as well.
delete_dbs = false
+; Wait this many seconds after startup before attaching changes listeners
+; cluster_start_period = 5
+; Re-check cluster state at least every cluster_quiet_period seconds
+; cluster_quiet_period = 60
[httpd]
port = {{backend_port}}
@@ -177,6 +181,7 @@ iterations = 10 ; iterations for password hashing
; public_fields =
; secret =
; users_db_public = false
+; cookie_domain = example.com
; CSP (Content Security Policy) Support for _utils
[csp]
@@ -253,7 +258,6 @@ uuids={couch_uuids, start, []}
auth_cache={couch_auth_cache, start_link, []}
os_daemons={couch_os_daemons, start_link, []}
compaction_daemon={couch_compaction_daemon, start_link, []}
-couch_peruser={couch_peruser, start_link, []}
[mango]
; Set to true to disable the "index all fields" text index, which can lead
@@ -395,6 +399,15 @@ verify_ssl_certificates = false
ssl_certificate_max_depth = 3
; Maximum document ID length for replication.
;max_document_id_length = 0
+; How much time to wait before retrying after a missing doc exception. This
+; exception happens if the document was seen in the changes feed, but internal
+; replication hasn't caught up yet, and fetching document's revisions
+; fails. This a common scenario when source is updated while continous
+; replication is running. The retry period would depend on how quickly internal
+; replication is expected to catch up. In general this is an optimisation to
+; avoid crashing the whole replication job, which would consume more resources
+; and add log noise.
+;missing_doc_retry_msec = 2000
[compaction_daemon]
; The delay, in seconds, between each check for which database and view indexes
diff --git a/rel/reltool.config b/rel/reltool.config
index 762848f22..8bcf4c2ba 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -12,7 +12,7 @@
{sys, [
{lib_dirs, ["../src"]},
- {rel, "couchdb", "2.1.0", [
+ {rel, "couchdb", "2.1.1", [
%% stdlib
asn1,
compiler,
diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index f3e69de63..4d85b165b 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -15,7 +15,7 @@
-export([start_link/0, get_user_creds/2, update_user_creds/3]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+ code_change/3]).
-export([listen_for_changes/1, changes_callback/2]).
-include_lib("couch/include/couch_db.hrl").
@@ -45,7 +45,7 @@ get_user_creds(_Req, UserName) when is_binary(UserName) ->
Props;
UserProps when is_list(UserProps) ->
couch_auth_cache:add_roles(Props,
- couch_util:get_value(<<"roles">>, UserProps))
+ couch_util:get_value(<<"roles">>, UserProps))
end
end,
maybe_validate_user_creds(Resp).
@@ -164,14 +164,14 @@ changes_callback({error, _}, EndSeq) ->
load_user_from_db(UserName) ->
try fabric:open_doc(dbname(), docid(UserName), [?ADMIN_CTX, ejson_body, conflicts]) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- Props;
- _Else ->
- couch_log:debug("no record of user ~s", [UserName]),
- nil
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ Props;
+ _Else ->
+ couch_log:debug("no record of user ~s", [UserName]),
+ nil
catch error:database_does_not_exist ->
- nil
+ nil
end.
dbname() ->
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index c8826d581..7e467508e 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -711,7 +711,7 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
couch_httpd:validate_referer(Req),
- couch_doc:validate_docid(DocId),
+ couch_doc:validate_docid(DocId, couch_db:name(Db)),
chttpd:validate_ctype(Req, "multipart/form-data"),
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
@@ -766,9 +766,9 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
#doc_query_args{
update_type = UpdateType
} = parse_doc_query(Req),
- couch_doc:validate_docid(DocId),
-
DbName = couch_db:name(Db),
+ couch_doc:validate_docid(DocId, DbName),
+
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
Options = [{user_ctx,Ctx}, {w,W}],
@@ -1243,7 +1243,7 @@ db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNamePa
% check for the existence of the doc to handle the 404 case.
couch_doc_open(Db, DocId, nil, [])
end,
- couch_doc:validate_docid(DocId),
+ couch_doc:validate_docid(DocId, couch_db:name(Db)),
#doc{id=DocId};
Rev ->
case fabric:open_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index cfeeb3ff7..15eabbfbd 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -228,8 +228,9 @@ handle_uuids_req(Req) ->
% Node-specific request handler (_config and _stats)
-
-
+% Support _local meaning this node
+handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
+ handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
% GET /_node/$node/_config
handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
@@ -256,6 +257,7 @@ handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
% PUT /_node/$node/_config/Section/Key
% "value"
handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ couch_util:check_config_blacklist(Section),
Value = chttpd:json_body(Req),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
OldValue = call_node(Node, config, get, [Section, Key, ""]),
@@ -271,6 +273,7 @@ handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section
end;
% DELETE /_node/$node/_config/Section/Key
handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+ couch_util:check_config_blacklist(Section),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
case call_node(Node, config, get, [Section, Key, undefined]) of
undefined ->
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index 52c96cd3b..0aaa849a5 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -82,7 +82,7 @@ handle_view_req(Req, _Db, _DDoc) ->
handle_temp_view_req(Req, _Db) ->
Msg = <<"Temporary views are not supported in CouchDB">>,
- chttpd:send_error(Req, 403, forbidden, Msg).
+ chttpd:send_error(Req, 410, gone, Msg).
diff --git a/src/couch/include/couch_eunit.hrl b/src/couch/include/couch_eunit.hrl
index d3000ae5d..8a1527bcc 100644
--- a/src/couch/include/couch_eunit.hrl
+++ b/src/couch/include/couch_eunit.hrl
@@ -40,22 +40,21 @@
-define(tempfile,
fun() ->
- {A, B, C} = erlang:now(),
+ A = integer_to_list(couch_util:unique_monotonic_integer()),
N = node(),
- FileName = lists:flatten(io_lib:format("~p-~p.~p.~p", [N, A, B, C])),
+ FileName = lists:flatten(io_lib:format("~p-~p", [N, A])),
filename:join([?TEMPDIR, FileName])
end).
-define(tempdb,
fun() ->
- Nums = tuple_to_list(erlang:now()),
+ Nums = integer_to_list(couch_util:unique_monotonic_integer()),
Prefix = "eunit-test-db",
Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
list_to_binary(Prefix ++ "-" ++ Suffix)
end).
-define(docid,
fun() ->
- {A, B, C} = erlang:now(),
- lists:flatten(io_lib:format("~p~p~p", [A, B, C]))
+ integer_to_list(couch_util:unique_monotonic_integer())
end).
%% Like assertEqual, but using == instead of =:=
diff --git a/src/couch/priv/couch_js/help.h b/src/couch/priv/couch_js/help.h
index e6afaa830..c6d76b257 100644
--- a/src/couch/priv/couch_js/help.h
+++ b/src/couch/priv/couch_js/help.h
@@ -54,7 +54,7 @@ static const char USAGE_TEMPLATE[] =
" most SIZE bytes of memory to be allocated\n"
" -u FILE path to a .uri file containing the address\n"
" (or addresses) of one or more servers\n"
- " --no-eval Disable runtime code evaluation\n"
+ " --eval Enable runtime code evaluation (dangerous!)\n"
"\n"
"Report bugs at <%s>.\n";
diff --git a/src/couch/priv/couch_js/main.c b/src/couch/priv/couch_js/main.c
index 20096ae27..986791c90 100644
--- a/src/couch/priv/couch_js/main.c
+++ b/src/couch/priv/couch_js/main.c
@@ -353,10 +353,10 @@ static JSBool
csp_allows(JSContext* cx)
{
couch_args *args = (couch_args*)JS_GetContextPrivate(cx);
- if(args->no_eval) {
- return JS_FALSE;
- } else {
+ if(args->eval) {
return JS_TRUE;
+ } else {
+ return JS_FALSE;
}
}
diff --git a/src/couch/priv/couch_js/util.c b/src/couch/priv/couch_js/util.c
index 7919025d3..cf676ea33 100644
--- a/src/couch/priv/couch_js/util.c
+++ b/src/couch/priv/couch_js/util.c
@@ -98,8 +98,8 @@ couch_parse_args(int argc, const char* argv[])
}
} else if(strcmp("-u", argv[i]) == 0) {
args->uri_file = argv[++i];
- } else if(strcmp("--no-eval", argv[i]) == 0) {
- args->no_eval = 1;
+ } else if(strcmp("--eval", argv[i]) == 0) {
+ args->eval = 1;
} else if(strcmp("--", argv[i]) == 0) {
i++;
break;
diff --git a/src/couch/priv/couch_js/util.h b/src/couch/priv/couch_js/util.h
index 062469d66..b24d7f76f 100644
--- a/src/couch/priv/couch_js/util.h
+++ b/src/couch/priv/couch_js/util.h
@@ -16,7 +16,7 @@
#include <jsapi.h>
typedef struct {
- int no_eval;
+ int eval;
int use_http;
int use_test_funs;
int stack_size;
diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
index 5586032d9..498ce3a82 100644
--- a/src/couch/rebar.config.script
+++ b/src/couch/rebar.config.script
@@ -131,15 +131,20 @@ PortSpecs = case os:type() of
os:cmd("chmod +x priv/couchspawnkillable"),
BaseSpecs
end,
-
+PlatformDefines = [
+ {platform_define, "^R16", 'PRE18TIMEFEATURES'},
+ {platform_define, "^17", 'PRE18TIMEFEATURES'},
+ {platform_define, "^R16", 'NORANDMODULE'},
+ {platform_define, "^17", 'NORANDMODULE'},
+ {platform_define, "win32", 'WINDOWS'}
+],
AddConfig = [
{port_specs, PortSpecs},
- {erl_opts, [
- {platform_define, "win32", 'WINDOWS'},
+ {erl_opts, PlatformDefines ++ [
{d, 'COUCHDB_VERSION', Version},
{i, "../"}
]},
- {eunit_compile_opts, [{platform_define, "win32", 'WINDOWS'}]}
+ {eunit_compile_opts, PlatformDefines}
].
lists:foldl(fun({K, V}, CfgAcc) ->
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
index 16c59d19a..45b34e1bd 100644
--- a/src/couch/src/couch_auth_cache.erl
+++ b/src/couch/src/couch_auth_cache.erl
@@ -203,7 +203,8 @@ handle_call({fetch, UserName}, _From, State) ->
[] ->
couch_stats:increment_counter([couchdb, auth_cache_misses]),
Creds = get_user_props_from_db(UserName),
- State1 = add_cache_entry(UserName, Creds, erlang:now(), State),
+ ATime = couch_util:unique_monotonic_integer(),
+ State1 = add_cache_entry(UserName, Creds, ATime, State),
{Creds, State1}
end,
{reply, Credentials, NewState};
@@ -311,7 +312,7 @@ free_mru_cache_entry() ->
cache_hit(UserName, Credentials, ATime) ->
- NewATime = erlang:now(),
+ NewATime = couch_util:unique_monotonic_integer(),
true = ets:delete(?BY_ATIME, ATime),
true = ets:insert(?BY_ATIME, {NewATime, UserName}),
true = ets:insert(?BY_USER, {UserName, {Credentials, NewATime}}).
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 858a4fb10..96c7a505f 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -508,7 +508,7 @@ random_processes(Acc, Depth) ->
end.
oneof(Options) ->
- lists:nth(random:uniform(length(Options)), Options).
+ lists:nth(couch_rand:uniform(length(Options)), Options).
tree() ->
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index eb96d44bb..f960ec5c2 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -13,8 +13,10 @@
-module(couch_doc).
-export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
--export([from_json_obj/1, from_json_obj_validate/1, to_json_obj/2,has_stubs/1, merge_stubs/2]).
--export([validate_docid/1, get_validate_doc_fun/1]).
+-export([from_json_obj/1, from_json_obj_validate/1]).
+-export([from_json_obj/2, from_json_obj_validate/2]).
+-export([to_json_obj/2, has_stubs/1, merge_stubs/2]).
+-export([validate_docid/1, validate_docid/2, get_validate_doc_fun/1]).
-export([doc_from_multi_part_stream/2, doc_from_multi_part_stream/3]).
-export([doc_from_multi_part_stream/4]).
-export([doc_to_multi_part_stream/5, len_doc_to_multi_part_stream/4]).
@@ -126,8 +128,11 @@ doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
}.
from_json_obj_validate(EJson) ->
+ from_json_obj_validate(EJson, undefined).
+
+from_json_obj_validate(EJson, DbName) ->
MaxSize = config:get_integer("couchdb", "max_document_size", 4294967296),
- Doc = from_json_obj(EJson),
+ Doc = from_json_obj(EJson, DbName),
case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of
true ->
validate_attachment_sizes(Doc#doc.atts),
@@ -149,9 +154,11 @@ validate_attachment_sizes(Atts) ->
from_json_obj({Props}) ->
- transfer_fields(Props, #doc{body=[]});
+ from_json_obj({Props}, undefined).
-from_json_obj(_Other) ->
+from_json_obj({Props}, DbName) ->
+ transfer_fields(Props, #doc{body=[]}, DbName);
+from_json_obj(_Other, _) ->
throw({bad_request, "Document must be a JSON object"}).
parse_revid(RevId) when size(RevId) =:= 32 ->
@@ -191,6 +198,15 @@ parse_revs(_) ->
throw({bad_request, "Invalid list of revisions"}).
+validate_docid(DocId, DbName) ->
+ case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
+ lists:member(DocId, ?SYSTEM_DATABASES) of
+ true ->
+ ok;
+ false ->
+ validate_docid(DocId)
+ end.
+
validate_docid(<<"">>) ->
throw({illegal_docid, <<"Document id must not be empty">>});
validate_docid(<<"_design/">>) ->
@@ -228,28 +244,28 @@ validate_docid(Id) ->
couch_log:debug("Document id is not a string: ~p", [Id]),
throw({illegal_docid, <<"Document id must be a string">>}).
-transfer_fields([], #doc{body=Fields}=Doc) ->
+transfer_fields([], #doc{body=Fields}=Doc, _) ->
% convert fields back to json object
Doc#doc{body={lists:reverse(Fields)}};
-transfer_fields([{<<"_id">>, Id} | Rest], Doc) ->
- validate_docid(Id),
- transfer_fields(Rest, Doc#doc{id=Id});
+transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) ->
+ validate_docid(Id, DbName),
+ transfer_fields(Rest, Doc#doc{id=Id}, DbName);
-transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc) ->
+transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc, DbName) ->
{Pos, RevId} = parse_rev(Rev),
transfer_fields(Rest,
- Doc#doc{revs={Pos, [RevId]}});
+ Doc#doc{revs={Pos, [RevId]}}, DbName);
-transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc) ->
+transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc, DbName) ->
% we already got the rev from the _revisions
- transfer_fields(Rest,Doc);
+ transfer_fields(Rest, Doc, DbName);
-transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc) ->
+transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc, DbName) ->
Atts = [couch_att:from_json(Name, Props) || {Name, {Props}} <- JsonBins],
- transfer_fields(Rest, Doc#doc{atts=Atts});
+ transfer_fields(Rest, Doc#doc{atts=Atts}, DbName);
-transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
+transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc, DbName) ->
RevIds = couch_util:get_value(<<"ids">>, Props),
Start = couch_util:get_value(<<"start">>, Props),
if not is_integer(Start) ->
@@ -262,45 +278,45 @@ transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc) ->
[throw({doc_validation, "RevId isn't a string"}) ||
RevId <- RevIds, not is_binary(RevId)],
RevIds2 = [parse_revid(RevId) || RevId <- RevIds],
- transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}});
+ transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}}, DbName);
-transfer_fields([{<<"_deleted">>, B} | Rest], Doc) when is_boolean(B) ->
- transfer_fields(Rest, Doc#doc{deleted=B});
+transfer_fields([{<<"_deleted">>, B} | Rest], Doc, DbName) when is_boolean(B) ->
+ transfer_fields(Rest, Doc#doc{deleted=B}, DbName);
% ignored fields
-transfer_fields([{<<"_revs_info">>, _} | Rest], Doc) ->
- transfer_fields(Rest, Doc);
-transfer_fields([{<<"_local_seq">>, _} | Rest], Doc) ->
- transfer_fields(Rest, Doc);
-transfer_fields([{<<"_conflicts">>, _} | Rest], Doc) ->
- transfer_fields(Rest, Doc);
-transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc) ->
- transfer_fields(Rest, Doc);
+transfer_fields([{<<"_revs_info">>, _} | Rest], Doc, DbName) ->
+ transfer_fields(Rest, Doc, DbName);
+transfer_fields([{<<"_local_seq">>, _} | Rest], Doc, DbName) ->
+ transfer_fields(Rest, Doc, DbName);
+transfer_fields([{<<"_conflicts">>, _} | Rest], Doc, DbName) ->
+ transfer_fields(Rest, Doc, DbName);
+transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc, DbName) ->
+ transfer_fields(Rest, Doc, DbName);
% special fields for replication documents
transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
- #doc{body=Fields} = Doc) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+ #doc{body=Fields} = Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
- #doc{body=Fields} = Doc) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+ #doc{body=Fields} = Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
transfer_fields([{<<"_replication_state_reason">>, _} = Field | Rest],
- #doc{body=Fields} = Doc) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+ #doc{body=Fields} = Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
- #doc{body=Fields} = Doc) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+ #doc{body=Fields} = Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
transfer_fields([{<<"_replication_stats">>, _} = Field | Rest],
- #doc{body=Fields} = Doc) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]});
+ #doc{body=Fields} = Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
% unknown special field
-transfer_fields([{<<"_",Name/binary>>, _} | _], _) ->
+transfer_fields([{<<"_",Name/binary>>, _} | _], _, _) ->
throw({doc_validation,
?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-transfer_fields([Field | Rest], #doc{body=Fields}=Doc) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}).
+transfer_fields([Field | Rest], #doc{body=Fields}=Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName).
to_doc_info(FullDocInfo) ->
{DocInfo, _Path} = to_doc_info_path(FullDocInfo),
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index b3bbd5baa..1694ac87f 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -536,7 +536,7 @@ host_for_request(#httpd{mochi_req=MochiReq}) ->
Value -> Value
end.
-absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) ->
+absolute_uri(#httpd{mochi_req=MochiReq}=Req, [$/ | _] = Path) ->
Host = host_for_request(Req),
XSsl = config:get("httpd", "x_forwarded_ssl", "X-Forwarded-Ssl"),
Scheme = case MochiReq:get_header_value(XSsl) of
@@ -552,7 +552,9 @@ absolute_uri(#httpd{mochi_req=MochiReq}=Req, Path) ->
end
end
end,
- Scheme ++ "://" ++ Host ++ Path.
+ Scheme ++ "://" ++ Host ++ Path;
+absolute_uri(_Req, _Path) ->
+ throw({bad_request, "path must begin with a /."}).
unquote(UrlEncodedString) ->
mochiweb_util:unquote(UrlEncodedString).
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 51a83e7e4..6ac7b75af 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -265,7 +265,7 @@ cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
Hash = crypto:hmac(sha, Secret, SessionData),
mochiweb_cookies:cookie("AuthSession",
couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
- [{path, "/"}] ++ cookie_scheme(Req) ++ max_age()).
+ [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain()).
ensure_cookie_auth_secret() ->
case config:get("couch_httpd_auth", "secret", undefined) of
@@ -442,6 +442,13 @@ max_age() ->
[{max_age, Timeout}]
end.
+cookie_domain() ->
+ Domain = config:get("couch_httpd_auth", "cookie_domain", ""),
+ case Domain of
+ "" -> [];
+ _ -> [{domain, Domain}]
+ end.
+
reject_if_totp(User) ->
case get_totp_config(User) of
undefined ->
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
index 34a1539aa..05e63ba97 100644
--- a/src/couch/src/couch_httpd_db.erl
+++ b/src/couch/src/couch_httpd_db.erl
@@ -257,7 +257,8 @@ db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
couch_httpd:validate_ctype(Req, "application/json"),
- Doc = couch_doc:from_json_obj_validate(couch_httpd:json_body(Req)),
+ DbName = couch_db:name(Db),
+ Doc = couch_doc:from_json_obj_validate(couch_httpd:json_body(Req), DbName),
validate_attachment_names(Doc),
Doc2 = case Doc#doc.id of
<<"">> ->
@@ -303,6 +304,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
couch_httpd:validate_ctype(Req, "application/json"),
{JsonProps} = couch_httpd:json_body_obj(Req),
+ DbName = couch_db:name(Db),
case couch_util:get_value(<<"docs">>, JsonProps) of
undefined ->
send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
@@ -320,7 +322,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
true ->
Docs = lists:map(
fun({ObjProps} = JsonObj) ->
- Doc = couch_doc:from_json_obj_validate(JsonObj),
+ Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
validate_attachment_names(Doc),
Id = case Doc#doc.id of
<<>> -> couch_uuids:new();
@@ -354,7 +356,7 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
end;
false ->
Docs = lists:map(fun(JsonObj) ->
- Doc = couch_doc:from_json_obj_validate(JsonObj),
+ Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
validate_attachment_names(Doc),
Doc
end, DocsArray),
@@ -486,14 +488,17 @@ db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
% check for the existence of the doc to handle the 404 case.
couch_doc_open(Db, DocId, nil, []),
+ DbName = couch_db:name(Db),
case couch_httpd:qs_value(Req, "rev") of
undefined ->
update_doc(Req, Db, DocId,
- couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]}));
+ couch_doc_from_req(Req, DocId, {[{<<"_deleted">>,true}]},
+ DbName));
Rev ->
update_doc(Req, Db, DocId,
couch_doc_from_req(Req, DocId,
- {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}))
+ {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]},
+ DbName))
end;
db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
@@ -546,7 +551,8 @@ db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
couch_httpd:validate_referer(Req),
- couch_doc:validate_docid(DocId),
+ DbName = couch_db:name(Db),
+ couch_doc:validate_docid(DocId, DbName),
couch_httpd:validate_ctype(Req, "multipart/form-data"),
Form = couch_httpd:parse_form(Req),
case couch_util:get_value("_doc", Form) of
@@ -554,7 +560,7 @@ db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
{ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
Json ->
- Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json))
+ Doc = couch_doc_from_req(Req, DocId, ?JSON_DECODE(Json), DbName)
end,
UpdatedAtts = [
couch_att:new([
@@ -580,14 +586,15 @@ db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
update_doc(Req, Db, DocId, NewDoc);
db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
- couch_doc:validate_docid(DocId),
+ DbName = couch_db:name(Db),
+ couch_doc:validate_docid(DocId, DbName),
case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
("multipart/related;" ++ _) = ContentType ->
couch_httpd:check_max_request_length(Req),
{ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
ContentType, fun() -> receive_request_data(Req) end),
- Doc = couch_doc_from_req(Req, DocId, Doc0),
+ Doc = couch_doc_from_req(Req, DocId, Doc0, DbName),
try
Result = update_doc(Req, Db, DocId, Doc),
WaitFun(),
@@ -599,7 +606,7 @@ db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
end;
_Else ->
Body = couch_httpd:json_body(Req),
- Doc = couch_doc_from_req(Req, DocId, Body),
+ Doc = couch_doc_from_req(Req, DocId, Body, DbName),
update_doc(Req, Db, DocId, Doc)
end;
@@ -783,7 +790,7 @@ update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
{rev, NewRevStr}]})
end.
-couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
+couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc, _) ->
validate_attachment_names(Doc),
Rev = case couch_httpd:qs_value(Req, "rev") of
undefined ->
@@ -810,8 +817,9 @@ couch_doc_from_req(Req, DocId, #doc{revs=Revs}=Doc) ->
end
end,
Doc#doc{id=DocId, revs=Revs2};
-couch_doc_from_req(Req, DocId, Json) ->
- couch_doc_from_req(Req, DocId, couch_doc:from_json_obj_validate(Json)).
+couch_doc_from_req(Req, DocId, Json, DbName) ->
+ couch_doc_from_req(Req, DocId,
+ couch_doc:from_json_obj_validate(Json, DbName), DbName).
% Useful for debugging
% couch_doc_open(Db, DocId) ->
@@ -1019,7 +1027,7 @@ db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileN
% check for the existence of the doc to handle the 404 case.
couch_doc_open(Db, DocId, nil, [])
end,
- couch_doc:validate_docid(DocId),
+ couch_doc:validate_docid(DocId, couch_db:name(Db)),
#doc{id=DocId};
Rev ->
case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
index eb75a9461..1def94853 100644
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ b/src/couch/src/couch_httpd_misc_handlers.erl
@@ -199,6 +199,7 @@ handle_config_req(#httpd{method='POST', path_parts=[_, <<"_reload">>]}=Req) ->
handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
when (Method == 'PUT') or (Method == 'DELETE') ->
ok = couch_httpd:verify_is_server_admin(Req),
+ couch_util:check_config_blacklist(Section),
Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
case config:get("httpd", "config_whitelist", undefined) of
undefined ->
diff --git a/src/couch/src/couch_lru.erl b/src/couch/src/couch_lru.erl
index 023515e7c..6ad7c65cd 100644
--- a/src/couch/src/couch_lru.erl
+++ b/src/couch/src/couch_lru.erl
@@ -19,13 +19,13 @@ new() ->
{gb_trees:empty(), dict:new()}.
insert(DbName, {Tree0, Dict0}) ->
- Lru = erlang:now(),
+ Lru = couch_util:unique_monotonic_integer(),
{gb_trees:insert(Lru, DbName, Tree0), dict:store(DbName, Lru, Dict0)}.
update(DbName, {Tree0, Dict0}) ->
case dict:find(DbName, Dict0) of
{ok, Old} ->
- New = erlang:now(),
+ New = couch_util:unique_monotonic_integer(),
Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)),
Dict = dict:store(DbName, New, Dict0),
{Tree, Dict};
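[Editor's note: the LRU now keys its gb_tree on couch_util:unique_monotonic_integer() instead of erlang:now(). A sketch of how the structure behaves, assuming new/0, insert/2 and update/2 are exported as couch_server uses them:]

    Lru0 = couch_lru:new(),
    Lru1 = couch_lru:insert(<<"db1">>, Lru0),
    Lru2 = couch_lru:insert(<<"db2">>, Lru1),
    %% Touching db1 re-keys it with a fresh (larger) integer, so db2 becomes
    %% the least recently used entry and the next eviction candidate:
    Lru3 = couch_lru:update(<<"db1">>, Lru2).
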
diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl
index 5efcccaac..b6a7873fb 100644
--- a/src/couch/src/couch_multidb_changes.erl
+++ b/src/couch/src/couch_multidb_changes.erl
@@ -302,7 +302,7 @@ notify_fold(DbName, {Server, DbSuffix, Count}) ->
% number of shards back to back during startup.
jitter(N) ->
Range = min(2 * N * ?AVG_DELAY_MSEC, ?MAX_DELAY_MSEC),
- random:uniform(Range).
+ couch_rand:uniform(Range).
scan_local_db(Server, DbSuffix) when is_pid(Server) ->
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index 63b0e3900..4928eea32 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -125,20 +125,39 @@ os_reduce(Lang, OsRedSrcs, KVs) ->
Proc = get_os_process(Lang),
OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
[true, Reductions] -> Reductions
+ catch
+ throw:{reduce_overflow_error, Msg} ->
+ [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
after
ok = ret_os_process(Proc)
end,
{ok, OsResults}.
os_rereduce(Lang, OsRedSrcs, KVs) ->
- Proc = get_os_process(Lang),
- try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
- [true, [Reduction]] -> Reduction
- after
- ok = ret_os_process(Proc)
+ case get_overflow_error(KVs) of
+ undefined ->
+ Proc = get_os_process(Lang),
+ try proc_prompt(Proc, [<<"rereduce">>, OsRedSrcs, KVs]) of
+ [true, [Reduction]] -> Reduction
+ catch
+ throw:{reduce_overflow_error, Msg} ->
+ {[{reduce_overflow_error, Msg}]}
+ after
+ ok = ret_os_process(Proc)
+ end;
+ Error ->
+ Error
end.
+get_overflow_error([]) ->
+ undefined;
+get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
+ Error;
+get_overflow_error([_ | Rest]) ->
+ get_overflow_error(Rest).
+
+
builtin_reduce(_Re, [], _KVs, Acc) ->
{ok, lists:reverse(Acc)};
builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
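[Editor's note: a sketch of the new overflow handling; get_overflow_error/1 is internal to the module and the message text below is hypothetical, but the marker shape matches the code above. If any value in the rereduce inputs already carries the marker, os_rereduce/3 skips the OS process round-trip and propagates the error object as the reduction:]

    Marker = {[{reduce_overflow_error, <<"reduce output too large">>}]},
    undefined = get_overflow_error([1, 2, 3]),
    Marker    = get_overflow_error([1, Marker, 3]).
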
diff --git a/src/couch/src/couch_rand.erl b/src/couch/src/couch_rand.erl
new file mode 100644
index 000000000..f5a8fc6af
--- /dev/null
+++ b/src/couch/src/couch_rand.erl
@@ -0,0 +1,57 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_rand).
+
+
+-export([
+ uniform/0,
+ uniform/1
+]).
+
+
+-ifdef(NORANDMODULE).
+
+
+uniform() ->
+ maybe_set_random_seed(),
+ random:uniform().
+
+
+uniform(N) ->
+ maybe_set_random_seed(),
+ random:uniform(N).
+
+
+maybe_set_random_seed() ->
+ case get(random_seed) of
+ undefined ->
+ {_, Sec, USec} = os:timestamp(),
+ Seed = {erlang:phash2(self()), Sec, USec},
+ random:seed(Seed);
+ _ ->
+ ok
+ end.
+
+
+-else.
+
+
+uniform() ->
+ rand:uniform().
+
+
+uniform(N) ->
+ rand:uniform(N).
+
+
+-endif.
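[Editor's note: couch_rand gives callers one API regardless of OTP release: under the NORANDMODULE define (R16/17) it seeds and delegates to the legacy random module, otherwise to rand. A usage sketch:]

    F = couch_rand:uniform(),     % float between 0.0 and 1.0
    N = couch_rand:uniform(10),   % integer in 1..10
    true = is_float(F) andalso N >= 1 andalso N =< 10.
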
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 42d10ec1e..b8a0b623b 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -34,12 +34,26 @@
-export([callback_exists/3, validate_callback_exists/3]).
-export([with_proc/4]).
-export([process_dict_get/2, process_dict_get/3]).
+-export([unique_monotonic_integer/0]).
+-export([check_config_blacklist/1]).
-include_lib("couch/include/couch_db.hrl").
% arbitrarily chosen amount of memory to use before flushing to disk
-define(FLUSH_MAX_MEM, 10000000).
+-define(BLACKLIST_CONFIG_SECTIONS, [
+ <<"daemons">>,
+ <<"external">>,
+ <<"httpd_design_handlers">>,
+ <<"httpd_db_handlers">>,
+ <<"httpd_global_handlers">>,
+ <<"native_query_servers">>,
+ <<"os_daemons">>,
+ <<"query_servers">>
+]).
+
+
priv_dir() ->
case code:priv_dir(couch) of
{error, bad_name} ->
@@ -209,7 +223,8 @@ json_user_ctx(Db) ->
% returns a random integer
rand32() ->
- crypto:rand_uniform(0, 16#100000000).
+ <<I:32>> = crypto:strong_rand_bytes(4),
+ I.
% given a pathname "../foo/bar/" it gives back the fully qualified
% absolute pathname.
@@ -411,7 +426,7 @@ json_encode(V) ->
json_decode(V) ->
try
- jiffy:decode(V)
+ jiffy:decode(V, [dedupe_keys])
catch
throw:Error ->
throw({invalid_json, Error})
@@ -624,3 +639,26 @@ process_dict_get(Pid, Key, DefaultValue) ->
undefined ->
DefaultValue
end.
+
+
+-ifdef(PRE18TIMEFEATURES).
+
+unique_monotonic_integer() ->
+ {Ms, S, Us} = erlang:now(),
+ (Ms * 1000000 + S) * 1000000 + Us.
+
+-else.
+
+unique_monotonic_integer() ->
+ erlang:unique_integer([monotonic, positive]).
+
+-endif.
+
+check_config_blacklist(Section) ->
+ case lists:member(Section, ?BLACKLIST_CONFIG_SECTIONS) of
+ true ->
+ Msg = <<"Config section blacklisted for modification over HTTP API.">>,
+ throw({forbidden, Msg});
+ _ ->
+ ok
+ end.
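[Editor's note: unique_monotonic_integer/0 is this patch's replacement for erlang:now() as a source of ordered keys: erlang:unique_integer([monotonic, positive]) on OTP 18+, a microsecond timestamp on older releases. A sketch of the property the call sites rely on:]

    A = couch_util:unique_monotonic_integer(),
    B = couch_util:unique_monotonic_integer(),
    true = B > A.   % strictly increasing, hence safe as an ets/gb_trees key
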
diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl
index ebe145c17..5c7359b33 100644
--- a/src/couch/src/couch_uuids.erl
+++ b/src/couch/src/couch_uuids.erl
@@ -17,7 +17,7 @@
-behaviour(config_listener).
-export([start/0, stop/0]).
--export([new/0, random/0, utc_random/0]).
+-export([new/0, random/0]).
-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
@@ -39,17 +39,6 @@ new() ->
random() ->
list_to_binary(couch_util:to_hex(crypto:strong_rand_bytes(16))).
-utc_random() ->
- utc_suffix(couch_util:to_hex(crypto:strong_rand_bytes(9))).
-
-utc_suffix(Suffix) ->
- Now = {_, _, Micro} = erlang:now(), % uniqueness is used.
- Nowish = calendar:now_to_universal_time(Now),
- Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish),
- Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
- Prefix = io_lib:format("~14.16.0b", [(Nowsecs - Then) * 1000000 + Micro]),
- list_to_binary(Prefix ++ Suffix).
-
init([]) ->
ok = config:listen_for_changes(?MODULE, nil),
{ok, state()}.
@@ -59,10 +48,13 @@ terminate(_Reason, _State) ->
handle_call(create, _From, random) ->
{reply, random(), random};
-handle_call(create, _From, utc_random) ->
- {reply, utc_random(), utc_random};
-handle_call(create, _From, {utc_id, UtcIdSuffix}) ->
- {reply, utc_suffix(UtcIdSuffix), {utc_id, UtcIdSuffix}};
+handle_call(create, _From, {utc_random, ClockSeq}) ->
+ {UtcRandom, NewClockSeq} = utc_random(ClockSeq),
+ {reply, UtcRandom, {utc_random, NewClockSeq}};
+handle_call(create, _From, {utc_id, UtcIdSuffix, ClockSeq}) ->
+ Now = os:timestamp(),
+ {UtcId, NewClockSeq} = utc_suffix(UtcIdSuffix, ClockSeq, Now),
+ {reply, UtcId, {utc_id, UtcIdSuffix, NewClockSeq}};
handle_call(create, _From, {sequential, Pref, Seq}) ->
Result = ?l2b(Pref ++ io_lib:format("~6.16.0b", [Seq])),
case Seq >= 16#fff000 of
@@ -111,12 +103,89 @@ state() ->
random ->
random;
utc_random ->
- utc_random;
+ ClockSeq = micros_since_epoch(os:timestamp()),
+ {utc_random, ClockSeq};
utc_id ->
+ ClockSeq = micros_since_epoch(os:timestamp()),
UtcIdSuffix = config:get("uuids", "utc_id_suffix", ""),
- {utc_id, UtcIdSuffix};
+ {utc_id, UtcIdSuffix, ClockSeq};
sequential ->
{sequential, new_prefix(), inc()};
Unknown ->
throw({unknown_uuid_algorithm, Unknown})
end.
+
+micros_since_epoch({_, _, Micro} = Now) ->
+ Nowish = calendar:now_to_universal_time(Now),
+ Nowsecs = calendar:datetime_to_gregorian_seconds(Nowish),
+ Then = calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}),
+ (Nowsecs - Then) * 1000000 + Micro.
+
+utc_random(ClockSeq) ->
+ Suffix = couch_util:to_hex(crypto:strong_rand_bytes(9)),
+ utc_suffix(Suffix, ClockSeq, os:timestamp()).
+
+utc_suffix(Suffix, ClockSeq, Now) ->
+ OsMicros = micros_since_epoch(Now),
+ NewClockSeq = if
+ OsMicros =< ClockSeq ->
+ % Timestamp is lagging, use ClockSeq as Timestamp
+ ClockSeq + 1;
+ OsMicros > ClockSeq ->
+ % Timestamp advanced, use it, and reset ClockSeq with it
+ OsMicros
+ end,
+ Prefix = io_lib:format("~14.16.0b", [NewClockSeq]),
+ {list_to_binary(Prefix ++ Suffix), NewClockSeq}.
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+utc_id_time_does_not_advance_test() ->
+ % Timestamp didn't advance but local clock sequence should and new UUIds
+ % should be generated
+ Now = {0, 1, 2},
+ ClockSeq0 = micros_since_epoch({3, 4, 5}),
+ {UtcId0, ClockSeq1} = utc_suffix("", ClockSeq0, Now),
+ ?assert(is_binary(UtcId0)),
+ ?assertEqual(ClockSeq0 + 1, ClockSeq1),
+ {UtcId1, ClockSeq2} = utc_suffix("", ClockSeq1, Now),
+ ?assertNotEqual(UtcId0, UtcId1),
+ ?assertEqual(ClockSeq1 + 1, ClockSeq2).
+
+
+utc_id_time_advanced_test() ->
+ % Timestamp advanced, a new UUID generated and also the last clock sequence
+ % is updated to that timestamp.
+ Now0 = {0, 1, 2},
+ ClockSeq0 = micros_since_epoch({3, 4, 5}),
+ {UtcId0, ClockSeq1} = utc_suffix("", ClockSeq0, Now0),
+ ?assert(is_binary(UtcId0)),
+ ?assertEqual(ClockSeq0 + 1, ClockSeq1),
+ Now1 = {9, 9, 9},
+ {UtcId1, ClockSeq2} = utc_suffix("", ClockSeq1, Now1),
+ ?assert(is_binary(UtcId1)),
+ ?assertNotEqual(UtcId0, UtcId1),
+ ?assertEqual(micros_since_epoch(Now1), ClockSeq2).
+
+utc_random_test_time_does_not_advance_test() ->
+ {MSec, Sec, USec} = os:timestamp(),
+ Future = {MSec + 10, Sec, USec},
+ ClockSeqFuture = micros_since_epoch(Future),
+ {UtcRandom, NextClockSeq} = utc_random(ClockSeqFuture),
+ ?assert(is_binary(UtcRandom)),
+ ?assertEqual(32, byte_size(UtcRandom)),
+ ?assertEqual(ClockSeqFuture + 1, NextClockSeq).
+
+utc_random_test_time_advance_test() ->
+ ClockSeqPast = micros_since_epoch({1, 1, 1}),
+ {UtcRandom, NextClockSeq} = utc_random(ClockSeqPast),
+ ?assert(is_binary(UtcRandom)),
+ ?assertEqual(32, byte_size(UtcRandom)),
+ ?assert(NextClockSeq > micros_since_epoch({1000, 0, 0})).
+
+
+-endif.
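
The clock-sequence scheme above keeps the 14-hex-digit prefix strictly increasing even when os:timestamp/0 stalls or steps backwards, so generated ids stay sortable. A minimal sketch of that property, assuming utc_suffix/3 and micros_since_epoch/1 were exported (both are module-internal in this patch):

    % Stalled clock: the sequence, not the clock, drives uniqueness.
    Now = {0, 1, 2},
    Seq0 = couch_uuids:micros_since_epoch({3, 4, 5}),
    {Id1, Seq1} = couch_uuids:utc_suffix("ff", Seq0, Now),
    {Id2, Seq2} = couch_uuids:utc_suffix("ff", Seq1, Now),
    Seq1 = Seq0 + 1,
    Seq2 = Seq1 + 1,
    true = Id1 < Id2.
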
diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index 8a05e8830..e0a53a6f7 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -245,7 +245,7 @@ fake_db(Fields) ->
end, #db{}, Fields).
now_us() ->
- {MegaSecs, Secs, MicroSecs} = now(),
+ {MegaSecs, Secs, MicroSecs} = os:timestamp(),
(MegaSecs * 1000000 + Secs) * 1000000 + MicroSecs.
mock(Modules) when is_list(Modules) ->
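
erlang:now/0 has been deprecated since OTP 18; os:timestamp/0 returns the same {MegaSecs, Secs, MicroSecs} triple without now/0's strict-monotonicity guarantee, which these call sites don't rely on. On newer OTP releases the triple can be skipped entirely, e.g. (a sketch, not what this patch does):

    NowUs = erlang:system_time(microsecond).
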
diff --git a/src/couch/test/couch_btree_tests.erl b/src/couch/test/couch_btree_tests.erl
index 35cf41604..3c8840a60 100644
--- a/src/couch/test/couch_btree_tests.erl
+++ b/src/couch/test/couch_btree_tests.erl
@@ -82,7 +82,7 @@ btree_open_test_() ->
sorted_kvs_test_() ->
Funs = kvs_test_funs(),
- Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+ Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
{
"BTree with sorted keys",
{
@@ -97,7 +97,7 @@ sorted_kvs_test_() ->
}.
rsorted_kvs_test_() ->
- Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+ Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
Funs = kvs_test_funs(),
Reversed = Sorted,
{
@@ -115,7 +115,7 @@ rsorted_kvs_test_() ->
shuffled_kvs_test_() ->
Funs = kvs_test_funs(),
- Sorted = [{Seq, random:uniform()} || Seq <- lists:seq(1, ?ROWS)],
+ Sorted = [{Seq, couch_rand:uniform()} || Seq <- lists:seq(1, ?ROWS)],
Shuffled = shuffle(Sorted),
{
"BTree with shuffled keys",
@@ -479,7 +479,7 @@ randomize(T, List) ->
end, randomize(List), lists:seq(1, (T - 1))).
randomize(List) ->
- D = lists:map(fun(A) -> {random:uniform(), A} end, List),
+ D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List),
{_, D1} = lists:unzip(lists:keysort(1, D)),
D1.
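
The random -> couch_rand moves in these tests rely on the new couch_rand module introduced elsewhere in this merge. Its source isn't shown in this hunk; the essential shape is a thin shim over the non-deprecated rand module, roughly (an assumption, not the module's verbatim contents):

    -module(couch_rand).
    -export([uniform/0, uniform/1]).

    % The deprecated `random` module required explicit per-process
    % seeding; `rand` (OTP >= 18) seeds itself on first use.
    uniform() ->
        rand:uniform().

    uniform(N) ->
        rand:uniform(N).
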
diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/couch_doc_json_tests.erl
index ce099d112..bcff0646a 100644
--- a/src/couch/test/couch_doc_json_tests.erl
+++ b/src/couch/test/couch_doc_json_tests.erl
@@ -171,6 +171,45 @@ from_json_success_cases() ->
end,
Cases).
+from_json_with_db_name_success_cases() ->
+ Cases = [
+ {
+ {[]},
+ <<"_dbs">>,
+ #doc{},
+ "DbName _dbs is acceptable with no docid"
+ },
+ {
+ {[{<<"_id">>, <<"zing!">>}]},
+ <<"_dbs">>,
+ #doc{id = <<"zing!">>},
+ "DbName _dbs is acceptable with a normal docid"
+ },
+ {
+ {[{<<"_id">>, <<"_users">>}]},
+ <<"_dbs">>,
+ #doc{id = <<"_users">>},
+ "_dbs/_users is acceptable"
+ },
+ {
+ {[{<<"_id">>, <<"_replicator">>}]},
+ <<"_dbs">>,
+ #doc{id = <<"_replicator">>},
+ "_dbs/_replicator is acceptable"
+ },
+ {
+ {[{<<"_id">>, <<"_global_changes">>}]},
+ <<"_dbs">>,
+ #doc{id = <<"_global_changes">>},
+ "_dbs/_global_changes is acceptable"
+ }
+ ],
+ lists:map(
+ fun({EJson, DbName, Expect, Msg}) ->
+ {Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
+ end,
+ Cases).
+
from_json_error_cases() ->
Cases = [
{
@@ -261,6 +300,38 @@ from_json_error_cases() ->
end
end, Cases).
+from_json_with_dbname_error_cases() ->
+ Cases = [
+ {
+ {[{<<"_id">>, <<"_random">>}]},
+ <<"_dbs">>,
+ {illegal_docid,
+ <<"Only reserved document ids may start with underscore.">>},
+ "Disallow non-system-DB underscore prefixed docids in _dbs database."
+ },
+ {
+ {[{<<"_id">>, <<"_random">>}]},
+ <<"foobar">>,
+ {illegal_docid,
+ <<"Only reserved document ids may start with underscore.">>},
+ "Disallow arbitrary underscore prefixed docids in regular database."
+ },
+ {
+ {[{<<"_id">>, <<"_users">>}]},
+ <<"foobar">>,
+ {illegal_docid,
+ <<"Only reserved document ids may start with underscore.">>},
+ "Disallow system-DB docid _users in regular database."
+ }
+ ],
+
+ lists:map(
+ fun({EJson, DbName, Expect, Msg}) ->
+ Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)),
+ {Msg, ?_assertMatch(Expect, Error)}
+ end,
+ Cases).
+
to_json_success_cases() ->
Cases = [
{
diff --git a/src/couch/test/couch_doc_tests.erl b/src/couch/test/couch_doc_tests.erl
index 5d0448a9e..cf41df61d 100644
--- a/src/couch/test/couch_doc_tests.erl
+++ b/src/couch/test/couch_doc_tests.erl
@@ -29,7 +29,7 @@ doc_from_multi_part_stream_test() ->
ContentType = "multipart/related;boundary=multipart_related_boundary~~~~~~~~~~~~~~~~~~~~",
DataFun = fun() -> request(start) end,
- mock_config_max_document_id_length(),
+ mock_config(),
{ok, #doc{id = <<"doc0">>, atts = [_]}, _Fun, _Parser} =
couch_doc:doc_from_multi_part_stream(ContentType, DataFun),
meck:unload(config),
@@ -77,7 +77,7 @@ len_doc_to_multi_part_stream_test() ->
validate_docid_test_() ->
{setup,
fun() ->
- mock_config_max_document_id_length(),
+ mock_config(),
ok = meck:new(couch_db_plugin, [passthrough]),
meck:expect(couch_db_plugin, validate_docid, fun(_) -> false end)
end,
@@ -90,6 +90,9 @@ validate_docid_test_() ->
?_assertEqual(ok, couch_doc:validate_docid(<<"_design/idx">>)),
?_assertEqual(ok, couch_doc:validate_docid(<<"_local/idx">>)),
?_assertEqual(ok, couch_doc:validate_docid(large_id(1024))),
+ ?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)),
+ ?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)),
+ ?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)),
?_assertThrow({illegal_docid, _},
couch_doc:validate_docid(<<>>)),
?_assertThrow({illegal_docid, _},
@@ -103,7 +106,11 @@ validate_docid_test_() ->
?_assertThrow({illegal_docid, _},
couch_doc:validate_docid(<<"_local/">>)),
?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(large_id(1025)))
+ couch_doc:validate_docid(large_id(1025))),
+ ?_assertThrow({illegal_docid, _},
+ couch_doc:validate_docid(<<"_users">>, <<"foo">>)),
+ ?_assertThrow({illegal_docid, _},
+ couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>))
]
}.
@@ -127,11 +134,12 @@ collected() ->
B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]),
binary:split(B, [<<0>>], [global]).
-mock_config_max_document_id_length() ->
+mock_config() ->
ok = meck:new(config, [passthrough]),
meck:expect(config, get,
fun("couchdb", "max_document_id_length", "infinity") -> "1024";
("couchdb", "max_attachment_size", "infinity") -> "infinity";
+ ("mem3", "shards_db", "_dbs") -> "_dbs";
(Key, Val, Default) -> meck:passthrough([Key, Val, Default])
end
).
diff --git a/src/couch/test/couch_file_tests.erl b/src/couch/test/couch_file_tests.erl
index c16be16c4..a387615dd 100644
--- a/src/couch/test/couch_file_tests.erl
+++ b/src/couch/test/couch_file_tests.erl
@@ -311,14 +311,14 @@ check_header_recovery(CheckFun) ->
ok.
write_random_data(Fd) ->
- write_random_data(Fd, 100 + random:uniform(1000)).
+ write_random_data(Fd, 100 + couch_rand:uniform(1000)).
write_random_data(Fd, 0) ->
{ok, Bytes} = couch_file:bytes(Fd),
{ok, (1 + Bytes div ?BLOCK_SIZE) * ?BLOCK_SIZE};
write_random_data(Fd, N) ->
Choices = [foo, bar, <<"bizzingle">>, "bank", ["rough", stuff]],
- Term = lists:nth(random:uniform(4) + 1, Choices),
+ Term = lists:nth(couch_rand:uniform(4) + 1, Choices),
{ok, _, _} = couch_file:append_term(Fd, Term),
write_random_data(Fd, N - 1).
diff --git a/src/couch/test/couchdb_attachments_tests.erl b/src/couch/test/couchdb_attachments_tests.erl
index 4536ba6b2..d9efac551 100644
--- a/src/couch/test/couchdb_attachments_tests.erl
+++ b/src/couch/test/couchdb_attachments_tests.erl
@@ -758,8 +758,8 @@ create_already_compressed_att(Host, DbName) ->
gzip(Data) ->
Z = zlib:open(),
ok = zlib:deflateInit(Z, ?COMPRESSION_LEVEL, deflated, 16 + 15, 8, default),
- zlib:deflate(Z, Data),
+ Chunk = zlib:deflate(Z, Data),
Last = zlib:deflate(Z, [], finish),
ok = zlib:deflateEnd(Z),
ok = zlib:close(Z),
- Last.
+ [Chunk, Last].
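
The old gzip/1 discarded the output of the first zlib:deflate/2 call and returned only the trailing chunk, producing a truncated stream; returning the accumulated iolist fixes that. A quick round-trip check of the patched helper (a sketch; zlib:gunzip/1 understands the 16 + 15 gzip framing used here):

    Data = <<"attachment body">>,
    Gz = iolist_to_binary(gzip(Data)),
    Data = zlib:gunzip(Gz).
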
diff --git a/src/couch/test/couchdb_cookie_domain_tests.erl b/src/couch/test/couchdb_cookie_domain_tests.erl
new file mode 100755
index 000000000..1a9aedb93
--- /dev/null
+++ b/src/couch/test/couchdb_cookie_domain_tests.erl
@@ -0,0 +1,77 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couchdb_cookie_domain_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "cookie_domain_test_admin").
+-define(PASS, "pass").
+
+setup(PortType) ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ Addr = config:get("httpd", "bind_address", "127.0.0.1"),
+ lists:concat(["http://", Addr, ":", port(PortType), "/_session"]).
+
+teardown(_,_) ->
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+cookie_test_() ->
+ Tests = [
+ fun should_set_cookie_domain/2,
+ fun should_not_set_cookie_domain/2
+ ],
+ {
+ "Cookie domain tests",
+ {
+ setup,
+ fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1,
+ [
+ make_test_case(clustered, Tests)
+ ]
+ }
+ }.
+
+make_test_case(Mod, Funs) ->
+ {
+     lists:flatten(io_lib:format("~s", [Mod])),
+     {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
+ }.
+
+should_set_cookie_domain(_PortType, Url) ->
+ ?_assertEqual(true,
+ begin
+ ok = config:set("couch_httpd_auth", "cookie_domain", "example.com", false),
+ {ok, Code, Headers, _} = test_request:post(Url, [{"Content-Type", "application/json"}],
+ "{\"name\":\"" ++ ?USER ++ "\", \"password\": \"" ++ ?PASS ++ "\"}"),
+ ?assert(Code =:= 200),
+ Cookie = proplists:get_value("Set-Cookie", Headers),
+ string:str(Cookie, "; Domain=example.com") > 0
+ end).
+
+should_not_set_cookie_domain(_PortType, Url) ->
+ ?_assertEqual(0,
+ begin
+ ok = config:set("couch_httpd_auth", "cookie_domain", "", false),
+ {ok, Code, Headers, _} = test_request:post(Url, [{"Content-Type", "application/json"}],
+ "{\"name\":\"" ++ ?USER ++ "\", \"password\": \"" ++ ?PASS ++ "\"}"),
+ ?assert(Code =:= 200),
+ Cookie = proplists:get_value("Set-Cookie", Headers),
+ string:str(Cookie, "; Domain=")
+ end).
+
+port(clustered) ->
+ integer_to_list(mochiweb_socket_server:get(chttpd, port));
+port(backdoor) ->
+ integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
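
The behaviour under test is driven by a single setting; clearing it restores host-only session cookies (the exact values the tests above exercise):

    % Scope the AuthSession cookie to a shared parent domain:
    config:set("couch_httpd_auth", "cookie_domain", "example.com", false),
    % Back to host-only cookies (no Domain attribute emitted):
    config:set("couch_httpd_auth", "cookie_domain", "", false).
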
diff --git a/src/couch_epi/test/couch_epi_tests.erl b/src/couch_epi/test/couch_epi_tests.erl
index 99a06f31a..79122d75a 100644
--- a/src/couch_epi/test/couch_epi_tests.erl
+++ b/src/couch_epi/test/couch_epi_tests.erl
@@ -26,7 +26,7 @@
-define(temp_atom,
fun() ->
- {A, B, C} = erlang:now(),
+ {A, B, C} = os:timestamp(),
list_to_atom(lists:flatten(io_lib:format("~p~p~p", [A, B, C])))
end).
diff --git a/src/couch_log/test/couch_log_test.erl b/src/couch_log/test/couch_log_test.erl
index 17777304f..c7195f65f 100644
--- a/src/couch_log/test/couch_log_test.erl
+++ b/src/couch_log/test/couch_log_test.erl
@@ -80,6 +80,5 @@ check_levels(TestLevel, [CfgLevel | RestLevels]) ->
new_msg() ->
- random:seed(os:timestamp()),
- Bin = list_to_binary([random:uniform(255) || _ <- lists:seq(1, 16)]),
+ Bin = list_to_binary([couch_rand:uniform(255) || _ <- lists:seq(1, 16)]),
couch_util:to_hex(Bin).
diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl
index 9ad50eeef..004caef09 100644
--- a/src/couch_mrview/src/couch_mrview_http.erl
+++ b/src/couch_mrview/src/couch_mrview_http.erl
@@ -440,7 +440,11 @@ row_to_json(error, Row) ->
% match prior behavior.
Key = couch_util:get_value(key, Row),
Val = couch_util:get_value(value, Row),
- Obj = {[{key, Key}, {error, Val}]},
+ Reason = couch_util:get_value(reason, Row),
+ ReasonProp = if Reason == undefined -> []; true ->
+ [{reason, Reason}]
+ end,
+ Obj = {[{key, Key}, {error, Val}] ++ ReasonProp},
?JSON_ENCODE(Obj);
row_to_json(Id0, Row) ->
Id = case Id0 of
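
With the optional reason member, an error row in a view response can carry context instead of just the bare error value. Illustratively (the reason text is made up):

    Row = [{key, <<"k">>}, {value, <<"timeout">>},
           {reason, <<"shard did not respond in time">>}],
    % row_to_json(error, Row) then encodes roughly as:
    %   {"key":"k","error":"timeout","reason":"shard did not respond in time"}
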
diff --git a/src/couch_peruser/README.md b/src/couch_peruser/README.md
index 70f8348f5..64a05188c 100644
--- a/src/couch_peruser/README.md
+++ b/src/couch_peruser/README.md
@@ -1,6 +1,6 @@
# couch_peruser [![Build Status](https://travis-ci.org/apache/couchdb-peruser.svg?branch=master)](https://travis-ci.org/apache/couchdb-peruser)
-couch_peruser is a CouchDB daemon that ensures that a private per-user
+couch_peruser is a CouchDB application that ensures that a private per-user
database exists for each document in _users. These databases are
writable only by the corresponding user. Databases are in the form:
@@ -15,3 +15,20 @@ correctly implement in just about any language, especially JavaScript
and Erlang. Other encodings would be possible, but would require
additional client and server-side code to support that encoding. This
is the simplest scheme that is obviously correct.
+
+## Implementation Notes
+
+The module itself is a `gen_server` and it implements the `mem3_cluster`
+behaviour.
+
+In a CouchDB cluster, the module runs on each node in the cluster. On startup,
+it launches a changes listener for each shard of the `authentication_db`
+(`_users`).
+
+In a cluster, when a change notification comes in (after a user doc has been
+created/updated/deleted), each node independently determines whether it should
+handle the notification, based on the current list of active nodes in the
+cluster. This avoids concurrent, conflicting updates to the internal `_dbs`
+database while still guaranteeing that at least one node handles each
+notification. The mechanism survives cluster reconfigurations transparently.
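
The per-node decision the README describes boils down to a deterministic owner pick among live nodes; condensed (the real implementation is should_handle_doc_int/2 later in this patch):

    Live = [node() | nodes()],
    Shards = mem3:shards(DbName, DocId),
    Nodes = [N || #shard{node = N} <- Shards, lists:member(N, Live)],
    % Only the owner node acts on the change, so the internal `_dbs`
    % database is never updated concurrently for the same user doc.
    ShouldHandle = mem3:owner(DbName, DocId, Nodes) =:= node().
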
diff --git a/src/couch_peruser/src/couch_peruser.app.src b/src/couch_peruser/src/couch_peruser.app.src
index fb6d45bf1..6cfaf4421 100644
--- a/src/couch_peruser/src/couch_peruser.app.src
+++ b/src/couch_peruser/src/couch_peruser.app.src
@@ -13,6 +13,8 @@
{application, couch_peruser, [
{description, "couch_peruser - maintains per-user databases in CouchDB"},
{vsn, git},
- {registered, []},
- {applications, [kernel, stdlib, config, couch, fabric]}
+ {registered, [couch_peruser, couch_peruser_sup]},
+ {applications, [kernel, stdlib, config, couch, fabric, mem3]},
+ {mod, {couch_peruser_app, []}},
+ {env, []}
]}.
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index 63ef084ce..0c769324a 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -12,62 +12,116 @@
-module(couch_peruser).
-behaviour(gen_server).
+-behaviour(mem3_cluster).
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
--define(USERDB_PREFIX, "userdb-").
-
% gen_server callbacks
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-export([init_changes_handler/1, changes_handler/3]).
--record(state, {parent, db_name, delete_dbs, changes_pid, changes_ref}).
--record(clusterState, {parent, db_name, delete_dbs, states}).
+% mem3_cluster callbacks
+-export([
+ cluster_stable/1,
+ cluster_unstable/1
+]).
+
+-record(changes_state, {
+ parent :: pid(),
+ db_name :: binary(),
+ delete_dbs :: boolean(),
+ changes_pid :: pid(),
+ changes_ref :: reference()
+}).
+
+-record(state, {
+ parent :: pid(),
+ db_name :: binary(),
+ delete_dbs :: boolean(),
+ states :: list(),
+ mem3_cluster_pid :: pid(),
+ cluster_stable :: boolean()
+}).
+-define(USERDB_PREFIX, "userdb-").
-define(RELISTEN_DELAY, 5000).
+-define(DEFAULT_QUIET_PERIOD, 60). % seconds
+-define(DEFAULT_START_PERIOD, 5). % seconds
-
+%%
+%% Please leave in the commented-out couch_log:debug calls, thanks! — Jan
+%%
+-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
- gen_server:start_link(?MODULE, [], []).
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-init() ->
+-spec init_state() -> #state{}.
+init_state() ->
+ couch_log:debug("peruser: starting on node ~p in pid ~p", [node(), self()]),
case config:get_boolean("couch_peruser", "enable", false) of
false ->
- #clusterState{};
+ couch_log:debug("peruser: disabled on node ~p", [node()]),
+ #state{};
true ->
+ couch_log:debug("peruser: enabled on node ~p", [node()]),
DbName = ?l2b(config:get(
"couch_httpd_auth", "authentication_db", "_users")),
DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
- ClusterState = #clusterState{
+ % set up cluster-stable listener
+ Period = abs(config:get_integer("couch_peruser", "cluster_quiet_period",
+ ?DEFAULT_QUIET_PERIOD)),
+ StartPeriod = abs(config:get_integer("couch_peruser",
+ "cluster_start_period", ?DEFAULT_START_PERIOD)),
+
+ {ok, Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(), StartPeriod,
+ Period),
+
+ #state{
parent = self(),
db_name = DbName,
- delete_dbs = DeleteDbs
- },
- try
- States = lists:map(fun (A) ->
- S = #state{parent = ClusterState#clusterState.parent,
- db_name = A#shard.name,
- delete_dbs = DeleteDbs},
- {Pid, Ref} = spawn_opt(
- ?MODULE, init_changes_handler, [S], [link, monitor]),
- S#state{changes_pid=Pid, changes_ref=Ref}
- end, mem3:local_shards(DbName)),
-
- ClusterState#clusterState{states = States}
- catch error:database_does_not_exist ->
- couch_log:warning("couch_peruser can't proceed as underlying database (~s) is missing, disables itself.", [DbName]),
- config:set("couch_peruser", "enable", "false", lists:concat([binary_to_list(DbName), " is missing"]))
- end
+ delete_dbs = DeleteDbs,
+ mem3_cluster_pid = Mem3Cluster,
+ cluster_stable = false
+ }
end.
-init_changes_handler(#state{db_name=DbName} = State) ->
+
+-spec start_listening(State :: #state{}) -> #state{} | ok.
+start_listening(#state{states=ChangesStates}=State)
+ when length(ChangesStates) > 0 ->
+ % couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]),
+ State;
+start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs} = State) ->
+ % couch_log:debug("peruser: start_listening() on node ~p", [node()]),
+ try
+ States = lists:map(fun (A) ->
+ S = #changes_state{
+ parent = State#state.parent,
+ db_name = A#shard.name,
+ delete_dbs = DeleteDbs
+ },
+ {Pid, Ref} = spawn_opt(
+ ?MODULE, init_changes_handler, [S], [link, monitor]),
+ S#changes_state{changes_pid=Pid, changes_ref=Ref}
+ end, mem3:local_shards(DbName)),
+ % couch_log:debug("peruser: start_listening() States ~p", [States]),
+
+ State#state{states = States, cluster_stable = true}
+ catch error:database_does_not_exist ->
+ couch_log:warning("couch_peruser can't proceed as underlying database (~s) is missing, disables itself.", [DbName]),
+ config:set("couch_peruser", "enable", "false", lists:concat([binary_to_list(DbName), " is missing"]))
+ end.
+
+-spec init_changes_handler(ChangesState :: #changes_state{}) -> ok.
+init_changes_handler(#changes_state{db_name=DbName} = ChangesState) ->
+ % couch_log:debug("peruser: init_changes_handler() on DbName ~p", [DbName]),
try
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX, sys_db]),
- FunAcc = {fun ?MODULE:changes_handler/3, State},
+ FunAcc = {fun ?MODULE:changes_handler/3, ChangesState},
(couch_changes:handle_db_changes(
#changes_args{feed="continuous", timeout=infinity},
{json_req, null},
@@ -76,31 +130,78 @@ init_changes_handler(#state{db_name=DbName} = State) ->
ok
end.
-changes_handler({change, {Doc}, _Prepend}, _ResType, State=#state{}) ->
+-type db_change() :: {atom(), tuple(), binary()}.
+-spec changes_handler(
+ Change :: db_change(),
+ ResultType :: any(),
+ ChangesState :: #changes_state{}) -> #changes_state{}.
+changes_handler(
+ {change, {Doc}, _Prepend},
+ _ResType,
+ ChangesState=#changes_state{db_name=DbName}) ->
+ % couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]),
+
case couch_util:get_value(<<"id">>, Doc) of
- <<"org.couchdb.user:",User/binary>> ->
- case couch_util:get_value(<<"deleted">>, Doc, false) of
- false ->
- UserDb = ensure_user_db(User),
- ok = ensure_security(User, UserDb, fun add_user/3),
- State;
+ <<"org.couchdb.user:",User/binary>> = DocId ->
+ case should_handle_doc(DbName, DocId) of
true ->
- case State#state.delete_dbs of
- true ->
- _UserDb = delete_user_db(User),
- State;
+ case couch_util:get_value(<<"deleted">>, Doc, false) of
false ->
- UserDb = user_db_name(User),
- ok = ensure_security(User, UserDb, fun remove_user/3),
- State
- end
+ UserDb = ensure_user_db(User),
+ ok = ensure_security(User, UserDb, fun add_user/3),
+ ChangesState;
+ true ->
+ case ChangesState#changes_state.delete_dbs of
+ true ->
+ _UserDb = delete_user_db(User),
+ ChangesState;
+ false ->
+ UserDb = user_db_name(User),
+ ok = ensure_security(User, UserDb, fun remove_user/3),
+ ChangesState
+ end
+ end;
+ false ->
+ ChangesState
end;
_ ->
- State
+ ChangesState
end;
-changes_handler(_Event, _ResType, State) ->
- State.
+changes_handler(_Event, _ResType, ChangesState) ->
+ ChangesState.
+
+-spec should_handle_doc(ShardName :: binary(), DocId::binary()) -> boolean().
+should_handle_doc(ShardName, DocId) ->
+ case is_stable() of
+ false ->
+ % When the cluster is unstable, we have already stopped all listeners;
+ % the next stable event will restart all listeners and pick up this
+ % doc change.
+ couch_log:debug("peruser: skipping, cluster unstable ~s/~s",
+ [ShardName, DocId]),
+ false;
+ true ->
+ should_handle_doc_int(ShardName, DocId)
+ end.
+
+-spec should_handle_doc_int(
+ ShardName :: binary(),
+ DocId :: binary()) -> boolean().
+should_handle_doc_int(ShardName, DocId) ->
+ DbName = mem3:dbname(ShardName),
+ Live = [erlang:node() | erlang:nodes()],
+ Shards = mem3:shards(DbName, DocId),
+ Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
+ case mem3:owner(DbName, DocId, Nodes) of
+ ThisNode when ThisNode =:= node() ->
+ couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
+ true; % do the database action
+ _OtherNode ->
+ couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
+ false
+ end.
+
+-spec delete_user_db(User :: binary()) -> binary().
delete_user_db(User) ->
UserDb = user_db_name(User),
try
@@ -113,18 +214,24 @@ delete_user_db(User) ->
end,
UserDb.
+-spec ensure_user_db(User :: binary()) -> binary().
ensure_user_db(User) ->
UserDb = user_db_name(User),
try
{ok, _DbInfo} = fabric:get_db_info(UserDb)
catch error:database_does_not_exist ->
case fabric:create_db(UserDb, [?ADMIN_CTX]) of
+ {error, file_exists} -> ok;
ok -> ok;
accepted -> ok
end
end,
UserDb.
+-spec add_user(
+ User :: binary(),
+ Properties :: tuple(),
+ Acc :: tuple()) -> tuple().
add_user(User, Prop, {Modified, SecProps}) ->
{PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
Names = couch_util:get_value(<<"names">>, PropValue, []),
@@ -141,6 +248,10 @@ add_user(User, Prop, {Modified, SecProps}) ->
{<<"names">>, [User | Names]})}})}
end.
+-spec remove_user(
+ User :: binary(),
+ Properties :: tuple(),
+ Acc :: tuple()) -> tuple().
remove_user(User, Prop, {Modified, SecProps}) ->
{PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
Names = couch_util:get_value(<<"names">>, PropValue, []),
@@ -157,58 +268,108 @@ remove_user(User, Prop, {Modified, SecProps}) ->
{<<"names">>, lists:delete(User, Names)})}})}
end.
+-spec ensure_security(
+ User :: binary(),
+ UserDb :: binary(),
+ TransformFun :: fun()) -> ok.
ensure_security(User, UserDb, TransformFun) ->
- {ok, Shards} = fabric:get_all_security(UserDb, [?ADMIN_CTX]),
- {_ShardInfo, {SecProps}} = hd(Shards),
- % assert that shards have the same security object
- true = lists:all(fun ({_, {SecProps1}}) ->
- SecProps =:= SecProps1
- end, Shards),
- case lists:foldl(
- fun (Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
- {false, SecProps},
- [<<"admins">>, <<"members">>]) of
- {false, _} ->
- ok;
- {true, SecProps1} ->
- ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
+ case fabric:get_all_security(UserDb, [?ADMIN_CTX]) of
+ {error, no_majority} ->
+ % TODO: make sure this is still true: single node, ignore
+ ok;
+ {ok, Shards} ->
+ {_ShardInfo, {SecProps}} = hd(Shards),
+ % assert that shards have the same security object
+ true = lists:all(fun ({_, {SecProps1}}) ->
+ SecProps =:= SecProps1
+ end, Shards),
+ case lists:foldl(
+ fun (Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
+ {false, SecProps},
+ [<<"admins">>, <<"members">>]) of
+ {false, _} ->
+ ok;
+ {true, SecProps1} ->
+ ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
+ end
end.
+-spec user_db_name(User :: binary()) -> binary().
user_db_name(User) ->
HexUser = list_to_binary(
[string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]),
<<?USERDB_PREFIX,HexUser/binary>>.
+-spec exit_changes(State :: #state{}) -> ok.
+exit_changes(State) ->
+ lists:foreach(fun (ChangesState) ->
+ demonitor(ChangesState#changes_state.changes_ref, [flush]),
+ unlink(ChangesState#changes_state.changes_pid),
+ exit(ChangesState#changes_state.changes_pid, kill)
+ end, State#state.states).
-%% gen_server callbacks
+-spec is_stable() -> true | false.
+is_stable() ->
+ gen_server:call(?MODULE, is_stable).
+
+-spec subscribe_for_changes() -> ok.
+subscribe_for_changes() ->
+ config:subscribe_for_changes([
+ {"couch_httpd_auth", "authentication_db"},
+ "couch_peruser"
+ ]).
+
+% Mem3 cluster callbacks
+
+% TODO: find out what type Server is
+-spec cluster_unstable(Server :: any()) -> any().
+cluster_unstable(Server) ->
+ gen_server:cast(Server, cluster_unstable),
+ Server.
+
+% TODO: find out what type Server is
+-spec cluster_stable(Server :: any()) -> any().
+cluster_stable(Server) ->
+ gen_server:cast(Server, cluster_stable),
+ Server.
+
+%% gen_server callbacks
+-spec init(Options :: list()) -> {ok, #state{}}.
init([]) ->
ok = subscribe_for_changes(),
- {ok, init()}.
+ {ok, init_state()}.
+
+handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) ->
+ {reply, IsStable, State};
handle_call(_Msg, _From, State) ->
{reply, error, State}.
-handle_cast(update_config, ClusterState) when ClusterState#clusterState.states =/= undefined ->
- lists:foreach(fun (State) ->
- demonitor(State#state.changes_ref, [flush]),
- exit(State#state.changes_pid, kill)
- end, ClusterState#clusterState.states),
-
- {noreply, init()};
+handle_cast(update_config, State) when State#state.states =/= undefined ->
+ exit_changes(State),
+ {noreply, init_state()};
handle_cast(update_config, _) ->
- {noreply, init()};
+ {noreply, init_state()};
handle_cast(stop, State) ->
{stop, normal, State};
+handle_cast(cluster_unstable, State) when State#state.states =/= undefined ->
+ exit_changes(State),
+ {noreply, init_state()};
+handle_cast(cluster_unstable, _) ->
+ {noreply, init_state()};
+handle_cast(cluster_stable, State) ->
+ {noreply, start_listening(State)};
handle_cast(_Msg, State) ->
{noreply, State}.
-handle_info({'DOWN', Ref, _, _, _Reason}, #state{changes_ref=Ref} = State) ->
+handle_info({'DOWN', _Ref, _, _, _Reason}, State) ->
{stop, normal, State};
handle_info({config_change, "couch_peruser", _, _, _}, State) ->
handle_cast(update_config, State);
-handle_info({config_change, "couch_httpd_auth", "authentication_db", _, _}, State) ->
+handle_info({
+ config_change,
+ "couch_httpd_auth",
+ "authentication_db", _, _}, State) ->
handle_cast(update_config, State);
handle_info({gen_event_EXIT, _Handler, _Reason}, State) ->
erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
@@ -222,13 +383,6 @@ handle_info(restart_config_listener, State) ->
handle_info(_Msg, State) ->
{noreply, State}.
-subscribe_for_changes() ->
- config:subscribe_for_changes([
- {"couch_httpd_auth", "authentication_db"},
- "couch_peruser"
- ]).
-
-
terminate(_Reason, _State) ->
%% Everything should be linked or monitored, let nature
%% take its course.
diff --git a/src/couch_peruser/src/couch_peruser_app.erl b/src/couch_peruser/src/couch_peruser_app.erl
new file mode 100644
index 000000000..770c08237
--- /dev/null
+++ b/src/couch_peruser/src/couch_peruser_app.erl
@@ -0,0 +1,26 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_peruser_app).
+
+-behaviour(application).
+
+-export([start/2, stop/1]).
+
+
+start(_Type, _StartArgs) ->
+ couch_peruser_sup:start_link().
+
+
+stop(_State) ->
+ ok.
+
diff --git a/src/couch_peruser/src/couch_peruser_sup.erl b/src/couch_peruser/src/couch_peruser_sup.erl
new file mode 100644
index 000000000..b89a36324
--- /dev/null
+++ b/src/couch_peruser/src/couch_peruser_sup.erl
@@ -0,0 +1,29 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_peruser_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, init/1]).
+
+%% Helper macro for declaring children of supervisor
+-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
+
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+
+init([]) ->
+ {ok, { {one_for_one, 5, 10}, [?CHILD(couch_peruser, worker)]}}.
+
diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/couch_peruser_test.erl
index c6fde034e..2bc98af66 100644
--- a/src/couch_peruser/test/couch_peruser_test.erl
+++ b/src/couch_peruser/test/couch_peruser_test.erl
@@ -18,14 +18,19 @@
-define(ADMIN_USERNAME, "admin").
-define(ADMIN_PASSWORD, "secret").
+-define(WAIT_FOR_DB_TIMEOUT, 1000).
+-define(WAIT_FOR_USER_DELETE_TIMEOUT, 3000).
+
setup_all() ->
TestCtx = test_util:start_couch([chttpd]),
+ ok = application:start(couch_peruser),
Hashed = couch_passwords:hash_admin_password(?ADMIN_PASSWORD),
ok = config:set("admins", ?ADMIN_USERNAME, ?b2l(Hashed), _Persist=false),
TestCtx.
teardown_all(TestCtx) ->
config:delete("admins", ?ADMIN_USERNAME),
+ ok = application:stop(couch_peruser),
test_util:stop_couch(TestCtx).
setup() ->
@@ -33,13 +38,20 @@ setup() ->
do_request(put, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
do_request(put, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
set_config("couch_httpd_auth", "authentication_db", ?b2l(TestAuthDb)),
+ set_config("couch_peruser", "cluster_quiet_period", "1"),
+ set_config("couch_peruser", "cluster_start_period", "1"),
set_config("couch_peruser", "enable", "true"),
+ set_config("cluster", "n", "1"),
+ timer:sleep(1000),
TestAuthDb.
teardown(TestAuthDb) ->
set_config("couch_peruser", "enable", "false"),
set_config("couch_peruser", "delete_dbs", "false"),
set_config("couch_httpd_auth", "authentication_db", "_users"),
+ set_config("couch_peruser", "cluster_quiet_period", "60"),
+ set_config("couch_peruser", "cluster_start_period", "5"),
+ set_config("cluster", "n", "3"),
do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
do_request(delete, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
lists:foreach(fun (DbName) ->
@@ -80,18 +92,14 @@ create_user(AuthDb, Name) ->
"\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
Url = lists:concat([
get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]),
- {ok, 201, _, _} = do_request(put, Url, Body),
- % let's proceed after giving couch_peruser some time to create the user db
- timer:sleep(2000).
+ {ok, 201, _, _} = do_request(put, Url, Body).
create_anon_user(AuthDb, Name) ->
Body = "{\"name\":\"" ++ Name ++
"\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
Url = lists:concat([
get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]),
- {ok, 201, _, _} = do_anon_request(put, Url, Body),
- % let's proceed after giving couch_peruser some time to create the user db
- timer:sleep(2000).
+ {ok, 201, _, _} = do_anon_request(put, Url, Body).
delete_user(AuthDb, Name) ->
Url = lists:concat([get_cluster_base_url(), "/", ?b2l(AuthDb),
@@ -99,9 +107,7 @@ delete_user(AuthDb, Name) ->
{ok, 200, _, Body} = do_request(get, Url),
{DocProps} = jiffy:decode(Body),
Rev = proplists:get_value(<<"_rev">>, DocProps),
- {ok, 200, _, _} = do_request(delete, Url ++ "?rev=" ++ ?b2l(Rev)),
- % let's proceed after giving couch_peruser some time to delete the user db
- timer:sleep(2000).
+ {ok, 200, _, _} = do_request(delete, Url ++ "?rev=" ++ ?b2l(Rev)).
get_security(DbName) ->
Url = lists:concat([
@@ -120,6 +126,10 @@ all_dbs() ->
{ok, 200, _, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"),
jiffy:decode(Body).
+all_dbs_with_errors() ->
+ {Result, StatusCode, Headers, Body} = do_request(get, get_cluster_base_url() ++ "/_all_dbs"),
+ {Result, StatusCode, Headers, jiffy:decode(Body)}.
+
get_base_url() ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
@@ -130,20 +140,25 @@ get_cluster_base_url() ->
Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
"http://" ++ Addr ++ ":" ++ Port.
+
should_create_user_db(TestAuthDb) ->
create_user(TestAuthDb, "foo"),
+ wait_for_db_create(<<"userdb-666f6f">>),
?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())).
should_create_anon_user_db(TestAuthDb) ->
create_anon_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"userdb-666f6f6f">>),
?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())).
should_not_delete_user_db(TestAuthDb) ->
User = "foo",
UserDbName = <<"userdb-666f6f">>,
create_user(TestAuthDb, User),
+ wait_for_db_create(<<"userdb-666f6f">>),
?assert(lists:member(UserDbName, all_dbs())),
delete_user(TestAuthDb, User),
+ timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
?_assert(lists:member(UserDbName, all_dbs())).
should_delete_user_db(TestAuthDb) ->
@@ -151,8 +166,10 @@ should_delete_user_db(TestAuthDb) ->
UserDbName = <<"userdb-626172">>,
set_config("couch_peruser", "delete_dbs", "true"),
create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
?assert(lists:member(UserDbName, all_dbs())),
delete_user(TestAuthDb, User),
+ wait_for_db_delete(UserDbName),
?_assert(not lists:member(UserDbName, all_dbs())).
should_reflect_config_changes(TestAuthDb) ->
@@ -160,26 +177,35 @@ should_reflect_config_changes(TestAuthDb) ->
UserDbName = <<"userdb-62617a">>,
set_config("couch_peruser", "delete_dbs", "true"),
create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
?assert(lists:member(UserDbName, all_dbs())),
delete_user(TestAuthDb, User),
+ timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+ wait_for_db_delete(UserDbName),
?assert(not lists:member(UserDbName, all_dbs())),
create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
?assert(lists:member(UserDbName, all_dbs())),
set_config("couch_peruser", "delete_dbs", "false"),
delete_user(TestAuthDb, User),
+ timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
?assert(lists:member(UserDbName, all_dbs())),
create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
set_config("couch_peruser", "delete_dbs", "true"),
delete_user(TestAuthDb, User),
+ wait_for_db_delete(UserDbName),
?assert(not lists:member(UserDbName, all_dbs())),
set_config("couch_peruser", "enable", "false"),
create_user(TestAuthDb, User),
+ timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
?_assert(not lists:member(UserDbName, all_dbs())).
should_add_user_to_db_admins(TestAuthDb) ->
User = "qux",
UserDbName = <<"userdb-717578">>,
create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
?_assertEqual(
{[{<<"names">>,[<<"qux">>]}]},
proplists:get_value(<<"admins">>, get_security(UserDbName))).
@@ -188,6 +214,7 @@ should_add_user_to_db_members(TestAuthDb) ->
User = "qux",
UserDbName = <<"userdb-717578">>,
create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
?_assertEqual(
{[{<<"names">>,[<<"qux">>]}]},
proplists:get_value(<<"members">>, get_security(UserDbName))).
@@ -202,6 +229,7 @@ should_not_remove_existing_db_admins(TestAuthDb) ->
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
+ wait_for_security_create(<<"admins">>, User, UserDbName),
{AdminProperties} = proplists:get_value(<<"admins">>,
get_security(UserDbName)),
AdminNames = proplists:get_value(<<"names">>, AdminProperties),
@@ -221,6 +249,7 @@ should_not_remove_existing_db_members(TestAuthDb) ->
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
+ wait_for_security_create(<<"members">>, User, UserDbName),
{MemberProperties} = proplists:get_value(<<"members">>,
get_security(UserDbName)),
MemberNames = proplists:get_value(<<"names">>, MemberProperties),
@@ -240,6 +269,7 @@ should_remove_user_from_db_admins(TestAuthDb) ->
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
+ wait_for_security_create(<<"admins">>, User, UserDbName),
{AdminProperties} = proplists:get_value(<<"admins">>,
get_security(UserDbName)),
AdminNames = proplists:get_value(<<"names">>, AdminProperties),
@@ -247,6 +277,7 @@ should_remove_user_from_db_admins(TestAuthDb) ->
?assert(lists:member(<<"bar">>, AdminNames)),
?assert(lists:member(<<"qux">>, AdminNames)),
delete_user(TestAuthDb, User),
+ wait_for_security_delete(<<"admins">>, User, UserDbName),
{NewAdminProperties} = proplists:get_value(<<"admins">>,
get_security(UserDbName)),
NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
@@ -266,6 +297,7 @@ should_remove_user_from_db_members(TestAuthDb) ->
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
+ wait_for_security_create(<<"members">>, User, UserDbName),
{MemberProperties} = proplists:get_value(<<"members">>,
get_security(UserDbName)),
MemberNames = proplists:get_value(<<"names">>, MemberProperties),
@@ -273,6 +305,7 @@ should_remove_user_from_db_members(TestAuthDb) ->
?assert(lists:member(<<"wow">>, MemberNames)),
?assert(lists:member(<<"qux">>, MemberNames)),
delete_user(TestAuthDb, User),
+ wait_for_security_delete(<<"members">>, User, UserDbName),
{NewMemberProperties} = proplists:get_value(<<"members">>,
get_security(UserDbName)),
NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
@@ -282,6 +315,62 @@ should_remove_user_from_db_members(TestAuthDb) ->
?_assert(not lists:member(<<"qux">>, NewMemberNames))
].
+% Poll (potentially forever) until the db is created; either this returns
+% true or we hit the test timeout.
+wait_for_db_create(UserDbName) ->
+ case all_dbs_with_errors() of
+ {error, _, _ , _} ->
+ timer:sleep(?WAIT_FOR_DB_TIMEOUT),
+ wait_for_db_create(UserDbName);
+ {ok, _, _, AllDbs} ->
+ case lists:member(UserDbName, AllDbs) of
+ true -> true;
+ _Else ->
+ timer:sleep(?WAIT_FOR_DB_TIMEOUT),
+ wait_for_db_create(UserDbName)
+ end
+ end.
+
+% Poll (potentially forever) until the db is deleted; either this returns
+% true or we hit the test timeout.
+wait_for_db_delete(UserDbName) ->
+ case all_dbs_with_errors() of
+ {ok, 500, _ , _} ->
+ timer:sleep(?WAIT_FOR_DB_TIMEOUT),
+ wait_for_db_delete(UserDbName);
+ {ok, _, _, AllDbs} ->
+ case not lists:member(UserDbName, AllDbs) of
+ true -> true;
+ _Else ->
+ timer:sleep(?WAIT_FOR_DB_TIMEOUT),
+ wait_for_db_delete(UserDbName)
+ end
+ end.
+
+wait_for_security_create(Type, User, UserDbName) ->
+ {MemberProperties} = proplists:get_value(Type,
+ get_security(UserDbName)),
+ Names = proplists:get_value(<<"names">>, MemberProperties),
+
+ case lists:member(?l2b(User), Names) of
+ true -> true;
+ _Else ->
+ timer:sleep(?WAIT_FOR_DB_TIMEOUT),
+ wait_for_security_create(Type, User, UserDbName)
+ end.
+
+wait_for_security_delete(Type, User, UserDbName) ->
+ {MemberProperties} = proplists:get_value(Type,
+ get_security(UserDbName)),
+ Names = proplists:get_value(<<"names">>, MemberProperties),
+
+ case not lists:member(?l2b(User), Names) of
+ true -> true;
+ _Else ->
+ timer:sleep(?WAIT_FOR_DB_TIMEOUT),
+ wait_for_security_delete(Type, User, UserDbName)
+ end.
+
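
The four wait_for_* helpers above repeat one poll-until-true pattern; had it been factored out, a generic form might look like this (a sketch, not part of the patch):

    wait_until(Pred) ->
        case Pred() of
            true -> true;
            _Else ->
                timer:sleep(?WAIT_FOR_DB_TIMEOUT),
                wait_until(Pred)
        end.

    % e.g. wait_until(fun() -> lists:member(UserDbName, all_dbs()) end)
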
couch_peruser_test_() ->
{
"couch_peruser test",
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index c67b37d19..8b7cd5cb1 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -184,7 +184,7 @@ active_doc(DbName, DocId) ->
Live = [node() | nodes()],
Nodes = lists:usort([N || #shard{node=N} <- Shards,
lists:member(N, Live)]),
- Owner = couch_replicator_clustering:owner(DbName, DocId, Nodes),
+ Owner = mem3:owner(DbName, DocId, Nodes),
case active_doc_rpc(DbName, DocId, [Owner]) of
{ok, DocInfo} ->
{ok, DocInfo};
diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl
index ed01465d5..3d5229b9f 100644
--- a/src/couch_replicator/src/couch_replicator_clustering.erl
+++ b/src/couch_replicator/src/couch_replicator_clustering.erl
@@ -45,7 +45,6 @@
-export([
owner/2,
- owner/3,
is_stable/0,
link_cluster_event_listener/3
]).
@@ -96,13 +95,6 @@ owner(_DbName, _DocId) ->
node().
-% Direct calculation of node membership. This is the algorithm part. It
-% doesn't read the shard map, just picks owner based on a hash.
--spec owner(binary(), binary(), [node()]) -> node().
-owner(DbName, DocId, Nodes) ->
- hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
-
-
-spec is_stable() -> true | false.
is_stable() ->
gen_server:call(?MODULE, is_stable).
@@ -200,4 +192,4 @@ owner_int(ShardName, DocId) ->
Live = [node() | nodes()],
Shards = mem3:shards(DbName, DocId),
Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
- owner(DbName, DocId, Nodes).
+ mem3:owner(DbName, DocId, Nodes).
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
index 28eb17c16..d3c001f26 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl
@@ -423,20 +423,20 @@ error_backoff(ErrCnt) ->
% ErrCnt is the exponent here. 64 is used so the max range starts at
% 64 seconds (about a minute); the first backoff then averages about
% 30 seconds, the next about a minute, and so on.
- random:uniform(?INITIAL_BACKOFF_EXPONENT bsl Exp).
+ couch_rand:uniform(?INITIAL_BACKOFF_EXPONENT bsl Exp).
-spec filter_backoff() -> seconds().
filter_backoff() ->
Total = ets:info(?MODULE, size),
- % This value scaled by the number of replications. If the are a lot of
- % them wait is longer, but not more than a day (?TS_DAY_SEC). If there
- % are just few, wait is shorter, starting at about 30 seconds. `2 *` is
- % used since the expected wait would then be 0.5 * Range so it is easier
- % to see the average wait. `1 +` is used because random:uniform only
+ % This value is scaled by the number of replications. If there are a lot
+ % of them the wait is longer, but not more than a day (?TS_DAY_SEC). If
+ % there are just a few, the wait is shorter, starting at about 30 seconds.
+ % `2 *` is used since the expected wait would then be 0.5 * Range, so it is
+ % easier to see the average wait. `1 +` is used because couch_rand:uniform only
% accepts >= 1 values and crashes otherwise.
Range = 1 + min(2 * (Total / 10), ?TS_DAY_SEC),
- ?MIN_FILTER_DELAY_SEC + random:uniform(round(Range)).
+ ?MIN_FILTER_DELAY_SEC + couch_rand:uniform(round(Range)).
% Document removed from db -- clear ets table and remove all scheduled jobs
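
Concretely, error_backoff/1 doubles its random range with each consecutive error; taking the comment above at its word that the initial range is 64 seconds, the progression is:

    % ErrCnt 0 -> couch_rand:uniform(64),  average wait ~32 s
    % ErrCnt 1 -> couch_rand:uniform(128), average wait ~64 s
    % ErrCnt 2 -> couch_rand:uniform(256), average wait ~2 min, and so on,
    % with the exponent capped so the range cannot grow without bound.
    Backoff = couch_rand:uniform(?INITIAL_BACKOFF_EXPONENT bsl Exp).
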
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 9d844b9e7..d22b85f89 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -316,7 +316,7 @@ update_rep_doc(RepDbName, RepDocId, KVs, Wait) when is_binary(RepDocId) ->
throw:conflict ->
Msg = "Conflict when updating replication doc `~s`. Retrying.",
couch_log:error(Msg, [RepDocId]),
- ok = timer:sleep(random:uniform(erlang:min(128, Wait)) * 100),
+ ok = timer:sleep(couch_rand:uniform(erlang:min(128, Wait)) * 100),
update_rep_doc(RepDbName, RepDocId, KVs, Wait * 2)
end;
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index 08a24886c..e2d8fb6d6 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -95,8 +95,6 @@ start_link(#rep{id = {BaseId, Ext}, source = Src, target = Tgt} = Rep) ->
case gen_server:start_link(ServerName, ?MODULE, Rep, []) of
{ok, Pid} ->
- couch_log:notice("starting new replication `~s` at ~p (`~s` -> `~s`)",
- [RepChildId, Pid, Source, Target]),
{ok, Pid};
{error, Reason} ->
couch_log:warning("failed to start replication `~s` (`~s` -> `~s`)",
@@ -112,7 +110,6 @@ init(InitArgs) ->
do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
process_flag(trap_exit, true),
- random:seed(os:timestamp()),
timer:sleep(startup_jitter()),
#rep_state{
@@ -184,24 +181,7 @@ do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
% cancel_replication/1) and then start the replication again, but this is
% unfortunately not immune to race conditions.
- couch_log:notice("Replication `~p` is using:~n"
- "~c~p worker processes~n"
- "~ca worker batch size of ~p~n"
- "~c~p HTTP connections~n"
- "~ca connection timeout of ~p milliseconds~n"
- "~c~p retries per request~n"
- "~csocket options are: ~s~s",
- [BaseId ++ Ext, $\t, NumWorkers, $\t, BatchSize, $\t,
- MaxConns, $\t, get_value(connection_timeout, Options),
- $\t, get_value(retries, Options),
- $\t, io_lib:format("~p", [get_value(socket_options, Options)]),
- case StartSeq of
- ?LOWEST_SEQ ->
- "";
- _ ->
- io_lib:format("~n~csource start sequence ~p", [$\t, StartSeq])
- end]),
-
+ log_replication_start(State),
couch_log:debug("Worker pids are: ~p", [Workers]),
doc_update_triggered(Rep),
@@ -363,10 +343,11 @@ handle_info(timeout, InitArgs) ->
{stop, {shutdown, max_backoff}, {error, InitArgs}};
Class:Error ->
ShutdownReason = {error, replication_start_error(Error)},
+ StackTop2 = lists:sublist(erlang:get_stacktrace(), 2),
% Shutdown state is a hack as it is not really the state of the
% gen_server (it failed to initialize, so it doesn't have one).
% Shutdown state is used to pass extra info about why start failed.
- ShutdownState = {error, Class, erlang:get_stacktrace(), InitArgs},
+ ShutdownState = {error, Class, StackTop2, InitArgs},
{stop, {shutdown, ShutdownReason}, ShutdownState}
end.
@@ -399,11 +380,20 @@ terminate({shutdown, max_backoff}, {error, InitArgs}) ->
couch_replicator_notifier:notify({error, RepId, max_backoff});
terminate({shutdown, {error, Error}}, {error, Class, Stack, InitArgs}) ->
- #rep{id=RepId} = InitArgs,
+ #rep{
+ id = {BaseId, Ext} = RepId,
+ source = Source0,
+ target = Target0,
+ doc_id = DocId,
+ db_name = DbName
+ } = InitArgs,
+ Source = couch_replicator_api_wrap:db_uri(Source0),
+ Target = couch_replicator_api_wrap:db_uri(Target0),
+ RepIdStr = BaseId ++ Ext,
+ Msg = "~p:~p: Replication ~s failed to start ~p -> ~p doc ~p:~p stack:~p",
+ couch_log:error(Msg, [Class, Error, RepIdStr, Source, Target, DbName,
+ DocId, Stack]),
couch_stats:increment_counter([couch_replicator, failed_starts]),
- CleanInitArgs = rep_strip_creds(InitArgs),
- couch_log:error("~p:~p: Replication failed to start for args ~p: ~p",
- [Class, Error, CleanInitArgs, Stack]),
couch_replicator_notifier:notify({error, RepId, Error});
terminate({shutdown, max_backoff}, State) ->
@@ -441,13 +431,43 @@ code_change(_OldVsn, #rep_state{}=State, _Extra) ->
format_status(_Opt, [_PDict, State]) ->
- [{data, [{"State", state_strip_creds(State)}]}].
+ #rep_state{
+ source = Source,
+ target = Target,
+ rep_details = RepDetails,
+ start_seq = StartSeq,
+ source_seq = SourceSeq,
+ committed_seq = CommittedSeq,
+ current_through_seq = ThroughSeq,
+ highest_seq_done = HighestSeqDone,
+ session_id = SessionId
+ } = state_strip_creds(State),
+ #rep{
+ id = RepId,
+ options = Options,
+ doc_id = DocId,
+ db_name = DbName
+ } = RepDetails,
+ [
+ {rep_id, RepId},
+ {source, couch_replicator_api_wrap:db_uri(Source)},
+ {target, couch_replicator_api_wrap:db_uri(Target)},
+ {db_name, DbName},
+ {doc_id, DocId},
+ {options, Options},
+ {session_id, SessionId},
+ {start_seq, StartSeq},
+ {source_seq, SourceSeq},
+ {committed_seq, CommittedSeq},
+ {current_through_seq, ThroughSeq},
+ {highest_seq_done, HighestSeqDone}
+ ].
startup_jitter() ->
Jitter = config:get_integer("replicator", "startup_jitter",
?STARTUP_JITTER_DEFAULT),
- random:uniform(erlang:max(1, Jitter)).
+ couch_rand:uniform(erlang:max(1, Jitter)).
headers_strip_creds([], Acc) ->
@@ -989,5 +1009,99 @@ replication_start_error({unauthorized, DbUri}) ->
{unauthorized, <<"unauthorized to access or create database ", DbUri/binary>>};
replication_start_error({db_not_found, DbUri}) ->
{db_not_found, <<"could not open ", DbUri/binary>>};
+replication_start_error({http_request_failed, _Method, Url0,
+ {error, {error, {conn_failed, {error, nxdomain}}}}}) ->
+ Url = ?l2b(couch_util:url_strip_password(Url0)),
+ {nxdomain, <<"could not resolve ", Url/binary>>};
+replication_start_error({http_request_failed, Method0, Url0,
+ {error, {code, Code}}}) when is_integer(Code) ->
+ Url = ?l2b(couch_util:url_strip_password(Url0)),
+ Method = ?l2b(Method0),
+ {http_error_code, Code, <<Method/binary, " ", Url/binary>>};
replication_start_error(Error) ->
Error.
+
+
+log_replication_start(#rep_state{rep_details = Rep} = RepState) ->
+ #rep{
+ id = {BaseId, Ext},
+ doc_id = DocId,
+ db_name = DbName,
+ options = Options
+ } = Rep,
+ Id = BaseId ++ Ext,
+ Workers = get_value(worker_processes, Options),
+ BatchSize = get_value(worker_batch_size, Options),
+ #rep_state{
+ source_name = Source, % credentials already stripped
+ target_name = Target, % credentials already stripped
+ session_id = Sid
+ } = RepState,
+ From = case DbName of
+ ShardName when is_binary(ShardName) ->
+ io_lib:format("from doc ~s:~s", [mem3:dbname(ShardName), DocId]);
+ _ ->
+ "from _replicate endpoint"
+ end,
+ Msg = "Starting replication ~s (~s -> ~s) ~s worker_procesess:~p"
+ " worker_batch_size:~p session_id:~s",
+ couch_log:notice(Msg, [Id, Source, Target, From, Workers, BatchSize, Sid]).
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+replication_start_error_test() ->
+ ?assertEqual({unauthorized, <<"unauthorized to access or create database"
+ " http://x/y">>}, replication_start_error({unauthorized,
+ <<"http://x/y">>})),
+ ?assertEqual({db_not_found, <<"could not open http://x/y">>},
+ replication_start_error({db_not_found, <<"http://x/y">>})),
+ ?assertEqual({nxdomain,<<"could not resolve http://x/y">>},
+ replication_start_error({http_request_failed, "GET", "http://x/y",
+ {error, {error, {conn_failed, {error, nxdomain}}}}})),
+ ?assertEqual({http_error_code,503,<<"GET http://x/y">>},
+ replication_start_error({http_request_failed, "GET", "http://x/y",
+ {error, {code, 503}}})).
+
+
+scheduler_job_format_status_test() ->
+ Source = <<"http://u:p@h1/d1">>,
+ Target = <<"http://u:p@h2/d2">>,
+ Rep = #rep{
+ id = {"base", "+ext"},
+ source = couch_replicator_docs:parse_rep_db(Source, [], []),
+ target = couch_replicator_docs:parse_rep_db(Target, [], []),
+ options = [{create_target, true}],
+ doc_id = <<"mydoc">>,
+ db_name = <<"mydb">>
+ },
+ State = #rep_state{
+ rep_details = Rep,
+ source = Rep#rep.source,
+ target = Rep#rep.target,
+ session_id = <<"a">>,
+ start_seq = <<"1">>,
+ source_seq = <<"2">>,
+ committed_seq = <<"3">>,
+ current_through_seq = <<"4">>,
+ highest_seq_done = <<"5">>
+ },
+ Format = format_status(opts_ignored, [pdict, State]),
+ ?assertEqual("http://u:*****@h1/d1/", proplists:get_value(source, Format)),
+ ?assertEqual("http://u:*****@h2/d2/", proplists:get_value(target, Format)),
+ ?assertEqual({"base", "+ext"}, proplists:get_value(rep_id, Format)),
+ ?assertEqual([{create_target, true}], proplists:get_value(options, Format)),
+ ?assertEqual(<<"mydoc">>, proplists:get_value(doc_id, Format)),
+ ?assertEqual(<<"mydb">>, proplists:get_value(db_name, Format)),
+ ?assertEqual(<<"a">>, proplists:get_value(session_id, Format)),
+ ?assertEqual(<<"1">>, proplists:get_value(start_seq, Format)),
+ ?assertEqual(<<"2">>, proplists:get_value(source_seq, Format)),
+ ?assertEqual(<<"3">>, proplists:get_value(committed_seq, Format)),
+ ?assertEqual(<<"4">>, proplists:get_value(current_through_seq, Format)),
+ ?assertEqual(<<"5">>, proplists:get_value(highest_seq_done, Format)).
+
+
+-endif.
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index 344b8f286..db6b72b2e 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -31,6 +31,7 @@
-define(MAX_BULK_ATT_SIZE, 64 * 1024).
-define(MAX_BULK_ATTS_PER_DOC, 8).
-define(STATS_DELAY, 10000000). % 10 seconds (in microseconds)
+-define(MISSING_DOC_RETRY_MSEC, 2000).
-import(couch_replicator_utils, [
open_db/1,
@@ -73,7 +74,7 @@ start_link(Cp, #httpdb{} = Source, Target, ChangesManager, MaxConns) ->
start_link(Cp, Source, Target, ChangesManager, _MaxConns) ->
Pid = spawn_link(fun() ->
- erlang:put(last_stats_report, now()),
+ erlang:put(last_stats_report, os:timestamp()),
queue_fetch_loop(Source, Target, Cp, Cp, ChangesManager)
end),
{ok, Pid}.
@@ -85,7 +86,7 @@ init({Cp, Source, Target, ChangesManager, MaxConns}) ->
LoopPid = spawn_link(fun() ->
queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
end),
- erlang:put(last_stats_report, now()),
+ erlang:put(last_stats_report, os:timestamp()),
State = #state{
cp = Cp,
max_parallel_conns = MaxConns,
@@ -247,7 +248,7 @@ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
end,
close_db(Target2),
ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
- erlang:put(last_stats_report, now()),
+ erlang:put(last_stats_report, os:timestamp()),
couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
end.
@@ -314,11 +315,17 @@ fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
couch_log:error("Retrying fetch and update of document `~s` as it is "
"unexpectedly missing. Missing revisions are: ~s",
[Id, couch_doc:revs_to_strs(Revs)]),
+ WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
+ ?MISSING_DOC_RETRY_MSEC),
+ timer:sleep(WaitMSec),
couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc);
throw:{missing_stub, _} ->
couch_log:error("Retrying fetch and update of document `~s` due to out of "
"sync attachment stubs. Missing revisions are: ~s",
[Id, couch_doc:revs_to_strs(Revs)]),
+ WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
+ ?MISSING_DOC_RETRY_MSEC),
+ timer:sleep(WaitMSec),
couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
end.
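
The pause before re-fetching an unexpectedly missing doc is now tunable at runtime, in milliseconds; for example:

    % Double the default 2000 ms pause before the retry:
    config:set("replicator", "missing_doc_retry_msec", "4000", false).
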
@@ -392,7 +399,7 @@ spawn_writer(Target, #batch{docs = DocList, size = Size}) ->
after_full_flush(#state{stats = Stats, flush_waiter = Waiter} = State) ->
gen_server:reply(Waiter, {ok, Stats}),
- erlang:put(last_stats_report, now()),
+ erlang:put(last_stats_report, os:timestamp()),
State#state{
stats = couch_replicator_stats:new(),
flush_waiter = nil,
@@ -543,7 +550,7 @@ find_missing(DocInfos, Target) ->
maybe_report_stats(Cp, Stats) ->
- Now = now(),
+ Now = os:timestamp(),
case timer:now_diff(erlang:get(last_stats_report), Now) >= ?STATS_DELAY of
true ->
ok = gen_server:call(Cp, {add_stats, Stats}, infinity),
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 810cbef63..e94934d04 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -39,6 +39,9 @@
-include("ddoc_cache.hrl").
+-define(OPENER, ddoc_cache_opener).
+
+
-record(st, {
pids, % pid -> key
dbs, % dbname -> docid -> key -> pid
@@ -143,11 +146,11 @@ handle_call(Msg, _From, St) ->
handle_cast({evict, DbName}, St) ->
- gen_server:abcast(mem3:nodes(), ?MODULE, {do_evict, DbName}),
+ gen_server:abcast(mem3:nodes(), ?OPENER, {do_evict, DbName}),
{noreply, St};
handle_cast({refresh, DbName, DDocIds}, St) ->
- gen_server:abcast(mem3:nodes(), ?MODULE, {do_refresh, DbName, DDocIds}),
+ gen_server:abcast(mem3:nodes(), ?OPENER, {do_evict, DbName, DDocIds}),
{noreply, St};
handle_cast({do_evict, DbName}, St) ->
diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
index 245511563..60605b9a5 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -28,7 +28,7 @@ recover(<<"pause", _/binary>>) ->
{ok, paused};
recover(<<"big", _/binary>>) ->
- {ok, [random:uniform() || _ <- lists:seq(1, 8192)]};
+ {ok, [couch_rand:uniform() || _ <- lists:seq(1, 8192)]};
recover(DbName) ->
{ok, DbName}.
diff --git a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
index f1459870d..261c158c7 100644
--- a/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_refresh_test.erl
@@ -158,7 +158,7 @@ check_upgrade_clause({DbName, _}) ->
rand_string() ->
- Bin = crypto:rand_bytes(8),
+ Bin = crypto:strong_rand_bytes(8),
to_hex(Bin, []).
@@ -171,4 +171,4 @@ to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
hexdig(C) when C >= 0, C =< 9 ->
C + $0;
hexdig(C) when C >= 10, C =< 15 ->
-    C + $A - 10.
\ No newline at end of file
+ C + $A - 10.
diff --git a/src/fabric/rebar.config b/src/fabric/rebar.config
index ccfb9b435..df35ac639 100644
--- a/src/fabric/rebar.config
+++ b/src/fabric/rebar.config
@@ -11,5 +11,5 @@
% the License.
{deps, [
- {meck, ".*", {git, "https://github.com/apache/couchdb-meck.git", {tag, "0.8.2"}}}
+ {meck, ".*", {git, "https://github.com/apache/couchdb-meck.git", {tag, "0.8.8"}}}
]}.
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index a7f4ed9d6..d793f4f13 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -56,7 +56,7 @@ validate_dbname(DbName, Options) ->
end.
generate_shard_map(DbName, Options) ->
- {MegaSecs, Secs, _} = now(),
+ {MegaSecs, Secs, _} = os:timestamp(),
Suffix = "." ++ integer_to_list(MegaSecs*1000000 + Secs),
Shards = mem3:choose_shards(DbName, [{shard_suffix,Suffix} | Options]),
case mem3_util:open_db_doc(DbName) of
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index bf3f023db..49f4c8913 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -203,8 +203,6 @@ get_shard([#shard{node = Node, name = Name} | Rest], Opts, Timeout, Factor) ->
rexi_monitor:stop(Mon)
end.
-error_info({{<<"reduce_overflow_error">>, _} = Error, _Stack}) ->
- Error;
error_info({{timeout, _} = Error, _Stack}) ->
Error;
error_info({{Error, Reason}, Stack}) ->
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index 45262e4eb..dd0fcfd8b 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -258,6 +258,8 @@ find_next_key([], _, _, _) ->
find_next_key([Key|Rest], _, _, _) ->
{Key, Rest}.
+transform_row(#view_row{value={[{reduce_overflow_error, Msg}]}}) ->
+ {row, [{key,null}, {id,error}, {value,reduce_overflow_error}, {reason,Msg}]};
transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
{row, [{key,Key}, {value,Value}]};
transform_row(#view_row{key=Key, id=undefined}) ->
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index e0792b737..98b2d52bd 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -46,7 +46,7 @@
create(Db, Selector0, Opts) ->
Selector = mango_selector:normalize(Selector0),
- UsableIndexes = mango_idx:get_usable_indexes(Db, Selector0, Opts),
+ UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts),
{use_index, IndexSpecified} = proplists:lookup(use_index, Opts),
case {length(UsableIndexes), length(IndexSpecified)} of
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 31e198fca..3fcec07be 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -29,7 +29,7 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-include("mango_cursor.hrl").
-
+-include("mango_idx_view.hrl").
create(Db, Indexes, Selector, Opts) ->
FieldRanges = mango_idx_view:field_ranges(Selector),
@@ -61,18 +61,37 @@ explain(Cursor) ->
BaseArgs = base_args(Cursor),
Args = apply_opts(Opts, BaseArgs),
+
[{mrargs, {[
{include_docs, Args#mrargs.include_docs},
{view_type, Args#mrargs.view_type},
{reduce, Args#mrargs.reduce},
{start_key, Args#mrargs.start_key},
- {end_key, Args#mrargs.end_key},
+ {end_key, maybe_replace_max_json(Args#mrargs.end_key)},
{direction, Args#mrargs.direction},
{stable, Args#mrargs.stable},
{update, Args#mrargs.update}
]}}].
+% replace internal values that cannot
+% be represented as a valid UTF-8 string
+% with a token for JSON serialization
+maybe_replace_max_json([]) ->
+ [];
+
+maybe_replace_max_json(?MAX_STR) ->
+ <<"<MAX>">>;
+
+maybe_replace_max_json([H | T] = EndKey) when is_list(EndKey) ->
+ H1 = if H == ?MAX_JSON_OBJ -> <<"<MAX>">>;
+ true -> H
+ end,
+ [H1 | maybe_replace_max_json(T)];
+
+maybe_replace_max_json(EndKey) ->
+ EndKey.
+
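`?MAX_STR` and `?MAX_JSON_OBJ` (defined in the new `mango_idx_view.hrl` below) are internal high-sorting sentinels that are not valid UTF-8 JSON, so `_explain` output swaps them for a `<MAX>` token. A conceptual sketch of the substitution (the function is module-internal, shown here only to illustrate the effect):

    %% an internal end key of [10, ?MAX_JSON_OBJ] is reported to clients as:
    [10, <<"<MAX>">>] = maybe_replace_max_json([10, ?MAX_JSON_OBJ])
    %% which is the ["<MAX>"] shape the mango tests below assert on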
base_args(#cursor{index = Idx} = Cursor) ->
#mrargs{
view_type = map,
@@ -202,10 +221,7 @@ handle_message({row, Props}, Cursor) ->
true ->
Cursor2 = update_bookmark_keys(Cursor1, Props),
FinalDoc = mango_fields:extract(Doc, Cursor2#cursor.fields),
- Cursor3 = Cursor2#cursor {
- execution_stats = mango_execution_stats:incr_results_returned(Cursor2#cursor.execution_stats)
- },
- handle_doc(Cursor3, FinalDoc);
+ handle_doc(Cursor2, FinalDoc);
false ->
{ok, Cursor1}
end;
@@ -230,13 +246,14 @@ handle_all_docs_message(Message, Cursor) ->
handle_doc(#cursor{skip = S} = C, _) when S > 0 ->
{ok, C#cursor{skip = S - 1}};
-handle_doc(#cursor{limit = L} = C, Doc) when L > 0 ->
+handle_doc(#cursor{limit = L, execution_stats = Stats} = C, Doc) when L > 0 ->
UserFun = C#cursor.user_fun,
UserAcc = C#cursor.user_acc,
{Go, NewAcc} = UserFun({row, Doc}, UserAcc),
{Go, C#cursor{
user_acc = NewAcc,
- limit = L - 1
+ limit = L - 1,
+ execution_stats = mango_execution_stats:incr_results_returned(Stats)
}};
handle_doc(C, _Doc) ->
{stop, C}.
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index 4f8ae204d..4c55ef3f6 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -43,7 +43,7 @@ info(mango_cursor, {no_usable_index, selector_unsupported}) ->
{
400,
<<"no_usable_index">>,
- <<"There is no index available for this selector.">>
+ <<"The index specified with \"use_index\" is not usable for the query.">>
};
info(mango_json_bookmark, {invalid_bookmark, BadBookmark}) ->
diff --git a/src/mango/src/mango_execution_stats.erl b/src/mango/src/mango_execution_stats.erl
index 95b9038a8..afdb417b7 100644
--- a/src/mango/src/mango_execution_stats.erl
+++ b/src/mango/src/mango_execution_stats.erl
@@ -64,12 +64,12 @@ incr_results_returned(Stats) ->
log_start(Stats) ->
Stats#execution_stats {
- executionStartTime = now()
+ executionStartTime = os:timestamp()
}.
log_end(Stats) ->
- End = now(),
+ End = os:timestamp(),
Diff = timer:now_diff(End, Stats#execution_stats.executionStartTime) / 1000,
Stats#execution_stats {
executionTimeMs = Diff
@@ -86,4 +86,4 @@ maybe_add_stats(Opts, UserFun, Stats, UserAcc) ->
FinalUserAcc;
_ ->
UserAcc
-    end.
\ No newline at end of file
+ end.
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index c5f870d5b..4dd2e180d 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -20,7 +20,6 @@
-export([
list/1,
recover/1,
- for_sort/2,
new/2,
validate_new/2,
@@ -36,7 +35,7 @@
def/1,
opts/1,
columns/1,
- is_usable/2,
+ is_usable/3,
start_key/2,
end_key/2,
cursor_mod/1,
@@ -57,9 +56,8 @@ list(Db) ->
{ok, Indexes} = ddoc_cache:open(db_to_name(Db), ?MODULE),
Indexes.
-get_usable_indexes(Db, Selector0, Opts) ->
- Selector = mango_selector:normalize(Selector0),
+get_usable_indexes(Db, Selector, Opts) ->
ExistingIndexes = mango_idx:list(Db),
if ExistingIndexes /= [] -> ok; true ->
?MANGO_ERROR({no_usable_index, no_indexes_defined})
@@ -70,13 +68,17 @@ get_usable_indexes(Db, Selector0, Opts) ->
?MANGO_ERROR({no_usable_index, no_index_matching_name})
end,
- SortIndexes = mango_idx:for_sort(FilteredIndexes, Opts),
- if SortIndexes /= [] -> ok; true ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index})
- end,
+ SortFields = get_sort_fields(Opts),
+ UsableFilter = fun(I) -> is_usable(I, Selector, SortFields) end,
+ UsableIndexes0 = lists:filter(UsableFilter, FilteredIndexes),
+
+ case maybe_filter_by_sort_fields(UsableIndexes0, SortFields) of
+ {ok, SortIndexes} ->
+ SortIndexes;
+ {error, no_usable_index} ->
+ ?MANGO_ERROR({no_usable_index, missing_sort_index})
+ end.
- UsableFilter = fun(I) -> mango_idx:is_usable(I, Selector) end,
- lists:filter(UsableFilter, SortIndexes).
recover(Db) ->
{ok, DDocs0} = mango_util:open_ddocs(Db),
@@ -93,33 +95,38 @@ recover(Db) ->
end, DDocs)}.
-for_sort(Indexes, Opts) ->
- % If a sort was specified we have to find an index that
- % can satisfy the request.
+get_sort_fields(Opts) ->
case lists:keyfind(sort, 1, Opts) of
- {sort, {SProps}} when is_list(SProps) ->
- for_sort_int(Indexes, {SProps});
+ {sort, Sort} ->
+ mango_sort:fields(Sort);
_ ->
- Indexes
+ []
end.
-for_sort_int(Indexes, Sort) ->
- Fields = mango_sort:fields(Sort),
+maybe_filter_by_sort_fields(Indexes, []) ->
+ {ok, Indexes};
+
+maybe_filter_by_sort_fields(Indexes, SortFields) ->
FilterFun = fun(Idx) ->
Cols = mango_idx:columns(Idx),
case {mango_idx:type(Idx), Cols} of
{_, all_fields} ->
true;
{<<"text">>, _} ->
- sets:is_subset(sets:from_list(Fields), sets:from_list(Cols));
+ sets:is_subset(sets:from_list(SortFields), sets:from_list(Cols));
{<<"json">>, _} ->
- lists:prefix(Fields, Cols);
+ lists:prefix(SortFields, Cols);
{<<"special">>, _} ->
- lists:prefix(Fields, Cols)
+ lists:prefix(SortFields, Cols)
end
end,
- lists:filter(FilterFun, Indexes).
+ case lists:filter(FilterFun, Indexes) of
+ [] ->
+ {error, no_usable_index};
+ FilteredIndexes ->
+ {ok, FilteredIndexes}
+ end.
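For `json` and `special` indexes the requested sort fields must be a prefix of the index columns, while `text` indexes only require containment. A couple of illustrative evaluations (the field names are invented):

    lists:prefix([<<"name">>], [<<"name">>, <<"age">>]),   %% true
    lists:prefix([<<"age">>],  [<<"name">>, <<"age">>]),   %% false
    %% text index: order is irrelevant, only membership matters
    sets:is_subset(sets:from_list([<<"age">>]),
                   sets:from_list([<<"name">>, <<"age">>]))  %% true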
new(Db, Opts) ->
@@ -250,9 +257,9 @@ columns(#idx{}=Idx) ->
Mod:columns(Idx).
-is_usable(#idx{}=Idx, Selector) ->
+is_usable(#idx{}=Idx, Selector, SortFields) ->
Mod = idx_mod(Idx),
- Mod:is_usable(Idx, Selector).
+ Mod:is_usable(Idx, Selector, SortFields).
start_key(#idx{}=Idx, Ranges) ->
diff --git a/src/mango/src/mango_idx_special.erl b/src/mango/src/mango_idx_special.erl
index a8f94002b..12da1cbe5 100644
--- a/src/mango/src/mango_idx_special.erl
+++ b/src/mango/src/mango_idx_special.erl
@@ -20,7 +20,7 @@
from_ddoc/1,
to_json/1,
columns/1,
- is_usable/2,
+ is_usable/3,
start_key/1,
end_key/1
]).
@@ -63,7 +63,7 @@ columns(#idx{def=all_docs}) ->
[<<"_id">>].
-is_usable(#idx{def=all_docs}, Selector) ->
+is_usable(#idx{def=all_docs}, Selector, _) ->
Fields = mango_idx_view:indexable_fields(Selector),
lists:member(<<"_id">>, Fields).
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
index e00c241d2..e4ffc91db 100644
--- a/src/mango/src/mango_idx_text.erl
+++ b/src/mango/src/mango_idx_text.erl
@@ -22,7 +22,7 @@
from_ddoc/1,
to_json/1,
columns/1,
- is_usable/2,
+ is_usable/3,
get_default_field_options/1
]).
@@ -125,7 +125,7 @@ columns(Idx) ->
end.
-is_usable(Idx, Selector) ->
+is_usable(Idx, Selector, _) ->
case columns(Idx) of
all_fields ->
true;
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
index 4cb039c4a..f1041bbaf 100644
--- a/src/mango/src/mango_idx_view.erl
+++ b/src/mango/src/mango_idx_view.erl
@@ -20,7 +20,7 @@
remove/2,
from_ddoc/1,
to_json/1,
- is_usable/2,
+ is_usable/3,
columns/1,
start_key/1,
end_key/1,
@@ -34,6 +34,7 @@
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
+-include("mango_idx_view.hrl").
validate_new(#idx{}=Idx, _Db) ->
@@ -113,12 +114,17 @@ columns(Idx) ->
[Key || {Key, _} <- Fields].
-is_usable(Idx, Selector) ->
- % This index is usable if all of the columns are
+is_usable(Idx, Selector, SortFields) ->
+ % This index is usable if all of the columns are
% restricted by the selector such that they are required to exist
    % and the selector is not a text search (which would require a text index)
RequiredFields = columns(Idx),
- mango_selector:has_required_fields(Selector, RequiredFields)
+
+ % sort fields are required to exist in the results so
+ % we don't need to check the selector for these
+ RequiredFields1 = ordsets:subtract(lists:usort(RequiredFields), lists:usort(SortFields)),
+
+ mango_selector:has_required_fields(Selector, RequiredFields1)
andalso not is_text_search(Selector).
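Subtracting the sort fields means a column that appears in the sort no longer has to be constrained to exist by the selector; the sort itself guarantees it is considered. A worked sketch for a hypothetical index on `["name", "age"]` with a sort on `age`:

    RequiredFields = lists:usort([<<"name">>, <<"age">>]),  %% [<<"age">>, <<"name">>]
    SortFields     = [<<"age">>],
    %% only <<"name">> still has to be required by the selector:
    [<<"name">>] = ordsets:subtract(RequiredFields, SortFields)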
@@ -158,11 +164,11 @@ start_key([{'$eq', Key, '$eq', Key} | Rest]) ->
end_key([]) ->
- [{[]}];
+ [?MAX_JSON_OBJ];
end_key([{_, _, '$lt', Key} | Rest]) ->
case mango_json:special(Key) of
true ->
- [{[]}];
+ [?MAX_JSON_OBJ];
false ->
[Key | end_key(Rest)]
end;
diff --git a/src/mango/src/mango_idx_view.hrl b/src/mango/src/mango_idx_view.hrl
new file mode 100644
index 000000000..0d213e56e
--- /dev/null
+++ b/src/mango/src/mango_idx_view.hrl
@@ -0,0 +1,14 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-define(MAX_STR, <<255, 255, 255, 255>>).
+-define(MAX_JSON_OBJ, {<<255, 255, 255, 255>>}).
\ No newline at end of file
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
index fe3998683..4ff36945a 100644
--- a/src/mango/src/mango_selector.erl
+++ b/src/mango/src/mango_selector.erl
@@ -609,7 +609,7 @@ has_required_fields([{[{Field, Cond}]} | Rest], RequiredFields) ->
_ ->
has_required_fields(Rest, lists:delete(Field, RequiredFields))
end.
-
+
%%%%%%%% module tests below %%%%%%%%
diff --git a/src/mango/test/01-index-crud-test.py b/src/mango/test/01-index-crud-test.py
index 617bfd523..cf5b91865 100644
--- a/src/mango/test/01-index-crud-test.py
+++ b/src/mango/test/01-index-crud-test.py
@@ -36,7 +36,7 @@ class IndexCrudTests(mango.DbPerClass):
try:
self.db.create_index(fields)
except Exception as e:
- assert e.response.status_code == 400
+ self.assertEqual(e.response.status_code, 400)
else:
raise AssertionError("bad create index")
@@ -55,7 +55,7 @@ class IndexCrudTests(mango.DbPerClass):
try:
self.db.create_index(["foo"], idx_type=bt)
except Exception as e:
- assert e.response.status_code == 400, (bt, e.response.status_code)
+ self.assertEqual(e.response.status_code, 400, (bt, e.response.status_code))
else:
raise AssertionError("bad create index")
@@ -71,13 +71,13 @@ class IndexCrudTests(mango.DbPerClass):
try:
self.db.create_index(["foo"], name=bn)
except Exception as e:
- assert e.response.status_code == 400
+ self.assertEqual(e.response.status_code, 400)
else:
raise AssertionError("bad create index")
try:
self.db.create_index(["foo"], ddoc=bn)
except Exception as e:
- assert e.response.status_code == 400
+ self.assertEqual(e.response.status_code, 400)
else:
raise AssertionError("bad create index")
@@ -88,7 +88,7 @@ class IndexCrudTests(mango.DbPerClass):
for idx in self.db.list_indexes():
if idx["name"] != "idx_01":
continue
- assert idx["def"]["fields"] == [{"foo": "asc"}, {"bar": "asc"}]
+ self.assertEqual(idx["def"]["fields"], [{"foo": "asc"}, {"bar": "asc"}])
return
raise AssertionError("index not created")
@@ -106,7 +106,7 @@ class IndexCrudTests(mango.DbPerClass):
for idx in self.db.list_indexes():
if idx["name"] != "idx_02":
continue
- assert idx["def"]["fields"] == [{"baz": "asc"}, {"foo": "asc"}]
+ self.assertEqual(idx["def"]["fields"], [{"baz": "asc"}, {"foo": "asc"}])
return
raise AssertionError("index not created")
@@ -118,9 +118,9 @@ class IndexCrudTests(mango.DbPerClass):
continue
ddocid = idx["ddoc"]
doc = self.db.open_doc(ddocid)
- assert doc["_id"] == ddocid
+ self.assertEqual(doc["_id"], ddocid)
info = self.db.ddoc_info(ddocid)
- assert info["name"] == ddocid.split('_design/')[-1]
+ self.assertEqual(info["name"], ddocid.split('_design/')[-1])
def test_delete_idx_escaped(self):
self.db.create_index(["foo", "bar"], name="idx_01")
@@ -130,10 +130,10 @@ class IndexCrudTests(mango.DbPerClass):
for idx in self.db.list_indexes():
if idx["name"] != "idx_del_1":
continue
- assert idx["def"]["fields"] == [{"bing": "asc"}]
+ self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
self.db.delete_index(idx["ddoc"].replace("/", "%2F"), idx["name"])
post_indexes = self.db.list_indexes()
- assert pre_indexes == post_indexes
+ self.assertEqual(pre_indexes, post_indexes)
def test_delete_idx_unescaped(self):
pre_indexes = self.db.list_indexes()
@@ -142,10 +142,10 @@ class IndexCrudTests(mango.DbPerClass):
for idx in self.db.list_indexes():
if idx["name"] != "idx_del_2":
continue
- assert idx["def"]["fields"] == [{"bing": "asc"}]
+ self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
self.db.delete_index(idx["ddoc"], idx["name"])
post_indexes = self.db.list_indexes()
- assert pre_indexes == post_indexes
+ self.assertEqual(pre_indexes, post_indexes)
def test_delete_idx_no_design(self):
pre_indexes = self.db.list_indexes()
@@ -154,10 +154,10 @@ class IndexCrudTests(mango.DbPerClass):
for idx in self.db.list_indexes():
if idx["name"] != "idx_del_3":
continue
- assert idx["def"]["fields"] == [{"bing": "asc"}]
+ self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
self.db.delete_index(idx["ddoc"].split("/")[-1], idx["name"])
post_indexes = self.db.list_indexes()
- assert pre_indexes == post_indexes
+ self.assertEqual(pre_indexes, post_indexes)
def test_bulk_delete(self):
fields = ["field1"]
@@ -182,8 +182,8 @@ class IndexCrudTests(mango.DbPerClass):
ret = self.db.bulk_delete(docids)
- assert ret["fail"][0]["id"] == "_design/this_is_not_an_index_name"
- assert len(ret["success"]) == 3
+ self.assertEqual(ret["fail"][0]["id"], "_design/this_is_not_an_index_name")
+ self.assertEqual(len(ret["success"]), 3)
for idx in self.db.list_indexes():
assert idx["type"] != "json"
@@ -197,18 +197,18 @@ class IndexCrudTests(mango.DbPerClass):
for idx in self.db.list_indexes():
if idx["name"] != "idx_recreate":
continue
- assert idx["def"]["fields"] == [{"bing": "asc"}]
+ self.assertEqual(idx["def"]["fields"], [{"bing": "asc"}])
self.db.delete_index(idx["ddoc"], idx["name"])
break
post_indexes = self.db.list_indexes()
- assert pre_indexes == post_indexes
+ self.assertEqual(pre_indexes, post_indexes)
- def test_delete_misisng(self):
+ def test_delete_missing(self):
# Missing design doc
try:
self.db.delete_index("this_is_not_a_design_doc_id", "foo")
except Exception as e:
- assert e.response.status_code == 404
+ self.assertEqual(e.response.status_code, 404)
else:
raise AssertionError("bad index delete")
@@ -221,7 +221,7 @@ class IndexCrudTests(mango.DbPerClass):
try:
self.db.delete_index(ddocid, "this_is_not_an_index_name")
except Exception as e:
- assert e.response.status_code == 404
+ self.assertEqual(e.response.status_code, 404)
else:
raise AssertionError("bad index delete")
@@ -229,11 +229,54 @@ class IndexCrudTests(mango.DbPerClass):
try:
self.db.delete_index(ddocid, idx["name"], idx_type="not_a_real_type")
except Exception as e:
- assert e.response.status_code == 404
+ self.assertEqual(e.response.status_code, 404)
else:
raise AssertionError("bad index delete")
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
+ def test_limit_skip_index(self):
+ fields = ["field1"]
+ ret = self.db.create_index(fields, name="idx_01")
+ assert ret is True
+
+ fields = ["field2"]
+ ret = self.db.create_index(fields, name="idx_02")
+ assert ret is True
+
+ fields = ["field3"]
+ ret = self.db.create_index(fields, name="idx_03")
+ assert ret is True
+
+ fields = ["field4"]
+ ret = self.db.create_index(fields, name="idx_04")
+ assert ret is True
+
+ fields = ["field5"]
+ ret = self.db.create_index(fields, name="idx_05")
+ assert ret is True
+
+ self.assertEqual(len(self.db.list_indexes(limit=2)), 2)
+ self.assertEqual(len(self.db.list_indexes(limit=5,skip=4)), 2)
+ self.assertEqual(len(self.db.list_indexes(skip=5)), 1)
+ self.assertEqual(len(self.db.list_indexes(skip=6)), 0)
+ self.assertEqual(len(self.db.list_indexes(skip=100)), 0)
+ self.assertEqual(len(self.db.list_indexes(limit=10000000)), 6)
+
+ try:
+ self.db.list_indexes(skip=-1)
+ except Exception as e:
+ self.assertEqual(e.response.status_code, 500)
+
+ try:
+ self.db.list_indexes(limit=0)
+ except Exception as e:
+ self.assertEqual(e.response.status_code, 500)
+
+
+@unittest.skipUnless(mango.has_text_service(), "requires text service")
+class IndexCrudTextTests(mango.DbPerClass):
+ def setUp(self):
+ self.db.recreate()
+
def test_create_text_idx(self):
fields = [
{"name":"stringidx", "type" : "string"},
@@ -244,14 +287,13 @@ class IndexCrudTests(mango.DbPerClass):
for idx in self.db.list_indexes():
if idx["name"] != "text_idx_01":
continue
- assert idx["def"]["fields"] == [
+ self.assertEqual(idx["def"]["fields"], [
{"stringidx": "string"},
{"booleanidx": "boolean"}
- ]
+ ])
return
raise AssertionError("index not created")
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_create_bad_text_idx(self):
bad_fields = [
True,
@@ -270,10 +312,10 @@ class IndexCrudTests(mango.DbPerClass):
try:
self.db.create_text_index(fields=fields)
except Exception as e:
- assert e.response.status_code == 400
+ self.assertEqual(e.response.status_code, 400)
else:
raise AssertionError("bad create text index")
-
+
def test_limit_skip_index(self):
fields = ["field1"]
ret = self.db.create_index(fields, name="idx_01")
@@ -291,28 +333,26 @@ class IndexCrudTests(mango.DbPerClass):
ret = self.db.create_index(fields, name="idx_04")
assert ret is True
- fields = ["field5"]
- ret = self.db.create_index(fields, name="idx_05")
+ fields = [
+ {"name":"stringidx", "type" : "string"},
+ {"name":"booleanidx", "type": "boolean"}
+ ]
+ ret = self.db.create_text_index(fields=fields, name="idx_05")
assert ret is True
- skip_add = 0
-
- if mango.has_text_service():
- skip_add = 1
-
- assert len(self.db.list_indexes(limit=2)) == 2
- assert len(self.db.list_indexes(limit=5,skip=4)) == 2 + skip_add
- assert len(self.db.list_indexes(skip=5)) == 1 + skip_add
- assert len(self.db.list_indexes(skip=6)) == 0 + skip_add
- assert len(self.db.list_indexes(skip=100)) == 0
- assert len(self.db.list_indexes(limit=10000000)) == 6 + skip_add
+ self.assertEqual(len(self.db.list_indexes(limit=2)), 2)
+ self.assertEqual(len(self.db.list_indexes(limit=5,skip=4)), 2)
+ self.assertEqual(len(self.db.list_indexes(skip=5)), 1)
+ self.assertEqual(len(self.db.list_indexes(skip=6)), 0)
+ self.assertEqual(len(self.db.list_indexes(skip=100)), 0)
+ self.assertEqual(len(self.db.list_indexes(limit=10000000)), 6)
try:
self.db.list_indexes(skip=-1)
except Exception as e:
- assert e.response.status_code == 500
+ self.assertEqual(e.response.status_code, 500)
try:
self.db.list_indexes(limit=0)
except Exception as e:
- assert e.response.status_code == 500
+ self.assertEqual(e.response.status_code, 500)
diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py
index a8725ffa8..82554a112 100644
--- a/src/mango/test/02-basic-find-test.py
+++ b/src/mango/test/02-basic-find-test.py
@@ -222,6 +222,52 @@ class BasicFindTests(mango.UserDocsTests):
docs2 = list(reversed(sorted(docs1, key=lambda d: d["age"])))
assert docs1 is not docs2 and docs1 == docs2
+ def test_sort_desc_complex(self):
+ docs = self.db.find({
+ "company": {"$lt": "M"},
+ "$or": [
+ {"company": "Dreamia"},
+ {"manager": True}
+ ]
+ }, sort=[{"company":"desc"}, {"manager":"desc"}])
+
+ companies_returned = list(d["company"] for d in docs)
+ desc_companies = sorted(companies_returned, reverse=True)
+ self.assertEqual(desc_companies, companies_returned)
+
+ def test_sort_with_primary_sort_not_in_selector(self):
+ try:
+ docs = self.db.find({
+ "name.last": {"$lt": "M"}
+ }, sort=[{"name.first":"desc"}])
+ except Exception as e:
+ self.assertEqual(e.response.status_code, 400)
+ resp = e.response.json()
+ self.assertEqual(resp["error"], "no_usable_index")
+ else:
+ raise AssertionError("expected find error")
+
+ def test_sort_exists_true(self):
+ docs1 = self.db.find({"age": {"$gt": 0, "$exists": True}}, sort=[{"age":"asc"}])
+ docs2 = list(sorted(docs1, key=lambda d: d["age"]))
+ assert docs1 is not docs2 and docs1 == docs2
+
+ def test_sort_desc_complex_error(self):
+ try:
+ self.db.find({
+ "company": {"$lt": "M"},
+ "$or": [
+ {"company": "Dreamia"},
+ {"manager": True}
+ ]
+ }, sort=[{"company":"desc"}])
+ except Exception as e:
+ self.assertEqual(e.response.status_code, 400)
+ resp = e.response.json()
+ self.assertEqual(resp["error"], "no_usable_index")
+ else:
+ raise AssertionError("expected find error")
+
def test_fields(self):
selector = {"age": {"$gt": 0}}
docs = self.db.find(selector, fields=["user_id", "location.address"])
@@ -273,5 +319,5 @@ class BasicFindTests(mango.UserDocsTests):
assert explain["mrargs"]["update"] == True
assert explain["mrargs"]["reduce"] == False
assert explain["mrargs"]["start_key"] == [0]
- assert explain["mrargs"]["end_key"] == [{}]
+ assert explain["mrargs"]["end_key"] == ["<MAX>"]
assert explain["mrargs"]["include_docs"] == True
diff --git a/src/mango/test/03-operator-test.py b/src/mango/test/03-operator-test.py
index 1af39f205..239cc7d41 100644
--- a/src/mango/test/03-operator-test.py
+++ b/src/mango/test/03-operator-test.py
@@ -174,6 +174,9 @@ class OperatorTests:
for d in docs:
self.assertIn("twitter", d)
+    # ideally this would be consistent across index types but, alas, it is not
+    @unittest.skipIf(mango.has_text_service(),
+        "text indexes do not support range queries across type boundaries")
def test_lt_includes_null_but_not_missing(self):
docs = self.db.find({
"twitter": {"$lt": 1}
@@ -183,6 +186,8 @@ class OperatorTests:
for d in docs:
self.assertEqual(d["twitter"], None)
+    @unittest.skipIf(mango.has_text_service(),
+        "text indexes do not support range queries across type boundaries")
def test_lte_includes_null_but_not_missing(self):
        docs = self.db.find({
            "twitter": {"$lte": 1}
diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py
index 1cc210382..05571a7e8 100644
--- a/src/mango/test/05-index-selection-test.py
+++ b/src/mango/test/05-index-selection-test.py
@@ -15,12 +15,7 @@ import user_docs
import unittest
-class IndexSelectionTests(mango.UserDocsTests):
- @classmethod
- def setUpClass(klass):
- super(IndexSelectionTests, klass).setUpClass()
- if mango.has_text_service():
- user_docs.add_text_indexes(klass.db, {})
+class IndexSelectionTests:
def test_basic(self):
resp = self.db.find({"age": 123}, explain=True)
@@ -33,30 +28,6 @@ class IndexSelectionTests(mango.UserDocsTests):
}, explain=True)
self.assertEqual(resp["index"]["type"], "json")
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_with_text(self):
- resp = self.db.find({
- "$text" : "Stephanie",
- "name.first": "Stephanie",
- "name.last": "This doesn't have to match anything."
- }, explain=True)
- self.assertEqual(resp["index"]["type"], "text")
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_no_view_index(self):
- resp = self.db.find({"name.first": "Ohai!"}, explain=True)
- self.assertEqual(resp["index"]["type"], "text")
-
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
- def test_with_or(self):
- resp = self.db.find({
- "$or": [
- {"name.first": "Stephanie"},
- {"name.last": "This doesn't have to match anything."}
- ]
- }, explain=True)
- self.assertEqual(resp["index"]["type"], "text")
-
def test_use_most_columns(self):
# ddoc id for the age index
ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f"
@@ -92,36 +63,6 @@ class IndexSelectionTests(mango.UserDocsTests):
else:
raise AssertionError("bad find")
- def test_uses_all_docs_when_fields_do_not_match_selector(self):
- # index exists on ["company", "manager"] but not ["company"]
- # so we should fall back to all docs (so we include docs
- # with no "manager" field)
- selector = {
- "company": "Pharmex"
- }
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["company"], "Pharmex")
- self.assertNotIn("manager", docs[0])
-
- resp_explain = self.db.find(selector, explain=True)
- self.assertEqual(resp_explain["index"]["type"], "special")
-
- def test_uses_all_docs_when_selector_doesnt_require_fields_to_exist(self):
- # as in test above, use a selector that doesn't overlap with the index
- # due to an explicit exists clause
- selector = {
- "company": "Pharmex",
- "manager": {"$exists": False}
- }
- docs = self.db.find(selector)
- self.assertEqual(len(docs), 1)
- self.assertEqual(docs[0]["company"], "Pharmex")
- self.assertNotIn("manager", docs[0])
-
- resp_explain = self.db.find(selector, explain=True)
- self.assertEqual(resp_explain["index"]["type"], "special")
-
def test_uses_index_when_no_range_or_equals(self):
# index on ["manager"] should be valid because
# selector requires "manager" to exist. The
@@ -200,7 +141,77 @@ class IndexSelectionTests(mango.UserDocsTests):
with self.assertRaises(KeyError):
self.db.save_doc(design_doc)
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
+
+class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests):
+
+ @classmethod
+ def setUpClass(klass):
+ super(JSONIndexSelectionTests, klass).setUpClass()
+
+ def test_uses_all_docs_when_fields_do_not_match_selector(self):
+ # index exists on ["company", "manager"] but not ["company"]
+ # so we should fall back to all docs (so we include docs
+ # with no "manager" field)
+ selector = {
+ "company": "Pharmex"
+ }
+ docs = self.db.find(selector)
+ self.assertEqual(len(docs), 1)
+ self.assertEqual(docs[0]["company"], "Pharmex")
+ self.assertNotIn("manager", docs[0])
+
+ resp_explain = self.db.find(selector, explain=True)
+
+ self.assertEqual(resp_explain["index"]["type"], "special")
+
+ def test_uses_all_docs_when_selector_doesnt_require_fields_to_exist(self):
+ # as in test above, use a selector that doesn't overlap with the index
+ # due to an explicit exists clause
+ selector = {
+ "company": "Pharmex",
+ "manager": {"$exists": False}
+ }
+ docs = self.db.find(selector)
+ self.assertEqual(len(docs), 1)
+ self.assertEqual(docs[0]["company"], "Pharmex")
+ self.assertNotIn("manager", docs[0])
+
+ resp_explain = self.db.find(selector, explain=True)
+ self.assertEqual(resp_explain["index"]["type"], "special")
+
+
+@unittest.skipUnless(mango.has_text_service(), "requires text service")
+class TextIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests):
+
+ @classmethod
+ def setUpClass(klass):
+ super(TextIndexSelectionTests, klass).setUpClass()
+
+ def setUp(self):
+ self.db.recreate()
+ user_docs.add_text_indexes(self.db, {})
+
+ def test_with_text(self):
+ resp = self.db.find({
+ "$text" : "Stephanie",
+ "name.first": "Stephanie",
+ "name.last": "This doesn't have to match anything."
+ }, explain=True)
+ self.assertEqual(resp["index"]["type"], "text")
+
+ def test_no_view_index(self):
+ resp = self.db.find({"name.first": "Ohai!"}, explain=True)
+ self.assertEqual(resp["index"]["type"], "text")
+
+ def test_with_or(self):
+ resp = self.db.find({
+ "$or": [
+ {"name.first": "Stephanie"},
+ {"name.last": "This doesn't have to match anything."}
+ ]
+ }, explain=True)
+ self.assertEqual(resp["index"]["type"], "text")
+
def test_manual_bad_text_idx(self):
design_doc = {
"_id": "_design/bad_text_index",
@@ -243,8 +254,8 @@ class MultiTextIndexSelectionTests(mango.UserDocsTests):
klass.db.create_text_index(ddoc="foo", analyzer="keyword")
klass.db.create_text_index(ddoc="bar", analyzer="email")
- def test_view_ok_with_multi_text(self):
- resp = self.db.find({"name.last": "A last name"}, explain=True)
+ def test_fallback_to_json_with_multi_text(self):
+ resp = self.db.find({"name.first": "A first name", "name.last": "A last name"}, explain=True)
self.assertEqual(resp["index"]["type"], "json")
def test_multi_text_index_is_error(self):
diff --git a/src/mango/test/09-text-sort-test.py b/src/mango/test/09-text-sort-test.py
index 1c5557227..a1a644c79 100644
--- a/src/mango/test/09-text-sort-test.py
+++ b/src/mango/test/09-text-sort-test.py
@@ -19,60 +19,60 @@ class SortTests(mango.UserDocsTextTests):
def test_number_sort(self):
q = {"age": {"$gt": 0}}
docs = self.db.find(q, sort=["age:number"])
- assert len(docs) == 15
- assert docs[0]["age"] == 22
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["age"], 22)
def test_number_sort_desc(self):
q = {"age": {"$gt": 0}}
docs = self.db.find(q, sort=[{"age": "desc"}])
- assert len(docs) == 15
- assert docs[0]["age"] == 79
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["age"], 79)
q = {"manager": True}
docs = self.db.find(q, sort=[{"age:number": "desc"}])
- assert len(docs) == 11
- assert docs[0]["age"] == 79
+ self.assertEqual(len(docs), 10)
+ self.assertEqual(docs[0]["age"], 79)
def test_string_sort(self):
q = {"email": {"$gt": None}}
docs = self.db.find(q, sort=["email:string"])
- assert len(docs) == 15
- assert docs[0]["email"] == "abbottwatson@talkola.com"
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["email"], "abbottwatson@talkola.com")
def test_notype_sort(self):
q = {"email": {"$gt": None}}
try:
self.db.find(q, sort=["email"])
except Exception as e:
- assert e.response.status_code == 400
+ self.assertEqual(e.response.status_code, 400)
else:
raise AssertionError("Should have thrown error for sort")
def test_array_sort(self):
q = {"favorites": {"$exists": True}}
docs = self.db.find(q, sort=["favorites.[]:string"])
- assert len(docs) == 15
- assert docs[0]["user_id"] == 8
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["user_id"], 8)
def test_multi_sort(self):
q = {"name": {"$exists": True}}
docs = self.db.find(q, sort=["name.last:string", "age:number"])
- assert len(docs) == 15
- assert docs[0]["name"] == {"last":"Ewing","first":"Shelly"}
- assert docs[1]["age"] == 22
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["name"], {"last":"Ewing","first":"Shelly"})
+ self.assertEqual(docs[1]["age"], 22)
def test_guess_type_sort(self):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}}]}
docs = self.db.find(q, sort=["age"])
- assert len(docs) == 15
- assert docs[0]["age"] == 22
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["age"], 22)
def test_guess_dup_type_sort(self):
q = {"$and": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
{"age":{"$lte": 100}}]}
docs = self.db.find(q, sort=["age"])
- assert len(docs) == 15
- assert docs[0]["age"] == 22
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["age"], 22)
def test_ambiguous_type_sort(self):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
@@ -80,7 +80,7 @@ class SortTests(mango.UserDocsTextTests):
try:
self.db.find(q, sort=["age"])
except Exception as e:
- assert e.response.status_code == 400
+ self.assertEqual(e.response.status_code, 400)
else:
raise AssertionError("Should have thrown error for sort")
@@ -88,14 +88,14 @@ class SortTests(mango.UserDocsTextTests):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
{"name.last": "Harvey"}]}
docs = self.db.find(q, sort=["name.last", "age"])
- assert len(docs) == 15
- assert docs[0]["name"] == {"last":"Ewing","first":"Shelly"}
- assert docs[1]["age"] == 22
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["name"], {"last":"Ewing","first":"Shelly"})
+ self.assertEqual(docs[1]["age"], 22)
def test_guess_mix_sort(self):
q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}},
{"name.last": "Harvey"}]}
docs = self.db.find(q, sort=["name.last:string", "age"])
- assert len(docs) == 15
- assert docs[0]["name"] == {"last":"Ewing","first":"Shelly"}
- assert docs[1]["age"] == 22
+ self.assertEqual(len(docs), 15)
+ self.assertEqual(docs[0]["name"], {"last":"Ewing","first":"Shelly"})
+ self.assertEqual(docs[1]["age"], 22)
diff --git a/src/mango/test/10-disable-array-length-field-test.py b/src/mango/test/10-disable-array-length-field-test.py
index 0715f1db9..ce7713b63 100644
--- a/src/mango/test/10-disable-array-length-field-test.py
+++ b/src/mango/test/10-disable-array-length-field-test.py
@@ -13,28 +13,24 @@
import mango
import unittest
-
+@unittest.skipUnless(mango.has_text_service(), "requires text service")
class DisableIndexArrayLengthsTest(mango.UserDocsTextTests):
- @classmethod
- def setUpClass(klass):
- super(DisableIndexArrayLengthsTest, klass).setUpClass()
- if mango.has_text_service():
- klass.db.create_text_index(ddoc="disable_index_array_lengths",
+    def setUp(self):
+ self.db.recreate()
+ self.db.create_text_index(ddoc="disable_index_array_lengths",
analyzer="keyword",
index_array_lengths=False)
- klass.db.create_text_index(ddoc="explicit_enable_index_array_lengths",
+ self.db.create_text_index(ddoc="explicit_enable_index_array_lengths",
analyzer="keyword",
index_array_lengths=True)
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_disable_index_array_length(self):
docs = self.db.find({"favorites": {"$size": 4}},
use_index="disable_index_array_lengths")
for d in docs:
assert len(d["favorites"]) == 0
- @unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_enable_index_array_length(self):
docs = self.db.find({"favorites": {"$size": 4}},
use_index="explicit_enable_index_array_lengths")
diff --git a/src/mango/test/11-ignore-design-docs.py b/src/mango/test/11-ignore-design-docs-test.py
index ea7165e3f..ea7165e3f 100644
--- a/src/mango/test/11-ignore-design-docs.py
+++ b/src/mango/test/11-ignore-design-docs-test.py
diff --git a/src/mango/test/12-use-correct-index.py b/src/mango/test/12-use-correct-index-test.py
index 84b425343..5a2b24d3f 100644
--- a/src/mango/test/12-use-correct-index.py
+++ b/src/mango/test/12-use-correct-index-test.py
@@ -68,15 +68,15 @@ class ChooseCorrectIndexForDocs(mango.DbPerClass):
self.assertEqual(explain["index"]["ddoc"], '_design/bbb')
def test_choose_index_alphabetically(self):
- self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age", "location"], ddoc="bbb")
+ self.db.create_index(["name"], ddoc="aaa")
+ self.db.create_index(["name"], ddoc="bbb")
self.db.create_index(["name"], ddoc="zzz")
explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
self.assertEqual(explain["index"]["ddoc"], '_design/aaa')
def test_choose_index_most_accurate(self):
- self.db.create_index(["name", "location", "user_id"], ddoc="aaa")
- self.db.create_index(["name", "age", "user_id"], ddoc="bbb")
+ self.db.create_index(["name", "age", "user_id"], ddoc="aaa")
+ self.db.create_index(["name", "age"], ddoc="bbb")
self.db.create_index(["name"], ddoc="zzz")
explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True)
self.assertEqual(explain["index"]["ddoc"], '_design/bbb')
@@ -105,3 +105,12 @@ class ChooseCorrectIndexForDocs(mango.DbPerClass):
self.db.create_index(["a", "d", "e"])
explain = self.db.find({"a": {"$gt": 0}, "b": {"$gt": 0}, "c": {"$gt": 0}}, explain=True)
self.assertEqual(explain["index"]["def"]["fields"], [{'a': 'asc'}, {'b': 'asc'}, {'c': 'asc'}])
+
+ def test_can_query_with_range_on_secondary_column(self):
+ self.db.create_index(["age", "name"], ddoc="bbb")
+ selector = {"age": 10, "name": {"$gte": 0}}
+ docs = self.db.find(selector)
+ self.assertEqual(len(docs), 1)
+ explain = self.db.find(selector, explain=True)
+ self.assertEqual(explain["index"]["ddoc"], "_design/bbb")
+ self.assertEqual(explain["mrargs"]["end_key"], [10, '<MAX>'])
diff --git a/src/mango/test/14-json-pagination.py b/src/mango/test/14-json-pagination-test.py
index ea06e0a2a..ea06e0a2a 100644
--- a/src/mango/test/14-json-pagination.py
+++ b/src/mango/test/14-json-pagination-test.py
diff --git a/src/mango/test/15-execution-stats-test.py b/src/mango/test/15-execution-stats-test.py
index 67c9e64ec..6b7408b8b 100644
--- a/src/mango/test/15-execution-stats-test.py
+++ b/src/mango/test/15-execution-stats-test.py
@@ -38,6 +38,10 @@ class ExecutionStatsTests(mango.UserDocsTests):
self.assertEqual(resp["execution_stats"]["results_returned"], 3)
self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0)
+ def test_results_returned_limit(self):
+ resp = self.db.find({"age": {"$lt": 35}}, limit=2, return_raw=True, executionStats=True)
+ self.assertEqual(resp["execution_stats"]["results_returned"], len(resp["docs"]))
+
@unittest.skipUnless(mango.has_text_service(), "requires text service")
class ExecutionStatsTests_Text(mango.UserDocsTextTests):
diff --git a/src/mango/test/16-index-selectors.py b/src/mango/test/16-index-selectors-test.py
index 3ce659ecf..456b396c7 100644
--- a/src/mango/test/16-index-selectors.py
+++ b/src/mango/test/16-index-selectors-test.py
@@ -73,6 +73,28 @@ DOCS = [
},
]
+oldschoolnoselectorddoc = {
+ "_id": "_design/oldschoolnoselector",
+ "language": "query",
+ "views": {
+ "oldschoolnoselector": {
+ "map": {
+ "fields": {
+ "location": "asc"
+ }
+ },
+ "reduce": "_count",
+ "options": {
+ "def": {
+ "fields": [
+ "location"
+ ]
+ }
+ }
+ }
+ }
+}
+
oldschoolddoc = {
"_id": "_design/oldschool",
"language": "query",
@@ -178,6 +200,14 @@ class IndexSelectorJson(mango.DbPerClass):
resp = self.db.find(selector, explain=True)
self.assertEqual(resp["index"]["name"], "NotSelected")
+ def test_old_selector_with_no_selector_still_supported(self):
+ selector = {"location": {"$gte": "FRA"}}
+ self.db.save_doc(oldschoolnoselectorddoc)
+ resp = self.db.find(selector, explain=True, use_index='oldschoolnoselector')
+ self.assertEqual(resp["index"]["name"], "oldschoolnoselector")
+ docs = self.db.find(selector, use_index='oldschoolnoselector')
+ self.assertEqual(len(docs), 3)
+
def test_old_selector_still_supported(self):
selector = {"location": {"$gte": "FRA"}}
self.db.save_doc(oldschoolddoc)
diff --git a/src/mango/test/17-multi-type-value-test.py b/src/mango/test/17-multi-type-value-test.py
new file mode 100644
index 000000000..d838447d5
--- /dev/null
+++ b/src/mango/test/17-multi-type-value-test.py
@@ -0,0 +1,90 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import copy
+import mango
+import unittest
+
+DOCS = [
+ {
+ "_id": "1",
+ "name": "Jimi",
+ "age": 10
+ },
+ {
+ "_id": "2",
+ "name": {"forename":"Eddie"},
+ "age": 20
+ },
+ {
+ "_id": "3",
+ "name": None,
+ "age": 30
+ },
+ {
+ "_id": "4",
+ "name": 1,
+ "age": 40
+ },
+ {
+ "_id": "5",
+ "forename": "Sam",
+ "age": 50
+ }
+]
+
+
+class MultiValueFieldTests:
+
+ def test_can_query_with_name(self):
+ docs = self.db.find({"name": {"$exists": True}})
+ self.assertEqual(len(docs), 4)
+ for d in docs:
+ self.assertIn("name", d)
+
+ def test_can_query_with_name_subfield(self):
+ docs = self.db.find({"name.forename": {"$exists": True}})
+ self.assertEqual(len(docs), 1)
+ self.assertEqual(docs[0]["_id"], "2")
+
+ def test_can_query_with_name_range(self):
+ docs = self.db.find({"name": {"$gte": 0}})
+ # expect to include "Jimi", 1 and {"forename":"Eddie"}
+ self.assertEqual(len(docs), 3)
+ for d in docs:
+ self.assertIn("name", d)
+
+ def test_can_query_with_age_and_name_range(self):
+ docs = self.db.find({"age": {"$gte": 0, "$lt": 40}, "name": {"$gte": 0}})
+ # expect to include "Jimi", 1 and {"forename":"Eddie"}
+ self.assertEqual(len(docs), 2)
+ for d in docs:
+ self.assertIn("name", d)
+
+
+
+class MultiValueFieldJSONTests(mango.DbPerClass, MultiValueFieldTests):
+ def setUp(self):
+ self.db.recreate()
+ self.db.save_docs(copy.deepcopy(DOCS))
+ self.db.create_index(["name"])
+ self.db.create_index(["age", "name"])
+
+# @unittest.skipUnless(mango.has_text_service(), "requires text service")
+# class MultiValueFieldTextTests(MultiValueFieldDocsNoIndexes, OperatorTests):
+# pass
+
+
+class MultiValueFieldAllDocsTests(mango.DbPerClass, MultiValueFieldTests):
+ def setUp(self):
+ self.db.recreate()
+ self.db.save_docs(copy.deepcopy(DOCS))
diff --git a/src/mango/test/README.md b/src/mango/test/README.md
index fc2cd62e5..3c99cab9d 100644
--- a/src/mango/test/README.md
+++ b/src/mango/test/README.md
@@ -7,6 +7,23 @@ To run these, do this in the Mango top level directory:
$ virtualenv venv
$ source venv/bin/activate
- $ pip install nose requests
- $ pip install hypothesis
- $ nosetests
+ $ make pip-install
+ $ make test
+
+To run an individual test suite:
+    nosetests --nocapture test/12-use-correct-index-test.py
+
+To run the tests with text index support:
+ MANGO_TEXT_INDEXES=1 nosetests --nocapture test
+
+
+Test configuration
+==================
+
+The following environment variables can be used to configure the test fixtures:
+
+ * `COUCH_HOST` - root URL (including port) of the CouchDB instance to run the tests against. Default is `"http://127.0.0.1:15984"`.
+ * `COUCH_USER` - CouchDB username (with admin permissions). Default is `"testuser"`.
+ * `COUCH_PASSWORD` - CouchDB password. Default is `"testpass"`.
+ * `COUCH_AUTH_HEADER` - Optional Authorization header value. If specified, this is used instead of basic authentication with the username/password variables above.
+ * `MANGO_TEXT_INDEXES` - Set to `"1"` to run the tests only applicable to text indexes.
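For example, pointing the suite at a hypothetical local cluster with custom credentials: `COUCH_HOST=http://127.0.0.1:5984 COUCH_USER=admin COUCH_PASSWORD=secret nosetests --nocapture test` (all values here are placeholders).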
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index a275a23d0..03cc67c52 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -27,28 +27,51 @@ def random_db_name():
return "mango_test_" + uuid.uuid4().hex
def has_text_service():
- return os.path.isfile(os.getcwd() + "/../src/mango_cursor_text.erl")
+ return os.environ.get('MANGO_TEXT_INDEXES') == '1'
+
+def get_from_environment(key, default):
+ value = os.environ.get(key)
+ return value if value is not None else default
+
+# fixed delay to give the cluster time to settle between operations
+def delay(n=5, t=0.5):
+ for i in range(0, n):
+ time.sleep(t)
class Database(object):
- def __init__(self, host, port, dbname, auth=None):
- self.host = host
- self.port = port
+ def __init__(self, dbname,
+ host="127.0.0.1", port="15984",
+ user='testuser', password='testpass'):
+ root_url = get_from_environment('COUCH_HOST', "http://{}:{}".format(host, port))
+ auth_header = get_from_environment('COUCH_AUTH_HEADER', None)
+ user = get_from_environment('COUCH_USER', user)
+ password = get_from_environment('COUCH_PASSWORD', password)
+
+ self.root_url = root_url
self.dbname = dbname
self.sess = requests.session()
- self.sess.auth = ('testuser', 'testpass')
+
+ # allow explicit auth header to be set to enable testing
+ # against deployments where basic auth isn't available
+ if auth_header is not None:
+ self.sess.headers["Authorization"] = auth_header
+ else:
+ self.sess.auth = (user, password)
+
self.sess.headers["Content-Type"] = "application/json"
+
@property
def url(self):
- return "http://{}:{}/{}".format(self.host, self.port, self.dbname)
+ return "{}/{}".format(self.root_url, self.dbname)
def path(self, parts):
if isinstance(parts, ("".__class__, u"".__class__)):
parts = [parts]
return "/".join([self.url] + parts)
- def create(self, q=1, n=3):
+ def create(self, q=1, n=1):
r = self.sess.get(self.url)
if r.status_code == 404:
r = self.sess.put(self.url, params={"q":q, "n": n})
@@ -59,9 +82,9 @@ class Database(object):
def recreate(self):
self.delete()
- time.sleep(1)
+ delay()
self.create()
- time.sleep(1)
+ delay()
def save_doc(self, doc):
self.save_docs([doc])
@@ -103,6 +126,7 @@ class Database(object):
body["index"]["partial_filter_selector"] = partial_filter_selector
body = json.dumps(body)
r = self.sess.post(self.path("_index"), data=body)
+ delay()
r.raise_for_status()
assert r.json()["id"] is not None
assert r.json()["name"] is not None
@@ -133,6 +157,7 @@ class Database(object):
body["ddoc"] = ddoc
body = json.dumps(body)
r = self.sess.post(self.path("_index"), data=body)
+ delay()
r.raise_for_status()
return r.json()["result"] == "created"
@@ -147,7 +172,8 @@ class Database(object):
def delete_index(self, ddocid, name, idx_type="json"):
path = ["_index", ddocid, idx_type, name]
- r = self.sess.delete(self.path(path), params={"w":"3"})
+ r = self.sess.delete(self.path(path), params={"w": "3"})
+ delay()
r.raise_for_status()
def bulk_delete(self, docs):
@@ -157,6 +183,7 @@ class Database(object):
}
body = json.dumps(body)
r = self.sess.post(self.path("_index/_bulk_delete"), data=body)
+ delay(n=10)
return r.json()
def find(self, selector, limit=25, skip=0, sort=None, fields=None,
@@ -206,7 +233,7 @@ class UsersDbTests(unittest.TestCase):
@classmethod
def setUpClass(klass):
- klass.db = Database("127.0.0.1", "15984", "_users")
+ klass.db = Database("_users")
user_docs.setup_users(klass.db)
def setUp(self):
@@ -217,7 +244,7 @@ class DbPerClass(unittest.TestCase):
@classmethod
def setUpClass(klass):
- klass.db = Database("127.0.0.1", "15984", random_db_name())
+ klass.db = Database(random_db_name())
klass.db.create(q=1, n=3)
def setUp(self):
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index e2cbb2ec6..047154af8 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -19,7 +19,7 @@
-export([compare_nodelists/0, compare_shards/1]).
-export([quorum/1, group_by_proximity/1]).
-export([live_shards/2]).
--export([belongs/2]).
+-export([belongs/2, owner/3]).
-export([get_placement/1]).
%% For mem3 use only.
@@ -311,6 +311,12 @@ name(#shard{name=Name}) ->
name(#ordered_shard{name=Name}) ->
Name.
+% Direct calculation of node membership. This is the algorithmic part: it
+% doesn't read the shard map, it just picks the owner based on a hash.
+-spec owner(binary(), binary(), [node()]) -> node().
+owner(DbName, DocId, Nodes) ->
+ hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
+
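A quick usage sketch for the newly exported function (node names are invented); because the node list is `usort`ed before rotation, any permutation of the same nodes picks the same owner:

    Owner = mem3:owner(<<"mydb">>, <<"some-doc-id">>,
                       ['node1@host.a', 'node2@host.b', 'node3@host.c'])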
-ifdef(TEST).
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index be7e5aaaf..5e215e201 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -429,7 +429,7 @@ create_if_missing(Name) ->
end.
cache_insert(#st{cur_size=Cur}=St, DbName, Writer, Timeout) ->
- NewATime = now(),
+ NewATime = couch_util:unique_monotonic_integer(),
true = ets:delete(?SHARDS, DbName),
flush_write(DbName, Writer, Timeout),
case ets:lookup(?DBS, DbName) of
@@ -458,7 +458,7 @@ cache_remove(#st{cur_size=Cur}=St, DbName) ->
cache_hit(DbName) ->
case ets:lookup(?DBS, DbName) of
[{DbName, ATime}] ->
- NewATime = now(),
+ NewATime = couch_util:unique_monotonic_integer(),
true = ets:delete(?ATIMES, ATime),
true = ets:insert(?ATIMES, {NewATime, DbName}),
true = ets:insert(?DBS, {DbName, NewATime});
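Unlike the `os:timestamp()` swaps elsewhere in this commit, the ATime here is used as a key in the `?ATIMES` ets table, so it has to stay unique per entry; hence `couch_util:unique_monotonic_integer/0`. A plausible sketch of that helper, assuming it wraps the OTP 18+ unique-integer BIF (the real definition lives in the couch_util.erl changes in this same commit):

    %% strictly increasing within a node and never duplicated
    unique_monotonic_integer() ->
        erlang:unique_integer([monotonic, positive]).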
diff --git a/src/rexi/src/rexi_server.erl b/src/rexi/src/rexi_server.erl
index 614c3fc0c..3d3f272e4 100644
--- a/src/rexi/src/rexi_server.erl
+++ b/src/rexi/src/rexi_server.erl
@@ -144,7 +144,7 @@ init_p(From, {M,F,A}, Nonce) ->
node(ClientPid), ClientPid, M, F, length(A),
Class, Reason, Stack]),
exit(#error{
- timestamp = now(),
+ timestamp = os:timestamp(),
reason = {Class, Reason},
mfa = {M,F,A},
nonce = Nonce,
diff --git a/test/build/test-run-couch-for-mango.sh b/test/build/test-run-couch-for-mango.sh
index 6034a794c..0597a8fca 100755
--- a/test/build/test-run-couch-for-mango.sh
+++ b/test/build/test-run-couch-for-mango.sh
@@ -13,8 +13,17 @@
./dev/run -n 1 --admin=testuser:testpass &
export SERVER_PID=$!
-sleep 10
-curl http://dev:15984
+
+COUCH_STARTED=-1
+while [ $COUCH_STARTED -ne 0 ]; do
+  curl -s http://127.0.0.1:15984
+  COUCH_STARTED=$?
+  if [ $COUCH_STARTED -ne 0 ]; then
+    # couch is not up yet; wait before polling again
+    sleep 5
+  fi
+done
+
cd src/mango/
nosetests
diff --git a/test/javascript/couch.js b/test/javascript/couch.js
index 6ff3005f6..c325d68be 100644
--- a/test/javascript/couch.js
+++ b/test/javascript/couch.js
@@ -475,7 +475,7 @@ CouchDB.requestStats = function(path, test) {
query_arg = "?flush=true";
}
- var url = "/_node/node1@127.0.0.1/_stats/" + path.join("/") + query_arg;
+ var url = "/_node/_local/_stats/" + path.join("/") + query_arg;
var stat = CouchDB.request("GET", url).responseText;
return JSON.parse(stat);
};
diff --git a/test/javascript/run b/test/javascript/run
index f7659b0f2..c611be51e 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -17,6 +17,7 @@ import optparse as op
import os
import subprocess as sp
import sys
+import re
USAGE = "%prog [options] [command to run...]"
@@ -71,7 +72,7 @@ def mkformatter(tests):
def run_couchjs(test, fmt):
fmt(test)
- cmd = [COUCHJS, "-H", "-T"] + \
+ cmd = [COUCHJS, "--eval", "-H", "-T"] + \
["-u", "test/javascript/couchdb.uri"] + SCRIPTS + [test, RUNNER]
p = sp.Popen(
cmd,
@@ -100,7 +101,13 @@ def options():
help="Start from the given filename if multiple files "
"are passed"),
op.make_option("-a", "--all", action="store_true", dest="all",
- help="Run all tests, even if one or more fail")
+ help="Run all tests, even if one or more fail"),
+ op.make_option("-i", "--ignore", type="string", action="callback",
+ default=None, callback=get_delimited_list,
+ dest="ignore", help="Ignore test suites"),
+ op.make_option("-u", "--suites", type="string", action="callback",
+ default=None, callback=get_delimited_list,
+ dest="suites", help="Run specific suites")
]
@@ -108,23 +115,15 @@ def main():
parser = op.OptionParser(usage=USAGE, option_list=options())
opts, args = parser.parse_args()
+ run_list = []
+ ignore_list = []
tests = []
- if not len(args):
- args = ["test/javascript/tests"]
- for name in args:
- if os.path.isdir(name):
- tests.extend(sorted(glob.glob(os.path.join(name, "*.js"))))
- elif os.path.isfile(name):
- tests.append(name)
- else:
- pname = os.path.join("test/javascript/tests", name)
- if os.path.isfile(pname):
- tests.append(pname)
- elif os.path.isfile(pname + ".js"):
- tests.append(pname + ".js")
- else:
- sys.stderr.write("Unknown test: " + name + os.linesep)
- exit(1)
+
+ run_list = ["test/javascript/tests"] if not opts.suites else opts.suites
+ run_list = build_test_case_paths(run_list)
+ ignore_list = build_test_case_paths(opts.ignore)
+ # sort is needed because certain tests fail if executed out of order
+ tests = sorted(list(set(run_list)-set(ignore_list)))
if opts.start is not None:
tmp = []
@@ -152,6 +151,30 @@ def main():
failed, passed) + os.linesep)
exit(failed > 0)
+def build_test_case_paths(args=None):
+ tests = []
+ if args is None:
+ args = []
+ for name in args:
+ if os.path.isdir(name):
+ tests.extend(sorted(glob.glob(os.path.join(name, "*.js"))))
+ elif os.path.isfile(name):
+            tests.append(name)
+ else:
+ pname = os.path.join("test/javascript/tests", name)
+ if os.path.isfile(pname):
+ tests.append(pname)
+ elif os.path.isfile(pname + ".js"):
+ tests.append(pname + ".js")
+ else:
+ sys.stderr.write("Unknown test: " + name + os.linesep)
+ exit(1)
+ return tests
+
+
+def get_delimited_list(option, opt, value, parser):
+ delimited = [i for i in re.split(r',|\s', value.strip()) if i]
+ setattr(parser.values, option.dest, delimited)
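With these callbacks in place, suites and ignore lists are accepted as comma- or whitespace-delimited strings, e.g. `./test/javascript/run --suites "basics,view_errors" --ignore "view_sandboxing"` (suite names here are illustrative).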
if __name__ == "__main__":
try:
diff --git a/test/javascript/tests/config.js b/test/javascript/tests/config.js
index ee51ef5b9..8c7ce9917 100644
--- a/test/javascript/tests/config.js
+++ b/test/javascript/tests/config.js
@@ -212,4 +212,12 @@ couchTests.config = function(debug) {
headers: {"X-Couch-Persist": "false"}
});
TEquals(200, xhr.status, "Reset config whitelist to undefined");
+
+ // Confirm that the blacklist is functional
+ ["daemons", "external", "httpd_design_handlers", "httpd_db_handlers", "native_query_servers", "os_daemons", "query_servers"].forEach(function(section) {
+ xhr = CouchDB.request("PUT", "/_node/node1@127.0.0.1/_config/" + section + "/wohali",{
+ body: "\"rules\""
+ });
+ TEquals(403, xhr.status, "Blacklisted config section " + section);
+ });
};
diff --git a/test/javascript/tests/erlang_views.js b/test/javascript/tests/erlang_views.js
index 8ce9a7e42..ec78e6506 100644
--- a/test/javascript/tests/erlang_views.js
+++ b/test/javascript/tests/erlang_views.js
@@ -17,9 +17,7 @@ couchTests.erlang_views = function(debug) {
if (debug) debugger;
run_on_modified_server(
- [{section: "native_query_servers",
- key: "erlang",
- value: "{couch_native_process, start_link, []}"}],
+ [],
function() {
// Note we just do some basic 'smoke tests' here - the
// test/query_server_spec.rb tests have more comprehensive tests
diff --git a/test/javascript/tests/view_errors.js b/test/javascript/tests/view_errors.js
index b53a3c764..dd60292a3 100644
--- a/test/javascript/tests/view_errors.js
+++ b/test/javascript/tests/view_errors.js
@@ -174,9 +174,9 @@ couchTests.view_errors = function(debug) {
// if the reduce grows to fast, throw an overflow error
var path = "/" + db_name + "/_design/testbig/_view/reduce_too_big";
xhr = CouchDB.request("GET", path);
- T(xhr.status == 500);
+ T(xhr.status == 200);
result = JSON.parse(xhr.responseText);
- T(result.error == "reduce_overflow_error");
+ T(result.rows[0].error == "reduce_overflow_error");
try {
db.query(function() {emit(null, null)}, null, {startkey: 2, endkey:1});
@@ -185,6 +185,18 @@ couchTests.view_errors = function(debug) {
T(e.error == "query_parse_error");
T(e.reason.match(/no rows can match/i));
}
+
+ // querying a temporary view should give "gone" error message
+ var xhr = CouchDB.request("POST", "/" + db_name + "/_temp_view", {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({language: "javascript",
+ map : "function(doc){emit(doc.integer)}"
+ })
+ });
+ T(xhr.status == 410);
+ result = JSON.parse(xhr.responseText);
+ T(result.error == "gone");
+ T(result.reason == "Temporary views are not supported in CouchDB");
// });
// cleanup
diff --git a/test/javascript/tests/view_sandboxing.js b/test/javascript/tests/view_sandboxing.js
index 9e7fa8694..1cdd815de 100644
--- a/test/javascript/tests/view_sandboxing.js
+++ b/test/javascript/tests/view_sandboxing.js
@@ -149,38 +149,20 @@ couchTests.view_sandboxing = function(debug) {
db.deleteDb();
// test that runtime code evaluation can be prevented
- var couchjs_command_xhr = CouchDB.request(
- "GET", "_node/node1@127.0.0.1/_config/query_servers/javascript");
-
- var couchjs_command = JSON.parse(couchjs_command_xhr.responseText);
- var couchjs_command_args = couchjs_command.match(/\S+|"(?:\\"|[^"])+"/g);
-
- couchjs_command_args.splice(1, 0, "--no-eval");
- var new_couchjs_command = couchjs_command_args.join(" ");
-
- run_on_modified_server(
- [{section: "query_servers",
- key: "javascript",
- value: new_couchjs_command}],
- function () {
- CouchDB.request("POST", "_reload_query_servers");
-
- db_name = get_random_db_name();
- db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
- db.createDb();
+ db_name = get_random_db_name();
+ db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
- var doc = {integer: 1, string: "1", array: [1, 2, 3]};
- T(db.save(doc).ok);
+ var doc = {integer: 1, string: "1", array: [1, 2, 3]};
+ T(db.save(doc).ok);
- var results = db.query(function(doc) {
- var glob = emit.constructor('return this')();
- emit(doc._id, null);
- });
+ var results = db.query(function(doc) {
+ var glob = emit.constructor('return this')();
+ emit(doc._id, null);
+ });
- TEquals(0, results.rows.length);
- });
+ TEquals(0, results.rows.length);
// cleanup
- CouchDB.request("POST", "_reload_query_servers");
db.deleteDb();
};
diff --git a/version.mk b/version.mk
index 10a51517a..a0b8bd1e3 100644
--- a/version.mk
+++ b/version.mk
@@ -1,3 +1,3 @@
vsn_major=2
vsn_minor=1
-vsn_patch=0
+vsn_patch=1