author    Joan Touzet <wohali@users.noreply.github.com>    2018-08-06 10:41:51 -0400
committer GitHub <noreply@github.com>    2018-08-06 10:41:51 -0400
commit    54edac959fdeae6f64663be83bfb5b4ba8510b57 (patch)
tree      2bddd1a09a04301ccce5e3745c255bfe73e56e41
parent    967d1b0111bc59046f247a744ebad05e66afd666 (diff)
parent    0cbaefcaceff750c46ea612322fa1aa02d37c87e (diff)
    Merge branch 'master' into dns-cluster-discovery
-rw-r--r--  .gitignore | 2
-rw-r--r--  .travis.yml | 11
-rw-r--r--  INSTALL.Unix.md | 9
-rw-r--r--  Jenkinsfile | 153
-rw-r--r--  LICENSE | 135
-rw-r--r--  Makefile | 62
-rw-r--r--  Makefile.win | 45
-rw-r--r--  NOTICE | 9
-rwxr-xr-x  build-aux/couchdb-build-release.sh | 6
-rwxr-xr-x  configure | 9
-rw-r--r--  rebar.config.script | 25
-rw-r--r--  rel/overlay/etc/default.ini | 46
-rw-r--r--  rel/overlay/etc/local.ini | 14
-rw-r--r--  rel/reltool.config | 4
-rw-r--r--  src/chttpd/src/chttpd.erl | 70
-rw-r--r--  src/chttpd/src/chttpd_auth.erl | 4
-rw-r--r--  src/chttpd/src/chttpd_auth_request.erl | 5
-rw-r--r--  src/chttpd/src/chttpd_db.erl | 1
-rw-r--r--  src/chttpd/src/chttpd_misc.erl | 11
-rw-r--r--  src/chttpd/test/chttpd_welcome_test.erl | 2
-rw-r--r--  src/couch/rebar.config.script | 17
-rw-r--r--  src/couch/src/couch.app.src | 4
-rw-r--r--  src/couch/src/couch.erl | 1
-rw-r--r--  src/couch/src/couch_auth_cache.erl | 7
-rw-r--r--  src/couch/src/couch_bt_engine.erl | 6
-rw-r--r--  src/couch/src/couch_db.erl | 12
-rw-r--r--  src/couch/src/couch_db_updater.erl | 38
-rw-r--r--  src/couch/src/couch_file.erl | 8
-rw-r--r--  src/couch/src/couch_hash.erl | 45
-rw-r--r--  src/couch/src/couch_httpd.erl | 2
-rw-r--r--  src/couch/src/couch_httpd_auth.erl | 12
-rw-r--r--  src/couch/src/couch_httpd_misc_handlers.erl | 2
-rw-r--r--  src/couch/src/couch_key_tree.erl | 83
-rw-r--r--  src/couch/src/couch_native_process.erl | 16
-rw-r--r--  src/couch/src/couch_passwords.erl | 19
-rw-r--r--  src/couch/src/couch_query_servers.erl | 80
-rw-r--r--  src/couch/src/couch_server.erl | 7
-rw-r--r--  src/couch/src/couch_stream.erl | 18
-rw-r--r--  src/couch/src/couch_users_db.erl | 10
-rw-r--r--  src/couch/src/couch_util.erl | 50
-rw-r--r--  src/couch/src/test_engine_util.erl | 11
-rw-r--r--  src/couch/src/test_request.erl | 6
-rw-r--r--  src/couch/src/test_util.erl | 3
-rw-r--r--  src/couch/test/couch_changes_tests.erl | 38
-rw-r--r--  src/couch/test/couch_key_tree_tests.erl | 197
-rw-r--r--  src/couch/test/couch_passwords_tests.erl | 42
-rw-r--r--  src/couch/test/couchdb_attachments_tests.erl | 12
-rw-r--r--  src/couch_epi/src/couch_epi_data.erl | 2
-rw-r--r--  src/couch_epi/src/couch_epi_util.erl | 2
-rw-r--r--  src/couch_event/src/couch_event_os_sup.erl | 82
-rw-r--r--  src/couch_event/src/couch_event_sup2.erl | 7
-rw-r--r--  src/couch_index/test/couch_index_ddoc_updated_tests.erl | 2
-rw-r--r--  src/couch_log/src/couch_log_server.erl | 1
-rw-r--r--  src/couch_mrview/src/couch_mrview.erl | 32
-rw-r--r--  src/couch_mrview/src/couch_mrview_compactor.erl | 2
-rw-r--r--  src/couch_mrview/src/couch_mrview_index.erl | 3
-rw-r--r--  src/couch_mrview/src/couch_mrview_util.erl | 25
-rw-r--r--  src/couch_mrview/test/couch_mrview_index_info_tests.erl | 96
-rw-r--r--  src/couch_replicator/src/couch_replicator_auth.erl | 2
-rw-r--r--  src/couch_replicator/src/couch_replicator_ids.erl | 2
-rw-r--r--  src/couch_replicator/test/couch_replicator_many_leaves_tests.erl | 2
-rw-r--r--  src/couch_replicator/test/couch_replicator_test_helper.erl | 12
-rw-r--r--  src/ddoc_cache/src/ddoc_cache_lru.erl | 1
-rw-r--r--  src/fabric/src/fabric.erl | 9
-rw-r--r--  src/fabric/src/fabric_db_delete.erl | 6
-rw-r--r--  src/fabric/src/fabric_doc_atts.erl | 168
-rw-r--r--  src/fabric/src/fabric_rpc.erl | 7
-rw-r--r--  src/fabric/src/fabric_view.erl | 3
-rw-r--r--  src/mango/src/mango_cursor.erl | 18
-rw-r--r--  src/mango/src/mango_cursor_view.erl | 8
-rw-r--r--  src/mango/src/mango_error.erl | 4
-rw-r--r--  src/mango/src/mango_idx.erl | 36
-rw-r--r--  src/mango/src/mango_idx_special.erl | 13
-rw-r--r--  src/mango/src/mango_idx_view.erl | 30
-rw-r--r--  src/mango/src/mango_selector.erl | 113
-rw-r--r--  src/mango/test/02-basic-find-test.py | 7
-rw-r--r--  src/mango/test/18-json-sort.py | 222
-rw-r--r--  src/mango/test/19-find-conflicts.py | 41
-rw-r--r--  src/mango/test/mango.py | 5
-rw-r--r--  src/mem3/src/mem3_rep.erl | 6
-rw-r--r--  src/mem3/src/mem3_shards.erl | 1
-rw-r--r--  src/rexi/src/rexi_server.erl | 1
-rw-r--r--  src/setup/src/setup.erl | 43
-rwxr-xr-x  test/javascript/run | 8
-rw-r--r--  test/javascript/tests-cluster/with-quorum/attachments.js | 36
-rw-r--r--  test/javascript/tests-cluster/with-quorum/attachments_delete.js | 32
-rw-r--r--  test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js | 36
-rw-r--r--  test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js | 40
-rw-r--r--  test/javascript/tests-cluster/with-quorum/db_creation.js (renamed from test/javascript/tests-cluster/with-quorum/db-creation.js) | 0
-rw-r--r--  test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js | 28
-rw-r--r--  test/javascript/tests-cluster/with-quorum/db_deletion.js | 30
-rw-r--r--  test/javascript/tests-cluster/with-quorum/db_deletion_overridden_quorum.js | 23
-rw-r--r--  test/javascript/tests-cluster/with-quorum/doc_bulk.js | 25
-rw-r--r--  test/javascript/tests-cluster/with-quorum/doc_bulk_overridden_quorum.js | 25
-rw-r--r--  test/javascript/tests-cluster/with-quorum/doc_copy.js | 27
-rw-r--r--  test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js | 30
-rw-r--r--  test/javascript/tests-cluster/with-quorum/doc_crud.js | 31
-rw-r--r--  test/javascript/tests-cluster/with-quorum/doc_crud_overridden_quorum.js | 31
-rw-r--r--  test/javascript/tests-cluster/without-quorum/attachments.js | 39
-rw-r--r--  test/javascript/tests-cluster/without-quorum/attachments_delete.js | 37
-rw-r--r--  test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js | 36
-rw-r--r--  test/javascript/tests-cluster/without-quorum/attachments_overridden_quorum.js | 42
-rw-r--r--  test/javascript/tests-cluster/without-quorum/db_creation.js (renamed from test/javascript/tests-cluster/without-quorum/db-creation.js) | 3
-rw-r--r--  test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js | 30
-rw-r--r--  test/javascript/tests-cluster/without-quorum/db_deletion.js | 30
-rw-r--r--  test/javascript/tests-cluster/without-quorum/db_deletion_overridden_quorum.js | 25
-rw-r--r--  test/javascript/tests-cluster/without-quorum/doc_bulk.js | 28
-rw-r--r--  test/javascript/tests-cluster/without-quorum/doc_bulk_overridden_quorum.js | 28
-rw-r--r--  test/javascript/tests-cluster/without-quorum/doc_copy.js | 30
-rw-r--r--  test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js | 33
-rw-r--r--  test/javascript/tests-cluster/without-quorum/doc_crud.js | 35
-rw-r--r--  test/javascript/tests-cluster/without-quorum/doc_crud_overridden_quorum.js | 34
-rw-r--r--  test/javascript/tests/design_docs.js | 8
-rw-r--r--  test/javascript/tests/reduce_builtin.js | 20
-rw-r--r--  test/javascript/tests/users_db.js | 7
-rw-r--r--  test/javascript/tests/users_db_security.js | 167
116 files changed, 2509 insertions, 908 deletions
diff --git a/.gitignore b/.gitignore
index faa07f983..088303039 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,7 +28,6 @@ share/server/main.js
share/www
src/b64url/
src/bear/
-src/bcrypt/
src/config/
src/couch/priv/couch_js/config.h
src/couch/priv/couchjs
@@ -54,6 +53,7 @@ src/oauth/
src/rebar/
src/snappy/
src/triq/
+src/hyper/
tmp/
src/couch/*.o
diff --git a/.travis.yml b/.travis.yml
index fe84f87c4..acb0b5102 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,11 +3,10 @@ sudo: false
os: linux
otp_release:
- - 20.1
+ - 20.3
- 19.3
- 18.3
- 17.5
- - R16B03-1
addons:
apt:
@@ -64,6 +63,14 @@ script:
after_failure:
- build-aux/logfile-uploader.py
+# Start push builds on master and release branches; PRs build on every branch.
+# Avoid double builds on PRs (see https://github.com/travis-ci/travis-ci/issues/1147)
+branches:
+ only:
+ - master
+ - /^\d+\.x\.x$/
+ - /^\d+\.\d+\.x$/
+
# Re-enable once test suite is reliable
#notifications:
# email: false
diff --git a/INSTALL.Unix.md b/INSTALL.Unix.md
index b2d4fbdbc..f0baf58c9 100644
--- a/INSTALL.Unix.md
+++ b/INSTALL.Unix.md
@@ -74,6 +74,9 @@ You can install the documentation dependencies by running:
sudo apt-get --no-install-recommends -y install \
python-sphinx
+
+ sudo pip install --upgrade sphinx_rtd_theme nose requests hypothesis
+
Be sure to update the version numbers to match your system's available
packages.
@@ -137,8 +140,10 @@ You can install this by running:
You can install the remaining dependencies by running:
- pkg install npm4 help2man openssl icu curl git \
- autoconf automake libtool node spidermonkey185
+ pkg install help2man openssl icu curl git bash \
+ autoconf automake libtool node spidermonkey185 \
+ erlang node8 npm-node8 lang/python py27-sphinx py27-pip
+ pip install --upgrade sphinx_rtd_theme nose requests hypothesis
## Installing
diff --git a/Jenkinsfile b/Jenkinsfile
index 905a85f29..46fb7238c 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -39,13 +39,13 @@ pipeline {
}
steps {
// This image has the oldest Erlang we support, 17.5
- sh 'docker pull couchdbdev/ubuntu-14.04-erlang-default:latest'
+ sh 'docker pull couchdbdev/debian-jessie-erlang-17.5.3:latest'
timeout(time: 15, unit: "MINUTES") {
// https://github.com/jenkins-infra/jenkins.io/blob/master/Jenkinsfile#64
// We need the jenkins user mapped inside of the image
// npm config cache below is required because /home/jenkins doesn't
// ACTUALLY exist in the image
- withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-default', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
+ withDockerContainer(image: 'couchdbdev/debian-jessie-erlang-17.5.3', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
sh '''
set
rm -rf apache-couchdb-*
@@ -78,24 +78,48 @@ pipeline {
// Build packages on supported platforms using esl's erlang
stage('Test') {
steps {
- parallel(centos6erlang183: {
+ parallel(freebsd: {
+ node(label: 'couchdb && freebsd') {
+ timeout(time: 60, unit: "MINUTES") {
+ deleteDir()
+ unstash 'tarball'
+ withEnv(['HOME='+pwd()]) {
+ sh '''
+ cwd=$(pwd)
+ mkdir -p $COUCHDB_IO_LOG_DIR
+
+ # Build CouchDB from tarball & test
+ builddir=$(mktemp -d)
+ cd $builddir
+ tar -xf $cwd/apache-couchdb-*.tar.gz
+ cd apache-couchdb-*
+ ./configure --with-curl
+ gmake check || (build-aux/logfile-uploader.py && false)
+
+ # No package build for FreeBSD at this time
+ '''
+ } // withEnv
+ } // timeout
+ deleteDir()
+ } // node
+ },
+ centos6: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/centos-6-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/centos-6-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
+ sh 'docker pull couchdbdev/centos-6-erlang-19.3.6'
+ withDockerContainer(image: 'couchdbdev/centos-6-erlang-19.3.6') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
cwd=$(pwd)
mkdir -p $COUCHDB_IO_LOG_DIR
- # Build CouchDB from tarball
+ # Build CouchDB from tarball & test
builddir=$(mktemp -d)
cd $builddir
tar -xf $cwd/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
- make all
make check || (build-aux/logfile-uploader.py && false)
# Build CouchDB packages
@@ -118,24 +142,23 @@ pipeline {
deleteDir()
} // node
},
- centos7erlang183: {
+ centos7: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/centos-7-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/centos-7-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
+ sh 'docker pull couchdbdev/centos-7-erlang-19.3.6'
+ withDockerContainer(image: 'couchdbdev/centos-7-erlang-19.3.6') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
cwd=$(pwd)
mkdir -p $COUCHDB_IO_LOG_DIR
- # Build CouchDB from tarball
+ # Build CouchDB from tarball & test
builddir=$(mktemp -d)
cd $builddir
tar -xf $cwd/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
- make all
make check || (build-aux/logfile-uploader.py && false)
# Build CouchDB packages
@@ -158,24 +181,62 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1404erlang183: {
+ ubuntutrusty: {
+ node(label: 'ubuntu') {
+ timeout(time: 60, unit: "MINUTES") {
+ sh 'docker pull couchdbdev/ubuntu-trusty-erlang-19.3.6'
+ withDockerContainer(image: 'couchdbdev/ubuntu-trusty-erlang-19.3.6') {
+ sh 'rm -f apache-couchdb-*.tar.gz'
+ unstash 'tarball'
+ sh '''
+ cwd=$(pwd)
+ mkdir -p $COUCHDB_IO_LOG_DIR
+
+ # Build CouchDB from tarball & test
+ builddir=$(mktemp -d)
+ cd $builddir
+ tar -xf $cwd/apache-couchdb-*.tar.gz
+ cd apache-couchdb-*
+ ./configure --with-curl
+ make check || (build-aux/logfile-uploader.py && false)
+
+ # Build CouchDB packages
+ cd $builddir
+ git clone https://github.com/apache/couchdb-pkg
+ mkdir couchdb
+ cp $cwd/apache-couchdb-*.tar.gz couchdb
+ tar -xf $cwd/apache-couchdb-*.tar.gz -C couchdb
+ cd couchdb-pkg
+ platform=$(lsb_release -cs)
+ make $platform PLATFORM=$platform
+
+ # Cleanup & save for posterity
+ rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
+ mv ../couchdb/*.deb $cwd/pkgs/$platform || true
+ '''
+ } // withDocker
+ } // timeout
+ archiveArtifacts artifacts: 'pkgs/**', fingerprint: true
+ deleteDir()
+ } // node
+ },
+ ubuntuxenial: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-14.04-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-18.3') {
+ sh 'docker pull couchdbdev/ubuntu-xenial-erlang-19.3.6'
+ withDockerContainer(image: 'couchdbdev/ubuntu-xenial-erlang-19.3.6') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
cwd=$(pwd)
mkdir -p $COUCHDB_IO_LOG_DIR
- # Build CouchDB from tarball
+ # Build CouchDB from tarball & test
builddir=$(mktemp -d)
cd $builddir
tar -xf $cwd/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
- make all
make check || (build-aux/logfile-uploader.py && false)
# Build CouchDB packages
@@ -190,7 +251,7 @@ pipeline {
# Cleanup & save for posterity
rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
- mv ../couchdb/*deb $cwd/pkgs/$platform || true
+ mv ../couchdb/*.deb $cwd/pkgs/$platform || true
'''
} // withDocker
} // timeout
@@ -198,24 +259,23 @@ pipeline {
deleteDir()
} // node
},
- ubuntu1604erlang183: {
+ ubuntubionic: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/ubuntu-16.04-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-18.3') {
+ sh 'docker pull couchdbdev/ubuntu-bionic-erlang-19.3.6'
+ withDockerContainer(image: 'couchdbdev/ubuntu-bionic-erlang-19.3.6') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
cwd=$(pwd)
mkdir -p $COUCHDB_IO_LOG_DIR
- # Build CouchDB from tarball
+ # Build CouchDB from tarball & test
builddir=$(mktemp -d)
cd $builddir
tar -xf $cwd/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
- make all
make check || (build-aux/logfile-uploader.py && false)
# Build CouchDB packages
@@ -230,7 +290,7 @@ pipeline {
# Cleanup & save for posterity
rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
- mv ../couchdb/*deb $cwd/pkgs/$platform || true
+ mv ../couchdb/*.deb $cwd/pkgs/$platform || true
'''
} // withDocker
} // timeout
@@ -238,24 +298,23 @@ pipeline {
deleteDir()
} // node
},
- debian8erlang183: {
+ debianjessie: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/debian-8-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/debian-8-erlang-18.3') {
+ sh 'docker pull couchdbdev/debian-jessie-erlang-19.3.6'
+ withDockerContainer(image: 'couchdbdev/debian-jessie-erlang-19.3.6') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
cwd=$(pwd)
mkdir -p $COUCHDB_IO_LOG_DIR
- # Build CouchDB from tarball
+ # Build CouchDB from tarball & test
builddir=$(mktemp -d)
cd $builddir
tar -xf $cwd/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
- make all
make check || (build-aux/logfile-uploader.py && false)
# Build CouchDB packages
@@ -270,7 +329,7 @@ pipeline {
# Cleanup & save for posterity
rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
- mv ../couchdb/*deb $cwd/pkgs/$platform || true
+ mv ../couchdb/*.deb $cwd/pkgs/$platform || true
'''
} // withDocker
} // timeout
@@ -278,24 +337,23 @@ pipeline {
deleteDir()
} // node
},
- debian9erlang183: {
+ debianstretch: {
node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
- sh 'docker pull couchdbdev/debian-9-erlang-18.3'
- withDockerContainer(image: 'couchdbdev/debian-9-erlang-18.3') {
+ sh 'docker pull couchdbdev/debian-stretch-erlang-19.3.6'
+ withDockerContainer(image: 'couchdbdev/debian-stretch-erlang-19.3.6') {
sh 'rm -f apache-couchdb-*.tar.gz'
unstash 'tarball'
sh '''
cwd=$(pwd)
mkdir -p $COUCHDB_IO_LOG_DIR
- # Build CouchDB from tarball
+ # Build CouchDB from tarball & test
builddir=$(mktemp -d)
cd $builddir
tar -xf $cwd/apache-couchdb-*.tar.gz
cd apache-couchdb-*
./configure --with-curl
- make all
make check || (build-aux/logfile-uploader.py && false)
# Build CouchDB packages
@@ -310,7 +368,7 @@ pipeline {
# Cleanup & save for posterity
rm -rf $cwd/pkgs/$platform && mkdir -p $cwd/pkgs/$platform
- mv ../couchdb/*deb $cwd/pkgs/$platform || true
+ mv ../couchdb/*.deb $cwd/pkgs/$platform || true
'''
} // withDocker
} // timeout
@@ -335,8 +393,8 @@ pipeline {
}
}
steps {
- sh 'docker pull couchdbdev/debian-8-base:latest'
- withDockerContainer(image: 'couchdbdev/debian-8-base:latest', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
+ sh 'docker pull couchdbdev/debian-stretch-erlang-19.3.6:latest'
+ withDockerContainer(image: 'couchdbdev/debian-stretch-erlang-19.3.6:latest', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
withCredentials([file(credentialsId: 'jenkins-key', variable: 'KEY')]) {
sh 'rm -rf pkgs *.tar.gz'
unarchive mapping: ['pkgs/' : '.']
@@ -346,19 +404,28 @@ pipeline {
rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@couchdb-vm2.apache.org:/var/www/html/$BRANCH_NAME . || mkdir -p $BRANCH_NAME
rm -rf $BRANCH_NAME/debian/* $BRANCH_NAME/el6/* $BRANCH_NAME/el7/*
mkdir -p $BRANCH_NAME/debian $BRANCH_NAME/el6 $BRANCH_NAME/el7 $BRANCH_NAME/source
+ rsync -avz -e "ssh -o StrictHostKeyChecking=no -i $KEY" jenkins@couchdb-vm2.apache.org:/var/www/html/js .
'''
echo 'Building Debian repo...'
sh '''
git clone https://github.com/apache/couchdb-pkg
- reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*deb
- reprepro -b couchdb-pkg/repo includedeb trusty pkgs/trusty/*deb
- reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*deb
- reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*deb
+ cp js/debian-jessie/*.deb pkgs/jessie
+ reprepro -b couchdb-pkg/repo includedeb jessie pkgs/jessie/*.deb
+ cp js/debian-stretch/*.deb pkgs/stretch
+ reprepro -b couchdb-pkg/repo includedeb stretch pkgs/stretch/*.deb
+ cp js/ubuntu-trusty/*.deb pkgs/trusty
+ reprepro -b couchdb-pkg/repo includedeb trusty pkgs/trusty/*.deb
+ cp js/ubuntu-xenial/*.deb pkgs/xenial
+ reprepro -b couchdb-pkg/repo includedeb xenial pkgs/xenial/*.deb
+ cp js/ubuntu-bionic/*.deb pkgs/bionic
+ reprepro -b couchdb-pkg/repo includedeb bionic pkgs/bionic/*.deb
'''
echo 'Building CentOS repos...'
sh '''
+ cp js/centos-6/*rpm pkgs/centos6
+ cp js/centos-7/*rpm pkgs/centos7
cd pkgs/centos6 && createrepo --database .
- cd ../centos7 && rm -f js* && createrepo --database .
+ cd ../centos7 && createrepo --database .
'''
echo 'Building tree to upload...'
sh '''
diff --git a/LICENSE b/LICENSE
index a209352a0..83a1aa7df 100644
--- a/LICENSE
+++ b/LICENSE
@@ -2158,119 +2158,26 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-The Erlang code is subject to this license:
-
-%% Copyright (c) 2011 Hunter Morris <hunter.morris@smarkets.com>
-
-%% Permission to use, copy, modify, and distribute this software for any
-%% purpose with or without fee is hereby granted, provided that the above
-%% copyright notice and this permission notice appear in all copies.
-
-%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-The underlying blowfish code is derived from OpenBSD libc and is
-subject to the following license:
-
-/*
- * Blowfish block cipher for OpenBSD
- * Copyright 1997 Niels Provos <provos@physnet.uni-hamburg.de>
- * All rights reserved.
- *
- * Implementation advice by David Mazieres <dm@lcs.mit.edu>.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Niels Provos.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-The underlying bcrypt (hashing) code is derived from OpenBSD libc and is
-subject to the following license:
-
-/*
- * Copyright 1997 Niels Provos <provos@physnet.uni-hamburg.de>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Niels Provos.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-The asynchronous queue code (c_src/async_queue.c and
-c_src/async_queue.h) is from the esnappy project, copyright 2011
-Konstantin V. Sorokin. It is subject to the following license:
-
-Copyright (c) 2011 Konstantin V. Sorokin
-All rights reserved.
+For the src/hyper component:
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
+The MIT License (MIT)
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-3. Neither the name of the copyright holder nor the names of contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
+Copyright (c) 2014 Game Analytics ApS
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Makefile b/Makefile
index d56d4e897..0ab7b4eed 100644
--- a/Makefile
+++ b/Makefile
@@ -13,16 +13,49 @@
include version.mk
REBAR?=$(shell echo `pwd`/bin/rebar)
+
+# Handle the following scenarios:
+# 1. When building from a tarball, use version.mk.
+# 2. When building from a clean release tag (#.#.#), use that tag.
+# 3. When building from a clean RC tag (#.#.#-RC#), use JUST the version
+# number inside the tarball, but use the full name for the name of the
+# tarball itself.
+# 4. When not on a clean tag, use version.mk + git sha + dirty status.
+
+COUCHDB_GIT_SHA=$(git_sha)
+
IN_RELEASE = $(shell if [ ! -d .git ]; then echo true; fi)
ifeq ($(IN_RELEASE), true)
+
+# 1. Building from tarball, use version.mk.
COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)
+
else
-RELTAG = $(shell git describe | grep -E '^[0-9]+\.[0-9]\.[0-9]+(-RC[0-9]+)?$$')
-ifeq ($(RELTAG),)
-COUCHDB_VERSION_SUFFIX = $(shell git rev-parse --short --verify HEAD)
+
+# Gather some additional information.
+# We do it this way so we don't bake shell-isms into Makefile
+# to make it easier to port to Windows. I know, I know. -jst
+# IN_RC contains the -RCx suffix in the name if present
+IN_RC = $(shell git describe --tags --always --first-parent \
+ | grep -Eo -- '-RC[0-9]+' 2>/dev/null)
+# ON_TAG matches *ONLY* if we are on a release or RC tag
+ON_TAG = $(shell git describe --tags --always --first-parent \
+ | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+(-RC[0-9]+)?$$' 2>/dev/null)
+# RELTAG contains the #.#.# from git describe, which might be used
+RELTAG = $(shell git describe --tags --always --first-parent \
+ | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+' 2>/dev/null)
+# DIRTY is "-dirty" if the working tree has uncommitted changes
+DIRTY = $(shell git describe --dirty | grep -Eo -- '-dirty' 2>/dev/null)
+# COUCHDB_GIT_SHA is our current git hash.
+COUCHDB_GIT_SHA=$(shell git rev-parse --short --verify HEAD)
+
+ifeq ($(ON_TAG),)
+# 4. Not on a tag.
+COUCHDB_VERSION_SUFFIX = $(COUCHDB_GIT_SHA)$(DIRTY)
COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)-$(COUCHDB_VERSION_SUFFIX)
else
-COUCHDB_VERSION = $(RELTAG)
+# 2 and 3. On a tag.
+COUCHDB_VERSION = $(RELTAG)$(DIRTY)
endif
endif
@@ -30,20 +63,23 @@ DESTDIR=
# Rebar options
apps=
-skip_deps=folsom,meck,mochiweb,triq,snappy
+skip_deps=folsom,meck,mochiweb,triq,snappy,bcrypt,hyper
suites=
tests=
+COMPILE_OPTS=$(shell echo "\
+ apps=$(apps) \
+ " | sed -e 's/[a-z_]\{1,\}= / /g')
EUNIT_OPTS=$(shell echo "\
apps=$(apps) \
skip_deps=$(skip_deps) \
suites=$(suites) \
tests=$(tests) \
- " | sed -e 's/[a-z]\+= / /g')
+ " | sed -e 's/[a-z]\{1,\}= / /g')
DIALYZE_OPTS=$(shell echo "\
apps=$(apps) \
skip_deps=$(skip_deps) \
- " | sed -e 's/[a-z]\+= / /g')
+ " | sed -e 's/[a-z]\{1,\}= / /g')
#ignore javascript tests
ignore_js_suites=
@@ -73,9 +109,9 @@ help:
.PHONY: couch
-# target: couch - Build CouchDB core
+# target: couch - Build CouchDB core; use ERL_OPTS to pass custom compiler options
couch: config.erl
- @COUCHDB_VERSION=$(COUCHDB_VERSION) $(REBAR) compile
+ @COUCHDB_VERSION=$(COUCHDB_VERSION) COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) $(REBAR) compile $(COMPILE_OPTS)
@cp src/couch/priv/couchjs bin/
@@ -218,7 +254,7 @@ list-eunit-apps:
.PHONY: list-eunit-suites
# target: list-eunit-suites - List EUnit target test suites
list-eunit-suites:
- @find ./src/ -type f -name *_test.erl -o -name *_tests.erl -printf "%f\n" \
+ @find ./src/ -type f -name *_test.erl -o -name *_tests.erl -exec basename {} \; \
| cut -d '.' -f -1 \
| sort
@@ -226,7 +262,7 @@ list-eunit-suites:
.PHONY: list-js-suites
# target: list-js-suites - List JavaScript test suites
list-js-suites:
- @find ./test/javascript/tests/ -type f -name *.js -printf "%f\n" \
+ @find ./test/javascript/tests/ -type f -name *.js -exec basename {} \; \
| cut -d '.' -f -1 \
| sort
@@ -289,8 +325,8 @@ dist: all
@mkdir -p apache-couchdb-$(COUCHDB_VERSION)/share/docs/man
@cp src/docs/build/man/apachecouchdb.1 apache-couchdb-$(COUCHDB_VERSION)/share/docs/man/
- @tar czf apache-couchdb-$(COUCHDB_VERSION).tar.gz apache-couchdb-$(COUCHDB_VERSION)
- @echo "Done: apache-couchdb-$(COUCHDB_VERSION).tar.gz"
+ @tar czf apache-couchdb-$(COUCHDB_VERSION)$(IN_RC).tar.gz apache-couchdb-$(COUCHDB_VERSION)
+ @echo "Done: apache-couchdb-$(COUCHDB_VERSION)$(IN_RC).tar.gz"
.PHONY: release
diff --git a/Makefile.win b/Makefile.win
index 5a2a73ab1..67c15fc69 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -14,13 +14,50 @@ include version.mk
SHELL=cmd.exe
REBAR?=$(shell where rebar.cmd)
+
+# Handle the following scenarios:
+# 1. When building from a tarball, use version.mk.
+# 2. When building from a clean release tag (#.#.#), use that tag.
+# 3. When building from a clean RC tag (#.#.#-RC#), use JUST the version
+# number inside the tarball, but use the full name for the name of the
+# tarball itself.
+# 4. When not on a clean tag, use version.mk + git sha + dirty status.
+
+COUCHDB_GIT_SHA=$(git_sha)
IN_RELEASE = $(shell if not exist .git echo true)
+
ifeq ($(IN_RELEASE), true)
-COUCHDB_VERSION_SUFFIX=
+
+# 1. Building from tarball, use version.mk.
COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)
+
+else
+
+# Gather some additional information.
+# We do it this way so we don't bake shell-isms into Makefile
+# to make it easier to port to Windows. I know, I know. -jst
+# COUCHDB_GIT_SHA is our current git hash.
+COUCHDB_GIT_SHA=$(shell git rev-parse --short --verify HEAD)
+# IN_RC contains the -RCx suffix in the name if present
+IN_RC = $(shell git describe --tags --always --first-parent \
+ | grep -Eo -- '-RC[0-9]+' 2>/dev/null)
+# ON_TAG matches *ONLY* if we are on a release or RC tag
+ON_TAG = $(shell git describe --tags --always --first-parent \
+ | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+(-RC[0-9]+)?$$' 2>/dev/null)
+# RELTAG contains the #.#.# from git describe, which might be used
+RELTAG = $(shell git describe --tags --always --first-parent \
+ | grep -Eo -- '^[0-9]+\.[0-9]\.[0-9]+' 2>/dev/null)
+# DIRTY is "-dirty" if the working tree has uncommitted changes
+DIRTY = $(shell git describe --dirty | grep -Eo -- '-dirty' 2>/dev/null)
+
+ifeq ($(ON_TAG),)
+# 4. Not on a tag.
+COUCHDB_VERSION_SUFFIX = $(COUCHDB_GIT_SHA)$(DIRTY)
+COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)-$(COUCHDB_VERSION_SUFFIX)
else
-COUCHDB_VERSION_SUFFIX = -$(shell git rev-parse --short --verify HEAD)
-COUCHDB_VERSION = $(vsn_major).$(vsn_minor).$(vsn_patch)$(COUCHDB_VERSION_SUFFIX)
+# 2 and 3. On a tag.
+COUCHDB_VERSION = $(RELTAG)$(DIRTY)
+endif
endif
DESTDIR=
@@ -53,7 +90,7 @@ all: couch fauxton docs
.PHONY: couch
# target: couch - Build CouchDB core
couch: config.erl
- @set COUCHDB_VERSION=$(COUCHDB_VERSION) && $(REBAR) compile
+ @set COUCHDB_VERSION=$(COUCHDB_VERSION) && set COUCHDB_GIT_SHA=$(COUCHDB_GIT_SHA) && $(REBAR) compile
@copy src\couch\priv\couchjs.exe bin
diff --git a/NOTICE b/NOTICE
index c04033897..f703af216 100644
--- a/NOTICE
+++ b/NOTICE
@@ -178,9 +178,6 @@ This product also includes the following third-party components:
Copyright (c) 2015 Twitter, Inc.
-* erlang-bcrypt
- - Erlang code: Copyright (c) 2011 Hunter Morris <hunter.morris@smarkets.com>
- - Blowfish block cipher & bcrypt (hashing) code for OpenBSD, Copyright
- 1997 Niels Provos <provos@physnet.uni-hamburg.de>
- - The asynchronous queue code (c_src/async_queue.c and c_src/async_queue.h)
- is from the esnappy project, copyright 2011 Konstantin V. Sorokin.
+* hyper
+
+ Copyright (c) 2014 Game Analytics ApS
diff --git a/build-aux/couchdb-build-release.sh b/build-aux/couchdb-build-release.sh
index 4482b713c..2d219e5e4 100755
--- a/build-aux/couchdb-build-release.sh
+++ b/build-aux/couchdb-build-release.sh
@@ -35,8 +35,12 @@ done
cd ..
-# create CONTRIBUTORS file
+
if test -e .git; then
+ # save git sha in version.mk
+ git_sha=`git rev-parse --short HEAD`
+ echo "git_sha=$git_sha" >> $RELDIR/version.mk
+ # create CONTRIBUTORS file
OS=`uname -s`
case "$OS" in
Linux|CYGWIN*) # GNU sed
diff --git a/configure b/configure
index fa0dfed6a..370c964ae 100755
--- a/configure
+++ b/configure
@@ -25,6 +25,7 @@ PACKAGE_AUTHOR_NAME="The Apache Software Foundation"
WITH_CURL="false"
WITH_FAUXTON=1
WITH_DOCS=1
+ERLANG_MD5="false"
SKIP_DEPS=0
COUCHDB_USER="$(whoami 2>/dev/null || echo couchdb)"
@@ -46,6 +47,7 @@ Options:
-c | --with-curl request that couchjs is linked to cURL (default false)
--disable-fauxton do not build Fauxton
--disable-docs do not build any documentation or manpages
+ --erlang-md5 use erlang for md5 hash operations
--dev alias for --with-curl --disable-docs --disable-fauxton
--skip-deps do not update erlang dependencies
--rebar=PATH use rebar by specified path (version >=2.6.0 && <3.0 required)
@@ -78,6 +80,12 @@ parse_opts() {
continue
;;
+ --erlang-md5)
+ ERLANG_MD5="true"
+ shift
+ continue
+ ;;
+
--dev)
WITH_DOCS=0
WITH_FAUXTON=0
@@ -195,6 +203,7 @@ EOF
cat > $rootdir/config.erl << EOF
{with_curl, $WITH_CURL}.
+{erlang_md5, $ERLANG_MD5}.
EOF
install_local_rebar() {
diff --git a/rebar.config.script b/rebar.config.script
index 0cbc21faf..cd469553c 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -47,27 +47,25 @@ SubDirs = [
DepDescs = [
%% Independent Apps
-{config, "config", {tag, "1.0.3"}},
+{config, "config", {tag, "1.0.4"}},
{b64url, "b64url", {tag, "1.0.1"}},
{ets_lru, "ets-lru", {tag, "1.0.0"}},
{khash, "khash", {tag, "1.0.1"}},
-{snappy, "snappy", {tag, "CouchDB-1.0.0"}},
+{snappy, "snappy", {tag, "CouchDB-1.0.1"}},
{ioq, "ioq", {tag, "1.0.1"}},
%% Non-Erlang deps
{docs, {url, "https://github.com/apache/couchdb-documentation"},
- {tag, "2.1.0"}, [raw]},
+ {tag, "2.2.0"}, [raw]},
{fauxton, {url, "https://github.com/apache/couchdb-fauxton"},
- {tag, "v1.1.15"}, [raw]},
+ {tag, "v1.1.17"}, [raw]},
%% Third party deps
{folsom, "folsom", {tag, "CouchDB-0.8.2"}},
+{hyper, "hyper", {tag, "CouchDB-2.2.0-4"}},
{ibrowse, "ibrowse", {tag, "CouchDB-4.0.1"}},
{jiffy, "jiffy", {tag, "CouchDB-0.14.11-2"}},
{mochiweb, "mochiweb", {tag, "v2.17.0"}},
-{meck, "meck", {tag, "0.8.8"}},
-{bcrypt, {url, "https://github.com/apache/couchdb-erlang-bcrypt"},
- {tag, "1.0.2"}},
-{triq, "triq", {tag, "v1.2.0"}}
+{meck, "meck", {tag, "0.8.8"}}
],
@@ -86,20 +84,25 @@ MakeDep = fun
{AppName, ".*", {git, Url, Version}, Options}
end,
+ErlOpts = case os:getenv("ERL_OPTS") of
+ false -> [];
+ Opts -> [list_to_atom(O) || O <- string:tokens(Opts, ",")]
+end,
+
AddConfig = [
- {require_otp_vsn, "R16B03|R16B03-1|17|18|19|20"},
+ {require_otp_vsn, "17|18|19|20"},
{deps_dir, "src"},
{deps, lists:map(MakeDep, DepDescs)},
{sub_dirs, SubDirs},
{lib_dirs, ["src"]},
- {erl_opts, [bin_opt_info, debug_info, {i, "../"}]},
+ {erl_opts, [{i, "../"} | ErlOpts]},
{eunit_opts, [verbose]},
{plugins, [eunit_plugin]},
{dialyzer, [
{plt_location, local},
{plt_location, COUCHDB_ROOT},
{plt_extra_apps, [
- asn1, bcrypt, compiler, crypto, inets, kernel, os_mon, runtime_tools,
+ asn1, compiler, crypto, inets, kernel, os_mon, runtime_tools,
sasl, setup, ssl, stdlib, syntax_tools, xmerl]},
{warnings, [unmatched_returns, error_handling, race_conditions]}]},
{post_hooks, [{compile, "escript support/build_js.escript"}]}
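
The ERL_OPTS hook above replaces the previously hard-coded bin_opt_info and debug_info compiler flags. A minimal standalone sketch of the same parsing (hypothetical module name, for illustration only):

    -module(erl_opts_sketch).
    -export([parse/0]).

    %% Mirrors the ERL_OPTS handling in rebar.config.script above: a
    %% comma-separated environment variable becomes a list of atoms
    %% suitable for rebar's erl_opts.
    parse() ->
        case os:getenv("ERL_OPTS") of
            false -> [];
            Opts  -> [list_to_atom(O) || O <- string:tokens(Opts, ",")]
        end.

For example, building with ERL_OPTS="native,debug_info" would yield [native,debug_info].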
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 8b47cb04a..084a16ec2 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -8,7 +8,7 @@ database_dir = {{data_dir}}
view_index_dir = {{view_index_dir}}
; util_driver_dir =
; plugin_dir =
-os_process_timeout = 5000 ; 5 seconds. for view and external servers.
+os_process_timeout = 5000 ; 5 seconds. for view servers.
max_dbs_open = 500
delayed_commits = false
; Method used to compress everything that is appended to database and view index files, except
@@ -93,6 +93,14 @@ prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETa
; _dbs_info in a request
max_db_number_for_dbs_info_req = 100
+; authentication handlers
+; authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+; uncomment the next line to enable proxy authentication
+; authentication_handlers = {chttpd_auth, proxy_authentication_handler}, {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, default_authentication_handler}
+
+; prevent non-admins from accessing /_all_dbs
+;admin_only_all_dbs = false
+
[database_compaction]
; larger buffer sizes can originate smaller files
doc_buffer_size = 524288 ; value in bytes
@@ -144,7 +152,7 @@ enable_xframe_options = false
; x_forwarded_proto = X-Forwarded-Proto
; x_forwarded_ssl = X-Forwarded-Ssl
; Maximum allowed http request size. Applies to both clustered and local port.
-max_http_request_size = 67108864 ; 64 MB
+max_http_request_size = 4294967296 ; 4GB
; [httpd_design_handlers]
; _view =
@@ -198,8 +206,7 @@ require_valid_user = false
timeout = 600 ; number of seconds before automatic logout
auth_cache_size = 50 ; size is number of cache entries
allow_persistent_cookies = false ; set to true to allow persistent cookies
-iterations = 10 ; iterations for PBKDF2 password hashing
-log_rounds = 10 ; 2^log_rounds iterations for Bcrypt password hashing
+iterations = 10 ; iterations for password hashing
; min_iterations = 1
; max_iterations = 1000000000
; password_scheme = pbkdf2
@@ -277,7 +284,6 @@ os_process_limit = 100
[daemons]
index_server={couch_index_server, start_link, []}
-external_manager={couch_external_manager, start_link, []}
query_servers={couch_proc_manager, start_link, []}
vhosts={couch_httpd_vhost, start_link, []}
httpd={couch_httpd, start_link, []}
@@ -322,12 +328,6 @@ _design = {couch_httpd_db, handle_design_req}
_temp_view = {couch_mrview_http, handle_temp_view_req}
_view_cleanup = {couch_mrview_http, handle_cleanup_req}
-; The external module takes an optional argument allowing you to narrow it to a
-; single script. Otherwise the script name is inferred from the first path section
-; after _external's own path.
-; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
-; _external = {couch_httpd_external, handle_external_req}
-
[httpd_design_handlers]
_compact = {couch_mrview_http, handle_compact_req}
_info = {couch_mrview_http, handle_info_req}
@@ -338,21 +338,6 @@ _update = {couch_mrview_show, handle_doc_update_req}
_view = {couch_mrview_http, handle_view_req}
_view_changes = {couch_mrview_http, handle_view_changes_req}
-; enable external as an httpd handler, then link it with commands here.
-; note, this api is still under consideration.
-; [external]
-; mykey = /path/to/mycommand
-
-; Here you can setup commands for CouchDB to manage
-; while it is alive. It will attempt to keep each command
-; alive if it exits.
-; [os_daemons]
-; some_daemon_name = /path/to/script -with args
-; [os_daemon_settings]
-; max_retries = 3
-; retry_time = 5
-
-
[uuids]
; Known algorithms:
; random - 128 bits of random awesome
@@ -448,13 +433,12 @@ ssl_certificate_max_depth = 3
; There are currently two plugins available:
; couch_replicator_auth_session - use _session cookie authentication
; couch_replicator_auth_noop - use basic authentication (previous default)
-; Currently previous default behavior is still the default. To start using
-; session auth, use this as the list of plugins:
-; `couch_replicator_auth_session,couch_replicator_auth_noop`.
-; In a future release the session plugin might be used by default.
+; Currently, the new _session cookie authentication is tried first, before
+; falling back to the old basic authentication default:
+;auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop
+; To restore the old behaviour, use the following value:
;auth_plugins = couch_replicator_auth_noop
-
[compaction_daemon]
; The delay, in seconds, between each check for which database and view indexes
; need to be compacted.
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index 6b46f0fa1..e3b7b1502 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -46,23 +46,12 @@
[query_servers]
;nodejs = /usr/local/bin/couchjs-node /path/to/couchdb/share/server/main.js
-
-[httpd_global_handlers]
-;_google = {couch_httpd_proxy, handle_proxy_req, <<"http://www.google.com">>}
-
[couch_httpd_auth]
; If you set this to true, you should also uncomment the WWW-Authenticate line
; above. If you don't configure a WWW-Authenticate header, CouchDB will send
; Basic realm="server" in order to prevent you getting logged out.
; require_valid_user = false
-[os_daemons]
-; For any commands listed here, CouchDB will attempt to ensure that
-; the process remains alive. Daemons should monitor their environment
-; to know when to exit. This can most easily be accomplished by exiting
-; when stdin is closed.
-;foo = /path/to/command -with args
-
[daemons]
; enable SSL support by uncommenting the following line and supply the PEM's below.
; the default ssl port CouchDB listens on is 6984
@@ -103,9 +92,6 @@
[vhosts]
;example.com = /database/
-[update_notification]
-;unique notifier name=/full/path/to/exe -with "cmd line arg"
-
; To create an admin account uncomment the '[admins]' section below and add a
; line in the format 'username = password'. When you next start CouchDB, it
; will change the password to a hash (so that your passwords don't linger
diff --git a/rel/reltool.config b/rel/reltool.config
index aa3100647..2c55d0900 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -15,7 +15,6 @@
{rel, "couchdb", "2.2.0", [
%% stdlib
asn1,
- bcrypt,
compiler,
crypto,
inets,
@@ -47,6 +46,7 @@
fabric,
folsom,
global_changes,
+ hyper,
ibrowse,
ioq,
jiffy,
@@ -67,7 +67,6 @@
%% stdlib
{app, asn1, [{incl_cond, include}]},
- {app, bcrypt, [{incl_cond, include}]},
{app, compiler, [{incl_cond, include}]},
{app, crypto, [{incl_cond, include}]},
{app, inets, [{incl_cond, include}]},
@@ -101,6 +100,7 @@
{app, fabric, [{incl_cond, include}]},
{app, folsom, [{incl_cond, include}]},
{app, global_changes, [{incl_cond, include}]},
+ {app, hyper, [{incl_cond, include}]},
{app, ibrowse, [{incl_cond, include}]},
{app, ioq, [{incl_cond, include}]},
{app, jiffy, [{incl_cond, include}]},
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index 6be0d1848..a5628396b 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -104,10 +104,12 @@ start_link(https) ->
end,
SslOpts = ServerOpts ++ ClientOpts,
- Options =
+ Options0 =
[{port, Port},
{ssl, true},
{ssl_opts, SslOpts}],
+ CustomServerOpts = get_server_options("httpsd"),
+ Options = merge_server_options(Options0, CustomServerOpts),
start_link(https, Options).
start_link(Name, Options) ->
@@ -124,9 +126,8 @@ start_link(Name, Options) ->
{name, Name},
{ip, IP}
],
- ServerOptsCfg = config:get("chttpd", "server_options", "[]"),
- {ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
- Options2 = lists:keymerge(1, lists:sort(Options1), lists:sort(ServerOpts)),
+ ServerOpts = get_server_options("chttpd"),
+ Options2 = merge_server_options(Options1, ServerOpts),
case mochiweb_http:start(Options2) of
{ok, Pid} ->
{ok, Pid};
@@ -135,6 +136,14 @@ start_link(Name, Options) ->
{error, Reason}
end.
+get_server_options(Module) ->
+ ServerOptsCfg = config:get(Module, "server_options", "[]"),
+ {ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
+ ServerOpts.
+
+merge_server_options(A, B) ->
+ lists:keymerge(1, lists:sort(A), lists:sort(B)).
+
stop() ->
catch mochiweb_http:stop(https),
mochiweb_http:stop(?MODULE).
@@ -288,11 +297,7 @@ process_request(#httpd{mochi_req = MochiReq} = HttpReq) ->
not_preflight ->
case chttpd_auth:authenticate(HttpReq, fun authenticate_request/1) of
#httpd{} = Req ->
- HandlerFun = chttpd_handlers:url_handler(
- HandlerKey, fun chttpd_db:handle_request/1),
- AuthorizedReq = chttpd_auth:authorize(possibly_hack(Req),
- fun chttpd_auth_request:authorize_request/1),
- {AuthorizedReq, HandlerFun(AuthorizedReq)};
+ handle_req_after_auth(HandlerKey, Req);
Response ->
{HttpReq, Response}
end;
@@ -303,6 +308,17 @@ process_request(#httpd{mochi_req = MochiReq} = HttpReq) ->
{HttpReq, catch_error(HttpReq, Tag, Error)}
end.
+handle_req_after_auth(HandlerKey, HttpReq) ->
+ try
+ HandlerFun = chttpd_handlers:url_handler(HandlerKey,
+ fun chttpd_db:handle_request/1),
+ AuthorizedReq = chttpd_auth:authorize(possibly_hack(HttpReq),
+ fun chttpd_auth_request:authorize_request/1),
+ {AuthorizedReq, HandlerFun(AuthorizedReq)}
+ catch Tag:Error ->
+ {HttpReq, catch_error(HttpReq, Tag, Error)}
+ end.
+
catch_error(_HttpReq, throw, {http_head_abort, Resp}) ->
{ok, Resp};
catch_error(_HttpReq, throw, {http_abort, Resp, Reason}) ->
@@ -665,7 +681,7 @@ doc_etag(#doc{id=Id, body=Body, revs={Start, [DiskRev|_]}}) ->
couch_httpd:doc_etag(Id, Body, {Start, DiskRev}).
make_etag(Term) ->
- <<SigInt:128/integer>> = crypto:hash(md5, term_to_binary(Term)),
+ <<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
list_to_binary(io_lib:format("\"~.36B\"",[SigInt])).
etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
@@ -1238,4 +1254,38 @@ test_log_request(RawPath, UserCtx) ->
ok = meck:unload(couch_log),
Message.
+handle_req_after_auth_test() ->
+ Headers = mochiweb_headers:make([{"HOST", "127.0.0.1:15984"}]),
+ MochiReq = mochiweb_request:new(socket, [], 'PUT', "/newdb", version,
+ Headers),
+ UserCtx = #user_ctx{name = <<"retain_user">>},
+ Roles = [<<"_reader">>],
+ AuthorizedCtx = #user_ctx{name = <<"retain_user">>, roles = Roles},
+ Req = #httpd{
+ mochi_req = MochiReq,
+ begin_ts = {1458,588713,124003},
+ original_method = 'PUT',
+ peer = "127.0.0.1",
+ nonce = "nonce",
+ user_ctx = UserCtx
+ },
+ AuthorizedReq = Req#httpd{user_ctx = AuthorizedCtx},
+ ok = meck:new(chttpd_handlers, [passthrough]),
+ ok = meck:new(chttpd_auth, [passthrough]),
+ ok = meck:expect(chttpd_handlers, url_handler, fun(_Key, _Fun) ->
+ fun(_Req) -> handled_authorized_req end
+ end),
+ ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
+ AuthorizedReq
+ end),
+ ?assertEqual({AuthorizedReq, handled_authorized_req},
+ handle_req_after_auth(foo_key, Req)),
+ ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
+ meck:exception(throw, {http_abort, resp, some_reason})
+ end),
+ ?assertEqual({Req, {aborted, resp, some_reason}},
+ handle_req_after_auth(foo_key, Req)),
+ ok = meck:unload(chttpd_handlers),
+ ok = meck:unload(chttpd_auth).
+
-endif.
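
A note on the merge semantics of merge_server_options/2 above: lists:keymerge/3 merges two key-sorted lists and keeps tuples from both sides on a key collision, with the first list's tuple sorting ahead. A small self-contained illustration (hypothetical module name):

    -module(merge_opts_sketch).
    -export([demo/0]).

    merge_server_options(A, B) ->
        lists:keymerge(1, lists:sort(A), lists:sort(B)).

    demo() ->
        Base   = [{port, 6984}, {ssl, true}],
        Custom = [{nodelay, true}, {port, 9999}],
        %% => [{nodelay,true},{port,6984},{port,9999},{ssl,true}]
        merge_server_options(Base, Custom).

Since mochiweb reads these options as a proplist, the first tuple for a given key wins, so base options like the listener name are not clobbered by a server_options config entry (an observation about the code above, not something this diff asserts).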
diff --git a/src/chttpd/src/chttpd_auth.erl b/src/chttpd/src/chttpd_auth.erl
index be12148f3..6602468e1 100644
--- a/src/chttpd/src/chttpd_auth.erl
+++ b/src/chttpd/src/chttpd_auth.erl
@@ -17,6 +17,7 @@
-export([default_authentication_handler/1]).
-export([cookie_authentication_handler/1]).
+-export([proxy_authentication_handler/1]).
-export([party_mode_handler/1]).
-export([handle_session_req/1]).
@@ -47,6 +48,9 @@ default_authentication_handler(Req) ->
cookie_authentication_handler(Req) ->
couch_httpd_auth:cookie_authentication_handler(Req, chttpd_auth_cache).
+proxy_authentication_handler(Req) ->
+ couch_httpd_auth:proxy_authentication_handler(Req).
+
party_mode_handler(Req) ->
case config:get("chttpd", "require_valid_user", "false") of
"true" ->
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index 05c5e8e35..9110ed6bc 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -34,7 +34,10 @@ authorize_request_int(#httpd{path_parts=[]}=Req) ->
authorize_request_int(#httpd{path_parts=[<<"favicon.ico">>|_]}=Req) ->
Req;
authorize_request_int(#httpd{path_parts=[<<"_all_dbs">>|_]}=Req) ->
- Req;
+ case config:get_boolean("chttpd", "admin_only_all_dbs", false) of
+ true -> require_admin(Req);
+ false -> Req
+ end;
authorize_request_int(#httpd{path_parts=[<<"_dbs_info">>|_]}=Req) ->
Req;
authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='PUT'}=Req) ->
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index ed0adead9..776100730 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -314,7 +314,6 @@ delete_db_req(#httpd{}=Req, DbName) ->
end.
do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
- fabric:get_security(DbName, [{user_ctx,Ctx}]), % calls check_is_reader
{ok, Db} = couch_db:clustered_db(DbName, Ctx),
Fun(Req, Db).
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 253da233e..596e0142b 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -49,6 +49,7 @@ handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
send_json(Req, {[
{couchdb, WelcomeMessage},
{version, list_to_binary(couch_server:get_version())},
+ {git_sha, list_to_binary(couch_server:get_git_sha())},
{features, config:features()}
] ++ case config:get("vendor") of
[] ->
@@ -293,11 +294,15 @@ handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
% "value"
handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
couch_util:check_config_blacklist(Section),
- Value = chttpd:json_body(Req),
+ Value = couch_util:trim(chttpd:json_body(Req)),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
OldValue = call_node(Node, config, get, [Section, Key, ""]),
- ok = call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]),
- send_json(Req, 200, list_to_binary(OldValue));
+ case call_node(Node, config, set, [Section, Key, ?b2l(Value), Persist]) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ {error, Reason} ->
+ chttpd:send_error(Req, {bad_request, Reason})
+ end;
% GET /_node/$node/_config/Section/Key
handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
case call_node(Node, config, get, [Section, Key, undefined]) of
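
chttpd:json_body/1 returns a binary for a JSON string, so the couch_util:trim/1 call added above implies a trim that handles binaries as well as lists. The corresponding couch_util.erl hunk (50 changed lines in the diffstat) is not shown on this page; a minimal sketch of a trim with that shape, offered as an assumption:

    -module(trim_sketch).
    -export([trim/1]).

    %% Strip leading and trailing whitespace from a binary or a string.
    %% Sketch only -- the real couch_util:trim/1 change is not shown here.
    trim(Bin) when is_binary(Bin) ->
        re:replace(Bin, "^\\s+|\\s+$", "", [global, {return, binary}]);
    trim(Str) when is_list(Str) ->
        re:replace(Str, "^\\s+|\\s+$", "", [global, {return, list}]).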
diff --git a/src/chttpd/test/chttpd_welcome_test.erl b/src/chttpd/test/chttpd_welcome_test.erl
index af9732f57..b737abd7a 100644
--- a/src/chttpd/test/chttpd_welcome_test.erl
+++ b/src/chttpd/test/chttpd_welcome_test.erl
@@ -60,6 +60,8 @@ should_have_version(Url) ->
Version = couch_util:get_value(<<"version">>, Json, undefined),
CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined),
Features = couch_util:get_value(<<"features">>, Json, undefined),
+ Sha = couch_util:get_value(<<"git_sha">>, Json, undefined),
+ ?assertNotEqual(Sha, undefined),
?assertEqual(<<"Welcome">>, CouchDB),
RealVersion = list_to_binary(couch_server:get_version()),
?assertEqual(RealVersion, Version),
diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
index 498ce3a82..5321cff66 100644
--- a/src/couch/rebar.config.script
+++ b/src/couch/rebar.config.script
@@ -39,6 +39,13 @@ Version = case os:getenv("COUCHDB_VERSION") of
string:strip(Version0, right)
end,
+GitSha = case os:getenv("COUCHDB_GIT_SHA") of
+ false ->
+ ""; % release builds won’t get a fallback
+ GitSha0 ->
+ string:strip(GitSha0, right)
+end,
+
CouchConfig = case filelib:is_file(os:getenv("COUCHDB_CONFIG")) of
true ->
{ok, Result} = file:consult(os:getenv("COUCHDB_CONFIG")),
@@ -64,6 +71,13 @@ ConfigSrc = [["#define ", K, " ", V, $\n] || {K, V} <- ConfigH],
ConfigBin = iolist_to_binary(ConfigSrc),
ok = CopyIfDifferent(CouchJSConfig, ConfigBin),
+MD5Config = case lists:keyfind(erlang_md5, 1, CouchConfig) of
+ {erlang_md5, true} ->
+ [{d, 'ERLANG_MD5', true}];
+ _ ->
+ []
+end,
+
%% TODO support curl on Windows
{JS_CFLAGS, JS_LDFLAGS} = case lists:keyfind(with_curl, 1, CouchConfig) of
{with_curl, true} ->
@@ -142,8 +156,9 @@ AddConfig = [
{port_specs, PortSpecs},
{erl_opts, PlatformDefines ++ [
{d, 'COUCHDB_VERSION', Version},
+ {d, 'COUCHDB_GIT_SHA', GitSha},
{i, "../"}
- ]},
+ ] ++ MD5Config},
{eunit_compile_opts, PlatformDefines}
].
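
The ERLANG_MD5 define above is consumed by the new couch_hash module (src/couch/src/couch_hash.erl, 45 changed lines in the diffstat), whose source is not shown on this page. Given the define and the couch_hash:md5_hash/1 call sites elsewhere in this diff, a plausible minimal sketch:

    -module(couch_hash_sketch).
    -export([md5_hash/1]).

    -ifdef(ERLANG_MD5).
    %% Built with `./configure --erlang-md5`: compute MD5 with the
    %% Erlang BIF instead of going through crypto/OpenSSL.
    md5_hash(Data) ->
        erlang:md5(Data).
    -else.
    md5_hash(Data) ->
        crypto:hash(md5, Data).
    -endif.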
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 524b72868..6af213624 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -31,7 +31,6 @@
kernel,
stdlib,
crypto,
- bcrypt,
sasl,
inets,
ssl,
@@ -47,6 +46,7 @@
couch_log,
couch_event,
ioq,
- couch_stats
+ couch_stats,
+ hyper
]}
]}.
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl
index f956b4b3d..fd5c9e101 100644
--- a/src/couch/src/couch.erl
+++ b/src/couch/src/couch.erl
@@ -21,7 +21,6 @@ deps() ->
inets,
os_mon,
crypto,
- bcrypt,
public_key,
ssl,
ibrowse,
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
index 425cce010..157b0902e 100644
--- a/src/couch/src/couch_auth_cache.erl
+++ b/src/couch/src/couch_auth_cache.erl
@@ -92,8 +92,6 @@ get_admin(UserName) when is_list(UserName) ->
"-pbkdf2-" ++ HashedPwdSaltAndIterations ->
[HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
make_admin_doc(HashedPwd, Salt, Iterations);
- "-bcrypt-" ++ HashedPwd ->
- make_admin_doc(HashedPwd);
_Else ->
nil
end.
@@ -111,11 +109,6 @@ make_admin_doc(DerivedKey, Salt, Iterations) ->
{<<"password_scheme">>, <<"pbkdf2">>},
{<<"derived_key">>, ?l2b(DerivedKey)}].
-make_admin_doc(DerivedKey) ->
- [{<<"roles">>, [<<"_admin">>]},
- {<<"password_scheme">>, <<"bcrypt">>},
- {<<"derived_key">>, ?l2b(DerivedKey)}].
-
get_from_cache(UserName) ->
exec_if_auth_db(
fun(_AuthDb) ->
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
index 43a77b071..ee0d6d864 100644
--- a/src/couch/src/couch_bt_engine.erl
+++ b/src/couch/src/couch_bt_engine.erl
@@ -114,7 +114,7 @@ delete(RootDir, FilePath, Async) ->
%% Delete any leftover compaction files. If we don't do this a
%% subsequent request for this DB will try to open them to use
%% as a recovery.
- delete_compaction_files(RootDir, FilePath, [{context, delete}]),
+ delete_compaction_files(RootDir, FilePath, [{context, compaction}]),
% Delete the actual database file
couch_file:delete(RootDir, FilePath, Async).
@@ -330,7 +330,7 @@ serialize_doc(#st{} = St, #doc{} = Doc) ->
Body = Compress(Doc#doc.body),
Atts = Compress(Doc#doc.atts),
SummaryBin = ?term_to_bin({Body, Atts}),
- Md5 = crypto:hash(md5, SummaryBin),
+ Md5 = couch_hash:md5_hash(SummaryBin),
Data = couch_file:assemble_file_chunk(SummaryBin, Md5),
% TODO: This is a terrible hack to get around the issues
% in COUCHDB-3255. We'll need to come back and figure
@@ -765,7 +765,7 @@ set_default_security_object(Fd, Header, Compression, Options) ->
delete_compaction_files(FilePath) ->
RootDir = config:get("couchdb", "database_dir", "."),
- DelOpts = [{context, delete}],
+ DelOpts = [{context, compaction}],
delete_compaction_files(RootDir, FilePath, DelOpts).
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 93ea07e65..65ca54a59 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -870,16 +870,10 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI
{[], AccErrors}, Bucket),
prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
#full_doc_info{rev_tree=OldTree} ->
- RevsLimit = get_revs_limit(Db),
OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
- NewRevTree = lists:foldl(
- fun(NewDoc, AccTree) ->
- {NewTree, _} = couch_key_tree:merge(AccTree,
- couch_doc:to_path(NewDoc), RevsLimit),
- NewTree
- end,
- OldTree, Bucket),
+ NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
+ NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
{ValidatedBucket, AccErrors3} =
@@ -944,7 +938,7 @@ new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted})
?l2b(integer_to_list(couch_util:rand32()));
Atts2 ->
OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
- crypto:hash(md5, term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]))
+ couch_hash:md5_hash(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]))
end.
new_revs([], OutBuckets, IdRevsAcc) ->
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index a2de3bc60..acb9ec1c9 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -504,23 +504,24 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
[OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
erlang:put(last_id_merged, OldDocInfo#full_doc_info.id), % for debugging
NewDocInfo0 = lists:foldl(fun({Client, NewDoc}, OldInfoAcc) ->
- merge_rev_tree(OldInfoAcc, NewDoc, Client, Limit, MergeConflicts)
+ merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts)
end, OldDocInfo, NewDocs),
+ NewDocInfo1 = maybe_stem_full_doc_info(NewDocInfo0, Limit),
% When MergeConflicts is false, we updated #full_doc_info.deleted on every
% iteration of merge_rev_tree. However, merge_rev_tree does not update
% #full_doc_info.deleted when MergeConflicts is true, since we don't need
% to know whether the doc is deleted between iterations. Since we still
% need to know if the doc is deleted after the merge happens, we have to
% set it here.
- NewDocInfo1 = case MergeConflicts of
+ NewDocInfo2 = case MergeConflicts of
true ->
- NewDocInfo0#full_doc_info{
- deleted = couch_doc:is_deleted(NewDocInfo0)
+ NewDocInfo1#full_doc_info{
+ deleted = couch_doc:is_deleted(NewDocInfo1)
};
false ->
- NewDocInfo0
+ NewDocInfo1
end,
- if NewDocInfo1 == OldDocInfo ->
+ if NewDocInfo2 == OldDocInfo ->
% nothing changed
merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
AccNewInfos, AccRemoveSeqs, AccSeq);
@@ -529,7 +530,7 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
% important to note that the update_seq on OldDocInfo should
% be identical to the value on NewDocInfo1.
OldSeq = OldDocInfo#full_doc_info.update_seq,
- NewDocInfo2 = NewDocInfo1#full_doc_info{
+ NewDocInfo3 = NewDocInfo2#full_doc_info{
update_seq = AccSeq + 1
},
RemoveSeqs = case OldSeq of
@@ -537,10 +538,10 @@ merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
_ -> [OldSeq | AccRemoveSeqs]
end,
merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
- [NewDocInfo2|AccNewInfos], RemoveSeqs, AccSeq+1)
+ [NewDocInfo3|AccNewInfos], RemoveSeqs, AccSeq+1)
end.
-merge_rev_tree(OldInfo, NewDoc, Client, Limit, false)
+merge_rev_tree(OldInfo, NewDoc, Client, false)
when OldInfo#full_doc_info.deleted ->
% We're recreating a document that was previously
% deleted. To check that this is a recreation from
@@ -574,7 +575,7 @@ merge_rev_tree(OldInfo, NewDoc, Client, Limit, false)
% Merge our modified new doc into the tree
#full_doc_info{rev_tree=OldTree} = OldInfo,
NewTree0 = couch_doc:to_path(NewDoc2),
- case couch_key_tree:merge(OldTree, NewTree0, Limit) of
+ case couch_key_tree:merge(OldTree, NewTree0) of
{NewTree1, new_leaf} ->
% We changed the revision id so inform the caller
send_result(Client, NewDoc, {ok, {OldPos+1, NewRevId}}),
@@ -589,7 +590,7 @@ merge_rev_tree(OldInfo, NewDoc, Client, Limit, false)
send_result(Client, NewDoc, conflict),
OldInfo
end;
-merge_rev_tree(OldInfo, NewDoc, Client, Limit, false) ->
+merge_rev_tree(OldInfo, NewDoc, Client, false) ->
% We're attempting to merge a new revision into an
% undeleted document. To not be a conflict we require
% that the merge results in extending a branch.
@@ -597,7 +598,7 @@ merge_rev_tree(OldInfo, NewDoc, Client, Limit, false) ->
OldTree = OldInfo#full_doc_info.rev_tree,
NewTree0 = couch_doc:to_path(NewDoc),
NewDeleted = NewDoc#doc.deleted,
- case couch_key_tree:merge(OldTree, NewTree0, Limit) of
+ case couch_key_tree:merge(OldTree, NewTree0) of
{NewTree, new_leaf} when not NewDeleted ->
OldInfo#full_doc_info{
rev_tree = NewTree,
@@ -615,14 +616,23 @@ merge_rev_tree(OldInfo, NewDoc, Client, Limit, false) ->
send_result(Client, NewDoc, conflict),
OldInfo
end;
-merge_rev_tree(OldInfo, NewDoc, _Client, Limit, true) ->
+merge_rev_tree(OldInfo, NewDoc, _Client, true) ->
% We're merging in revisions without caring about
% conflicts. Most likely this is a replication update.
OldTree = OldInfo#full_doc_info.rev_tree,
NewTree0 = couch_doc:to_path(NewDoc),
- {NewTree, _} = couch_key_tree:merge(OldTree, NewTree0, Limit),
+ {NewTree, _} = couch_key_tree:merge(OldTree, NewTree0),
OldInfo#full_doc_info{rev_tree = NewTree}.
+maybe_stem_full_doc_info(#full_doc_info{rev_tree = Tree} = Info, Limit) ->
+ case config:get_boolean("couchdb", "stem_interactive_updates", true) of
+ true ->
+ Stemmed = couch_key_tree:stem(Tree, Limit),
+ Info#full_doc_info{rev_tree = Stemmed};
+ false ->
+ Info
+ end.
+
update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, FullCommit) ->
UpdateSeq = couch_db_engine:get_update_seq(Db),
RevsLimit = couch_db_engine:get_revs_limit(Db),
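
With merge/3 gone, the updater now merges first and stems once per updated
document, gated by the "stem_interactive_updates" setting. A minimal sketch of
the two-step flow, assuming couch_key_tree is on the code path (tree shapes
follow the couch_key_tree test fixtures):

    One = {1, {"1", "foo", []}},
    {Merged, new_leaf} = couch_key_tree:merge([], One),
    [{1, {"1", "foo", []}}] = couch_key_tree:stem(Merged, 10).
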
diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
index 9f668ea69..55cb95661 100644
--- a/src/couch/src/couch_file.erl
+++ b/src/couch/src/couch_file.erl
@@ -132,7 +132,7 @@ append_binary(Fd, Bin) ->
append_binary_md5(Fd, Bin) ->
ioq:call(Fd,
- {append_bin, assemble_file_chunk(Bin, crypto:hash(md5, Bin))},
+ {append_bin, assemble_file_chunk(Bin, couch_hash:md5_hash(Bin))},
erlang:get(io_priority)).
append_raw_chunk(Fd, Chunk) ->
@@ -175,7 +175,7 @@ pread_iolist(Fd, Pos) ->
{ok, IoList, <<>>} ->
{ok, IoList};
{ok, IoList, Md5} ->
- case crypto:hash(md5, IoList) of
+ case couch_hash:md5_hash(IoList) of
Md5 ->
{ok, IoList};
_ ->
@@ -333,7 +333,7 @@ read_header(Fd) ->
write_header(Fd, Data) ->
Bin = term_to_binary(Data),
- Md5 = crypto:hash(md5, Bin),
+ Md5 = couch_hash:md5_hash(Bin),
% now we assemble the final header binary and write to disk
FinalBin = <<Md5/binary, Bin/binary>>,
ioq:call(Fd, {write_header, FinalBin}, erlang:get(io_priority)).
@@ -559,7 +559,7 @@ load_header(Fd, Pos, HeaderLen, RestBlock) ->
end,
<<Md5Sig:16/binary, HeaderBin/binary>> =
iolist_to_binary(remove_block_prefixes(?PREFIX_SIZE, RawBin)),
- Md5Sig = crypto:hash(md5, HeaderBin),
+ Md5Sig = couch_hash:md5_hash(HeaderBin),
{ok, HeaderBin}.
diff --git a/src/couch/src/couch_hash.erl b/src/couch/src/couch_hash.erl
new file mode 100644
index 000000000..842b37423
--- /dev/null
+++ b/src/couch/src/couch_hash.erl
@@ -0,0 +1,45 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_hash).
+
+-export([md5_hash/1, md5_hash_final/1, md5_hash_init/0, md5_hash_update/2]).
+
+-ifdef(ERLANG_MD5).
+
+md5_hash(Data) ->
+ erlang:md5(Data).
+
+md5_hash_final(Context) ->
+ erlang:md5_final(Context).
+
+md5_hash_init() ->
+ erlang:md5_init().
+
+md5_hash_update(Context, Data) ->
+ erlang:md5_update(Context, Data).
+
+-else.
+
+md5_hash(Data) ->
+ crypto:hash(md5, Data).
+
+md5_hash_final(Context) ->
+ crypto:hash_final(Context).
+
+md5_hash_init() ->
+ crypto:hash_init(md5).
+
+md5_hash_update(Context, Data) ->
+ crypto:hash_update(Context, Data).
+
+-endif.
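
couch_hash gives MD5 use a single switch point: builds with the ERLANG_MD5
define route through erlang:md5, everything else through crypto. A sanity
sketch, assuming the module is compiled and loaded — whichever backend is
selected, the streaming API must agree with the one-shot call:

    Data = [<<"hello ">>, <<"world">>],
    Ctx0 = couch_hash:md5_hash_init(),
    Ctx1 = couch_hash:md5_hash_update(Ctx0, Data),
    true = couch_hash:md5_hash_final(Ctx1) =:= couch_hash:md5_hash(Data).
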
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 050282a0c..a8cfca6d2 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -622,7 +622,7 @@ rev_etag({Start, DiskRev}) ->
<<$", Rev/binary, $">>.
make_etag(Term) ->
- <<SigInt:128/integer>> = crypto:hash(md5, term_to_binary(Term)),
+ <<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
iolist_to_binary([$", io_lib:format("~.36B", [SigInt]), $"]).
etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 74cbe5a08..6ac7b75af 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -309,12 +309,7 @@ handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
Secret = ?l2b(ensure_cookie_auth_secret()),
UserSalt = couch_util:get_value(<<"salt">>, UserProps),
CurrentTime = make_cookie_time(),
- Cookie = case UserSalt of
- undefined ->
- cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary>>, CurrentTime);
- _ ->
- cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime)
- end,
+ Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
% TODO document the "next" feature in Futon
{Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
nil ->
@@ -406,10 +401,7 @@ authenticate(Pass, UserProps) ->
Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
verify_iterations(Iterations),
{couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
- couch_util:get_value(<<"derived_key">>, UserProps, nil)};
- <<"bcrypt">> ->
- UserHash = couch_util:get_value(<<"derived_key">>, UserProps, nil),
- {couch_passwords:bcrypt(Pass, UserHash), UserHash}
+ couch_util:get_value(<<"derived_key">>, UserProps, nil)}
end,
couch_passwords:verify(PasswordHash, ExpectedHash).
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
index e2fc9f2fc..0c70bcb8a 100644
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ b/src/couch/src/couch_httpd_misc_handlers.erl
@@ -262,7 +262,7 @@ handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Re
<<"admins">> ->
couch_passwords:hash_admin_password(RawValue);
_ ->
- RawValue
+ couch_util:trim(RawValue)
end
end,
OldValue = config:get(Section, Key, ""),
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
index cd661e29a..94150418e 100644
--- a/src/couch/src/couch_key_tree.erl
+++ b/src/couch/src/couch_key_tree.erl
@@ -59,7 +59,7 @@ get_key_leafs/2,
map/2,
map_leafs/2,
mapfold/3,
-merge/3,
+multi_merge/2,
merge/2,
remove_leafs/2,
stem/2
@@ -71,16 +71,13 @@ stem/2
-type revtree() :: [tree()].
-%% @doc Merge a path into the given tree and then stem the result.
-%% Although Tree is of type tree(), it must not contain any branches.
--spec merge(revtree(), tree() | path(), pos_integer()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
-merge(RevTree, Tree, StemDepth) ->
- {Merged, Result} = merge(RevTree, Tree),
- case config:get("couchdb", "stem_interactive_updates", "true") of
- "true" -> {stem(Merged, StemDepth), Result};
- _ -> {Merged, Result}
- end.
+%% @doc Merge multiple paths into the given tree.
+-spec multi_merge(revtree(), tree()) -> revtree().
+multi_merge(RevTree, Trees) ->
+ lists:foldl(fun(Tree, RevTreeAcc) ->
+ {NewRevTree, _} = merge(RevTreeAcc, Tree),
+ NewRevTree
+ end, RevTree, lists:sort(Trees)).
%% @doc Merge a path into a tree.
@@ -470,6 +467,70 @@ map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
stem(Trees, Limit) ->
+ try
+ {_, Branches} = lists:foldl(fun(Tree, {Seen, TreeAcc}) ->
+ {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen),
+ {NewSeen, NewBranches ++ TreeAcc}
+ end, {sets:new(), []}, Trees),
+ lists:sort(Branches)
+ catch throw:dupe_keys ->
+ repair_tree(Trees, Limit)
+ end.
+
+
+stem_tree({Depth, Child}, Limit, Seen) ->
+ case stem_tree(Depth, Child, Limit, Seen) of
+ {NewSeen, _, NewChild, NewBranches} ->
+ {NewSeen, [{Depth, NewChild} | NewBranches]};
+ {NewSeen, _, NewBranches} ->
+ {NewSeen, NewBranches}
+ end.
+
+
+stem_tree(_Depth, {Key, _Val, []} = Leaf, Limit, Seen) ->
+ {check_key(Key, Seen), Limit - 1, Leaf, []};
+
+stem_tree(Depth, {Key, Val, Children}, Limit, Seen0) ->
+ Seen1 = check_key(Key, Seen0),
+ FinalAcc = lists:foldl(fun(Child, Acc) ->
+ {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc,
+ case stem_tree(Depth + 1, Child, Limit, SeenAcc) of
+ {NewSeenAcc, LimitPos, NewChild, NewBranches} ->
+ NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+ NewChildAcc = [NewChild | ChildAcc],
+ NewBranchAcc = NewBranches ++ BranchAcc,
+ {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc};
+ {NewSeenAcc, LimitPos, NewBranches} ->
+ NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+ NewBranchAcc = NewBranches ++ BranchAcc,
+ {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc}
+ end
+ end, {Seen1, -1, [], []}, Children),
+ {FinalSeen, FinalLimitPos, FinalChildren, FinalBranches} = FinalAcc,
+ case FinalLimitPos of
+ N when N > 0, length(FinalChildren) > 0 ->
+ FinalNode = {Key, Val, lists:reverse(FinalChildren)},
+ {FinalSeen, FinalLimitPos - 1, FinalNode, FinalBranches};
+ 0 when length(FinalChildren) > 0 ->
+ NewBranches = lists:map(fun(Child) ->
+ {Depth + 1, Child}
+ end, lists:reverse(FinalChildren)),
+ {FinalSeen, -1, NewBranches ++ FinalBranches};
+ N when N < 0, length(FinalChildren) == 0 ->
+ {FinalSeen, FinalLimitPos - 1, FinalBranches}
+ end.
+
+
+check_key(Key, Seen) ->
+ case sets:is_element(Key, Seen) of
+ true ->
+ throw(dupe_keys);
+ false ->
+ sets:add_element(Key, Seen)
+ end.
+
+
+repair_tree(Trees, Limit) ->
% flatten each branch in a tree into a tree path, sort by starting rev #
Paths = lists:sort(lists:map(fun({Pos, Path}) ->
StemmedPath = lists:sublist(Path, Limit),
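
stem/2 is rewritten as a single pass (stem_tree) that tracks seen keys and
falls back to the old flatten-and-sort path (repair_tree) only when a
duplicated key is detected. The observable behaviour is unchanged; values here
are borrowed from the existing stemming tests:

    TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
    Stemmed  = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
    Stemmed  = couch_key_tree:stem(TwoChild, 2).
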
diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl
index 6d66c936b..eee8b2860 100644
--- a/src/couch/src/couch_native_process.erl
+++ b/src/couch/src/couch_native_process.erl
@@ -226,6 +226,18 @@ ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
end,
Resp = lists:map(FilterFunWrapper, Docs),
{State, [true, Resp]};
+ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) ->
+ MapFunWrapper = fun(Doc) ->
+ case catch Fun(Doc) of
+ undefined -> true;
+ ok -> false;
+ false -> false;
+ [_|_] -> true;
+ {'EXIT', Error} -> couch_log:error("~p", [Error])
+ end
+ end,
+ Resp = lists:map(MapFunWrapper, Docs),
+ {State, [true, Resp]};
ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
Resp = case (catch apply(Fun, Args)) of
FunResp when is_list(FunResp) ->
@@ -351,11 +363,11 @@ bindings(State, Sig, DDoc) ->
% thanks to erlview, via:
% http://erlang.org/pipermail/erlang-questions/2003-November/010544.html
makefun(State, Source) ->
- Sig = crypto:hash(md5, Source),
+ Sig = couch_hash:md5_hash(Source),
BindFuns = bindings(State, Sig),
{Sig, makefun(State, Source, BindFuns)}.
makefun(State, Source, {DDoc}) ->
- Sig = crypto:hash(md5, lists:flatten([Source, term_to_binary(DDoc)])),
+ Sig = couch_hash:md5_hash(lists:flatten([Source, term_to_binary(DDoc)])),
BindFuns = bindings(State, Sig, {DDoc}),
{Sig, makefun(State, Source, BindFuns)};
makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl
index 77e136144..baf78f5d5 100644
--- a/src/couch/src/couch_passwords.erl
+++ b/src/couch/src/couch_passwords.erl
@@ -12,7 +12,7 @@
-module(couch_passwords).
--export([simple/2, pbkdf2/3, pbkdf2/4, bcrypt/2, verify/2]).
+-export([simple/2, pbkdf2/3, pbkdf2/4, verify/2]).
-export([hash_admin_password/1, get_unhashed_admins/0]).
-include_lib("couch/include/couch_db.hrl").
@@ -51,10 +51,7 @@ hash_admin_password("pbkdf2", ClearPassword) ->
Salt ,list_to_integer(Iterations)),
?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ ","
++ ?b2l(Salt) ++ ","
- ++ Iterations);
-hash_admin_password("bcrypt", ClearPassword) ->
- LogRounds = list_to_integer(config:get("couch_httpd_auth", "log_rounds", "10")),
- ?l2b("-bcrypt-" ++ couch_passwords:bcrypt(couch_util:to_binary(ClearPassword), LogRounds)).
+ ++ Iterations).
-spec get_unhashed_admins() -> list().
get_unhashed_admins() ->
@@ -63,8 +60,6 @@ get_unhashed_admins() ->
false; % already hashed
({_User, "-pbkdf2-" ++ _}) ->
false; % already hashed
- ({_User, "-bcrypt-" ++ _}) ->
- false; % already hashed
({_User, _ClearPassword}) ->
true
end,
@@ -128,16 +123,6 @@ pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1,
Next, crypto:exor(Next, Acc)).
-%% Define the bcrypt functions to hash a password
--spec bcrypt(binary(), binary()) -> binary();
- (binary(), integer()) -> binary().
-bcrypt(Password, Salt) when is_binary(Salt) ->
- {ok, Hash} = bcrypt:hashpw(Password, Salt),
- list_to_binary(Hash);
-bcrypt(Password, LogRounds) when is_integer(LogRounds) ->
- {ok, Salt} = bcrypt:gen_salt(LogRounds),
- bcrypt(Password, list_to_binary(Salt)).
-
%% verify two lists for equality without short-circuits to avoid timing attacks.
-spec verify(string(), string(), integer()) -> boolean().
verify([X|RestX], [Y|RestY], Result) ->
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index f31d24c6c..7047364e2 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -17,6 +17,7 @@
-export([reduce/3, rereduce/3,validate_doc_update/5]).
-export([filter_docs/5]).
-export([filter_view/3]).
+-export([finalize/2]).
-export([rewrite/3]).
-export([with_ddoc_proc/2, proc_prompt/2, ddoc_prompt/3, ddoc_proc_prompt/3, json_doc/1]).
@@ -86,6 +87,14 @@ group_reductions_results(List) ->
[Heads | group_reductions_results(Tails)]
end.
+finalize(<<"_approx_count_distinct",_/binary>>, Reduction) ->
+ true = hyper:is_hyper(Reduction),
+ {ok, round(hyper:card(Reduction))};
+finalize(<<"_stats",_/binary>>, Unpacked) ->
+ {ok, pack_stats(Unpacked)};
+finalize(_RedSrc, Reduction) ->
+ {ok, Reduction}.
+
rereduce(_Lang, [], _ReducedValues) ->
{ok, []};
rereduce(Lang, RedSrcs, ReducedValues) ->
@@ -171,7 +180,10 @@ builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
Stats = builtin_stats(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]).
+ builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]);
+builtin_reduce(Re, [<<"_approx_count_distinct",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Distinct = approx_count_distinct(Re, KVs),
+ builtin_reduce(Re, BuiltinReds, KVs, [Distinct|Acc]).
builtin_sum_rows([], Acc) ->
@@ -236,11 +248,11 @@ sum_arrays(Else, _) ->
throw_sum_error(Else).
builtin_stats(_, []) ->
- {[{sum,0}, {count,0}, {min,0}, {max,0}, {sumsqr,0}]};
+ {0, 0, 0, 0, 0};
builtin_stats(_, [[_,First]|Rest]) ->
- Unpacked = lists:foldl(fun([_Key, Value], Acc) -> stat_values(Value, Acc) end,
- build_initial_accumulator(First), Rest),
- pack_stats(Unpacked).
+ lists:foldl(fun([_Key, Value], Acc) ->
+ stat_values(Value, Acc)
+ end, build_initial_accumulator(First), Rest).
stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
lists:zipwith(fun stat_values/2, Value, Acc);
@@ -267,6 +279,8 @@ build_initial_accumulator(L) when is_list(L) ->
[build_initial_accumulator(X) || X <- L];
build_initial_accumulator(X) when is_number(X) ->
{X, 1, X, X, X*X};
+build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) ->
+ AlreadyUnpacked;
build_initial_accumulator({Props}) ->
unpack_stats({Props});
build_initial_accumulator(Else) ->
@@ -282,8 +296,12 @@ unpack_stats({PreRed}) when is_list(PreRed) ->
get_number(<<"sumsqr">>, PreRed)
}.
+
pack_stats({Sum, Cnt, Min, Max, Sqr}) ->
{[{<<"sum">>,Sum}, {<<"count">>,Cnt}, {<<"min">>,Min}, {<<"max">>,Max}, {<<"sumsqr">>,Sqr}]};
+pack_stats({Packed}) ->
+ % Legacy code path before we had the finalize operation
+ {Packed};
pack_stats(Stats) when is_list(Stats) ->
lists:map(fun pack_stats/1, Stats).
@@ -303,6 +321,13 @@ get_number(Key, Props) ->
throw({invalid_value, iolist_to_binary(Msg)})
end.
+% TODO allow customization of precision in the ddoc.
+approx_count_distinct(reduce, KVs) ->
+ lists:foldl(fun([[Key, _Id], _Value], Filter) ->
+ hyper:insert(term_to_binary(Key), Filter)
+ end, hyper:new(11), KVs);
+approx_count_distinct(rereduce, Reds) ->
+ hyper:union([Filter || [_, Filter] <- Reds]).
% use the function stored in ddoc.validate_doc_update to test an update.
-spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when
@@ -585,4 +610,49 @@ stat_values_test() ->
{18, 2, 5, 13, 194}
], stat_values([2,3,5], [7,11,13])).
+reduce_stats_test() ->
+ ?assertEqual([
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], test_reduce(<<"_stats">>, [[[null, key], 2]])),
+
+ ?assertEqual([[
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ]], test_reduce(<<"_stats">>, [[[null, key],[1,2]]])),
+
+ ?assertEqual(
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ , element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))),
+
+ ?assertEqual([
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], element(2, finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {2, 1, 2, 2, 4}
+ ]))),
+
+ ?assertEqual([
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], element(2, finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ]))),
+
+ ?assertEqual([
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
+ ], element(2, finalize(<<"_stats">>, [
+ {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
+ {2, 1, 2, 2, 4}
+ ]))),
+ ok.
+
+test_reduce(Reducer, KVs) ->
+ ?assertMatch({ok, _}, reduce(<<"javascript">>, [Reducer], KVs)),
+ {ok, Reduced} = reduce(<<"javascript">>, [Reducer], KVs),
+ {ok, Finalized} = finalize(Reducer, Reduced),
+ Finalized.
+
-endif.
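
The new _approx_count_distinct reducer keeps a HyperLogLog filter (precision
11, via the hyper application added to couch.app.src above) and only converts
it to a number in the new finalize/2 step. A hedged sketch, assuming hyper is
on the code path:

    F = lists:foldl(fun(Key, Acc) ->
            hyper:insert(term_to_binary(Key), Acc)
        end, hyper:new(11), [a, b, b, c]),
    true = hyper:is_hyper(F),
    %% Approx should be close to 3 for this tiny input.
    {ok, Approx} = couch_query_servers:finalize(<<"_approx_count_distinct">>, F).
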
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index 05af0ed54..ede8227c8 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -15,7 +15,7 @@
-behaviour(config_listener).
-vsn(3).
--export([open/2,create/2,delete/2,get_version/0,get_version/1,get_uuid/0]).
+-export([open/2,create/2,delete/2,get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
-export([all_databases/0, all_databases/2]).
-export([init/1, handle_call/3,sup_start_link/0]).
-export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
@@ -57,6 +57,7 @@ get_version(short) ->
[Version|_Rest] = string:tokens(get_version(), "+"),
Version.
+get_git_sha() -> ?COUCHDB_GIT_SHA.
get_uuid() ->
case config:get("couchdb", "uuid", undefined) of
@@ -210,6 +211,8 @@ close_db_if_idle(DbName) ->
init([]) ->
+ couch_util:set_mqd_off_heap(),
+
% Mark pluggable storage engines as a supported feature
config:enable_feature('pluggable-storage-engines'),
@@ -523,7 +526,7 @@ handle_call({delete, DbName, Options}, _From, Server) ->
DelOpt = [{context, delete} | Options],
% Make sure to remove all compaction data
- delete_compaction_files(DbNameList, DelOpt),
+ delete_compaction_files(DbNameList, Options),
{ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
RootDir = Server#server.root_dir,
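
get_git_sha/0 reads the ?COUCHDB_GIT_SHA define baked in at compile time from
the COUCHDB_GIT_SHA environment variable (see the rebar.config.script hunk
above). A hedged sketch with a hypothetical sha value:

    %% a build made with COUCHDB_GIT_SHA=deadbeef would report:
    "deadbeef" = couch_server:get_git_sha().
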
diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl
index 83b0611eb..033562932 100644
--- a/src/couch/src/couch_stream.erl
+++ b/src/couch/src/couch_stream.erl
@@ -98,9 +98,9 @@ foldl({Engine, EngineState}, Fun, Acc) ->
foldl(Engine, <<>>, Fun, Acc) ->
foldl(Engine, Fun, Acc);
foldl(Engine, Md5, UserFun, UserAcc) ->
- InitAcc = {crypto:hash_init(md5), UserFun, UserAcc},
+ InitAcc = {couch_hash:md5_hash_init(), UserFun, UserAcc},
{Md5Acc, _, OutAcc} = foldl(Engine, fun foldl_md5/2, InitAcc),
- Md5 = crypto:hash_final(Md5Acc),
+ Md5 = couch_hash:md5_hash_final(Md5Acc),
OutAcc.
@@ -128,7 +128,7 @@ range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From ->
foldl_md5(Bin, {Md5Acc, UserFun, UserAcc}) ->
- NewMd5Acc = crypto:hash_update(Md5Acc, Bin),
+ NewMd5Acc = couch_hash:md5_hash_update(Md5Acc, Bin),
{NewMd5Acc, UserFun, UserFun(Bin, UserAcc)}.
@@ -201,8 +201,8 @@ init({Engine, OpenerPid, OpenerPriority, Options}) ->
{ok, #stream{
engine=Engine,
opener_monitor=erlang:monitor(process, OpenerPid),
- md5=crypto:hash_init(md5),
- identity_md5=crypto:hash_init(md5),
+ md5=couch_hash:md5_hash_init(),
+ identity_md5=couch_hash:md5_hash_init(),
encoding_fun=EncodingFun,
end_encoding_fun=EndEncodingFun,
max_buffer=couch_util:get_value(
@@ -227,7 +227,7 @@ handle_call({write, Bin}, _From, Stream) ->
encoding_fun = EncodingFun} = Stream,
if BinSize + BufferLen > Max ->
WriteBin = lists:reverse(Buffer, [Bin]),
- IdenMd5_2 = crypto:hash_update(IdenMd5, WriteBin),
+ IdenMd5_2 = couch_hash:md5_hash_update(IdenMd5, WriteBin),
case EncodingFun(WriteBin) of
[] ->
% case where the encoder did some internal buffering
@@ -238,7 +238,7 @@ handle_call({write, Bin}, _From, Stream) ->
WriteBin2 ->
NewEngine = do_write(Engine, WriteBin2),
WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
- Md5_2 = crypto:hash_update(Md5, WriteBin2)
+ Md5_2 = couch_hash:md5_hash_update(Md5, WriteBin2)
end,
{reply, ok, Stream#stream{
@@ -268,9 +268,9 @@ handle_call(close, _From, Stream) ->
end_encoding_fun = EndEncodingFun} = Stream,
WriteBin = lists:reverse(Buffer),
- IdenMd5Final = crypto:hash_final(crypto:hash_update(IdenMd5, WriteBin)),
+ IdenMd5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(IdenMd5, WriteBin)),
WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
- Md5Final = crypto:hash_final(crypto:hash_update(Md5, WriteBin2)),
+ Md5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(Md5, WriteBin2)),
Result = case WriteBin2 of
[] ->
{do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl
index dd6d3208c..c7b41f1fc 100644
--- a/src/couch/src/couch_users_db.erl
+++ b/src/couch/src/couch_users_db.erl
@@ -23,7 +23,6 @@
-define(SIMPLE, <<"simple">>).
-define(PASSWORD_SHA, <<"password_sha">>).
-define(PBKDF2, <<"pbkdf2">>).
--define(BCRYPT, <<"bcrypt">>).
-define(ITERATIONS, <<"iterations">>).
-define(SALT, <<"salt">>).
-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
@@ -60,7 +59,7 @@ before_doc_update(Doc, Db) ->
% newDoc.salt = salt
% newDoc.password = null
save_doc(#doc{body={Body}} = Doc) ->
- %% Support all schemes to smooth migration from legacy scheme
+    %% Support both schemes to smooth the migration from the legacy scheme
Scheme = config:get("couch_httpd_auth", "password_scheme", "pbkdf2"),
case {couch_util:get_value(?PASSWORD, Body), Scheme} of
{null, _} -> % server admins don't have a user-db password entry
@@ -85,13 +84,6 @@ save_doc(#doc{body={Body}} = Doc) ->
Body3 = ?replace(Body2, ?SALT, Salt),
Body4 = proplists:delete(?PASSWORD, Body3),
Doc#doc{body={Body4}};
- {ClearPassword, "bcrypt"} ->
- LogRounds = list_to_integer(config:get("couch_httpd_auth", "log_rounds", "10")),
- DerivedKey = couch_passwords:bcrypt(ClearPassword, LogRounds),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?BCRYPT),
- Body1 = ?replace(Body0, ?DERIVED_KEY, DerivedKey),
- Body2 = proplists:delete(?PASSWORD, Body1),
- Doc#doc{body={Body2}};
{_ClearPassword, Scheme} ->
couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
throw({forbidden, "Server cannot hash passwords at this time."})
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index f3a9249f7..936b562e1 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -37,6 +37,7 @@
-export([unique_monotonic_integer/0]).
-export([check_config_blacklist/1]).
-export([check_md5/2]).
+-export([set_mqd_off_heap/0]).
-include_lib("couch/include/couch_db.hrl").
@@ -301,15 +302,45 @@ separate_cmd_args(" " ++ Rest, CmdAcc) ->
separate_cmd_args([Char|Rest], CmdAcc) ->
separate_cmd_args(Rest, [Char | CmdAcc]).
-% Is a character whitespace?
-is_whitespace($\s) -> true;
-is_whitespace($\t) -> true;
-is_whitespace($\n) -> true;
-is_whitespace($\r) -> true;
+% Is a character whitespace (from https://en.wikipedia.org/wiki/Whitespace_character#Unicode)?
+is_whitespace(9) -> true;
+is_whitespace(10) -> true;
+is_whitespace(11) -> true;
+is_whitespace(12) -> true;
+is_whitespace(13) -> true;
+is_whitespace(32) -> true;
+is_whitespace(133) -> true;
+is_whitespace(160) -> true;
+is_whitespace(5760) -> true;
+is_whitespace(8192) -> true;
+is_whitespace(8193) -> true;
+is_whitespace(8194) -> true;
+is_whitespace(8195) -> true;
+is_whitespace(8196) -> true;
+is_whitespace(8197) -> true;
+is_whitespace(8198) -> true;
+is_whitespace(8199) -> true;
+is_whitespace(8200) -> true;
+is_whitespace(8201) -> true;
+is_whitespace(8202) -> true;
+is_whitespace(8232) -> true;
+is_whitespace(8233) -> true;
+is_whitespace(8239) -> true;
+is_whitespace(8287) -> true;
+is_whitespace(12288) -> true;
+is_whitespace(6158) -> true;
+is_whitespace(8203) -> true;
+is_whitespace(8204) -> true;
+is_whitespace(8205) -> true;
+is_whitespace(8288) -> true;
+is_whitespace(65279) -> true;
is_whitespace(_Else) -> false.
% removes leading and trailing whitespace from a string
+trim(String) when is_binary(String) ->
+ % mirror string:trim() behaviour of returning a binary when a binary is passed in
+ ?l2b(trim(?b2l(String)));
trim(String) ->
String2 = lists:dropwhile(fun is_whitespace/1, String),
lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
@@ -639,6 +670,15 @@ check_md5(Sig, Sig) -> ok;
check_md5(_, _) -> throw(md5_mismatch).
+set_mqd_off_heap() ->
+ try
+ erlang:process_flag(message_queue_data, off_heap),
+ ok
+ catch error:badarg ->
+ ok
+ end.
+
+
ensure_loaded(Module) when is_atom(Module) ->
case code:ensure_loaded(Module) of
{module, Module} ->
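
Two additions here: set_mqd_off_heap/0 moves a hot process's message queue
off-heap (a no-op via the badarg catch on Erlang releases without that flag),
and trim/1 now covers the full Unicode whitespace set while mirroring
string:trim/1 by returning a binary when given a binary. A small sketch of the
trim behaviour:

    <<"foo">> = couch_util:trim(<<"\t foo \n">>),
    "bar" = couch_util:trim("\x{2003}bar\x{00A0}").
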
diff --git a/src/couch/src/test_engine_util.erl b/src/couch/src/test_engine_util.erl
index 89997538d..6cc6bccdc 100644
--- a/src/couch/src/test_engine_util.erl
+++ b/src/couch/src/test_engine_util.erl
@@ -186,7 +186,7 @@ gen_write(Engine, St, {create, {DocId, Body, Atts0}}, UpdateSeq) ->
[not_found] = Engine:open_docs(St, [DocId]),
Atts = [couch_att:to_disk_term(Att) || Att <- Atts0],
- Rev = crypto:hash(md5, term_to_binary({DocId, Body, Atts})),
+ Rev = couch_hash:md5_hash(term_to_binary({DocId, Body, Atts})),
Doc0 = #doc{
id = DocId,
@@ -309,7 +309,8 @@ gen_write(Engine, St, {Action, {DocId, Body, Atts0}}, UpdateSeq) ->
conflict -> new_branch;
_ -> new_leaf
end,
- {NewTree, NodeType} = couch_key_tree:merge(PrevRevTree, Path, RevsLimit),
+ {MergedTree, NodeType} = couch_key_tree:merge(PrevRevTree, Path),
+ NewTree = couch_key_tree:stem(MergedTree, RevsLimit),
NewFDI = PrevFDI#full_doc_info{
deleted = couch_doc:is_deleted(NewTree),
@@ -322,11 +323,11 @@ gen_write(Engine, St, {Action, {DocId, Body, Atts0}}, UpdateSeq) ->
gen_revision(conflict, DocId, _PrevRev, Body, Atts) ->
- crypto:hash(md5, term_to_binary({DocId, Body, Atts}));
+ couch_hash:md5_hash(term_to_binary({DocId, Body, Atts}));
gen_revision(delete, DocId, PrevRev, Body, Atts) ->
gen_revision(update, DocId, PrevRev, Body, Atts);
gen_revision(update, DocId, PrevRev, Body, Atts) ->
- crypto:hash(md5, term_to_binary({DocId, PrevRev, Body, Atts})).
+ couch_hash:md5_hash(term_to_binary({DocId, PrevRev, Body, Atts})).
gen_path(conflict, _RevPos, _PrevRevId, Rev, Leaf) ->
@@ -372,7 +373,7 @@ prep_atts(Engine, St, [{FileName, Data} | Rest]) ->
write_att(Stream, FileName, OrigData, <<>>) ->
{StreamEngine, Len, Len, Md5, Md5} = couch_stream:close(Stream),
- couch_util:check_md5(Md5, crypto:hash(md5, OrigData)),
+ couch_util:check_md5(Md5, couch_hash:md5_hash(OrigData)),
Len = size(OrigData),
couch_att:new([
{name, FileName},
diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl
index 4dfde1a33..48f49bda6 100644
--- a/src/couch/src/test_request.erl
+++ b/src/couch/src/test_request.erl
@@ -101,7 +101,11 @@ request(Method, Url, Headers, Body, Opts, N) ->
{error, {'EXIT', {normal, _}}} ->
% Connection closed right after a successful request that
% used the same connection.
- request(Method, Url, Headers, Body, N - 1);
+ request(Method, Url, Headers, Body, Opts, N - 1);
+ {error, retry_later} ->
+            % CouchDB is busy, let's wait a bit
+ timer:sleep(3000 div N),
+ request(Method, Url, Headers, Body, Opts, N - 1);
Error ->
Error
end.
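
The new retry_later clause backs off by 3000 div N ms with N attempts left, so
successive retries wait longer as the retry budget shrinks:

    [1000, 1500, 3000] = [3000 div N || N <- [3, 2, 1]].
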
diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index 738e9a3fb..efb506460 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -101,6 +101,9 @@ start_applications([App|Apps], Acc) ->
io:format(standard_error, "Application ~s was left running!~n", [App]),
application:stop(App),
start_applications([App|Apps], Acc);
+ {error, Reason} ->
+ io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]),
+ throw({error, {cannot_start, App, Reason}});
ok ->
start_applications(Apps, [App|Acc])
end.
diff --git a/src/couch/test/couch_changes_tests.erl b/src/couch/test/couch_changes_tests.erl
index 673f2faad..e4ea76167 100644
--- a/src/couch/test/couch_changes_tests.erl
+++ b/src/couch/test/couch_changes_tests.erl
@@ -47,9 +47,11 @@ setup() ->
save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}),
save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]})
]],
+ config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false),
{DbName, list_to_tuple(Revs2)}.
teardown({DbName, _}) ->
+ config:delete("native_query_servers", "erlang", _Persist=false),
delete_db(DbName),
ok.
@@ -153,7 +155,8 @@ filter_by_view() ->
fun setup/0, fun teardown/1,
[
fun should_filter_by_view/1,
- fun should_filter_by_fast_view/1
+ fun should_filter_by_fast_view/1,
+ fun should_filter_by_erlang_view/1
]
}
}.
@@ -733,6 +736,39 @@ should_filter_by_fast_view({DbName, _}) ->
?assertEqual(UpSeq, ViewUpSeq)
end).
+should_filter_by_erlang_view({DbName, _}) ->
+ ?_test(
+ begin
+ DDocId = <<"_design/app">>,
+ DDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, DDocId},
+ {<<"language">>, <<"erlang">>},
+ {<<"views">>, {[
+ {<<"valid">>, {[
+ {<<"map">>, <<"fun({Doc}) ->"
+ " case lists:keyfind(<<\"_id\">>, 1, Doc) of"
+ " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); "
+ " false -> ok"
+ " end "
+ "end.">>}
+ ]}}
+ ]}}
+ ]}),
+ ChArgs = #changes_args{filter = "_view"},
+ Req = {json_req, {[{
+ <<"query">>, {[
+ {<<"view">>, <<"app/valid">>}
+ ]}
+ }]}},
+ ok = update_ddoc(DbName, DDoc),
+ {Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
+ ?assertEqual(1, length(Rows)),
+ [#row{seq = Seq, id = Id}] = Rows,
+ ?assertEqual(<<"doc3">>, Id),
+ ?assertEqual(6, Seq),
+ ?assertEqual(UpSeq, LastSeq)
+ end).
+
update_ddoc(DbName, DDoc) ->
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
{ok, _} = couch_db:update_doc(Db, DDoc, []),
diff --git a/src/couch/test/couch_key_tree_tests.erl b/src/couch/test/couch_key_tree_tests.erl
index 88d920363..5d9cc8372 100644
--- a/src/couch/test/couch_key_tree_tests.erl
+++ b/src/couch/test/couch_key_tree_tests.erl
@@ -16,138 +16,108 @@
-define(DEPTH, 10).
-setup() ->
- meck:new(config),
- meck:expect(config, get, fun(_, _, Default) -> Default end).
-
-teardown(_) ->
- meck:unload(config).
key_tree_merge_test_()->
{
"Key tree merge",
- {
- setup,
- fun setup/0, fun teardown/1,
- [
- should_merge_with_empty_tree(),
- should_merge_reflexive(),
- should_merge_prefix_of_a_tree_with_tree(),
- should_produce_conflict_on_merge_with_unrelated_branch(),
- should_merge_reflexive_for_child_nodes(),
- should_merge_tree_to_itself(),
- should_merge_tree_of_odd_length(),
- should_merge_tree_with_stem(),
- should_merge_with_stem_at_deeper_level(),
- should_merge_with_stem_at_deeper_level_with_deeper_paths(),
- should_merge_single_tree_with_deeper_stem(),
- should_merge_tree_with_large_stem(),
- should_merge_stems(),
- should_create_conflicts_on_merge(),
- should_create_no_conflicts_on_merge(),
- should_ignore_conflicting_branch()
- ]
- }
+ [
+ should_merge_with_empty_tree(),
+ should_merge_reflexive(),
+ should_merge_prefix_of_a_tree_with_tree(),
+ should_produce_conflict_on_merge_with_unrelated_branch(),
+ should_merge_reflexive_for_child_nodes(),
+ should_merge_tree_to_itself(),
+ should_merge_tree_of_odd_length(),
+ should_merge_tree_with_stem(),
+ should_merge_with_stem_at_deeper_level(),
+ should_merge_with_stem_at_deeper_level_with_deeper_paths(),
+ should_merge_single_tree_with_deeper_stem(),
+ should_merge_tree_with_large_stem(),
+ should_merge_stems(),
+ should_create_conflicts_on_merge(),
+ should_create_no_conflicts_on_merge(),
+ should_ignore_conflicting_branch()
+ ]
}.
key_tree_missing_leaves_test_()->
{
- "Missing tree leaves",
- {
- setup,
- fun setup/0, fun teardown/1,
- [
- should_not_find_missing_leaves(),
- should_find_missing_leaves()
- ]
- }
+ "Missing tree leaves",
+ [
+ should_not_find_missing_leaves(),
+ should_find_missing_leaves()
+ ]
}.
key_tree_remove_leaves_test_()->
{
"Remove tree leaves",
- {
- setup,
- fun setup/0, fun teardown/1,
- [
- should_have_no_effect_on_removing_no_leaves(),
- should_have_no_effect_on_removing_non_existant_branch(),
- should_remove_leaf(),
- should_produce_empty_tree_on_removing_all_leaves(),
- should_have_no_effect_on_removing_non_existant_node(),
- should_produce_empty_tree_on_removing_last_leaf()
- ]
- }
+ [
+ should_have_no_effect_on_removing_no_leaves(),
+ should_have_no_effect_on_removing_non_existant_branch(),
+ should_remove_leaf(),
+ should_produce_empty_tree_on_removing_all_leaves(),
+ should_have_no_effect_on_removing_non_existant_node(),
+ should_produce_empty_tree_on_removing_last_leaf()
+ ]
}.
key_tree_get_leaves_test_()->
{
"Leaves retrieving",
- {
- setup,
- fun setup/0, fun teardown/1,
- [
- should_extract_subtree(),
- should_extract_subsubtree(),
- should_gather_non_existant_leaf(),
- should_gather_leaf(),
- shoul_gather_multiple_leaves(),
- should_gather_single_leaf_for_multiple_revs(),
- should_gather_multiple_for_multiple_revs(),
- should_retrieve_full_key_path(),
- should_retrieve_full_key_path_for_node(),
- should_retrieve_leaves_with_parent_node(),
- should_retrieve_all_leaves()
- ]
- }
+ [
+ should_extract_subtree(),
+ should_extract_subsubtree(),
+ should_gather_non_existant_leaf(),
+ should_gather_leaf(),
+ shoul_gather_multiple_leaves(),
+ should_gather_single_leaf_for_multiple_revs(),
+ should_gather_multiple_for_multiple_revs(),
+ should_retrieve_full_key_path(),
+ should_retrieve_full_key_path_for_node(),
+ should_retrieve_leaves_with_parent_node(),
+ should_retrieve_all_leaves()
+ ]
}.
key_tree_leaf_counting_test_()->
{
"Leaf counting",
- {
- setup,
- fun setup/0, fun teardown/1,
- [
- should_have_no_leaves_for_empty_tree(),
- should_have_single_leaf_for_tree_with_single_node(),
- should_have_two_leaves_for_tree_with_chindler_siblings(),
- should_not_affect_on_leaf_counting_for_stemmed_tree()
- ]
- }
+ [
+ should_have_no_leaves_for_empty_tree(),
+ should_have_single_leaf_for_tree_with_single_node(),
+ should_have_two_leaves_for_tree_with_chindler_siblings(),
+ should_not_affect_on_leaf_counting_for_stemmed_tree()
+ ]
}.
key_tree_stemming_test_()->
{
"Stemming",
- {
- setup,
- fun setup/0, fun teardown/1,
- [
- should_have_no_effect_for_stemming_more_levels_than_exists(),
- should_return_one_deepest_node(),
- should_return_two_deepest_nodes()
- ]
- }
+ [
+ should_have_no_effect_for_stemming_more_levels_than_exists(),
+ should_return_one_deepest_node(),
+ should_return_two_deepest_nodes()
+ ]
}.
should_merge_with_empty_tree()->
One = {1, {"1","foo",[]}},
?_assertEqual({[One], new_leaf},
- couch_key_tree:merge([], One, ?DEPTH)).
+ merge_and_stem([], One)).
should_merge_reflexive()->
One = {1, {"1","foo",[]}},
?_assertEqual({[One], internal_node},
- couch_key_tree:merge([One], One, ?DEPTH)).
+ merge_and_stem([One], One)).
should_merge_prefix_of_a_tree_with_tree()->
One = {1, {"1","foo",[]}},
TwoSibs = [{1, {"1","foo",[]}},
{1, {"2","foo",[]}}],
?_assertEqual({TwoSibs, internal_node},
- couch_key_tree:merge(TwoSibs, One, ?DEPTH)).
+ merge_and_stem(TwoSibs, One)).
should_produce_conflict_on_merge_with_unrelated_branch()->
TwoSibs = [{1, {"1","foo",[]}},
@@ -157,18 +127,33 @@ should_produce_conflict_on_merge_with_unrelated_branch()->
{1, {"2","foo",[]}},
{1, {"3","foo",[]}}],
?_assertEqual({ThreeSibs, new_branch},
- couch_key_tree:merge(TwoSibs, Three, ?DEPTH)).
+ merge_and_stem(TwoSibs, Three)).
should_merge_reflexive_for_child_nodes()->
TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
?_assertEqual({[TwoChild], internal_node},
- couch_key_tree:merge([TwoChild], TwoChild, ?DEPTH)).
+ merge_and_stem([TwoChild], TwoChild)).
should_merge_tree_to_itself()->
TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
{"1b", "bar", []}]}},
- ?_assertEqual({[TwoChildSibs], new_branch},
- couch_key_tree:merge([TwoChildSibs], TwoChildSibs, ?DEPTH)).
+ Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]),
+ Paths = lists:map(fun leaf_to_path/1, Leafs),
+ FinalTree = lists:foldl(fun(Path, TreeAcc) ->
+ {NewTree, internal_node} = merge_and_stem(TreeAcc, Path),
+ NewTree
+ end, [TwoChildSibs], Paths),
+ ?_assertEqual([TwoChildSibs], FinalTree).
+
+leaf_to_path({Value, {Start, Keys}}) ->
+ [Branch] = to_branch(Value, lists:reverse(Keys)),
+ {Start - length(Keys) + 1, Branch}.
+
+to_branch(Value, [Key]) ->
+ [{Key, Value, []}];
+to_branch(Value, [Key | RestKeys]) ->
+ [{Key, [], to_branch(Value, RestKeys)}].
+
should_merge_tree_of_odd_length()->
TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
@@ -176,9 +161,8 @@ should_merge_tree_of_odd_length()->
{"1b", "bar", []}]}},
TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
{"1b", "bar", []}]}},
-
- ?_assertEqual({[TwoChildPlusSibs], new_branch},
- couch_key_tree:merge([TwoChild], TwoChildSibs, ?DEPTH)).
+ ?_assertEqual({[TwoChildPlusSibs], new_leaf},
+ merge_and_stem([TwoChildSibs], TwoChild)).
should_merge_tree_with_stem()->
Stemmed = {2, {"1a", "bar", []}},
@@ -186,52 +170,52 @@ should_merge_tree_with_stem()->
{"1b", "bar", []}]}},
?_assertEqual({[TwoChildSibs], internal_node},
- couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+ merge_and_stem([TwoChildSibs], Stemmed)).
should_merge_with_stem_at_deeper_level()->
Stemmed = {3, {"1bb", "boo", []}},
TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
{"1b", "bar", [{"1bb", "boo", []}]}]}},
?_assertEqual({[TwoChildSibs], internal_node},
- couch_key_tree:merge([TwoChildSibs], Stemmed, ?DEPTH)).
+ merge_and_stem([TwoChildSibs], Stemmed)).
should_merge_with_stem_at_deeper_level_with_deeper_paths()->
Stemmed = {3, {"1bb", "boo", []}},
StemmedTwoChildSibs = [{2,{"1a", "bar", []}},
{2,{"1b", "bar", [{"1bb", "boo", []}]}}],
?_assertEqual({StemmedTwoChildSibs, internal_node},
- couch_key_tree:merge(StemmedTwoChildSibs, Stemmed, ?DEPTH)).
+ merge_and_stem(StemmedTwoChildSibs, Stemmed)).
should_merge_single_tree_with_deeper_stem()->
Stemmed = {3, {"1aa", "bar", []}},
TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
?_assertEqual({[TwoChild], internal_node},
- couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+ merge_and_stem([TwoChild], Stemmed)).
should_merge_tree_with_large_stem()->
Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
?_assertEqual({[TwoChild], internal_node},
- couch_key_tree:merge([TwoChild], Stemmed, ?DEPTH)).
+ merge_and_stem([TwoChild], Stemmed)).
should_merge_stems()->
StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
StemmedB = {3, {"1aa", "bar", []}},
?_assertEqual({[StemmedA], internal_node},
- couch_key_tree:merge([StemmedA], StemmedB, ?DEPTH)).
+ merge_and_stem([StemmedA], StemmedB)).
should_create_conflicts_on_merge()->
OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
Stemmed = {3, {"1aa", "bar", []}},
?_assertEqual({[OneChild, Stemmed], new_branch},
- couch_key_tree:merge([OneChild], Stemmed, ?DEPTH)).
+ merge_and_stem([OneChild], Stemmed)).
should_create_no_conflicts_on_merge()->
OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
Stemmed = {3, {"1aa", "bar", []}},
TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
?_assertEqual({[TwoChild], new_leaf},
- couch_key_tree:merge([OneChild, Stemmed], TwoChild, ?DEPTH)).
+ merge_and_stem([OneChild, Stemmed], TwoChild)).
should_ignore_conflicting_branch()->
%% this test is based on couch-902-test-case2.py
@@ -260,7 +244,7 @@ should_ignore_conflicting_branch()->
{
"COUCHDB-902",
?_assertEqual({[FooBar], new_leaf},
- couch_key_tree:merge([Foo], Bar, ?DEPTH))
+ merge_and_stem([Foo], Bar))
}.
should_not_find_missing_leaves()->
@@ -422,3 +406,8 @@ should_return_two_deepest_nodes()->
TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
+
+
+merge_and_stem(RevTree, Tree) ->
+ {Merged, Result} = couch_key_tree:merge(RevTree, Tree),
+ {couch_key_tree:stem(Merged, ?DEPTH), Result}.
diff --git a/src/couch/test/couch_passwords_tests.erl b/src/couch/test/couch_passwords_tests.erl
index a56627361..dea6d6b7b 100644
--- a/src/couch/test/couch_passwords_tests.erl
+++ b/src/couch/test/couch_passwords_tests.erl
@@ -14,6 +14,7 @@
-include_lib("couch/include/couch_eunit.hrl").
+
pbkdf2_test_()->
{"PBKDF2",
[
@@ -51,44 +52,3 @@ pbkdf2_test_()->
{ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
)}}]}.
-
-
-bcrypt_test_() ->
- {
- "Bcrypt",
- {
- setup,
- fun() ->
- test_util:start_applications([bcrypt])
- end,
- fun test_util:stop_applications/1,
- [
- {"Log rounds: 4",
- {timeout, 1, fun bcrypt_logRounds_4/0}},
- {"Log rounds: 5",
- {timeout, 1, fun bcrypt_logRounds_5/0}},
- {"Log rounds: 12",
- {timeout, 5, fun bcrypt_logRounds_12/0}},
- {"Null byte",
- {timeout, 5, fun bcrypt_null_byte/0}}
-
- ]
- }
- }.
-
-bcrypt_logRounds_4() ->
- bcrypt_assert_equal(<<"password">>, 4).
-
-bcrypt_logRounds_5() ->
- bcrypt_assert_equal(<<"password">>, 5).
-
-bcrypt_logRounds_12() ->
- bcrypt_assert_equal(<<"password">>, 12).
-
-bcrypt_null_byte() ->
- bcrypt_assert_equal(<<"passw\0rd">>, 12).
-
-bcrypt_assert_equal(Password, Rounds) when is_integer(Rounds) ->
- HashPass = couch_passwords:bcrypt(Password, Rounds),
- ReHashPass = couch_passwords:bcrypt(Password, HashPass),
- ?assertEqual(HashPass, ReHashPass).
diff --git a/src/couch/test/couchdb_attachments_tests.erl b/src/couch/test/couchdb_attachments_tests.erl
index d9efac551..04859dbc9 100644
--- a/src/couch/test/couchdb_attachments_tests.erl
+++ b/src/couch/test/couchdb_attachments_tests.erl
@@ -21,9 +21,9 @@
-define(ATT_TXT_NAME, <<"file.erl">>).
-define(FIXTURE_PNG, filename:join([?FIXTURESDIR, "logo.png"])).
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
--define(TIMEOUT, 1000).
--define(TIMEOUT_EUNIT, 10).
--define(TIMEWAIT, 100).
+-define(TIMEOUT, 5000).
+-define(TIMEOUT_EUNIT, 100).
+-define(TIMEWAIT, 1000).
-define(i2l(I), integer_to_list(I)).
@@ -208,7 +208,7 @@ should_upload_attachment_with_valid_md5_header({Host, DbName}) ->
Headers = [
{"Content-Length", "34"},
{"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(crypto:hash(md5, Body)))},
+ {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(Body)))},
{"Host", Host}
],
{ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
@@ -224,7 +224,7 @@ should_upload_attachment_by_chunks_with_valid_md5_header({Host, DbName}) ->
Body = [chunked_body([Part1, Part2]), "\r\n"],
Headers = [
{"Content-Type", "text/plain"},
- {"Content-MD5", ?b2l(base64:encode(crypto:hash(md5, AttData)))},
+ {"Content-MD5", ?b2l(base64:encode(couch_hash:md5_hash(AttData)))},
{"Host", Host},
{"Transfer-Encoding", "chunked"}
],
@@ -239,7 +239,7 @@ should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
AttData = <<"We all live in a yellow submarine!">>,
<<Part1:21/binary, Part2:13/binary>> = AttData,
Body = [chunked_body([Part1, Part2]),
- "Content-MD5: ", base64:encode(crypto:hash(md5, AttData)),
+ "Content-MD5: ", base64:encode(couch_hash:md5_hash(AttData)),
"\r\n\r\n"],
Headers = [
{"Content-Type", "text/plain"},
diff --git a/src/couch_epi/src/couch_epi_data.erl b/src/couch_epi/src/couch_epi_data.erl
index 93e39f69d..bbed828bb 100644
--- a/src/couch_epi/src/couch_epi_data.erl
+++ b/src/couch_epi/src/couch_epi_data.erl
@@ -111,4 +111,4 @@ definitions({module, Modules}) ->
hash_of_file(FilePath) ->
{ok, Data} = file:read_file(FilePath),
- crypto:hash(md5, Data).
+ couch_hash:md5_hash(Data).
diff --git a/src/couch_epi/src/couch_epi_util.erl b/src/couch_epi/src/couch_epi_util.erl
index e99db4668..ea4b10ea8 100644
--- a/src/couch_epi/src/couch_epi_util.erl
+++ b/src/couch_epi/src/couch_epi_util.erl
@@ -22,7 +22,7 @@ module_version(Module) ->
VSNs.
hash(Term) ->
- <<SigInt:128/integer>> = crypto:hash(md5, term_to_binary(Term)),
+ <<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
lists:flatten(io_lib:format("\"~.36B\"",[SigInt])).
module_exists(Module) ->
diff --git a/src/couch_event/src/couch_event_os_sup.erl b/src/couch_event/src/couch_event_os_sup.erl
deleted file mode 100644
index f219d0000..000000000
--- a/src/couch_event/src/couch_event_os_sup.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-% This causes an OS process to be spawned which is notified every time a
-% database is updated.
-%
-% The notifications are in the form of the database name sent as a line of
-% text to the OS process's stdout.
-
-
--module(couch_event_os_sup).
--behaviour(supervisor).
--behaviour(config_listener).
-
--vsn(2).
-
--export([
- start_link/0,
- init/1
-]).
-
--export([
- handle_config_change/5,
- handle_config_terminate/3
-]).
-
-
-start_link() ->
- supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
-
-init([]) ->
- UpdateNotifierExes = config:get("update_notification"),
- Children = [
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, nil]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- }
- | [child(Id, Exe) || {Id, Exe} <- UpdateNotifierExes]],
-
- {ok, {
- {one_for_one, 10, 3600},
- Children
- }}.
-
-
-handle_config_change("update_notification", Id, deleted, _, _) ->
- supervisor:terminate_child(?MODULE, Id),
- supervisor:delete_child(?MODULE, Id),
- {ok, nil};
-handle_config_change("update_notification", Id, Exe, _, _) when is_list(Exe) ->
- supervisor:start_child(?MODULE, child(Id, Exe)),
- {ok, nil};
-handle_config_change(_, _, _, _, _) ->
- {ok, nil}.
-
-handle_config_terminate(_Server, _Reason, _State) ->
- ok.
-
-child(Id, Arg) ->
- {
- Id,
- {couch_event_os_listener, start_link, [Arg]},
- permanent,
- 1000,
- supervisor,
- [couch_event_os_listener]
- }.
diff --git a/src/couch_event/src/couch_event_sup2.erl b/src/couch_event/src/couch_event_sup2.erl
index 36fbe542e..2d88b93d4 100644
--- a/src/couch_event/src/couch_event_sup2.erl
+++ b/src/couch_event/src/couch_event_sup2.erl
@@ -38,13 +38,6 @@ init(_) ->
5000,
worker,
[couch_event_server]
- },
- {couch_event_os_sup,
- {couch_event_os_sup, start_link, []},
- permanent,
- 5000,
- supervisor,
- [couch_event_os_sup]
}
],
{ok, {{one_for_one, 5, 10}, Children}}.
diff --git a/src/couch_index/test/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/couch_index_ddoc_updated_tests.erl
index 40dadcc62..0e23adf91 100644
--- a/src/couch_index/test/couch_index_ddoc_updated_tests.erl
+++ b/src/couch_index/test/couch_index_ddoc_updated_tests.erl
@@ -118,7 +118,7 @@ fake_index() ->
(idx_name, {_DbName, DDoc}) ->
DDoc#doc.id;
(signature, {_DbName, DDoc}) ->
- crypto:hash(md5, term_to_binary(DDoc));
+ couch_hash:md5_hash(term_to_binary(DDoc));
(update_seq, Seq) ->
Seq
end),
diff --git a/src/couch_log/src/couch_log_server.erl b/src/couch_log/src/couch_log_server.erl
index be44af8ff..ea5def891 100644
--- a/src/couch_log/src/couch_log_server.erl
+++ b/src/couch_log/src/couch_log_server.erl
@@ -58,6 +58,7 @@ log(Entry) ->
init(_) ->
+ couch_util:set_mqd_off_heap(),
process_flag(trap_exit, true),
{ok, #st{
writer = couch_log_writer:init()
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index a099f377e..533dd2de9 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -41,6 +41,7 @@
user_acc,
last_go=ok,
reduce_fun,
+ finalizer,
update_seq,
args
}).
@@ -184,6 +185,8 @@ validate(DbName, DDoc) ->
ok;
({_RedName, <<"_stats", _/binary>>}) ->
ok;
+ ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
+ ok;
({_RedName, <<"_", _/binary>> = Bad}) ->
Msg = ["`", Bad, "` is not a supported reduce function."],
throw({invalid_design_doc, Msg});
@@ -217,7 +220,7 @@ query_all_docs(Db, Args, Callback, Acc) when is_list(Args) ->
query_all_docs(Db, Args0, Callback, Acc) ->
Sig = couch_util:with_db(Db, fun(WDb) ->
{ok, Info} = couch_db:get_db_info(WDb),
- couch_index_util:hexsig(crypto:hash(md5, term_to_binary(Info)))
+ couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Info)))
end),
Args1 = Args0#mrargs{view_type=map},
Args2 = couch_mrview_util:validate_args(Args1),
@@ -577,7 +580,14 @@ map_fold(#doc{id = <<"_local/", _/binary>>} = Doc, _Offset, #mracc{} = Acc) ->
last_go=Go
}}.
-red_fold(Db, {_Nth, _Lang, View}=RedView, Args, Callback, UAcc) ->
+red_fold(Db, {NthRed, _Lang, View}=RedView, Args, Callback, UAcc) ->
+ Finalizer = case couch_util:get_value(finalizer, Args#mrargs.extra) of
+ undefined ->
+ {_, FunSrc} = lists:nth(NthRed, View#mrview.reduce_funs),
+ FunSrc;
+        CustomFun ->
+ CustomFun
+ end,
Acc = #mracc{
db=Db,
total_rows=null,
@@ -587,6 +597,7 @@ red_fold(Db, {_Nth, _Lang, View}=RedView, Args, Callback, UAcc) ->
callback=Callback,
user_acc=UAcc,
update_seq=View#mrview.update_seq,
+ finalizer=Finalizer,
args=Args
},
Grouping = {key_group_level, Args#mrargs.group_level},
@@ -618,41 +629,50 @@ red_fold(_Key, _Red, #mracc{limit=0} = Acc) ->
{stop, Acc};
red_fold(_Key, Red, #mracc{group_level=0} = Acc) ->
#mracc{
+ finalizer=Finalizer,
limit=Limit,
callback=Callback,
user_acc=UAcc0
} = Acc,
- Row = [{key, null}, {value, Red}],
+ Row = [{key, null}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
{Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
red_fold(Key, Red, #mracc{group_level=exact} = Acc) ->
#mracc{
+ finalizer=Finalizer,
limit=Limit,
callback=Callback,
user_acc=UAcc0
} = Acc,
- Row = [{key, Key}, {value, Red}],
+ Row = [{key, Key}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
{Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0, is_list(K) ->
#mracc{
+ finalizer=Finalizer,
limit=Limit,
callback=Callback,
user_acc=UAcc0
} = Acc,
- Row = [{key, lists:sublist(K, I)}, {value, Red}],
+ Row = [{key, lists:sublist(K, I)}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
{Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0 ->
#mracc{
+ finalizer=Finalizer,
limit=Limit,
callback=Callback,
user_acc=UAcc0
} = Acc,
- Row = [{key, K}, {value, Red}],
+ Row = [{key, K}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
{Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}}.
+maybe_finalize(Red, null) ->
+ Red;
+maybe_finalize(Red, RedSrc) ->
+ {ok, Finalized} = couch_query_servers:finalize(RedSrc, Red),
+ Finalized.
finish_fold(#mracc{last_go=ok, update_seq=UpdateSeq}=Acc, ExtraMeta) ->
#mracc{callback=Callback, user_acc=UAcc, args=Args}=Acc,
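
The finalizer plumbing above splits a reducer into an intermediate state that shards can rereduce cheaply and a user-visible value computed once per output row, which is what makes a reducer like _approx_count_distinct workable across shards. A standalone sketch of the same shape, with ordsets standing in for the real sketch-style state and a fun standing in for couch_query_servers:finalize/2:

-module(finalize_sketch).
-export([demo/0]).

%% Mirrors the maybe_finalize/2 clause added above: a null finalizer
%% passes the reduction through unchanged.
maybe_finalize(Red, null) ->
    Red;
maybe_finalize(Red, Finalizer) ->
    Finalizer(Red).

demo() ->
    %% Shards merge intermediate states...
    Shard1 = ordsets:from_list([a, b, c]),
    Shard2 = ordsets:from_list([b, c, d]),
    Merged = ordsets:union(Shard1, Shard2),
    %% ...and only the coordinator finalizes to the user-visible count.
    4 = maybe_finalize(Merged, fun ordsets:size/1),
    Merged = maybe_finalize(Merged, null),
    ok.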
diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
index e9be89c71..3ef11805f 100644
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ b/src/couch_mrview/src/couch_mrview_compactor.erl
@@ -233,6 +233,8 @@ compact_view(#mrview{id_num=VID}=View, EmptyView, BufferSize, Acc0) ->
{EmptyView#mrview{btree=NewBt,
seq_btree=NewSeqBt,
+ update_seq=View#mrview.update_seq,
+ purge_seq=View#mrview.purge_seq,
key_byseq_btree=NewKeyBySeqBt}, FinalAcc}.
compact_view_btree(Btree, EmptyBtree, VID, BufferSize, Acc0) ->
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
index aa1ee2741..5d285d639 100644
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ b/src/couch_mrview/src/couch_mrview_index.erl
@@ -61,13 +61,14 @@ get(info, State) ->
} = State,
{ok, FileSize} = couch_file:bytes(Fd),
{ok, ExternalSize} = couch_mrview_util:calculate_external_size(Views),
+ {ok, ActiveViewSize} = couch_mrview_util:calculate_active_size(Views),
LogBtSize = case LogBtree of
nil ->
0;
_ ->
couch_btree:size(LogBtree)
end,
- ActiveSize = couch_btree:size(IdBtree) + LogBtSize + ExternalSize,
+ ActiveSize = couch_btree:size(IdBtree) + LogBtSize + ActiveViewSize,
UpdateOptions0 = get(update_options, State),
UpdateOptions = [atom_to_binary(O, latin1) || O <- UpdateOptions0],
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index 0c6e5fc88..120a9b873 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -23,6 +23,7 @@
-export([fold/4, fold_reduce/4]).
-export([temp_view_to_ddoc/1]).
-export([calculate_external_size/1]).
+-export([calculate_active_size/1]).
-export([validate_args/1]).
-export([maybe_load_doc/3, maybe_load_doc/4]).
-export([maybe_update_index_file/1]).
@@ -156,7 +157,7 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
keyseq_indexed=KeySeqIndexed
},
SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
- {ok, IdxState#mrst{sig=crypto:hash(md5, term_to_binary(SigInfo))}}.
+ {ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
set_view_type(_Args, _ViewName, []) ->
@@ -202,7 +203,7 @@ view_sig(Db, State, View, #mrargs{include_docs=true}=Args) ->
keyseq_indexed=KeySeqIndexed
} = State,
Term = view_sig_term(BaseSig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed),
- couch_index_util:hexsig(crypto:hash(md5, term_to_binary(Term)));
+ couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term)));
view_sig(Db, State, {_Nth, _Lang, View}, Args) ->
view_sig(Db, State, View, Args);
view_sig(_Db, State, View, Args0) ->
@@ -216,7 +217,7 @@ view_sig(_Db, State, View, Args0) ->
extra=[]
},
Term = view_sig_term(Sig, UpdateSeq, PurgeSeq, KeySeqIndexed, SeqIndexed, Args),
- couch_index_util:hexsig(crypto:hash(md5, term_to_binary(Term))).
+ couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term))).
view_sig_term(BaseSig, UpdateSeq, PurgeSeq, false, false) ->
{BaseSig, UpdateSeq, PurgeSeq};
@@ -830,6 +831,22 @@ calculate_external_size(Views) ->
{ok, lists:foldl(SumFun, 0, Views)}.
+calculate_active_size(Views) ->
+ BtSize = fun
+ (nil) -> 0;
+ (Bt) -> couch_btree:size(Bt)
+ end,
+ FoldFun = fun(View, Acc) ->
+ Sizes = [
+ BtSize(View#mrview.btree),
+ BtSize(View#mrview.seq_btree),
+ BtSize(View#mrview.key_byseq_btree)
+ ],
+ Acc + lists:sum([S || S <- Sizes, is_integer(S)])
+ end,
+ {ok, lists:foldl(FoldFun, 0, Views)}.
+
+
sum_btree_sizes(nil, _) ->
0;
sum_btree_sizes(_, nil) ->
@@ -991,7 +1008,7 @@ sig_vsn_12x(State) ->
{ViewInfo, State#mrst.language, State#mrst.design_opts,
couch_index_util:sort_lib(State#mrst.lib)}
end,
- crypto:hash(md5, term_to_binary(SigData)).
+ couch_hash:md5_hash(term_to_binary(SigData)).
old_view_format(View) ->
{
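
calculate_active_size/1 above sums a view's btrees while treating absent ones as zero-sized. A runnable model of the same fold, with maps standing in for #mrview{} records and plain integers standing in for couch_btree handles:

-module(active_size_sketch).
-export([demo/0]).

bt_size(nil) -> 0;
bt_size(Size) when is_integer(Size) -> Size.

active_size(Views) ->
    lists:foldl(fun(View, Acc) ->
        Acc + bt_size(maps:get(btree, View))
            + bt_size(maps:get(seq_btree, View))
            + bt_size(maps:get(key_byseq_btree, View))
    end, 0, Views).

demo() ->
    Views = [
        #{btree => 100, seq_btree => nil, key_byseq_btree => nil},
        #{btree => 250, seq_btree => 40,  key_byseq_btree => nil}
    ],
    390 = active_size(Views),
    ok.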
diff --git a/src/couch_mrview/test/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
index c994df9d3..efa03e7c0 100644
--- a/src/couch_mrview/test/couch_mrview_index_info_tests.erl
+++ b/src/couch_mrview/test/couch_mrview_index_info_tests.erl
@@ -18,14 +18,13 @@
-define(TIMEOUT, 1000).
--ifdef(run_broken_tests).
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
{ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
{Db, Info}.
+
teardown({Db, _}) ->
couch_db:close(Db),
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
@@ -37,39 +36,86 @@ view_info_test_() ->
"Views index tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
- fun should_get_property/1
+ fun sig_is_binary/1,
+ fun language_is_js/1,
+ fun file_size_is_non_neg_int/1,
+ fun active_size_is_non_neg_int/1,
+ fun external_size_is_non_neg_int/1,
+ fun disk_size_is_file_size/1,
+ fun data_size_is_external_size/1,
+ fun active_size_less_than_file_size/1,
+ fun update_seq_is_non_neg_int/1,
+ fun purge_seq_is_non_neg_int/1,
+ fun update_opts_is_bin_list/1
]
}
}
}.
-should_get_property({_, Info}) ->
- InfoProps = [
- {signature, <<"276df562b152b3c4e5d34024f62672ed">>},
- {language, <<"javascript">>},
- {disk_size, 314},
- {data_size, 263},
- {update_seq, 11},
- {purge_seq, 0},
- {updater_running, false},
- {compact_running, false},
- {waiting_clients, 0}
- ],
- [
- {atom_to_list(Key), ?_assertEqual(Val, getval(Key, Info))}
- || {Key, Val} <- InfoProps
- ].
+sig_is_binary({_, Info}) ->
+ ?_assert(is_binary(prop(signature, Info))).
+
+
+language_is_js({_, Info}) ->
+ ?_assertEqual(<<"javascript">>, prop(language, Info)).
+
+
+file_size_is_non_neg_int({_, Info}) ->
+ ?_assert(check_non_neg_int([sizes, file], Info)).
+
+
+active_size_is_non_neg_int({_, Info}) ->
+ ?_assert(check_non_neg_int([sizes, active], Info)).
+
+
+external_size_is_non_neg_int({_, Info}) ->
+ ?_assert(check_non_neg_int([sizes, external], Info)).
+
+
+disk_size_is_file_size({_, Info}) ->
+ ?_assertEqual(prop([sizes, file], Info), prop(disk_size, Info)).
+
+
+data_size_is_external_size({_, Info}) ->
+ ?_assertEqual(prop([sizes, external], Info), prop(data_size, Info)).
+
+
+active_size_less_than_file_size({_, Info}) ->
+ ?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)).
+
+
+update_seq_is_non_neg_int({_, Info}) ->
+ ?_assert(check_non_neg_int(update_seq, Info)).
+
+
+purge_seq_is_non_neg_int({_, Info}) ->
+ ?_assert(check_non_neg_int(purge_seq, Info)).
+
+
+update_opts_is_bin_list({_, Info}) ->
+ Opts = prop(update_options, Info),
+ ?_assert(is_list(Opts) andalso
+ (Opts == [] orelse lists:all(fun is_binary/1, Opts))).
-getval(Key, PL) ->
- {value, {Key, Val}} = lists:keysearch(Key, 1, PL),
- Val.
+check_non_neg_int(Key, Info) ->
+ Size = prop(Key, Info),
+ is_integer(Size) andalso Size >= 0.
--endif.
+prop(Key, {Props}) when is_list(Props) ->
+ prop(Key, Props);
+prop([Key], Info) ->
+ prop(Key, Info);
+prop([Key | Rest], Info) ->
+ prop(Rest, prop(Key, Info));
+prop(Key, Info) when is_atom(Key), is_list(Info) ->
+ couch_util:get_value(Key, Info).
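
The prop/2 helper above resolves a key, or a key path such as [sizes, file], against the nested {Props} tuples the info endpoint returns. The same logic runs standalone if proplists:get_value/2 stands in for couch_util:get_value/2:

-module(prop_sketch).
-export([demo/0]).

prop(Key, {Props}) when is_list(Props) -> prop(Key, Props);
prop([Key], Info) -> prop(Key, Info);
prop([Key | Rest], Info) -> prop(Rest, prop(Key, Info));
prop(Key, Info) when is_atom(Key), is_list(Info) ->
    proplists:get_value(Key, Info).

demo() ->
    Info = [{sizes, {[{file, 314}, {active, 200}]}}, {update_seq, 11}],
    314 = prop([sizes, file], Info),
    11 = prop(update_seq, Info),
    ok.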
diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl
index 60273fc32..7f51cdd1c 100644
--- a/src/couch_replicator/src/couch_replicator_auth.erl
+++ b/src/couch_replicator/src/couch_replicator_auth.erl
@@ -28,7 +28,7 @@
-type code() :: non_neg_integer().
--define(DEFAULT_PLUGINS, "couch_replicator_auth_noop").
+-define(DEFAULT_PLUGINS, "couch_replicator_auth_session,couch_replicator_auth_noop").
% Behavior API
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index e8faf8ea3..e10b98082 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -112,7 +112,7 @@ maybe_append_filters(Base,
{error, FilterParseError} ->
throw({error, FilterParseError})
end,
- couch_util:to_hex(crypto:hash(md5, term_to_binary(Base2))).
+ couch_util:to_hex(couch_hash:md5_hash(term_to_binary(Base2))).
maybe_append_options(Options, RepOptions) ->
diff --git a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
index b2445a236..eee5b1647 100644
--- a/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_many_leaves_tests.erl
@@ -141,7 +141,7 @@ add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
Value = ?l2b(?i2l(NumLeaves)),
- Rev = crypto:hash(md5, Value),
+ Rev = couch_hash:md5_hash(Value),
Doc = #doc{
id = DocId,
revs = {1, [Rev]},
diff --git a/src/couch_replicator/test/couch_replicator_test_helper.erl b/src/couch_replicator/test/couch_replicator_test_helper.erl
index 8ee2114f0..fd0409164 100644
--- a/src/couch_replicator/test/couch_replicator_test_helper.erl
+++ b/src/couch_replicator/test/couch_replicator_test_helper.erl
@@ -93,16 +93,16 @@ find_att([Att | Rest], Name) ->
att_md5(Att) ->
Md50 = couch_att:foldl(
Att,
- fun(Chunk, Acc) -> crypto:hash_update(Acc, Chunk) end,
- crypto:hash_init(md5)),
- crypto:hash_final(Md50).
+ fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
+ couch_hash:md5_hash_init()),
+ couch_hash:md5_hash_final(Md50).
att_decoded_md5(Att) ->
Md50 = couch_att:foldl_decode(
Att,
- fun(Chunk, Acc) -> crypto:hash_update(Acc, Chunk) end,
- crypto:hash_init(md5)),
- crypto:hash_final(Md50).
+ fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
+ couch_hash:md5_hash_init()),
+ couch_hash:md5_hash_final(Md50).
db_url(DbName) ->
iolist_to_binary([
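
The att_md5/1 rewrite above keeps the fold-over-chunks shape and only swaps the hash calls. As a self-contained check of that shape (stdlib crypto only, so it runs without the couch_hash wrapper), the streamed digest equals the one-shot digest:

-module(att_md5_sketch).
-export([demo/0]).

demo() ->
    Chunks = [<<"chunk-1">>, <<"chunk-2">>, <<"chunk-3">>],
    %% Same fold as att_md5/1 above, against crypto directly.
    Ctx = lists:foldl(
        fun(Chunk, Acc) -> crypto:hash_update(Acc, Chunk) end,
        crypto:hash_init(md5),
        Chunks),
    Digest = crypto:hash_final(Ctx),
    Digest = crypto:hash(md5, iolist_to_binary(Chunks)),
    ok.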
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index e94934d04..248a76dc8 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -87,6 +87,7 @@ refresh(DbName, DDocIds) ->
init(_) ->
+ couch_util:set_mqd_off_heap(),
process_flag(trap_exit, true),
BaseOpts = [public, named_table],
CacheOpts = [
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index 4a0727131..f5c793736 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -271,13 +271,14 @@ purge_docs(_DbName, _IdsRevs) ->
not_implemented.
%% @doc spawns a process to upload attachment data and
-%% returns a function that shards can use to communicate
-%% with the spawned middleman process
+%% returns a fabric attachment receiver context tuple
+%% containing the spawned middleman process, or an empty
+%% binary; exits with an error tuple {Error, Arg} on failure
-spec att_receiver(#httpd{}, Length :: undefined | chunked | pos_integer() |
{unknown_transfer_encoding, any()}) ->
- function() | binary().
+ {fabric_attachment_receiver, pid(), chunked | pos_integer()} | binary().
att_receiver(Req, Length) ->
- fabric_doc_attachments:receiver(Req, Length).
+ fabric_doc_atts:receiver(Req, Length).
%% @equiv all_docs(DbName, [], Callback, Acc0, QueryArgs)
all_docs(DbName, Callback, Acc, QueryArgs) ->
diff --git a/src/fabric/src/fabric_db_delete.erl b/src/fabric/src/fabric_db_delete.erl
index 9ba55fbb8..c146cb6cd 100644
--- a/src/fabric/src/fabric_db_delete.erl
+++ b/src/fabric/src/fabric_db_delete.erl
@@ -79,12 +79,12 @@ maybe_stop(W, Counters) ->
case {Ok + NotFound, Ok, NotFound} of
{W, 0, W} ->
{#shard{dbname=Name}, _} = hd(Counters),
couch_log:warning("~p not_found ~s", [?MODULE, Name]),
{stop, not_found};
{W, _, _} ->
{stop, ok};
- {N, M, _} when N >= (W div 2 + 1), M > 0 ->
- {stop, accepted};
+ {_, M, _} when M > 0 ->
+ {stop, accepted};
_ ->
{error, internal_server_error}
end
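
For reference, the relaxed acknowledgement rule above isolated as a pure function over the reply counts; a sketch only, with W as the write quorum and Ok/NotFound the replies received so far:

decide(W, Ok, NotFound) ->
    case {Ok + NotFound, Ok, NotFound} of
        {W, 0, W} -> {stop, not_found};            % all W replied, none had the db
        {W, _, _} -> {stop, ok};                   % a full quorum of replies
        {_, M, _} when M > 0 -> {stop, accepted};  % any success short of quorum
        _ -> {error, internal_server_error}
    end.

The behavioural change is the third clause: any successful shard reply now yields accepted (HTTP 202) instead of requiring a majority of replies.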
diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl
new file mode 100644
index 000000000..7ef5dd893
--- /dev/null
+++ b/src/fabric/src/fabric_doc_atts.erl
@@ -0,0 +1,168 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_doc_atts).
+
+-include_lib("fabric/include/fabric.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-export([
+ receiver/2,
+ receiver_callback/2
+]).
+
+
+receiver(_Req, undefined) ->
+ <<"">>;
+receiver(_Req, {unknown_transfer_encoding, Unknown}) ->
+ exit({unknown_transfer_encoding, Unknown});
+receiver(Req, chunked) ->
+ MiddleMan = spawn(fun() -> middleman(Req, chunked) end),
+ {fabric_attachment_receiver, MiddleMan, chunked};
+receiver(_Req, 0) ->
+ <<"">>;
+receiver(Req, Length) when is_integer(Length) ->
+ maybe_send_continue(Req),
+ Middleman = spawn(fun() -> middleman(Req, Length) end),
+ {fabric_attachment_receiver, Middleman, Length};
+receiver(_Req, Length) ->
+ exit({length_not_integer, Length}).
+
+
+receiver_callback(Middleman, chunked) ->
+ fun(4096, ChunkFun, State) ->
+ write_chunks(Middleman, ChunkFun, State)
+ end;
+receiver_callback(Middleman, Length) when is_integer(Length) ->
+ fun() ->
+ Middleman ! {self(), gimme_data},
+ Timeout = fabric_util:attachments_timeout(),
+ receive
+ {Middleman, Data} ->
+ rexi:reply(attachment_chunk_received),
+ Data
+ after Timeout ->
+ exit(timeout)
+ end
+ end.
+
+
+%%
+%% internal
+%%
+
+maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
+ case couch_httpd:header_value(Req, "expect") of
+ undefined ->
+ ok;
+ Expect ->
+ case string:to_lower(Expect) of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _ ->
+ ok
+ end
+ end.
+
+write_chunks(MiddleMan, ChunkFun, State) ->
+ MiddleMan ! {self(), gimme_data},
+ Timeout = fabric_util:attachments_timeout(),
+ receive
+ {MiddleMan, ChunkRecordList} ->
+ rexi:reply(attachment_chunk_received),
+ case flush_chunks(ChunkRecordList, ChunkFun, State) of
+ {continue, NewState} ->
+ write_chunks(MiddleMan, ChunkFun, NewState);
+ {done, NewState} ->
+ NewState
+ end
+ after Timeout ->
+ exit(timeout)
+ end.
+
+flush_chunks([], _ChunkFun, State) ->
+ {continue, State};
+flush_chunks([{0, _}], _ChunkFun, State) ->
+ {done, State};
+flush_chunks([Chunk | Rest], ChunkFun, State) ->
+ NewState = ChunkFun(Chunk, State),
+ flush_chunks(Rest, ChunkFun, NewState).
+
+receive_unchunked_attachment(_Req, 0) ->
+ ok;
+receive_unchunked_attachment(Req, Length) ->
+ receive {MiddleMan, go} ->
+ Data = couch_httpd:recv(Req, 0),
+ MiddleMan ! {self(), Data}
+ end,
+ receive_unchunked_attachment(Req, Length - size(Data)).
+
+middleman(Req, chunked) ->
+ % spawn a process to actually receive the uploaded data
+ RcvFun = fun(ChunkRecord, ok) ->
+ receive {From, go} -> From ! {self(), ChunkRecord} end, ok
+ end,
+ Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
+
+ % take requests from the DB writers and get data from the receiver
+ N = erlang:list_to_integer(config:get("cluster","n")),
+ Timeout = fabric_util:attachments_timeout(),
+ middleman_loop(Receiver, N, [], [], Timeout);
+
+middleman(Req, Length) ->
+ Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
+ N = erlang:list_to_integer(config:get("cluster","n")),
+ Timeout = fabric_util:attachments_timeout(),
+ middleman_loop(Receiver, N, [], [], Timeout).
+
+middleman_loop(Receiver, N, Counters0, ChunkList0, Timeout) ->
+ receive {From, gimme_data} ->
+ % Figure out how far along this writer (From) is in the list
+ ListIndex = case fabric_dict:lookup_element(From, Counters0) of
+ undefined -> 0;
+ I -> I
+ end,
+
+ % Talk to the receiver to get another chunk if necessary
+ ChunkList1 = if ListIndex == length(ChunkList0) ->
+ Receiver ! {self(), go},
+ receive
+ {Receiver, ChunkRecord} ->
+ ChunkList0 ++ [ChunkRecord]
+ end;
+ true -> ChunkList0 end,
+
+ % reply to the writer
+ Reply = lists:nthtail(ListIndex, ChunkList1),
+ From ! {self(), Reply},
+
+ % Update the counter for this writer
+ Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
+
+ % Drop any chunks that have been sent to all writers
+ Size = fabric_dict:size(Counters1),
+ NumToDrop = lists:min([I || {_, I} <- Counters1]),
+
+ {ChunkList3, Counters3} =
+ if Size == N andalso NumToDrop > 0 ->
+ ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
+ Counters2 = [{F, I-NumToDrop} || {F, I} <- Counters1],
+ {ChunkList2, Counters2};
+ true ->
+ {ChunkList1, Counters1}
+ end,
+
+ middleman_loop(Receiver, N, Counters3, ChunkList3, Timeout)
+ after Timeout ->
+ exit(Receiver, kill),
+ ok
+ end.
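
The buffer management in middleman_loop/5 can be read in isolation: Counters maps each writer to the number of chunks it has consumed, and a chunk can be dropped once every one of the N writers has consumed it. A runnable model of just that trimming step (module and function names are mine, not the module's):

-module(chunk_window_sketch).
-export([demo/0]).

trim(ChunkList, Counters, N) when length(Counters) == N ->
    NumToDrop = lists:min([I || {_, I} <- Counters]),
    if
        NumToDrop > 0 ->
            {lists:nthtail(NumToDrop, ChunkList),
             [{F, I - NumToDrop} || {F, I} <- Counters]};
        true ->
            {ChunkList, Counters}
    end;
trim(ChunkList, Counters, _N) ->
    {ChunkList, Counters}.

demo() ->
    %% Both writers have consumed chunks a and b, so those are dropped
    %% and the per-writer offsets are re-based.
    {[c], [{w1, 0}, {w2, 1}]} = trim([a, b, c], [{w1, 2}, {w2, 3}], 2),
    ok.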
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 4a69e7ea1..60526f495 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -142,8 +142,9 @@ reduce_view(DbName, DDoc, ViewName, Args0, DbOptions) ->
couch_mrview:query_view(Db, DDoc, ViewName, Args, fun reduce_cb/2, VAcc0).
fix_skip_and_limit(Args) ->
- #mrargs{skip=Skip, limit=Limit}=Args,
- Args#mrargs{skip=0, limit=Skip+Limit}.
+ #mrargs{skip=Skip, limit=Limit, extra=Extra}=Args,
+ % the coordinator needs to finalize each row, so make sure the shards don't
+ Args#mrargs{skip=0, limit=Skip+Limit, extra=[{finalizer,null} | Extra]}.
create_db(DbName) ->
create_db(DbName, []).
@@ -439,6 +440,8 @@ make_att_reader({follows, Parser, Ref}) ->
throw({mp_parser_died, Reason})
end
end;
+make_att_reader({fabric_attachment_receiver, Middleman, Length}) ->
+ fabric_doc_atts:receiver_callback(Middleman, Length);
make_att_reader(Else) ->
Else.
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index dd0fcfd8b..69f42909a 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -230,8 +230,9 @@ get_next_row(#collector{reducer = RedSrc} = St) when RedSrc =/= undefined ->
end, Counters0, Records),
Wrapped = [[V] || #view_row{value=V} <- Records],
{ok, [Reduced]} = couch_query_servers:rereduce(Lang, [RedSrc], Wrapped),
+ {ok, Finalized} = couch_query_servers:finalize(RedSrc, Reduced),
NewSt = St#collector{keys=RestKeys, rows=NewRowDict, counters=Counters},
- {#view_row{key=Key, id=reduced, value=Reduced}, NewSt};
+ {#view_row{key=Key, id=reduced, value=Finalized}, NewSt};
error ->
get_next_row(St#collector{keys=RestKeys})
end;
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index 5108d36b2..5d2ea717d 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -48,18 +48,12 @@
create(Db, Selector0, Opts) ->
Selector = mango_selector:normalize(Selector0),
UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts),
- case length(UsableIndexes) of
- 0 ->
- AllDocs = mango_idx:special(Db),
- create_cursor(Db, AllDocs, Selector, Opts);
- _ ->
- case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
- [] ->
- % use_index doesn't match a valid index - fall back to a valid one
- create_cursor(Db, UsableIndexes, Selector, Opts);
- UserSpecifiedIndex ->
- create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
- end
+ case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
+ [] ->
+ % use_index doesn't match a valid index - fall back to a valid one
+ create_cursor(Db, UsableIndexes, Selector, Opts);
+ UserSpecifiedIndex ->
+ create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
end.
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 1e2108b7d..dbea36e77 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -70,7 +70,8 @@ explain(Cursor) ->
{end_key, maybe_replace_max_json(Args#mrargs.end_key)},
{direction, Args#mrargs.direction},
{stable, Args#mrargs.stable},
- {update, Args#mrargs.update}
+ {update, Args#mrargs.update},
+ {conflicts, Args#mrargs.conflicts}
]}}].
@@ -283,9 +284,8 @@ apply_opts([{r, RStr} | Rest], Args) ->
NewArgs = Args#mrargs{include_docs = IncludeDocs},
apply_opts(Rest, NewArgs);
apply_opts([{conflicts, true} | Rest], Args) ->
- % I need to patch things so that views can specify
- % parameters when loading the docs from disk
- apply_opts(Rest, Args);
+ NewArgs = Args#mrargs{conflicts = true},
+ apply_opts(Rest, NewArgs);
apply_opts([{conflicts, false} | Rest], Args) ->
% Ignored cause default
apply_opts(Rest, Args);
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index ad665e2f3..b2bbb392a 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -308,7 +308,7 @@ info(mango_sort, {invalid_sort_json, BadSort}) ->
{
400,
<<"invalid_sort_json">>,
- fmt("Sort must be an array of sort specs, not: ~w", [BadSort])
+ fmt("Sort must be an array of sort specs, not: ~p", [BadSort])
};
info(mango_sort, {invalid_sort_dir, BadSpec}) ->
{
@@ -320,7 +320,7 @@ info(mango_sort, {invalid_sort_field, BadField}) ->
{
400,
<<"invalid_sort_field">>,
- fmt("Invalid sort field: ~w", [BadField])
+ fmt("Invalid sort field: ~p", [BadField])
};
info(mango_sort, {unsupported, mixed_sort}) ->
{
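
The switch from ~w to ~p in these messages is about readability: ~w writes terms with no pretty-printing, so binaries surface as raw byte lists, while ~p applies printable heuristics. Expected shell behaviour:

1> io:format("~w~n", [<<"age">>]).
<<97,103,101>>
ok
2> io:format("~p~n", [<<"age">>]).
<<"age">>
ok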
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index ea5949c02..8af92b946 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -66,13 +66,12 @@ get_usable_indexes(Db, Selector, Opts) ->
SortFields = get_sort_fields(Opts),
UsableFilter = fun(I) -> is_usable(I, Selector, SortFields) end,
- UsableIndexes1 = lists:filter(UsableFilter, UsableIndexes0),
- case maybe_filter_by_sort_fields(UsableIndexes1, SortFields) of
- {ok, SortIndexes} ->
- SortIndexes;
- {error, no_usable_index} ->
- ?MANGO_ERROR({no_usable_index, missing_sort_index})
+ case lists:filter(UsableFilter, UsableIndexes0) of
+ [] ->
+ ?MANGO_ERROR({no_usable_index, missing_sort_index});
+ UsableIndexes ->
+ UsableIndexes
end.
@@ -100,31 +99,6 @@ get_sort_fields(Opts) ->
end.
-maybe_filter_by_sort_fields(Indexes, []) ->
- {ok, Indexes};
-
-maybe_filter_by_sort_fields(Indexes, SortFields) ->
- FilterFun = fun(Idx) ->
- Cols = mango_idx:columns(Idx),
- case {mango_idx:type(Idx), Cols} of
- {_, all_fields} ->
- true;
- {<<"text">>, _} ->
- sets:is_subset(sets:from_list(SortFields), sets:from_list(Cols));
- {<<"json">>, _} ->
- lists:prefix(SortFields, Cols);
- {<<"special">>, _} ->
- lists:prefix(SortFields, Cols)
- end
- end,
- case lists:filter(FilterFun, Indexes) of
- [] ->
- {error, no_usable_index};
- FilteredIndexes ->
- {ok, FilteredIndexes}
- end.
-
-
new(Db, Opts) ->
Def = get_idx_def(Opts),
Type = get_idx_type(Opts),
diff --git a/src/mango/src/mango_idx_special.erl b/src/mango/src/mango_idx_special.erl
index 12da1cbe5..ac6efc707 100644
--- a/src/mango/src/mango_idx_special.erl
+++ b/src/mango/src/mango_idx_special.erl
@@ -63,9 +63,11 @@ columns(#idx{def=all_docs}) ->
[<<"_id">>].
-is_usable(#idx{def=all_docs}, Selector, _) ->
+is_usable(#idx{def=all_docs}, _Selector, []) ->
+ true;
+is_usable(#idx{def=all_docs} = Idx, Selector, SortFields) ->
Fields = mango_idx_view:indexable_fields(Selector),
- lists:member(<<"_id">>, Fields).
+ lists:member(<<"_id">>, Fields) and can_use_sort(Idx, SortFields, Selector).
start_key([{'$gt', Key, _, _}]) ->
@@ -96,3 +98,10 @@ end_key([{_, _, '$lte', Key}]) ->
end_key([{'$eq', Key, '$eq', Key}]) ->
false = mango_json:special(Key),
Key.
+
+
+can_use_sort(_Idx, [], _Selector) ->
+ true;
+can_use_sort(Idx, SortFields, _Selector) ->
+ Cols = columns(Idx),
+ lists:prefix(SortFields, Cols).
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
index 8956b27b0..2d784b638 100644
--- a/src/mango/src/mango_idx_view.erl
+++ b/src/mango/src/mango_idx_view.erl
@@ -131,7 +131,8 @@ is_usable(Idx, Selector, SortFields) ->
[<<"_id">>, <<"_rev">>]),
mango_selector:has_required_fields(Selector, RequiredFields2)
- andalso not is_text_search(Selector).
+ andalso not is_text_search(Selector)
+ andalso can_use_sort(RequiredFields, SortFields, Selector).
is_text_search({[]}) ->
@@ -511,3 +512,30 @@ range_pos(Low, Arg, High) ->
max
end
end.
+
+
+% can_use_sort/3 works as follows:
+%
+% * If there are no sort fields, we can use this index.
+% * If we run out of index columns, we can't use this index.
+% * If the current column starts the sort, the index is usable iff the
+%   sort fields are a prefix of the remaining columns.
+% * If the current column is constant, drop it and continue; otherwise
+%   return false.
+%
+% A constant column is one that can't affect the sort order,
+% for example {a: {$eq: 21}}.
+%
+% Currently we only look at constant fields that are prefixes of the sort
+% fields set by the user. We considered also allowing constant fields after
+% the sort fields, but were not 100% sure that it would not affect the
+% sorting of the query.
+
+can_use_sort(_Cols, [], _Selector) ->
+ true;
+can_use_sort([], _SortFields, _Selector) ->
+ false;
+can_use_sort([Col | _] = Cols, [Col | _] = SortFields, _Selector) ->
+ lists:prefix(SortFields, Cols);
+can_use_sort([Col | RestCols], SortFields, Selector) ->
+ case mango_selector:is_constant_field(Selector, Col) of
+ true -> can_use_sort(RestCols, SortFields, Selector);
+ false -> false
+ end.
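
A standalone version of the can_use_sort/3 logic described in the comment above, with a fun standing in for mango_selector:is_constant_field/2, shows the constant-prefix behaviour directly:

-module(sort_prefix_sketch).
-export([demo/0]).

can_use_sort(_Cols, [], _IsConst) -> true;
can_use_sort([], _SortFields, _IsConst) -> false;
can_use_sort([Col | _] = Cols, [Col | _] = SortFields, _IsConst) ->
    lists:prefix(SortFields, Cols);
can_use_sort([Col | RestCols], SortFields, IsConst) ->
    case IsConst(Col) of
        true -> can_use_sort(RestCols, SortFields, IsConst);
        false -> false
    end.

demo() ->
    %% Index on [cars, age] with a selector that pins cars: {"$eq": 2}.
    IsConst = fun(Col) -> Col =:= <<"cars">> end,
    %% cars is constant, so sorting on age alone can use the index...
    true = can_use_sort([<<"cars">>, <<"age">>], [<<"age">>], IsConst),
    %% ...but a sort on a field the index does not cover cannot.
    false = can_use_sort([<<"cars">>, <<"age">>], [<<"name">>], IsConst),
    ok.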
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
index 968dc3c74..fffadcd20 100644
--- a/src/mango/src/mango_selector.erl
+++ b/src/mango/src/mango_selector.erl
@@ -16,7 +16,8 @@
-export([
normalize/1,
match/2,
- has_required_fields/2
+ has_required_fields/2,
+ is_constant_field/2
]).
@@ -638,11 +639,121 @@ has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) ->
end.
+% Returns true if a field in the selector is a constant value e.g. {a: {$eq: 1}}
+is_constant_field({[]}, _Field) ->
+ false;
+
+is_constant_field(Selector, Field) when not is_list(Selector) ->
+ is_constant_field([Selector], Field);
+
+is_constant_field([], _Field) ->
+ false;
+
+is_constant_field([{[{<<"$and">>, Args}]}], Field) when is_list(Args) ->
+ lists:any(fun(Arg) -> is_constant_field(Arg, Field) end, Args);
+
+is_constant_field([{[{<<"$and">>, Args}]}], Field) ->
+ is_constant_field(Args, Field);
+
+is_constant_field([{[{Field, {[{Cond, _Val}]}}]} | _Rest], Field) ->
+ Cond =:= <<"$eq">>;
+
+is_constant_field([{[{_UnMatched, _}]} | Rest], Field) ->
+ is_constant_field(Rest, Field).
+
+
%%%%%%%% module tests below %%%%%%%%
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
+is_constant_field_basic_test() ->
+ Selector = normalize({[{<<"A">>, <<"foo">>}]}),
+ Field = <<"A">>,
+ ?assertEqual(true, is_constant_field(Selector, Field)).
+
+is_constant_field_basic_two_test() ->
+ Selector = normalize({[{<<"$and">>,
+ [
+ {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
+ {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
+ ]
+ }]}),
+ Field = <<"cars">>,
+ ?assertEqual(true, is_constant_field(Selector, Field)).
+
+is_constant_field_not_eq_test() ->
+ Selector = normalize({[{<<"$and">>,
+ [
+ {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
+ {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
+ ]
+ }]}),
+ Field = <<"age">>,
+ ?assertEqual(false, is_constant_field(Selector, Field)).
+
+is_constant_field_missing_field_test() ->
+ Selector = normalize({[{<<"$and">>,
+ [
+ {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
+ {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
+ ]
+ }]}),
+ Field = <<"wrong">>,
+ ?assertEqual(false, is_constant_field(Selector, Field)).
+
+is_constant_field_or_field_test() ->
+ Selector = {[{<<"$or">>,
+ [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]
+ }]},
+ Normalized = normalize(Selector),
+ Field = <<"A">>,
+ ?assertEqual(false, is_constant_field(Normalized, Field)).
+
+is_constant_field_empty_selector_test() ->
+ Selector = normalize({[]}),
+ Field = <<"wrong">>,
+ ?assertEqual(false, is_constant_field(Selector, Field)).
+
+is_constant_nested_and_test() ->
+ Selector1 = {[{<<"$and">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector2 = {[{<<"$and">>,
+ [
+ {[{<<"B">>, {[{<<"$gt">>,10}]}}]}
+ ]
+ }]},
+ Selector = {[{<<"$and">>,
+ [
+ Selector1,
+ Selector2
+ ]
+ }]},
+
+ Normalized = normalize(Selector),
+ ?assertEqual(true, is_constant_field(Normalized, <<"A">>)),
+ ?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
+
+is_constant_combined_or_and_equals_test() ->
+ Selector = {[{<<"A">>, "foo"},
+ {<<"$or">>,
+ [
+ {[{<<"B">>, <<"bar">>}]},
+ {[{<<"B">>, <<"baz">>}]}
+ ]
+ },
+ {<<"C">>, "qux"}
+ ]},
+ Normalized = normalize(Selector),
+ ?assertEqual(true, is_constant_field(Normalized, <<"C">>)),
+ ?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
+
has_required_fields_basic_test() ->
RequiredFields = [<<"A">>],
Selector = {[{<<"A">>, <<"foo">>}]},
diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py
index f7e151ad8..6a31d33ee 100644
--- a/src/mango/test/02-basic-find-test.py
+++ b/src/mango/test/02-basic-find-test.py
@@ -333,3 +333,10 @@ class BasicFindTests(mango.UserDocsTests):
assert explain["mrargs"]["start_key"] == [0]
assert explain["mrargs"]["end_key"] == ["<MAX>"]
assert explain["mrargs"]["include_docs"] == True
+
+ def test_sort_with_all_docs(self):
+ explain = self.db.find({
+ "_id": {"$gt": 0},
+ "age": {"$gt": 0}
+ }, sort=["_id"], explain=True)
+ self.assertEquals(explain["index"]["type"], "special")
diff --git a/src/mango/test/18-json-sort.py b/src/mango/test/18-json-sort.py
new file mode 100644
index 000000000..f8d2abe99
--- /dev/null
+++ b/src/mango/test/18-json-sort.py
@@ -0,0 +1,222 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import mango
+import copy
+import unittest
+
+DOCS = [
+ {
+ "_id": "1",
+ "name": "Jimi",
+ "age": 10,
+ "cars": 1
+ },
+ {
+ "_id": "2",
+ "name": "Eddie",
+ "age": 20,
+ "cars": 1
+ },
+ {
+ "_id": "3",
+ "name": "Jane",
+ "age": 30,
+ "cars": 2
+ },
+ {
+ "_id": "4",
+ "name": "Mary",
+ "age": 40,
+ "cars": 2
+ },
+ {
+ "_id": "5",
+ "name": "Sam",
+ "age": 50,
+ "cars": 3
+ }
+]
+
+class JSONIndexSortOptimisations(mango.DbPerClass):
+ def setUp(self):
+ self.db.recreate()
+ self.db.save_docs(copy.deepcopy(DOCS))
+
+ def test_works_for_basic_case(self):
+ self.db.create_index(["cars", "age"], name="cars-age")
+ selector = {
+ "cars": "2",
+ "age": {
+ "$gt": 10
+ }
+ }
+ explain = self.db.find(selector, sort=["age"], explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age")
+ self.assertEqual(explain["mrargs"]["direction"], "fwd")
+
+ def test_works_for_all_fields_specified(self):
+ self.db.create_index(["cars", "age"], name="cars-age")
+ selector = {
+ "cars": "2",
+ "age": {
+ "$gt": 10
+ }
+ }
+ explain = self.db.find(selector, sort=["cars", "age"], explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age")
+
+ def test_works_for_no_sort_fields_specified(self):
+ self.db.create_index(["cars", "age"], name="cars-age")
+ selector = {
+ "cars": {
+ "$gt": 10
+ },
+ "age": {
+ "$gt": 10
+ }
+ }
+ explain = self.db.find(selector, explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age")
+
+ def test_works_for_opp_dir_sort(self):
+ self.db.create_index(["cars", "age"], name="cars-age")
+ selector = {
+ "cars": "2",
+ "age": {
+ "$gt": 10
+ }
+ }
+ explain = self.db.find(selector, sort=[{"age": "desc"}], explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age")
+ self.assertEqual(explain["mrargs"]["direction"], "rev")
+
+ def test_not_work_for_non_constant_field(self):
+ self.db.create_index(["cars", "age"], name="cars-age")
+ selector = {
+ "cars": {
+ "$gt": 10
+ },
+ "age": {
+ "$gt": 10
+ }
+ }
+ try:
+ self.db.find(selector, explain=True, sort=["age"])
+ raise Exception("Should not get here")
+ except Exception as e:
+ resp = e.response.json()
+ self.assertEqual(resp["error"], "no_usable_index")
+
+ def test_three_index_one(self):
+ self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+ selector = {
+ "cars": "2",
+ "age": 10,
+ "name": {
+ "$gt": "AA"
+ }
+ }
+ explain = self.db.find(selector, sort=["name"], explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+ def test_three_index_two(self):
+ self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+ selector = {
+ "cars": "2",
+ "name": "Eddie",
+ "age": {
+ "$gt": 10
+ }
+ }
+ explain = self.db.find(selector, sort=["age"], explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+ def test_three_index_fails(self):
+ self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+ selector = {
+ "name": "Eddie",
+ "age": {
+ "$gt": 1
+ },
+ "cars": {
+ "$gt": "1"
+ }
+ }
+ try:
+ self.db.find(selector, explain=True, sort=["name"])
+ raise Exception("Should not get here")
+ except Exception as e:
+ resp = e.response.json()
+ self.assertEqual(resp["error"], "no_usable_index")
+
+ def test_empty_sort(self):
+ self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+ selector = {
+ "name": {
+ "$gt": "Eddie",
+ },
+ "age": 10,
+ "cars": {
+ "$gt": "1"
+ }
+ }
+ explain = self.db.find(selector, explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+ def test_in_between(self):
+ self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+ selector = {
+ "name": "Eddie",
+ "age": 10,
+ "cars": {
+ "$gt": "1"
+ }
+ }
+ explain = self.db.find(selector, explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+ try:
+ self.db.find(selector, sort=["cars", "name"], explain=True)
+ raise Exception("Should not get here")
+ except Exception as e:
+ resp = e.response.json()
+ self.assertEqual(resp["error"], "no_usable_index")
+
+ def test_ignore_after_set_sort_value(self):
+ self.db.create_index(["cars", "age", "name"], name="cars-age-name")
+ selector = {
+ "age": {
+ "$gt": 10
+ },
+ "cars": 2,
+ "name": {
+ "$gt": "A"
+ }
+ }
+ explain = self.db.find(selector, sort=["age"], explain=True)
+ self.assertEqual(explain["index"]["name"], "cars-age-name")
+
+ def test_not_use_index_if_other_fields_in_sort(self):
+ self.db.create_index(["cars", "age"], name="cars-age")
+ selector = {
+ "age": 10,
+ "cars": {
+ "$gt": "1"
+ }
+ }
+ try:
+ self.db.find(selector, sort=["cars", "name"], explain=True)
+ raise Exception("Should not get here")
+ except Exception as e:
+ resp = e.response.json()
+ self.assertEqual(resp["error"], "no_usable_index")
diff --git a/src/mango/test/19-find-conflicts.py b/src/mango/test/19-find-conflicts.py
new file mode 100644
index 000000000..c6d59f00d
--- /dev/null
+++ b/src/mango/test/19-find-conflicts.py
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import mango
+import copy
+
+DOC = [
+ {
+ "_id": "doc",
+ "a": 2
+ }
+]
+
+CONFLICT = [
+ {
+ "_id": "doc",
+ "_rev": "1-23202479633c2b380f79507a776743d5",
+ "a": 1
+ }
+]
+
+class ChooseCorrectIndexForDocs(mango.DbPerClass):
+ def setUp(self):
+ self.db.recreate()
+ self.db.save_docs(copy.deepcopy(DOC))
+ self.db.save_docs_with_conflicts(copy.deepcopy(CONFLICT))
+
+ def test_retrieve_conflicts(self):
+ self.db.create_index(["_conflicts"])
+ result = self.db.find({"_conflicts": { "$exists": True}}, conflicts=True)
+ self.assertEqual(result[0]['_conflicts'][0], '1-23202479633c2b380f79507a776743d5')
+ self.assertEqual(result[0]['_rev'], '1-3975759ccff3842adf690a5c10caee42')
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index 9b6b998cd..bc12bbc68 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -95,6 +95,11 @@ class Database(object):
def save_doc(self, doc):
self.save_docs([doc])
+ def save_docs_with_conflicts(self, docs, **kwargs):
+ body = json.dumps({"docs": docs, "new_edits": False})
+ r = self.sess.post(self.path("_bulk_docs"), data=body, params=kwargs)
+ r.raise_for_status()
+
def save_docs(self, docs, **kwargs):
body = json.dumps({"docs": docs})
r = self.sess.post(self.path("_bulk_docs"), data=body, params=kwargs)
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
index 3d9187796..8d996d617 100644
--- a/src/mem3/src/mem3_rep.erl
+++ b/src/mem3/src/mem3_rep.erl
@@ -106,8 +106,8 @@ make_local_id(#shard{node=SourceNode}, #shard{node=TargetNode}, Filter) ->
make_local_id(SourceThing, TargetThing, Filter) ->
- S = couch_util:encodeBase64Url(crypto:hash(md5, term_to_binary(SourceThing))),
- T = couch_util:encodeBase64Url(crypto:hash(md5, term_to_binary(TargetThing))),
+ S = couch_util:encodeBase64Url(couch_hash:md5_hash(term_to_binary(SourceThing))),
+ T = couch_util:encodeBase64Url(couch_hash:md5_hash(term_to_binary(TargetThing))),
F = case is_function(Filter) of
true ->
{new_uniq, Hash} = erlang:fun_info(Filter, new_uniq),
@@ -339,7 +339,7 @@ update_locals(Acc) ->
find_repl_doc(SrcDb, TgtUUIDPrefix) ->
SrcUUID = couch_db:get_uuid(SrcDb),
- S = couch_util:encodeBase64Url(crypto:hash(md5, term_to_binary(SrcUUID))),
+ S = couch_util:encodeBase64Url(couch_hash:md5_hash(term_to_binary(SrcUUID))),
DocIdPrefix = <<"_local/shard-sync-", S/binary, "-">>,
FoldFun = fun(#doc{id = DocId, body = {BodyProps}} = Doc, _) ->
TgtUUID = couch_util:get_value(<<"target_uuid">>, BodyProps, <<>>),
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index c84b87397..da3b69a61 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -184,6 +184,7 @@ handle_config_terminate(_Server, _Reason, _State) ->
erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
init([]) ->
+ couch_util:set_mqd_off_heap(),
ets:new(?SHARDS, [
bag,
public,
diff --git a/src/rexi/src/rexi_server.erl b/src/rexi/src/rexi_server.erl
index 3d3f272e4..954ca88cb 100644
--- a/src/rexi/src/rexi_server.erl
+++ b/src/rexi/src/rexi_server.erl
@@ -39,6 +39,7 @@ start_link(ServerId) ->
gen_server:start_link({local, ServerId}, ?MODULE, [], []).
init([]) ->
+ couch_util:set_mqd_off_heap(),
{ok, #st{}}.
handle_call(get_errors, _From, #st{errors = Errors} = St) ->
diff --git a/src/setup/src/setup.erl b/src/setup/src/setup.erl
index 943318675..3ae455f54 100644
--- a/src/setup/src/setup.erl
+++ b/src/setup/src/setup.erl
@@ -30,15 +30,29 @@ require_node_count(undefined) ->
require_node_count(_) ->
ok.
-error_bind_address() ->
- throw({error, "Cluster setup requires bind_addres != 127.0.0.1"}).
-
-require_bind_address("127.0.0.1", undefined) ->
- error_bind_address();
-require_bind_address("127.0.0.1", <<"127.0.0.1">>) ->
- error_bind_address();
-require_bind_address(_, _) ->
- ok.
+error_local_bind_address() ->
+ throw({error, "Cluster setup requires a remote bind_address (not 127.0.0.1 nor ::1)"}).
+
+error_invalid_bind_address(InvalidBindAddress) ->
+ throw({error, io_lib:format("Setup requires a valid IP bind_address. " ++
+ "~p is invalid.", [InvalidBindAddress])}).
+
+require_remote_bind_address(OldBindAddress, NewBindAddress) ->
+ case {OldBindAddress, NewBindAddress} of
+ {"127.0.0.1", undefined} -> error_local_bind_address();
+ {_, <<"127.0.0.1">>} -> error_local_bind_address();
+ {"::1", undefined} -> error_local_bind_address();
+ {_, <<"::1">>} -> error_local_bind_address();
+ {_, undefined} -> ok;
+ {_, PresentNewBindAddress} -> require_valid_bind_address(PresentNewBindAddress)
+ end.
+
+require_valid_bind_address(BindAddress) ->
+ ListBindAddress = binary_to_list(BindAddress),
+ case inet_parse:address(ListBindAddress) of
+ {ok, _} -> ok;
+ {error, _} -> error_invalid_bind_address(ListBindAddress)
+ end.
is_cluster_enabled() ->
% bind_address != 127.0.0.1 AND admins != empty
@@ -122,7 +136,6 @@ enable_cluster_http(Options) ->
{ok, "201", _, _} ->
ok;
Else ->
- couch_log:notice("send_req: ~p~n", [Else]),
{error, Else}
end.
@@ -143,13 +156,13 @@ enable_cluster_int(Options, false) ->
% if bind_address == 127.0.0.1 and no bind_address in req -> error
CurrentBindAddress = config:get("chttpd","bind_address"),
NewBindAddress = proplists:get_value(bind_address, Options),
- ok = require_bind_address(CurrentBindAddress, NewBindAddress),
+ ok = require_remote_bind_address(CurrentBindAddress, NewBindAddress),
NodeCount = couch_util:get_value(node_count, Options),
ok = require_node_count(NodeCount),
Port = proplists:get_value(port, Options),
setup_node(NewCredentials, NewBindAddress, NodeCount, Port),
- couch_log:notice("Enable Cluster: ~p~n", [Options]).
+ couch_log:debug("Enable Cluster: ~p~n", [Options]).
set_admin(Username, Password) ->
config:set("admins", binary_to_list(Username), binary_to_list(Password)).
@@ -162,6 +175,7 @@ setup_node(NewCredentials, NewBindAddress, NodeCount, Port) ->
set_admin(Username, Password)
end,
+ ok = require_valid_bind_address(NewBindAddress),
case NewBindAddress of
undefined ->
config:set("chttpd", "bind_address", "0.0.0.0");
@@ -211,7 +225,7 @@ enable_single_node(Options) ->
setup_node(NewCredentials, NewBindAddress, 1, Port),
Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)),
- couch_log:notice("Enable Single Node: ~p~n", [Options]).
+ couch_log:debug("Enable Single Node: ~p~n", [Options]).
add_node(Options) ->
@@ -220,7 +234,7 @@ add_node(Options) ->
add_node_int(_Options, false) ->
{error, cluster_not_enabled};
add_node_int(Options, true) ->
- couch_log:notice("add node_int: ~p~n", [Options]),
+ couch_log:debug("add node_int: ~p~n", [Options]),
ErlangCookie = erlang:get_cookie(),
% POST to nodeB/_setup
@@ -251,7 +265,6 @@ add_node_int(Options, true) ->
% when done, PUT :5986/nodes/nodeB
create_node_doc(Host, Name);
Else ->
- couch_log:notice("send_req: ~p~n", [Else]),
Else
end.
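
require_valid_bind_address/1 leans on inet_parse:address/1, which parses both IPv4 and IPv6 literals and returns {error, einval} otherwise, so the ::1 clauses above and arbitrary IPv6 bind addresses both work. Expected shell behaviour:

1> inet_parse:address("0.0.0.0").
{ok,{0,0,0,0}}
2> inet_parse:address("::1").
{ok,{0,0,0,0,0,0,0,1}}
3> inet_parse:address("not-an-ip").
{error,einval}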
diff --git a/test/javascript/run b/test/javascript/run
index 8ae424467..ca69e1ff2 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -134,10 +134,11 @@ def main():
tmp.append(name)
tests = tmp
- fmt = mkformatter(tests)
passed = 0
failed = 0
- for test in tests:
+ if len(tests) > 0:
+ fmt = mkformatter(tests)
+ for test in tests:
result = run_couchjs(test, fmt)
if result == 0:
passed += 1
@@ -169,8 +170,7 @@ def build_test_case_paths(path,args=None):
elif os.path.isfile(pname + ".js"):
tests.append(pname + ".js")
else:
- sys.stderr.write("Unknown test: " + name + os.linesep)
- exit(1)
+ sys.stderr.write("Waring - Unknown test: " + name + os.linesep)
return tests
diff --git a/test/javascript/tests-cluster/with-quorum/attachments.js b/test/javascript/tests-cluster/with-quorum/attachments.js
new file mode 100644
index 000000000..f578f877c
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments.js
@@ -0,0 +1,36 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 201,"Should return 201");
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+ body:"This is no base64 encoded text-2",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 201,"Should return 201");
+
+ db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_delete.js b/test/javascript/tests-cluster/with-quorum/attachments_delete.js
new file mode 100644
index 000000000..ed7d2db9a
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments_delete.js
@@ -0,0 +1,32 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 201,"Should return 201 Accepted");
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+ T(xhr.status == 200,"Should return 200 Ok but returns "+xhr.status);
+
+ db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js
new file mode 100644
index 000000000..1994a0ac2
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments_delete_overridden_quorum.js
@@ -0,0 +1,36 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+ console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+ // TODO: Define correct behaviour
+ //T(xhr.status == 202,"Should return 202 but returns "+xhr.status);
+
+ //db.deleteDb();
+ // cleanup
+ // TODO DB deletions fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js
new file mode 100644
index 000000000..22c8a4c87
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/attachments_overridden_quorum.js
@@ -0,0 +1,40 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Test attachment operations with an overridden quorum parameter
+couchTests.attachments_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ //TODO: Define correct behaviour
+ //T(xhr.status == 202,"Should return 202");
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+ body:"This is no base64 encoded text-2",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status);
+ //TODO: Define correct behaviour
+ //T(xhr.status == 202,"Should return 202");
+
+ db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/db_creation.js
index f8efd6e68..f8efd6e68 100644
--- a/test/javascript/tests-cluster/with-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/with-quorum/db_creation.js
diff --git a/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
new file mode 100644
index 000000000..14d319ccd
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/db_creation_overridden_quorum.js
@@ -0,0 +1,28 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do DB creation under cluster with quorum conditions but overriding write quorum.
+couchTests.db_creation_overridden_quorum = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name()
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+
+ // DB Creation should return 202 - Accepted
+ xhr = CouchDB.request("PUT", "/" + db_name + "/");
+ console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 202->"+xhr.status)
+ //T(xhr.status == 202,"Should return 202");
+
+ // cleanup
+ db.deleteDb();
+};
diff --git a/test/javascript/tests-cluster/with-quorum/db_deletion.js b/test/javascript/tests-cluster/with-quorum/db_deletion.js
new file mode 100644
index 000000000..079fb493d
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/db_deletion.js
@@ -0,0 +1,30 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do DB deletion under cluster with quorum conditions.
+couchTests.db_deletion = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name()
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+
+ db.createDb();
+
+ // DB Deletion should return 202 - Accepted as the cluster is not complete
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+ T(xhr.status == 202);
+
+ // DB Deletion should return 404 - Not found
+ xhr = CouchDB.request("DELETE", "/not-existing-db/");
+ T(xhr.status == 404);
+};
diff --git a/test/javascript/tests-cluster/with-quorum/db_deletion_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/db_deletion_overridden_quorum.js
new file mode 100644
index 000000000..01417eb63
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/db_deletion_overridden_quorum.js
@@ -0,0 +1,23 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do DB deletion in a cluster with quorum conditions.
+couchTests.db_deletion_overridden_quorum = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name()
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+ db.createDb();
+ db.deleteDb();
+ T(db.last_req.status="202","Should return 202");
+};
diff --git a/test/javascript/tests-cluster/with-quorum/doc_bulk.js b/test/javascript/tests-cluster/with-quorum/doc_bulk.js
new file mode 100644
index 000000000..4bdd3c84b
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_bulk.js
@@ -0,0 +1,25 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_bulk = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(5);
+ // Create the docs
+ var results = db.bulkSave(docs);
+ T(db.last_req.status="201","Should return 201")
+
+ db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/doc_bulk_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/doc_bulk_overridden_quorum.js
new file mode 100644
index 000000000..0cf9a7e8c
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_bulk_overridden_quorum.js
@@ -0,0 +1,25 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_bulk_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(5);
+ // Create the docs
+ var results = db.bulkSave(docs);
+ T(db.last_req.status="202","Should return 202")
+
+ db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/with-quorum/doc_copy.js b/test/javascript/tests-cluster/with-quorum/doc_copy.js
new file mode 100644
index 000000000..386ca5671
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_copy.js
@@ -0,0 +1,27 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_copy = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"dummy"});
+
+ var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+ headers: {"Destination":"dummy2"}
+ });
+ T(xhr.status=="201","Should return 201 ");
+
+ db.deleteDb();
+}
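
The COPY above creates a fresh dummy2. When the Destination document already exists, CouchDB requires its current revision in the header, so an overwrite looks like this sketch (reusing the test's db and db_name):

    // Sketch: overwrite an existing COPY target by naming its current rev.
    var target = db.open("dummy2");
    var copyXhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
      headers: {"Destination": "dummy2?rev=" + target._rev}
    });
    T(copyXhr.status == "201", "overwriting copy should also return 201");
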
diff --git a/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js
new file mode 100644
index 000000000..23fbc9754
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_copy_overridden_quorum.js
@@ -0,0 +1,30 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_copy_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"dummy"});
+
+ var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+ headers: {"Destination":"dummy2"}
+ });
+ // TODO: Define correct behaviour
+ //T(xhr.status=="202","Should return 202");
+ console.log("Skipped-TODO: Clarify correct behaviour. Overridden quorum is not considered. 202->"+xhr.status);
+
+ db.deleteDb();
+
+}
diff --git a/test/javascript/tests-cluster/with-quorum/doc_crud.js b/test/javascript/tests-cluster/with-quorum/doc_crud.js
new file mode 100644
index 000000000..f016cefdd
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_crud.js
@@ -0,0 +1,31 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_crud = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"0",a:1});
+ T(db.last_req.status=="201");
+
+ var doc = db.open("0");
+ db.save(doc);
+ T(db.last_req.status=="201");
+
+ doc = db.open("0");
+ db.deleteDoc(doc);
+ T(db.last_req.status="200");
+ db.deleteDb();
+
+}
diff --git a/test/javascript/tests-cluster/with-quorum/doc_crud_overridden_quorum.js b/test/javascript/tests-cluster/with-quorum/doc_crud_overridden_quorum.js
new file mode 100644
index 000000000..41502ca5e
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/doc_crud_overridden_quorum.js
@@ -0,0 +1,31 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_crud_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":3});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"0",a:1});
+ T(db.last_req.status=="202","Should return 202 status");
+
+ var doc = db.open("0");
+ db.save(doc);
+ T(db.last_req.status=="202","Should return 202 status");
+
+ doc = db.open("0");
+ db.deleteDoc(doc);
+ T(db.last_req.status="202","Should return 202 status");
+
+ db.deleteDb();
+}
diff --git a/test/javascript/tests-cluster/without-quorum/attachments.js b/test/javascript/tests-cluster/without-quorum/attachments.js
new file mode 100644
index 000000000..57563439a
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments.js
@@ -0,0 +1,39 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 202,"Should return 202 Accepted");
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+ body:"This is no base64 encoded text-2",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 202,"Should return 202 Accepted");
+ rev = JSON.parse(xhr.responseText).rev;
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
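
The raw DELETE cleanup here (and in the sibling tests below) works around db.deleteDb() treating the 202 reply as a failure when the quorum is not met. A quorum-tolerant cleanup could be as small as this sketch (the deleteDbQuietly name is hypothetical):

    // Sketch: delete a db, accepting 200 (quorum met) and 202 (accepted).
    var deleteDbQuietly = function(name) {
      var res = CouchDB.request("DELETE", "/" + name + "/");
      T(res.status == 200 || res.status == 202,
        "cleanup delete should be accepted, got " + res.status);
    };
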
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_delete.js b/test/javascript/tests-cluster/without-quorum/attachments_delete.js
new file mode 100644
index 000000000..d05fcaffa
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments_delete.js
@@ -0,0 +1,37 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ T(xhr.status == 202,"Should return 202 Accepted");
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+ console.log("Skipped-TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
+ //TODO: Define correct behaviour
+ //T(xhr.status == 202,"Should return 202 Accepted but returns "+xhr.status);
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js
new file mode 100644
index 000000000..906391ae1
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments_delete_overridden_quorum.js
@@ -0,0 +1,36 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.attachments_delete_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/dummy/foo.txt?rev=" + rev);
+ console.log("Skipped-TODO: Clarify correct behaviour. Is not considering quorum. 202->"+xhr.status);
+ //TODO: Define correct behaviour
+ //T(xhr.status == 200,"Should return 200 but returns "+xhr.status);
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/attachments_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/attachments_overridden_quorum.js
new file mode 100644
index 000000000..434578f3a
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/attachments_overridden_quorum.js
@@ -0,0 +1,42 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Test attachment operations with an overridden quorum parameter
+couchTests.attachments_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+ db.createDb();
+ if (debug) debugger;
+
+ var doc = db.save({_id:"dummy"});
+ T(doc.ok);
+
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + doc.rev, {
+ body:"This is no base64 encoded text",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ // TODO: Define correct behaviour
+ //T(xhr.status == 201,"Should return 201");
+ var rev = JSON.parse(xhr.responseText).rev;
+
+ xhr = CouchDB.request("PUT", "/" + db_name + "/dummy/foo.txt?rev=" + rev, {
+ body:"This is no base64 encoded text-2",
+ headers:{"Content-Type": "text/plain;charset=utf-8"}
+ });
+ // TODO: Define correct behaviour
+ //T(xhr.status == 201,"Should return 201");
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db_creation.js
index 0d8ff8367..a21d37746 100644
--- a/test/javascript/tests-cluster/without-quorum/db-creation.js
+++ b/test/javascript/tests-cluster/without-quorum/db_creation.js
@@ -23,6 +23,5 @@ couchTests.db_creation = function(debug) {
T(xhr.status == 202);
// cleanup
- // TODO DB deletions fails if the quorum is not met.
- xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+ db.deleteDb();
};
diff --git a/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
new file mode 100644
index 000000000..6d5d798d1
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/db_creation_overridden_quorum.js
@@ -0,0 +1,30 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do DB creation in a cluster without quorum conditions, overriding the write quorum.
+couchTests.db_creation_overridden_quorum = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+
+ // DB Creation should return 201 - Created
+ xhr = CouchDB.request("PUT", "/" + db_name + "/");
+ console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status)
+ //T(xhr.status == 201,"Should return 201");
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+};
diff --git a/test/javascript/tests-cluster/without-quorum/db_deletion.js b/test/javascript/tests-cluster/without-quorum/db_deletion.js
new file mode 100644
index 000000000..006345e30
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/db_deletion.js
@@ -0,0 +1,30 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do DB deletion in a cluster without quorum conditions.
+couchTests.db_deletion = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+
+ db.createDb();
+
+ // DB Deletion should return 202 - Accepted
+ var xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+ T(xhr.status == 202);
+
+ // DB Deletion should return 404 - Not found
+ xhr = CouchDB.request("DELETE", "/not-existing-db/");
+ T(xhr.status == 404);
+};
diff --git a/test/javascript/tests-cluster/without-quorum/db_deletion_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/db_deletion_overridden_quorum.js
new file mode 100644
index 000000000..11b344cfb
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/db_deletion_overridden_quorum.js
@@ -0,0 +1,25 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Do DB deletion in a cluster without quorum conditions, overriding the write quorum.
+couchTests.db_deletion_overridden_quorum = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+ db.createDb();
+
+ // DB deletion does not consider the overridden quorum param.
+ var xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+ T(xhr.status == 202, "Should return 202");
+};
diff --git a/test/javascript/tests-cluster/without-quorum/doc_bulk.js b/test/javascript/tests-cluster/without-quorum/doc_bulk.js
new file mode 100644
index 000000000..91578d88a
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_bulk.js
@@ -0,0 +1,28 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_bulk = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(5);
+ // Create the docs
+ var results = db.bulkSave(docs);
+ T(db.last_req.status="202","Should return 202")
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ var xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/doc_bulk_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/doc_bulk_overridden_quorum.js
new file mode 100644
index 000000000..56fb11e59
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_bulk_overridden_quorum.js
@@ -0,0 +1,28 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_bulk_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(5);
+ // Create the docs
+ var results = db.bulkSave(docs);
+ T(db.last_req.status="201","Should return 201")
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ var xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/doc_copy.js b/test/javascript/tests-cluster/without-quorum/doc_copy.js
new file mode 100644
index 000000000..7d7c35fcc
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_copy.js
@@ -0,0 +1,30 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_copy = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"dummy"});
+
+ var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+ headers: {"Destination":"dummy2"}
+ });
+ T(xhr.status=="202","Should return 202 ");
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js
new file mode 100644
index 000000000..e72425d86
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_copy_overridden_quorum.js
@@ -0,0 +1,33 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_copy_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"dummy"});
+
+ var xhr = CouchDB.request("COPY", "/" + db_name + "/dummy", {
+ headers: {"Destination":"dummy2"}
+ });
+ console.log("Skipped-TODO: Clarify correct behaviour. Is not considering overridden quorum. 201->"+xhr.status);
+ //TODO Defie correct behaviour
+ //T(xhr.status=="201","Should return 201");
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+
+}
diff --git a/test/javascript/tests-cluster/without-quorum/doc_crud.js b/test/javascript/tests-cluster/without-quorum/doc_crud.js
new file mode 100644
index 000000000..aa706976b
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_crud.js
@@ -0,0 +1,35 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_crud = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"0",a:1});
+ T(db.last_req.status=="202","Should return 202 status");
+
+ var doc = db.open("0");
+ db.save(doc);
+ T(db.last_req.status=="202","Should return 202 status");
+
+ doc = db.open("0");
+ db.deleteDoc(doc);
+ T(db.last_req.status="202","Should return 202 status");
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ var xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+
+}
diff --git a/test/javascript/tests-cluster/without-quorum/doc_crud_overridden_quorum.js b/test/javascript/tests-cluster/without-quorum/doc_crud_overridden_quorum.js
new file mode 100644
index 000000000..44ab86ec0
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/doc_crud_overridden_quorum.js
@@ -0,0 +1,34 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.doc_crud_overridden_quorum = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"},{"w":1});
+ db.createDb();
+ if (debug) debugger;
+
+ db.save({_id:"0",a:1});
+ T(db.last_req.status=="201","Should return 201 status");
+
+ var doc = db.open("0");
+ db.save(doc);
+ T(db.last_req.status=="201","Should return 201 status");
+
+ doc = db.open("0");
+ db.deleteDoc(doc);
+ T(db.last_req.status="200","Should return 200 status");
+
+ //db.deleteDb();
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ var xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+}
diff --git a/test/javascript/tests/design_docs.js b/test/javascript/tests/design_docs.js
index 6e12001d7..ed1e72f3f 100644
--- a/test/javascript/tests/design_docs.js
+++ b/test/javascript/tests/design_docs.js
@@ -373,7 +373,13 @@ couchTests.design_docs = function(debug) {
}
T(db.deleteDoc(designDoc).ok);
- T(db.open(designDoc._id) == null);
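+ // In a cluster the delete becomes visible on all shard replicas only
+ // asynchronously, so poll for the ddoc to disappear rather than
+ // asserting immediately.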
+ waitForSuccess(function() {
+ var ddoc = db.open(designDoc._id);
+ if (ddoc != null) {
+ throw({});
+ }
+ return true;
+ }, 'db.open(designDoc._id)');
T(db.view("test/no_docs") == null);
T(db.ensureFullCommit().ok);
diff --git a/test/javascript/tests/reduce_builtin.js b/test/javascript/tests/reduce_builtin.js
index 9c455e4e6..4686841e3 100644
--- a/test/javascript/tests/reduce_builtin.js
+++ b/test/javascript/tests/reduce_builtin.js
@@ -37,6 +37,12 @@ couchTests.reduce_builtin = function(debug) {
emit(doc.integer, doc.integer);
};
+ var check_approx_distinct = function(expected, estimated) {
+ // see https://en.wikipedia.org/wiki/HyperLogLog
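+ // A HyperLogLog estimate has a relative standard error of roughly
+ // 1.04/sqrt(m) for m registers; the constant below assumes
+ // m = 2^(11-1) = 1024 registers, i.e. a bound of about 3.25%
+ // (inferred from the exponent used here).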
+ var err = 1.04 / Math.sqrt(Math.pow(2, 11 - 1));
+ return Math.abs(expected - estimated) < expected * err;
+ };
+
var result = db.query(map, "_sum");
T(result.rows[0].value == 2*summate(numDocs));
result = db.query(map, "_count");
@@ -47,27 +53,41 @@ couchTests.reduce_builtin = function(debug) {
T(result.rows[0].value.min == 1);
T(result.rows[0].value.max == 500);
T(result.rows[0].value.sumsqr == 2*sumsqr(numDocs));
+ result = db.query(map, "_approx_count_distinct");
+ T(check_approx_distinct(numDocs, result.rows[0].value));
result = db.query(map, "_sum", {startkey: 4, endkey: 4});
T(result.rows[0].value == 8);
result = db.query(map, "_count", {startkey: 4, endkey: 4});
T(result.rows[0].value == 2);
+ result = db.query(map, "_approx_count_distinct", {startkey:4, endkey:4});
+ T(check_approx_distinct(1, result.rows[0].value));
result = db.query(map, "_sum", {startkey: 4, endkey: 5});
T(result.rows[0].value == 18);
result = db.query(map, "_count", {startkey: 4, endkey: 5});
T(result.rows[0].value == 4);
+ result = db.query(map, "_approx_count_distinct", {startkey:4, endkey:5});
+ T(check_approx_distinct(2, result.rows[0].value));
+
result = db.query(map, "_sum", {startkey: 4, endkey: 6});
T(result.rows[0].value == 30);
result = db.query(map, "_count", {startkey: 4, endkey: 6});
T(result.rows[0].value == 6);
+ result = db.query(map, "_approx_count_distinct", {startkey: 4, endkey: 6});
+ T(check_approx_distinct(3, result.rows[0].value));
result = db.query(map, "_sum", {group:true, limit:3});
T(result.rows[0].value == 2);
T(result.rows[1].value == 4);
T(result.rows[2].value == 6);
+ result = db.query(map, "_approx_count_distinct", {group:true, limit:3});
+ T(check_approx_distinct(1, result.rows[0].value));
+ T(check_approx_distinct(1, result.rows[1].value));
+ T(check_approx_distinct(1, result.rows[2].value));
+
for(var i=1; i<numDocs/2; i+=30) {
result = db.query(map, "_sum", {startkey: i, endkey: numDocs - i});
T(result.rows[0].value == 2*(summate(numDocs-i) - summate(i-1)));
diff --git a/test/javascript/tests/users_db.js b/test/javascript/tests/users_db.js
index 34a7bad68..20be325ca 100644
--- a/test/javascript/tests/users_db.js
+++ b/test/javascript/tests/users_db.js
@@ -205,6 +205,13 @@ couchTests.users_db = function(debug) {
} finally {
CouchDB.login("jan", "apple");
usersDb.deleteDb(); // cleanup
+ waitForSuccess(function() {
+ var req = CouchDB.request("GET", usersDb.name);
+ if (req.status == 404) {
+ return true;
+ }
+ throw({});
+ }, "usersdb.deleteDb")
usersDb.createDb();
usersDbAlt.deleteDb(); // cleanup
}
diff --git a/test/javascript/tests/users_db_security.js b/test/javascript/tests/users_db_security.js
index c55c76434..1db6c14c5 100644
--- a/test/javascript/tests/users_db_security.js
+++ b/test/javascript/tests/users_db_security.js
@@ -15,8 +15,6 @@ couchTests.users_db_security = function(debug) {
var usersDb = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
try { usersDb.createDb(); } catch (e) { /* ignore if exists*/ }
- var passwordSchemes = ['pbkdf2', 'bcrypt'];
-
if (debug) debugger;
var loginUser = function(username) {
@@ -32,13 +30,7 @@ couchTests.users_db_security = function(debug) {
// the actual tests
var username1 = username.replace(/[0-9]$/, "");
var password = pws[username];
- waitForSuccess(function() {
- var req = CouchDB.login(username1, pws[username]);
- if (req.ok) {
- return true
- }
- throw({});
- }, 'loginUser');
+ T(CouchDB.login(username1, pws[username]).ok);
};
var open_as = function(db, docId, username) {
@@ -94,7 +86,7 @@ couchTests.users_db_security = function(debug) {
}
};
- var testFun = function(scheme, derivedKeyTest, saltTest)
+ var testFun = function()
{
// _users db
@@ -113,12 +105,11 @@ couchTests.users_db_security = function(debug) {
// jan's gonna be admin as he's the first user
TEquals(true, usersDb.save(userDoc).ok, "should save document");
+ wait(5000);
userDoc = open_as(usersDb, "org.couchdb.user:jchris", "jchris");
TEquals(undefined, userDoc.password, "password field should be null 1");
- TEquals(scheme, userDoc.password_scheme, "password_scheme should be " + scheme);
- derivedKeyTest(userDoc.derived_key);
- saltTest(userDoc.salt);
-
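+ // pbkdf2 (sha1) derived keys render as 40 hex characters and the
+ // 128-bit salt as 32 hex characters; the bcrypt variants of these
+ // checks were removed along with the scheme.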
+ TEquals(40, userDoc.derived_key.length, "derived_key should exist");
+ TEquals(32, userDoc.salt.length, "salt should exist");
// create server admin
@@ -150,13 +141,10 @@ couchTests.users_db_security = function(debug) {
var jchrisDoc = open_as(usersDb, "org.couchdb.user:jchris", "jan");
TEquals(undefined, jchrisDoc.password, "password field should be null 2");
- TEquals(scheme, jchrisDoc.password_scheme, "password_scheme should be " + scheme);
- derivedKeyTest(jchrisDoc.derived_key);
- saltTest(jchrisDoc.salt);
+ TEquals(40, jchrisDoc.derived_key.length, "derived_key should exist");
+ TEquals(32, jchrisDoc.salt.length, "salt should exist");
- if(userDoc.salt || jchrisDoc.salt) {
- TEquals(true, userDoc.salt != jchrisDoc.salt, "should have new salt");
- }
+ TEquals(true, userDoc.salt != jchrisDoc.salt, "should have new salt");
TEquals(true, userDoc.derived_key != jchrisDoc.derived_key,
"should have new derived_key");
@@ -239,7 +227,7 @@ couchTests.users_db_security = function(debug) {
TEquals("forbidden", e.error, "non-admins can't read design docs");
}
- // admin should be able to read _list
+ // admin should be able to read _list
var listPath = ddoc["_id"] + "/_list/names/test";
var result = request_as(usersDb, listPath, "jan");
var lines = result.responseText.split("\n");
@@ -385,140 +373,14 @@ couchTests.users_db_security = function(debug) {
});
};
- var derivedKeyTests = {
- pbkdf2: function(derived_key) {
- TEquals(40, derived_key.length, "derived_key should exist");
- },
- bcrypt: function(derived_key) {
- TEquals(60, derived_key.length, "derived_key should exist");
- }
- };
- var saltTests = {
- pbkdf2: function(salt) {
- TEquals(32, salt.length, "salt should exist");
- },
- bcrypt: function(salt) {
- TEquals(undefined, salt, "salt should not exist");
- }
- };
- passwordSchemes.forEach(function(scheme){
- run_on_modified_server(
- [{
- section: "couch_httpd_auth",
- key: "iterations", value: "1"
- }, {
- section: "couch_httpd_auth",
- key: "password_scheme", value: scheme
- }, {
- section: "admins",
- key: "jan", value: "apple"
- }],
- function() {
- try {
- testFun(scheme, derivedKeyTests[scheme], saltTests[scheme]);
- } catch (e) {
- throw(e)
- } finally {
- CouchDB.login("jan", "apple");
- usersDb.deleteDb(); // cleanup
- waitForSuccess(function() {
- var req = CouchDB.request("GET", db_name);
- if (req.status == 404) {
- return true
- }
- throw({});
- }, 'usersDb.deleteDb')
-
- usersDb.createDb();
- waitForSuccess(function() {
- var req = CouchDB.request("GET", db_name);
- if (req.status == 200) {
- return true
- }
- throw({});
- }, 'usersDb.creteDb')
- }
- }
- );
- });
-
- var testFunUpdatePasswordScheme = function() {
- var userDocs = {
- jchris: {
- _id: "org.couchdb.user:jchris",
- type: "user",
- name: "jchris",
- password: "mp3",
- roles: []
- },
- fdmanana: {
- _id: "org.couchdb.user:fdmanana",
- type: "user",
- name: "fdmanana",
- password: "foobar",
- roles: []
- }
- };
-
- // create new user (has pbkdf2 hash)
- TEquals(true, usersDb.save(userDocs.jchris).ok, "should save document");
- wait(5000);
- var userDoc = open_as(usersDb, "org.couchdb.user:jchris", "jchris");
- TEquals(undefined, userDoc.password, "password field should be null 1");
- TEquals("pbkdf2", userDoc.password_scheme, "password_scheme should be pbkdf2");
- derivedKeyTests.pbkdf2(userDoc.derived_key);
- saltTests.pbkdf2(userDoc.salt);
-
- // change scheme to bcrypt
- CouchDB.login("jan", "apple");
- var xhr = CouchDB.request("PUT", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme", {
- body : JSON.stringify("bcrypt"),
- headers: {"X-Couch-Persist": "false"}
- });
- TEquals(200, xhr.status);
- xhr = CouchDB.request("GET", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme");
- var scheme = JSON.parse(xhr.responseText);
- TEquals("bcrypt", scheme);
-
- // create new user (has bcrypt hash)
- TEquals(true, usersDb.save(userDocs.fdmanana).ok, "should save document");
- wait(5000);
- userDoc = open_as(usersDb, "org.couchdb.user:fdmanana", "fdmanana");
- TEquals(undefined, userDoc.password, "password field should be null 1");
- TEquals("bcrypt", userDoc.password_scheme, "password_scheme should be bcrypt");
- derivedKeyTests.bcrypt(userDoc.derived_key);
- saltTests.bcrypt(userDoc.salt);
-
- // test that both users can still log in
- TEquals(true, CouchDB.login(userDocs.jchris.name, userDocs.jchris.password).ok);
- TEquals(true, CouchDB.login(userDocs.fdmanana.name, userDocs.fdmanana.password).ok);
-
- // change scheme back to pbkdf2
- CouchDB.login("jan", "apple");
- var xhr = CouchDB.request("PUT", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme", {
- body : JSON.stringify("pbkdf2"),
- headers: {"X-Couch-Persist": "false"}
- });
- TEquals(200, xhr.status);
- xhr = CouchDB.request("GET", "/_node/node1@127.0.0.1/_config/couch_httpd_auth/password_scheme");
- var scheme = JSON.parse(xhr.responseText);
- TEquals("pbkdf2", scheme);
-
- // test that both users can still log in
- TEquals(true, CouchDB.login(userDocs.jchris.name, userDocs.jchris.password).ok);
- TEquals(true, CouchDB.login(userDocs.fdmanana.name, userDocs.fdmanana.password).ok);
- };
run_on_modified_server(
- [{
- section: "couch_httpd_auth",
- key: "iterations", value: "1"
- }, {
- section: "admins",
- key: "jan", value: "apple"
- }],
+ [{section: "couch_httpd_auth",
+ key: "iterations", value: "1"},
+ {section: "admins",
+ key: "jan", value: "apple"}],
function() {
try {
- testFunUpdatePasswordScheme();
+ testFun();
} finally {
CouchDB.login("jan", "apple");
usersDb.deleteDb(); // cleanup
@@ -540,6 +402,5 @@ couchTests.users_db_security = function(debug) {
}
}
);
-
CouchDB.logout();
};