author     Jan Lehnardt <jan@apache.org>  2018-02-13 11:58:08 +0100
committer  GitHub <noreply@github.com>    2018-02-13 11:58:08 +0100
commit     e5668051012cd98f14fd932681d63fa366911e7f (patch)
tree       bbfa5827c8f8b2e2522724f86fd1b6b6a753f007
parent     3f68e69b466bcd770983df5d830af56c99a682b0 (diff)
parent     3b53c1c92af6dc774995fd8f1009d1306248477c (diff)
download   couchdb-fix-make-dist.tar.gz
Merge branch 'master' into fix-make-dist (fix-make-dist)
-rw-r--r--  COMMITTERS.md | 2
-rw-r--r--  Jenkinsfile | 27
-rw-r--r--  LICENSE | 26
-rw-r--r--  Makefile | 60
-rw-r--r--  Makefile.win | 18
-rw-r--r--  NOTICE | 4
-rw-r--r--  README-DEV.rst | 28
-rw-r--r--  TODO | 10
-rwxr-xr-x  dev/run | 24
-rw-r--r--  license.skip | 12
-rw-r--r--  rel/haproxy.cfg | 4
-rwxr-xr-x  rel/overlay/bin/couchdb | 8
-rw-r--r--  rel/overlay/etc/default.ini | 19
-rw-r--r--  rel/overlay/etc/local.ini | 3
-rw-r--r--  rel/plugins/eunit_plugin.erl | 20
-rw-r--r--  src/chttpd/rebar.config | 2
-rw-r--r--  src/chttpd/src/chttpd_auth_request.erl | 4
-rw-r--r--  src/chttpd/src/chttpd_db.erl | 3
-rw-r--r--  src/chttpd/src/chttpd_httpd_handlers.erl | 1
-rw-r--r--  src/chttpd/src/chttpd_misc.erl | 35
-rw-r--r--  src/chttpd/test/chttpd_db_test.erl | 57
-rw-r--r--  src/chttpd/test/chttpd_dbs_info_test.erl | 169
-rw-r--r--  src/chttpd/test/chttpd_security_tests.erl | 22
-rw-r--r--  src/couch/include/couch_eunit.hrl | 11
-rw-r--r--  src/couch/src/couch_db.erl | 31
-rw-r--r--  src/couch/src/couch_httpd_db.erl | 3
-rw-r--r--  src/couch/src/couch_key_tree.erl | 4
-rw-r--r--  src/couch/src/couch_passwords.erl | 20
-rw-r--r--  src/couch/src/test_request.erl | 10
-rw-r--r--  src/couch/test/chttpd_endpoints_tests.erl | 1
-rw-r--r--  src/couch/test/couch_db_tests.erl | 221
-rw-r--r--  src/couch/test/couch_key_tree_tests.erl | 18
-rwxr-xr-x  src/couch/test/couchdb_cookie_domain_tests.erl | 78
-rw-r--r--  src/couch/test/global_changes_tests.erl | 2
-rw-r--r--  src/couch_epi/rebar.config | 4
-rw-r--r--  src/couch_epi/src/couch_epi_codegen.erl | 19
-rw-r--r--  src/couch_index/rebar.config | 2
-rw-r--r--  src/couch_index/test/couch_index_compaction_tests.erl | 16
-rw-r--r--  src/couch_index/test/couch_index_ddoc_updated_tests.erl | 5
-rw-r--r--  src/couch_log/src/couch_log.erl | 1
-rw-r--r--  src/couch_log/test/couch_log_test_util.erl | 7
-rw-r--r--  src/couch_mrview/rebar.config | 2
-rw-r--r--  src/couch_mrview/src/couch_mrview_util.erl | 4
-rw-r--r--  src/couch_peruser/src/couch_peruser.erl | 60
-rw-r--r--  src/couch_peruser/test/couch_peruser_test.erl | 189
-rw-r--r--  src/couch_replicator/README.md | 2
-rw-r--r--  src/couch_replicator/src/couch_replicator_api_wrap.erl | 23
-rw-r--r--  src/couch_replicator/src/couch_replicator_clustering.erl | 73
-rw-r--r--  src/couch_replicator/src/couch_replicator_docs.erl | 37
-rw-r--r--  src/couch_replicator/src/couch_replicator_httpd.erl | 2
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler_job.erl | 3
-rw-r--r--  src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl | 143
-rw-r--r--  src/couch_stats/src/couch_stats.app.src | 2
-rw-r--r--  src/couch_stats/src/couch_stats.erl | 2
-rw-r--r--  src/couch_stats/src/couch_stats_process_tracker.erl | 8
-rw-r--r--  src/fabric/rebar.config | 5
-rw-r--r--  src/fabric/src/fabric_db_create.erl | 4
-rw-r--r--  src/fabric/src/fabric_view_all_docs.erl | 10
-rw-r--r--  src/mango/src/mango_cursor.erl | 76
-rw-r--r--  src/mango/src/mango_cursor_special.erl | 7
-rw-r--r--  src/mango/src/mango_cursor_text.erl | 2
-rw-r--r--  src/mango/src/mango_cursor_view.erl | 4
-rw-r--r--  src/mango/src/mango_error.erl | 19
-rw-r--r--  src/mango/src/mango_idx.erl | 14
-rw-r--r--  src/mango/src/mango_idx_text.erl | 3
-rw-r--r--  src/mango/src/mango_native_proc.erl | 51
-rw-r--r--  src/mango/src/mango_selector.erl | 225
-rw-r--r--  src/mango/test/02-basic-find-test.py | 12
-rw-r--r--  src/mango/test/03-operator-test.py | 9
-rw-r--r--  src/mango/test/05-index-selection-test.py | 115
-rw-r--r--  src/mango/test/06-basic-text-test.py | 33
-rw-r--r--  src/mango/test/07-text-custom-field-list-test.py | 50
-rw-r--r--  src/mango/test/10-disable-array-length-field-test.py | 2
-rw-r--r--  src/mango/test/16-index-selectors-test.py | 12
-rw-r--r--  src/mango/test/mango.py | 47
-rw-r--r--  src/rexi/rebar.config | 2
-rwxr-xr-x  test/build/test-run-couch-for-mango.sh | 3
-rwxr-xr-x  test/javascript/run | 16
-rw-r--r--  test/javascript/tests-cluster/with-quorum/db-creation.js | 27
-rw-r--r--  test/javascript/tests-cluster/without-quorum/db-creation.js | 28
-rw-r--r--  test/javascript/tests/design_docs_query.js | 154
-rw-r--r--  test/javascript/tests/view_errors.js | 2
82 files changed, 1947 insertions(+), 546 deletions(-)
diff --git a/COMMITTERS.md b/COMMITTERS.md
index 25866b857..7412efcb5 100644
--- a/COMMITTERS.md
+++ b/COMMITTERS.md
@@ -32,7 +32,7 @@ mean this in the sense of being loyal to the project and its interests.
* Klaus Trainer <klaus_trainer@apache.org>
* Benjamin Young <bigbluehat@apache.org>
* Robert Kowalski <robertkowalski@apache.org>
- * Max Thayer <garbados@apache.org>
+ * Diana Thayer <garbados@apache.org>
* Gianugo Rabellino <gianugo@apache.org>
* Jenn Schiffer <jenn@apache.org>
* Lena Reinhard <lena@apache.org>
diff --git a/Jenkinsfile b/Jenkinsfile
index fed976afc..221e96f45 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -12,6 +12,7 @@ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
*/
+// jenkins user == uid 910 for reference
pipeline {
// no top-level agent; agents must be declared for each stage
agent none
@@ -33,7 +34,7 @@ pipeline {
// each time. Instead, manually insert docker pull then run with the
// docker image.
node {
- label 'couchdbtest'
+ label 'ubuntu'
}
}
steps {
@@ -78,7 +79,7 @@ pipeline {
stage('Test') {
steps {
parallel(centos6erlang183: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
sh 'docker pull couchdbdev/centos-6-erlang-18.3'
withDockerContainer(image: 'couchdbdev/centos-6-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
@@ -118,7 +119,7 @@ pipeline {
} // node
},
centos7erlangdefault: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 45, unit: "MINUTES") {
sh 'docker pull couchdbdev/centos-7-erlang-default'
withDockerContainer(image: 'couchdbdev/centos-7-erlang-default', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
@@ -143,7 +144,7 @@ pipeline {
} // node
},
centos7erlang183: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
sh 'docker pull couchdbdev/centos-7-erlang-18.3'
withDockerContainer(image: 'couchdbdev/centos-7-erlang-18.3', args: '-e LD_LIBRARY_PATH=/usr/local/bin') {
@@ -183,7 +184,7 @@ pipeline {
} // node
},
ubuntu1404erlangdefault: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 45, unit: "MINUTES") {
sh 'docker pull couchdbdev/ubuntu-14.04-erlang-default'
withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-default') {
@@ -206,7 +207,7 @@ pipeline {
} // node
},
ubuntu1404erlang183: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
sh 'docker pull couchdbdev/ubuntu-14.04-erlang-18.3'
withDockerContainer(image: 'couchdbdev/ubuntu-14.04-erlang-18.3') {
@@ -246,7 +247,7 @@ pipeline {
} // node
},
ubuntu1604erlangdefault: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 45, unit: "MINUTES") {
sh 'docker pull couchdbdev/ubuntu-16.04-erlang-default'
withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-default') {
@@ -269,7 +270,7 @@ pipeline {
} // node
},
ubuntu1604erlang183: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
sh 'docker pull couchdbdev/ubuntu-16.04-erlang-18.3'
withDockerContainer(image: 'couchdbdev/ubuntu-16.04-erlang-18.3') {
@@ -309,7 +310,7 @@ pipeline {
} // node
},
debian8erlangdefault: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 45, unit: "MINUTES") {
sh 'docker pull couchdbdev/debian-8-erlang-default'
withDockerContainer(image: 'couchdbdev/debian-8-erlang-default') {
@@ -332,7 +333,7 @@ pipeline {
} // node
},
debian8erlang183: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
sh 'docker pull couchdbdev/debian-8-erlang-18.3'
withDockerContainer(image: 'couchdbdev/debian-8-erlang-18.3') {
@@ -372,7 +373,7 @@ pipeline {
} // node
},
debian9erlangdefault: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 45, unit: "MINUTES") {
sh 'docker pull couchdbdev/debian-9-erlang-default'
withDockerContainer(image: 'couchdbdev/debian-9-erlang-default') {
@@ -395,7 +396,7 @@ pipeline {
} // node
},
debian9erlang183: {
- node(label: 'couchdbtest') {
+ node(label: 'ubuntu') {
timeout(time: 60, unit: "MINUTES") {
sh 'docker pull couchdbdev/debian-9-erlang-18.3'
withDockerContainer(image: 'couchdbdev/debian-9-erlang-18.3') {
@@ -447,7 +448,7 @@ pipeline {
// each time. Instead, manually insert docker pull then run with the
// docker image.
node {
- label 'couchdbtest'
+ label 'ubuntu'
}
}
steps {
diff --git a/LICENSE b/LICENSE
index 1f0b270b4..e9a9c81e8 100644
--- a/LICENSE
+++ b/LICENSE
@@ -272,32 +272,6 @@ For the src/ibrowse component:
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-For the test/etap/etap.erl component:
-
- Copyright (c) 2008-2009 Nick Gerakines <nick@gerakines.net>
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
-
-
For the src/couch_log/src/couch_log_trunc_io.erl and
the src/couch_log/src/couch_log_trunc_io_fmt.erl components
diff --git a/Makefile b/Makefile
index 6f7328541..ed8fc63fc 100644
--- a/Makefile
+++ b/Makefile
@@ -100,6 +100,8 @@ fauxton: share/www
.PHONY: check
# target: check - Test everything
check: all
+ @$(MAKE) test-cluster-with-quorum
+ @$(MAKE) test-cluster-without-quorum
@$(MAKE) eunit
@$(MAKE) javascript
@$(MAKE) mango-test
@@ -123,7 +125,7 @@ soak-eunit: couch
.PHONY: javascript
# target: javascript - Run JavaScript test suites or specific ones defined by suites option
-javascript:
+javascript: devclean
@mkdir -p share/www/script/test
ifeq ($(IN_RELEASE), true)
@cp test/javascript/tests/lorem*.txt share/www/script/test/
@@ -131,13 +133,46 @@ else
@mkdir -p src/fauxton/dist/release/test
@cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/
endif
- @rm -rf dev/lib
@dev/run -n 1 -q --with-admin-party-please \
--enable-erlang-views \
-c 'startup_jitter=0' \
'test/javascript/run --suites "$(suites)" \
--ignore "$(ignore_js_suites)"'
+# TODO: port to Makefile.win
+.PHONY: test-cluster-with-quorum
+test-cluster-with-quorum: devclean
+ @mkdir -p share/www/script/test
+ifeq ($(IN_RELEASE), true)
+ @cp test/javascript/tests/lorem*.txt share/www/script/test/
+else
+ @mkdir -p src/fauxton/dist/release/test
+ @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/
+endif
+ @dev/run -n 3 -q --with-admin-party-please \
+ --enable-erlang-views --degrade-cluster 1 \
+ -c 'startup_jitter=0' \
+ 'test/javascript/run --suites "$(suites)" \
+ --ignore "$(ignore_js_suites)" \
+ --path test/javascript/tests-cluster/with-quorum'
+
+# TODO: port to Makefile.win
+.PHONY: test-cluster-without-quorum
+test-cluster-without-quorum: devclean
+ @mkdir -p share/www/script/test
+ifeq ($(IN_RELEASE), true)
+ @cp test/javascript/tests/lorem*.txt share/www/script/test/
+else
+ @mkdir -p src/fauxton/dist/release/test
+ @cp test/javascript/tests/lorem*.txt src/fauxton/dist/release/test/
+endif
+ @dev/run -n 3 -q --with-admin-party-please \
+ --enable-erlang-views --degrade-cluster 2 \
+ -c 'startup_jitter=0' \
+ 'test/javascript/run --suites "$(suites)" \
+ --ignore "$(ignore_js_suites)" \
+ --path test/javascript/tests-cluster/without-quorum'
+
.PHONY: soak-javascript
soak-javascript:
@mkdir -p share/www/script/test
@@ -193,7 +228,7 @@ build-test:
.PHONY: mango-test
# target: mango-test - Run Mango tests
-mango-test: all
+mango-test: devclean all
./test/build/test-run-couch-for-mango.sh \
@@ -220,24 +255,6 @@ dialyze: .rebar
@$(REBAR) -r dialyze $(DIALYZE_OPTS)
-.PHONY: docker-image
-# target: docker-image - Build Docker image
-docker-image:
- @docker build --rm -t couchdb/dev-cluster .
-
-
-.PHONY: docker-start
-# target: docker-start - Start CouchDB in Docker container
-docker-start:
- @docker run -d -P -t couchdb/dev-cluster > .docker-id
-
-
-.PHONY: docker-stop
-# target: docker-stop - Stop Docker container
-docker-stop:
- @docker stop `cat .docker-id`
-
-
.PHONY: introspect
# target: introspect - Check for commits difference between rebar.config and repository
introspect:
@@ -285,6 +302,7 @@ ifeq ($(IN_RELEASE), true)
@cp -R share/docs/html/* rel/couchdb/share/www/docs/
@cp share/docs/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
else
+ @mkdir -p rel/couchdb/share/www/docs/
@mkdir -p rel/couchdb/share/docs/
@cp -R src/docs/build/html/ rel/couchdb/share/www/docs
@cp src/docs/build/man/apachecouchdb.1 rel/couchdb/share/docs/couchdb.1
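Note: the two new cluster-test targets above can also be invoked on their own; a minimal sketch, assuming a working dev environment:

    make test-cluster-with-quorum
    make test-cluster-without-quorum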
diff --git a/Makefile.win b/Makefile.win
index 874ddf411..7ff0ab5c5 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -137,24 +137,6 @@ dialyze: .rebar
@$(REBAR) -r dialyze $(DIALYZE_OPTS)
-.PHONY: docker-image
-# target: docker-image - Build Docker image
-docker-image:
- @docker build --rm -t couchdb\dev-cluster .
-
-
-.PHONY: docker-start
-# target: docker-start - Start CouchDB in Docker container
-docker-start:
- @docker run -d -P -t couchdb\dev-cluster > .docker-id
-
-
-.PHONY: docker-stop
-# target: docker-stop - Stop Docker container
-docker-stop:
- @docker stop `cat .docker-id`
-
-
.PHONY: introspect
# target: introspect - Check for commits difference between rebar.config and repository
introspect:
diff --git a/NOTICE b/NOTICE
index 5fddffb3e..a1f06ae7b 100644
--- a/NOTICE
+++ b/NOTICE
@@ -22,10 +22,6 @@ This product also includes the following third-party components:
Copyright 2005-2012, Chandrashekhar Mullaparthi
- * ETap (http://github.com/ngerakines/etap/)
-
- Copyright 2009, Nick Gerakines <nick@gerakines.net>
-
* mimeparse.js (http://code.google.com/p/mimeparse/)
Copyright 2009, Chris Anderson <jchris@apache.org>
diff --git a/README-DEV.rst b/README-DEV.rst
index f8d80ac41..9cfa1f2ef 100644
--- a/README-DEV.rst
+++ b/README-DEV.rst
@@ -89,7 +89,7 @@ Unless you want to install the optional dependencies, skip to the next section.
Install what else we can with Homebrew::
- brew install help2man gnupg md5sha1sum node spidermonkey
+ brew install help2man gnupg md5sha1sum node
If you don't already have pip installed, install it::
@@ -150,7 +150,7 @@ to make targets::
make eunit apps=couch,chttpd
# Run only tests from couch_btree_tests suite
- make eunit suites=couch_btree_tests
+ make eunit apps=couch suites=couch_btree
# Run only specific tests
make eunit tests=btree_open_test,reductions_test
@@ -198,30 +198,6 @@ See ``make help`` for more info and useful commands.
Please report any problems to the developer's mailing list.
-Testing a cluster
------------------
-
-We use `Docker <https://docker.io>`_ to safely run a local three node
-cluster all inside a single docker container.
-
-Assuming you have Docker installed and running::
-
- make docker-image
-
-This will create a docker image (tagged 'couchdb/dev-cluster') capable
-of running a joined three node cluster.
-
-To start it up::
-
- make docker-start
-
-A three node cluster should now be running (you can now use ``docker ps``
-to find the exposed ports of the nodes).
-
-To stop it::
-
- make docker-stop
-
Releasing
---------
diff --git a/TODO b/TODO
deleted file mode 100644
index d9d1929b1..000000000
--- a/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-CouchDB 2.0 TODO
-
-The remaining work after the merge of 1843-feature-bigcouch for the
-bigcouch side of things;
-
-1) Restore documentation (couchdb-documentation and build scripts)
-2) Restore couch-plugins
-3) Restore my-first-couchdb-plugin (to couchdb-examples)
-4) Restore _db_updates
-5) Sundries (AUTHORS, INSTALL.*, LICENSE, NOTICE, etc)
diff --git a/dev/run b/dev/run
index 4924de1f6..a5d8fde8c 100755
--- a/dev/run
+++ b/dev/run
@@ -130,6 +130,8 @@ def setup_argparse():
help='The node number to seed them when creating the node(s)')
parser.add_option('-c', '--config-overrides', action="append", default=[],
help='Optional key=val config overrides. Can be repeated')
+ parser.add_option('--degrade-cluster', dest="degrade_cluster", type=int, default=0,
+ help='The number of nodes that should be stopped after cluster config')
return parser.parse_args()
@@ -142,6 +144,7 @@ def setup_context(opts, args):
'admin': opts.admin.split(':', 1) if opts.admin else None,
'nodes': ['node%d' % (i + opts.node_number) for i in range(opts.nodes)],
'node_number': opts.node_number,
+ 'degrade_cluster': opts.degrade_cluster,
'devdir': os.path.dirname(fpath),
'rootdir': os.path.dirname(os.path.dirname(fpath)),
'cmd': ' '.join(args),
@@ -337,18 +340,35 @@ def startup(ctx):
cluster_setup_with_admin_party(ctx)
else:
cluster_setup(ctx)
-
+ if ctx['degrade_cluster'] > 0:
+ degrade_cluster(ctx)
def kill_processes(ctx):
for proc in ctx['procs']:
if proc and proc.returncode is None:
proc.kill()
+def degrade_cluster(ctx):
+ if ctx['with_haproxy']:
+ haproxy_proc = ctx['procs'].pop()
+ for i in range(0, ctx['degrade_cluster']):
+ proc = ctx['procs'].pop()
+ if proc is not None:
+ kill_process(proc)
+ if ctx['with_haproxy']:
+ ctx['procs'].append(haproxy_proc)
+
+@log('Stopping proc {proc.pid}')
+def kill_process(proc):
+ if proc and proc.returncode is None:
+ proc.kill()
def boot_nodes(ctx):
for node in ctx['nodes']:
ctx['procs'].append(boot_node(ctx, node))
- ctx['procs'].append(boot_haproxy(ctx))
+ haproxy_proc = boot_haproxy(ctx)
+ if haproxy_proc is not None:
+ ctx['procs'].append(haproxy_proc)
def ensure_all_nodes_alive(ctx):
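Note: a sketch of the new --degrade-cluster flag used directly (node count and other flags are illustrative):

    # boot a three-node dev cluster, then stop one node after cluster setup
    dev/run -n 3 --with-admin-party-please --degrade-cluster 1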
diff --git a/license.skip b/license.skip
index 143639251..35f91e13c 100644
--- a/license.skip
+++ b/license.skip
@@ -135,7 +135,6 @@
^src/couchjs-node/Makefile.in
^src/couch_dbupdates
^src/ejson/.*
-^src/etap/.*
^src/fauxton/app/addons/config/assets/less/config.less
^src/fauxton/assets/css/codemirror.css
^src/fauxton/assets/css/nv.d3.css
@@ -171,9 +170,7 @@
^src/ddoc_cache/README.md
^src/ets_lru/ebin/.*.beam
^src/ets_lru/ebin/ets_lru.app
-^src/ets_lru/test/etap.erl
^src/ejson/.*
-^src/etap/.*
^src/fabric/ebin/.*.beam
^src/fabric/ebin/fabric.app
^src/ibrowse/.*
@@ -190,15 +187,6 @@
^stamp-h1
^test/Makefile
^test/Makefile.in
-^test/etap/.*.beam
-^test/etap/.*.o
-^test/etap/etap.erl
-^test/etap/.deps/.*
-^test/etap/test_cfg_register
-^test/etap/Makefile
-^test/etap/Makefile.in
-^test/etap/temp..*
-^test/etap/fixtures/*
^test/javascript/Makefile
^test/javascript/Makefile.in
^test/local.ini
diff --git a/rel/haproxy.cfg b/rel/haproxy.cfg
index 73ec987f4..45affaffe 100644
--- a/rel/haproxy.cfg
+++ b/rel/haproxy.cfg
@@ -30,8 +30,8 @@ defaults
timeout connect 500
stats enable
- stats scope .
- stats uri /_stats
+ stats uri /_haproxy_stats
+ # stats auth admin:admin # Uncomment for basic auth
frontend http-in
# This requires HAProxy 1.5.x
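Note: the stats page moves off /_stats, which CouchDB handles itself (see the _stats authorization checks in chttpd_auth_request.erl below), to /_haproxy_stats; a sketch, assuming haproxy fronts the dev cluster on the default port:

    curl http://127.0.0.1:5984/_haproxy_stats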
diff --git a/rel/overlay/bin/couchdb b/rel/overlay/bin/couchdb
index c82f581f4..a9e6e9bea 100755
--- a/rel/overlay/bin/couchdb
+++ b/rel/overlay/bin/couchdb
@@ -26,6 +26,10 @@ export BINDIR="$ROOTDIR/erts-$ERTS_VSN/bin"
export EMU=beam
export PROGNAME=`echo $0 | sed 's/.*\///'`
+ARGS_FILE="${COUCHDB_ARGS_FILE:-$ROOTDIR/etc/vm.args}"
+SYSCONFIG_FILE="${COUCHDB_SYSCONFIG_FILE:-$ROOTDIR/releases/$APP_VSN/sys.config}"
+
exec "$BINDIR/erlexec" -boot "$ROOTDIR/releases/$APP_VSN/couchdb" \
- -args_file "$ROOTDIR/etc/vm.args" \
- -config "$ROOTDIR/releases/$APP_VSN/sys.config" "$@"
+ -args_file "${ARGS_FILE}" \
+ -config "${SYSCONFIG_FILE}" "$@"
+
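Note: the two new variables make the vm.args and sys.config locations overridable at start-up; a sketch (the paths are illustrative):

    COUCHDB_ARGS_FILE=/etc/couchdb/vm.args \
    COUCHDB_SYSCONFIG_FILE=/etc/couchdb/sys.config \
    bin/couchdb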
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 745e5a8e4..17a9a4f3d 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -69,6 +69,10 @@ require_valid_user = false
; List of headers that will be kept when the header Prefer: return=minimal is included in a request.
; If Server header is left out, Mochiweb will add its own one in.
prefer_minimal = Cache-Control, Content-Length, Content-Range, Content-Type, ETag, Server, Transfer-Encoding, Vary
+;
+; Limit the maximum number of databases when trying to get detailed information
+; using _dbs_info in a request
+max_db_number_for_dbs_info_req = 100
[database_compaction]
; larger buffer sizes can originate smaller files
@@ -88,10 +92,13 @@ enable = false
; If set to true and a user is deleted, the respective database gets
; deleted as well.
delete_dbs = false
-; Wait this many seconds after startup before attaching changes listeners
-; cluster_start_period = 5
-; Re-check cluster state at least every cluster_quiet_period seconds
-; cluster_quiet_period = 60
+; Set a default q value for peruser-created databases that is different from
+; cluster / q
+;q = 1
+; Prefix for user databases. If you change this after user dbs have been
+; created, the existing databases won't get deleted when the associated user
+; gets deleted, because the prefix no longer matches.
+database_prefix = userdb-
[httpd]
port = {{backend_port}}
@@ -408,6 +415,10 @@ ssl_certificate_max_depth = 3
; avoid crashing the whole replication job, which would consume more resources
; and add log noise.
;missing_doc_retry_msec = 2000
+; Wait this many seconds after startup before attaching changes listeners
+; cluster_start_period = 5
+; Re-check cluster state at least every cluster_quiet_period seconds
+; cluster_quiet_period = 60
[compaction_daemon]
; The delay, in seconds, between each check for which database and view indexes
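Note: the new limit lives in the [chttpd] section and is read at request time (see the config:get_integer/3 call in chttpd_misc.erl below), so it can be raised without a restart; a sketch, assuming admin credentials and that the node-local _config endpoint is available on this build:

    curl -X PUT http://adm:pass@127.0.0.1:5984/_node/_local/_config/chttpd/max_db_number_for_dbs_info_req -d '"200"'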
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index cd3080ecf..6b46f0fa1 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -17,6 +17,9 @@
; If set to true and a user is deleted, the respective database gets
; deleted as well.
;delete_dbs = true
+; Set a default q value for peruser-created databases that is different from
+; cluster / q
+;q = 1
[chttpd]
;port = 5984
diff --git a/rel/plugins/eunit_plugin.erl b/rel/plugins/eunit_plugin.erl
index bbf83d2ec..1de20b394 100644
--- a/rel/plugins/eunit_plugin.erl
+++ b/rel/plugins/eunit_plugin.erl
@@ -32,8 +32,28 @@ build_eunit_config(Config0, AppFile) ->
Cwd = filename:absname(rebar_utils:get_cwd()),
DataDir = Cwd ++ "/tmp/data",
ViewIndexDir = Cwd ++ "/tmp/data",
+ TmpDataDir = Cwd ++ "/tmp/tmp_data",
+ cleanup_dirs([DataDir, TmpDataDir]),
Config1 = rebar_config:set_global(Config0, template, "setup_eunit"),
Config2 = rebar_config:set_global(Config1, prefix, Cwd),
Config3 = rebar_config:set_global(Config2, data_dir, DataDir),
Config = rebar_config:set_global(Config3, view_index_dir, ViewIndexDir),
rebar_templater:create(Config, AppFile).
+
+
+cleanup_dirs(Dirs) ->
+ lists:foreach(fun(Dir) ->
+ case filelib:is_dir(Dir) of
+ true -> del_dir(Dir);
+ false -> ok
+ end
+ end, Dirs).
+
+
+del_dir(Dir) ->
+ All = filelib:wildcard(Dir ++ "/**"),
+ {Dirs, Files} = lists:partition(fun filelib:is_dir/1, All),
+ ok = lists:foreach(fun file:delete/1, Files),
+ SortedDirs = lists:sort(fun(A, B) -> length(A) > length(B) end, Dirs),
+ ok = lists:foreach(fun file:del_dir/1, SortedDirs),
+ ok = file:del_dir(Dir).
diff --git a/src/chttpd/rebar.config b/src/chttpd/rebar.config
new file mode 100644
index 000000000..e0d18443b
--- /dev/null
+++ b/src/chttpd/rebar.config
@@ -0,0 +1,2 @@
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index 4e2e0dbf2..05c5e8e35 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -35,6 +35,8 @@ authorize_request_int(#httpd{path_parts=[<<"favicon.ico">>|_]}=Req) ->
Req;
authorize_request_int(#httpd{path_parts=[<<"_all_dbs">>|_]}=Req) ->
Req;
+authorize_request_int(#httpd{path_parts=[<<"_dbs_info">>|_]}=Req) ->
+ Req;
authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='PUT'}=Req) ->
require_admin(Req);
authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='DELETE'}=Req) ->
@@ -81,6 +83,8 @@ server_authorization_check(#httpd{path_parts=[<<"_stats">>]}=Req) ->
Req;
server_authorization_check(#httpd{path_parts=[<<"_active_tasks">>]}=Req) ->
Req;
+server_authorization_check(#httpd{path_parts=[<<"_dbs_info">>]}=Req) ->
+ Req;
server_authorization_check(#httpd{method=Method, path_parts=[<<"_utils">>|_]}=Req)
when Method =:= 'HEAD' orelse Method =:= 'GET' ->
Req;
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index dbbb454cb..de5c79c66 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -825,7 +825,8 @@ db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
missing_rev -> nil;
Rev -> Rev
end,
- {TargetDocId, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
+ {TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
+ TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
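Note: with the unquoting, a percent-encoded Destination header now produces a decoded target doc id; a sketch (host, credentials, db and doc names are illustrative):

    curl -X COPY http://adm:pass@127.0.0.1:5984/db/testdoc \
         -H 'Destination: foo%2Fbar%23baz'
    # => {"ok":true,"id":"foo/bar#baz","rev":"1-..."}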
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 9c3044126..cb52e2c40 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -18,6 +18,7 @@ url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
+url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
url_handler(<<"_node">>) -> fun chttpd_misc:handle_node_req/1;
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 15eabbfbd..253da233e 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -14,6 +14,7 @@
-export([
handle_all_dbs_req/1,
+ handle_dbs_info_req/1,
handle_node_req/1,
handle_favicon_req/1,
handle_favicon_req/2,
@@ -37,6 +38,8 @@
[send_json/2,send_json/3,send_method_not_allowed/2,
send_chunk/2,start_chunked_response/3]).
+-define(MAX_DB_NUM_FOR_DBS_INFO, 100).
+
% httpd global handlers
handle_welcome_req(Req) ->
@@ -141,6 +144,38 @@ all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
{ok, Acc#vacc{resp=Resp1}}.
+handle_dbs_info_req(#httpd{method='POST'}=Req) ->
+ chttpd:validate_ctype(Req, "application/json"),
+ Props = chttpd:json_body_obj(Req),
+ Keys = couch_mrview_util:get_view_keys(Props),
+ case Keys of
+ undefined -> throw({bad_request, "`keys` member must exist."});
+ _ -> ok
+ end,
+ MaxNumber = config:get_integer("chttpd",
+ "max_db_number_for_dbs_info_req", ?MAX_DB_NUM_FOR_DBS_INFO),
+ case length(Keys) =< MaxNumber of
+ true -> ok;
+ false -> throw({bad_request, too_many_keys})
+ end,
+ {ok, Resp} = chttpd:start_json_response(Req, 200),
+ send_chunk(Resp, "["),
+ lists:foldl(fun(DbName, AccSeparator) ->
+ case catch fabric:get_db_info(DbName) of
+ {ok, Result} ->
+ Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ _ ->
+ Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ "," % AccSeparator now has a comma
+ end, "", Keys),
+ send_chunk(Resp, "]"),
+ chttpd:end_json_response(Resp);
+handle_dbs_info_req(Req) ->
+ send_method_not_allowed(Req, "POST").
+
handle_task_status_req(#httpd{method='GET'}=Req) ->
{Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
Response = lists:flatmap(fun({Node, Tasks}) ->
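Note: a sketch of the new endpoint in use (host, credentials and db names are illustrative; the error cases are covered by the tests further down):

    curl -X POST http://adm:pass@127.0.0.1:5984/_dbs_info \
         -H 'Content-Type: application/json' \
         -d '{"keys": ["db1", "db2"]}'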
diff --git a/src/chttpd/test/chttpd_db_test.erl b/src/chttpd/test/chttpd_db_test.erl
index f3c779bd3..2071ca502 100644
--- a/src/chttpd/test/chttpd_db_test.erl
+++ b/src/chttpd/test/chttpd_db_test.erl
@@ -19,7 +19,12 @@
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+-define(DESTHEADER1, {"Destination", "foo%E5%95%8Abar"}).
+-define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}).
+
+
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
+-define(i2l(I), integer_to_list(I)).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
@@ -62,7 +67,10 @@ all_test_() ->
fun should_return_404_for_delete_att_on_notadoc/1,
fun should_return_409_for_del_att_without_rev/1,
fun should_return_200_for_del_att_with_rev/1,
- fun should_return_409_for_put_att_nonexistent_rev/1
+ fun should_return_409_for_put_att_nonexistent_rev/1,
+ fun should_return_update_seq_when_set_on_all_docs/1,
+ fun should_not_return_update_seq_when_unset_on_all_docs/1,
+ fun should_return_correct_id_on_doc_copy/1
]
}
}
@@ -187,6 +195,53 @@ should_return_409_for_put_att_nonexistent_rev(Url) ->
end).
+should_return_update_seq_when_set_on_all_docs(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
+ {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
+ ++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"update_seq">>, ResultJson)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"offset">>, ResultJson))
+ end).
+
+
+should_not_return_update_seq_when_unset_on_all_docs(Url) ->
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
+ {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
+ ++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ?assertEqual(undefined,
+ couch_util:get_value(<<"update_seq">>, ResultJson)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"offset">>, ResultJson))
+ end).
+
+
+should_return_correct_id_on_doc_copy(Url) ->
+ ?_test(begin
+ {ok, _, _, _} = create_doc(Url, "testdoc"),
+ {_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]),
+ {ResultJson1} = ?JSON_DECODE(ResultBody1),
+ Id1 = couch_util:get_value(<<"id">>, ResultJson1),
+
+ {_, _, _, ResultBody2} = test_request:copy(Url ++ "/testdoc/",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]),
+ {ResultJson2} = ?JSON_DECODE(ResultBody2),
+ Id2 = couch_util:get_value(<<"id">>, ResultJson2),
+ [
+ ?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1),
+ ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2)
+ ]
+ end).
+
+
attachment_doc() ->
{ok, Data} = file:read_file(?FIXTURE_TXT),
{[
diff --git a/src/chttpd/test/chttpd_dbs_info_test.erl b/src/chttpd/test/chttpd_dbs_info_test.erl
new file mode 100644
index 000000000..5b61d8831
--- /dev/null
+++ b/src/chttpd/test/chttpd_dbs_info_test.erl
@@ -0,0 +1,169 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(chttpd_dbs_info_test).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+-define(USER, "chttpd_db_test_admin").
+-define(PASS, "pass").
+-define(AUTH, {basic_auth, {?USER, ?PASS}}).
+-define(CONTENT_JSON, {"Content-Type", "application/json"}).
+
+
+setup() ->
+ Hashed = couch_passwords:hash_admin_password(?PASS),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ Url = lists:concat(["http://", Addr, ":", Port, "/"]),
+ Db1Url = lists:concat([Url, "db1"]),
+ create_db(Db1Url),
+ Db2Url = lists:concat([Url, "db2"]),
+ create_db(Db2Url),
+ Url.
+
+teardown(Url) ->
+ Db1Url = lists:concat([Url, "db1"]),
+ Db2Url = lists:concat([Url, "db2"]),
+ delete_db(Db1Url),
+ delete_db(Db2Url),
+ ok = config:delete("admins", ?USER, _Persist=false).
+
+create_db(Url) ->
+ {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+ ?assert(Status =:= 201 orelse Status =:= 202).
+
+delete_db(Url) ->
+ {ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
+
+dbs_info_test_() ->
+ {
+ "chttpd dbs info tests",
+ {
+ setup,
+ fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_return_error_for_get_db_info/1,
+ fun should_return_dbs_info_for_single_db/1,
+ fun should_return_dbs_info_for_multiple_dbs/1,
+ fun should_return_error_for_exceeded_keys/1,
+ fun should_return_error_for_missing_keys/1,
+ fun should_return_dbs_info_for_dbs_with_mixed_state/1
+ ]
+ }
+ }
+ }.
+
+
+should_return_error_for_get_db_info(Url) ->
+ ?_test(begin
+ {ok, Code, _, ResultBody} = test_request:get(Url ++ "/_dbs_info?"
+ ++ "keys=[\"db1\"]", [?CONTENT_JSON, ?AUTH]),
+ {Body} = jiffy:decode(ResultBody),
+ [
+ ?assertEqual(<<"method_not_allowed">>,
+ couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(405, Code)
+ ]
+ end).
+
+
+should_return_dbs_info_for_single_db(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\"]}",
+ {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ BodyJson = jiffy:decode(ResultBody),
+ {Db1Data} = lists:nth(1, BodyJson),
+ [
+ ?assertEqual(<<"db1">>,
+ couch_util:get_value(<<"key">>, Db1Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db1Data))
+ ]
+ end).
+
+
+should_return_dbs_info_for_multiple_dbs(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\", \"db2\"]}",
+ {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ BodyJson = jiffy:decode(ResultBody),
+ {Db1Data} = lists:nth(1, BodyJson),
+ {Db2Data} = lists:nth(2, BodyJson),
+ [
+ ?assertEqual(<<"db1">>,
+ couch_util:get_value(<<"key">>, Db1Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db1Data)),
+ ?assertEqual(<<"db2">>,
+ couch_util:get_value(<<"key">>, Db2Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db2Data))
+ ]
+ end).
+
+
+should_return_error_for_exceeded_keys(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\", \"db2\"]}",
+ ok = config:set("chttpd", "max_db_number_for_dbs_info_req", "1"),
+ {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ {Body} = jiffy:decode(ResultBody),
+ ok = config:delete("chttpd", "max_db_number_for_dbs_info_req"),
+ [
+ ?assertEqual(<<"bad_request">>,
+ couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(400, Code)
+ ]
+ end).
+
+
+should_return_error_for_missing_keys(Url) ->
+ ?_test(begin
+ NewDoc = "{\"missingkeys\": [\"db1\", \"db2\"]}",
+ {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ {Body} = jiffy:decode(ResultBody),
+ [
+ ?assertEqual(<<"bad_request">>,
+ couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(400, Code)
+ ]
+ end).
+
+
+should_return_dbs_info_for_dbs_with_mixed_state(Url) ->
+ ?_test(begin
+ NewDoc = "{\"keys\": [\"db1\", \"noexisteddb\"]}",
+ {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH], NewDoc),
+ Json = jiffy:decode(ResultBody),
+ {Db1Data} = lists:nth(1, Json),
+ {Db2Data} = lists:nth(2, Json),
+ [
+ ?assertEqual(
+ <<"db1">>, couch_util:get_value(<<"key">>, Db1Data)),
+ ?assertNotEqual(undefined,
+ couch_util:get_value(<<"info">>, Db1Data)),
+ ?assertEqual(
+ <<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)),
+ ?assertEqual(undefined, couch_util:get_value(<<"info">>, Db2Data))
+ ]
+ end).
diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/chttpd_security_tests.erl
index b80238c78..737a32e11 100644
--- a/src/chttpd/test/chttpd_security_tests.erl
+++ b/src/chttpd/test/chttpd_security_tests.erl
@@ -102,6 +102,8 @@ all_test_() ->
fun setup/0, fun teardown/1,
[
fun should_allow_admin_db_compaction/1,
+ fun should_allow_valid_password_to_create_user/1,
+ fun should_disallow_invalid_password_to_create_user/1,
fun should_disallow_anonymous_db_compaction/1,
fun should_disallow_db_member_db_compaction/1,
fun should_allow_db_admin_db_compaction/1,
@@ -124,6 +126,26 @@ should_allow_admin_db_compaction([Url,_UsersUrl]) ->
couch_util:get_value(<<"ok">>, InnerJson, undefined)
end).
+
+should_allow_valid_password_to_create_user([_Url, UsersUrl]) ->
+ UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
+ \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}",
+ {ok, _, _, ResultBody} = test_request:post(UsersUrl,
+ [?CONTENT_JSON, ?AUTH], UserDoc),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ {InnerJson} = ResultJson,
+ ?_assertEqual(true, couch_util:get_value(<<"ok">>, InnerJson)).
+
+should_disallow_invalid_password_to_create_user([_Url, UsersUrl]) ->
+ UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
+ \"type\": \"user\", \"roles\": [], \"password\": 123}",
+ {ok, _, _, ResultBody} = test_request:post(UsersUrl,
+ [?CONTENT_JSON, ?AUTH], UserDoc),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ {InnerJson} = ResultJson,
+ ErrType = couch_util:get_value(<<"error">>, InnerJson),
+ ?_assertEqual(<<"forbidden">>, ErrType).
+
should_disallow_anonymous_db_compaction([Url,_UsersUrl]) ->
{ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact",
[?CONTENT_JSON], ""),
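Note: what the two new password tests exercise over HTTP; a sketch (host and credentials are assumptions):

    # string password: document is accepted
    curl -X POST http://adm:pass@127.0.0.1:5984/_users \
         -H 'Content-Type: application/json' \
         -d '{"_id": "org.couchdb.user:foo", "name": "foo", "roles": [], "type": "user", "password": "bar"}'

    # non-string password: rejected with {"error":"forbidden", ...}
    curl -X POST http://adm:pass@127.0.0.1:5984/_users \
         -H 'Content-Type: application/json' \
         -d '{"_id": "org.couchdb.user:baz", "name": "baz", "roles": [], "type": "user", "password": 123}'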
diff --git a/src/couch/include/couch_eunit.hrl b/src/couch/include/couch_eunit.hrl
index 8eb763aff..f4617e1d3 100644
--- a/src/couch/include/couch_eunit.hrl
+++ b/src/couch/include/couch_eunit.hrl
@@ -40,17 +40,14 @@
-define(tempfile,
fun() ->
- A = integer_to_list(couch_util:unique_monotonic_integer()),
- N = node(),
- FileName = lists:flatten(io_lib:format("~p-~s", [N, A])),
+ Suffix = couch_uuids:random(),
+ FileName = io_lib:format("~p-~s", [node(), Suffix]),
filename:join([?TEMPDIR, FileName])
end).
-define(tempdb,
fun() ->
- Nums = integer_to_list(couch_util:unique_monotonic_integer()),
- Prefix = "eunit-test-db",
- Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
- list_to_binary(Prefix ++ "-" ++ Suffix)
+ Suffix = couch_uuids:random(),
+ iolist_to_binary(["eunit-test-db-", Suffix])
end).
-define(docid,
fun() ->
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 5e720c284..b2b94ce80 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -1738,14 +1738,29 @@ do_pipe([Filter|Rest], F0) ->
set_namespace_range(Options, undefined) -> Options;
set_namespace_range(Options, NS) ->
- %% FIXME depending on order we might need to swap keys
- SK = select_gt(
- proplists:get_value(start_key, Options, <<"">>),
- <<NS/binary, "/">>),
- EK = select_lt(
- proplists:get_value(end_key, Options, <<NS/binary, "0">>),
- <<NS/binary, "0">>),
- [{start_key, SK}, {end_key_gt, EK}].
+ SK0 = proplists:get_value(start_key, Options, <<NS/binary, "/">>),
+ EKType = case proplists:get_value(end_key_gt, Options) of
+ undefined -> end_key;
+ _ -> end_key_gt
+ end,
+ EK0 = case EKType of
+ end_key ->
+ proplists:get_value(end_key, Options, <<NS/binary, "0">>);
+ end_key_gt ->
+ proplists:get_value(end_key_gt, Options, <<NS/binary, "0">>)
+ end,
+ case SK0 =< EK0 of
+ true ->
+ SK = select_gt(SK0, <<NS/binary, "/">>),
+ EK = select_lt(EK0, <<NS/binary, "0">>),
+ [{dir, proplists:get_value(dir, Options, fwd)},
+ {start_key, SK}, {EKType, EK}];
+ false ->
+ SK = select_lt(SK0, <<NS/binary, "0">>),
+ EK = select_gt(EK0, <<NS/binary, "/">>),
+ [{dir, proplists:get_value(dir, Options, fwd)},
+ {start_key, SK}, {EKType, EK}]
+ end.
select_gt(V1, V2) when V1 < V2 -> V2;
select_gt(V1, _V2) -> V1.
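Note: set_namespace_range/2 now honours the query direction instead of assuming forward key order, so reversed namespaced queries stop coming back empty; a sketch (db name is illustrative, cf. the design_docs_query.js test added below):

    curl 'http://adm:pass@127.0.0.1:5984/db/_design_docs?descending=true'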
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
index 05e63ba97..79ba84dab 100644
--- a/src/couch/src/couch_httpd_db.erl
+++ b/src/couch/src/couch_httpd_db.erl
@@ -616,7 +616,8 @@ db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
missing_rev -> nil;
Rev -> Rev
end,
- {TargetDocId, TargetRevs} = parse_copy_destination_header(Req),
+ {TargetDocId0, TargetRevs} = parse_copy_destination_header(Req),
+ TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
index bc4076abc..e2e187eb1 100644
--- a/src/couch/src/couch_key_tree.erl
+++ b/src/couch/src/couch_key_tree.erl
@@ -498,7 +498,3 @@ value_pref(Other, ?REV_MISSING) ->
Other;
value_pref(Last, _) ->
Last.
-
-
-% Tests moved to test/etap/06?-*.t
-
diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl
index 677ef6559..baf78f5d5 100644
--- a/src/couch/src/couch_passwords.erl
+++ b/src/couch/src/couch_passwords.erl
@@ -23,7 +23,13 @@
%% legacy scheme, not used for new passwords.
-spec simple(binary(), binary()) -> binary().
simple(Password, Salt) when is_binary(Password), is_binary(Salt) ->
- ?l2b(couch_util:to_hex(crypto:hash(sha, <<Password/binary, Salt/binary>>))).
+ ?l2b(couch_util:to_hex(crypto:hash(sha, <<Password/binary, Salt/binary>>)));
+simple(Password, Salt) when is_binary(Salt) ->
+ Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
+ throw({forbidden, Msg});
+simple(Password, Salt) when is_binary(Password) ->
+ Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
+ throw({forbidden, Msg}).
%% CouchDB utility functions
-spec hash_admin_password(binary() | list()) -> binary().
@@ -66,7 +72,17 @@ pbkdf2(Password, Salt, Iterations) when is_binary(Password),
is_integer(Iterations),
Iterations > 0 ->
{ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
- Result.
+ Result;
+pbkdf2(Password, Salt, Iterations) when is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0 ->
+ Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
+ throw({forbidden, Msg});
+pbkdf2(Password, Salt, Iterations) when is_binary(Password),
+ is_integer(Iterations),
+ Iterations > 0 ->
+ Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
+ throw({forbidden, Msg}).
-spec pbkdf2(binary(), binary(), integer(), integer())
-> {ok, binary()} | {error, derived_key_too_long}.
diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl
index a1b8b57c5..4dfde1a33 100644
--- a/src/couch/src/test_request.erl
+++ b/src/couch/src/test_request.erl
@@ -12,6 +12,7 @@
-module(test_request).
+-export([copy/1, copy/2, copy/3]).
-export([get/1, get/2, get/3]).
-export([post/2, post/3, post/4]).
-export([put/2, put/3, put/4]).
@@ -19,6 +20,15 @@
-export([options/1, options/2, options/3]).
-export([request/3, request/4, request/5]).
+copy(Url) ->
+ copy(Url, []).
+
+copy(Url, Headers) ->
+ copy(Url, Headers, []).
+
+copy(Url, Headers, Opts) ->
+ request(copy, Url, Headers, [], Opts).
+
get(Url) ->
get(Url, []).
diff --git a/src/couch/test/chttpd_endpoints_tests.erl b/src/couch/test/chttpd_endpoints_tests.erl
index 715576713..9b7430823 100644
--- a/src/couch/test/chttpd_endpoints_tests.erl
+++ b/src/couch/test/chttpd_endpoints_tests.erl
@@ -41,6 +41,7 @@ handlers(url_handler) ->
{<<"favicon.ico">>, chttpd_misc, handle_favicon_req},
{<<"_utils">>, chttpd_misc, handle_utils_dir_req},
{<<"_all_dbs">>, chttpd_misc, handle_all_dbs_req},
+ {<<"_dbs_info">>, chttpd_misc, handle_dbs_info_req},
{<<"_active_tasks">>, chttpd_misc, handle_task_status_req},
{<<"_node">>, chttpd_misc, handle_node_req},
{<<"_reload_query_servers">>, chttpd_misc, handle_reload_query_servers_req},
diff --git a/src/couch/test/couch_db_tests.erl b/src/couch/test/couch_db_tests.erl
index c57a0d497..d64f7c640 100644
--- a/src/couch/test/couch_db_tests.erl
+++ b/src/couch/test/couch_db_tests.erl
@@ -17,25 +17,55 @@
-define(TIMEOUT, 120).
-setup() ->
- Ctx = test_util:start_couch(),
- config:set("log", "include_sasl", "false", false),
- Ctx.
-
create_delete_db_test_()->
{
"Database create/delete tests",
{
setup,
- fun setup/0, fun test_util:stop_couch/1,
- fun(_) ->
- [should_create_db(),
- should_delete_db(),
- should_create_multiple_dbs(),
- should_delete_multiple_dbs(),
- should_create_delete_database_continuously()]
- end
+ fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ {
+ foreach,
+ fun() -> ?tempdb() end,
+ [
+ fun should_create_db/1,
+ fun should_delete_db/1
+ ]
+ }
+ }
+ }.
+
+create_delete_multiple_dbs_test_()->
+ {
+ "Multiple database create/delete tests",
+ {
+ setup,
+ fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ {
+ foreach,
+ fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end,
+ [
+ fun should_create_multiple_dbs/1,
+ fun should_delete_multiple_dbs/1
+ ]
+ }
+ }
+ }.
+
+create_delete_database_continuously_test_() ->
+ {
+ "Continious database create/delete tests",
+ {
+ setup,
+ fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ {
+ foreachx,
+ fun(_) -> ?tempdb() end,
+ [
+ {10, fun should_create_delete_database_continuously/2},
+ {100, fun should_create_delete_database_continuously/2}
+ ]
+ }
}
}.
@@ -44,87 +74,98 @@ open_db_test_()->
"Database open tests",
{
setup,
- fun setup/0, fun test_util:stop_couch/1,
- fun(_) ->
- [should_create_db_if_missing()]
- end
+ fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ {
+ foreach,
+ fun() -> ?tempdb() end,
+ [
+ fun should_create_db_if_missing/1,
+ fun should_open_db_if_exists/1
+ ]
+ }
}
}.
-should_create_db() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- ok = couch_db:close(Db),
- {ok, AllDbs} = couch_server:all_databases(),
- ?_assert(lists:member(DbName, AllDbs)).
-
-should_delete_db() ->
- DbName = ?tempdb(),
- couch_db:create(DbName, []),
- couch_server:delete(DbName, []),
- {ok, AllDbs} = couch_server:all_databases(),
- ?_assertNot(lists:member(DbName, AllDbs)).
-
-should_create_multiple_dbs() ->
- gen_server:call(couch_server, {set_max_dbs_open, 3}),
-
- DbNames = [?tempdb() || _ <- lists:seq(1, 6)],
- lists:foreach(fun(DbName) ->
- {ok, Db} = couch_db:create(DbName, []),
- ok = couch_db:close(Db)
- end, DbNames),
-
- {ok, AllDbs} = couch_server:all_databases(),
- NumCreated = lists:foldl(fun(DbName, Acc) ->
- ?assert(lists:member(DbName, AllDbs)),
- Acc+1
- end, 0, DbNames),
-
- ?_assertEqual(NumCreated, 6).
-
-should_delete_multiple_dbs() ->
- DbNames = [?tempdb() || _ <- lists:seq(1, 6)],
- lists:foreach(fun(DbName) ->
- {ok, Db} = couch_db:create(DbName, []),
- ok = couch_db:close(Db)
- end, DbNames),
-
- lists:foreach(fun(DbName) ->
- ok = couch_server:delete(DbName, [])
- end, DbNames),
-
- {ok, AllDbs} = couch_server:all_databases(),
- NumDeleted = lists:foldl(fun(DbName, Acc) ->
- ?assertNot(lists:member(DbName, AllDbs)),
- Acc + 1
- end, 0, DbNames),
-
- ?_assertEqual(NumDeleted, 6).
-
-should_create_delete_database_continuously() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:create(DbName, []),
- couch_db:close(Db),
- [{timeout, ?TIMEOUT, {integer_to_list(N) ++ " times",
- ?_assert(loop(DbName, N))}}
- || N <- [10, 100]].
-
-should_create_db_if_missing() ->
- DbName = ?tempdb(),
- {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
+should_create_db(DbName) ->
+ ?_test(begin
+ {ok, Before} = couch_server:all_databases(),
+ ?assertNot(lists:member(DbName, Before)),
+ ?assert(create_db(DbName)),
+ {ok, After} = couch_server:all_databases(),
+ ?assert(lists:member(DbName, After))
+ end).
+
+should_delete_db(DbName) ->
+ ?_test(begin
+ ?assert(create_db(DbName)),
+ {ok, Before} = couch_server:all_databases(),
+ ?assert(lists:member(DbName, Before)),
+ couch_server:delete(DbName, []),
+ {ok, After} = couch_server:all_databases(),
+ ?assertNot(lists:member(DbName, After))
+ end).
+
+should_create_multiple_dbs(DbNames) ->
+ ?_test(begin
+ gen_server:call(couch_server, {set_max_dbs_open, 3}),
+ {ok, Before} = couch_server:all_databases(),
+ [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
+ [?assert(create_db(DbName)) || DbName <- DbNames],
+ {ok, After} = couch_server:all_databases(),
+ [?assert(lists:member(DbName, After)) || DbName <- DbNames]
+ end).
+
+should_delete_multiple_dbs(DbNames) ->
+ ?_test(begin
+ [?assert(create_db(DbName)) || DbName <- DbNames],
+ {ok, Before} = couch_server:all_databases(),
+ [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
+ [?assert(delete_db(DbName)) || DbName <- DbNames],
+ {ok, After} = couch_server:all_databases(),
+ [?assertNot(lists:member(DbName, After)) || DbName <- DbNames]
+ end).
+
+should_create_delete_database_continuously(Times, DbName) ->
+ {lists:flatten(io_lib:format("~b times", [Times])),
+ {timeout, ?TIMEOUT, ?_test(begin
+ ?assert(create_db(DbName)),
+ lists:foreach(fun(_) ->
+ ?assert(delete_db(DbName)),
+ ?assert(create_db(DbName))
+ end, lists:seq(1, Times))
+ end)}}.
+
+should_create_db_if_missing(DbName) ->
+ ?_test(begin
+ {ok, Before} = couch_server:all_databases(),
+ ?assertNot(lists:member(DbName, Before)),
+ {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
+ ok = couch_db:close(Db),
+ {ok, After} = couch_server:all_databases(),
+ ?assert(lists:member(DbName, After))
+ end).
+
+should_open_db_if_exists(DbName) ->
+ ?_test(begin
+ ?assert(create_db(DbName)),
+ {ok, Before} = couch_server:all_databases(),
+ ?assert(lists:member(DbName, Before)),
+ {ok, Db} = couch_db:open(DbName, [{create_if_missing, true}]),
+ ok = couch_db:close(Db),
+ {ok, After} = couch_server:all_databases(),
+ ?assert(lists:member(DbName, After))
+ end).
+
+
+create_db(DbName) ->
+ create_db(DbName, []).
+
+create_db(DbName, Opts) ->
+ {ok, Db} = couch_db:create(DbName, Opts),
ok = couch_db:close(Db),
- {ok, AllDbs} = couch_server:all_databases(),
- ?_assert(lists:member(DbName, AllDbs)).
-
-loop(_, 0) ->
- true;
-loop(DbName, N) ->
- ok = cycle(DbName),
- loop(DbName, N - 1).
+ true.
-cycle(DbName) ->
+delete_db(DbName) ->
ok = couch_server:delete(DbName, []),
- {ok, Db} = couch_db:create(DbName, []),
- couch_db:close(Db),
- ok.
+ true.
diff --git a/src/couch/test/couch_key_tree_tests.erl b/src/couch/test/couch_key_tree_tests.erl
index 8aa886fc8..88d920363 100644
--- a/src/couch/test/couch_key_tree_tests.erl
+++ b/src/couch/test/couch_key_tree_tests.erl
@@ -17,14 +17,18 @@
-define(DEPTH, 10).
setup() ->
- test_util:start(?MODULE, [], [{dont_mock, [config]}]).
+ meck:new(config),
+ meck:expect(config, get, fun(_, _, Default) -> Default end).
+
+teardown(_) ->
+ meck:unload(config).
key_tree_merge_test_()->
{
"Key tree merge",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_merge_with_empty_tree(),
should_merge_reflexive(),
@@ -51,7 +55,7 @@ key_tree_missing_leaves_test_()->
"Missing tree leaves",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_not_find_missing_leaves(),
should_find_missing_leaves()
@@ -64,7 +68,7 @@ key_tree_remove_leaves_test_()->
"Remove tree leaves",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_have_no_effect_on_removing_no_leaves(),
should_have_no_effect_on_removing_non_existant_branch(),
@@ -81,7 +85,7 @@ key_tree_get_leaves_test_()->
"Leaves retrieving",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_extract_subtree(),
should_extract_subsubtree(),
@@ -103,7 +107,7 @@ key_tree_leaf_counting_test_()->
"Leaf counting",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_have_no_leaves_for_empty_tree(),
should_have_single_leaf_for_tree_with_single_node(),
@@ -118,7 +122,7 @@ key_tree_stemming_test_()->
"Stemming",
{
setup,
- fun setup/0, fun test_util:stop/1,
+ fun setup/0, fun teardown/1,
[
should_have_no_effect_for_stemming_more_levels_than_exists(),
should_return_one_deepest_node(),
diff --git a/src/couch/test/couchdb_cookie_domain_tests.erl b/src/couch/test/couchdb_cookie_domain_tests.erl
index 1a9aedb93..e66ab31e6 100755
--- a/src/couch/test/couchdb_cookie_domain_tests.erl
+++ b/src/couch/test/couchdb_cookie_domain_tests.erl
@@ -18,60 +18,52 @@
-define(USER, "cookie_domain_test_admin").
-define(PASS, "pass").
-setup(PortType) ->
+setup() ->
+ Ctx = test_util:start_couch([chttpd]),
Hashed = couch_passwords:hash_admin_password(?PASS),
ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
- lists:concat(["http://", Addr, ":", port(PortType), "/_session"]).
+ Port = mochiweb_socket_server:get(chttpd, port),
+ Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])),
+ ContentType = [{"Content-Type", "application/json"}],
+ Payload = jiffy:encode({[{name, ?l2b(?USER)}, {password, ?l2b(?PASS)}]}),
+ {ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}.
-teardown(_,_) ->
- ok = config:delete("admins", ?USER, _Persist=false).
+teardown({ok, _, _, _, Ctx}) ->
+ ok = config:delete("admins", ?USER, _Persist=false),
+ test_util:stop_couch(Ctx).
cookie_test_() ->
- Tests = [
- fun should_set_cookie_domain/2,
- fun should_not_set_cookie_domain/2
- ],
{
"Cookie domain tests",
{
setup,
- fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1,
- [
- make_test_case(clustered, Tests)
- ]
+ fun setup/0,
+ fun teardown/1,
+ fun({ok, Url, ContentType, Payload, _}) ->
+ [
+ should_set_cookie_domain(Url, ContentType, Payload),
+ should_not_set_cookie_domain(Url, ContentType, Payload)
+ ]
+ end
}
}.
-make_test_case(Mod, Funs) ->
-{
- lists:flatten(io_lib:format("~s", [Mod])),
- {foreachx, fun setup/1, fun teardown/2, [{Mod, Fun} || Fun <- Funs]}
-}.
+should_set_cookie_domain(Url, ContentType, Payload) ->
+ ?_test(begin
+ ok = config:set("couch_httpd_auth", "cookie_domain",
+ "example.com", false),
+ {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload),
+ ?assertEqual(200, Code),
+ Cookie = proplists:get_value("Set-Cookie", Headers),
+ ?assert(string:str(Cookie, "; Domain=example.com") > 0)
+ end).
-should_set_cookie_domain(_PortType, Url) ->
- ?_assertEqual(true,
- begin
- ok = config:set("couch_httpd_auth", "cookie_domain", "example.com", false),
- {ok, Code, Headers, _} = test_request:post(Url, [{"Content-Type", "application/json"}],
- "{\"name\":\"" ++ ?USER ++ "\", \"password\": \"" ++ ?PASS ++ "\"}"),
- ?_assert(Code =:= 200),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- string:str(Cookie, "; Domain=example.com") > 0
- end).
-
-should_not_set_cookie_domain(_PortType, Url) ->
- ?_assertEqual(0,
- begin
- ok = config:set("couch_httpd_auth", "cookie_domain", "", false),
- {ok, Code, Headers, _} = test_request:post(Url, [{"Content-Type", "application/json"}],
- "{\"name\":\"" ++ ?USER ++ "\", \"password\": \"" ++ ?PASS ++ "\"}"),
- ?_assert(Code =:= 200),
- Cookie = proplists:get_value("Set-Cookie", Headers),
- string:str(Cookie, "; Domain=")
- end).
-
-port(clustered) ->
- integer_to_list(mochiweb_socket_server:get(chttpd, port));
-port(backdoor) ->
- integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
+should_not_set_cookie_domain(Url, ContentType, Payload) ->
+ ?_test(begin
+ ok = config:set("couch_httpd_auth", "cookie_domain", "", false),
+ {ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload),
+ ?assertEqual(200, Code),
+ Cookie = proplists:get_value("Set-Cookie", Headers),
+ ?assertEqual(0, string:str(Cookie, "; Domain="))
+ end).
diff --git a/src/couch/test/global_changes_tests.erl b/src/couch/test/global_changes_tests.erl
index 864a6a0ec..4392aafac 100644
--- a/src/couch/test/global_changes_tests.erl
+++ b/src/couch/test/global_changes_tests.erl
@@ -32,7 +32,7 @@ teardown({_, DbName}) ->
ok.
http_create_db(Name) ->
- Resp = {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""),
+ {ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""),
true = lists:member(Status, [201, 202]),
ok.
diff --git a/src/couch_epi/rebar.config b/src/couch_epi/rebar.config
index 82db830a2..3c7f8af73 100644
--- a/src/couch_epi/rebar.config
+++ b/src/couch_epi/rebar.config
@@ -1,3 +1,7 @@
{cover_enabled, true}.
{cover_print_enabled, true}.
+
+{erl_opts, [
+ {platform_define, "^R16", 'pre18'},
+ {platform_define, "^17", 'pre18'}]}.
diff --git a/src/couch_epi/src/couch_epi_codegen.erl b/src/couch_epi/src/couch_epi_codegen.erl
index 978f0bb58..89b82a1f9 100644
--- a/src/couch_epi/src/couch_epi_codegen.erl
+++ b/src/couch_epi/src/couch_epi_codegen.erl
@@ -70,11 +70,16 @@ fixup_terminator(Tokens) ->
Tokens ++ [{dot, Line}]
end.
+
+-ifdef(pre18).
+
line(Token) ->
- case erlang:function_exported(erl_scan, line, 1) of
- true ->
- erl_scan:line(Token);
- false ->
- {line, Line} = erl_scan:token_info(Token, line),
- Line
- end.
+ {line, Line} = erl_scan:token_info(Token, line),
+ Line.
+
+-else.
+
+line(Token) ->
+ erl_scan:line(Token).
+
+-endif.
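The pre18 macro is supplied by the platform_define flags added to couch_epi's rebar.config above: OTP releases before 18 lack erl_scan:line/1, so the token line is read via the older erl_scan:token_info/2 instead. A minimal sketch of what the modern branch evaluates to, assuming an OTP 18+ shell:

    1> {ok, Tokens, _} = erl_scan:string("ok."), [erl_scan:line(T) || T <- Tokens].
    [1,1]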
diff --git a/src/couch_index/rebar.config b/src/couch_index/rebar.config
new file mode 100644
index 000000000..e0d18443b
--- /dev/null
+++ b/src/couch_index/rebar.config
@@ -0,0 +1,2 @@
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_index/test/couch_index_compaction_tests.erl b/src/couch_index/test/couch_index_compaction_tests.erl
index 062be872a..53316d944 100644
--- a/src/couch_index/test/couch_index_compaction_tests.erl
+++ b/src/couch_index/test/couch_index_compaction_tests.erl
@@ -21,12 +21,12 @@ setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
couch_db:close(Db),
- {ok, IndexerPid} = fake_index(Db),
+ fake_index(DbName),
+ {ok, IndexerPid} = couch_index_server:get_index(test_index, Db, undefined),
?assertNot(is_opened(Db)),
{Db, IndexerPid}.
-fake_index(Db) ->
- DbName = couch_db:name(Db),
+fake_index(DbName) ->
ok = meck:new([test_index], [non_strict]),
ok = meck:expect(test_index, init, ['_', '_'], {ok, 10}),
ok = meck:expect(test_index, open, fun(_Db, State) ->
@@ -45,13 +45,13 @@ fake_index(Db) ->
(update_seq, Seq) ->
Seq
end),
-
- couch_index_server:get_index(test_index, Db, undefined).
+ ok = meck:expect(test_index, close, ['_'], ok),
+ ok = meck:expect(test_index, swap_compacted, fun(_, NewState) ->
+ {ok, NewState}
+ end).
teardown(_) ->
- (catch meck:unload(test_index)),
- (catch meck:unload(couch_util)),
- ok.
+ meck:unload(test_index).
compaction_test_() ->
{
diff --git a/src/couch_index/test/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/couch_index_ddoc_updated_tests.erl
index d1bbc43d2..40dadcc62 100644
--- a/src/couch_index/test/couch_index_ddoc_updated_tests.erl
+++ b/src/couch_index/test/couch_index_ddoc_updated_tests.erl
@@ -25,7 +25,7 @@ start() ->
stop({Ctx, DbName}) ->
- (catch meck:unload(test_index)),
+ meck:unload(test_index),
ok = fabric:delete_db(DbName, [?ADMIN_CTX]),
DbDir = config:get("couchdb", "database_dir", "."),
WaitFun = fun() ->
@@ -121,7 +121,8 @@ fake_index() ->
crypto:hash(md5, term_to_binary(DDoc));
(update_seq, Seq) ->
Seq
- end).
+ end),
+ ok = meck:expect(test_index, shutdown, ['_'], ok).
get_indexes_by_ddoc(DDocID, N) ->
diff --git a/src/couch_log/src/couch_log.erl b/src/couch_log/src/couch_log.erl
index 0ce4739a4..a8dc5d48d 100644
--- a/src/couch_log/src/couch_log.erl
+++ b/src/couch_log/src/couch_log.erl
@@ -68,6 +68,7 @@ set_level(Level) ->
log(Level, Fmt, Args) ->
case couch_log_util:should_log(Level) of
true ->
+ couch_stats:increment_counter([couch_log, level, Level]),
Entry = couch_log_formatter:format(Level, self(), Fmt, Args),
ok = couch_log_server:log(Entry);
false ->
diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/couch_log_test_util.erl
index 250366982..05d64d8a9 100644
--- a/src/couch_log/test/couch_log_test_util.erl
+++ b/src/couch_log/test/couch_log_test_util.erl
@@ -22,12 +22,15 @@ start() ->
application:set_env(config, ini_files, config_files()),
application:start(config),
ignore_common_loggers(),
- application:start(couch_log).
+ application:start(couch_log),
+ meck:new(couch_stats),
+ ok = meck:expect(couch_stats, increment_counter, ['_'], ok).
stop(_) ->
application:stop(config),
- application:stop(couch_log).
+ application:stop(couch_log),
+ meck:unload(couch_stats).
with_level(Name, Fun) ->
diff --git a/src/couch_mrview/rebar.config b/src/couch_mrview/rebar.config
new file mode 100644
index 000000000..e0d18443b
--- /dev/null
+++ b/src/couch_mrview/rebar.config
@@ -0,0 +1,2 @@
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index d26df94f2..bc6686b8a 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -1161,7 +1161,7 @@ get_view_keys({Props}) ->
Keys when is_list(Keys) ->
Keys;
_ ->
- throw({bad_request, "`keys` member must be a array."})
+ throw({bad_request, "`keys` member must be an array."})
end.
@@ -1172,7 +1172,7 @@ get_view_queries({Props}) ->
Queries when is_list(Queries) ->
Queries;
_ ->
- throw({bad_request, "`queries` member must be a array."})
+ throw({bad_request, "`queries` member must be an array."})
end.
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index 0c769324a..886fb4f6e 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -34,7 +34,9 @@
db_name :: binary(),
delete_dbs :: boolean(),
changes_pid :: pid(),
- changes_ref :: reference()
+ changes_ref :: reference(),
+ q_for_peruser_db :: integer(),
+ peruser_dbname_prefix :: binary()
}).
-record(state, {
@@ -43,10 +45,12 @@
delete_dbs :: boolean(),
states :: list(),
mem3_cluster_pid :: pid(),
- cluster_stable :: boolean()
+ cluster_stable :: boolean(),
+ q_for_peruser_db :: integer(),
+ peruser_dbname_prefix :: binary()
}).
--define(USERDB_PREFIX, "userdb-").
+-define(DEFAULT_USERDB_PREFIX, "userdb-").
-define(RELISTEN_DELAY, 5000).
-define(DEFAULT_QUIET_PERIOD, 60). % seconds
-define(DEFAULT_START_PERIOD, 5). % seconds
@@ -70,6 +74,16 @@ init_state() ->
DbName = ?l2b(config:get(
"couch_httpd_auth", "authentication_db", "_users")),
DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
+ Q = config:get_integer("couch_peruser", "q", 1),
+ Prefix = config:get("couch_peruser", "database_prefix", ?DEFAULT_USERDB_PREFIX),
+ case couch_db:validate_dbname(Prefix) of
+ ok -> ok;
+ Error ->
+ couch_log:error("couch_peruser can't proceed as illegal database prefix ~p.
+ Error: ~p", [Prefix, Error]),
+ throw(Error)
+ end,
+
% set up cluster-stable listener
Period = abs(config:get_integer("couch_peruser", "cluster_quiet_period",
@@ -85,7 +99,9 @@ init_state() ->
db_name = DbName,
delete_dbs = DeleteDbs,
mem3_cluster_pid = Mem3Cluster,
- cluster_stable = false
+ cluster_stable = false,
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = ?l2b(Prefix)
}
end.
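The prefix check above rejects values that could never form a legal database name. Hedged examples (the exact error tuple is assumed from couch_db's usual validation result):

    %% couch_db:validate_dbname("newuserdb-")     -> ok
    %% couch_db:validate_dbname("userdb_$()+--/") -> ok, _$()+-/ are all legal
    %% couch_db:validate_dbname("UserDB-")        -> {error, {illegal_database_name, _}}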
@@ -95,14 +111,17 @@ start_listening(#state{states=ChangesStates}=State)
when length(ChangesStates) > 0 ->
% couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]),
State;
-start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs} = State) ->
+start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs,
+ q_for_peruser_db = Q, peruser_dbname_prefix = Prefix} = State) ->
% couch_log:debug("peruser: start_listening() on node ~p", [node()]),
try
States = lists:map(fun (A) ->
S = #changes_state{
parent = State#state.parent,
db_name = A#shard.name,
- delete_dbs = DeleteDbs
+ delete_dbs = DeleteDbs,
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = Prefix
},
{Pid, Ref} = spawn_opt(
?MODULE, init_changes_handler, [S], [link, monitor]),
@@ -138,7 +157,8 @@ init_changes_handler(#changes_state{db_name=DbName} = ChangesState) ->
changes_handler(
{change, {Doc}, _Prepend},
_ResType,
- ChangesState=#changes_state{db_name=DbName}) ->
+ ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q,
+ peruser_dbname_prefix = Prefix}) ->
% couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]),
case couch_util:get_value(<<"id">>, Doc) of
@@ -147,16 +167,16 @@ changes_handler(
true ->
case couch_util:get_value(<<"deleted">>, Doc, false) of
false ->
- UserDb = ensure_user_db(User),
+ UserDb = ensure_user_db(Prefix, User, Q),
ok = ensure_security(User, UserDb, fun add_user/3),
ChangesState;
true ->
case ChangesState#changes_state.delete_dbs of
true ->
- _UserDb = delete_user_db(User),
+ _UserDb = delete_user_db(Prefix, User),
ChangesState;
false ->
- UserDb = user_db_name(User),
+ UserDb = user_db_name(Prefix, User),
ok = ensure_security(User, UserDb, fun remove_user/3),
ChangesState
end
@@ -201,9 +221,9 @@ should_handle_doc_int(ShardName, DocId) ->
false
end.
--spec delete_user_db(User :: binary()) -> binary().
-delete_user_db(User) ->
- UserDb = user_db_name(User),
+-spec delete_user_db(Prefix :: binary(), User :: binary()) -> binary().
+delete_user_db(Prefix, User) ->
+ UserDb = user_db_name(Prefix, User),
try
case fabric:delete_db(UserDb, [?ADMIN_CTX]) of
ok -> ok;
@@ -214,13 +234,13 @@ delete_user_db(User) ->
end,
UserDb.
--spec ensure_user_db(User :: binary()) -> binary().
-ensure_user_db(User) ->
- UserDb = user_db_name(User),
+-spec ensure_user_db(Prefix :: binary(), User :: binary(), Q :: integer()) -> binary().
+ensure_user_db(Prefix, User, Q) ->
+ UserDb = user_db_name(Prefix, User),
try
{ok, _DbInfo} = fabric:get_db_info(UserDb)
catch error:database_does_not_exist ->
- case fabric:create_db(UserDb, [?ADMIN_CTX]) of
+ case fabric:create_db(UserDb, [?ADMIN_CTX, {q, integer_to_list(Q)}]) of
{error, file_exists} -> ok;
ok -> ok;
accepted -> ok
@@ -294,11 +314,11 @@ ensure_security(User, UserDb, TransformFun) ->
end
end.
--spec user_db_name(User :: binary()) -> binary().
-user_db_name(User) ->
+-spec user_db_name(Prefix :: binary(), User :: binary()) -> binary().
+user_db_name(Prefix, User) ->
HexUser = list_to_binary(
[string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]),
- <<?USERDB_PREFIX,HexUser/binary>>.
+ <<Prefix/binary,HexUser/binary>>.
-spec exit_changes(State :: #state{}) -> ok.
exit_changes(State) ->
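user_db_name/2 above builds the database name from the configured prefix plus a lowercase hex encoding of the user name, which is why the tests below look for names such as userdb-666f6f. A self-contained sketch of the same transformation (hex_name/2 is a hypothetical helper mirroring the code above):

    hex_name(Prefix, User) when is_binary(Prefix), is_binary(User) ->
        %% each byte of the user name becomes its lowercase hex form
        Hex = [string:to_lower(integer_to_list(X, 16)) || <<X>> <= User],
        <<Prefix/binary, (list_to_binary(Hex))/binary>>.
    %% hex_name(<<"userdb-">>, <<"foo">>) =:= <<"userdb-666f6f">>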
diff --git a/src/couch_peruser/test/couch_peruser_test.erl b/src/couch_peruser/test/couch_peruser_test.erl
index 2bc98af66..f6ef88f0b 100644
--- a/src/couch_peruser/test/couch_peruser_test.erl
+++ b/src/couch_peruser/test/couch_peruser_test.erl
@@ -66,6 +66,11 @@ set_config(Section, Key, Value) ->
get_base_url(), "/_config/", Section, "/", Key]),
do_request(put, Url, "\"" ++ Value ++ "\"").
+delete_config(Section, Key, Value) ->
+ Url = lists:concat([
+ get_base_url(), "/_config/", Section, "/", Key]),
+ do_request(delete, Url, "\"" ++ Value ++ "\"").
+
do_request(Method, Url) ->
Headers = [{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}],
{ok, _, _, _} = test_request:request(Method, Url, Headers).
@@ -141,25 +146,89 @@ get_cluster_base_url() ->
"http://" ++ Addr ++ ":" ++ Port.
-should_create_user_db(TestAuthDb) ->
+should_create_user_db_with_default(TestAuthDb) ->
+ create_user(TestAuthDb, "foo"),
+ wait_for_db_create(<<"userdb-666f6f">>),
+ {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
+ {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
+ [
+ ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
+ ?_assertEqual(1, couch_util:get_value(q, ClusterInfo))
+ ].
+
+should_create_user_db_with_custom_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "newuserdb-"),
+ create_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"newuserdb-666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "newuserdb-"),
+ ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())).
+
+should_create_user_db_with_custom_special_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ create_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())).
+
+should_create_anon_user_db_with_default(TestAuthDb) ->
+ create_anon_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"userdb-666f6f6f">>),
+ {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
+ {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
+ [
+ ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
+ ?_assertEqual(1, couch_util:get_value(q, ClusterInfo))
+ ].
+
+should_create_anon_user_db_with_custom_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "newuserdb-"),
+ create_anon_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"newuserdb-666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "newuserdb-"),
+ ?_assert(lists:member(<<"newuserdb-666f6f6f">>, all_dbs())).
+
+should_create_anon_user_db_with_custom_special_prefix(TestAuthDb) ->
+ set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ create_anon_user(TestAuthDb, "fooo"),
+ wait_for_db_create(<<"userdb_$()+--/666f6f6f">>),
+ delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ ?_assert(lists:member(<<"userdb_$()+--/666f6f6f">>, all_dbs())).
+
+should_create_user_db_with_q4(TestAuthDb) ->
+ set_config("couch_peruser", "q", "4"),
create_user(TestAuthDb, "foo"),
wait_for_db_create(<<"userdb-666f6f">>),
- ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())).
+ {ok, DbInfo} = fabric:get_db_info(<<"userdb-666f6f">>),
+ {ClusterInfo} = couch_util:get_value(cluster, DbInfo),
+ delete_config("couch_peruser", "q", "4"),
-should_create_anon_user_db(TestAuthDb) ->
+ [
+ ?_assert(lists:member(<<"userdb-666f6f">>, all_dbs())),
+ ?_assertEqual(4, couch_util:get_value(q, ClusterInfo))
+ ].
+
+should_create_anon_user_db_with_q4(TestAuthDb) ->
+ set_config("couch_peruser", "q", "4"),
create_anon_user(TestAuthDb, "fooo"),
wait_for_db_create(<<"userdb-666f6f6f">>),
- ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())).
+ {ok, TargetInfo} = fabric:get_db_info(<<"userdb-666f6f6f">>),
+ {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
+ delete_config("couch_peruser", "q", "4"),
+ [
+ ?_assert(lists:member(<<"userdb-666f6f6f">>, all_dbs())),
+ ?_assertEqual(4, couch_util:get_value(q, ClusterInfo))
+ ].
should_not_delete_user_db(TestAuthDb) ->
User = "foo",
UserDbName = <<"userdb-666f6f">>,
create_user(TestAuthDb, User),
wait_for_db_create(<<"userdb-666f6f">>),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
delete_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- ?_assert(lists:member(UserDbName, all_dbs())).
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [?_assert(AfterCreate), ?_assert(AfterDelete)].
should_delete_user_db(TestAuthDb) ->
User = "bar",
@@ -167,10 +236,45 @@ should_delete_user_db(TestAuthDb) ->
set_config("couch_peruser", "delete_dbs", "true"),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
delete_user(TestAuthDb, User),
wait_for_db_delete(UserDbName),
- ?_assert(not lists:member(UserDbName, all_dbs())).
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [?_assert(AfterCreate), ?_assertNot(AfterDelete)].
+
+should_delete_user_db_with_custom_prefix(TestAuthDb) ->
+ User = "bar",
+ UserDbName = <<"newuserdb-626172">>,
+ set_config("couch_peruser", "delete_dbs", "true"),
+ set_config("couch_peruser", "database_prefix", "newuserdb-"),
+ create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
+ delete_user(TestAuthDb, User),
+ wait_for_db_delete(UserDbName),
+ delete_config("couch_peruser", "database_prefix", "newuserdb-"),
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [
+ ?_assert(AfterCreate),
+ ?_assertNot(AfterDelete)
+ ].
+
+should_delete_user_db_with_custom_special_prefix(TestAuthDb) ->
+ User = "bar",
+ UserDbName = <<"userdb_$()+--/626172">>,
+ set_config("couch_peruser", "delete_dbs", "true"),
+ set_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
+ AfterCreate = lists:member(UserDbName, all_dbs()),
+ delete_user(TestAuthDb, User),
+ wait_for_db_delete(UserDbName),
+ delete_config("couch_peruser", "database_prefix", "userdb_$()+--/"),
+ AfterDelete = lists:member(UserDbName, all_dbs()),
+ [
+ ?_assert(AfterCreate),
+ ?_assertNot(AfterDelete)
+ ].
should_reflect_config_changes(TestAuthDb) ->
User = "baz",
@@ -178,28 +282,37 @@ should_reflect_config_changes(TestAuthDb) ->
set_config("couch_peruser", "delete_dbs", "true"),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate1 = lists:member(UserDbName, all_dbs()),
delete_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
wait_for_db_delete(UserDbName),
- ?assert(not lists:member(UserDbName, all_dbs())),
+ AfterDelete1 = lists:member(UserDbName, all_dbs()),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterCreate2 = lists:member(UserDbName, all_dbs()),
set_config("couch_peruser", "delete_dbs", "false"),
delete_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- ?assert(lists:member(UserDbName, all_dbs())),
+ AfterDelete2 = lists:member(UserDbName, all_dbs()),
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
set_config("couch_peruser", "delete_dbs", "true"),
delete_user(TestAuthDb, User),
wait_for_db_delete(UserDbName),
- ?assert(not lists:member(UserDbName, all_dbs())),
+ AfterDelete3 = lists:member(UserDbName, all_dbs()),
set_config("couch_peruser", "enable", "false"),
create_user(TestAuthDb, User),
timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- ?_assert(not lists:member(UserDbName, all_dbs())).
+ AfterCreate3 = lists:member(UserDbName, all_dbs()),
+ [
+ ?_assert(AfterCreate1),
+ ?_assertNot(AfterDelete1),
+ ?_assert(AfterCreate2),
+ ?_assert(AfterDelete2),
+ ?_assertNot(AfterDelete3),
+ ?_assertNot(AfterCreate3)
+ ].
+
should_add_user_to_db_admins(TestAuthDb) ->
User = "qux",
@@ -273,18 +386,24 @@ should_remove_user_from_db_admins(TestAuthDb) ->
{AdminProperties} = proplists:get_value(<<"admins">>,
get_security(UserDbName)),
AdminNames = proplists:get_value(<<"names">>, AdminProperties),
- ?assert(lists:member(<<"foo">>, AdminNames)),
- ?assert(lists:member(<<"bar">>, AdminNames)),
- ?assert(lists:member(<<"qux">>, AdminNames)),
+ FooBefore = lists:member(<<"foo">>, AdminNames),
+ BarBefore = lists:member(<<"bar">>, AdminNames),
+ QuxBefore = lists:member(<<"qux">>, AdminNames),
delete_user(TestAuthDb, User),
wait_for_security_delete(<<"admins">>, User, UserDbName),
{NewAdminProperties} = proplists:get_value(<<"admins">>,
get_security(UserDbName)),
NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
+ FooAfter = lists:member(<<"foo">>, NewAdminNames),
+ BarAfter = lists:member(<<"bar">>, NewAdminNames),
+ QuxAfter = lists:member(<<"qux">>, NewAdminNames),
[
- ?_assert(lists:member(<<"foo">>, NewAdminNames)),
- ?_assert(lists:member(<<"bar">>, NewAdminNames)),
- ?_assert(not lists:member(<<"qux">>, NewAdminNames))
+ ?_assert(FooBefore),
+ ?_assert(BarBefore),
+ ?_assert(QuxBefore),
+ ?_assert(FooAfter),
+ ?_assert(BarAfter),
+ ?_assertNot(QuxAfter)
].
should_remove_user_from_db_members(TestAuthDb) ->
@@ -301,18 +420,24 @@ should_remove_user_from_db_members(TestAuthDb) ->
{MemberProperties} = proplists:get_value(<<"members">>,
get_security(UserDbName)),
MemberNames = proplists:get_value(<<"names">>, MemberProperties),
- ?assert(lists:member(<<"pow">>, MemberNames)),
- ?assert(lists:member(<<"wow">>, MemberNames)),
- ?assert(lists:member(<<"qux">>, MemberNames)),
+ PowBefore = lists:member(<<"pow">>, MemberNames),
+ WowBefore = lists:member(<<"wow">>, MemberNames),
+ QuxBefore = lists:member(<<"qux">>, MemberNames),
delete_user(TestAuthDb, User),
wait_for_security_delete(<<"members">>, User, UserDbName),
{NewMemberProperties} = proplists:get_value(<<"members">>,
get_security(UserDbName)),
NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
+ PowAfter = lists:member(<<"pow">>, NewMemberNames),
+ WowAfter = lists:member(<<"wow">>, NewMemberNames),
+ QuxAfter = lists:member(<<"qux">>, NewMemberNames),
[
- ?_assert(lists:member(<<"pow">>, NewMemberNames)),
- ?_assert(lists:member(<<"wow">>, NewMemberNames)),
- ?_assert(not lists:member(<<"qux">>, NewMemberNames))
+ ?_assert(PowBefore),
+ ?_assert(WowBefore),
+ ?_assert(QuxBefore),
+ ?_assert(PowAfter),
+ ?_assert(WowAfter),
+ ?_assertNot(QuxAfter)
].
% infinite loop waiting for a db to be created, either this returns true
@@ -381,10 +506,18 @@ couch_peruser_test_() ->
foreach,
fun setup/0, fun teardown/1,
[
- fun should_create_anon_user_db/1,
- fun should_create_user_db/1,
+ fun should_create_anon_user_db_with_default/1,
+ fun should_create_anon_user_db_with_custom_prefix/1,
+ fun should_create_anon_user_db_with_custom_special_prefix/1,
+ fun should_create_user_db_with_default/1,
+ fun should_create_user_db_with_custom_prefix/1,
+ fun should_create_user_db_with_custom_special_prefix/1,
+ fun should_create_user_db_with_q4/1,
+ fun should_create_anon_user_db_with_q4/1,
fun should_not_delete_user_db/1,
fun should_delete_user_db/1,
+ fun should_delete_user_db_with_custom_prefix/1,
+ fun should_delete_user_db_with_custom_special_prefix/1,
fun should_reflect_config_changes/1,
fun should_add_user_to_db_admins/1,
fun should_add_user_to_db_members/1,
diff --git a/src/couch_replicator/README.md b/src/couch_replicator/README.md
index f08ff357e..fe975c1d0 100644
--- a/src/couch_replicator/README.md
+++ b/src/couch_replicator/README.md
@@ -262,7 +262,7 @@ A description of each child:
`error_backoff/1` is where the backoff period is calculated.
Consecutive errors are held in the `errcnt` field in the ETS table.
- 2. Fetchig filter code succeeds, replication ID is calculated and job is
+ 2. Fetching filter code succeeds, replication ID is calculated and job is
added to the scheduler. However, because this is a filtered replication
the source database could get an updated filter. Which means
replication ID could change again. So the worker is spawned to
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index ab8eb7f29..b5ea57c3c 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -24,7 +24,7 @@
-export([
db_open/2,
- db_open/3,
+ db_open/4,
db_close/1,
get_db_info/1,
get_pending_count/2,
@@ -68,20 +68,21 @@ db_uri(Db) ->
db_open(Db, Options) ->
- db_open(Db, Options, false).
+ db_open(Db, Options, false, []).
-db_open(#httpdb{} = Db1, _Options, Create) ->
+db_open(#httpdb{} = Db1, _Options, Create, CreateParams) ->
{ok, Db} = couch_replicator_httpc:setup(Db1),
try
case Create of
false ->
ok;
true ->
- send_req(Db, [{method, put}],
+ Db2 = maybe_append_create_query_params(Db, CreateParams),
+ send_req(Db2, [{method, put}],
fun(401, _, _) ->
- throw({unauthorized, ?l2b(db_uri(Db))});
+ throw({unauthorized, ?l2b(db_uri(Db2))});
(403, _, _) ->
- throw({forbidden, ?l2b(db_uri(Db))});
+ throw({forbidden, ?l2b(db_uri(Db2))});
(_, _, _) ->
ok
end)
@@ -118,7 +119,7 @@ db_open(#httpdb{} = Db1, _Options, Create) ->
db_close(Db),
erlang:exit(Error)
end;
-db_open(DbName, Options, Create) ->
+db_open(DbName, Options, Create, _CreateParams) ->
try
case Create of
false ->
@@ -1020,6 +1021,14 @@ normalize_db(<<DbName/binary>>) ->
DbName.
+maybe_append_create_query_params(Db, []) ->
+ Db;
+
+maybe_append_create_query_params(Db, CreateParams) ->
+ NewUrl = Db#httpdb.url ++ "?" ++ mochiweb_util:urlencode(CreateParams),
+ Db#httpdb{url = NewUrl}.
+
+
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl
index 3d5229b9f..a7f7573b6 100644
--- a/src/couch_replicator/src/couch_replicator_clustering.erl
+++ b/src/couch_replicator/src/couch_replicator_clustering.erl
@@ -114,17 +114,17 @@ link_cluster_event_listener(Mod, Fun, Args)
% Mem3 cluster callbacks
cluster_unstable(Server) ->
+ ok = gen_server:call(Server, set_unstable),
couch_replicator_notifier:notify({cluster, unstable}),
couch_stats:update_gauge([couch_replicator, cluster_is_stable], 0),
couch_log:notice("~s : cluster unstable", [?MODULE]),
- gen_server:cast(Server, cluster_unstable),
Server.
cluster_stable(Server) ->
+ ok = gen_server:call(Server, set_stable),
couch_replicator_notifier:notify({cluster, stable}),
couch_stats:update_gauge([couch_replicator, cluster_is_stable], 1),
couch_log:notice("~s : cluster stable", [?MODULE]),
- gen_server:cast(Server, cluster_stable),
Server.
@@ -147,18 +147,18 @@ terminate(_Reason, _State) ->
handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) ->
- {reply, IsStable, State}.
+ {reply, IsStable, State};
+handle_call(set_stable, _From, State) ->
+ {reply, ok, State#state{cluster_stable = true}};
-handle_cast({set_period, Period}, #state{mem3_cluster_pid = Pid} = State) ->
- ok = mem3_cluster:set_period(Pid, Period),
- {noreply, State};
+handle_call(set_unstable, _From, State) ->
+ {reply, ok, State#state{cluster_stable = false}}.
-handle_cast(cluster_stable, State) ->
- {noreply, State#state{cluster_stable = true}};
-handle_cast(cluster_unstable, State) ->
- {noreply, State#state{cluster_stable = false}}.
+handle_cast({set_period, Period}, #state{mem3_cluster_pid = Pid} = State) ->
+ ok = mem3_cluster:set_period(Pid, Period),
+ {noreply, State}.
handle_info(restart_config_listener, State) ->
@@ -193,3 +193,56 @@ owner_int(ShardName, DocId) ->
Shards = mem3:shards(DbName, DocId),
Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
mem3:owner(DbName, DocId, Nodes).
+
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+
+replicator_clustering_test_() ->
+ {
+ foreach,
+ fun setup/0,
+ fun teardown/1,
+ [
+ t_stable_callback(),
+ t_unstable_callback()
+ ]
+ }.
+
+
+t_stable_callback() ->
+ ?_test(begin
+ ?assertEqual(false, is_stable()),
+ cluster_stable(whereis(?MODULE)),
+ ?assertEqual(true, is_stable())
+ end).
+
+
+t_unstable_callback() ->
+ ?_test(begin
+ cluster_stable(whereis(?MODULE)),
+ ?assertEqual(true, is_stable()),
+ cluster_unstable(whereis(?MODULE)),
+ ?assertEqual(false, is_stable())
+ end).
+
+
+setup() ->
+ meck:expect(couch_log, notice, 2, ok),
+ meck:expect(config, get, fun(_, _, Default) -> Default end),
+ meck:expect(config, listen_for_changes, 2, ok),
+ meck:expect(couch_stats, update_gauge, 2, ok),
+ meck:expect(couch_replicator_notifier, notify, 1, ok),
+ {ok, Pid} = start_link(),
+ Pid.
+
+
+teardown(Pid) ->
+ unlink(Pid),
+ exit(Pid, kill),
+ meck:unload().
+
+-endif.
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index d22b85f89..1fe91eca4 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -499,6 +499,11 @@ convert_options([{<<"create_target">>, V} | _R]) when not is_boolean(V)->
throw({bad_request, <<"parameter `create_target` must be a boolean">>});
convert_options([{<<"create_target">>, V} | R]) ->
[{create_target, V} | convert_options(R)];
+convert_options([{<<"create_target_params">>, V} | _R]) when not is_tuple(V) ->
+ throw({bad_request,
+ <<"parameter `create_target_params` must be a JSON object">>});
+convert_options([{<<"create_target_params">>, V} | R]) ->
+ [{create_target_params, V} | convert_options(R)];
convert_options([{<<"continuous">>, V} | _R]) when not is_boolean(V)->
throw({bad_request, <<"parameter `continuous` must be a boolean">>});
convert_options([{<<"continuous">>, V} | R]) ->
@@ -690,7 +695,8 @@ strip_credentials(Url) when is_binary(Url) ->
"http\\1://\\2",
[{return, binary}]);
strip_credentials({Props}) ->
- {lists:keydelete(<<"oauth">>, 1, Props)}.
+ Props1 = lists:keydelete(<<"oauth">>, 1, Props),
+ {lists:keydelete(<<"headers">>, 1, Props1)}.
error_reason({shutdown, Error}) ->
@@ -756,4 +762,33 @@ check_convert_options_fail_test() ->
?assertThrow({bad_request, _},
convert_options([{<<"selector">>, [{key, value}]}])).
+check_strip_credentials_test() ->
+ [?assertEqual(Expected, strip_credentials(Body)) || {Expected, Body} <- [
+ {
+ undefined,
+ undefined
+ },
+ {
+ <<"https://remote_server/database">>,
+ <<"https://foo:bar@remote_server/database">>
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"oauth">>, <<"bar">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"bar">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"oauth">>, <<"bar">>},
+ {<<"headers">>, <<"baz">>}]}
+ }
+ ]].
+
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl
index 0f78ce1d5..ebab1a614 100644
--- a/src/couch_replicator/src/couch_replicator_httpd.erl
+++ b/src/couch_replicator/src/couch_replicator_httpd.erl
@@ -82,6 +82,8 @@ handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>|Unquoted]}
{error, invalid} ->
throw(bad_request)
end;
+handle_scheduler_req(#httpd{method='GET'} = Req) ->
+ send_json(Req, 404, {[{error, <<"not found">>}]});
handle_scheduler_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
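The new clause turns an unknown GET path below /_scheduler into a JSON 404 instead of falling through to the method-not-allowed response. A hedged sketch of the exchange:

    %% GET /_scheduler/does_not_exist
    %% -> 404 with body {"error":"not found"}  (previously: 405 Method Not Allowed)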
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index e2d8fb6d6..0438249be 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -587,8 +587,9 @@ init_state(Rep) ->
% Adjust minimum number of http source connections to 2 to avoid deadlock
Src = adjust_maxconn(Src0, BaseId),
{ok, Source} = couch_replicator_api_wrap:db_open(Src, [{user_ctx, UserCtx}]),
+ {CreateTargetParams} = get_value(create_target_params, Options, {[]}),
{ok, Target} = couch_replicator_api_wrap:db_open(Tgt, [{user_ctx, UserCtx}],
- get_value(create_target, Options, false)),
+ get_value(create_target, Options, false), CreateTargetParams),
{ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source),
{ok, TargetInfo} = couch_replicator_api_wrap:get_db_info(Target),
diff --git a/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl
new file mode 100644
index 000000000..63310d39e
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_create_target_with_options_tests.erl
@@ -0,0 +1,143 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_create_target_with_options_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_replicator/src/couch_replicator.hrl").
+
+-define(USERNAME, "rep_admin").
+-define(PASSWORD, "secret").
+
+setup() ->
+ Ctx = test_util:start_couch([fabric, mem3, couch_replicator, chttpd]),
+ Hashed = couch_passwords:hash_admin_password(?PASSWORD),
+ ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist=false),
+ Source = ?tempdb(),
+ Target = ?tempdb(),
+ {Ctx, {Source, Target}}.
+
+
+teardown({Ctx, {_Source, _Target}}) ->
+ config:delete("admins", ?USERNAME),
+ ok = test_util:stop_couch(Ctx).
+
+
+create_target_with_options_replication_test_() ->
+ {
+ "Create target with range partitions tests",
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_create_target_with_q_4/1,
+ fun should_create_target_with_q_2_n_1/1,
+ fun should_create_target_with_default/1,
+ fun should_not_create_target_with_q_any/1
+ ]
+ }
+ }.
+
+
+should_create_target_with_q_4({_Ctx, {Source, Target}}) ->
+ RepObject = {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, true},
+ {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}}
+ ]},
+ create_db(Source),
+ create_doc(Source),
+ {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+
+ {ok, TargetInfo} = fabric:get_db_info(Target),
+ {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
+ delete_db(Source),
+ delete_db(Target),
+ ?_assertEqual(4, couch_util:get_value(q, ClusterInfo)).
+
+
+should_create_target_with_q_2_n_1({_Ctx, {Source, Target}}) ->
+ RepObject = {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, true},
+ {<<"create_target_params">>,
+ {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}}
+ ]},
+ create_db(Source),
+ create_doc(Source),
+ {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+
+ {ok, TargetInfo} = fabric:get_db_info(Target),
+ {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
+ delete_db(Source),
+ delete_db(Target),
+ [
+ ?_assertEqual(2, couch_util:get_value(q, ClusterInfo)),
+ ?_assertEqual(1, couch_util:get_value(n, ClusterInfo))
+ ].
+
+
+should_create_target_with_default({_Ctx, {Source, Target}}) ->
+ RepObject = {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, true}
+ ]},
+ create_db(Source),
+ create_doc(Source),
+ {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+
+ {ok, TargetInfo} = fabric:get_db_info(Target),
+ {ClusterInfo} = couch_util:get_value(cluster, TargetInfo),
+ Q = config:get("cluster", "q", "8"),
+ delete_db(Source),
+ delete_db(Target),
+ ?_assertEqual(list_to_integer(Q), couch_util:get_value(q, ClusterInfo)).
+
+
+should_not_create_target_with_q_any({_Ctx, {Source, Target}}) ->
+ RepObject = {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, false},
+ {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}}
+ ]},
+ create_db(Source),
+ create_doc(Source),
+ {error, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+ DbExist = is_list(catch mem3:shards(Target)),
+ delete_db(Source),
+ ?_assertEqual(false, DbExist).
+
+
+create_doc(DbName) ->
+ Body = {[{<<"foo">>, <<"bar">>}]},
+ NewDoc = #doc{body = Body},
+ {ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
+
+
+create_db(DbName) ->
+ ok = fabric:create_db(DbName, [?ADMIN_CTX]).
+
+
+delete_db(DbName) ->
+ ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
+
+
+db_url(DbName) ->
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ ?l2b(io_lib:format("http://~s:~s@~s:~b/~s", [?USERNAME, ?PASSWORD, Addr,
+ Port, DbName])).
diff --git a/src/couch_stats/src/couch_stats.app.src b/src/couch_stats/src/couch_stats.app.src
index 6339a0f1d..990f8de62 100644
--- a/src/couch_stats/src/couch_stats.app.src
+++ b/src/couch_stats/src/couch_stats.app.src
@@ -14,7 +14,7 @@
{description, "Simple statistics collection"},
{vsn, git},
{registered, [couch_stats_aggregator, couch_stats_process_tracker]},
- {applications, [kernel, stdlib, folsom, couch_log]},
+ {applications, [kernel, stdlib, folsom]},
{mod, {couch_stats_app, []}},
{env, []}
]}.
diff --git a/src/couch_stats/src/couch_stats.erl b/src/couch_stats/src/couch_stats.erl
index 59175f7a8..4fde14acb 100644
--- a/src/couch_stats/src/couch_stats.erl
+++ b/src/couch_stats/src/couch_stats.erl
@@ -119,7 +119,7 @@ notify_existing_metric(Name, Op, Type) ->
try
ok = folsom_metrics:notify_existing_metric(Name, Op, Type)
catch _:_ ->
- couch_log:notice("unknown metric: ~p", [Name]),
+ error_logger:error_msg("unknown metric: ~p", [Name]),
{error, unknown_metric}
end.
diff --git a/src/couch_stats/src/couch_stats_process_tracker.erl b/src/couch_stats/src/couch_stats_process_tracker.erl
index 4765734e9..7d16deb8a 100644
--- a/src/couch_stats/src/couch_stats_process_tracker.erl
+++ b/src/couch_stats/src/couch_stats_process_tracker.erl
@@ -48,7 +48,7 @@ init([]) ->
{ok, #st{}}.
handle_call(Msg, _From, State) ->
- couch_log:notice("~p received unknown call ~p", [?MODULE, Msg]),
+ error_logger:error_msg("~p received unknown call ~p", [?MODULE, Msg]),
{noreply, State}.
handle_cast({track, Pid, Name}, State) ->
@@ -57,13 +57,13 @@ handle_cast({track, Pid, Name}, State) ->
ets:insert(?MODULE, {Ref, Name}),
{noreply, State};
handle_cast(Msg, State) ->
- couch_log:notice("~p received unknown cast ~p", [?MODULE, Msg]),
+ error_logger:error_msg("~p received unknown cast ~p", [?MODULE, Msg]),
{noreply, State}.
handle_info({'DOWN', Ref, _, _, _}=Msg, State) ->
case ets:lookup(?MODULE, Ref) of
[] ->
- couch_log:notice(
+ error_logger:error_msg(
"~p received unknown exit; message was ~p", [?MODULE, Msg]
);
[{Ref, Name}] ->
@@ -72,7 +72,7 @@ handle_info({'DOWN', Ref, _, _, _}=Msg, State) ->
end,
{noreply, State};
handle_info(Msg, State) ->
- couch_log:notice("~p received unknown message ~p", [?MODULE, Msg]),
+ error_logger:error_msg("~p received unknown message ~p", [?MODULE, Msg]),
{noreply, State}.
terminate(_Reason, _State) ->
diff --git a/src/fabric/rebar.config b/src/fabric/rebar.config
index df35ac639..362c8785e 100644
--- a/src/fabric/rebar.config
+++ b/src/fabric/rebar.config
@@ -10,6 +10,5 @@
% License for the specific language governing permissions and limitations under
% the License.
-{deps, [
- {meck, ".*", {git, "https://github.com/apache/couchdb-meck.git", {tag, "0.8.8"}}}
-]}.
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index d793f4f13..db914f90e 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -146,9 +146,9 @@ maybe_stop(W, Counters) ->
{ok, {W, Counters}};
false ->
case lists:sum([1 || {_, ok} <- Counters]) of
- W ->
+ NumOk when NumOk >= (W div 2 + 1) ->
{stop, ok};
- NumOk when NumOk >= (W div 2 + 1) ->
+ NumOk when NumOk > 0 ->
{stop, accepted};
_ ->
{error, internal_server_error}
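The reordered guards relax the success condition: a bare majority of ok replies now reports ok, any positive count reports accepted, and only zero replies is an error. Restated as a sketch, assuming W is the number of acknowledgements awaited:

    decide(W, NumOk) when NumOk >= (W div 2 + 1) -> ok;
    decide(_W, NumOk) when NumOk > 0             -> accepted;
    decide(_W, _)                                -> internal_server_error.
    %% decide(3, 2) =:= ok, decide(3, 1) =:= accepted, decide(3, 0) =:= internal_server_error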
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index de21dde08..ac16dac52 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -59,7 +59,8 @@ go(DbName, Options, QueryArgs, Callback, Acc0) ->
conflicts = Conflicts,
skip = Skip,
keys = Keys0,
- extra = Extra
+ extra = Extra,
+ update_seq = UpdateSeq
} = QueryArgs,
DocOptions1 = case Conflicts of
true -> [conflicts|DocOptions0];
@@ -97,7 +98,12 @@ go(DbName, Options, QueryArgs, Callback, Acc0) ->
end,
case Resp of
{ok, TotalRows} ->
- {ok, Acc1} = Callback({meta, [{total, TotalRows}]}, Acc0),
+ Meta = case UpdateSeq of
+ false -> [{total, TotalRows}, {offset, null}];
+ true ->
+ [{total, TotalRows}, {offset, null}, {update_seq, null}]
+ end,
+ {ok, Acc1} = Callback({meta, Meta}, Acc0),
{ok, Acc2} = doc_receive_loop(
Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1
),
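With update_seq now read from QueryArgs, the meta row of a clustered _all_docs response always carries offset (as null) and, only when requested, update_seq. An illustrative response head (values made up):

    %% GET /db/_all_docs?update_seq=true
    %% {"total_rows":3,"offset":null,"update_seq":null,"rows":[...]}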
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index 98b2d52bd..5108d36b2 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -18,6 +18,7 @@
explain/1,
execute/3,
maybe_filter_indexes_by_ddoc/2,
+ remove_indexes_with_partial_filter_selector/1,
maybe_add_warning/3
]).
@@ -47,16 +48,18 @@
create(Db, Selector0, Opts) ->
Selector = mango_selector:normalize(Selector0),
UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts),
-
- {use_index, IndexSpecified} = proplists:lookup(use_index, Opts),
- case {length(UsableIndexes), length(IndexSpecified)} of
- {0, 1} ->
- ?MANGO_ERROR({no_usable_index, selector_unsupported});
- {0, 0} ->
+ case length(UsableIndexes) of
+ 0 ->
AllDocs = mango_idx:special(Db),
create_cursor(Db, AllDocs, Selector, Opts);
_ ->
- create_cursor(Db, UsableIndexes, Selector, Opts)
+ case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
+ [] ->
+ % use_index doesn't match a valid index - fall back to a valid one
+ create_cursor(Db, UsableIndexes, Selector, Opts);
+ UserSpecifiedIndex ->
+ create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
+ end
end.
@@ -90,9 +93,7 @@ execute(#cursor{index=Idx}=Cursor, UserFun, UserAcc) ->
maybe_filter_indexes_by_ddoc(Indexes, Opts) ->
case lists:keyfind(use_index, 1, Opts) of
{use_index, []} ->
- % We remove any indexes that have a selector
- % since they are only used when specified via use_index
- remove_indexes_with_partial_filter_selector(Indexes);
+ [];
{use_index, [DesignId]} ->
filter_indexes(Indexes, DesignId);
{use_index, [DesignId, ViewName]} ->
@@ -150,12 +151,53 @@ group_indexes_by_type(Indexes) ->
end, ?CURSOR_MODULES).
-maybe_add_warning(UserFun, #idx{type = IndexType}, UserAcc) ->
- case IndexType of
+maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, UserAcc) ->
+ NoIndexWarning = case Index#idx.type of
<<"special">> ->
- Arg = {add_key, warning, <<"no matching index found, create an index to optimize query time">>},
- {_Go, UserAcc0} = UserFun(Arg, UserAcc),
- UserAcc0;
+ <<"no matching index found, create an index to optimize query time">>;
_ ->
- UserAcc
- end.
\ No newline at end of file
+ ok
+ end,
+
+ UseIndexInvalidWarning = case lists:keyfind(use_index, 1, Opts) of
+ {use_index, []} ->
+ NoIndexWarning;
+ {use_index, [DesignId]} ->
+ case filter_indexes([Index], DesignId) of
+ [] ->
+ fmt("_design/~s was not used because it does not contain a valid index for this query.",
+ [ddoc_name(DesignId)]);
+ _ ->
+ NoIndexWarning
+ end;
+ {use_index, [DesignId, ViewName]} ->
+ case filter_indexes([Index], DesignId, ViewName) of
+ [] ->
+ fmt("_design/~s, ~s was not used because it is not a valid index for this query.",
+ [ddoc_name(DesignId), ViewName]);
+ _ ->
+ NoIndexWarning
+ end
+ end,
+
+ maybe_add_warning_int(UseIndexInvalidWarning, UserFun, UserAcc).
+
+
+maybe_add_warning_int(ok, _, UserAcc) ->
+ UserAcc;
+
+maybe_add_warning_int(Warning, UserFun, UserAcc) ->
+ Arg = {add_key, warning, Warning},
+ {_Go, UserAcc0} = UserFun(Arg, UserAcc),
+ UserAcc0.
+
+
+fmt(Format, Args) ->
+ iolist_to_binary(io_lib:format(Format, Args)).
+
+
+ddoc_name(<<"_design/", Name/binary>>) ->
+ Name;
+
+ddoc_name(Name) ->
+ Name.
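Together with the mango_error deletions below, this turns an unusable use_index from a hard 400 into a soft failure: the query runs on a usable index and the response gains a warning built by fmt/2 above. The fallback decision, restated as a hypothetical helper mirroring create/3:

    pick_index([], Usable) -> {Usable, warn};   % use_index matched no valid index
    pick_index(Matched, _) -> {Matched, ok}.    % use_index named a valid index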
diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl
index 78cac7f5d..f4a760d1c 100644
--- a/src/mango/src/mango_cursor_special.erl
+++ b/src/mango/src/mango_cursor_special.erl
@@ -31,10 +31,13 @@
create(Db, Indexes, Selector, Opts) ->
InitialRange = mango_idx_view:field_ranges(Selector),
CatchAll = [{<<"_id">>, {'$gt', null, '$lt', mango_json_max}}],
- FieldRanges = lists:append(CatchAll, InitialRange),
+ % Order matters here: we only want to use the catchall index
+ % if no other range can fulfill the query, because we know the
+ % catchall is the most expensive range.
+ FieldRanges = InitialRange ++ CatchAll,
Composited = mango_cursor_view:composite_indexes(Indexes, FieldRanges),
{Index, IndexRanges} = mango_cursor_view:choose_best_index(Db, Composited),
-
+
Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()),
Skip = couch_util:get_value(skip, Opts, 0),
Fields = couch_util:get_value(fields, Opts, all_fields),
diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl
index 88abfc00a..3883bc8f2 100644
--- a/src/mango/src/mango_cursor_text.erl
+++ b/src/mango/src/mango_cursor_text.erl
@@ -124,7 +124,7 @@ execute(Cursor, UserFun, UserAcc) ->
Arg = {add_key, bookmark, JsonBM},
{_Go, FinalUserAcc} = UserFun(Arg, LastUserAcc),
FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc),
- FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Idx, FinalUserAcc0),
+ FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, FinalUserAcc0),
{ok, FinalUserAcc1}
end.
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 3fcec07be..1e2108b7d 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -66,7 +66,7 @@ explain(Cursor) ->
{include_docs, Args#mrargs.include_docs},
{view_type, Args#mrargs.view_type},
{reduce, Args#mrargs.reduce},
- {start_key, Args#mrargs.start_key},
+ {start_key, maybe_replace_max_json(Args#mrargs.start_key)},
{end_key, maybe_replace_max_json(Args#mrargs.end_key)},
{direction, Args#mrargs.direction},
{stable, Args#mrargs.stable},
@@ -137,7 +137,7 @@ execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFu
{_Go, FinalUserAcc} = UserFun(Arg, LastCursor#cursor.user_acc),
Stats0 = LastCursor#cursor.execution_stats,
FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc),
- FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Idx, FinalUserAcc0),
+ FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, FinalUserAcc0),
{ok, FinalUserAcc1};
{error, Reason} ->
{error, Reason}
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index 4c55ef3f6..ad665e2f3 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -21,31 +21,12 @@
]).
-info(mango_idx, {no_usable_index, no_indexes_defined}) ->
- {
- 400,
- <<"no_usable_index">>,
- <<"There are no indexes defined in this database.">>
- };
-info(mango_idx, {no_usable_index, no_index_matching_name}) ->
- {
- 400,
- <<"no_usable_index">>,
- <<"No index matches the index specified with \"use_index\"">>
- };
info(mango_idx, {no_usable_index, missing_sort_index}) ->
{
400,
<<"no_usable_index">>,
<<"No index exists for this sort, try indexing by the sort fields.">>
};
-info(mango_cursor, {no_usable_index, selector_unsupported}) ->
- {
- 400,
- <<"no_usable_index">>,
- <<"The index specified with \"use_index\" is not usable for the query.">>
- };
-
info(mango_json_bookmark, {invalid_bookmark, BadBookmark}) ->
{
400,
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index 8e19ebff8..ea5949c02 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -59,20 +59,16 @@ list(Db) ->
get_usable_indexes(Db, Selector, Opts) ->
ExistingIndexes = mango_idx:list(Db),
- if ExistingIndexes /= [] -> ok; true ->
- ?MANGO_ERROR({no_usable_index, no_indexes_defined})
- end,
- FilteredIndexes = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts),
- if FilteredIndexes /= [] -> ok; true ->
- ?MANGO_ERROR({no_usable_index, no_index_matching_name})
- end,
+ GlobalIndexes = mango_cursor:remove_indexes_with_partial_filter_selector(ExistingIndexes),
+ UserSpecifiedIndex = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts),
+ UsableIndexes0 = lists:usort(GlobalIndexes ++ UserSpecifiedIndex),
SortFields = get_sort_fields(Opts),
UsableFilter = fun(I) -> is_usable(I, Selector, SortFields) end,
- UsableIndexes0 = lists:filter(UsableFilter, FilteredIndexes),
+ UsableIndexes1 = lists:filter(UsableFilter, UsableIndexes0),
- case maybe_filter_by_sort_fields(UsableIndexes0, SortFields) of
+ case maybe_filter_by_sort_fields(UsableIndexes1, SortFields) of
{ok, SortIndexes} ->
SortIndexes;
{error, no_usable_index} ->
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
index 1d5ae9bad..29b4441a1 100644
--- a/src/mango/src/mango_idx_text.erl
+++ b/src/mango/src/mango_idx_text.erl
@@ -329,6 +329,9 @@ indexable_fields(Fields, {op_or, Args}) when is_list(Args) ->
indexable_fields(Fields, {op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
Fields0 = indexable_fields(Fields, ExistsQuery),
indexable_fields(Fields0, Arg);
+% forces "$exists" : false to use _all_docs
+indexable_fields(_, {op_not, {_, false}}) ->
+ [];
indexable_fields(Fields, {op_insert, Arg}) when is_binary(Arg) ->
Fields;
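The new clause makes a selector such as {"field": {"$exists": false}} contribute no indexable fields, so the query planner falls back to _all_docs, the only index guaranteed to hold documents that lack the field. Hedged trace:

    %% {"company": {"$exists": false}} normalizes to an op_not whose match
    %% arm is the literal false; the clause above then returns [], no text
    %% index is considered usable, and _all_docs is chosen instead.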
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl
index 61d79b7ec..6150e1d19 100644
--- a/src/mango/src/mango_native_proc.erl
+++ b/src/mango/src/mango_native_proc.erl
@@ -113,6 +113,9 @@ handle_cast(garbage_collect, St) ->
erlang:garbage_collect(),
{noreply, St};
+handle_cast(stop, St) ->
+ {stop, normal, St};
+
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
@@ -169,19 +172,12 @@ get_text_entries({IdxProps}, Doc) ->
get_index_partial_filter_selector(IdxProps) ->
- case couch_util:get_value(<<"partial_filter_selector">>, IdxProps) of
- undefined ->
+ case couch_util:get_value(<<"partial_filter_selector">>, IdxProps, {[]}) of
+ {[]} ->
% this is to support legacy text indexes that had the partial_filter_selector
% set as selector
- case couch_util:get_value(<<"selector">>, IdxProps, []) of
- [] ->
- {[]};
- Else ->
- Else
- end;
- [] ->
- {[]};
- Else ->
+ couch_util:get_value(<<"selector">>, IdxProps, {[]});
+ Else ->
Else
end.
@@ -363,4 +359,35 @@ validate_index_info(IndexInfo) ->
[invalid_index | Results0]
end
end, [], IdxTypes),
- lists:member(valid_index, Results).
\ No newline at end of file
+ lists:member(valid_index, Results).
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+handle_garbage_collect_cast_test() ->
+ Pid = self(),
+ {_, TracerRef} = spawn_monitor(fun() ->
+ erlang:trace(Pid, true, [garbage_collection]),
+ receive {trace, Pid, gc_start, _} ->
+ erlang:trace(Pid, false, [garbage_collection]),
+ exit(gc_start)
+ end
+ end),
+ erlang:yield(),
+ ?assertEqual({noreply, []}, handle_cast(garbage_collect, [])),
+ receive
+ {'DOWN', TracerRef, _, _, Msg} -> ?assertEqual(gc_start, Msg)
+ after 1000 ->
+ erlang:error({assertion_failed, [{module, ?MODULE}, {line, ?LINE},
+ {expected, gc_start}, {reason, timeout}]})
+ end.
+
+handle_stop_cast_test() ->
+ ?assertEqual({stop, normal, []}, handle_cast(stop, [])).
+
+handle_invalid_cast_test() ->
+ ?assertEqual({stop, {invalid_cast, random}, []}, handle_cast(random, [])).
+
+-endif.
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
index 4ff36945a..968dc3c74 100644
--- a/src/mango/src/mango_selector.erl
+++ b/src/mango/src/mango_selector.erl
@@ -578,36 +578,63 @@ match({[_, _ | _] = _Props} = Sel, _Value, _Cmp) ->
% until we match then all or run out of selector to
% match against.
+has_required_fields(Selector, RequiredFields) ->
+ Remainder = has_required_fields_int(Selector, RequiredFields),
+ Remainder == [].
+
% Empty selector
-has_required_fields({[]}, _) ->
- false;
+has_required_fields_int({[]}, Remainder) ->
+ Remainder;
% No more required fields
-has_required_fields(_, []) ->
- true;
+has_required_fields_int(_, []) ->
+ [];
% No more selector
-has_required_fields([], _) ->
- false;
+has_required_fields_int([], Remainder) ->
+ Remainder;
-has_required_fields(Selector, RequiredFields) when not is_list(Selector) ->
- has_required_fields([Selector], RequiredFields);
+has_required_fields_int(Selector, RequiredFields) when not is_list(Selector) ->
+ has_required_fields_int([Selector], RequiredFields);
-% We can "see" through $and operator. We ignore other
-% combination operators because they can't be used to restrict
-% an index.
-has_required_fields([{[{<<"$and">>, Args}]}], RequiredFields)
+% We can "see" through $and operator. Iterate
+% through the list of child operators.
+has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields)
+ when is_list(Args) ->
+ has_required_fields_int(Args, RequiredFields);
+
+% We can "see" through $or operator. Required fields
+% must be covered by all children.
+has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields)
+ when is_list(Args) ->
+ Remainder0 = lists:foldl(fun(Arg, Acc) ->
+ % for each child test coverage against the full
+ % set of required fields
+ Remainder = has_required_fields_int(Arg, RequiredFields),
+
+ % collect the remaining fields across all children
+ Acc ++ Remainder
+ end, [], Args),
+
+ % remove duplicate fields
+ Remainder1 = lists:usort(Remainder0),
+ has_required_fields_int(Rest, Remainder1);
+
+% Handle $and operator where it has peers. Required fields
+% can be covered by any child.
+has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields)
when is_list(Args) ->
- has_required_fields(Args, RequiredFields);
+ Remainder = has_required_fields_int(Args, RequiredFields),
+ has_required_fields_int(Rest, Remainder);
-has_required_fields([{[{Field, Cond}]} | Rest], RequiredFields) ->
+has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) ->
case Cond of
% $exists:false is a special case - this is the only operator
% that explicitly does not require a field to exist
{[{<<"$exists">>, false}]} ->
- has_required_fields(Rest, RequiredFields);
+ has_required_fields_int(Rest, RequiredFields);
_ ->
- has_required_fields(Rest, lists:delete(Field, RequiredFields))
+ has_required_fields_int(Rest, lists:delete(Field, RequiredFields))
end.
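The $or clause above collects the required fields each branch fails to cover and succeeds only when the union of those remainders is empty, i.e. every branch supplies every required field. A worked trace under assumed branch values:

    %% RequiredFields = [<<"A">>], Selector = {"$or": [{"A": 1}, {"B": 2}]}
    %% branch 1 remainder: []          (covers A)
    %% branch 2 remainder: [<<"A">>]   (covers only B)
    %% union = [<<"A">>] =/= []  =>  has_required_fields/2 returns false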
@@ -651,6 +678,28 @@ has_required_fields_and_true_test() ->
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
+has_required_fields_nested_and_true_test() ->
+ RequiredFields = [<<"A">>, <<"B">>],
+ Selector1 = {[{<<"$and">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector2 = {[{<<"$and">>,
+ [
+ {[{<<"B">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector = {[{<<"$and">>,
+ [
+ Selector1,
+ Selector2
+ ]
+ }]},
+
+ Normalized = normalize(Selector),
+ ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
+
has_required_fields_and_false_test() ->
RequiredFields = [<<"A">>, <<"C">>],
Selector = {[{<<"$and">>,
@@ -662,7 +711,7 @@ has_required_fields_and_false_test() ->
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
-has_required_fields_or_test() ->
+has_required_fields_or_false_test() ->
RequiredFields = [<<"A">>],
Selector = {[{<<"$or">>,
[
@@ -673,4 +722,144 @@ has_required_fields_or_test() ->
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
--endif.
\ No newline at end of file
+has_required_fields_or_true_test() ->
+ RequiredFields = [<<"A">>, <<"B">>, <<"C">>],
+ Selector = {[{<<"A">>, "foo"},
+ {<<"$or">>,
+ [
+ {[{<<"B">>, <<"bar">>}]},
+ {[{<<"B">>, <<"baz">>}]}
+ ]
+ },
+ {<<"C">>, "qux"}
+ ]},
+ Normalized = normalize(Selector),
+ ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
+
+has_required_fields_and_nested_or_true_test() ->
+ RequiredFields = [<<"A">>, <<"B">>],
+ Selector1 = {[{<<"$and">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector2 = {[{<<"$or">>,
+ [
+ {[{<<"B">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector = {[{<<"$and">>,
+ [
+ Selector1,
+ Selector2
+ ]
+ }]},
+ Normalized = normalize(Selector),
+ ?assertEqual(true, has_required_fields(Normalized, RequiredFields)),
+
+ SelectorReverse = {[{<<"$and">>,
+ [
+ Selector2,
+ Selector1
+ ]
+ }]},
+ NormalizedReverse = normalize(SelectorReverse),
+ ?assertEqual(true, has_required_fields(NormalizedReverse, RequiredFields)).
+
+has_required_fields_and_nested_or_false_test() ->
+ RequiredFields = [<<"A">>, <<"B">>],
+ Selector1 = {[{<<"$and">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector2 = {[{<<"$or">>,
+ [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector = {[{<<"$and">>,
+ [
+ Selector1,
+ Selector2
+ ]
+ }]},
+ Normalized = normalize(Selector),
+ ?assertEqual(false, has_required_fields(Normalized, RequiredFields)),
+
+ SelectorReverse = {[{<<"$and">>,
+ [
+ Selector2,
+ Selector1
+ ]
+ }]},
+
+ NormalizedReverse = normalize(SelectorReverse),
+ ?assertEqual(false, has_required_fields(NormalizedReverse, RequiredFields)).
+
+has_required_fields_or_nested_and_true_test() ->
+ RequiredFields = [<<"A">>],
+ Selector1 = {[{<<"$and">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector2 = {[{<<"$and">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector = {[{<<"$or">>,
+ [
+ Selector1,
+ Selector2
+ ]
+ }]},
+ Normalized = normalize(Selector),
+ ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
+
+has_required_fields_or_nested_or_true_test() ->
+ RequiredFields = [<<"A">>],
+ Selector1 = {[{<<"$or">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector2 = {[{<<"$or">>,
+ [
+ {[{<<"A">>, <<"bar">>}]}
+ ]
+ }]},
+ Selector = {[{<<"$or">>,
+ [
+ Selector1,
+ Selector2
+ ]
+ }]},
+ Normalized = normalize(Selector),
+ ?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
+
+has_required_fields_or_nested_or_false_test() ->
+ RequiredFields = [<<"A">>],
+ Selector1 = {[{<<"$or">>,
+ [
+ {[{<<"A">>, <<"foo">>}]}
+ ]
+ }]},
+ Selector2 = {[{<<"$or">>,
+ [
+ {[{<<"B">>, <<"bar">>}]}
+ ]
+ }]},
+ Selector = {[{<<"$or">>,
+ [
+ Selector1,
+ Selector2
+ ]
+ }]},
+ Normalized = normalize(Selector),
+ ?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
+
+-endif.
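The tests above pin down how has_required_fields/2 treats the combinators after normalize/1: under $and a required field is covered if any branch supplies it, while under $or it is covered only if every branch supplies it. A minimal sketch of that rule in Python (illustrative only, not the mango implementation; selectors simplified to plain dicts):

    def covers(selector, field):
        # $and: one covering branch suffices; $or: every branch must cover
        for key, value in selector.items():
            if key == "$and" and any(covers(b, field) for b in value):
                return True
            if key == "$or" and value and all(covers(b, field) for b in value):
                return True
            if key == field:
                return True
        return False

    def has_required_fields(selector, required):
        return all(covers(selector, f) for f in required)

Run against the selector shapes used in the tests, this yields exactly the true/false expectations asserted above.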
diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py
index 82554a112..f7e151ad8 100644
--- a/src/mango/test/02-basic-find-test.py
+++ b/src/mango/test/02-basic-find-test.py
@@ -159,6 +159,18 @@ class BasicFindTests(mango.UserDocsTests):
assert len(docs) == 1
assert docs[0]["user_id"] == 7
+ def test_multi_cond_duplicate_field(self):
+ # build the JSON by hand: a Python dict cannot hold duplicate keys
+ body = ("{\"selector\":{\"location.city\":{\"$regex\": \"^L+\"},"
+ "\"location.city\":{\"$exists\":true}}}")
+ r = self.db.sess.post(self.db.path("_find"), data=body)
+ r.raise_for_status()
+ docs = r.json()["docs"]
+
+ # only the second (i.e. last) instance of the duplicated
+ # "location.city" field should be applied
+ self.assertEqual(len(docs), 15)
+
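This test relies on a property of common JSON decoders, including the one in Python's standard library: when a key is repeated, only the last value is kept. For example:

    import json

    json.loads('{"a": 1, "a": 2}')  # -> {'a': 2}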
def test_multi_cond_or(self):
docs = self.db.find({
"$and":[
diff --git a/src/mango/test/03-operator-test.py b/src/mango/test/03-operator-test.py
index 239cc7d41..4650c7e84 100644
--- a/src/mango/test/03-operator-test.py
+++ b/src/mango/test/03-operator-test.py
@@ -264,6 +264,13 @@ class OperatorTextTests(mango.UserDocsTextTests, OperatorTests):
class OperatorAllDocsTests(mango.UserDocsTestsNoIndexes, OperatorTests):
- pass
+ def test_range_id_eq(self):
+ doc_id = "8e1c90c0-ac18-4832-8081-40d14325bde0"
+ r = self.db.find({
+ "_id": doc_id
+ }, explain=True, return_raw=True)
+
+ self.assertEqual(r["mrargs"]["end_key"], doc_id)
+ self.assertEqual(r["mrargs"]["start_key"], doc_id)
diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py
index 05571a7e8..2a40fda38 100644
--- a/src/mango/test/05-index-selection-test.py
+++ b/src/mango/test/05-index-selection-test.py
@@ -28,6 +28,32 @@ class IndexSelectionTests:
}, explain=True)
self.assertEqual(resp["index"]["type"], "json")
+ def test_with_nested_and(self):
+ resp = self.db.find({
+ "name.first": {
+ "$gt": "a",
+ "$lt": "z"
+ },
+ "name.last": "Foo"
+ }, explain=True)
+ self.assertEqual(resp["index"]["type"], "json")
+
+ def test_with_or(self):
+ # index on ["company","manager"]
+ ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198"
+
+ resp = self.db.find({
+ "company": {
+ "$gt": "a",
+ "$lt": "z"
+ },
+ "$or": [
+ {"manager": "Foo"},
+ {"manager": "Bar"}
+ ]
+ }, explain=True)
+ self.assertEqual(resp["index"]["ddoc"], ddocid)
+
def test_use_most_columns(self):
# ddoc id for the age index
ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f"
@@ -56,12 +82,8 @@ class IndexSelectionTests:
def test_invalid_use_index(self):
# ddoc id for the age index
ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f"
- try:
- self.db.find({}, use_index=ddocid)
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("bad find")
+ r = self.db.find({}, use_index=ddocid, return_raw=True)
+ self.assertEqual(r["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid))
def test_uses_index_when_no_range_or_equals(self):
# index on ["manager"] should be valid because
@@ -77,34 +99,79 @@ class IndexSelectionTests:
resp_explain = self.db.find(selector, explain=True)
self.assertEqual(resp_explain["index"]["type"], "json")
-
def test_reject_use_index_invalid_fields(self):
# index on ["company","manager"] which should not be valid
ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198"
selector = {
"company": "Pharmex"
}
- try:
- self.db.find(selector, use_index=ddocid)
- except Exception as e:
- self.assertEqual(e.response.status_code, 400)
- else:
- raise AssertionError("did not reject bad use_index")
+ r = self.db.find(selector, use_index=ddocid, return_raw=True)
+ self.assertEqual(r["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid))
+
+ # should still return a correct result
+ for d in r["docs"]:
+ self.assertEqual(d["company"], "Pharmex")
+
+ def test_reject_use_index_ddoc_and_name_invalid_fields(self):
+ # index on ["company","manager"] which should not be valid
+ ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198"
+ name = "a0c425a60cf3c3c09e3c537c9ef20059dcef9198"
+ selector = {
+ "company": "Pharmex"
+ }
+
+ resp = self.db.find(selector, use_index=[ddocid, name], return_raw=True)
+ self.assertEqual(resp["warning"], "{0}, {1} was not used because it is not a valid index for this query.".format(ddocid, name))
+
+ # should still return a correct result
+ for d in resp["docs"]:
+ self.assertEqual(d["company"], "Pharmex")
def test_reject_use_index_sort_order(self):
# index on ["company","manager"] which should not be valid
+ # and there is no valid fallback (no index on ["company"] alone exists)
ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198"
selector = {
- "company": {"$gt": None},
- "manager": {"$gt": None}
+ "company": {"$gt": None}
}
try:
- self.db.find(selector, use_index=ddocid, sort=[{"manager":"desc"}])
+ self.db.find(selector, use_index=ddocid, sort=[{"company":"desc"}])
except Exception as e:
self.assertEqual(e.response.status_code, 400)
else:
raise AssertionError("did not reject bad use_index")
+ def test_use_index_fallback_if_valid_sort(self):
+ ddocid_valid = "_design/fallbackfoo"
+ ddocid_invalid = "_design/fallbackfoobar"
+ self.db.create_index(fields=["foo"], ddoc=ddocid_invalid)
+ self.db.create_index(fields=["foo", "bar"], ddoc=ddocid_valid)
+ selector = {
+ "foo": {"$gt": None}
+ }
+
+ resp_explain = self.db.find(selector, sort=["foo", "bar"], use_index=ddocid_invalid, explain=True)
+ self.assertEqual(resp_explain["index"]["ddoc"], ddocid_valid)
+
+ resp = self.db.find(selector, sort=["foo", "bar"], use_index=ddocid_invalid, return_raw=True)
+ self.assertEqual(resp["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid_invalid))
+ self.assertEqual(len(resp["docs"]), 0)
+
+ def test_prefer_use_index_over_optimal_index(self):
+ # prefer the index on ["baz"] even though the index on ["baz", "bar"] is better
+ ddocid_preferred = "_design/testsuboptimal"
+ self.db.create_index(fields=["baz"], ddoc=ddocid_preferred)
+ self.db.create_index(fields=["baz", "bar"])
+ selector = {
+ "baz": {"$gt": None},
+ "bar": {"$gt": None}
+ }
+ resp = self.db.find(selector, use_index=ddocid_preferred, return_raw=True)
+ self.assertTrue("warning" not in resp)
+
+ resp_explain = self.db.find(selector, use_index=ddocid_preferred, explain=True)
+ self.assertEqual(resp_explain["index"]["ddoc"], ddocid_preferred)
+
# This doc will not be saved given the new ddoc validation code
# in couch_mrview
def test_manual_bad_view_idx01(self):
@@ -142,6 +209,14 @@ class IndexSelectionTests:
self.db.save_doc(design_doc)
+ def test_explain_sort_reverse(self):
+ selector = {
+ "manager": {"$gt": None}
+ }
+ resp_explain = self.db.find(selector, fields=["manager"], sort=[{"manager":"desc"}], explain=True)
+ self.assertEqual(resp_explain["index"]["type"], "json")
+
+
class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests):
@classmethod
@@ -181,15 +256,13 @@ class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests):
@unittest.skipUnless(mango.has_text_service(), "requires text service")
-class TextIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests):
+class TextIndexSelectionTests(mango.UserDocsTests):
@classmethod
def setUpClass(klass):
super(TextIndexSelectionTests, klass).setUpClass()
-
- def setUp(self):
- self.db.recreate()
- user_docs.add_text_indexes(self.db, {})
+ if mango.has_text_service():
+ user_docs.add_text_indexes(klass.db, {})
def test_with_text(self):
resp = self.db.find({
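Taken together, the changes in this file relax an unusable use_index from a hard 400 response to a warning plus index fallback. A sketch of the new contract in terms of the suite's own helpers (bad_ddoc is a placeholder for any ddoc that cannot serve the selector):

    r = db.find({"company": "Pharmex"}, use_index=bad_ddoc, return_raw=True)
    assert "warning" in r  # the mismatch is reported
    assert all(d["company"] == "Pharmex" for d in r["docs"])  # results still correct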
diff --git a/src/mango/test/06-basic-text-test.py b/src/mango/test/06-basic-text-test.py
index c02950c46..3783006ab 100644
--- a/src/mango/test/06-basic-text-test.py
+++ b/src/mango/test/06-basic-text-test.py
@@ -450,14 +450,14 @@ class ElemMatchTests(mango.FriendDocsTextTests):
}
}
docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["bestfriends"] == ["Wolverine", "Cyclops"]
+ self.assertEqual(len(docs), 1)
+ self.assertEqual(docs[0]["bestfriends"], ["Wolverine", "Cyclops"])
q = {"results": {"$elemMatch": {"$gte": 80, "$lt": 85}}}
docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["results"] == [82, 85, 88]
+ self.assertEqual(len(docs), 1)
+ self.assertEqual(docs[0]["results"], [82, 85, 88])
def test_elem_match(self):
q = {"friends": {
@@ -466,9 +466,9 @@ class ElemMatchTests(mango.FriendDocsTextTests):
}
}
docs = self.db.find(q)
- assert len(docs) == 2
+ self.assertEqual(len(docs), 2)
for d in docs:
- assert d["user_id"] in (0, 1)
+ self.assertIn(d["user_id"], (0, 1))
q = {
"friends": {
@@ -479,8 +479,8 @@ class ElemMatchTests(mango.FriendDocsTextTests):
}
}
docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["user_id"] == 4
+ self.assertEqual(len(docs), 1)
+ self.assertEqual(docs[0]["user_id"], 4)
# Check that we can do logic in elemMatch
@@ -490,8 +490,9 @@ class ElemMatchTests(mango.FriendDocsTextTests):
}}
}
docs = self.db.find(q)
- assert len(docs) == 1
- assert docs[0]["user_id"] == 1
+ self.assertEqual(len(docs), 2)
+ for d in docs:
+ self.assertIn(d["user_id"], (1, 15))
q = {
"friends": {
@@ -505,9 +506,9 @@ class ElemMatchTests(mango.FriendDocsTextTests):
}
}
docs = self.db.find(q)
- assert len(docs) == 2
+ self.assertEqual(len(docs), 3)
for d in docs:
- assert d["user_id"] in (1, 4)
+ self.assertIn(d["user_id"], (1, 4, 15))
# Same as last, but using $in
q = {
@@ -519,9 +520,9 @@ class ElemMatchTests(mango.FriendDocsTextTests):
}
}
docs = self.db.find(q)
- assert len(docs) == 2
+ self.assertEqual(len(docs), 3)
for d in docs:
- assert d["user_id"] in (1, 4)
+ self.assertIn(d["user_id"], (1, 4, 15))
q = {
"$and": [{
@@ -564,9 +565,9 @@ class ElemMatchTests(mango.FriendDocsTextTests):
]
}
docs = self.db.find(q)
- assert len(docs) == 3
+ self.assertEqual(len(docs), 3)
for d in docs:
- assert d["user_id"] in (10, 11,12)
+ self.assertIn(d["user_id"], (10, 11, 12))
@unittest.skipUnless(mango.has_text_service(), "requires text service")
class AllMatchTests(mango.FriendDocsTextTests):
diff --git a/src/mango/test/07-text-custom-field-list-test.py b/src/mango/test/07-text-custom-field-list-test.py
index a43e33003..9bfe07598 100644
--- a/src/mango/test/07-text-custom-field-list-test.py
+++ b/src/mango/test/07-text-custom-field-list-test.py
@@ -12,6 +12,7 @@
import mango
import unittest
+import user_docs
@unittest.skipUnless(mango.has_text_service(), "requires text service")
@@ -160,3 +161,52 @@ class CustomFieldsTest(mango.UserDocsTextTests):
})
assert len(docs) == 1
assert docs[0]["user_id"] == 10
+
+@unittest.skipUnless(mango.has_text_service(), "requires text service")
+class CustomFieldsExistsTest(mango.UserDocsTextTests):
+
+ FIELDS = [
+ {"name": "exists_field", "type": "string"},
+ {"name": "exists_array.[]", "type": "string"},
+ {"name": "exists_object.should", "type": "string"},
+ {"name": "twitter", "type": "string"}
+ ]
+
+ def test_exists_field(self):
+ docs = self.db.find({"exists_field": {"$exists": True}})
+ self.assertEqual(len(docs), 2)
+ for d in docs:
+ self.assertIn(d["user_id"], (7, 8))
+
+ docs = self.db.find({"exists_field": {"$exists": False}})
+ self.assertEqual(len(docs), len(user_docs.DOCS) - 2)
+ for d in docs:
+ self.assertNotIn(d["user_id"], (7, 8))
+
+ def test_exists_array(self):
+ docs = self.db.find({"exists_array": {"$exists": True}})
+ self.assertEqual(len(docs), 2)
+ for d in docs:
+ self.assertIn(d["user_id"], (9, 10))
+
+ docs = self.db.find({"exists_array": {"$exists": False}})
+ self.assertEqual(len(docs), len(user_docs.DOCS) - 2)
+ for d in docs:
+ self.assertNotIn(d["user_id"], (9, 10))
+
+ def test_exists_object_member(self):
+ docs = self.db.find({"exists_object.should": {"$exists": True}})
+ self.assertEqual(len(docs), 1)
+ self.assertEqual(docs[0]["user_id"], 11)
+
+ docs = self.db.find({"exists_object.should": {"$exists": False}})
+ self.assertEqual(len(docs), len(user_docs.DOCS) - 1)
+ for d in docs:
+ self.assertNotEqual(d["user_id"], 11)
+
+ def test_exists_false_same_as_views(self):
+ docs = self.db.find({
+ "twitter": {"$exists": False}
+ })
+ for d in docs:
+ self.assertNotIn(d["user_id"], (0, 1, 4, 13))
diff --git a/src/mango/test/10-disable-array-length-field-test.py b/src/mango/test/10-disable-array-length-field-test.py
index ce7713b63..6b6d41926 100644
--- a/src/mango/test/10-disable-array-length-field-test.py
+++ b/src/mango/test/10-disable-array-length-field-test.py
@@ -16,7 +16,7 @@ import unittest
@unittest.skipUnless(mango.has_text_service(), "requires text service")
class DisableIndexArrayLengthsTest(mango.UserDocsTextTests):
- def setUp(klass):
+ def setUp(self):
self.db.recreate()
self.db.create_text_index(ddoc="disable_index_array_lengths",
analyzer="keyword",
diff --git a/src/mango/test/16-index-selectors-test.py b/src/mango/test/16-index-selectors-test.py
index 6d771cc4b..a876dc68f 100644
--- a/src/mango/test/16-index-selectors-test.py
+++ b/src/mango/test/16-index-selectors-test.py
@@ -272,7 +272,17 @@ class IndexSelectorJson(mango.DbPerClass):
self.assertEqual(len(docs), 3)
@unittest.skipUnless(mango.has_text_service(), "requires text service")
+ def test_text_old_selector_still_supported_via_api(self):
+ selector = {"location": {"$gte": "FRA"}}
+ self.db.create_text_index(fields=[{"name":"location", "type":"string"}],
+ selector=selector,
+ ddoc="Selected",
+ name="Selected")
+ docs = self.db.find({"location": {"$exists":True}}, use_index='Selected')
+ self.assertEqual(len(docs), 3)
+
+ @unittest.skipUnless(mango.has_text_service(), "requires text service")
def test_text_partial_filter_only_in_return_if_not_default(self):
- self.db.create_text_index(fields=[{"name":"location"}])
+ self.db.create_text_index(fields=[{"name":"location", "type":"string"}])
index = self.db.list_indexes()[1]
self.assertEqual('partial_filter_selector' in index['def'], False)
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index 03cc67c52..9b6b998cd 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -81,10 +81,16 @@ class Database(object):
r = self.sess.delete(self.url)
def recreate(self):
- self.delete()
- delay()
+ r = self.sess.get(self.url)
+ if r.status_code == 200:
+ db_info = r.json()
+ docs = db_info["doc_count"] + db_info["doc_del_count"]
+ if docs == 0:
+ # db was never used, so recreating it is unnecessary
+ return
+ self.delete()
self.create()
- delay()
+ # recurse: returns once the freshly created, empty db is visible
+ self.recreate()
def save_doc(self, doc):
self.save_docs([doc])
@@ -126,14 +132,20 @@ class Database(object):
body["index"]["partial_filter_selector"] = partial_filter_selector
body = json.dumps(body)
r = self.sess.post(self.path("_index"), data=body)
- delay()
r.raise_for_status()
assert r.json()["id"] is not None
assert r.json()["name"] is not None
- return r.json()["result"] == "created"
+
+ created = r.json()["result"] == "created"
+ if created:
+ # wait until the database reports the index as available
+ while len(self.get_index(r.json()["id"], r.json()["name"])) < 1:
+ delay(t=0.1)
+
+ return created
def create_text_index(self, analyzer=None, idx_type="text",
- partial_filter_selector=None, default_field=None, fields=None,
+ partial_filter_selector=None, selector=None, default_field=None, fields=None,
name=None, ddoc=None,index_array_lengths=None):
body = {
"index": {
@@ -149,6 +161,8 @@ class Database(object):
body["index"]["default_field"] = default_field
if index_array_lengths is not None:
body["index"]["index_array_lengths"] = index_array_lengths
+ if selector is not None:
+ body["index"]["selector"] = selector
if partial_filter_selector is not None:
body["index"]["partial_filter_selector"] = partial_filter_selector
if fields is not None:
@@ -157,7 +171,6 @@ class Database(object):
body["ddoc"] = ddoc
body = json.dumps(body)
r = self.sess.post(self.path("_index"), data=body)
- delay()
r.raise_for_status()
return r.json()["result"] == "created"
@@ -169,13 +182,28 @@ class Database(object):
r = self.sess.get(self.path("_index?"+limit+";"+skip))
r.raise_for_status()
return r.json()["indexes"]
+
+ def get_index(self, ddocid, name):
+ if ddocid is None:
+ return [i for i in self.list_indexes() if i["name"] == name]
+
+ ddocid = ddocid.replace("%2F", "/")
+ if not ddocid.startswith("_design/"):
+ ddocid = "_design/" + ddocid
+
+ if name is None:
+ return [i for i in self.list_indexes() if i["ddoc"] == ddocid]
+ else:
+ return [i for i in self.list_indexes() if i["ddoc"] == ddocid and i["name"] == name]
def delete_index(self, ddocid, name, idx_type="json"):
path = ["_index", ddocid, idx_type, name]
r = self.sess.delete(self.path(path), params={"w": "3"})
- delay()
r.raise_for_status()
+ while len(self.get_index(ddocid, name)) == 1:
+ delay(t=0.1)
+
def bulk_delete(self, docs):
body = {
"docids" : docs,
@@ -183,7 +211,6 @@ class Database(object):
}
body = json.dumps(body)
r = self.sess.post(self.path("_index/_bulk_delete"), data=body)
- delay(n=10)
return r.json()
def find(self, selector, limit=25, skip=0, sort=None, fields=None,
@@ -245,7 +272,7 @@ class DbPerClass(unittest.TestCase):
@classmethod
def setUpClass(klass):
klass.db = Database(random_db_name())
- klass.db.create(q=1, n=3)
+ klass.db.create(q=1, n=1)
def setUp(self):
self.db = self.__class__.db
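Several of the mango.py changes above replace fixed delay() calls with polling (create_index and delete_index now wait until the index list reflects the change). The general shape of that pattern, as a standalone sketch with illustrative names (the suite inlines the loops rather than sharing a helper):

    import time

    def wait_until(predicate, timeout=30.0, interval=0.1):
        # poll a condition instead of sleeping for a fixed time
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return
            time.sleep(interval)
        raise AssertionError("condition not met within %.1fs" % timeout)

    # e.g. wait_until(lambda: len(db.get_index(ddoc, name)) == 1)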
diff --git a/src/rexi/rebar.config b/src/rexi/rebar.config
new file mode 100644
index 000000000..e0d18443b
--- /dev/null
+++ b/src/rexi/rebar.config
@@ -0,0 +1,2 @@
+{cover_enabled, true}.
+{cover_print_enabled, true}.
diff --git a/test/build/test-run-couch-for-mango.sh b/test/build/test-run-couch-for-mango.sh
index 0597a8fca..472b19bd0 100755
--- a/test/build/test-run-couch-for-mango.sh
+++ b/test/build/test-run-couch-for-mango.sh
@@ -24,6 +24,9 @@ while ( [ $COUCH_STARTED -ne 0 ] ); do
fi
done
+# wait for cluster setup to complete
+sleep 5
+
cd src/mango/
nosetests
diff --git a/test/javascript/run b/test/javascript/run
index c611be51e..8ae424467 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -107,7 +107,10 @@ def options():
dest="ignore", help="Ignore test suites"),
op.make_option("-u", "--suites", type="string", action="callback",
default=None, callback=get_delimited_list,
- dest="suites", help="Run specific suites")
+ dest="suites", help="Run specific suites"),
+ op.make_option("-p", "--path", type="string",
+ default="test/javascript/tests",
+ dest="test_path", help="Path where the tests are located")
]
@@ -118,10 +121,9 @@ def main():
run_list = []
ignore_list = []
tests = []
-
- run_list = ["test/javascript/tests"] if not opts.suites else opts.suites
- run_list = build_test_case_paths(run_list)
- ignore_list = build_test_case_paths(opts.ignore)
+ run_list = [opts.test_path] if not opts.suites else opts.suites
+ run_list = build_test_case_paths(opts.test_path, run_list)
+ ignore_list = build_test_case_paths(opts.test_path, opts.ignore)
# sort is needed because certain tests fail if executed out of order
tests = sorted(list(set(run_list)-set(ignore_list)))
@@ -151,7 +153,7 @@ def main():
failed, passed) + os.linesep)
exit(failed > 0)
-def build_test_case_paths(args=None):
+def build_test_case_paths(path, args=None):
tests = []
if args is None:
args = []
@@ -161,7 +163,7 @@ def build_test_case_paths(args=None):
elif os.path.isfile(name):
check = tests.append(name)
else:
- pname = os.path.join("test/javascript/tests", name)
+ pname = os.path.join(path, name)
if os.path.isfile(pname):
tests.append(pname)
elif os.path.isfile(pname + ".js"):
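With the new --path option, the cluster suites added below can be run in isolation, e.g. (illustrative invocation from the repository root):

    test/javascript/run -p test/javascript/tests-cluster/with-quorum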
diff --git a/test/javascript/tests-cluster/with-quorum/db-creation.js b/test/javascript/tests-cluster/with-quorum/db-creation.js
new file mode 100644
index 000000000..f8efd6e68
--- /dev/null
+++ b/test/javascript/tests-cluster/with-quorum/db-creation.js
@@ -0,0 +1,27 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Test DB creation on a cluster when quorum can be met.
+couchTests.db_creation = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+
+ // DB Creation should return 201 - Created
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/");
+ T(xhr.status == 201);
+
+ // cleanup
+ db.deleteDb();
+};
diff --git a/test/javascript/tests-cluster/without-quorum/db-creation.js b/test/javascript/tests-cluster/without-quorum/db-creation.js
new file mode 100644
index 000000000..0d8ff8367
--- /dev/null
+++ b/test/javascript/tests-cluster/without-quorum/db-creation.js
@@ -0,0 +1,28 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Test DB creation on a cluster when quorum cannot be met.
+couchTests.db_creation = function(debug) {
+
+ if (debug) debugger;
+
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+
+ // DB Creation should return 202 - Accepted
+ var xhr = CouchDB.request("PUT", "/" + db_name + "/");
+ T(xhr.status == 202);
+
+ // cleanup
+ // TODO: DB deletion fails if the quorum is not met.
+ xhr = CouchDB.request("DELETE", "/" + db_name + "/");
+};
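The pair of suites above encodes the cluster write semantics: a database creation acknowledged by a write quorum returns 201 Created, while one accepted before quorum is reached returns 202 Accepted. The same check over plain HTTP, as a sketch (node URL and db name are placeholders):

    import requests

    r = requests.put("http://127.0.0.1:5984/quorum_check_db")
    # 201: quorum met; 202: accepted without quorum
    assert r.status_code in (201, 202)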
diff --git a/test/javascript/tests/design_docs_query.js b/test/javascript/tests/design_docs_query.js
new file mode 100644
index 000000000..8fc8da5f8
--- /dev/null
+++ b/test/javascript/tests/design_docs_query.js
@@ -0,0 +1,154 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+couchTests.design_docs_query = function(debug) {
+ var db_name = get_random_db_name();
+ var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"});
+ db.createDb();
+ if (debug) debugger;
+
+ var docs = makeDocs(5);
+
+ // create the docs
+ var results = db.bulkSave(docs);
+ T(results.length == 5);
+ for (var i = 0; i < 5; i++) {
+ T(results[i].id == docs[i]._id);
+ }
+
+ // create the ddocs
+ for (var i = 0; i < 5; i++) {
+ T(db.save({
+ _id : "_design/ddoc0" + (i+1).toString(),
+ views : {
+ "testing" : {
+ "map" : "function(){emit(1,1)}"
+ }
+ }
+ }).ok);
+ }
+
+ // test design_docs
+ var path = "/" + db_name + "/_design_docs?";
+ var xhr_AllDDocs = CouchDB.request("GET", path);
+ T(xhr_AllDDocs.status == 200, "standard get should be 200");
+ var allDDocs = JSON.parse(xhr_AllDDocs.responseText);
+ TEquals(10, allDDocs.total_rows, "total_rows mismatch");
+ TEquals(5, allDDocs.rows.length, "amount of rows mismatch");
+
+ // test key="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "key=\"_design/ddoc03\"");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(1, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "key test");
+
+ // test descending=true
+ var xhr = CouchDB.request("GET", path + "descending=true");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(5, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc05", result.rows[0].key, "descending test");
+
+ // test descending=false
+ var xhr = CouchDB.request("GET", path + "descending=false");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(5, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc01", result.rows[0].key, "descending test");
+
+ // test end_key="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[2].key, "end_key test");
+
+ // test endkey="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "endkey=\"_design/ddoc03\"");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[2].key, "endkey test");
+
+ // test start_key="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "start_key=\"_design/ddoc03\"");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "start_key test");
+
+ // test startkey="_design/ddoc03"
+ var xhr = CouchDB.request("GET", path + "startkey=\"_design/ddoc03\"");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "startkey test");
+
+ // test end_key="_design/ddoc03"&inclusive_end=true
+ var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"&inclusive_end=true");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[2].key, "end_key and inclusive_end test");
+
+ // test end_key="_design/ddoc03"&inclusive_end=false
+ var xhr = CouchDB.request("GET", path + "end_key=\"_design/ddoc03\"&inclusive_end=false");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc02", result.rows[1].key, "end_key and inclusive_end test");
+
+ // test end_key="_design/ddoc03"&inclusive_end=false&descending=true
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc03\"&inclusive_end=false&descending=true");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc04", result.rows[1].key, "end_key, inclusive_end and descending test");
+
+ // test end_key="_design/ddoc05"&limit=2
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc05\"&limit=2");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc02", result.rows[1].key, "end_key and limit test");
+
+ // test end_key="_design/ddoc05"&skip=2
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc05\"&skip=2");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(3, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[0].key, "end_key and skip test");
+ TEquals("_design/ddoc05", result.rows[2].key, "end_key and skip test");
+
+ // test end_key="_design/ddoc05"&update_seq=true
+ var xhr = CouchDB.request("GET", path +
+ "end_key=\"_design/ddoc05\"&update_seq=true");
+ T(xhr.status == 200, "standard get should be 200");
+ var result = JSON.parse(xhr.responseText);
+ T(result.update_seq);
+
+ // test POST with keys
+ var xhr = CouchDB.request("POST", path, {
+ headers: {"Content-Type": "application/json"},
+ body: JSON.stringify({"keys" : ["_design/ddoc02", "_design/ddoc03"]})
+ });
+ T(xhr.status == 200, "standard post should be 200");
+ var result = JSON.parse(xhr.responseText);
+ TEquals(2, result.rows.length, "amount of rows mismatch");
+ TEquals("_design/ddoc03", result.rows[1].key, "POST test");
+
+ db.deleteDb();
+};
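The new JS suite exercises the _design_docs endpoint end to end. The same queries can be issued directly over HTTP; a minimal sketch using Python's requests (node URL and db name are placeholders):

    import requests

    base = "http://127.0.0.1:5984/mydb/_design_docs"
    # key parameters are JSON-encoded, hence the embedded quotes
    r = requests.get(base, params={"start_key": '"_design/ddoc03"'})
    r.raise_for_status()
    print([row["key"] for row in r.json()["rows"]])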
diff --git a/test/javascript/tests/view_errors.js b/test/javascript/tests/view_errors.js
index dd60292a3..f135b749a 100644
--- a/test/javascript/tests/view_errors.js
+++ b/test/javascript/tests/view_errors.js
@@ -169,7 +169,7 @@ couchTests.view_errors = function(debug) {
T(xhr.status == 400);
result = JSON.parse(xhr.responseText);
T(result.error == "bad_request");
- T(result.reason == "`keys` member must be a array.");
+ T(result.reason == "`keys` member must be an array.");
// if the reduce grows to fast, throw an overflow error
var path = "/" + db_name + "/_design/testbig/_view/reduce_too_big";