summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJoan Touzet <wohali@users.noreply.github.com>2019-01-18 13:36:59 -0500
committerGitHub <noreply@github.com>2019-01-18 13:36:59 -0500
commite0ce19b80cbe0dc90fc1c222c2b29853d5d6944e (patch)
treec240c088883c1d811a0db2205e39464c75a32989
parent85fbe71cdc9e3e7e5aef25aba61c3d7402bfa2b2 (diff)
parent0f833fd0c20ba0227028776c398ccd8cd13b93b3 (diff)
downloadcouchdb-add-shard-sync-api.tar.gz
Merge branch 'master' into add-shard-sync-apiadd-shard-sync-api
-rw-r--r--.travis.yml2
-rw-r--r--Jenkinsfile6
-rw-r--r--LICENSE2
-rw-r--r--Makefile20
-rw-r--r--Makefile.win21
-rw-r--r--NOTICE2
-rw-r--r--rebar.config.script6
-rw-r--r--rel/overlay/etc/vm.args15
-rw-r--r--src/chttpd/.travis.yml23
-rw-r--r--src/chttpd/src/chttpd.app.src13
-rw-r--r--src/chttpd/src/chttpd_sup.erl2
-rw-r--r--src/chttpd/test/chttpd_purge_tests.erl35
-rw-r--r--src/couch/.travis.yml23
-rw-r--r--src/couch/include/couch_db.hrl1
-rw-r--r--src/couch/rebar.config.script4
-rw-r--r--src/couch/src/couch.erl6
-rw-r--r--src/couch/src/couch_btree.erl7
-rw-r--r--src/couch/src/couch_compaction_daemon.erl10
-rw-r--r--src/couch/src/couch_db.erl10
-rw-r--r--src/couch/src/couch_db_plugin.erl12
-rw-r--r--src/couch/src/couch_debug.erl2
-rw-r--r--src/couch/src/couch_rand.erl30
-rw-r--r--src/couch/src/couch_server.erl28
-rw-r--r--src/couch/src/couch_users_db.erl4
-rw-r--r--src/couch/src/couch_util.erl9
-rw-r--r--src/couch/src/couch_uuids.erl2
-rw-r--r--src/couch/test/couch_changes_tests.erl176
-rw-r--r--src/couch/test/couch_db_plugin_tests.erl17
-rw-r--r--src/couch/test/couch_doc_json_tests.erl2
-rw-r--r--src/couch/test/couch_file_tests.erl2
-rw-r--r--src/couch/test/couch_flags_tests.erl16
-rw-r--r--src/couch/test/couch_key_tree_prop_tests.erl1
-rw-r--r--src/couch/test/couch_util_tests.erl2
-rw-r--r--src/couch/test/couchdb_mrview_tests.erl4
-rw-r--r--src/couch/test/couchdb_vhosts_tests.erl4
-rw-r--r--src/couch/test/json_stream_parse_tests.erl2
-rw-r--r--src/couch_epi/.travis.yml34
-rw-r--r--src/couch_epi/src/couch_epi.erl3
-rw-r--r--src/couch_epi/src/couch_epi_sup.erl107
-rw-r--r--src/couch_epi/test/couch_epi_basic_test.erl137
-rw-r--r--src/couch_index/.travis.yml43
-rw-r--r--src/couch_index/src/couch_index.app.src4
-rw-r--r--src/couch_log/.travis.yml43
-rw-r--r--src/couch_log/src/couch_log_writer_file.erl4
-rw-r--r--src/couch_log/src/couch_log_writer_syslog.erl5
-rw-r--r--src/couch_log/test/couch_log_test_util.erl11
-rw-r--r--src/couch_mrview/.travis.yml23
-rw-r--r--src/couch_mrview/src/couch_mrview.app.src10
-rw-r--r--src/couch_mrview/src/couch_mrview_test_util.erl1
-rw-r--r--src/couch_mrview/test/couch_mrview_index_changes_tests.erl4
-rw-r--r--src/couch_pse_tests/src/cpse_test_attachments.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_compaction.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_changes.erl17
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_docs.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_get_set_props.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_open_close_delete.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_docs.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_seqs.erl11
-rw-r--r--src/couch_pse_tests/src/cpse_test_read_write_docs.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_test_ref_counting.erl1
-rw-r--r--src/couch_pse_tests/src/cpse_util.erl4
-rw-r--r--src/couch_replicator/.travis.yml44
-rw-r--r--src/couch_replicator/src/couch_replicator_docs.erl8
-rw-r--r--src/couch_replicator/src/couch_replicator_fabric.erl4
-rw-r--r--src/couch_replicator/src/couch_replicator_manager.erl29
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler_job.erl4
-rw-r--r--src/couch_replicator/test/couch_replicator_compact_tests.erl2
-rw-r--r--src/couch_replicator/test/couch_replicator_connection_tests.erl2
-rw-r--r--src/couch_replicator/test/couch_replicator_modules_load_tests.erl45
-rw-r--r--src/couch_tests/test/couch_tests_app_tests.erl2
-rw-r--r--src/ddoc_cache/test/ddoc_cache_tutil.erl11
-rw-r--r--src/fabric/.travis.yml23
-rw-r--r--src/fabric/src/fabric.app.src23
-rw-r--r--src/fabric/src/fabric_dict.erl1
-rw-r--r--src/fabric/src/fabric_doc_open.erl4
-rw-r--r--src/fabric/src/fabric_doc_open_revs.erl118
-rw-r--r--src/fabric/src/fabric_doc_purge.erl2
-rw-r--r--src/fabric/src/fabric_doc_update.erl6
-rw-r--r--src/fabric/src/fabric_streams.erl251
-rw-r--r--src/fabric/src/fabric_util.erl88
-rw-r--r--src/fabric/src/fabric_view_all_docs.erl4
-rw-r--r--src/fabric/src/fabric_view_changes.erl4
-rw-r--r--src/fabric/src/fabric_view_map.erl4
-rw-r--r--src/fabric/src/fabric_view_reduce.erl4
-rw-r--r--src/global_changes/.travis.yml23
-rw-r--r--src/mango/.travis.yml29
-rw-r--r--src/mem3/src/mem3.app.src14
-rw-r--r--src/mem3/src/mem3.erl14
-rw-r--r--src/mem3/src/mem3_rep.erl1
-rw-r--r--src/mem3/src/mem3_sync_event_listener.erl2
-rw-r--r--src/mem3/test/mem3_seeds_test.erl13
-rw-r--r--src/rexi/src/rexi.app.src10
-rw-r--r--test/elixir/.credo.exs7
-rw-r--r--test/elixir/lib/couch/db_test.ex8
-rwxr-xr-xtest/elixir/run2
-rw-r--r--test/elixir/run.cmd2
-rw-r--r--test/elixir/test/all_docs_test.exs40
-rw-r--r--test/elixir/test/cluster_with_quorum_test.exs185
-rw-r--r--test/elixir/test/cluster_without_quorum_test.exs184
-rw-r--r--test/elixir/test/rewrite_test.exs6
-rw-r--r--test/elixir/test/security_validation_test.exs6
-rw-r--r--test/javascript/test_setup.js4
103 files changed, 1252 insertions, 972 deletions
diff --git a/.travis.yml b/.travis.yml
index c4769d632..45ff9edd2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,8 +6,6 @@ otp_release:
- 21.1
- 20.3
- 19.3
- - 18.3
- - 17.5
addons:
apt:
diff --git a/Jenkinsfile b/Jenkinsfile
index 46fb7238c..b7933436d 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -38,14 +38,14 @@ pipeline {
}
}
steps {
- // This image has the oldest Erlang we support, 16B03
- sh 'docker pull couchdbdev/debian-jessie-erlang-17.5.3:latest'
+ // This image has the oldest Erlang we support, 19
+ sh 'docker pull couchdbdev/debian-stretch-erlang-19.3.6'
timeout(time: 15, unit: "MINUTES") {
// https://github.com/jenkins-infra/jenkins.io/blob/master/Jenkinsfile#64
// We need the jenkins user mapped inside of the image
// npm config cache below is required because /home/jenkins doesn't
// ACTUALLY exist in the image
- withDockerContainer(image: 'couchdbdev/debian-jessie-erlang-17.5.3', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
+ withDockerContainer(image: 'couchdbdev/debian-stretch-erlang-19.3.6', args: '-e npm_config_cache=npm-cache -e HOME=. -v=/etc/passwd:/etc/passwd -v /etc/group:/etc/group') {
sh '''
set
rm -rf apache-couchdb-*
diff --git a/LICENSE b/LICENSE
index 83a1aa7df..78d943c8a 100644
--- a/LICENSE
+++ b/LICENSE
@@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2019 The Apache Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/Makefile b/Makefile
index 5af99740d..c6cdcc350 100644
--- a/Makefile
+++ b/Makefile
@@ -59,6 +59,11 @@ COUCHDB_VERSION = $(RELTAG)$(DIRTY)
endif
endif
+# needed to do text substitutions
+comma:= ,
+empty:=
+space:= $(empty) $(empty)
+
DESTDIR=
# Rebar options
@@ -80,6 +85,7 @@ DIALYZE_OPTS=$(shell echo "\
apps=$(apps) \
skip_deps=$(skip_deps) \
" | sed -e 's/[a-z]\{1,\}= / /g')
+EXUNIT_OPTS=$(subst $(comma),$(space),$(tests))
#ignore javascript tests
ignore_js_suites=
@@ -195,7 +201,19 @@ python-black-update: .venv/bin/black
.PHONY: elixir
elixir: elixir-check-formatted elixir-credo devclean
- @dev/run -a adm:pass --no-eval test/elixir/run
+ @dev/run -a adm:pass --no-eval 'test/elixir/run --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+
+.PHONY: elixir-cluster-without-quorum
+elixir-cluster-without-quorum: elixir-check-formatted elixir-credo devclean
+ @dev/run -n 3 -q -a adm:pass \
+ --degrade-cluster 2 \
+ --no-eval 'test/elixir/run --only without_quorum_test $(EXUNIT_OPTS)'
+
+.PHONY: elixir-cluster-with-quorum
+elixir-cluster-with-quorum: elixir-check-formatted elixir-credo devclean
+ @dev/run -n 3 -q -a adm:pass \
+ --degrade-cluster 1 \
+ --no-eval 'test/elixir/run --only with_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-check-formatted
elixir-check-formatted:
diff --git a/Makefile.win b/Makefile.win
index a42ec7027..fd1ab9b86 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -60,6 +60,11 @@ COUCHDB_VERSION = $(RELTAG)$(DIRTY)
endif
endif
+# needed to do text substitutions
+comma:= ,
+empty:=
+space:= $(empty) $(empty)
+
DESTDIR=
# Rebar options
@@ -72,6 +77,8 @@ tests=
EUNIT_OPTS=skip_deps=$(skip_deps)
DIALYZE_OPTS=skip_deps=$(skip_deps)
+EXUNIT_OPTS=$(subst $(comma),$(space),$(tests))
+
################################################################################
# Main commands
################################################################################
@@ -166,7 +173,19 @@ python-black-update: .venv/bin/black
.PHONY: elixir
elixir: elixir-check-formatted elixir-credo devclean
- @dev\run -a adm:pass --no-eval test\elixir\run.cmd
+ @dev\run -a adm:pass --no-eval 'test\elixir\run.cmd --exclude without_quorum_test --exclude with_quorum_test $(EXUNIT_OPTS)'
+
+.PHONY: elixir-cluster-without-quorum
+elixir-cluster-without-quorum: elixir-check-formatted elixir-credo devclean
+ @dev\run -n 3 -q -a adm:pass \
+ --degrade-cluster 2 \
+ --no-eval 'test\elixir\run --only without_quorum_test $(EXUNIT_OPTS)'
+
+.PHONY: elixir-cluster-with-quorum
+elixir-cluster-with-quorum: elixir-check-formatted elixir-credo devclean
+ @dev\run -n 3 -q -a adm:pass \
+ --degrade-cluster 1 \
+ --no-eval 'test\elixir\run --only with_quorum_test $(EXUNIT_OPTS)'
.PHONY: elixir-check-formatted
elixir-check-formatted:
diff --git a/NOTICE b/NOTICE
index f703af216..a753b8e6b 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
Apache CouchDB
-Copyright 2009-2016 The Apache Software Foundation
+Copyright 2009-2019 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
diff --git a/rebar.config.script b/rebar.config.script
index 5f17c29e0..019e6f182 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -27,12 +27,13 @@ SubDirs = [
"src/couch_log",
"src/chttpd",
"src/couch",
+ "src/couch_event",
+ "src/mem3",
"src/couch_index",
"src/couch_mrview",
"src/couch_replicator",
"src/couch_plugins",
"src/couch_pse_tests",
- "src/couch_event",
"src/couch_stats",
"src/couch_peruser",
"src/couch_tests",
@@ -40,7 +41,6 @@ SubDirs = [
"src/fabric",
"src/global_changes",
"src/mango",
- "src/mem3",
"src/rexi",
"src/setup",
"rel"
@@ -91,7 +91,7 @@ ErlOpts = case os:getenv("ERL_OPTS") of
end,
AddConfig = [
- {require_otp_vsn, "17|18|19|20|21"},
+ {require_otp_vsn, "19|20|21"},
{deps_dir, "src"},
{deps, lists:map(MakeDep, DepDescs)},
{sub_dirs, SubDirs},
diff --git a/rel/overlay/etc/vm.args b/rel/overlay/etc/vm.args
index e9f0737d0..86b51f89e 100644
--- a/rel/overlay/etc/vm.args
+++ b/rel/overlay/etc/vm.args
@@ -11,18 +11,27 @@
# the License.
# Each node in the system must have a unique name. These are specified through
-# the Erlang -name flag, which takes the form nodename@hostname. CouchDB
-# recommends the following values for this flag:
+# the Erlang -name flag, which takes the form:
+#
+# -name nodename@<FQDN>
+#
+# or
+#
+# -name nodename@<IP-ADDRESS>
+#
+# CouchDB recommends the following values for this flag:
#
# 1. If this is a single node, not in a cluster, use:
# -name couchdb@127.0.0.1
+#
# 2. If DNS is configured for this host, use the FQDN, such as:
# -name couchdb@my.host.domain.com
+#
# 3. If DNS isn't configured for this host, use IP addresses only, such as:
# -name couchdb@192.168.0.1
#
# Do not rely on tricks with /etc/hosts or libresolv to handle anything
-# other than the above 3 approaches correctly.
+# other than the above 3 approaches correctly. They will not work reliably.
#
# Multiple CouchDBs running on the same machine can use couchdb1@, couchdb2@,
# etc.
diff --git a/src/chttpd/.travis.yml b/src/chttpd/.travis.yml
deleted file mode 100644
index afe172124..000000000
--- a/src/chttpd/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: erlang
-
-otp_release:
- - 18.0
- - 17.5
- - R16B03-1
-
-before_install:
- - sudo apt-get update -qq
- - sudo apt-get -y install libmozjs-dev
- - git clone https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/chttpd
- - make
-
-script:
- - ./bin/rebar setup_eunit
- - BUILDDIR=`pwd` ./bin/rebar -r eunit apps=chttpd
-
-cache: apt
diff --git a/src/chttpd/src/chttpd.app.src b/src/chttpd/src/chttpd.app.src
index 55619940e..3526745df 100644
--- a/src/chttpd/src/chttpd.app.src
+++ b/src/chttpd/src/chttpd.app.src
@@ -13,19 +13,6 @@
{application, chttpd, [
{description, "HTTP interface for CouchDB cluster"},
{vsn, git},
- {modules, [
- chttpd,
- chttpd_app,
- chttpd_auth_cache,
- chttpd_config_listener,
- chttpd_db,
- chttpd_external,
- chttpd_misc,
- chttpd_rewrite,
- chttpd_show,
- chttpd_sup,
- chttpd_view
- ]},
{registered, [
chttpd_sup,
chttpd,
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index 369248ea6..d4bdb118c 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -88,7 +88,7 @@ lru_opts() ->
append_if_set({Key, Value}, Opts) when Value > 0 ->
[{Key, Value} | Opts];
-append_if_set({Key, 0}, Opts) ->
+append_if_set({_Key, 0}, Opts) ->
Opts;
append_if_set({Key, Value}, Opts) ->
couch_log:error(
diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/chttpd_purge_tests.erl
index b3acb0668..dbd73de1f 100644
--- a/src/chttpd/test/chttpd_purge_tests.erl
+++ b/src/chttpd/test/chttpd_purge_tests.erl
@@ -52,6 +52,11 @@ create_doc(Url, Id, Content) ->
[?CONTENT_JSON, ?AUTH], "{\"mr\": \"" ++ Content ++ "\"}").
+create_docs(Url, Docs) ->
+ test_request:post(Url ++ "/_bulk_docs",
+ [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({[{docs, Docs}]})).
+
+
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
@@ -70,6 +75,7 @@ purge_test_() ->
[
fun test_empty_purge_request/1,
fun test_ok_purge_request/1,
+ fun test_ok_purge_request_with_101_docid/1,
fun test_accepted_purge_request/1,
fun test_partial_purge_request/1,
fun test_mixed_purge_request/1,
@@ -137,6 +143,35 @@ test_ok_purge_request(Url) ->
end).
+test_ok_purge_request_with_101_docid(Url) ->
+ ?_test(begin
+ PurgedDocsNum = 101,
+ Docs = lists:foldl(fun(I, Acc) ->
+ Id = list_to_binary(integer_to_list(I)),
+ Doc = {[{<<"_id">>, Id}, {value, I}]},
+ [Doc | Acc]
+ end, [], lists:seq(1, PurgedDocsNum)),
+
+ {ok, _, _, Body} = create_docs(Url, Docs),
+ BodyJson = ?JSON_DECODE(Body),
+
+ PurgeBody = lists:map(fun({DocResp}) ->
+ Id = couch_util:get_value(<<"id">>, DocResp, undefined),
+ Rev = couch_util:get_value(<<"rev">>, DocResp, undefined),
+ {Id, [Rev]}
+ end, BodyJson),
+
+ ok = config:set("purge", "max_document_id_number", "101"),
+ try
+ {ok, Status, _, _} = test_request:post(Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({PurgeBody})),
+ ?assert(Status =:= 201 orelse Status =:= 202)
+ after
+ ok = config:delete("purge", "max_document_id_number")
+ end
+ end).
+
+
test_accepted_purge_request(Url) ->
?_test(begin
{ok, _, _, Body} = create_doc(Url, "doc1"),
diff --git a/src/couch/.travis.yml b/src/couch/.travis.yml
deleted file mode 100644
index c06d1b130..000000000
--- a/src/couch/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: erlang
-
-otp_release:
- - 18.0
- - 17.5
- - R16B03-1
-
-before_install:
- - sudo apt-get update -qq
- - sudo apt-get -y install libmozjs-dev
- - git clone https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/couch
- - make
-
-script:
- - ./bin/rebar setup_eunit
- - BUILDDIR=`pwd` ./bin/rebar -r eunit apps=couch
-
-cache: apt
diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl
index 801483aa1..830b9bcf4 100644
--- a/src/couch/include/couch_db.hrl
+++ b/src/couch/include/couch_db.hrl
@@ -49,6 +49,7 @@
-type branch() :: {Key::term(), Value::term(), Tree::term()}.
-type path() :: {Start::pos_integer(), branch()}.
+-type update_type() :: replicated_changes | interactive_edit.
-record(rev_info, {
rev,
diff --git a/src/couch/rebar.config.script b/src/couch/rebar.config.script
index 5321cff66..846e8b469 100644
--- a/src/couch/rebar.config.script
+++ b/src/couch/rebar.config.script
@@ -146,10 +146,6 @@ PortSpecs = case os:type() of
BaseSpecs
end,
PlatformDefines = [
- {platform_define, "^R16", 'PRE18TIMEFEATURES'},
- {platform_define, "^17", 'PRE18TIMEFEATURES'},
- {platform_define, "^R16", 'NORANDMODULE'},
- {platform_define, "^17", 'NORANDMODULE'},
{platform_define, "win32", 'WINDOWS'}
],
AddConfig = [
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl
index fd5c9e101..60a8b6626 100644
--- a/src/couch/src/couch.erl
+++ b/src/couch/src/couch.erl
@@ -12,7 +12,11 @@
-module(couch).
--compile(export_all).
+-export([
+ start/0,
+ stop/0,
+ restart/0
+]).
deps() ->
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
index ea224b1ab..daf846ba8 100644
--- a/src/couch/src/couch_btree.erl
+++ b/src/couch/src/couch_btree.erl
@@ -378,13 +378,12 @@ get_chunk_size() ->
end.
modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
- case RootPointerInfo of
+ {NodeType, NodeList} = case RootPointerInfo of
nil ->
- NodeType = kv_node,
- NodeList = [];
+ {kv_node, []};
_Tuple ->
Pointer = element(1, RootPointerInfo),
- {NodeType, NodeList} = get_node(Bt, Pointer)
+ get_node(Bt, Pointer)
end,
NodeTuple = list_to_tuple(NodeList),
diff --git a/src/couch/src/couch_compaction_daemon.erl b/src/couch/src/couch_compaction_daemon.erl
index fea505e42..115a9a897 100644
--- a/src/couch/src/couch_compaction_daemon.erl
+++ b/src/couch/src/couch_compaction_daemon.erl
@@ -167,15 +167,15 @@ maybe_compact_db(Parent, DbName, Config) ->
gen_server:call(Parent, {start, DbName}),
{ok, _} = couch_db:start_compact(Db),
TimeLeft = compact_time_left(Config),
- case Config#config.parallel_view_compact of
+ {ViewsCompactPid, ViewsMonRef} = case Config#config.parallel_view_compact of
true ->
- ViewsCompactPid = spawn_link(fun() ->
+ Pid = spawn_link(fun() ->
maybe_compact_views(DbName, DDocNames, Config)
end),
- ViewsMonRef = erlang:monitor(process, ViewsCompactPid);
+ Ref = erlang:monitor(process, Pid),
+ {Pid, Ref};
false ->
- ViewsCompactPid = nil,
- ViewsMonRef = nil
+ {nil, nil}
end,
case couch_db:wait_for_compaction(Db, TimeLeft) of
ok ->
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 0ae164d9b..2f63fcfe8 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -1148,7 +1148,7 @@ update_docs(Db, Docs0, Options, replicated_changes) ->
end,
{ok, DocBuckets, NonRepDocs, DocErrors}
- = before_docs_update(Db, Docs, PrepValidateFun),
+ = before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc))
|| Doc <- Bucket] || Bucket <- DocBuckets],
@@ -1166,7 +1166,7 @@ update_docs(Db, Docs0, Options, interactive_edit) ->
end,
{ok, DocBuckets, NonRepDocs, DocErrors}
- = before_docs_update(Db, Docs, PrepValidateFun),
+ = before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
if (AllOrNothing) and (DocErrors /= []) ->
RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
@@ -1309,7 +1309,7 @@ prepare_doc_summaries(Db, BucketList) ->
Bucket) || Bucket <- BucketList].
-before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun) ->
+before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) ->
increment_stat(Db, [couchdb, database_writes]),
% Separate _local docs from normal docs
@@ -1324,7 +1324,7 @@ before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun) ->
DocBuckets = lists:map(fun(Bucket) ->
lists:map(fun(Doc) ->
DocWithBody = couch_doc:with_ejson_body(Doc),
- couch_db_plugin:before_doc_update(Db, DocWithBody)
+ couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
end, Bucket)
end, BucketList),
@@ -1767,7 +1767,7 @@ is_system_db_name(DbName) when is_binary(DbName) ->
Suffix = filename:basename(Normalized),
case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
{<<".">>, Result} -> Result;
- {Prefix, false} -> false;
+ {_Prefix, false} -> false;
{Prefix, true} ->
ReOpts = [{capture,none}, dollar_endonly],
re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl
index e25866ec4..c3684c6e3 100644
--- a/src/couch/src/couch_db_plugin.erl
+++ b/src/couch/src/couch_db_plugin.erl
@@ -14,7 +14,7 @@
-export([
validate_dbname/3,
- before_doc_update/2,
+ before_doc_update/3,
after_doc_read/2,
validate_docid/1,
check_is_admin/1,
@@ -34,11 +34,13 @@
validate_dbname(DbName, Normalized, Default) ->
maybe_handle(validate_dbname, [DbName, Normalized], Default).
-before_doc_update(Db, Doc0) ->
+before_doc_update(Db, Doc0, UpdateType) ->
Fun = couch_db:get_before_doc_update_fun(Db),
- case with_pipe(before_doc_update, [Doc0, Db]) of
- [Doc1, _Db] when is_function(Fun) -> Fun(Doc1, Db);
- [Doc1, _Db] -> Doc1
+ case with_pipe(before_doc_update, [Doc0, Db, UpdateType]) of
+ [Doc1, _Db, UpdateType1] when is_function(Fun) ->
+ Fun(Doc1, Db, UpdateType1);
+ [Doc1, _Db, _UpdateType] ->
+ Doc1
end.
after_doc_read(Db, Doc0) ->
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 96c7a505f..9506a80ce 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -551,7 +551,7 @@ should_include_extra_info({InitialPid, _Processes, _Tree}) ->
?assert(lists:keymember(reductions, 1, Props)),
?assert(lists:keymember(message_queue_len, 1, Props)),
?assert(lists:keymember(memory, 1, Props));
- Port ->
+ _Port ->
ok
end,
Props
diff --git a/src/couch/src/couch_rand.erl b/src/couch/src/couch_rand.erl
index f5a8fc6af..67454b8ad 100644
--- a/src/couch/src/couch_rand.erl
+++ b/src/couch/src/couch_rand.erl
@@ -19,39 +19,9 @@
]).
--ifdef(NORANDMODULE).
-
-
-uniform() ->
- maybe_set_random_seed(),
- random:uniform().
-
-
-uniform(N) ->
- maybe_set_random_seed(),
- random:uniform(N).
-
-
-maybe_set_random_seed() ->
- case get(random_seed) of
- undefined ->
- {_, Sec, USec} = os:timestamp(),
- Seed = {erlang:phash2(self()), Sec, USec},
- random:seed(Seed);
- _ ->
- ok
- end.
-
-
--else.
-
-
uniform() ->
rand:uniform().
uniform(N) ->
rand:uniform(N).
-
-
--endif.
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index c4b7bf199..df447d1c7 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -160,20 +160,20 @@ maybe_add_sys_db_callbacks(DbName, Options) ->
IsUsersDb = path_ends_with(DbName, "_users")
orelse path_ends_with(DbName, UsersDbSuffix),
if
- DbName == DbsDbName ->
- [sys_db | Options];
- DbName == NodesDbName ->
- [sys_db | Options];
- IsReplicatorDb ->
- [{before_doc_update, fun couch_replicator_manager:before_doc_update/2},
- {after_doc_read, fun couch_replicator_manager:after_doc_read/2},
- sys_db | Options];
- IsUsersDb ->
- [{before_doc_update, fun couch_users_db:before_doc_update/2},
- {after_doc_read, fun couch_users_db:after_doc_read/2},
- sys_db | Options];
- true ->
- Options
+ DbName == DbsDbName ->
+ [sys_db | Options];
+ DbName == NodesDbName ->
+ [sys_db | Options];
+ IsReplicatorDb ->
+ [{before_doc_update, fun couch_replicator_docs:before_doc_update/3},
+ {after_doc_read, fun couch_replicator_docs:after_doc_read/2},
+ sys_db | Options];
+ IsUsersDb ->
+ [{before_doc_update, fun couch_users_db:before_doc_update/3},
+ {after_doc_read, fun couch_users_db:after_doc_read/2},
+ sys_db | Options];
+ true ->
+ Options
end.
path_ends_with(Path, Suffix) when is_binary(Suffix) ->
diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl
index c7b41f1fc..e6e1c4892 100644
--- a/src/couch/src/couch_users_db.erl
+++ b/src/couch/src/couch_users_db.erl
@@ -12,7 +12,7 @@
-module(couch_users_db).
--export([before_doc_update/2, after_doc_read/2, strip_non_public_fields/1]).
+-export([before_doc_update/3, after_doc_read/2, strip_non_public_fields/1]).
-include_lib("couch/include/couch_db.hrl").
@@ -39,7 +39,7 @@
% -> 404 // Not Found
% Else
% -> save_doc
-before_doc_update(Doc, Db) ->
+before_doc_update(Doc, Db, _UpdateType) ->
#user_ctx{name=Name} = couch_db:get_user_ctx(Db),
DocName = get_doc_name(Doc),
case (catch couch_db:check_is_admin(Db)) of
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index ddd24aab0..62e17ce36 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -737,18 +737,9 @@ process_dict_get(Pid, Key, DefaultValue) ->
end.
--ifdef(PRE18TIMEFEATURES).
-
-unique_monotonic_integer() ->
- {Ms, S, Us} = erlang:now(),
- (Ms * 1000000 + S) * 1000000 + Us.
-
--else.
-
unique_monotonic_integer() ->
erlang:unique_integer([monotonic, positive]).
--endif.
check_config_blacklist(Section) ->
case lists:member(Section, ?BLACKLIST_CONFIG_SECTIONS) of
diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl
index 5c7359b33..b9c03b502 100644
--- a/src/couch/src/couch_uuids.erl
+++ b/src/couch/src/couch_uuids.erl
@@ -95,7 +95,7 @@ new_prefix() ->
couch_util:to_hex((crypto:strong_rand_bytes(13))).
inc() ->
- crypto:rand_uniform(1, 16#ffe).
+ couch_rand:uniform(16#ffd).
state() ->
AlgoStr = config:get("uuids", "algorithm", "random"),
diff --git a/src/couch/test/couch_changes_tests.erl b/src/couch/test/couch_changes_tests.erl
index e4ea76167..0c2f5f91f 100644
--- a/src/couch/test/couch_changes_tests.erl
+++ b/src/couch/test/couch_changes_tests.erl
@@ -122,17 +122,17 @@ filter_by_design() ->
}
}.
-filter_by_custom_function() ->
- {
- "Filter function",
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_receive_heartbeats/1
- ]
- }
- }.
+%% filter_by_custom_function() ->
+%% {
+%% "Filter function",
+%% {
+%% foreach,
+%% fun setup/0, fun teardown/1,
+%% [
+%% fun should_receive_heartbeats/1
+%% ]
+%% }
+%% }.
filter_by_filter_function() ->
{
@@ -547,72 +547,72 @@ should_emit_only_design_documents({DbName, Revs}) ->
Rows2)
end).
-should_receive_heartbeats(_) ->
- {timeout, ?TEST_TIMEOUT div 1000,
- ?_test(
- begin
- DbName = ?tempdb(),
- Timeout = 100,
- {ok, Db} = create_db(DbName),
-
- {ok, _} = save_doc(Db, {[
- {<<"_id">>, <<"_design/filtered">>},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"foo">>, <<"function(doc) {
- return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">>
- }]}}
- ]}),
-
- ChangesArgs = #changes_args{
- filter = "filtered/foo",
- feed = "continuous",
- timeout = 10000,
- heartbeat = 1000
- },
- Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
-
- {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
-
- Heartbeats = get_heartbeats(Consumer),
- ?assert(Heartbeats > 0),
-
- {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
-
- Heartbeats2 = get_heartbeats(Consumer),
- ?assert(Heartbeats2 > Heartbeats),
-
- Rows = get_rows(Consumer),
- ?assertEqual(3, length(Rows)),
-
- {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
- timer:sleep(Timeout),
- {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
- timer:sleep(Timeout),
-
- Heartbeats3 = get_heartbeats(Consumer),
- ?assert(Heartbeats3 > Heartbeats2)
- end)}.
+%% should_receive_heartbeats(_) ->
+%% {timeout, ?TEST_TIMEOUT div 1000,
+%% ?_test(
+%% begin
+%% DbName = ?tempdb(),
+%% Timeout = 100,
+%% {ok, Db} = create_db(DbName),
+
+%% {ok, _} = save_doc(Db, {[
+%% {<<"_id">>, <<"_design/filtered">>},
+%% {<<"language">>, <<"javascript">>},
+%% {<<"filters">>, {[
+%% {<<"foo">>, <<"function(doc) {
+%% return ['doc10', 'doc11', 'doc12'].indexOf(doc._id) != -1;}">>
+%% }]}}
+%% ]}),
+
+%% ChangesArgs = #changes_args{
+%% filter = "filtered/foo",
+%% feed = "continuous",
+%% timeout = 10000,
+%% heartbeat = 1000
+%% },
+%% Consumer = spawn_consumer(DbName, ChangesArgs, {json_req, null}),
+
+%% {ok, _Rev1} = save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev4} = save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev5} = save_doc(Db, {[{<<"_id">>, <<"doc5">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev6} = save_doc(Db, {[{<<"_id">>, <<"doc6">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev7} = save_doc(Db, {[{<<"_id">>, <<"doc7">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev8} = save_doc(Db, {[{<<"_id">>, <<"doc8">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev9} = save_doc(Db, {[{<<"_id">>, <<"doc9">>}]}),
+
+%% Heartbeats = get_heartbeats(Consumer),
+%% ?assert(Heartbeats > 0),
+
+%% {ok, _Rev10} = save_doc(Db, {[{<<"_id">>, <<"doc10">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev11} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev12} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
+
+%% Heartbeats2 = get_heartbeats(Consumer),
+%% ?assert(Heartbeats2 > Heartbeats),
+
+%% Rows = get_rows(Consumer),
+%% ?assertEqual(3, length(Rows)),
+
+%% {ok, _Rev13} = save_doc(Db, {[{<<"_id">>, <<"doc13">>}]}),
+%% timer:sleep(Timeout),
+%% {ok, _Rev14} = save_doc(Db, {[{<<"_id">>, <<"doc14">>}]}),
+%% timer:sleep(Timeout),
+
+%% Heartbeats3 = get_heartbeats(Consumer),
+%% ?assert(Heartbeats3 > Heartbeats2)
+%% end)}.
should_filter_by_doc_attribute({DbName, _}) ->
?_test(
@@ -800,17 +800,17 @@ get_rows({Consumer, _}) ->
?assertNotEqual(timeout, Resp),
Resp.
-get_heartbeats({Consumer, _}) ->
- Ref = make_ref(),
- Consumer ! {get_heartbeats, Ref},
- Resp = receive
- {hearthbeats, Ref, HeartBeats} ->
- HeartBeats
- after ?TIMEOUT ->
- timeout
- end,
- ?assertNotEqual(timeout, Resp),
- Resp.
+%% get_heartbeats({Consumer, _}) ->
+%% Ref = make_ref(),
+%% Consumer ! {get_heartbeats, Ref},
+%% Resp = receive
+%% {hearthbeats, Ref, HeartBeats} ->
+%% HeartBeats
+%% after ?TIMEOUT ->
+%% timeout
+%% end,
+%% ?assertNotEqual(timeout, Resp),
+%% Resp.
clear_rows({Consumer, _}) ->
Ref = make_ref(),
diff --git a/src/couch/test/couch_db_plugin_tests.erl b/src/couch/test/couch_db_plugin_tests.erl
index 52533fec2..93551adbc 100644
--- a/src/couch/test/couch_db_plugin_tests.erl
+++ b/src/couch/test/couch_db_plugin_tests.erl
@@ -14,7 +14,7 @@
-export([
validate_dbname/2,
- before_doc_update/2,
+ before_doc_update/3,
after_doc_read/2,
validate_docid/1,
check_is_admin/1,
@@ -58,9 +58,9 @@ validate_dbname({false, _Db}, _) -> {decided, false};
validate_dbname({fail, _Db}, _) -> throw(validate_dbname);
validate_dbname({pass, _Db}, _) -> no_decision.
-before_doc_update({fail, _Doc}, _Db) -> throw(before_doc_update);
-before_doc_update({true, Doc}, Db) -> [{true, [before_doc_update|Doc]}, Db];
-before_doc_update({false, Doc}, Db) -> [{false, Doc}, Db].
+before_doc_update({fail, _Doc}, _Db, interactive_edit) -> throw(before_doc_update);
+before_doc_update({true, Doc}, Db, interactive_edit) -> [{true, [before_doc_update|Doc]}, Db, interactive_edit];
+before_doc_update({false, Doc}, Db, interactive_edit) -> [{false, Doc}, Db, interactive_edit].
after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read);
after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read|Doc]}, Db];
@@ -134,17 +134,20 @@ validate_dbname_pass() ->
before_doc_update_match() ->
?assertMatch(
{true, [before_doc_update, doc]},
- couch_db_plugin:before_doc_update(fake_db(), {true, [doc]})).
+ couch_db_plugin:before_doc_update(
+ fake_db(), {true, [doc]}, interactive_edit)).
before_doc_update_no_match() ->
?assertMatch(
{false, [doc]},
- couch_db_plugin:before_doc_update(fake_db(), {false, [doc]})).
+ couch_db_plugin:before_doc_update(
+ fake_db(), {false, [doc]}, interactive_edit)).
before_doc_update_throw() ->
?assertThrow(
before_doc_update,
- couch_db_plugin:before_doc_update(fake_db(), {fail, [doc]})).
+ couch_db_plugin:before_doc_update(
+ fake_db(), {fail, [doc]}, interactive_edit)).
after_doc_read_match() ->
diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/couch_doc_json_tests.erl
index bcff0646a..cc5dc3222 100644
--- a/src/couch/test/couch_doc_json_tests.erl
+++ b/src/couch/test/couch_doc_json_tests.erl
@@ -51,6 +51,8 @@ json_doc_test_() ->
fun setup/0, fun teardown/1,
fun(_) ->
[{"Document from JSON", [
+ from_json_with_dbname_error_cases(),
+ from_json_with_db_name_success_cases(),
from_json_success_cases(),
from_json_error_cases()
]},
diff --git a/src/couch/test/couch_file_tests.erl b/src/couch/test/couch_file_tests.erl
index a387615dd..34c1a1654 100644
--- a/src/couch/test/couch_file_tests.erl
+++ b/src/couch/test/couch_file_tests.erl
@@ -418,7 +418,7 @@ nuke_dir_test_() ->
File0 = ?tempfile() ++ ".couch",
RootDir = filename:dirname(File0),
BaseName = filename:basename(File0),
- Seed = crypto:rand_uniform(1000000000, 9999999999),
+ Seed = couch_rand:uniform(8999999999) + 999999999,
DDocDir = io_lib:format("db.~b_design", [Seed]),
ViewDir = filename:join([RootDir, DDocDir]),
file:make_dir(ViewDir),
diff --git a/src/couch/test/couch_flags_tests.erl b/src/couch/test/couch_flags_tests.erl
index a467265cb..cda7639bf 100644
--- a/src/couch/test/couch_flags_tests.erl
+++ b/src/couch/test/couch_flags_tests.erl
@@ -124,14 +124,14 @@ is_enabled() ->
?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))}
]}].
-match_performance() ->
- [{"match_performance", [
- ?_test(begin
- ?debugTime("1 million of operations took", lists:foreach(fun(_) ->
- couch_flags:is_enabled(bar, "shards/test/exact")
- end, lists:seq(1, 1000000)))
- end)
- ]}].
+%% match_performance() ->
+%% [{"match_performance", [
+%% ?_test(begin
+%% ?debugTime("1 million of operations took", lists:foreach(fun(_) ->
+%% couch_flags:is_enabled(bar, "shards/test/exact")
+%% end, lists:seq(1, 1000000)))
+%% end)
+%% ]}].
test_config() ->
diff --git a/src/couch/test/couch_key_tree_prop_tests.erl b/src/couch/test/couch_key_tree_prop_tests.erl
index 604a8285a..f8146926a 100644
--- a/src/couch/test/couch_key_tree_prop_tests.erl
+++ b/src/couch/test/couch_key_tree_prop_tests.erl
@@ -14,7 +14,6 @@
-include_lib("triq/include/triq.hrl").
-triq(eunit).
--include_lib("eunit/include/eunit.hrl").
-define(SIZE_REDUCTION, 3). % How much to reduce size with tree depth.
-define(MAX_BRANCHES, 4). % Maximum number of branches.
diff --git a/src/couch/test/couch_util_tests.erl b/src/couch/test/couch_util_tests.erl
index a0e923872..3e145c4f6 100644
--- a/src/couch/test/couch_util_tests.erl
+++ b/src/couch/test/couch_util_tests.erl
@@ -87,7 +87,7 @@ flush_test() ->
_IntsToAGazillion = lists:seq(1, 200000),
_LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
lists:seq(1, 500000)),
- _BigBin = list_to_binary(_LotsOfData),
+ _ = list_to_binary(_LotsOfData),
%% Allocation 200K tuples puts us above the memory threshold
%% Originally, there should be:
diff --git a/src/couch/test/couchdb_mrview_tests.erl b/src/couch/test/couchdb_mrview_tests.erl
index 2549528be..1c96a0ae0 100644
--- a/src/couch/test/couchdb_mrview_tests.erl
+++ b/src/couch/test/couchdb_mrview_tests.erl
@@ -151,7 +151,7 @@ should_cleanup_index_files(_PortType, {Host, DbName}) ->
".[0-9]*_design/mrview/*"
],
ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_view/view1",
- {ok, Status0, _Headers0, Body0} = test_request:get(ReqUrl, [?AUTH]),
+ {ok, _Status0, _Headers0, _Body0} = test_request:get(ReqUrl, [?AUTH]),
FileList0 = filelib:wildcard(IndexWildCard),
?assertNotEqual([], FileList0),
@@ -169,7 +169,7 @@ should_cleanup_index_files(_PortType, {Host, DbName}) ->
?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))),
CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup",
- {ok, Status1, _Headers1, Body1} = test_request:post(
+ {ok, _Status1, _Headers1, _Body1} = test_request:post(
CleanupUrl, [], <<>>, [?AUTH]),
test_util:wait(fun() ->
IndexFiles = filelib:wildcard(IndexWildCard),
diff --git a/src/couch/test/couchdb_vhosts_tests.erl b/src/couch/test/couchdb_vhosts_tests.erl
index 2562a0653..1c4117215 100644
--- a/src/couch/test/couchdb_vhosts_tests.erl
+++ b/src/couch/test/couchdb_vhosts_tests.erl
@@ -270,7 +270,3 @@ should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
{reason, ?iofmt("Request failed: ~p", [Else])}]})
end
end).
-
-ensure_index_file() ->
- Body = <<"<!DOCTYPE html>\n<html>\n<body>\nHello world\n</body>\n</html>">>,
- file:write_file(filename:join([?TEMPDIR, "index.html"]), Body).
diff --git a/src/couch/test/json_stream_parse_tests.erl b/src/couch/test/json_stream_parse_tests.erl
index ffcf9185a..e690d7728 100644
--- a/src/couch/test/json_stream_parse_tests.erl
+++ b/src/couch/test/json_stream_parse_tests.erl
@@ -136,7 +136,7 @@ single_byte_data_fun([H | T]) ->
multiple_bytes_data_fun([]) ->
done;
multiple_bytes_data_fun(L) ->
- N = crypto:rand_uniform(0, 7),
+ N = couch_rand:uniform(7) - 1,
{Part, Rest} = split(L, N),
{list_to_binary(Part), fun() -> multiple_bytes_data_fun(Rest) end}.
diff --git a/src/couch_epi/.travis.yml b/src/couch_epi/.travis.yml
deleted file mode 100644
index 236bcb5cc..000000000
--- a/src/couch_epi/.travis.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-language: erlang
-
-otp_release:
- - 18.1
- - 17.5
- - R16B03-1
-
-matrix:
- allow_failures:
- - otp_release: R16B03-1
-
-sudo: false
-
-addons:
- apt:
- packages:
- - libmozjs185-dev
-
-before_install:
- - git clone https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/couch_epi
- - make
-
-script:
- - ./bin/rebar setup_eunit
- - BUILDDIR=`pwd` ./bin/rebar -r eunit apps=couch_epi skip_deps=couch_log
- - ./bin/rebar -r build-plt apps=couch_epi skip_deps=couch_log
- - ./bin/rebar -r dialyze apps=couch_epi skip_deps=couch_log
-
-cache: apt
diff --git a/src/couch_epi/src/couch_epi.erl b/src/couch_epi/src/couch_epi.erl
index 2ce4592d2..0e5c233ab 100644
--- a/src/couch_epi/src/couch_epi.erl
+++ b/src/couch_epi/src/couch_epi.erl
@@ -58,9 +58,6 @@
-type apply_opts() :: [apply_opt()].
--type data_spec_opt()
- :: {interval, pos_integer()}.
-
-type data_spec()
:: {static_module, module()}
| {callback_module, module()}
diff --git a/src/couch_epi/src/couch_epi_sup.erl b/src/couch_epi/src/couch_epi_sup.erl
index 509f5a1c2..218db5445 100644
--- a/src/couch_epi/src/couch_epi_sup.erl
+++ b/src/couch_epi/src/couch_epi_sup.erl
@@ -37,6 +37,11 @@
%% Supervisor callbacks
-export([init/1]).
+%% For testing
+-export([
+ plugin_childspecs/3
+]).
+
%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
@@ -132,105 +137,3 @@ merge([], Children) ->
Children;
merge([{Id, _, _, _, _, _} = Spec | Rest], Children) ->
merge(Rest, lists:keystore(Id, 1, Children, Spec)).
-
-
-%% ------------------------------------------------------------------
-%% Tests
-%% ------------------------------------------------------------------
-
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-%% ----
-%% BEGIN couch_epi_plugin behaviour callbacks
-
--compile([export_all]).
-
-app() -> test_app.
-providers() ->
- [
- {my_service, provider1},
- {my_service, provider2}
- ].
-
-services() ->
- [
- {my_service, ?MODULE}
- ].
-
-data_providers() ->
- [
- {{test_app, descriptions}, {static_module, ?MODULE}, [{interval, 100}]}
- ].
-
-data_subscriptions() ->
- [
- {test_app, descriptions}
- ].
-
-processes() ->
- [
- {?MODULE, [?CHILD(extra_process, worker)]},
- {?MODULE, [{to_replace, {new, start_link, [bar]},
- permanent, 5000, worker, [bar]}]}
- ].
-
-notify(_Key, _OldData, _NewData) ->
- ok.
-
-%% END couch_epi_plugin behaviour callbacks
-%% ----
-
-parse_child_id(Id) when is_atom(Id) ->
- Id;
-parse_child_id(Id) ->
- ["couch_epi_codechange_monitor", ServiceName, KindStr] = string:tokens(Id, "|"),
- Kind = list_to_atom(KindStr),
- case string:tokens(ServiceName, ":") of
- [ServiceId, Key] ->
- {{list_to_atom(ServiceId), list_to_atom(Key)}, Kind};
- [Key] ->
- {list_to_atom(Key), Kind}
- end.
-
-basic_test() ->
- Expected = lists:sort([
- {extra_process, [], [extra_process]},
- {to_replace, [bar], [bar]},
- {{my_service, providers},
- [couch_epi_functions_gen_my_service],
- [couch_epi_codechange_monitor, couch_epi_functions_gen_my_service,
- provider1, provider2]},
- {{my_service, services},
- [couch_epi_functions_gen_my_service],
- [couch_epi_codechange_monitor, couch_epi_functions_gen_my_service,
- couch_epi_sup]},
- {{{test_app, descriptions}, data_subscriptions},
- [couch_epi_data_gen_test_app_descriptions],
- [couch_epi_codechange_monitor,
- couch_epi_data_gen_test_app_descriptions, couch_epi_sup]},
- {{{test_app, descriptions}, data_providers},
- [couch_epi_data_gen_test_app_descriptions],
- [couch_epi_codechange_monitor, couch_epi_data_gen_test_app_descriptions,
- couch_epi_sup]}
- ]),
-
- ToReplace = {to_replace, {old, start_link, [foo]}, permanent, 5000, worker, [foo]},
- Children = lists:sort(plugin_childspecs(?MODULE, [?MODULE], [ToReplace])),
- Results = [
- {parse_child_id(Id), Args, lists:sort(Modules)}
- || {Id, {_M, _F, Args}, _, _, _, Modules} <- Children
- ],
-
- Tests = lists:zip(Expected, Results),
- [?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
-
- ExpectedChild = {to_replace, {new, start_link, [bar]},
- permanent, 5000, worker, [bar]},
- ?assertEqual(
- ExpectedChild,
- lists:keyfind(to_replace, 1, Children)),
-
- ok.
-
--endif.
diff --git a/src/couch_epi/test/couch_epi_basic_test.erl b/src/couch_epi/test/couch_epi_basic_test.erl
new file mode 100644
index 000000000..587d1564e
--- /dev/null
+++ b/src/couch_epi/test/couch_epi_basic_test.erl
@@ -0,0 +1,137 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_epi_basic_test).
+
+-export([
+ start_link/0
+]).
+
+-export([
+ app/0,
+ providers/0,
+ services/0,
+ data_providers/0,
+ data_subscriptions/0,
+ processes/0,
+ notify/3
+]).
+
+-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
+
+
+start_link() -> ok.
+
+
+%% BEGIN couch_epi_plugin behaviour callbacks
+
+
+app() -> test_app.
+
+
+providers() ->
+ [
+ {my_service, provider1},
+ {my_service, provider2}
+ ].
+
+
+services() ->
+ [
+ {my_service, ?MODULE}
+ ].
+
+
+data_providers() ->
+ [
+ {{test_app, descriptions}, {static_module, ?MODULE}, [{interval, 100}]}
+ ].
+
+
+data_subscriptions() ->
+ [
+ {test_app, descriptions}
+ ].
+
+
+processes() ->
+ [
+ {?MODULE, [?CHILD(extra_process, worker)]},
+ {?MODULE, [{to_replace, {new, start_link, [bar]},
+ permanent, 5000, worker, [bar]}]}
+ ].
+
+
+notify(_Key, _OldData, _NewData) ->
+ ok.
+
+
+%% END couch_epi_plugin behaviour callbacks
+
+
+parse_child_id(Id) when is_atom(Id) ->
+ Id;
+parse_child_id(Id) ->
+ ["couch_epi_codechange_monitor", ServiceName, KindStr]
+ = string:tokens(Id, "|"),
+ Kind = list_to_atom(KindStr),
+ case string:tokens(ServiceName, ":") of
+ [ServiceId, Key] ->
+ {{list_to_atom(ServiceId), list_to_atom(Key)}, Kind};
+ [Key] ->
+ {list_to_atom(Key), Kind}
+ end.
+
+
+-include_lib("eunit/include/eunit.hrl").
+
+basic_test() ->
+ Expected = lists:sort([
+ {extra_process, [], [extra_process]},
+ {to_replace, [bar], [bar]},
+ {{my_service, providers},
+ [couch_epi_functions_gen_my_service],
+ [couch_epi_codechange_monitor, couch_epi_functions_gen_my_service,
+ provider1, provider2]},
+ {{my_service, services},
+ [couch_epi_functions_gen_my_service],
+ lists:sort([couch_epi_codechange_monitor,
+ couch_epi_functions_gen_my_service, ?MODULE])},
+ {{{test_app, descriptions}, data_subscriptions},
+ [couch_epi_data_gen_test_app_descriptions],
+ lists:sort([couch_epi_codechange_monitor,
+ couch_epi_data_gen_test_app_descriptions, ?MODULE])},
+ {{{test_app, descriptions}, data_providers},
+ [couch_epi_data_gen_test_app_descriptions],
+ lists:sort([couch_epi_codechange_monitor,
+ couch_epi_data_gen_test_app_descriptions, ?MODULE])}
+ ]),
+
+ ToReplace = {to_replace,
+ {old, start_link, [foo]}, permanent, 5000, worker, [foo]},
+ Children = lists:sort(couch_epi_sup:plugin_childspecs(
+ ?MODULE, [?MODULE], [ToReplace])),
+ Results = [
+ {parse_child_id(Id), Args, lists:sort(Modules)}
+ || {Id, {_M, _F, Args}, _, _, _, Modules} <- Children
+ ],
+
+ Tests = lists:zip(Expected, Results),
+ [?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
+
+ ExpectedChild = {to_replace, {new, start_link, [bar]},
+ permanent, 5000, worker, [bar]},
+ ?assertEqual(
+ ExpectedChild,
+ lists:keyfind(to_replace, 1, Children)),
+
+ ok.
diff --git a/src/couch_index/.travis.yml b/src/couch_index/.travis.yml
deleted file mode 100644
index ee4664edf..000000000
--- a/src/couch_index/.travis.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-language: erlang
-
-otp_release:
- - 18.1
- - 18.0
- - 17.5
- - R16B03-1
-
-sudo: false
-
-addons:
- apt:
- packages:
- - libmozjs185-dev
-
-before_install:
- - git clone --depth 1 https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/couch_index
- - make
-
-script:
- - make eunit apps=couch_index skip_deps=couch_epi,couch_log
-
-cache: apt
diff --git a/src/couch_index/src/couch_index.app.src b/src/couch_index/src/couch_index.app.src
index fd523b252..3aa92ba5d 100644
--- a/src/couch_index/src/couch_index.app.src
+++ b/src/couch_index/src/couch_index.app.src
@@ -13,10 +13,6 @@
{application, couch_index, [
{description, "CouchDB Secondary Index Manager"},
{vsn, git},
- {modules, [
- couch_index,
- couch_index_server
- ]},
{registered, [couch_index_server]},
{applications, [kernel, stdlib, couch_epi]},
{mod, {couch_index_app, []}}
diff --git a/src/couch_log/.travis.yml b/src/couch_log/.travis.yml
deleted file mode 100644
index 621c9cba6..000000000
--- a/src/couch_log/.travis.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-language: erlang
-
-otp_release:
- - 18.1
- - 18.0
- - 17.5
- - R16B03-1
-
-sudo: false
-
-addons:
- apt:
- packages:
- - libmozjs185-dev
-
-before_install:
- - git clone --depth 1 https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/couch_log
- - make
-
-script:
- - make eunit apps=couch_log skip_deps=couch_epi
-
-cache: apt
diff --git a/src/couch_log/src/couch_log_writer_file.erl b/src/couch_log/src/couch_log_writer_file.erl
index fb01363fd..1fe35a8ab 100644
--- a/src/couch_log/src/couch_log_writer_file.erl
+++ b/src/couch_log/src/couch_log_writer_file.erl
@@ -37,7 +37,9 @@
-ifdef(TEST).
--compile(export_all).
+-export([
+ maybe_reopen/1
+]).
-endif.
diff --git a/src/couch_log/src/couch_log_writer_syslog.erl b/src/couch_log/src/couch_log_writer_syslog.erl
index d918bb783..e3a6fc4b6 100644
--- a/src/couch_log/src/couch_log_writer_syslog.erl
+++ b/src/couch_log/src/couch_log_writer_syslog.erl
@@ -39,7 +39,10 @@
-ifdef(TEST).
--compile(export_all).
+-export([
+ get_facility/1,
+ get_level/1
+]).
-endif.
diff --git a/src/couch_log/test/couch_log_test_util.erl b/src/couch_log/test/couch_log_test_util.erl
index c7fd34f2d..00f3981fc 100644
--- a/src/couch_log/test/couch_log_test_util.erl
+++ b/src/couch_log/test/couch_log_test_util.erl
@@ -11,8 +11,17 @@
% the License.
-module(couch_log_test_util).
--compile(export_all).
+-export([
+ start/0,
+ stop/1,
+ last_log/0,
+ last_log_key/0,
+ wait_for_config/0,
+ with_config_listener/1,
+ with_level/2,
+ with_meck/2
+]).
-include("couch_log.hrl").
diff --git a/src/couch_mrview/.travis.yml b/src/couch_mrview/.travis.yml
deleted file mode 100644
index 142a2aad0..000000000
--- a/src/couch_mrview/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: erlang
-
-otp_release:
- - 18.0
- - 17.5
- - R16B03-1
-
-before_install:
- - sudo apt-get update -qq
- - sudo apt-get -y install libmozjs-dev
- - git clone https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/couch_mrview
- - make
-
-script:
- - ./bin/rebar setup_eunit
- - BUILDDIR=`pwd` ./bin/rebar -r eunit apps=couch_mrview
-
-cache: apt
diff --git a/src/couch_mrview/src/couch_mrview.app.src b/src/couch_mrview/src/couch_mrview.app.src
index 9c95ed0e5..735d1f8a0 100644
--- a/src/couch_mrview/src/couch_mrview.app.src
+++ b/src/couch_mrview/src/couch_mrview.app.src
@@ -13,16 +13,6 @@
{application, couch_mrview, [
{description, "CouchDB Map/Reduce Views"},
{vsn, git},
- {modules, [
- couch_mrview,
- couch_mrview_compactor,
- couch_mrview_http,
- couch_mrview_index,
- couch_mrview_show,
- couch_mrview_test_util,
- couch_mrview_updater,
- couch_mrview_util
- ]},
{registered, []},
{applications, [kernel, stdlib, couch_index, couch_stats, ioq]}
]}.
diff --git a/src/couch_mrview/src/couch_mrview_test_util.erl b/src/couch_mrview/src/couch_mrview_test_util.erl
index ac298263d..a6242cde7 100644
--- a/src/couch_mrview/src/couch_mrview_test_util.erl
+++ b/src/couch_mrview/src/couch_mrview_test_util.erl
@@ -13,6 +13,7 @@
-module(couch_mrview_test_util).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
diff --git a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl b/src/couch_mrview/test/couch_mrview_index_changes_tests.erl
index 2701e0c22..f0be1b9b1 100644
--- a/src/couch_mrview/test/couch_mrview_index_changes_tests.erl
+++ b/src/couch_mrview/test/couch_mrview_index_changes_tests.erl
@@ -186,15 +186,13 @@ test_stream(Db) ->
test_indexer(Db) ->
Result = run_query(Db, [{since, 14}, refresh]),
Expect = {ok, 15, [{{15,14,<<"14">>},14}]},
- ?_assertEqual(Result, Expect),
{ok, Db1} = save_doc(Db, 15),
timer:sleep(1500),
Result1 = run_query(Db1, [{since, 14}], false),
Expect1 = {ok, 16, [{{15,14,<<"14">>},14},
{{16,15,<<"15">>},15}]},
- ?_assertEqual(Result1, Expect1),
- ok.
+ ?_assert(Result == Expect andalso Result1 == Expect1).
save_doc(Db, Id) ->
diff --git a/src/couch_pse_tests/src/cpse_test_attachments.erl b/src/couch_pse_tests/src/cpse_test_attachments.erl
index 61ada38c4..8c454ecb6 100644
--- a/src/couch_pse_tests/src/cpse_test_attachments.erl
+++ b/src/couch_pse_tests/src/cpse_test_attachments.erl
@@ -12,6 +12,7 @@
-module(cpse_test_attachments).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_compaction.erl b/src/couch_pse_tests/src/cpse_test_compaction.erl
index d00611101..c8a2c1a7d 100644
--- a/src/couch_pse_tests/src/cpse_test_compaction.erl
+++ b/src/couch_pse_tests/src/cpse_test_compaction.erl
@@ -12,6 +12,7 @@
-module(cpse_test_compaction).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_fold_changes.erl b/src/couch_pse_tests/src/cpse_test_fold_changes.erl
index 8ee74f041..436396276 100644
--- a/src/couch_pse_tests/src/cpse_test_fold_changes.erl
+++ b/src/couch_pse_tests/src/cpse_test_fold_changes.erl
@@ -12,6 +12,7 @@
-module(cpse_test_fold_changes).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
@@ -170,25 +171,11 @@ do_mutation_ordering(Db, Seq, [{DocId, _OldSeq} | Rest], DocSeqAcc) ->
shuffle(List) ->
- random:seed(os:timestamp()),
- Paired = [{random:uniform(), I} || I <- List],
+ Paired = [{couch_rand:uniform(), I} || I <- List],
Sorted = lists:sort(Paired),
[I || {_, I} <- Sorted].
-remove_random(List) ->
- Pos = random:uniform(length(List)),
- remove_random(Pos, List).
-
-
-remove_random(1, [Item | Rest]) ->
- {Item, Rest};
-
-remove_random(N, [Skip | Rest]) when N > 1 ->
- {Item, Tail} = remove_random(N - 1, Rest),
- {Item, [Skip | Tail]}.
-
-
fold_fun(#full_doc_info{id=Id, update_seq=Seq}, Acc) ->
{ok, [{Id, Seq} | Acc]}.
diff --git a/src/couch_pse_tests/src/cpse_test_fold_docs.erl b/src/couch_pse_tests/src/cpse_test_fold_docs.erl
index 09fbd26d0..d43930c4a 100644
--- a/src/couch_pse_tests/src/cpse_test_fold_docs.erl
+++ b/src/couch_pse_tests/src/cpse_test_fold_docs.erl
@@ -12,6 +12,7 @@
-module(cpse_test_fold_docs).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
index 42bc536d2..4826c5d9c 100644
--- a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
+++ b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
@@ -12,6 +12,7 @@
-module(cpse_test_fold_purge_infos).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_get_set_props.erl b/src/couch_pse_tests/src/cpse_test_get_set_props.erl
index 1f8684475..02f0eb531 100644
--- a/src/couch_pse_tests/src/cpse_test_get_set_props.erl
+++ b/src/couch_pse_tests/src/cpse_test_get_set_props.erl
@@ -12,6 +12,7 @@
-module(cpse_test_get_set_props).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl b/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
index c19d0ee4c..d9b589fd6 100644
--- a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
+++ b/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
@@ -12,6 +12,7 @@
-module(cpse_test_open_close_delete).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_purge_docs.erl b/src/couch_pse_tests/src/cpse_test_purge_docs.erl
index 34bd34df6..1788eecd1 100644
--- a/src/couch_pse_tests/src/cpse_test_purge_docs.erl
+++ b/src/couch_pse_tests/src/cpse_test_purge_docs.erl
@@ -12,6 +12,7 @@
-module(cpse_test_purge_docs).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl b/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
index a943fc487..f363b76bb 100644
--- a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
+++ b/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
@@ -98,10 +98,13 @@ cpse_increment_purge_multiple_times(DbName) ->
cpse_increment_purge_seq_on_partial_purge(DbName) ->
- Doc1 = {[{'_id', foo}, {vsn, 1}]},
- Doc2 = {[{'_id', foo}, {vsn, 2}]},
- {ok, Rev1} = cpse_util:save_doc(DbName, Doc1),
- {ok, _Rev2} = cpse_util:save_doc(DbName, Doc2, [replicated_changes]),
+ {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, <<"1.1">>}]}),
+ Update = {[
+ {'_id', foo1},
+ {'_rev', couch_doc:rev_to_str({1, [crypto:hash(md5, <<"1.2">>)]})},
+ {vsn, <<"1.2">>}
+ ]},
+ {ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
{doc_count, 1},
diff --git a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl b/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
index 84bf9f323..fd830d812 100644
--- a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
+++ b/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
@@ -12,6 +12,7 @@
-module(cpse_test_read_write_docs).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_test_ref_counting.erl b/src/couch_pse_tests/src/cpse_test_ref_counting.erl
index 2a0e4c202..cb115a785 100644
--- a/src/couch_pse_tests/src/cpse_test_ref_counting.erl
+++ b/src/couch_pse_tests/src/cpse_test_ref_counting.erl
@@ -12,6 +12,7 @@
-module(cpse_test_ref_counting).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_pse_tests/src/cpse_util.erl b/src/couch_pse_tests/src/cpse_util.erl
index d3e125924..1bf24314e 100644
--- a/src/couch_pse_tests/src/cpse_util.erl
+++ b/src/couch_pse_tests/src/cpse_util.erl
@@ -12,6 +12,7 @@
-module(cpse_util).
-compile(export_all).
+-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
@@ -30,7 +31,8 @@
cpse_test_purge_replication,
cpse_test_purge_bad_checkpoints,
cpse_test_compaction,
- cpse_test_ref_counting
+ cpse_test_ref_counting,
+ cpse_test_purge_seqs
]).
diff --git a/src/couch_replicator/.travis.yml b/src/couch_replicator/.travis.yml
deleted file mode 100644
index ed8f466bd..000000000
--- a/src/couch_replicator/.travis.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-language: erlang
-
-otp_release:
- - 18.2
- - 18.1
- - 18.0
- - 17.5
- - R16B03-1
-
-sudo: false
-
-addons:
- apt:
- packages:
- - libmozjs185-dev
-
-before_install:
- - git clone --depth 1 https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/couch_replicator
- - make
-
-script:
- - make eunit apps=couch_replicator skip_deps=couch_epi,couch_log
-
-cache: apt
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 013475683..bbf9694d7 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -18,7 +18,7 @@
parse_rep_db/3,
parse_rep_doc_without_id/1,
parse_rep_doc_without_id/2,
- before_doc_update/2,
+ before_doc_update/3,
after_doc_read/2,
ensure_rep_db_exists/0,
ensure_rep_ddoc_exists/1,
@@ -615,10 +615,10 @@ ssl_verify_options(false) ->
[{verify, verify_none}].
--spec before_doc_update(#doc{}, Db::any()) -> #doc{}.
-before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
+-spec before_doc_update(#doc{}, Db::any(), couch_db:update_type()) -> #doc{}.
+before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
Doc;
-before_doc_update(#doc{body = {Body}} = Doc, Db) ->
+before_doc_update(#doc{body = {Body}} = Doc, Db, _UpdateType) ->
#user_ctx{
roles = Roles,
name = Name
diff --git a/src/couch_replicator/src/couch_replicator_fabric.erl b/src/couch_replicator/src/couch_replicator_fabric.erl
index 6998b2803..1650105b5 100644
--- a/src/couch_replicator/src/couch_replicator_fabric.erl
+++ b/src/couch_replicator/src/couch_replicator_fabric.erl
@@ -27,12 +27,12 @@ docs(DbName, Options, QueryArgs, Callback, Acc) ->
Shards, couch_replicator_fabric_rpc, docs, [Options, QueryArgs]),
RexiMon = fabric_util:create_monitors(Workers0),
try
- case fabric_util:stream_start(Workers0, #shard.ref) of
+ case fabric_streams:start(Workers0, #shard.ref) of
{ok, Workers} ->
try
docs_int(DbName, Workers, QueryArgs, Callback, Acc)
after
- fabric_util:cleanup(Workers)
+ fabric_streams:cleanup(Workers)
end;
{timeout, NewState} ->
DefunctWorkers = fabric_util:remove_done_workers(
diff --git a/src/couch_replicator/src/couch_replicator_manager.erl b/src/couch_replicator/src/couch_replicator_manager.erl
deleted file mode 100644
index afccc0b9b..000000000
--- a/src/couch_replicator/src/couch_replicator_manager.erl
+++ /dev/null
@@ -1,29 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_manager).
-
-% TODO: This is a temporary proxy module to external calls (outside replicator)
-% to other replicator modules. This is done to avoid juggling multiple repos
-% during development.
-
-% NV: TODO: These functions were moved to couch_replicator_docs
-% but it is still called from fabric_doc_update. Keep it here for now
-% later, update fabric to call couch_replicator_docs instead
--export([before_doc_update/2, after_doc_read/2]).
-
-
-before_doc_update(Doc, Db) ->
- couch_replicator_docs:before_doc_update(Doc, Db).
-
-after_doc_read(Doc, Db) ->
- couch_replicator_docs:after_doc_read(Doc, Db).
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index f669d464d..412ff7d05 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -735,8 +735,8 @@ do_checkpoint(State) ->
{SrcInstanceStartTime, TgtInstanceStartTime} ->
couch_log:notice("recording a checkpoint for `~s` -> `~s` at source update_seq ~p",
[SourceName, TargetName, NewSeq]),
- UniversalStartTime = calendar:now_to_universal_time(ReplicationStartTime),
- StartTime = ?l2b(httpd_util:rfc1123_date(UniversalStartTime)),
+ LocalStartTime = calendar:now_to_local_time(ReplicationStartTime),
+ StartTime = ?l2b(httpd_util:rfc1123_date(LocalStartTime)),
EndTime = ?l2b(httpd_util:rfc1123_date()),
NewHistoryEntry = {[
{<<"session_id">>, SessionId},
diff --git a/src/couch_replicator/test/couch_replicator_compact_tests.erl b/src/couch_replicator/test/couch_replicator_compact_tests.erl
index 89e9295d4..7cc530c19 100644
--- a/src/couch_replicator/test/couch_replicator_compact_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_compact_tests.erl
@@ -97,7 +97,7 @@ should_run_replication(RepPid, RepId, Source, Target) ->
should_ensure_replication_still_running(RepPid, RepId, Source, Target) ->
?_test(check_active_tasks(RepPid, RepId, Source, Target)).
-check_active_tasks(RepPid, {BaseId, Ext} = RepId, Src, Tgt) ->
+check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
Source = case Src of
{remote, NameSrc} ->
<<(db_url(NameSrc))/binary, $/>>;
diff --git a/src/couch_replicator/test/couch_replicator_connection_tests.erl b/src/couch_replicator/test/couch_replicator_connection_tests.erl
index ef3f2b37e..e75cc5a63 100644
--- a/src/couch_replicator/test/couch_replicator_connection_tests.erl
+++ b/src/couch_replicator/test/couch_replicator_connection_tests.erl
@@ -73,7 +73,7 @@ connections_not_shared_after_owner_death({Host, Port}) ->
Self = self(),
spawn(fun() ->
Self ! couch_replicator_connection:acquire(URL),
- 1/0
+ error("simulate division by zero without compiler warning")
end),
receive
{ok, Pid} ->
diff --git a/src/couch_replicator/test/couch_replicator_modules_load_tests.erl b/src/couch_replicator/test/couch_replicator_modules_load_tests.erl
deleted file mode 100644
index a552d147b..000000000
--- a/src/couch_replicator/test/couch_replicator_modules_load_tests.erl
+++ /dev/null
@@ -1,45 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_replicator_modules_load_tests).
-
--include_lib("couch/include/couch_eunit.hrl").
-
-
-modules_load_test_() ->
- {
- "Verify that all modules loads",
- should_load_modules()
- }.
-
-
-should_load_modules() ->
- Modules = [
- couch_replicator_api_wrap,
- couch_replicator_httpc,
- couch_replicator_httpd,
- couch_replicator_manager,
- couch_replicator_scheduler,
- couch_replicator_scheduler_job,
- couch_replicator_docs,
- couch_replicator_clustering,
- couch_replicator_changes_reader,
- couch_replicator_ids,
- couch_replicator_notifier,
- couch_replicator,
- couch_replicator_worker,
- couch_replicator_utils
- ],
- [should_load_module(Mod) || Mod <- Modules].
-
-should_load_module(Mod) ->
- {atom_to_list(Mod), ?_assertMatch({module, _}, code:ensure_loaded(Mod))}.
diff --git a/src/couch_tests/test/couch_tests_app_tests.erl b/src/couch_tests/test/couch_tests_app_tests.erl
index 1acdec789..6f9c7e419 100644
--- a/src/couch_tests/test/couch_tests_app_tests.erl
+++ b/src/couch_tests/test/couch_tests_app_tests.erl
@@ -46,7 +46,7 @@ setup2(Arg1, Arg2) ->
Ctx1 = couch_tests:start_applications([public_key], Ctx0),
couch_tests:set_state(Fixture, Ctx1, {Arg1, Arg2})
end,
- fun(Fixture, Ctx) ->
+ fun(_Fixture, Ctx) ->
Ctx
end).
diff --git a/src/ddoc_cache/test/ddoc_cache_tutil.erl b/src/ddoc_cache/test/ddoc_cache_tutil.erl
index ec5d2db1e..b34d4b163 100644
--- a/src/ddoc_cache/test/ddoc_cache_tutil.erl
+++ b/src/ddoc_cache/test/ddoc_cache_tutil.erl
@@ -13,7 +13,16 @@
-module(ddoc_cache_tutil).
--compile(export_all).
+-export([
+ start_couch/0,
+ start_couch/1,
+ stop_couch/1,
+ clear/0,
+ get_rev/2,
+ ddocs/0,
+ purge_modules/0,
+ with/1
+]).
-include_lib("couch/include/couch_db.hrl").
diff --git a/src/fabric/.travis.yml b/src/fabric/.travis.yml
deleted file mode 100644
index 13a7a32ae..000000000
--- a/src/fabric/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: erlang
-
-otp_release:
- - 18.1
- - 17.5
- - R16B03-1
-
-before_install:
- - sudo apt-get update -qq
- - sudo apt-get -y install libmozjs-dev
- - git clone https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/fabric
- - make
-
-script:
- - ./bin/rebar setup_eunit
- - BUILDDIR=`pwd` ./bin/rebar -r eunit apps=fabric
-
-cache: apt
diff --git a/src/fabric/src/fabric.app.src b/src/fabric/src/fabric.app.src
index 3c6a280bd..d7686ca1a 100644
--- a/src/fabric/src/fabric.app.src
+++ b/src/fabric/src/fabric.app.src
@@ -13,29 +13,6 @@
{application, fabric, [
{description, "Routing and proxying layer for CouchDB cluster"},
{vsn, git},
- {modules, [
- fabric,
- fabric_db_create,
- fabric_db_delete,
- fabric_db_doc_count,
- fabric_db_info,
- fabric_db_meta,
- fabric_db_update_listener,
- fabric_dict,
- fabric_doc_attachments,
- fabric_doc_missing_revs,
- fabric_doc_open,
- fabric_doc_open_revs,
- fabric_doc_update,
- fabric_group_info,
- fabric_rpc,
- fabric_util,
- fabric_view,
- fabric_view_all_docs,
- fabric_view_changes,
- fabric_view_map,
- fabric_view_reduce
- ]},
{registered, []},
{applications, [
kernel,
diff --git a/src/fabric/src/fabric_dict.erl b/src/fabric/src/fabric_dict.erl
index ec2e25cfc..a336b47b0 100644
--- a/src/fabric/src/fabric_dict.erl
+++ b/src/fabric/src/fabric_dict.erl
@@ -12,6 +12,7 @@
-module(fabric_dict).
-compile(export_all).
+-compile(nowarn_export_all).
% Instead of ets, let's use an ordered keylist. We'll need to revisit if we
% have >> 100 shards, so a private interface is a good idea. - APK June 2010
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
index 0a85346f7..aafdcfb79 100644
--- a/src/fabric/src/fabric_doc_open.erl
+++ b/src/fabric/src/fabric_doc_open.erl
@@ -136,7 +136,7 @@ read_repair(#acc{dbname=DbName, replies=Replies, node_revs=NodeRevs}) ->
[#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} | _] ->
choose_reply(Docs);
[#doc{id=Id} | _] ->
- Opts = [?ADMIN_CTX, {read_repair, NodeRevs}],
+ Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
Res = fabric:update_docs(DbName, Docs, Opts),
case Res of
{ok, []} ->
@@ -592,4 +592,4 @@ t_get_doc_info() ->
?assert(is_record(Rec2, full_doc_info))
end).
--endif. \ No newline at end of file
+-endif.
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
index 234b108ef..8ac3f30dc 100644
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ b/src/fabric/src/fabric_doc_open_revs.erl
@@ -224,7 +224,7 @@ dict_repair_docs(Replies, ReplyCount) ->
read_repair(Db, Docs, NodeRevs) ->
- Opts = [?ADMIN_CTX, {read_repair, NodeRevs}],
+ Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
Res = fabric:update_docs(Db, Docs, Opts),
case Res of
{ok, []} ->
@@ -243,8 +243,7 @@ format_reply(true, Replies, _) ->
tree_format_replies(Replies);
format_reply(false, Replies, _) ->
- Filtered = filter_reply(Replies),
- dict_format_replies(Filtered).
+ dict_format_replies(Replies).
tree_format_replies(RevTree) ->
@@ -260,22 +259,59 @@ tree_format_replies(RevTree) ->
dict_format_replies(Dict) ->
- lists:sort([Reply || {_, {Reply, _}} <- Dict]).
-
-filter_reply(Replies) ->
- AllFoundRevs = lists:foldl(fun
- ({{{not_found, missing}, _}, _}, Acc) ->
- Acc;
- ({{_, {Pos, [Rev | _]}}, _}, Acc) ->
- [{Pos, Rev} | Acc]
- end, [], Replies),
- %% keep not_found replies only for the revs that don't also have doc reply
- lists:filter(fun
- ({{{not_found, missing}, Rev}, _}) ->
- not lists:member(Rev, AllFoundRevs);
- (_) ->
- true
- end, Replies).
+ Replies0 = [Reply || {_, {Reply, _}} <- Dict],
+
+ AllFoundRevs = lists:foldl(fun(Reply, Acc) ->
+ case Reply of
+ {ok, #doc{revs = {Pos, [RevId | _]}}} ->
+ [{Pos, RevId} | Acc];
+ _ ->
+ Acc
+ end
+ end, [], Replies0),
+
+ %% Drop any not_found replies for which we
+ %% found the revision on a different node.
+ Replies1 = lists:filter(fun(Reply) ->
+ case Reply of
+ {{not_found, missing}, Rev} ->
+ not lists:member(Rev, AllFoundRevs);
+ _ ->
+ true
+ end
+ end, Replies0),
+
+ % Remove replies with shorter revision
+ % paths for a given revision.
+ collapse_duplicate_revs(Replies1).
+
+
+collapse_duplicate_revs(Replies) ->
+ % The collapse logic requires that replies are
+ % sorted so that shorter rev paths are in
+ % the list just before longer paths.
+ %
+ % This somewhat implicitly relies on Erlang's
+ % sorting of [A, B] < [A, B, C] for all values
+ % of C.
+ collapse_duplicate_revs_int(lists:sort(Replies)).
+
+
+collapse_duplicate_revs_int([]) ->
+ [];
+
+collapse_duplicate_revs_int([{ok, Doc1}, {ok, Doc2} | Rest]) ->
+ {D1, R1} = Doc1#doc.revs,
+ {D2, R2} = Doc2#doc.revs,
+ Head = case D1 == D2 andalso lists:prefix(R1, R2) of
+ true -> [];
+ false -> [{ok, Doc1}]
+ end,
+ Head ++ collapse_duplicate_revs([{ok, Doc2} | Rest]);
+
+collapse_duplicate_revs_int([Reply | Rest]) ->
+ [Reply | collapse_duplicate_revs(Rest)].
+
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -313,7 +349,9 @@ revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
+foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
+foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
@@ -351,7 +389,10 @@ open_doc_revs_test_() ->
check_node_rev_unmodified_on_down_or_exit(),
check_not_found_replies_are_removed_when_doc_found(),
check_not_found_returned_when_one_of_docs_not_found(),
- check_not_found_returned_when_doc_not_found()
+ check_not_found_returned_when_doc_not_found(),
+ check_longer_rev_list_returned(),
+ check_longer_rev_list_not_combined(),
+ check_not_found_removed_and_longer_rev_list()
]
}.
@@ -685,24 +726,49 @@ check_node_rev_unmodified_on_down_or_exit() ->
check_not_found_replies_are_removed_when_doc_found() ->
?_test(begin
Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
- Expect = replies_to_dict([foo1(), bar1()]),
- ?assertEqual(Expect, filter_reply(Replies))
+ Expect = [bar1(), foo1()],
+ ?assertEqual(Expect, dict_format_replies(Replies))
end).
check_not_found_returned_when_one_of_docs_not_found() ->
?_test(begin
Replies = replies_to_dict([foo1(), foo2(), barNF()]),
- Expect = replies_to_dict([foo1(), foo2(), barNF()]),
- ?assertEqual(Expect, filter_reply(Replies))
+ Expect = [foo1(), foo2(), barNF()],
+ ?assertEqual(Expect, dict_format_replies(Replies))
end).
check_not_found_returned_when_doc_not_found() ->
?_test(begin
Replies = replies_to_dict([fooNF(), barNF(), bazNF()]),
- Expect = replies_to_dict([fooNF(), barNF(), bazNF()]),
- ?assertEqual(Expect, filter_reply(Replies))
+ Expect = [barNF(), bazNF(), fooNF()],
+ ?assertEqual(Expect, dict_format_replies(Replies))
end).
+check_longer_rev_list_returned() ->
+ ?_test(begin
+ Replies = replies_to_dict([foo2(), foo2stemmed()]),
+ Expect = [foo2()],
+ ?assertEqual(2, length(Replies)),
+ ?assertEqual(Expect, dict_format_replies(Replies))
+ end).
+
+check_longer_rev_list_not_combined() ->
+ ?_test(begin
+ Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]),
+ Expect = [bar1(), foo2()],
+ ?assertEqual(3, length(Replies)),
+ ?assertEqual(Expect, dict_format_replies(Replies))
+ end).
+
+check_not_found_removed_and_longer_rev_list() ->
+ ?_test(begin
+ Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]),
+ Expect = [foo2()],
+ ?assertEqual(3, length(Replies)),
+ ?assertEqual(Expect, dict_format_replies(Replies))
+ end).
+
+
replies_to_dict(Replies) ->
[reply_to_element(R) || R <- Replies].
diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
index 2571d0d7f..7e447ff1b 100644
--- a/src/fabric/src/fabric_doc_purge.erl
+++ b/src/fabric/src/fabric_doc_purge.erl
@@ -191,7 +191,7 @@ format_resps(UUIDs, #acc{} = Acc) ->
[{UUID, {Health, AllRevs}} | ReplyAcc]
end
end,
- FinalReplies = dict:fold(FoldFun, {ok, []}, Resps),
+ FinalReplies = dict:fold(FoldFun, [], Resps),
couch_util:reorder_results(UUIDs, FinalReplies);
format_resps(_UUIDs, Else) ->
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index a4867f31d..b7a27bb4a 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -106,11 +106,13 @@ before_doc_update(DbName, Docs, Opts) ->
{true, _} ->
%% fake db is expensive to create so we only do it if we have to
Db = fabric_util:fake_db(DbName, Opts),
- [couch_replicator_manager:before_doc_update(Doc, Db) || Doc <- Docs];
+ [couch_replicator_docs:before_doc_update(Doc, Db, replicated_changes)
+ || Doc <- Docs];
{_, true} ->
%% fake db is expensive to create so we only do it if we have to
Db = fabric_util:fake_db(DbName, Opts),
- [couch_users_db:before_doc_update(Doc, Db) || Doc <- Docs];
+ [couch_users_db:before_doc_update(Doc, Db, interactive_edit)
+ || Doc <- Docs];
_ ->
Docs
end.
diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
new file mode 100644
index 000000000..ae0c2be55
--- /dev/null
+++ b/src/fabric/src/fabric_streams.erl
@@ -0,0 +1,251 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_streams).
+
+-export([
+ start/2,
+ start/4,
+ cleanup/1
+]).
+
+-include_lib("fabric/include/fabric.hrl").
+-include_lib("mem3/include/mem3.hrl").
+
+
+-define(WORKER_CLEANER, fabric_worker_cleaner).
+
+
+start(Workers, Keypos) ->
+ start(Workers, Keypos, undefined, undefined).
+
+start(Workers0, Keypos, StartFun, Replacements) ->
+ Fun = fun handle_stream_start/3,
+ Acc = #stream_acc{
+ workers = fabric_dict:init(Workers0, waiting),
+ start_fun = StartFun,
+ replacements = Replacements
+ },
+ spawn_worker_cleaner(self(), Workers0),
+ Timeout = fabric_util:request_timeout(),
+ case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of
+ {ok, #stream_acc{workers=Workers}} ->
+ true = fabric_view:is_progress_possible(Workers),
+ AckedWorkers = fabric_dict:fold(fun(Worker, From, WorkerAcc) ->
+ rexi:stream_start(From),
+ [Worker | WorkerAcc]
+ end, [], Workers),
+ {ok, AckedWorkers};
+ Else ->
+ Else
+ end.
+
+
+cleanup(Workers) ->
+ % Stop the auxiliary cleaner process as we got to the point where cleanup
+ % happens in the regular fashion so we don't want to send 2x the number of
+ % kill messages
+ case get(?WORKER_CLEANER) of
+ CleanerPid when is_pid(CleanerPid) ->
+ erase(?WORKER_CLEANER),
+ exit(CleanerPid, kill);
+ _ ->
+ ok
+ end,
+ fabric_util:cleanup(Workers).
+
+
+handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) ->
+ case fabric_util:remove_down_workers(St#stream_acc.workers, NodeRef) of
+ {ok, Workers} ->
+ {ok, St#stream_acc{workers=Workers}};
+ error ->
+ Reason = {nodedown, <<"progress not possible">>},
+ {error, Reason}
+ end;
+
+handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
+ Workers = fabric_dict:erase(Worker, St#stream_acc.workers),
+ Replacements = St#stream_acc.replacements,
+ case {fabric_view:is_progress_possible(Workers), Reason} of
+ {true, _} ->
+ {ok, St#stream_acc{workers=Workers}};
+ {false, {maintenance_mode, _Node}} when Replacements /= undefined ->
+ % Check if we have replacements for this range
+ % and start the new workers if so.
+ case lists:keytake(Worker#shard.range, 1, Replacements) of
+ {value, {_Range, WorkerReplacements}, NewReplacements} ->
+ FinalWorkers = lists:foldl(fun(Repl, NewWorkers) ->
+ NewWorker = (St#stream_acc.start_fun)(Repl),
+ add_worker_to_cleaner(self(), NewWorker),
+ fabric_dict:store(NewWorker, waiting, NewWorkers)
+ end, Workers, WorkerReplacements),
+ % Assert that our replaced worker provides us
+ % the opportunity to make progress.
+ true = fabric_view:is_progress_possible(FinalWorkers),
+ NewRefs = fabric_dict:fetch_keys(FinalWorkers),
+ {new_refs, NewRefs, St#stream_acc{
+ workers=FinalWorkers,
+ replacements=NewReplacements
+ }};
+ false ->
+ % If progress isn't possible and we don't have any
+ % replacements then we're dead in the water.
+ Error = {nodedown, <<"progress not possible">>},
+ {error, Error}
+ end;
+ {false, _} ->
+ {error, fabric_util:error_info(Reason)}
+ end;
+
+handle_stream_start(rexi_STREAM_INIT, {Worker, From}, St) ->
+ case fabric_dict:lookup_element(Worker, St#stream_acc.workers) of
+ undefined ->
+ % This worker lost the race with other partition copies, terminate
+ rexi:stream_cancel(From),
+ {ok, St};
+ waiting ->
+ % Don't ack the worker yet so they don't start sending us
+ % rows until we're ready
+ Workers0 = fabric_dict:store(Worker, From, St#stream_acc.workers),
+ Workers1 = fabric_view:remove_overlapping_shards(Worker, Workers0),
+ case fabric_dict:any(waiting, Workers1) of
+ true ->
+ {ok, St#stream_acc{workers=Workers1}};
+ false ->
+ {stop, St#stream_acc{workers=Workers1}}
+ end
+ end;
+
+handle_stream_start({ok, ddoc_updated}, _, St) ->
+ cleanup(St#stream_acc.workers),
+ {stop, ddoc_updated};
+
+handle_stream_start(Else, _, _) ->
+ exit({invalid_stream_start, Else}).
+
+
+% Spawn an auxiliary rexi worker cleaner. This will be used in cases
+% when the coordinator (request) process is forcibly killed and doesn't
+% get a chance to process its `after` fabric_streams:cleanup/1 clause.
+spawn_worker_cleaner(Coordinator, Workers) ->
+ case get(?WORKER_CLEANER) of
+ undefined ->
+ Pid = spawn(fun() ->
+ erlang:monitor(process, Coordinator),
+ cleaner_loop(Coordinator, Workers)
+ end),
+ put(?WORKER_CLEANER, Pid),
+ Pid;
+ ExistingCleaner ->
+ ExistingCleaner
+ end.
+
+
+cleaner_loop(Pid, Workers) ->
+ receive
+ {add_worker, Pid, Worker} ->
+ cleaner_loop(Pid, [Worker | Workers]);
+ {'DOWN', _, _, Pid, _} ->
+ fabric_util:cleanup(Workers)
+ end.
+
+
+add_worker_to_cleaner(CoordinatorPid, Worker) ->
+ case get(?WORKER_CLEANER) of
+ CleanerPid when is_pid(CleanerPid) ->
+ CleanerPid ! {add_worker, CoordinatorPid, Worker};
+ _ ->
+ ok
+ end.
+
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+worker_cleaner_test_() ->
+ {
+ "Fabric spawn_worker_cleaner test", {
+ setup, fun setup/0, fun teardown/1,
+ fun(_) -> [
+ should_clean_workers(),
+ does_not_fire_if_cleanup_called(),
+ should_clean_additional_worker_too()
+ ] end
+ }
+ }.
+
+
+should_clean_workers() ->
+ ?_test(begin
+ meck:reset(rexi),
+ erase(?WORKER_CLEANER),
+ Workers = [
+ #shard{node = 'n1', ref = make_ref()},
+ #shard{node = 'n2', ref = make_ref()}
+ ],
+ {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+ Cleaner = spawn_worker_cleaner(Coord, Workers),
+ Ref = erlang:monitor(process, Cleaner),
+ Coord ! die,
+ receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+ ?assertEqual(2, meck:num_calls(rexi, kill, 2))
+ end).
+
+
+does_not_fire_if_cleanup_called() ->
+ ?_test(begin
+ meck:reset(rexi),
+ erase(?WORKER_CLEANER),
+ Workers = [
+ #shard{node = 'n1', ref = make_ref()},
+ #shard{node = 'n2', ref = make_ref()}
+ ],
+ {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+ Cleaner = spawn_worker_cleaner(Coord, Workers),
+ Ref = erlang:monitor(process, Cleaner),
+ cleanup(Workers),
+ Coord ! die,
+ receive {'DOWN', Ref, _, _, _} -> ok end,
+ % 2 calls would be from the cleanup/1 function. If the cleanup process
+ % had fired too, it would have been 4 calls total.
+ ?assertEqual(2, meck:num_calls(rexi, kill, 2))
+ end).
+
+
+should_clean_additional_worker_too() ->
+ ?_test(begin
+ meck:reset(rexi),
+ erase(?WORKER_CLEANER),
+ Workers = [
+ #shard{node = 'n1', ref = make_ref()}
+ ],
+ {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+ Cleaner = spawn_worker_cleaner(Coord, Workers),
+ add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
+ Ref = erlang:monitor(process, Cleaner),
+ Coord ! die,
+ receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+ ?assertEqual(2, meck:num_calls(rexi, kill, 2))
+ end).
+
+
+setup() ->
+ ok = meck:expect(rexi, kill, fun(_, _) -> ok end).
+
+
+teardown(_) ->
+ meck:unload().
+
+-endif.
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index e622c6aa0..cc1f1b622 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -16,7 +16,6 @@
update_counter/3, remove_ancestors/2, create_monitors/1, kv/2,
remove_down_workers/2, doc_id_and_rev/1]).
-export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0]).
--export([stream_start/2, stream_start/4]).
-export([log_timeout/2, remove_done_workers/2]).
-export([is_users_db/1, is_replicator_db/1, fake_db/2]).
-export([upgrade_mrargs/1]).
@@ -51,93 +50,6 @@ submit_jobs(Shards, Module, EndPoint, ExtraArgs) ->
cleanup(Workers) ->
[rexi:kill(Node, Ref) || #shard{node=Node, ref=Ref} <- Workers].
-stream_start(Workers, Keypos) ->
- stream_start(Workers, Keypos, undefined, undefined).
-
-stream_start(Workers0, Keypos, StartFun, Replacements) ->
- Fun = fun handle_stream_start/3,
- Acc = #stream_acc{
- workers = fabric_dict:init(Workers0, waiting),
- start_fun = StartFun,
- replacements = Replacements
- },
- Timeout = request_timeout(),
- case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of
- {ok, #stream_acc{workers=Workers}} ->
- true = fabric_view:is_progress_possible(Workers),
- AckedWorkers = fabric_dict:fold(fun(Worker, From, WorkerAcc) ->
- rexi:stream_start(From),
- [Worker | WorkerAcc]
- end, [], Workers),
- {ok, AckedWorkers};
- Else ->
- Else
- end.
-
-handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) ->
- case fabric_util:remove_down_workers(St#stream_acc.workers, NodeRef) of
- {ok, Workers} ->
- {ok, St#stream_acc{workers=Workers}};
- error ->
- Reason = {nodedown, <<"progress not possible">>},
- {error, Reason}
- end;
-handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
- Workers = fabric_dict:erase(Worker, St#stream_acc.workers),
- Replacements = St#stream_acc.replacements,
- case {fabric_view:is_progress_possible(Workers), Reason} of
- {true, _} ->
- {ok, St#stream_acc{workers=Workers}};
- {false, {maintenance_mode, _Node}} when Replacements /= undefined ->
- % Check if we have replacements for this range
- % and start the new workers if so.
- case lists:keytake(Worker#shard.range, 1, Replacements) of
- {value, {_Range, WorkerReplacements}, NewReplacements} ->
- FinalWorkers = lists:foldl(fun(Repl, NewWorkers) ->
- NewWorker = (St#stream_acc.start_fun)(Repl),
- fabric_dict:store(NewWorker, waiting, NewWorkers)
- end, Workers, WorkerReplacements),
- % Assert that our replaced worker provides us
- % the oppurtunity to make progress.
- true = fabric_view:is_progress_possible(FinalWorkers),
- NewRefs = fabric_dict:fetch_keys(FinalWorkers),
- {new_refs, NewRefs, St#stream_acc{
- workers=FinalWorkers,
- replacements=NewReplacements
- }};
- false ->
- % If we progress isn't possible and we don't have any
- % replacements then we're dead in the water.
- Error = {nodedown, <<"progress not possible">>},
- {error, Error}
- end;
- {false, _} ->
- {error, fabric_util:error_info(Reason)}
- end;
-handle_stream_start(rexi_STREAM_INIT, {Worker, From}, St) ->
- case fabric_dict:lookup_element(Worker, St#stream_acc.workers) of
- undefined ->
- % This worker lost the race with other partition copies, terminate
- rexi:stream_cancel(From),
- {ok, St};
- waiting ->
- % Don't ack the worker yet so they don't start sending us
- % rows until we're ready
- Workers0 = fabric_dict:store(Worker, From, St#stream_acc.workers),
- Workers1 = fabric_view:remove_overlapping_shards(Worker, Workers0),
- case fabric_dict:any(waiting, Workers1) of
- true ->
- {ok, St#stream_acc{workers=Workers1}};
- false ->
- {stop, St#stream_acc{workers=Workers1}}
- end
- end;
-handle_stream_start({ok, ddoc_updated}, _, St) ->
- cleanup(St#stream_acc.workers),
- {stop, ddoc_updated};
-handle_stream_start(Else, _, _) ->
- exit({invalid_stream_start, Else}).
-
recv(Workers, Keypos, Fun, Acc0) ->
rexi_utils:recv(Workers, Keypos, Fun, Acc0, request_timeout(), infinity).
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index 30c8e8d51..a404125fa 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -26,12 +26,12 @@ go(DbName, Options, #mrargs{keys=undefined} = QueryArgs, Callback, Acc) ->
Shards, fabric_rpc, all_docs, [Options, QueryArgs]),
RexiMon = fabric_util:create_monitors(Workers0),
try
- case fabric_util:stream_start(Workers0, #shard.ref) of
+ case fabric_streams:start(Workers0, #shard.ref) of
{ok, Workers} ->
try
go(DbName, Options, Workers, QueryArgs, Callback, Acc)
after
- fabric_util:cleanup(Workers)
+ fabric_streams:cleanup(Workers)
end;
{timeout, NewState} ->
DefunctWorkers = fabric_util:remove_done_workers(
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index 7288f1aa5..f96bb058d 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -166,7 +166,7 @@ send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
end,
RexiMon = fabric_util:create_monitors(Workers0),
try
- case fabric_util:stream_start(Workers0, #shard.ref, StartFun, Repls) of
+ case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of
{ok, Workers} ->
try
LiveSeqs = lists:map(fun(W) ->
@@ -178,7 +178,7 @@ send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
send_changes(DbName, Workers, LiveSeqs, ChangesArgs,
Callback, AccIn, Timeout)
after
- fabric_util:cleanup(Workers)
+ fabric_streams:cleanup(Workers)
end;
{timeout, NewState} ->
DefunctWorkers = fabric_util:remove_done_workers(
diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl
index b6a3d6f83..ee51bfe74 100644
--- a/src/fabric/src/fabric_view_map.erl
+++ b/src/fabric/src/fabric_view_map.erl
@@ -36,14 +36,14 @@ go(DbName, Options, DDoc, View, Args, Callback, Acc, VInfo) ->
Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, map_view, RPCArgs),
RexiMon = fabric_util:create_monitors(Workers0),
try
- case fabric_util:stream_start(Workers0, #shard.ref, StartFun, Repls) of
+ case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of
{ok, ddoc_updated} ->
Callback({error, ddoc_updated}, Acc);
{ok, Workers} ->
try
go(DbName, Workers, VInfo, Args, Callback, Acc)
after
- fabric_util:cleanup(Workers)
+ fabric_streams:cleanup(Workers)
end;
{timeout, NewState} ->
DefunctWorkers = fabric_util:remove_done_workers(
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
index a74be1073..b2b8a05f0 100644
--- a/src/fabric/src/fabric_view_reduce.erl
+++ b/src/fabric/src/fabric_view_reduce.erl
@@ -35,14 +35,14 @@ go(DbName, DDoc, VName, Args, Callback, Acc, VInfo) ->
Workers0 = fabric_util:submit_jobs(Shards,fabric_rpc,reduce_view,RPCArgs),
RexiMon = fabric_util:create_monitors(Workers0),
try
- case fabric_util:stream_start(Workers0, #shard.ref, StartFun, Repls) of
+ case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of
{ok, ddoc_updated} ->
Callback({error, ddoc_updated}, Acc);
{ok, Workers} ->
try
go2(DbName, Workers, VInfo, Args, Callback, Acc)
after
- fabric_util:cleanup(Workers)
+ fabric_streams:cleanup(Workers)
end;
{timeout, NewState} ->
DefunctWorkers = fabric_util:remove_done_workers(
diff --git a/src/global_changes/.travis.yml b/src/global_changes/.travis.yml
deleted file mode 100644
index 67417be96..000000000
--- a/src/global_changes/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: erlang
-
-otp_release:
- - 18.0
- - 17.5
- - R16B03-1
-
-before_install:
- - sudo apt-get update -qq
- - sudo apt-get -y install libmozjs-dev
- - git clone https://github.com/apache/couchdb
-
-before_script:
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -r ../!(couchdb) ./src/global_changes
- - make
-
-script:
- - ./bin/rebar setup_eunit
- - BUILDDIR=`pwd` ./bin/rebar -r eunit apps=global_changes
-
-cache: apt
diff --git a/src/mango/.travis.yml b/src/mango/.travis.yml
deleted file mode 100644
index d6130128b..000000000
--- a/src/mango/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-language: erlang
-
-before_install:
- - sudo apt-get update -qq
- - sudo apt-get -y install libmozjs-dev python-virtualenv
- - git clone --depth=1 https://github.com/apache/couchdb
- - cd couchdb
- - ./configure --disable-docs --disable-fauxton
- - cp -R ../src ./src/mango
- - make
- - cd ..
- - couchdb/dev/run -n 1 --admin=testuser:testpass &
- - sleep 10
-
-before_script:
- - make venv
- - source venv/bin/activate
- - make pip-install
-
-matrix:
- include:
- - otp_release: 18.1
- python: 2.7
- - otp_release: 17.5
- python: 2.7
- - otp_release: R16B03-1
- python: 2.7
-
-cache: apt
diff --git a/src/mem3/src/mem3.app.src b/src/mem3/src/mem3.app.src
index 99a9eed88..15efc64ca 100644
--- a/src/mem3/src/mem3.app.src
+++ b/src/mem3/src/mem3.app.src
@@ -13,20 +13,6 @@
{application, mem3, [
{description, "CouchDB Cluster Membership"},
{vsn, git},
- {modules, [
- mem3,
- mem3_app,
- mem3_httpd,
- mem3_nodes,
- mem3_rep,
- mem3_shards,
- mem3_sup,
- mem3_sync,
- mem3_sync_event,
- mem3_sync_nodes,
- mem3_sync_security,
- mem3_util
- ]},
{mod, {mem3_app, []}},
{registered, [
mem3_events,
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index de633006a..f1af0f796 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -196,10 +196,13 @@ choose_shards(DbName, Nodes, Options) ->
Q = mem3_util:q_val(couch_util:get_value(q, Options,
config:get("cluster", "q", "8"))),
%% rotate to a random entry in the nodelist for even distribution
- {A, B} = lists:split(crypto:rand_uniform(1,length(Nodes)+1), Nodes),
- RotatedNodes = B ++ A,
+ RotatedNodes = rotate_rand(Nodes),
mem3_util:create_partition_map(DbName, N, Q, RotatedNodes, Suffix).
+rotate_rand(Nodes) ->
+ {A, B} = lists:split(couch_rand:uniform(length(Nodes)), Nodes),
+ B ++ A.
+
get_placement(Options) ->
case couch_util:get_value(placement, Options) of
undefined ->
@@ -382,4 +385,11 @@ allowed_nodes_test_() ->
]
}]}.
+rotate_rand_degenerate_test() ->
+ ?assertEqual([1], rotate_rand([1])).
+
+rotate_rand_distribution_test() ->
+ Cases = [rotate_rand([1, 2, 3]) || _ <- lists:seq(1, 100)],
+ ?assertEqual(3, length(lists:usort(Cases))).
+
-endif.
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
index 340bc0e7b..a30630167 100644
--- a/src/mem3/src/mem3_rep.erl
+++ b/src/mem3/src/mem3_rep.erl
@@ -137,7 +137,6 @@ verify_purge_checkpoint(DbName, Props) ->
Target = binary_to_existing_atom(TargetBin, latin1),
try
- Shards = mem3:shards(DbName),
Nodes = lists:foldl(fun(Shard, Acc) ->
case Shard#shard.range == Range of
true -> [Shard#shard.node | Acc];
diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
index cd671e4d5..56ffe3d07 100644
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ b/src/mem3/src/mem3_sync_event_listener.erl
@@ -269,7 +269,7 @@ should_set_sync_frequency(Pid) ->
ok
end).
-should_restart_listener(Pid) ->
+should_restart_listener(_Pid) ->
?_test(begin
meck:reset(config_notifier),
config:set("mem3", "sync_frequency", "error", false),
diff --git a/src/mem3/test/mem3_seeds_test.erl b/src/mem3/test/mem3_seeds_test.erl
index 19e007950..ba83b66be 100644
--- a/src/mem3/test/mem3_seeds_test.erl
+++ b/src/mem3/test/mem3_seeds_test.erl
@@ -12,7 +12,7 @@
-module(mem3_seeds_test).
--include_lib("eunit/include/eunit.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
a_test_() ->
Tests = [
@@ -29,7 +29,7 @@ empty_seedlist_status_ok() ->
?assertEqual({[]}, couch_util:get_value(seeds, Result)),
?assertEqual(ok, couch_util:get_value(status, Result))
after
- application:stop(mem3)
+ cleanup()
end.
seedlist_misconfiguration() ->
@@ -43,7 +43,7 @@ seedlist_misconfiguration() ->
?assertMatch({_}, couch_util:get_value('couchdb@node2.example.com', Seeds)),
?assertEqual(seeding, couch_util:get_value(status, Result))
after
- application:stop(mem3)
+ cleanup()
end.
check_nodelist() ->
@@ -54,9 +54,14 @@ check_nodelist() ->
?assert(lists:member('couchdb@node1.example.com', Nodes)),
?assert(lists:member('couchdb@node2.example.com', Nodes))
after
- application:stop(mem3)
+ cleanup()
end.
+cleanup() ->
+ application:stop(mem3),
+ Filename = config:get("mem3", "nodes_db", "_nodes") ++ ".couch",
+ file:delete(filename:join([?BUILDDIR(), "tmp", "data", Filename])).
+
setup() ->
test_util:start_couch([rexi]).
diff --git a/src/rexi/src/rexi.app.src b/src/rexi/src/rexi.app.src
index efe128ca0..400293219 100644
--- a/src/rexi/src/rexi.app.src
+++ b/src/rexi/src/rexi.app.src
@@ -13,16 +13,6 @@
{application, rexi, [
{description, "Lightweight RPC server"},
{vsn, git},
- {modules, [
- rexi,
- rexi_app,
- rexi_gov_manager,
- rexi_governor,
- rexi_monitor,
- rexi_server,
- rexi_sup,
- rexi_utils
- ]},
{registered, [
rexi_sup,
rexi_server
diff --git a/test/elixir/.credo.exs b/test/elixir/.credo.exs
index 48ae45224..e24836c8f 100644
--- a/test/elixir/.credo.exs
+++ b/test/elixir/.credo.exs
@@ -70,7 +70,7 @@
# If you don't want TODO comments to cause `mix credo` to fail, just
# set this value to 0 (zero).
#
- {Credo.Check.Design.TagTODO, [exit_status: 0]},
+ {Credo.Check.Design.TagTODO, false},
{Credo.Check.Design.TagFIXME, []},
#
@@ -108,7 +108,10 @@
{Credo.Check.Refactor.NegatedConditionsWithElse, []},
{Credo.Check.Refactor.Nesting, false},
{Credo.Check.Refactor.PipeChainStart,
- [excluded_argument_types: [:atom, :binary, :fn, :keyword], excluded_functions: []]},
+ [
+ excluded_argument_types: [:atom, :binary, :fn, :keyword],
+ excluded_functions: []
+ ]},
{Credo.Check.Refactor.UnlessWithElse, []},
#
diff --git a/test/elixir/lib/couch/db_test.ex b/test/elixir/lib/couch/db_test.ex
index 3bbfb7eaa..899237635 100644
--- a/test/elixir/lib/couch/db_test.ex
+++ b/test/elixir/lib/couch/db_test.ex
@@ -156,21 +156,21 @@ defmodule Couch.DBTest do
def create_db(db_name) do
resp = Couch.put("/#{db_name}")
- assert resp.status_code == 201
+ assert resp.status_code in [201, 202]
assert resp.body == %{"ok" => true}
{:ok, resp}
end
def delete_db(db_name) do
resp = Couch.delete("/#{db_name}")
- assert resp.status_code == 200
+ assert resp.status_code in [200, 202]
assert resp.body == %{"ok" => true}
{:ok, resp}
end
def create_doc(db_name, body) do
resp = Couch.post("/#{db_name}", body: body)
- assert resp.status_code == 201
+ assert resp.status_code in [201, 202]
assert resp.body["ok"]
{:ok, resp}
end
@@ -273,7 +273,7 @@ defmodule Couch.DBTest do
# enough to introduce a race here
retry_until(fn -> !node_is_running(port) end)
# wait until node is back
- retry_until(fn -> node_is_running(port) end, 500, 10_000)
+ retry_until(fn -> node_is_running(port) end, 500, 30_000)
end
defp node_is_running(port) do
diff --git a/test/elixir/run b/test/elixir/run
index 2d8464c18..a9c2efa4d 100755
--- a/test/elixir/run
+++ b/test/elixir/run
@@ -3,4 +3,4 @@ cd "$(dirname "$0")"
mix local.hex --force
mix local.rebar --force
mix deps.get
-mix test --trace
+mix test --trace "$@"
diff --git a/test/elixir/run.cmd b/test/elixir/run.cmd
index a99df8c6c..f1789adce 100644
--- a/test/elixir/run.cmd
+++ b/test/elixir/run.cmd
@@ -4,4 +4,4 @@ cd %~dp0
call mix local.hex --force
call mix local.rebar --force
call mix deps.get
-call mix test --trace
+call mix test --trace %*
diff --git a/test/elixir/test/all_docs_test.exs b/test/elixir/test/all_docs_test.exs
index 21dcb616b..b8f21e7c0 100644
--- a/test/elixir/test/all_docs_test.exs
+++ b/test/elixir/test/all_docs_test.exs
@@ -46,7 +46,8 @@ defmodule AllDocsTest do
# Confirm that queries may assume raw collation
resp =
- Couch.get("/#{db_name}/_all_docs",
+ Couch.get(
+ "/#{db_name}/_all_docs",
query: %{
:startkey => "\"org.couchdb.user:\"",
:endkey => "\"org.couchdb.user;\""
@@ -68,9 +69,12 @@ defmodule AllDocsTest do
assert Couch.delete("/#{db_name}/1", query: %{:rev => doc1["_rev"]}).body["ok"]
changes = Couch.get("/#{db_name}/_changes").body["results"]
assert length(changes) == 4
- deleted = Enum.filter(changes, fn row -> row["deleted"] end)
- assert length(deleted) == 1
- assert hd(deleted)["id"] == "1"
+
+ retry_until(fn ->
+ deleted = Enum.filter(changes, fn row -> row["deleted"] end)
+ assert length(deleted) == 1
+ assert hd(deleted)["id"] == "1"
+ end)
# (remember old seq)
orig_doc = Enum.find(changes, fn row -> row["id"] == "3" end)
@@ -99,7 +103,8 @@ defmodule AllDocsTest do
# Test _all_docs with keys
rows =
- Couch.post("/#{db_name}/_all_docs",
+ Couch.post(
+ "/#{db_name}/_all_docs",
query: %{:include_docs => true},
body: %{:keys => ["1"]}
).body["rows"]
@@ -124,18 +129,23 @@ defmodule AllDocsTest do
:value => "Z"
}
- assert Couch.put("/#{db_name}/3", query: %{:new_edits => false}, body: conflicted_doc1).body[
- "ok"
- ]
+ assert Couch.put(
+ "/#{db_name}/3",
+ query: %{:new_edits => false},
+ body: conflicted_doc1
+ ).body["ok"]
- assert Couch.put("/#{db_name}/3", query: %{:new_edits => false}, body: conflicted_doc2).body[
- "ok"
- ]
+ assert Couch.put(
+ "/#{db_name}/3",
+ query: %{:new_edits => false},
+ body: conflicted_doc2
+ ).body["ok"]
win_rev = Couch.get("/#{db_name}/3").body
changes =
- Couch.get("/#{db_name}/_changes",
+ Couch.get(
+ "/#{db_name}/_changes",
query: %{:include_docs => true, :conflicts => true, :style => "all_docs"}
).body["results"]
@@ -147,7 +157,8 @@ defmodule AllDocsTest do
assert length(doc3["doc"]["_conflicts"]) == 2
rows =
- Couch.get("/#{db_name}/_all_docs",
+ Couch.get(
+ "/#{db_name}/_all_docs",
query: %{:include_docs => true, :conflicts => true}
).body["rows"]
@@ -166,7 +177,8 @@ defmodule AllDocsTest do
assert Couch.post("/#{db_name}", body: %{:_id => "a", :foo => "a"}).body["ok"]
rows =
- Couch.get("/#{db_name}/_all_docs",
+ Couch.get(
+ "/#{db_name}/_all_docs",
query: %{:startkey => "\"Z\"", :endkey => "\"Z\""}
).body["rows"]
diff --git a/test/elixir/test/cluster_with_quorum_test.exs b/test/elixir/test/cluster_with_quorum_test.exs
new file mode 100644
index 000000000..d07b18fea
--- /dev/null
+++ b/test/elixir/test/cluster_with_quorum_test.exs
@@ -0,0 +1,185 @@
+defmodule WithQuorumTest do
+ use CouchTestCase
+
+ @moduletag :with_quorum_test
+
+ @moduledoc """
+ Test CouchDB API in a cluster with quorum.
+ """
+ @tag :with_db_name
+ test "Creating/Deleting DB should return 201-Created/202-Accepted", context do
+ db_name = context[:db_name]
+ resp = Couch.put("/#{db_name}")
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+ resp = Couch.delete("/#{db_name}")
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+ end
+
+ @tag :with_db_name
+ test "Creating-Updating/Deleting doc should return 201-Created/200-OK", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+
+ resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0", :a => 1})
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+
+ resp =
+ Couch.put("/#{context[:db_name]}/0", body: %{:_id => "0", :_rev => rev, :a => 2})
+
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+ resp = Couch.delete("/#{context[:db_name]}/0", query: %{:rev => rev})
+ msg = "Should return 200-OK"
+ assert resp.status_code == 200, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Creating-Updating/Deleting doc with overridden quorum should return 202-Accepted/200-OK",
+ context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+
+ resp =
+ Couch.post(
+ "/#{context[:db_name]}",
+ query: %{:w => 3},
+ body: %{:_id => "0", :a => 1}
+ )
+
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+
+ resp =
+ Couch.put(
+ "/#{context[:db_name]}/0",
+ query: %{:w => 3},
+ body: %{:_id => "0", :_rev => rev, :a => 2}
+ )
+
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+ resp = Couch.delete("/#{context[:db_name]}/0", query: %{:w => 1, :rev => rev})
+ msg = "Should return 200-Ok"
+ assert resp.status_code == 200, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Copy doc should return 201-Created", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+
+ Couch.post(
+ "/#{context[:db_name]}",
+ body: %{:_id => "0", :a => 1}
+ )
+
+ headers = [Destination: "1"]
+ resp = Couch.request(:copy, "/#{context[:db_name]}/0", headers: headers)
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+ Couch.delete("/#{db_name}")
+ end
+
+ @doc_range 1..5
+
+ @tag :with_db_name
+ test "Bulk docs should return 201-Created", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ docs = create_docs(@doc_range)
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs})
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Bulk docs overridden quorum should return 202-Accepted", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ docs = create_docs(@doc_range)
+ resp = Couch.post("/#{db_name}/_bulk_docs", query: %{:w => 3}, body: %{docs: docs})
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Attachments should return 201-Created", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
+ rev = resp.body["rev"]
+
+ resp =
+ Couch.put(
+ "/#{context[:db_name]}/0/foo.txt",
+ query: %{:rev => rev},
+ body: "This is a non-base64 encoded text",
+ headers: ["Content-Type": "text/plain;charset=utf-8"]
+ )
+
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ rev = resp.body["rev"]
+ resp = Couch.delete("/#{context[:db_name]}/0/foo.txt", query: %{:rev => rev})
+ msg = "Should return 200-Ok"
+ assert resp.status_code == 200, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Attachments overridden quorum should return 202-Accepted", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
+ rev = resp.body["rev"]
+
+ resp =
+ Couch.put(
+ "/#{context[:db_name]}/0/foo.txt",
+ query: %{:rev => rev, :w => 3},
+ body: "This is a non-base64 encoded text",
+ headers: ["Content-Type": "text/plain;charset=utf-8"]
+ )
+
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ rev = resp.body["rev"]
+
+ resp =
+ Couch.delete(
+ "/#{context[:db_name]}/0/foo.txt",
+ query: %{:rev => rev, :w => 3}
+ )
+
+ msg = "Should return 200-Ok"
+ assert resp.status_code == 200, msg
+
+ Couch.delete("/#{db_name}")
+ end
+end
diff --git a/test/elixir/test/cluster_without_quorum_test.exs b/test/elixir/test/cluster_without_quorum_test.exs
new file mode 100644
index 000000000..4eee1e05a
--- /dev/null
+++ b/test/elixir/test/cluster_without_quorum_test.exs
@@ -0,0 +1,184 @@
+defmodule WithoutQuorumTest do
+ use CouchTestCase
+
+ @moduletag :without_quorum_test
+
+ @moduledoc """
+ Test CouchDB API in a cluster without quorum.
+ """
+ @tag :with_db_name
+ test "Creating/Deleting DB should return 202-Accepted", context do
+ db_name = context[:db_name]
+ resp = Couch.put("/#{db_name}")
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+ resp = Couch.delete("/#{db_name}")
+ assert resp.status_code == 202, msg
+ end
+
+ @tag :with_db_name
+ test "Creating/Updating/Deleting doc should return 202-Accepted", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+
+ resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0", :a => 1})
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+
+ resp =
+ Couch.put("/#{context[:db_name]}/0", body: %{:_id => "0", :_rev => rev, :a => 2})
+
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+ resp = Couch.delete("/#{context[:db_name]}/0", query: %{:rev => rev})
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Creating-Updating/Deleting doc with overridden quorum should return 201-Created/200-OK",
+ context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+
+ resp =
+ Couch.post(
+ "/#{context[:db_name]}",
+ query: %{:w => 1},
+ body: %{:_id => "0", :a => 1}
+ )
+
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+
+ resp =
+ Couch.put(
+ "/#{context[:db_name]}/0",
+ query: %{:w => 1},
+ body: %{:_id => "0", :_rev => rev, :a => 2}
+ )
+
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ resp = Couch.get("/#{context[:db_name]}/0")
+ rev = resp.body["_rev"]
+ resp = Couch.delete("/#{context[:db_name]}/0", query: %{:w => 1, :rev => rev})
+ msg = "Should return 200-Ok"
+ assert resp.status_code == 200, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Copy doc should return 202-Accepted", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+
+ Couch.post(
+ "/#{context[:db_name]}",
+ body: %{:_id => "0", :a => 1}
+ )
+
+ headers = [Destination: "1"]
+ resp = Couch.request(:copy, "/#{context[:db_name]}/0", headers: headers)
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+ Couch.delete("/#{db_name}")
+ end
+
+ @doc_range 1..5
+
+ @tag :with_db_name
+ test "Bulk docs should return 202-Accepted", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ docs = create_docs(@doc_range)
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs})
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Bulk docs overridden quorum should return 201-Created", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ docs = create_docs(@doc_range)
+ resp = Couch.post("/#{db_name}/_bulk_docs", query: %{:w => 1}, body: %{docs: docs})
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Attachments should return 202-Acepted", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
+ rev = resp.body["rev"]
+
+ resp =
+ Couch.put(
+ "/#{context[:db_name]}/0/foo.txt",
+ query: %{:rev => rev},
+ body: "This is a non-base64 encoded text",
+ headers: ["Content-Type": "text/plain;charset=utf-8"]
+ )
+
+ msg = "Should return 202-Accepted"
+ assert resp.status_code == 202, msg
+
+ rev = resp.body["rev"]
+ resp = Couch.delete("/#{context[:db_name]}/0/foo.txt", query: %{:rev => rev})
+ msg = "Should return 200-Ok"
+ assert resp.status_code == 200, msg
+
+ Couch.delete("/#{db_name}")
+ end
+
+ @tag :with_db_name
+ test "Attachments overridden quorum should return 201-Created", context do
+ db_name = context[:db_name]
+ Couch.put("/#{db_name}")
+ resp = Couch.post("/#{context[:db_name]}", body: %{:_id => "0"})
+ rev = resp.body["rev"]
+
+ resp =
+ Couch.put(
+ "/#{context[:db_name]}/0/foo.txt",
+ query: %{:rev => rev, :w => 1},
+ body: "This is a non-base64 encoded text",
+ headers: ["Content-Type": "text/plain;charset=utf-8"]
+ )
+
+ msg = "Should return 201-Created"
+ assert resp.status_code == 201, msg
+
+ rev = resp.body["rev"]
+
+ resp =
+ Couch.delete(
+ "/#{context[:db_name]}/0/foo.txt",
+ query: %{:rev => rev, :w => 1}
+ )
+
+ msg = "Should return 200-Ok"
+ assert resp.status_code == 200, msg
+
+ Couch.delete("/#{db_name}")
+ end
+end
diff --git a/test/elixir/test/rewrite_test.exs b/test/elixir/test/rewrite_test.exs
index 250eb53d7..51acc8bba 100644
--- a/test/elixir/test/rewrite_test.exs
+++ b/test/elixir/test/rewrite_test.exs
@@ -285,12 +285,14 @@ defmodule RewriteTest do
assert Couch.put("/#{db_name}/_design/test", body: ddoc).body["ok"]
- assert Couch.post("/#{db_name}/_bulk_docs",
+ assert Couch.post(
+ "/#{db_name}/_bulk_docs",
body: %{:docs => docs1},
query: %{w: 3}
).status_code == 201
- assert Couch.post("/#{db_name}/_bulk_docs",
+ assert Couch.post(
+ "/#{db_name}/_bulk_docs",
body: %{:docs => docs2},
query: %{w: 3}
).status_code == 201
diff --git a/test/elixir/test/security_validation_test.exs b/test/elixir/test/security_validation_test.exs
index 5f4ddba8c..56c4ec31b 100644
--- a/test/elixir/test/security_validation_test.exs
+++ b/test/elixir/test/security_validation_test.exs
@@ -130,7 +130,8 @@ defmodule SecurityValidationTest do
headers = @auth_headers[:tom]
# attempt to save doc in replication context, eg ?new_edits=false
resp =
- Couch.put("/#{db_name}/#{ddoc[:_id]}",
+ Couch.put(
+ "/#{db_name}/#{ddoc[:_id]}",
body: ddoc,
headers: headers,
query: %{new_edits: false}
@@ -164,7 +165,8 @@ defmodule SecurityValidationTest do
assert resp.body["reason"] == "Documents must have an author field"
# Jerry can write the document
- assert Couch.put("/#{db_name}/test_doc",
+ assert Couch.put(
+ "/#{db_name}/test_doc",
body: %{foo: 1, author: "jerry"},
headers: jerry
).body["ok"]
diff --git a/test/javascript/test_setup.js b/test/javascript/test_setup.js
index 9e49cf15c..6140c0e3f 100644
--- a/test/javascript/test_setup.js
+++ b/test/javascript/test_setup.js
@@ -106,12 +106,12 @@ function restartServer() {
} catch (e) {}
var now = new Date().getTime();
- if (now > start + 15000) {
+ if (now > start + 30000) {
try {
uptime = getUptime();
throw(Error('FAILED to restart: ' + uptime + ' not < ' + olduptime));
} catch (e) {
- throw(Error('FAILED to restart: server is unresponsive, waited 15s'));
+ throw(Error('FAILED to restart: server is unresponsive, waited 30s'));
}
}
}