author     Joan Touzet <wohali@users.noreply.github.com>  2018-12-12 17:24:34 -0500
committer  GitHub <noreply@github.com>                    2018-12-12 17:24:34 -0500
commit     82c921952202d0f511812edeb23670a069070f0b (patch)
tree       530228bcccb950c71eacac2ed6c8a5552c461ea8
parent     be38d66ef9fb8b97bf2d5d2f7c3bf4698eee5f93 (diff)
parent     c911f0652c3b33ad5a28fde2df54f3b20ae324f3 (diff)
Merge branch 'master' into allow-specifying-individual-elixir-tests
-rwxr-xr-x  dev/run                                        97
-rw-r--r--  rel/haproxy.cfg                                 4
-rw-r--r--  src/chttpd/test/chttpd_security_tests.erl     118
-rw-r--r--  src/couch/src/couch_db.erl                     28
-rw-r--r--  src/couch/src/couch_util.erl                    5
-rw-r--r--  src/couch_mrview/src/couch_mrview_index.erl    26
-rw-r--r--  test/elixir/README.md                           2
-rw-r--r--  test/elixir/lib/couch.ex                        4
-rw-r--r--  test/elixir/lib/couch/db_test.ex               55
-rw-r--r--  test/elixir/test/delayed_commits_test.exs      31
-rw-r--r--  test/elixir/test/support/couch_test_case.ex     2
11 files changed, 327 insertions, 45 deletions
diff --git a/dev/run b/dev/run
index 94f6e5d2f..e3181aae6 100755
--- a/dev/run
+++ b/dev/run
@@ -24,10 +24,13 @@ import optparse
import os
import posixpath
import re
+import socket
import subprocess as sp
import sys
import time
import uuid
+import traceback
+from configparser import ConfigParser
from pbkdf2 import pbkdf2_hex
@@ -89,7 +92,7 @@ def main():
if ctx["cmd"]:
run_command(ctx, ctx["cmd"])
else:
- join(ctx, 15984, *ctx["admin"])
+ join(ctx, cluster_port(ctx, 1), *ctx["admin"])
def setup():
@@ -190,6 +193,13 @@ def setup_argparse():
default=False,
help="Do not eval subcommand output",
)
+ parser.add_option(
+ "--auto-ports",
+ dest="auto_ports",
+ default=False,
+ action="store_true",
+ help="Select available ports for nodes automatically",
+ )
return parser.parse_args()
@@ -215,6 +225,7 @@ def setup_context(opts, args):
"no_eval": opts.no_eval,
"reset_logs": True,
"procs": [],
+ "auto_ports": opts.auto_ports,
}
@@ -244,7 +255,7 @@ def setup_configs(ctx):
fauxton_root = "share/www"
for idx, node in enumerate(ctx["nodes"]):
- cluster_port, backend_port = get_ports(idx + ctx["node_number"])
+ cluster_port, backend_port = get_ports(ctx, idx + ctx["node_number"])
env = {
"prefix": toposixpath(ctx["rootdir"]),
"package_author_name": "The Apache Software Foundation",
@@ -263,6 +274,32 @@ def setup_configs(ctx):
"compaction_daemon": "{}",
}
write_config(ctx, node, env)
+ generate_haproxy_config(ctx)
+
+
+def generate_haproxy_config(ctx):
+ haproxy_config = os.path.join(ctx["devdir"], "lib", "haproxy.cfg")
+ template = os.path.join(ctx["rootdir"], "rel", "haproxy.cfg")
+
+ with open(template) as handle:
+ config = handle.readlines()
+
+ out = []
+ for line in config:
+ match = re.match("(.*?)<<(.*?)>>(.*?)", line, re.S)
+ if match:
+ prefix, template, suffix = match.groups()
+ for node in ctx["nodes"]:
+ node_idx = int(node.replace("node", ""))
+ text = template.format(
+ **{"node_idx": node_idx, "port": cluster_port(ctx, node_idx)}
+ )
+ out.append(prefix + text + suffix)
+ else:
+ out.append(line)
+
+ with open(haproxy_config, "w") as handle:
+ handle.write("\n".join(out))
def apply_config_overrides(ctx, content):
@@ -275,9 +312,43 @@ def apply_config_overrides(ctx, content):
return content
-def get_ports(idnode):
+def get_ports(ctx, idnode):
assert idnode
- return ((10000 * idnode) + 5984, (10000 * idnode) + 5986)
+ if idnode <= 5 and not ctx["auto_ports"]:
+ return ((10000 * idnode) + 5984, (10000 * idnode) + 5986)
+ else:
+ return tuple(get_available_ports(2))
+
+
+def get_available_ports(num):
+ ports = []
+ while len(ports) < num:
+ with contextlib.closing(
+ socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ ) as soc:
+ soc.bind(("localhost", 0))
+ _, port = soc.getsockname()
+ if port not in ports:
+ ports.append(port)
+ return ports
+
+
+def get_node_config(ctx, node_idx):
+ node = "node{}".format(node_idx)
+ config_dir = os.path.join(ctx["devdir"], "lib", node, "etc")
+ config = ConfigParser()
+ config.read(
+ [os.path.join(config_dir, "default.ini"), os.path.join(config_dir, "local.ini")]
+ )
+ return config
+
+
+def backend_port(ctx, n):
+ return int(get_node_config(ctx, n).get("httpd", "port"))
+
+
+def cluster_port(ctx, n):
+ return int(get_node_config(ctx, n).get("chttpd", "port"))
def write_config(ctx, node, env):
@@ -310,7 +381,7 @@ def write_config(ctx, node, env):
def boot_haproxy(ctx):
if not ctx["with_haproxy"]:
return
- config = os.path.join(ctx["rootdir"], "rel", "haproxy.cfg")
+ config = os.path.join(ctx["devdir"], "lib", "haproxy.cfg")
cmd = [ctx["haproxy"], "-f", config]
logfname = os.path.join(ctx["devdir"], "logs", "haproxy.log")
log = open(logfname, "w")
@@ -425,7 +496,7 @@ def ensure_all_nodes_alive(ctx):
for num in range(ctx["N"]):
if status[num]:
continue
- local_port, _ = get_ports(num + ctx["node_number"])
+ local_port = cluster_port(ctx, num + 1)
url = "http://127.0.0.1:{0}/".format(local_port)
try:
check_node_alive(url)
@@ -512,12 +583,13 @@ def boot_node(ctx, node):
@log("Running cluster setup")
def cluster_setup(ctx):
- lead_port, _ = get_ports(1)
+ lead_port = cluster_port(ctx, 1)
if enable_cluster(ctx["N"], lead_port, *ctx["admin"]):
for num in range(1, ctx["N"]):
- node_port, _ = get_ports(num + 1)
+ node_port = cluster_port(ctx, num + 1)
+ node_name = ctx["nodes"][num]
enable_cluster(ctx["N"], node_port, *ctx["admin"])
- add_node(lead_port, node_port, *ctx["admin"])
+ add_node(lead_port, node_name, node_port, *ctx["admin"])
finish_cluster(lead_port, *ctx["admin"])
return lead_port
@@ -550,7 +622,7 @@ def enable_cluster(node_count, port, user, pswd):
return True
-def add_node(lead_port, node_port, user, pswd):
+def add_node(lead_port, node_name, node_port, user, pswd):
conn = httpclient.HTTPConnection("127.0.0.1", lead_port)
conn.request(
"POST",
@@ -560,6 +632,7 @@ def add_node(lead_port, node_port, user, pswd):
"action": "add_node",
"host": "127.0.0.1",
"port": node_port,
+ "name": node_name,
"username": user,
"password": pswd,
}
@@ -616,7 +689,7 @@ def generate_cookie():
def cluster_setup_with_admin_party(ctx):
- host, port = "127.0.0.1", 15986
+ host, port = "127.0.0.1", backend_port(ctx, 1)
for node in ctx["nodes"]:
body = "{}"
conn = httpclient.HTTPConnection(host, port)
@@ -625,7 +698,7 @@ def cluster_setup_with_admin_party(ctx):
if resp.status not in (200, 201, 202, 409):
print(("Failed to join %s into cluster: %s" % (node, resp.read())))
sys.exit(1)
- create_system_databases(host, 15984)
+ create_system_databases(host, cluster_port(ctx, 1))
def try_request(host, port, meth, path, success_codes, retries=10, retry_dt=1):
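
The --auto-ports path above uses a standard trick: binding a TCP socket to
port 0 lets the OS pick a free ephemeral port, which getsockname() reads
back. A minimal standalone sketch of the technique (Python 3, standard
library only; note the probed port is released on close, so another process
could still grab it before the node binds it):

    import contextlib
    import socket

    def probe_free_port(host="localhost"):
        with contextlib.closing(
            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ) as soc:
            soc.bind((host, 0))
            _, port = soc.getsockname()
            return port

    print(probe_free_port())
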
diff --git a/rel/haproxy.cfg b/rel/haproxy.cfg
index 45affaffe..540075761 100644
--- a/rel/haproxy.cfg
+++ b/rel/haproxy.cfg
@@ -42,6 +42,4 @@ frontend http-in
backend couchdbs
option httpchk GET /_up
http-check disable-on-404
- server couchdb1 127.0.0.1:15984 check inter 5s
- server couchdb2 127.0.0.1:25984 check inter 5s
- server couchdb3 127.0.0.1:35984 check inter 5s
+ <<server couchdb{node_idx} 127.0.0.1:{port} check inter 5s>>
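
The <<...>> marker above is a per-node template: generate_haproxy_config in
dev/run expands it once per node, so the default three-node dev cluster
reproduces exactly the three server lines this hunk removes. A standalone
sketch of the expansion (ports shown are the default cluster ports for
nodes 1-3):

    import re

    line = "    <<server couchdb{node_idx} 127.0.0.1:{port} check inter 5s>>"
    ports = {1: 15984, 2: 25984, 3: 35984}

    prefix, template, suffix = re.match(
        "(.*?)<<(.*?)>>(.*?)", line, re.S
    ).groups()
    for node_idx, port in ports.items():
        print(prefix + template.format(node_idx=node_idx, port=port) + suffix)
    #     server couchdb1 127.0.0.1:15984 check inter 5s
    #     server couchdb2 127.0.0.1:25984 check inter 5s
    #     server couchdb3 127.0.0.1:35984 check inter 5s
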
diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/chttpd_security_tests.erl
index 737a32e11..12a53acf2 100644
--- a/src/chttpd/test/chttpd_security_tests.erl
+++ b/src/chttpd/test/chttpd_security_tests.erl
@@ -116,6 +116,27 @@ all_test_() ->
}
}.
+security_object_validate_test_() ->
+ {
+ "chttpd security object validate tests",
+ {
+ setup,
+ fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ {
+ foreach,
+ fun setup/0, fun teardown/1,
+ [
+ fun should_return_ok_for_sec_obj_with_roles/1,
+ fun should_return_ok_for_sec_obj_with_names/1,
+ fun should_return_ok_for_sec_obj_with_roles_and_names/1,
+ fun should_return_error_for_sec_obj_with_incorrect_roles_and_names/1,
+ fun should_return_error_for_sec_obj_with_incorrect_roles/1,
+ fun should_return_error_for_sec_obj_with_incorrect_names/1
+ ]
+ }
+ }
+ }.
+
should_allow_admin_db_compaction([Url,_UsersUrl]) ->
?_assertEqual(true,
begin
@@ -126,7 +147,6 @@ should_allow_admin_db_compaction([Url,_UsersUrl]) ->
couch_util:get_value(<<"ok">>, InnerJson, undefined)
end).
-
should_allow_valid_password_to_create_user([_Url, UsersUrl]) ->
UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
\"type\": \"user\", \"roles\": [], \"password\": \"bar\"}",
@@ -207,3 +227,99 @@ should_disallow_anonymous_db_view_cleanup([Url,_UsersUrl]) ->
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
?_assertEqual(<<"unauthorized">>, ErrType).
+
+should_return_ok_for_sec_obj_with_roles([Url,_UsersUrl]) ->
+ SecurityUrl = lists:concat([Url, "/_security"]),
+ SecurityProperties = [
+ {<<"admins">>,{[{<<"roles">>,[<<?TEST_ADMIN>>]}]}},
+ {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ ],
+
+ Body = jiffy:encode({SecurityProperties}),
+ {ok, Status, _, _} = test_request:put(SecurityUrl,
+ [?CONTENT_JSON, ?AUTH], Body),
+ ?_assertEqual(200, Status).
+
+should_return_ok_for_sec_obj_with_names([Url,_UsersUrl]) ->
+ SecurityUrl = lists:concat([Url, "/_security"]),
+ SecurityProperties = [
+ {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
+ {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]}]}}
+ ],
+
+ Body = jiffy:encode({SecurityProperties}),
+ {ok, Status, _, _} = test_request:put(SecurityUrl,
+ [?CONTENT_JSON, ?AUTH], Body),
+ ?_assertEqual(200, Status).
+
+should_return_ok_for_sec_obj_with_roles_and_names([Url,_UsersUrl]) ->
+ SecurityUrl = lists:concat([Url, "/_security"]),
+ SecurityProperties = [
+ {<<"admins">>, {[{<<"names">>,[<<?TEST_ADMIN>>]},
+ {<<"roles">>,[<<?TEST_ADMIN>>]}]}},
+ {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]},
+ {<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ ],
+
+ Body = jiffy:encode({SecurityProperties}),
+ {ok, Status, _, _} = test_request:put(SecurityUrl,
+ [?CONTENT_JSON, ?AUTH], Body),
+ ?_assertEqual(200, Status).
+
+should_return_error_for_sec_obj_with_incorrect_roles_and_names(
+ [Url,_UsersUrl]) ->
+ SecurityUrl = lists:concat([Url, "/_security"]),
+ SecurityProperties = [
+ {<<"admins">>,{[{<<"names">>,[123]}]}},
+ {<<"members">>,{[{<<"roles">>,["foo"]}]}}
+ ],
+
+ Body = jiffy:encode({SecurityProperties}),
+ {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
+ [?CONTENT_JSON, ?AUTH], Body),
+ ResultJson = ?JSON_DECODE(RespBody),
+ [
+ ?_assertEqual(500, Status),
+ ?_assertEqual({[
+ {<<"error">>,<<"error">>},
+ {<<"reason">>,<<"no_majority">>}
+ ]}, ResultJson)
+ ].
+
+should_return_error_for_sec_obj_with_incorrect_roles([Url,_UsersUrl]) ->
+ SecurityUrl = lists:concat([Url, "/_security"]),
+ SecurityProperties = [
+ {<<"admins">>,{[{<<"roles">>,[?TEST_ADMIN]}]}},
+ {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ ],
+
+ Body = jiffy:encode({SecurityProperties}),
+ {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
+ [?CONTENT_JSON, ?AUTH], Body),
+ ResultJson = ?JSON_DECODE(RespBody),
+ [
+ ?_assertEqual(500, Status),
+ ?_assertEqual({[
+ {<<"error">>,<<"error">>},
+ {<<"reason">>,<<"no_majority">>}
+ ]}, ResultJson)
+ ].
+
+should_return_error_for_sec_obj_with_incorrect_names([Url,_UsersUrl]) ->
+ SecurityUrl = lists:concat([Url, "/_security"]),
+ SecurityProperties = [
+ {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
+ {<<"members">>,{[{<<"names">>,[?TEST_MEMBER]}]}}
+ ],
+
+ Body = jiffy:encode({SecurityProperties}),
+ {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
+ [?CONTENT_JSON, ?AUTH], Body),
+ ResultJson = ?JSON_DECODE(RespBody),
+ [
+ ?_assertEqual(500, Status),
+ ?_assertEqual({[
+ {<<"error">>,<<"error">>},
+ {<<"reason">>,<<"no_majority">>}
+ ]}, ResultJson)
+ ].
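
The same behavior these Erlang cases assert can be observed over plain HTTP:
a well-formed _security object is accepted with 200, while non-string entries
under names/roles are rejected and surface as a 500 no_majority error. A
hedged sketch, standard library only (the database name, port, and admin
credentials are illustrative, and the database must already exist):

    import base64
    import http.client
    import json

    auth = base64.b64encode(b"adm:pass").decode()
    headers = {"Content-Type": "application/json",
               "Authorization": "Basic " + auth}

    valid = {"admins": {"roles": ["_admin"]},
             "members": {"names": ["bob"]}}
    invalid = {"admins": {"names": [123]}}  # names must be strings

    conn = http.client.HTTPConnection("127.0.0.1", 15984)
    for sec_obj in (valid, invalid):
        conn.request("PUT", "/mydb/_security", json.dumps(sec_obj), headers)
        resp = conn.getresponse()
        print(resp.status, resp.read())
    # expected: 200 {"ok":true}, then 500
    # {"error":"error","reason":"no_majority"}
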
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 0d435c2ff..0ae164d9b 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -77,6 +77,7 @@
get_full_doc_info/2,
get_full_doc_infos/2,
get_missing_revs/2,
+ get_design_doc/2,
get_design_docs/1,
get_design_doc_count/1,
get_purge_infos/2,
@@ -608,6 +609,19 @@ get_db_info(Db) ->
],
{ok, InfoList}.
+get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) ->
+ DDocId = couch_util:normalize_ddoc_id(DDocId0),
+ DbName = mem3:dbname(ShardDbName),
+ {_, Ref} = spawn_monitor(fun() ->
+ exit(fabric:open_doc(DbName, DDocId, []))
+ end),
+ receive {'DOWN', Ref, _, _, Response} ->
+ Response
+ end;
+get_design_doc(#db{} = Db, DDocId0) ->
+ DDocId = couch_util:normalize_ddoc_id(DDocId0),
+ couch_db:open_doc_int(Db, DDocId, [ejson_body]).
+
get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
DbName = mem3:dbname(ShardDbName),
{_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
@@ -736,19 +750,23 @@ validate_security_object(SecProps) ->
% validate user input
validate_names_and_roles({Props}) when is_list(Props) ->
- case couch_util:get_value(<<"names">>,Props,[]) of
+ case couch_util:get_value(<<"names">>, Props, []) of
Ns when is_list(Ns) ->
[throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
Ns;
- _ -> throw("names must be a JSON list of strings")
+ _ ->
+ throw("names must be a JSON list of strings")
end,
- case couch_util:get_value(<<"roles">>,Props,[]) of
+ case couch_util:get_value(<<"roles">>, Props, []) of
Rs when is_list(Rs) ->
[throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
Rs;
- _ -> throw("roles must be a JSON list of strings")
+ _ ->
+ throw("roles must be a JSON list of strings")
end,
- ok.
+ ok;
+validate_names_and_roles(_) ->
+ throw("admins or members must be a JSON list of strings").
get_revs_limit(#db{} = Db) ->
couch_db_engine:get_revs_limit(Db).
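
The new catch-all clause matters: previously a non-object value for admins
or members failed to match the first clause and crashed with a
function_clause error instead of returning a clean validation message. A
hedged Python sketch of the rule all three clauses now enforce:

    def validate_names_and_roles(section):
        # section is the decoded value of "admins" or "members"
        if not isinstance(section, dict):
            raise ValueError("admins or members must be a JSON list of strings")
        for key in ("names", "roles"):
            values = section.get(key, [])
            if not isinstance(values, list) or any(
                not isinstance(v, str) for v in values
            ):
                raise ValueError("%s must be a JSON list of strings" % key)
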
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 3efec84a9..ddd24aab0 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -27,6 +27,7 @@
-export([reorder_results/2]).
-export([url_strip_password/1]).
-export([encode_doc_id/1]).
+-export([normalize_ddoc_id/1]).
-export([with_db/2]).
-export([rfc1123_date/0, rfc1123_date/1]).
-export([integer_to_boolean/1, boolean_to_integer/1]).
@@ -543,6 +544,10 @@ encode_doc_id(<<"_local/", Rest/binary>>) ->
encode_doc_id(Id) ->
url_encode(Id).
+normalize_ddoc_id(<<"_design/", _/binary>> = DDocId) ->
+ DDocId;
+normalize_ddoc_id(DDocId) when is_binary(DDocId) ->
+ <<"_design/", DDocId/binary>>.
with_db(DbName, Fun) when is_binary(DbName) ->
case couch_db:open_int(DbName, [?ADMIN_CTX]) of
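
normalize_ddoc_id lets callers pass a design doc id with or without the
_design/ prefix and always get the prefixed form back. The same logic,
sketched in Python:

    def normalize_ddoc_id(ddoc_id: str) -> str:
        if ddoc_id.startswith("_design/"):
            return ddoc_id
        return "_design/" + ddoc_id

    assert normalize_ddoc_id("foo") == "_design/foo"
    assert normalize_ddoc_id("_design/foo") == "_design/foo"
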
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
index 4718b562d..d3bcfe04b 100644
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ b/src/couch_mrview/src/couch_mrview_index.erl
@@ -226,16 +226,15 @@ verify_index_exists(DbName, Props) ->
if Type =/= <<"mrview">> -> false; true ->
DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
couch_util:with_db(DbName, fun(Db) ->
- {ok, DesignDocs} = couch_db:get_design_docs(Db),
- case get_ddoc(DbName, DesignDocs, DDocId) of
- #doc{} = DDoc ->
+ case couch_db:get_design_doc(Db, DDocId) of
+ {ok, #doc{} = DDoc} ->
{ok, IdxState} = couch_mrview_util:ddoc_to_mrst(
DbName, DDoc),
IdxSig = IdxState#mrst.sig,
SigInLocal = couch_util:get_value(
<<"signature">>, Props),
couch_index_util:hexsig(IdxSig) == SigInLocal;
- not_found ->
+ {not_found, _} ->
false
end
end)
@@ -245,25 +244,6 @@ verify_index_exists(DbName, Props) ->
end.
-get_ddoc(<<"shards/", _/binary>> = _DbName, DesignDocs, DDocId) ->
- DDocs = [couch_doc:from_json_obj(DD) || DD <- DesignDocs],
- case lists:keyfind(DDocId, #doc.id, DDocs) of
- #doc{} = DDoc -> DDoc;
- false -> not_found
- end;
-get_ddoc(DbName, DesignDocs, DDocId) ->
- couch_util:with_db(DbName, fun(Db) ->
- case lists:keyfind(DDocId, #full_doc_info.id, DesignDocs) of
- #full_doc_info{} = DDocInfo ->
- {ok, DDoc} = couch_db:open_doc_int(
- Db, DDocInfo, [ejson_body]),
- DDoc;
- false ->
- not_found
- end
- end).
-
-
ensure_local_purge_docs(DbName, DDocs) ->
couch_util:with_db(DbName, fun(Db) ->
lists:foreach(fun(DDoc) ->
diff --git a/test/elixir/README.md b/test/elixir/README.md
index e80df1f31..54de35929 100644
--- a/test/elixir/README.md
+++ b/test/elixir/README.md
@@ -33,7 +33,7 @@ X means done, - means partially
- [ ] Port conflicts.js
- [ ] Port cookie_auth.js
- [ ] Port copy_doc.js
- - [ ] Port delayed_commits.js
+ - [X] Port delayed_commits.js
- [ ] Port design_docs.js
- [ ] Port design_options.js
- [ ] Port design_paths.js
diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
index 302b8276a..934262216 100644
--- a/test/elixir/lib/couch.ex
+++ b/test/elixir/lib/couch.ex
@@ -50,6 +50,10 @@ defmodule Couch do
CouchDB library to power test suite.
"""
+ def process_url("http://" <> _ = url) do
+ url
+ end
+
def process_url(url) do
"http://127.0.0.1:15984" <> url
end
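
The extra process_url clause lets a test target an arbitrary absolute URL
(for example a specific node's port, as restart_node below needs) while
relative paths keep the default endpoint. The dispatch, sketched in Python:

    def process_url(url: str) -> str:
        if url.startswith("http://"):
            return url  # absolute URLs pass through untouched
        return "http://127.0.0.1:15984" + url
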
diff --git a/test/elixir/lib/couch/db_test.ex b/test/elixir/lib/couch/db_test.ex
index 538a99877..3bbfb7eaa 100644
--- a/test/elixir/lib/couch/db_test.ex
+++ b/test/elixir/lib/couch/db_test.ex
@@ -1,4 +1,6 @@
defmodule Couch.DBTest do
+ @moduledoc false
+
import ExUnit.Callbacks, only: [on_exit: 1]
import ExUnit.Assertions, only: [assert: 1, assert: 2]
@@ -237,4 +239,57 @@ defmodule Couch.DBTest do
opts = [pretty: true, width: 20, limit: :infinity, printable_limit: :infinity]
inspect(resp, opts)
end
+
+ def restart_cluster() do
+ resp = Couch.get("/_membership")
+ assert resp.status_code == 200
+ nodes = resp.body["all_nodes"]
+
+ nodes_ports =
+ Enum.reduce(nodes, [], fn node, acc ->
+ port = node_to_port(node)
+ [{node, port} | acc]
+ end)
+
+ tasks =
+ Enum.map(nodes_ports, fn {node, port} ->
+ Task.async(fn -> restart_node(node, port) end)
+ end)
+
+ Task.yield_many(tasks, length(nodes) * 5000)
+ end
+
+ def restart_node(node \\ "node1@127.0.0.1") do
+ port = node_to_port(node)
+ restart_node(node, port)
+ end
+
+ defp restart_node(node, port) do
+ url = "http://127.0.0.1:#{port}/_node/#{node}/_restart"
+ resp = Couch.post(url)
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["ok"]
+ # make sure the node went down; we assume the node can't bounce back
+ # quickly enough to introduce a race here
+ retry_until(fn -> !node_is_running(port) end)
+ # wait until the node is back
+ retry_until(fn -> node_is_running(port) end, 500, 10_000)
+ end
+
+ defp node_is_running(port) do
+ url = "http://127.0.0.1:#{port}/_up"
+ resp = Couch.get(url)
+
+ case HTTPotion.Response.success?(resp) do
+ true -> resp.status_code in 200..399
+ false -> false
+ end
+ end
+
+ defp node_to_port(node) do
+ url = "/_node/#{node}/_config/chttpd/port"
+ resp = Couch.get(url)
+ assert HTTPotion.Response.success?(resp)
+ resp.body
+ end
end
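
restart_node above is a poll-until pattern: trigger /_node/.../_restart,
wait for /_up to start failing (the node is down), then wait for it to
succeed again (the node is back). A hedged standalone sketch of the same
pattern (port, pause, and timeout values are illustrative):

    import time
    import urllib.error
    import urllib.request

    def node_is_running(port):
        try:
            with urllib.request.urlopen(
                "http://127.0.0.1:%d/_up" % port, timeout=1
            ) as resp:
                return 200 <= resp.status < 400
        except (urllib.error.URLError, OSError):
            return False

    def retry_until(pred, pause=0.5, timeout=10.0):
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if pred():
                return True
            time.sleep(pause)
        raise TimeoutError("condition was not met in time")

    # retry_until(lambda: not node_is_running(15984))  # node went down
    # retry_until(lambda: node_is_running(15984))      # node is back up
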
diff --git a/test/elixir/test/delayed_commits_test.exs b/test/elixir/test/delayed_commits_test.exs
new file mode 100644
index 000000000..e80d0bdfb
--- /dev/null
+++ b/test/elixir/test/delayed_commits_test.exs
@@ -0,0 +1,31 @@
+defmodule DelayedCommitsTest do
+ use CouchTestCase
+
+ @moduledoc """
+ Test CouchDB delayed commits
+ This is a port of the delayed_commits.js suite
+
+ Note that delayed_commits is deprecated in 2.0, so this is a minimal
+ test to show it still works. delayed_commits will be removed in 3.0.
+ """
+
+ @tag config: [
+ {"couchdb", "delayed_commits", "true"}
+ ]
+ @tag :with_db
+ test "delayed commit", context do
+ db_name = context[:db_name]
+ doc_id = "doc-1"
+ resp = Couch.put("/#{db_name}/#{doc_id}", body: %{a: 2, b: 4})
+ assert resp.status_code in 201..204
+ assert resp.body["ok"]
+
+ resp = Couch.get("/#{db_name}/#{doc_id}")
+ assert resp.status_code == 200, "The new doc should be in the database"
+
+ restart_cluster()
+
+ resp = Couch.get("/#{db_name}/#{doc_id}")
+ assert resp.status_code == 404, "The new doc should be missing"
+ end
+end
diff --git a/test/elixir/test/support/couch_test_case.ex b/test/elixir/test/support/couch_test_case.ex
index 02bee46af..2d29425f0 100644
--- a/test/elixir/test/support/couch_test_case.ex
+++ b/test/elixir/test/support/couch_test_case.ex
@@ -1,4 +1,6 @@
defmodule CouchTestCase do
+ @moduledoc false
+
use ExUnit.CaseTemplate
using do