author    Adam Kocoloski <kocolosk@apache.org>  2018-11-10 09:34:47 -0500
committer GitHub <noreply@github.com>           2018-11-10 09:34:47 -0500
commit    e5afe23e415068998bfc45089994331fbc5ba258 (patch)
tree      a7f9272eb39fbb3e789fb40e078c0aee2eb93d14
parent    83f2ea55294965a457b88ef33fa3e9428a2f8908 (diff)
parent    00b28c265d97df675b725cd68897dc371cbd7168 (diff)
download  couchdb-mem3-seedlist.tar.gz

Merge branch 'master' into mem3-seedlist
-rw-r--r--  .gitignore  1
-rw-r--r--  CONTRIBUTING.md  2
-rw-r--r--  Makefile  5
-rwxr-xr-x  configure  2
-rwxr-xr-x  dev/run  51
-rw-r--r--  rebar.config.script  4
-rw-r--r--  rel/files/couchdb.cmd.in (renamed from rel/overlay/bin/couchdb.cmd)  3
-rwxr-xr-x  rel/files/couchdb.in (renamed from rel/overlay/bin/couchdb)  4
-rwxr-xr-x  rel/overlay/bin/couchup  30
-rw-r--r--  rel/overlay/etc/default.ini  55
-rw-r--r--  rel/overlay/etc/local.ini  6
-rw-r--r--  rel/reltool.config  4
-rw-r--r--  src/chttpd/src/chttpd_db.erl  28
-rw-r--r--  src/chttpd/src/chttpd_misc.erl  5
-rw-r--r--  src/chttpd/src/chttpd_sup.erl  33
-rw-r--r--  src/chttpd/test/chttpd_purge_tests.erl  33
-rw-r--r--  src/chttpd/test/chttpd_view_test.erl  1
-rw-r--r--  src/chttpd/test/chttpd_welcome_test.erl  20
-rw-r--r--  src/couch/priv/couch_js/help.h  1
-rw-r--r--  src/couch/src/couch.app.src  36
-rw-r--r--  src/couch/src/couch_bt_engine_header.erl  8
-rw-r--r--  src/couch/src/couch_db.erl  163
-rw-r--r--  src/couch/src/couch_db_updater.erl  108
-rw-r--r--  src/couch/src/couch_doc.erl  2
-rw-r--r--  src/couch/src/couch_httpd.erl  17
-rw-r--r--  src/couch/src/couch_httpd_proxy.erl  428
-rw-r--r--  src/couch/src/couch_httpd_vhost.erl  16
-rw-r--r--  src/couch/src/couch_os_daemons.erl  394
-rw-r--r--  src/couch/src/couch_proc_manager.erl  46
-rw-r--r--  src/couch/src/couch_secondary_sup.erl  32
-rw-r--r--  src/couch/test/couch_bt_engine_upgrade_tests.erl  19
-rw-r--r--  src/couch/test/couchdb_http_proxy_tests.erl  456
-rw-r--r--  src/couch/test/couchdb_os_daemons_tests.erl  259
-rw-r--r--  src/couch/test/couchdb_os_proc_pool.erl  7
-rw-r--r--  src/couch/test/couchdb_update_conflicts_tests.erl  64
-rw-r--r--  src/couch_log/src/couch_log_formatter.erl  52
-rw-r--r--  src/couch_log/src/couch_log_sup.erl  2
-rw-r--r--  src/couch_log/test/couch_log_formatter_test.erl  83
-rw-r--r--  src/couch_mrview/src/couch_mrview_compactor.erl  4
-rw-r--r--  src/couch_replicator/src/couch_replicator.hrl  3
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler.erl  18
-rw-r--r--  src/couch_replicator/src/couch_replicator_scheduler_job.erl  22
-rw-r--r--  src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl  160
-rw-r--r--  src/mango/Makefile  2
-rw-r--r--  src/mango/src/mango_cursor_view.erl  8
-rw-r--r--  src/mango/src/mango_native_proc.erl  8
-rw-r--r--  src/mango/test/01-index-crud-test.py  35
-rw-r--r--  src/mango/test/mango.py  6
-rw-r--r--  test/elixir/.gitignore  2
-rw-r--r--  test/elixir/Makefile  2
-rw-r--r--  test/elixir/README.md  104
-rw-r--r--  test/elixir/config/config.exs  30
-rw-r--r--  test/elixir/config/test.exs  3
-rw-r--r--  test/elixir/lib/couch.ex  171
-rw-r--r--  test/elixir/mix.exs  30
-rw-r--r--  test/elixir/mix.lock  3
-rwxr-xr-x  test/elixir/run  4
-rw-r--r--  test/elixir/test/all_docs_test.exs  132
-rw-r--r--  test/elixir/test/basics_test.exs  282
-rw-r--r--  test/elixir/test/batch_save_test.exs  42
-rw-r--r--  test/elixir/test/bulk_docs_test.exs  152
-rw-r--r--  test/elixir/test/compact_test.exs  83
-rw-r--r--  test/elixir/test/config_test.exs  149
-rw-r--r--  test/elixir/test/conflicts_test.exs  102
-rw-r--r--  test/elixir/test/data/lorem.txt  103
-rw-r--r--  test/elixir/test/data/lorem_b64.txt  1
-rw-r--r--  test/elixir/test/reduce_test.exs  415
-rw-r--r--  test/elixir/test/replication_test.exs  1706
-rw-r--r--  test/elixir/test/rewrite_test.exs  339
-rw-r--r--  test/elixir/test/security_validation_test.exs  310
-rw-r--r--  test/elixir/test/test_helper.exs  237
-rw-r--r--  test/elixir/test/uuids_test.exs  94
-rw-r--r--  test/elixir/test/view_collation_test.exs  133
-rwxr-xr-x  test/javascript/run  2
-rw-r--r--  test/javascript/tests/config.js  3
75 files changed, 5530 insertions(+), 1850 deletions(-)
diff --git a/.gitignore b/.gitignore
index 088303039..00d1b93eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,6 +18,7 @@ dev/lib/
dev/logs/
ebin/
erl_crash.dump
+erln8.config
install.mk
rel/*.config
rel/couchdb
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e345ea487..cd3a4437c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -279,7 +279,7 @@ without needing any other steps like setting git upstreams! :sparkles:
## Thanks
-Special thanks to [Hoodie][https://github.com/hoodiehq/hoodie] for the great
+Special thanks to [Hoodie](https://github.com/hoodiehq/hoodie) for the great
CONTRIBUTING.md template.
[1]: http://mail-archives.apache.org/mod_mbox/couchdb-user/
diff --git a/Makefile b/Makefile
index 0ab7b4eed..4107c84f3 100644
--- a/Makefile
+++ b/Makefile
@@ -170,6 +170,11 @@ soak-eunit: couch
@$(REBAR) setup_eunit 2> /dev/null
while [ $$? -eq 0 ] ; do $(REBAR) -r eunit $(EUNIT_OPTS) ; done
+.PHONY: elixir
+elixir:
+ @rm -rf dev/lib
+ @dev/run -a adm:pass --no-eval test/elixir/run
+
.PHONY: javascript
# target: javascript - Run JavaScript test suites or specific ones defined by suites option
javascript: devclean
diff --git a/configure b/configure
index 370c964ae..b9a021d53 100755
--- a/configure
+++ b/configure
@@ -209,7 +209,7 @@ EOF
install_local_rebar() {
if [ ! -x "${rootdir}/bin/rebar" ]; then
if [ ! -d "${rootdir}/src/rebar" ]; then
- git clone --depth 1 --branch 2.6.0-couchdb https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar
+ git clone --depth 1 https://github.com/apache/couchdb-rebar.git ${rootdir}/src/rebar
fi
make -C ${rootdir}/src/rebar
mv ${rootdir}/src/rebar/rebar ${rootdir}/bin/rebar
diff --git a/dev/run b/dev/run
index d105140b7..5bf5fc0f5 100755
--- a/dev/run
+++ b/dev/run
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -132,6 +132,8 @@ def setup_argparse():
help='Optional key=val config overrides. Can be repeated')
parser.add_option('--degrade-cluster', dest="degrade_cluster",type=int, default=0,
help='The number of nodes that should be stopped after cluster config')
+ parser.add_option('--no-eval', action='store_true', default=False,
+ help='Do not eval subcommand output')
return parser.parse_args()
@@ -153,6 +155,7 @@ def setup_context(opts, args):
'haproxy': opts.haproxy,
'haproxy_port': opts.haproxy_port,
'config_overrides': opts.config_overrides,
+ 'no_eval': opts.no_eval,
'reset_logs': True,
'procs': []}
@@ -270,16 +273,6 @@ def boot_haproxy(ctx):
def hack_default_ini(ctx, node, contents):
- # Replace couchjs command
- couchjs = os.path.join(ctx['rootdir'], "src", "couch", "priv", "couchjs")
- mainjs = os.path.join(ctx['rootdir'], "share", "server", "main.js")
- coffeejs = os.path.join(ctx['rootdir'], "share", "server", "main-coffee.js")
-
- repl = toposixpath("javascript = %s %s" % (couchjs, mainjs))
- contents = re.sub("(?m)^javascript.*$", repl, contents)
-
- repl = toposixpath("coffeescript = %s %s" % (couchjs, coffeejs))
- contents = re.sub("(?m)^coffeescript.*$", repl, contents)
if ctx['enable_erlang_views']:
contents = re.sub(
@@ -410,15 +403,29 @@ def check_node_alive(url):
if error is not None:
raise error
+def set_boot_env(ctx):
+
+ # fudge default query server paths
+ couchjs = os.path.join(ctx['rootdir'], "src", "couch", "priv", "couchjs")
+ mainjs = os.path.join(ctx['rootdir'], "share", "server", "main.js")
+ coffeejs = os.path.join(ctx['rootdir'], "share", "server", "main-coffee.js")
+
+ qs_javascript = toposixpath("%s %s" % (couchjs, mainjs))
+ qs_coffescript = toposixpath("%s %s" % (couchjs, coffeejs))
+
+ os.environ['COUCHDB_QUERY_SERVER_JAVASCRIPT'] = qs_javascript
+ os.environ['COUCHDB_QUERY_SERVER_COFFEESCRIPT'] = qs_coffescript
@log('Start node {node}')
def boot_node(ctx, node):
erl_libs = os.path.join(ctx['rootdir'], "src")
+ set_boot_env(ctx)
env = os.environ.copy()
env["ERL_LIBS"] = os.pathsep.join([erl_libs])
node_etcdir = os.path.join(ctx['devdir'], "lib", node, "etc")
reldir = os.path.join(ctx['rootdir'], "rel")
+
cmd = [
"erl",
"-args_file", os.path.join(node_etcdir, "vm.args"),
@@ -569,15 +576,19 @@ def join(ctx, lead_port, user, password):
@log('Exec command {cmd}')
def run_command(ctx, cmd):
- p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sys.stderr)
- while True:
- line = p.stdout.readline()
- if not line:
- break
- eval(line)
- p.wait()
- exit(p.returncode)
-
+ if ctx['no_eval']:
+ p = sp.Popen(cmd, shell=True)
+ p.wait()
+ exit(p.returncode)
+ else:
+ p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sys.stderr)
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ eval(line)
+ p.wait()
+ exit(p.returncode)
@log('Restart all nodes')
def reboot_nodes(ctx):
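
The new --no-eval switch makes run_command stream the subcommand's output directly instead of eval-ing each stdout line as Python, which is what lets a non-Python test driver such as test/elixir/run be launched against a running cluster. The Makefile target above invokes it as:

    dev/run -a adm:pass --no-eval test/elixir/run
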
diff --git a/rebar.config.script b/rebar.config.script
index 3bb1a584c..65c3a8a04 100644
--- a/rebar.config.script
+++ b/rebar.config.script
@@ -48,11 +48,11 @@ SubDirs = [
DepDescs = [
%% Independent Apps
-{config, "config", {tag, "1.0.4"}},
+{config, "config", {tag, "2.1.3"}},
{b64url, "b64url", {tag, "1.0.1"}},
{ets_lru, "ets-lru", {tag, "1.0.0"}},
{khash, "khash", {tag, "1.0.1"}},
-{snappy, "snappy", {tag, "CouchDB-1.0.1"}},
+{snappy, "snappy", {tag, "CouchDB-1.0.2"}},
{ioq, "ioq", {tag, "1.0.1"}},
%% Non-Erlang deps
diff --git a/rel/overlay/bin/couchdb.cmd b/rel/files/couchdb.cmd.in
index 5e5f2cfe6..9438872c6 100644
--- a/rel/overlay/bin/couchdb.cmd
+++ b/rel/files/couchdb.cmd.in
@@ -25,6 +25,9 @@ set EMU=beam
set PROGNAME=%~n0
set PATH=%PATH%;%COUCHDB_BIN_DIR%
+set COUCHDB_QUERY_SERVER_JAVASCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main.js"
+set COUCHDB_QUERY_SERVER_COFFEESCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js"
+
"%BINDIR%\erl" -boot "%ROOTDIR%\releases\%APP_VSN%\couchdb" ^
-args_file "%ROOTDIR%\etc\vm.args" ^
-config "%ROOTDIR%\releases\%APP_VSN%\sys.config" %*
diff --git a/rel/overlay/bin/couchdb b/rel/files/couchdb.in
index a9e6e9bea..aae179aa7 100755
--- a/rel/overlay/bin/couchdb
+++ b/rel/files/couchdb.in
@@ -26,10 +26,12 @@ export BINDIR="$ROOTDIR/erts-$ERTS_VSN/bin"
export EMU=beam
export PROGNAME=`echo $0 | sed 's/.*\///'`
+export COUCHDB_QUERY_SERVER_JAVASCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main.js"
+export COUCHDB_QUERY_SERVER_COFFEESCRIPT="{{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js"
+
ARGS_FILE="${COUCHDB_ARGS_FILE:-$ROOTDIR/etc/vm.args}"
SYSCONFIG_FILE="${COUCHDB_SYSCONFIG_FILE:-$ROOTDIR/releases/$APP_VSN/sys.config}"
exec "$BINDIR/erlexec" -boot "$ROOTDIR/releases/$APP_VSN/couchdb" \
-args_file "${ARGS_FILE}" \
-config "${SYSCONFIG_FILE}" "$@"
-
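
Both launcher scripts now export COUCHDB_QUERY_SERVER_JAVASCRIPT and COUCHDB_QUERY_SERVER_COFFEESCRIPT, replacing the [query_servers] ini section that this commit removes from default.ini. A hypothetical Erlang helper showing how a consumer could resolve such a variable — the COUCHDB_QUERY_SERVER_<LANGUAGE> naming comes from the scripts above, but this lookup is a sketch, not a claim about how couch_proc_manager actually consumes it:

    %% Sketch: resolve a query-server command line from the environment
    %% variables exported by bin/couchdb above.
    -module(qs_env_sketch).
    -export([server_command/1]).

    server_command(Language) when is_list(Language) ->
        Var = "COUCHDB_QUERY_SERVER_" ++ string:to_upper(Language),
        case os:getenv(Var) of
            false -> {error, not_configured};
            Cmd   -> {ok, Cmd}  % e.g. "couchjs .../share/server/main.js"
        end.
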
diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup
index 6532170aa..75b7d7e94 100755
--- a/rel/overlay/bin/couchup
+++ b/rel/overlay/bin/couchup
@@ -172,6 +172,25 @@ def _put_filter(args, db=None):
print(exc.response.text)
exit(1)
+def _do_security(args, db=None):
+ """Copies the _security object from source to target DB."""
+ try:
+ req = requests.get(
+ 'http://127.0.0.1:{}/{}/_security'.format(
+ args['local_port'], db),
+ auth=args['creds'])
+ req.raise_for_status()
+ security_doc = _tojson(req)
+ req = requests.put(
+ 'http://127.0.0.1:{}/{}/_security'.format(
+ args['clustered_port'], db),
+ data=json.dumps(security_doc),
+ auth=args['creds'])
+ req.raise_for_status()
+ except requests.exceptions.HTTPError as exc:
+ print(exc.response.text)
+ exit(1)
+
def _replicate(args):
args = _args(args)
if args['all_dbs']:
@@ -229,6 +248,11 @@ def _replicate(args):
if req.get('no_changes'):
if not args['quiet']:
print("No changes, replication is caught up.")
+
+ if not args['quiet']:
+ print('Copying _security object for ' + db + '...')
+ _do_security(args, db)
+
if not args['quiet']:
print("Replication complete.")
@@ -474,7 +498,11 @@ def main(argv):
parser_delete.set_defaults(func=_delete)
args = parser.parse_args(argv[1:])
- args.func(args)
+ try:
+ args.func(args)
+ except AttributeError:
+ parser.print_help()
+ sys.exit(0)
if __name__ == '__main__':
main(sys.argv)
diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index e29977ca3..edaebf9e2 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -139,9 +139,7 @@ database_prefix = userdb-
port = {{backend_port}}
bind_address = 127.0.0.1
authentication_handlers = {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
-default_handler = {couch_httpd_db, handle_request}
secure_rewrites = true
-vhost_global_handlers = _utils, _uuids, _session, _users
allow_jsonp = false
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
@@ -269,13 +267,9 @@ credentials = false
; List of hosts separated by a comma. * means accept all
; hosts =
-[query_servers]
-javascript = {{prefix}}/bin/couchjs {{prefix}}/share/server/main.js
-coffeescript = {{prefix}}/bin/couchjs {{prefix}}/share/server/main-coffee.js
-
-; enable mango query engine
[native_query_servers]
-query = {mango_native_proc, start_link, []}
+; erlang query server
+; enable_erlang_query_server = false
; Changing reduce_limit to false will disable reduce_limit.
; If you think you're hitting reduce_limit with a "good" reduce function,
@@ -290,16 +284,6 @@ os_process_limit = 100
; "infinity" is also a valid configuration value.
;group_info_timeout = 5000
-[daemons]
-index_server={couch_index_server, start_link, []}
-query_servers={couch_proc_manager, start_link, []}
-vhosts={couch_httpd_vhost, start_link, []}
-httpd={couch_httpd, start_link, []}
-uuids={couch_uuids, start, []}
-auth_cache={couch_auth_cache, start_link, []}
-os_daemons={couch_os_daemons, start_link, []}
-compaction_daemon={couch_compaction_daemon, start_link, []}
-
[mango]
; Set to true to disable the "index all fields" text index, which can lead
; to out of memory issues when users have documents with nested array fields.
@@ -310,41 +294,6 @@ compaction_daemon={couch_compaction_daemon, start_link, []}
[indexers]
couch_mrview = true
-[httpd_global_handlers]
-/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
-favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "{{prefix}}/share/www"}
-
-_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "{{prefix}}/share/www"}
-_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
-_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
-_config = {couch_httpd_misc_handlers, handle_config_req}
-_replicate = {couch_replicator_httpd, handle_req}
-_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
-_stats = {couch_stats_httpd, handle_stats_req}
-_session = {couch_httpd_auth, handle_session_req}
-_plugins = {couch_plugins_httpd, handle_req}
-_system = {chttpd_misc, handle_system_req}
-
-[httpd_db_handlers]
-_all_docs = {couch_mrview_http, handle_all_docs_req}
-_local_docs = {couch_mrview_http, handle_local_docs_req}
-_design_docs = {couch_mrview_http, handle_design_docs_req}
-_changes = {couch_httpd_db, handle_db_changes_req}
-_compact = {couch_httpd_db, handle_compact_req}
-_design = {couch_httpd_db, handle_design_req}
-_temp_view = {couch_mrview_http, handle_temp_view_req}
-_view_cleanup = {couch_mrview_http, handle_cleanup_req}
-
-[httpd_design_handlers]
-_compact = {couch_mrview_http, handle_compact_req}
-_info = {couch_mrview_http, handle_info_req}
-_list = {couch_mrview_show, handle_view_list_req}
-_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
-_show = {couch_mrview_show, handle_doc_show_req}
-_update = {couch_mrview_show, handle_doc_update_req}
-_view = {couch_mrview_http, handle_view_req}
-_view_changes = {couch_mrview_http, handle_view_changes_req}
-
[uuids]
; Known algorithms:
; random - 128 bits of random awesome
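
Three whole sections leave default.ini here: [query_servers] (replaced by the COUCHDB_QUERY_SERVER_* environment variables set in the new launcher scripts), [daemons] (those processes are presumably started by their supervisors directly now), and the three [httpd_*_handlers] tables (moved into the couch application environment; see the couch.app.src hunk below). The one knob that stays configurable is the Erlang query server, which per the new comment would be switched on like this (my reading of the commented default, not a line from the patch):

    [native_query_servers]
    enable_erlang_query_server = true
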
diff --git a/rel/overlay/etc/local.ini b/rel/overlay/etc/local.ini
index ea5467c9a..ecc97f466 100644
--- a/rel/overlay/etc/local.ini
+++ b/rel/overlay/etc/local.ini
@@ -52,12 +52,8 @@
; Basic realm="server" in order to prevent you getting logged out.
; require_valid_user = false
-[daemons]
-; enable SSL support by uncommenting the following line and supply the PEM's below.
-; the default ssl port CouchDB listens on is 6984
-; httpsd = {chttpd, start_link, [https]}
-
[ssl]
+;enable = true
;cert_file = /full/path/to/server_cert.pem
;key_file = /full/path/to/server_key.pem
;password = somepassword
diff --git a/rel/reltool.config b/rel/reltool.config
index 2c55d0900..bf4ae448b 100644
--- a/rel/reltool.config
+++ b/rel/reltool.config
@@ -125,5 +125,7 @@
{copy, "files/sys.config", "releases/\{\{rel_vsn\}\}/sys.config"},
{copy, "files/vm.args", "releases/\{\{rel_vsn\}\}/vm.args"},
{template, "overlay/etc/default.ini", "etc/default.ini"},
- {template, "overlay/etc/vm.args", "etc/vm.args"}
+ {template, "overlay/etc/vm.args", "etc/vm.args"},
+ {template, "files/couchdb.in", "bin/couchdb"},
+ {template, "files/couchdb.cmd.in", "bin/couchdb.cmd"}
]}.
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index bd01b93bd..d46b5bbf2 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -420,19 +420,16 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
_ ->
Options = [{user_ctx,Ctx}, {w,W}]
end,
+ Docs = lists:map(fun(JsonObj) ->
+ Doc = couch_doc:from_json_obj_validate(JsonObj),
+ validate_attachment_names(Doc),
+ case Doc#doc.id of
+ <<>> -> Doc#doc{id = couch_uuids:new()};
+ _ -> Doc
+ end
+ end, DocsArray),
case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
true ->
- Docs = lists:map(
- fun(JsonObj) ->
- Doc = couch_doc:from_json_obj_validate(JsonObj),
- validate_attachment_names(Doc),
- Id = case Doc#doc.id of
- <<>> -> couch_uuids:new();
- Id0 -> Id0
- end,
- Doc#doc{id=Id}
- end,
- DocsArray),
Options2 =
case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
true -> [all_or_nothing|Options];
@@ -455,8 +452,6 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req,
send_json(Req, 417, ErrorsJson)
end;
false ->
- Docs = [couch_doc:from_json_obj_validate(JsonObj) || JsonObj <- DocsArray],
- [validate_attachment_names(D) || D <- Docs],
case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of
{ok, Errors} ->
ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
@@ -524,8 +519,11 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
true -> ok
end,
couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)),
- {ok, Results} = fabric:purge_docs(Db, IdsRevs2, Options),
- {Code, Json} = purge_results_to_json(IdsRevs2, Results),
+ Results2 = case fabric:purge_docs(Db, IdsRevs2, Options) of
+ {ok, Results} -> Results;
+ {accepted, Results} -> Results
+ end,
+ {Code, Json} = purge_results_to_json(IdsRevs2, Results2),
send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]});
db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index f5772ba8b..fc03fb512 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -50,6 +50,7 @@ handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
{couchdb, WelcomeMessage},
{version, list_to_binary(couch_server:get_version())},
{git_sha, list_to_binary(couch_server:get_git_sha())},
+ {uuid, couch_server:get_uuid()},
{features, config:features()}
] ++ case config:get("vendor") of
[] ->
@@ -242,7 +243,9 @@ cancel_replication(PostBody, Ctx) ->
{error, badrpc};
Else ->
% Unclear what to do here -- pick the first error?
- hd(Else)
+ % Except try ignoring any {error, not_found} responses
+ % because we'll always get two of those
+ hd(Else -- [{error, not_found}])
end
end.
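
Note that Erlang's -- operator removes one occurrence per element of its right-hand list, so a single {error, not_found} is dropped before hd/1 picks a result, and any remaining answer wins:

    1> [{error, not_found}, {error, nodedown}] -- [{error, not_found}].
    [{error,nodedown}]
    2> [{error, not_found}, {error, not_found}] -- [{error, not_found}].
    [{error,not_found}]

So when every node answered not_found, the caller still gets {error, not_found} back, which matches the comment's intent.
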
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index fe84b67eb..369248ea6 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -80,21 +80,18 @@ maybe_replace(Key, Value, Settings) ->
end.
lru_opts() ->
- case config:get("chttpd_auth_cache", "max_objects") of
- MxObjs when is_integer(MxObjs), MxObjs > 0 ->
- [{max_objects, MxObjs}];
- _ ->
- []
- end ++
- case config:get("chttpd_auth_cache", "max_size", "104857600") of
- MxSize when is_integer(MxSize), MxSize > 0 ->
- [{max_size, MxSize}];
- _ ->
- []
- end ++
- case config:get("chttpd_auth_cache", "max_lifetime", "600000") of
- MxLT when is_integer(MxLT), MxLT > 0 ->
- [{max_lifetime, MxLT}];
- _ ->
- []
- end.
+ lists:foldl(fun append_if_set/2, [], [
+ {max_objects, config:get_integer("chttpd_auth_cache", "max_objects", 0)},
+ {max_size, config:get_integer("chttpd_auth_cache", "max_size", 104857600)},
+ {max_lifetime, config:get_integer("chttpd_auth_cache", "max_lifetime", 600000)}
+ ]).
+
+append_if_set({Key, Value}, Opts) when Value > 0 ->
+ [{Key, Value} | Opts];
+append_if_set({Key, 0}, Opts) ->
+ Opts;
+append_if_set({Key, Value}, Opts) ->
+ couch_log:error(
+ "The value for `~s` should be string convertable "
+ "to integer which is >= 0 (got `~p`)", [Key, Value]),
+ Opts.
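
With the defaults shown (max_objects = 0, max_size = 104857600, max_lifetime = 600000), the zero-valued max_objects entry is dropped, and since each kept option is prepended by the fold, the result comes out in reverse input order. Illustrative only, as lru_opts/0 is not exported:

    1> chttpd_sup:lru_opts().
    [{max_lifetime,600000},{max_size,104857600}]
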
diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/chttpd_purge_tests.erl
index 686552590..af1bd0b1c 100644
--- a/src/chttpd/test/chttpd_purge_tests.erl
+++ b/src/chttpd/test/chttpd_purge_tests.erl
@@ -70,6 +70,7 @@ purge_test_() ->
[
fun test_empty_purge_request/1,
fun test_ok_purge_request/1,
+ fun test_accepted_purge_request/1,
fun test_partial_purge_request/1,
fun test_mixed_purge_request/1,
fun test_overmany_ids_or_revs_purge_request/1,
@@ -135,6 +136,38 @@ test_ok_purge_request(Url) ->
end).
+test_accepted_purge_request(Url) ->
+ ?_test(begin
+ {ok, _, _, Body} = create_doc(Url, "doc1"),
+ {Json} = ?JSON_DECODE(Body),
+ Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
+ IdsRevsEJson = {[
+ {<<"doc1">>, [Rev1]}
+ ]},
+ IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
+ meck:new(fabric, [passthrough]),
+ meck:expect(fabric, purge_docs,
+ fun(_, _, _) -> {accepted,[{accepted,[{1,
+ <<57,27,64,134,152,18,73,243,40,1,141,214,135,104,79,188>>}]}]}
+ end
+ ),
+ {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH], IdsRevs),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ meck:unload(fabric),
+ ?assert(Status =:= 202),
+ ?assertEqual(
+ {[
+ {<<"purge_seq">>, null},
+ {<<"purged">>, {[
+ {<<"doc1">>, [Rev1]}
+ ]}}
+ ]},
+ ResultJson
+ )
+ end).
+
+
test_partial_purge_request(Url) ->
?_test(begin
{ok, _, _, Body} = create_doc(Url, "doc1"),
diff --git a/src/chttpd/test/chttpd_view_test.erl b/src/chttpd/test/chttpd_view_test.erl
index 3457c6f30..114eb089b 100644
--- a/src/chttpd/test/chttpd_view_test.erl
+++ b/src/chttpd/test/chttpd_view_test.erl
@@ -31,6 +31,7 @@ setup() ->
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
+ os:putenv("COUCHDB_QUERY_SERVER_JAVASCRIPT", "../../../bin/couchjs ../../../share/server/main.js"),
Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
create_db(Url),
Url.
diff --git a/src/chttpd/test/chttpd_welcome_test.erl b/src/chttpd/test/chttpd_welcome_test.erl
index b737abd7a..e427f4dff 100644
--- a/src/chttpd/test/chttpd_welcome_test.erl
+++ b/src/chttpd/test/chttpd_welcome_test.erl
@@ -45,12 +45,30 @@ welcome_test_() ->
fun setup/0, fun teardown/1,
[
fun should_have_version/1,
- fun should_have_features/1
+ fun should_have_features/1,
+ fun should_have_uuid/1
]
}
}
}.
+should_have_uuid(Url) ->
+ ?_test(begin
+ {ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
+ ?assertEqual(200, Status),
+ {Json} = ?JSON_DECODE(Body),
+ CouchDB = couch_util:get_value(<<"couchdb">>, Json, undefined),
+ Uuid = couch_util:get_value(<<"uuid">>, Json, undefined),
+ Features = couch_util:get_value(<<"features">>, Json, undefined),
+ Sha = couch_util:get_value(<<"git_sha">>, Json, undefined),
+ ?assertNotEqual(Sha, undefined),
+ ?assertEqual(<<"Welcome">>, CouchDB),
+ RealUuid = couch_server:get_uuid(),
+
+ ?assertEqual(RealUuid, Uuid),
+ ?assert(is_list(Features))
+ end).
+
should_have_version(Url) ->
?_test(begin
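
With this change the welcome body carries the node's UUID alongside the version, git SHA and feature list; a GET / response would look roughly like the following (every value here is hypothetical):

    {
        "couchdb": "Welcome",
        "version": "2.2.0",
        "git_sha": "e5afe23e4",
        "uuid": "85fb71bf700c17267fef77535820e371",
        "features": ["scheduler"],
        "vendor": {"name": "The Apache Software Foundation"}
    }
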
diff --git a/src/couch/priv/couch_js/help.h b/src/couch/priv/couch_js/help.h
index c6d76b257..678651fd3 100644
--- a/src/couch/priv/couch_js/help.h
+++ b/src/couch/priv/couch_js/help.h
@@ -52,6 +52,7 @@ static const char USAGE_TEMPLATE[] =
" should not be enabled for production systems)\n"
" -S SIZE specify that the runtime should allow at\n"
" most SIZE bytes of memory to be allocated\n"
+ " default is 64 MiB\n"
" -u FILE path to a .uri file containing the address\n"
" (or addresses) of one or more servers\n"
" --eval Enable runtime code evaluation (dangerous!)\n"
diff --git a/src/couch/src/couch.app.src b/src/couch/src/couch.app.src
index 6af213624..208938745 100644
--- a/src/couch/src/couch.app.src
+++ b/src/couch/src/couch.app.src
@@ -48,5 +48,41 @@
ioq,
couch_stats,
hyper
+ ]},
+ {env, [
+ { httpd_global_handlers, [
+ {"/", "{couch_httpd_misc_handlers, handle_welcome_req, <<\"Welcome\">>}"},
+ {"favicon.ico", "{couch_httpd_misc_handlers, handle_favicon_req, \"{{prefix}}/share/www\"}"},
+ {"_utils", "{couch_httpd_misc_handlers, handle_utils_dir_req, \"{{prefix}}/share/www\"}"},
+ {"_all_dbs", "{couch_httpd_misc_handlers, handle_all_dbs_req}"},
+ {"_active_tasks", "{couch_httpd_misc_handlers, handle_task_status_req}"},
+ {"_config", "{couch_httpd_misc_handlers, handle_config_req}"},
+ {"_replicate", "{couch_replicator_httpd, handle_req}"},
+ {"_uuids", "{couch_httpd_misc_handlers, handle_uuids_req}"},
+ {"_stats", "{couch_stats_httpd, handle_stats_req}"},
+ {"_session", "{couch_httpd_auth, handle_session_req}"},
+ {"_plugins", "{couch_plugins_httpd, handle_req}"},
+ {"_system", "{chttpd_misc, handle_system_req}"}
+ ]},
+ { httpd_db_handlers, [
+ {"_all_docs", "{couch_mrview_http, handle_all_docs_req}"},
+ {"_local_docs", "{couch_mrview_http, handle_local_docs_req}"},
+ {"_design_docs", "{couch_mrview_http, handle_design_docs_req}"},
+ {"_changes", "{couch_httpd_db, handle_db_changes_req}"},
+ {"_compact", "{couch_httpd_db, handle_compact_req}"},
+ {"_design", "{couch_httpd_db, handle_design_req}"},
+ {"_temp_view", "{couch_mrview_http, handle_temp_view_req}"},
+ {"_view_cleanup", "{couch_mrview_http, handle_cleanup_req}"}
+ ]},
+ { httpd_design_handlers, [
+ {"_compact", "{couch_mrview_http, handle_compact_req}"},
+ {"_info", "{couch_mrview_http, handle_info_req}"},
+ {"_list", "{couch_mrview_show, handle_view_list_req}"},
+ {"_rewrite", "{couch_httpd_rewrite, handle_rewrite_req}"},
+ {"_show", "{couch_mrview_show, handle_doc_show_req}"},
+ {"_update", "{couch_mrview_show, handle_doc_update_req}"},
+ {"_view", "{couch_mrview_http, handle_view_req}"},
+ {"_view_changes", "{couch_mrview_http, handle_view_changes_req}"}
+ ]}
]}
]}.
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
index 467bb2ff8..9c8e7adb3 100644
--- a/src/couch/src/couch_bt_engine_header.erl
+++ b/src/couch/src/couch_bt_engine_header.erl
@@ -234,8 +234,8 @@ upgrade_disk_version(#db_header{}=Header) ->
2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
- 5 -> Header; % pre 1.2
- 6 -> Header; % pre clustered purge
+ 5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2
+ 6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge
?LATEST_DISK_VERSION -> Header;
_ ->
Reason = "Incorrect disk header version",
@@ -368,12 +368,12 @@ upgrade_v3_test() ->
-endif.
-upgrade_v5_test() ->
+upgrade_v5_to_v7_test() ->
Vsn5Header = mk_header(5),
NewHeader = upgrade_disk_version(upgrade_tuple(Vsn5Header)),
?assert(is_record(NewHeader, db_header)),
- ?assertEqual(5, disk_version(NewHeader)),
+ ?assertEqual(7, disk_version(NewHeader)),
% Security ptr isn't changed for v5 headers
?assertEqual(bang, security_ptr(NewHeader)).
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 4d76ceedc..9d6a5dc45 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -56,6 +56,7 @@
is_db/1,
is_system_db/1,
is_clustered/1,
+ is_system_db_name/1,
set_revs_limit/2,
set_purge_infos_limit/2,
@@ -424,20 +425,22 @@ get_minimum_purge_seq(#db{} = Db) ->
case DocId of
<<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
ClientSeq = couch_util:get_value(<<"purge_seq">>, Props),
+ DbName = couch_db:name(Db),
+ % If there's a broken doc we have to keep every
+ % purge info until the doc is fixed or removed.
+ Fmt = "Invalid purge doc '~s' on ~p with purge_seq '~w'",
case ClientSeq of
CS when is_integer(CS), CS >= PurgeSeq - PurgeInfosLimit ->
{ok, SeqAcc};
CS when is_integer(CS) ->
- case purge_client_exists(Db, DocId, Props) of
- true -> {ok, erlang:min(CS, SeqAcc)};
- false -> {ok, SeqAcc}
+ case purge_client_exists(DbName, DocId, Props) of
+ true ->
+ {ok, erlang:min(CS, SeqAcc)};
+ false ->
+ couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
+ {ok, SeqAcc}
end;
_ ->
- % If there's a broken doc we have to keep every
- % purge info until the doc is fixed or removed.
- Fmt = "Invalid purge doc '~s' on database ~p
- with purge_seq '~w'",
- DbName = couch_db:name(Db),
couch_log:error(Fmt, [DocId, DbName, ClientSeq]),
{ok, erlang:min(OldestPurgeSeq, SeqAcc)}
end;
@@ -490,7 +493,7 @@ purge_client_exists(DbName, DocId, Props) ->
% it exists.
Fmt2 = "Failed to check purge checkpoint using
document '~p' in database ~p",
- couch_log:error(Fmt2, [DbName, DocId]),
+ couch_log:error(Fmt2, [DocId, DbName]),
true
end.
@@ -604,8 +607,8 @@ get_db_info(Db) ->
],
{ok, InfoList}.
-get_design_docs(#db{name = <<"shards/", _:18/binary, DbFullName/binary>>}) ->
- DbName = ?l2b(filename:rootname(filename:basename(?b2l(DbFullName)))),
+get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
+ DbName = mem3:dbname(ShardDbName),
{_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
receive {'DOWN', Ref, _, _, Response} ->
Response
@@ -1111,69 +1114,35 @@ doc_tag(#doc{meta=Meta}) ->
end.
update_docs(Db, Docs0, Options, replicated_changes) ->
- increment_stat(Db, [couchdb, database_writes]),
Docs = tag_docs(Docs0),
- DocBuckets = before_docs_update(Db, group_alike_docs(Docs)),
-
- case (Db#db.validate_doc_funs /= []) orelse
- lists:any(
- fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
- (#doc{atts=Atts}) ->
- Atts /= []
- end, Docs) of
- true ->
- Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
- ExistingDocs = get_full_doc_infos(Db, Ids),
- {DocBuckets2, DocErrors} =
- prep_and_validate_replicated_updates(Db, DocBuckets, ExistingDocs, [], []),
- DocBuckets3 = [Bucket || [_|_]=Bucket <- DocBuckets2]; % remove empty buckets
- false ->
- DocErrors = [],
- DocBuckets3 = DocBuckets
+ PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
+ prep_and_validate_replicated_updates(Db0, DocBuckets0,
+ ExistingDocInfos, [], [])
end,
- DocBuckets4 = [[doc_flush_atts(Db, check_dup_atts(Doc))
- || Doc <- Bucket] || Bucket <- DocBuckets3],
- {ok, []} = write_and_commit(Db, DocBuckets4, [], [merge_conflicts | Options]),
+
+ {ok, DocBuckets, NonRepDocs, DocErrors}
+ = before_docs_update(Db, Docs, PrepValidateFun),
+
+ DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc))
+ || Doc <- Bucket] || Bucket <- DocBuckets],
+ {ok, _} = write_and_commit(Db, DocBuckets2,
+ NonRepDocs, [merge_conflicts | Options]),
{ok, DocErrors};
update_docs(Db, Docs0, Options, interactive_edit) ->
- increment_stat(Db, [couchdb, database_writes]),
- AllOrNothing = lists:member(all_or_nothing, Options),
Docs = tag_docs(Docs0),
- % Separate _local docs from normal docs
- IsLocal = fun
- (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
- (_) -> false
+ AllOrNothing = lists:member(all_or_nothing, Options),
+ PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
+ prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos,
+ AllOrNothing, [], [])
end,
- {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
- DocBuckets = before_docs_update(Db, group_alike_docs(Docs2)),
-
- case (Db#db.validate_doc_funs /= []) orelse
- lists:any(
- fun(#doc{id= <<?DESIGN_DOC_PREFIX, _/binary>>}) ->
- true;
- (#doc{atts=Atts}) ->
- Atts /= []
- end, Docs2) of
- true ->
- % lookup the doc by id and get the most recent
- Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
- ExistingDocInfos = get_full_doc_infos(Db, Ids),
-
- {DocBucketsPrepped, PreCommitFailures} = prep_and_validate_updates(Db,
- DocBuckets, ExistingDocInfos, AllOrNothing, [], []),
-
- % strip out any empty buckets
- DocBuckets2 = [Bucket || [_|_] = Bucket <- DocBucketsPrepped];
- false ->
- PreCommitFailures = [],
- DocBuckets2 = DocBuckets
- end,
+ {ok, DocBuckets, NonRepDocs, DocErrors}
+ = before_docs_update(Db, Docs, PrepValidateFun),
- if (AllOrNothing) and (PreCommitFailures /= []) ->
+ if (AllOrNothing) and (DocErrors /= []) ->
RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
{aborted, lists:map(fun({Ref, Error}) ->
#doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict),
@@ -1181,21 +1150,22 @@ update_docs(Db, Docs0, Options, interactive_edit) ->
{Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
{0, []} -> {{Id, {0, <<>>}}, Error}
end
- end, PreCommitFailures)};
+ end, DocErrors)};
true ->
Options2 = if AllOrNothing -> [merge_conflicts];
true -> [] end ++ Options,
- DocBuckets3 = [[
+ DocBuckets2 = [[
doc_flush_atts(Db, set_new_att_revpos(
check_dup_atts(Doc)))
- || Doc <- B] || B <- DocBuckets2],
- {DocBuckets4, IdRevs} = new_revs(DocBuckets3, [], []),
+ || Doc <- B] || B <- DocBuckets],
+ {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
- {ok, CommitResults} = write_and_commit(Db, DocBuckets4, NonRepDocs, Options2),
+ {ok, CommitResults} = write_and_commit(Db, DocBuckets3,
+ NonRepDocs, Options2),
ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
dict:store(Key, Resp, ResultsAcc)
- end, dict:from_list(IdRevs), CommitResults ++ PreCommitFailures),
+ end, dict:from_list(IdRevs), CommitResults ++ DocErrors),
{ok, lists:map(fun(Doc) ->
dict:fetch(doc_tag(Doc), ResultsDict)
end, Docs)}
@@ -1313,13 +1283,42 @@ prepare_doc_summaries(Db, BucketList) ->
Bucket) || Bucket <- BucketList].
-before_docs_update(#db{} = Db, BucketList) ->
- [lists:map(
- fun(Doc) ->
- DocWithBody = couch_doc:with_ejson_body(Doc),
- couch_db_plugin:before_doc_update(Db, DocWithBody)
- end,
- Bucket) || Bucket <- BucketList].
+before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun) ->
+ increment_stat(Db, [couchdb, database_writes]),
+
+ % Separate _local docs from normal docs
+ IsLocal = fun
+ (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
+ (_) -> false
+ end,
+ {NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
+
+ BucketList = group_alike_docs(Docs2),
+
+ DocBuckets = lists:map(fun(Bucket) ->
+ lists:map(fun(Doc) ->
+ DocWithBody = couch_doc:with_ejson_body(Doc),
+ couch_db_plugin:before_doc_update(Db, DocWithBody)
+ end, Bucket)
+ end, BucketList),
+
+ ValidatePred = fun
+ (#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
+ (#doc{atts = Atts}) -> Atts /= []
+ end,
+
+ case (VDFuns /= []) orelse lists:any(ValidatePred, Docs2) of
+ true ->
+ % lookup the doc by id and get the most recent
+ Ids = [Id || [#doc{id = Id} | _] <- DocBuckets],
+ ExistingDocs = get_full_doc_infos(Db, Ids),
+ {DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs),
+ % remove empty buckets
+ DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []],
+ {ok, DocBuckets3, NonRepDocs, DocErrors};
+ false ->
+ {ok, DocBuckets, NonRepDocs, []}
+ end.
set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
@@ -1729,15 +1728,15 @@ validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
match ->
ok;
nomatch ->
- case is_systemdb(Normalized) of
+ case is_system_db_name(Normalized) of
true -> ok;
false -> {error, {illegal_database_name, DbName}}
end
end.
-is_systemdb(DbName) when is_list(DbName) ->
- is_systemdb(?l2b(DbName));
-is_systemdb(DbName) when is_binary(DbName) ->
+is_system_db_name(DbName) when is_list(DbName) ->
+ is_system_db_name(?l2b(DbName));
+is_system_db_name(DbName) when is_binary(DbName) ->
Normalized = normalize_dbname(DbName),
Suffix = filename:basename(Normalized),
case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
@@ -1860,7 +1859,7 @@ dbname_suffix_test_() ->
[{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
|| {Expected, Db} <- WithExpected].
-is_systemdb_test_() ->
+is_system_db_name_test_() ->
Cases = lists:append([
generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
|| Db <- ?SYSTEM_DATABASES]
@@ -1869,7 +1868,7 @@ is_systemdb_test_() ->
WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db}
|| {Arg, Db} <- Cases],
[{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES",
- ?_assert(is_systemdb(Db))} || {Expected, Db} <- WithExpected].
+ ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected].
should_pass_validate_dbname(DbName) ->
{test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}.
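
After this refactor, both update_docs/4 clauses delegate the shared bookkeeping (write-stat increment, _local partitioning, bucket grouping, plugin callbacks, validation dispatch) to before_docs_update/3 and differ only in the prep/validate fun they pass in. A sketch of the contract as I read the hunks above (the function is internal, so this is a summary, not a published spec):

    %% PVFun :: fun((Db, DocBuckets, ExistingDocInfos) ->
    %%                  {PreppedDocBuckets, DocErrors})
    %%
    %% before_docs_update(Db, Docs, PVFun) ->
    %%     {ok, DocBuckets, NonRepDocs, DocErrors}
    %%
    %% NonRepDocs are the partitioned-out _local docs, and empty buckets
    %% have already been stripped from DocBuckets when validation ran.
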
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index 52a4d2f1b..87301d2d8 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -627,28 +627,31 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts, FullCommit) ->
update_local_doc_revs(Docs) ->
- lists:map(fun({Client, NewDoc}) ->
- #doc{
- deleted = Delete,
- revs = {0, PrevRevs}
- } = NewDoc,
- case PrevRevs of
- [RevStr | _] ->
- PrevRev = binary_to_integer(RevStr);
- [] ->
- PrevRev = 0
- end,
- NewRev = case Delete of
- false ->
- PrevRev + 1;
- true ->
- 0
- end,
- send_result(Client, NewDoc, {ok, {0, integer_to_binary(NewRev)}}),
- NewDoc#doc{
- revs = {0, [NewRev]}
- }
- end, Docs).
+ lists:foldl(fun({Client, Doc}, Acc) ->
+ case increment_local_doc_revs(Doc) of
+ {ok, #doc{revs = {0, [NewRev]}} = NewDoc} ->
+ send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}),
+ [NewDoc | Acc];
+ {error, Error} ->
+ send_result(Client, Doc, {error, Error}),
+ Acc
+ end
+ end, [], Docs).
+
+
+increment_local_doc_revs(#doc{deleted = true} = Doc) ->
+ {ok, Doc#doc{revs = {0, [0]}}};
+increment_local_doc_revs(#doc{revs = {0, []}} = Doc) ->
+ {ok, Doc#doc{revs = {0, [1]}}};
+increment_local_doc_revs(#doc{revs = {0, [RevStr | _]}} = Doc) ->
+ try
+ PrevRev = binary_to_integer(RevStr),
+ {ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
+ catch error:badarg ->
+ {error, <<"Invalid rev format">>}
+ end;
+increment_local_doc_revs(#doc{}) ->
+ {error, <<"Invalid rev format">>}.
purge_docs(Db, []) ->
@@ -808,3 +811,64 @@ hibernate_if_no_idle_limit() ->
Timeout when is_integer(Timeout) ->
Timeout
end.
+
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+
+update_local_doc_revs_test_() ->
+ {inparallel, [
+ {"Test local doc with valid rev", fun t_good_local_doc/0},
+ {"Test local doc with invalid rev", fun t_bad_local_doc/0},
+ {"Test deleted local doc", fun t_dead_local_doc/0}
+ ]}.
+
+
+t_good_local_doc() ->
+ Doc = #doc{
+ id = <<"_local/alice">>,
+ revs = {0, [<<"1">>]},
+ meta = [{ref, make_ref()}]
+ },
+ [NewDoc] = update_local_doc_revs([{self(), Doc}]),
+ ?assertEqual({0, [2]}, NewDoc#doc.revs),
+ {ok, Result} = receive_result(Doc),
+ ?assertEqual({ok,{0,<<"2">>}}, Result).
+
+
+t_bad_local_doc() ->
+ lists:foreach(fun(BadRevs) ->
+ Doc = #doc{
+ id = <<"_local/alice">>,
+ revs = BadRevs,
+ meta = [{ref, make_ref()}]
+ },
+ NewDocs = update_local_doc_revs([{self(), Doc}]),
+ ?assertEqual([], NewDocs),
+ {ok, Result} = receive_result(Doc),
+ ?assertEqual({error,<<"Invalid rev format">>}, Result)
+ end, [{0, [<<"a">>]}, {1, [<<"1">>]}]).
+
+
+
+t_dead_local_doc() ->
+ Doc = #doc{
+ id = <<"_local/alice">>,
+ revs = {0, [<<"122">>]},
+ deleted = true,
+ meta = [{ref, make_ref()}]
+ },
+ [NewDoc] = update_local_doc_revs([{self(), Doc}]),
+ ?assertEqual({0, [0]}, NewDoc#doc.revs),
+ {ok, Result} = receive_result(Doc),
+ ?assertEqual({ok,{0,<<"0">>}}, Result).
+
+
+receive_result(#doc{meta = Meta}) ->
+ Ref = couch_util:get_value(ref, Meta),
+ receive
+ {result, _, {Ref, Result}} -> {ok, Result}
+ end.
+
+-endif.
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index f960ec5c2..e5ad9e9f1 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -200,7 +200,7 @@ parse_revs(_) ->
validate_docid(DocId, DbName) ->
case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
- lists:member(DocId, ?SYSTEM_DATABASES) of
+ couch_db:is_system_db_name(DocId) of
true ->
ok;
false ->
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index e66a78e70..861fd58c4 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -104,25 +104,28 @@ start_link(Name, Options) ->
Else -> Else
end,
ok = validate_bind_address(BindAddress),
- DefaultSpec = "{couch_httpd_db, handle_request}",
- DefaultFun = make_arity_1_fun(
- config:get("httpd", "default_handler", DefaultSpec)
- ),
+ DefaultFun = make_arity_1_fun("{couch_httpd_db, handle_request}"),
+
+ {ok, HttpdGlobalHandlers} = application:get_env(httpd_global_handlers),
UrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_1_fun(SpecStr)}
- end, config:get("httpd_global_handlers")),
+ end, HttpdGlobalHandlers),
+
+ {ok, HttpdDbHandlers} = application:get_env(httpd_db_handlers),
DbUrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_2_fun(SpecStr)}
- end, config:get("httpd_db_handlers")),
+ end, HttpdDbHandlers),
+
+ {ok, HttpdDesignHandlers} = application:get_env(httpd_design_handlers),
DesignUrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_3_fun(SpecStr)}
- end, config:get("httpd_design_handlers")),
+ end, HttpdDesignHandlers),
UrlHandlers = dict:from_list(UrlHandlersList),
DbUrlHandlers = dict:from_list(DbUrlHandlersList),
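
The three handler tables are now read from the application environment populated in couch.app.src (above) rather than from mutable ini config, and the default handler is hard-coded. A rough shell illustration, assuming the couch application is loadable; the arity-2 application:get_env/2 is used because a shell process has no owning application:

    1> application:load(couch).
    ok
    2> {ok, DbHandlers} = application:get_env(couch, httpd_db_handlers).
    3> proplists:get_value("_changes", DbHandlers).
    "{couch_httpd_db, handle_db_changes_req}"
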
diff --git a/src/couch/src/couch_httpd_proxy.erl b/src/couch/src/couch_httpd_proxy.erl
deleted file mode 100644
index d2c7acc3a..000000000
--- a/src/couch/src/couch_httpd_proxy.erl
+++ /dev/null
@@ -1,428 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_httpd_proxy).
-
--compile(tuple_calls).
-
--export([handle_proxy_req/2]).
-
--include_lib("couch/include/couch_db.hrl").
--include_lib("ibrowse/include/ibrowse.hrl").
-
--define(TIMEOUT, infinity).
--define(PKT_SIZE, 4096).
-
-
-handle_proxy_req(Req, ProxyDest) ->
- Method = get_method(Req),
- Url = get_url(Req, ProxyDest),
- Version = get_version(Req),
- Headers = get_headers(Req),
- Body = get_body(Req),
- Options = [
- {http_vsn, Version},
- {headers_as_is, true},
- {response_format, binary},
- {stream_to, {self(), once}}
- ],
- case ibrowse:send_req(Url, Headers, Method, Body, Options, ?TIMEOUT) of
- {ibrowse_req_id, ReqId} ->
- stream_response(Req, ProxyDest, ReqId);
- {error, Reason} ->
- throw({error, Reason})
- end.
-
-
-get_method(#httpd{mochi_req=MochiReq}) ->
- case MochiReq:get(method) of
- Method when is_atom(Method) ->
- list_to_atom(string:to_lower(atom_to_list(Method)));
- Method when is_list(Method) ->
- list_to_atom(string:to_lower(Method));
- Method when is_binary(Method) ->
- list_to_atom(string:to_lower(?b2l(Method)))
- end.
-
-
-get_url(Req, ProxyDest) when is_binary(ProxyDest) ->
- get_url(Req, ?b2l(ProxyDest));
-get_url(#httpd{mochi_req=MochiReq}=Req, ProxyDest) ->
- BaseUrl = case mochiweb_util:partition(ProxyDest, "/") of
- {[], "/", _} -> couch_httpd:absolute_uri(Req, ProxyDest);
- _ -> ProxyDest
- end,
- ProxyPrefix = "/" ++ ?b2l(hd(Req#httpd.path_parts)),
- RequestedPath = MochiReq:get(raw_path),
- case mochiweb_util:partition(RequestedPath, ProxyPrefix) of
- {[], ProxyPrefix, []} ->
- BaseUrl;
- {[], ProxyPrefix, [$/ | DestPath]} ->
- remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
- {[], ProxyPrefix, DestPath} ->
- remove_trailing_slash(BaseUrl) ++ "/" ++ DestPath;
- _Else ->
- throw({invalid_url_path, {ProxyPrefix, RequestedPath}})
- end.
-
-get_version(#httpd{mochi_req=MochiReq}) ->
- MochiReq:get(version).
-
-
-get_headers(#httpd{mochi_req=MochiReq}) ->
- to_ibrowse_headers(mochiweb_headers:to_list(MochiReq:get(headers)), []).
-
-to_ibrowse_headers([], Acc) ->
- lists:reverse(Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_atom(K) ->
- to_ibrowse_headers([{atom_to_list(K), V} | Rest], Acc);
-to_ibrowse_headers([{K, V} | Rest], Acc) when is_list(K) ->
- case string:to_lower(K) of
- "content-length" ->
- to_ibrowse_headers(Rest, [{content_length, V} | Acc]);
- % This appears to make ibrowse too smart.
- %"transfer-encoding" ->
- % to_ibrowse_headers(Rest, [{transfer_encoding, V} | Acc]);
- _ ->
- to_ibrowse_headers(Rest, [{K, V} | Acc])
- end.
-
-get_body(#httpd{method='GET'}) ->
- fun() -> eof end;
-get_body(#httpd{method='HEAD'}) ->
- fun() -> eof end;
-get_body(#httpd{method='DELETE'}) ->
- fun() -> eof end;
-get_body(#httpd{mochi_req=MochiReq}) ->
- case MochiReq:get(body_length) of
- undefined ->
- <<>>;
- {unknown_transfer_encoding, Unknown} ->
- exit({unknown_transfer_encoding, Unknown});
- chunked ->
- {fun stream_chunked_body/1, {init, MochiReq, 0}};
- 0 ->
- <<>>;
- Length when is_integer(Length) andalso Length > 0 ->
- {fun stream_length_body/1, {init, MochiReq, Length}};
- Length ->
- exit({invalid_body_length, Length})
- end.
-
-
-remove_trailing_slash(Url) ->
- rem_slash(lists:reverse(Url)).
-
-rem_slash([]) ->
- [];
-rem_slash([$\s | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$\t | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$\r | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$\n | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash([$/ | RevUrl]) ->
- rem_slash(RevUrl);
-rem_slash(RevUrl) ->
- lists:reverse(RevUrl).
-
-
-stream_chunked_body({init, MReq, 0}) ->
- % First chunk, do expect-continue dance.
- init_body_stream(MReq),
- stream_chunked_body({stream, MReq, 0, [], ?PKT_SIZE});
-stream_chunked_body({stream, MReq, 0, Buf, BRem}) ->
- % Finished a chunk, get next length. If next length
- % is 0, its time to try and read trailers.
- {CRem, Data} = read_chunk_length(MReq),
- case CRem of
- 0 ->
- BodyData = lists:reverse(Buf, Data),
- {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
- _ ->
- stream_chunked_body(
- {stream, MReq, CRem, [Data | Buf], BRem-size(Data)}
- )
- end;
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) when BRem =< 0 ->
- % Time to empty our buffers to the upstream socket.
- BodyData = lists:reverse(Buf),
- {ok, BodyData, {stream, MReq, CRem, [], ?PKT_SIZE}};
-stream_chunked_body({stream, MReq, CRem, Buf, BRem}) ->
- % Buffer some more data from the client.
- Length = lists:min([CRem, BRem]),
- Socket = MReq:get(socket),
- NewState = case mochiweb_socket:recv(Socket, Length, ?TIMEOUT) of
- {ok, Data} when size(Data) == CRem ->
- case mochiweb_socket:recv(Socket, 2, ?TIMEOUT) of
- {ok, <<"\r\n">>} ->
- {stream, MReq, 0, [<<"\r\n">>, Data | Buf], BRem-Length-2};
- _ ->
- exit(normal)
- end;
- {ok, Data} ->
- {stream, MReq, CRem-Length, [Data | Buf], BRem-Length};
- _ ->
- exit(normal)
- end,
- stream_chunked_body(NewState);
-stream_chunked_body({trailers, MReq, Buf, BRem}) when BRem =< 0 ->
- % Empty our buffers and send data upstream.
- BodyData = lists:reverse(Buf),
- {ok, BodyData, {trailers, MReq, [], ?PKT_SIZE}};
-stream_chunked_body({trailers, MReq, Buf, BRem}) ->
- % Read another trailer into the buffer or stop on an
- % empty line.
- Socket = MReq:get(socket),
- mochiweb_socket:setopts(Socket, [{packet, line}]),
- case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
- {ok, <<"\r\n">>} ->
- mochiweb_socket:setopts(Socket, [{packet, raw}]),
- BodyData = lists:reverse(Buf, <<"\r\n">>),
- {ok, BodyData, eof};
- {ok, Footer} ->
- mochiweb_socket:setopts(Socket, [{packet, raw}]),
- NewState = {trailers, MReq, [Footer | Buf], BRem-size(Footer)},
- stream_chunked_body(NewState);
- _ ->
- exit(normal)
- end;
-stream_chunked_body(eof) ->
- % Tell ibrowse we're done sending data.
- eof.
-
-
-stream_length_body({init, MochiReq, Length}) ->
- % Do the expect-continue dance
- init_body_stream(MochiReq),
- stream_length_body({stream, MochiReq, Length});
-stream_length_body({stream, _MochiReq, 0}) ->
- % Finished streaming.
- eof;
-stream_length_body({stream, MochiReq, Length}) ->
- BufLen = lists:min([Length, ?PKT_SIZE]),
- case MochiReq:recv(BufLen) of
- <<>> -> eof;
- Bin -> {ok, Bin, {stream, MochiReq, Length-BufLen}}
- end.
-
-
-init_body_stream(MochiReq) ->
- Expect = case MochiReq:get_header_value("expect") of
- undefined ->
- undefined;
- Value when is_list(Value) ->
- string:to_lower(Value)
- end,
- case Expect of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _Else ->
- ok
- end.
-
-
-read_chunk_length(MochiReq) ->
- Socket = MochiReq:get(socket),
- mochiweb_socket:setopts(Socket, [{packet, line}]),
- case mochiweb_socket:recv(Socket, 0, ?TIMEOUT) of
- {ok, Header} ->
- mochiweb_socket:setopts(Socket, [{packet, raw}]),
- Splitter = fun(C) ->
- C =/= $\r andalso C =/= $\n andalso C =/= $\s
- end,
- {Hex, _Rest} = lists:splitwith(Splitter, ?b2l(Header)),
- {mochihex:to_int(Hex), Header};
- _ ->
- exit(normal)
- end.
-
-
-stream_response(Req, ProxyDest, ReqId) ->
- receive
- {ibrowse_async_headers, ReqId, "100", _} ->
- % ibrowse doesn't handle 100 Continue responses which
- % means we have to discard them so the proxy client
- % doesn't get confused.
- ibrowse:stream_next(ReqId),
- stream_response(Req, ProxyDest, ReqId);
- {ibrowse_async_headers, ReqId, Status, Headers} ->
- {Source, Dest} = get_urls(Req, ProxyDest),
- FixedHeaders = fix_headers(Source, Dest, Headers, []),
- case body_length(FixedHeaders) of
- chunked ->
- {ok, Resp} = couch_httpd:start_chunked_response(
- Req, list_to_integer(Status), FixedHeaders
- ),
- ibrowse:stream_next(ReqId),
- stream_chunked_response(Req, ReqId, Resp),
- {ok, Resp};
- Length when is_integer(Length) ->
- {ok, Resp} = couch_httpd:start_response_length(
- Req, list_to_integer(Status), FixedHeaders, Length
- ),
- ibrowse:stream_next(ReqId),
- stream_length_response(Req, ReqId, Resp),
- {ok, Resp};
- _ ->
- {ok, Resp} = couch_httpd:start_response(
- Req, list_to_integer(Status), FixedHeaders
- ),
- ibrowse:stream_next(ReqId),
- stream_length_response(Req, ReqId, Resp),
- % XXX: MochiWeb apparently doesn't look at the
- % response to see if it must force close the
- % connection. So we help it out here.
- erlang:put(mochiweb_request_force_close, true),
- {ok, Resp}
- end
- end.
-
-
-stream_chunked_response(Req, ReqId, Resp) ->
- receive
- {ibrowse_async_response, ReqId, {error, Reason}} ->
- throw({error, Reason});
- {ibrowse_async_response, ReqId, Chunk} ->
- couch_httpd:send_chunk(Resp, Chunk),
- ibrowse:stream_next(ReqId),
- stream_chunked_response(Req, ReqId, Resp);
- {ibrowse_async_response_end, ReqId} ->
- couch_httpd:last_chunk(Resp)
- end.
-
-
-stream_length_response(Req, ReqId, Resp) ->
- receive
- {ibrowse_async_response, ReqId, {error, Reason}} ->
- throw({error, Reason});
- {ibrowse_async_response, ReqId, Chunk} ->
- couch_httpd:send(Resp, Chunk),
- ibrowse:stream_next(ReqId),
- stream_length_response(Req, ReqId, Resp);
- {ibrowse_async_response_end, ReqId} ->
- ok
- end.
-
-
-get_urls(Req, ProxyDest) ->
- SourceUrl = couch_httpd:absolute_uri(Req, "/" ++ hd(Req#httpd.path_parts)),
- Source = parse_url(?b2l(iolist_to_binary(SourceUrl))),
- case (catch parse_url(ProxyDest)) of
- Dest when is_record(Dest, url) ->
- {Source, Dest};
- _ ->
- DestUrl = couch_httpd:absolute_uri(Req, ProxyDest),
- {Source, parse_url(DestUrl)}
- end.
-
-
-fix_headers(_, _, [], Acc) ->
- lists:reverse(Acc);
-fix_headers(Source, Dest, [{K, V} | Rest], Acc) ->
- Fixed = case string:to_lower(K) of
- "location" -> rewrite_location(Source, Dest, V);
- "content-location" -> rewrite_location(Source, Dest, V);
- "uri" -> rewrite_location(Source, Dest, V);
- "destination" -> rewrite_location(Source, Dest, V);
- "set-cookie" -> rewrite_cookie(Source, Dest, V);
- _ -> V
- end,
- fix_headers(Source, Dest, Rest, [{K, Fixed} | Acc]).
-
-
-rewrite_location(Source, #url{host=Host, port=Port, protocol=Proto}, Url) ->
- case (catch parse_url(Url)) of
- #url{host=Host, port=Port, protocol=Proto} = Location ->
- DestLoc = #url{
- protocol=Source#url.protocol,
- host=Source#url.host,
- port=Source#url.port,
- path=join_url_path(Source#url.path, Location#url.path)
- },
- url_to_url(DestLoc);
- #url{} ->
- Url;
- _ ->
- url_to_url(Source#url{path=join_url_path(Source#url.path, Url)})
- end.
-
-
-rewrite_cookie(_Source, _Dest, Cookie) ->
- Cookie.
-
-
-parse_url(Url) when is_binary(Url) ->
- ibrowse_lib:parse_url(?b2l(Url));
-parse_url(Url) when is_list(Url) ->
- ibrowse_lib:parse_url(?b2l(iolist_to_binary(Url))).
-
-
-join_url_path(Src, Dst) ->
- Src2 = case lists:reverse(Src) of
- "/" ++ RestSrc -> lists:reverse(RestSrc);
- _ -> Src
- end,
- Dst2 = case Dst of
- "/" ++ RestDst -> RestDst;
- _ -> Dst
- end,
- Src2 ++ "/" ++ Dst2.
-
-
-url_to_url(#url{host=Host, port=Port, path=Path, protocol=Proto} = Url) ->
- LPort = case {Proto, Port} of
- {http, 80} -> "";
- {https, 443} -> "";
- _ -> ":" ++ integer_to_list(Port)
- end,
- LPath = case Path of
- "/" ++ _RestPath -> Path;
- _ -> "/" ++ Path
- end,
- HostPart = case Url#url.host_type of
- ipv6_address ->
- "[" ++ Host ++ "]";
- _ ->
- Host
- end,
- atom_to_list(Proto) ++ "://" ++ HostPart ++ LPort ++ LPath.
-
-
-body_length(Headers) ->
- case is_chunked(Headers) of
- true -> chunked;
- _ -> content_length(Headers)
- end.
-
-
-is_chunked([]) ->
- false;
-is_chunked([{K, V} | Rest]) ->
- case string:to_lower(K) of
- "transfer-encoding" ->
- string:to_lower(V) == "chunked";
- _ ->
- is_chunked(Rest)
- end.
-
-content_length([]) ->
- undefined;
-content_length([{K, V} | Rest]) ->
- case string:to_lower(K) of
- "content-length" ->
- list_to_integer(V);
- _ ->
- content_length(Rest)
- end.
-
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
index d8f952190..574dba9c8 100644
--- a/src/couch/src/couch_httpd_vhost.erl
+++ b/src/couch/src/couch_httpd_vhost.erl
@@ -380,10 +380,6 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-handle_config_change("httpd", "vhost_global_handlers", _, _, _) ->
- {ok, ?MODULE:reload()};
-handle_config_change("httpd", "redirect_vhost_handler", _, _, _) ->
- {ok, ?MODULE:reload()};
handle_config_change("vhosts", _, _, _, _) ->
{ok, ?MODULE:reload()};
handle_config_change(_, _, _, _, _) ->
@@ -396,25 +392,23 @@ handle_config_terminate(_Server, _Reason, _State) ->
load_conf() ->
%% get vhost globals
- VHostGlobals = re:split(config:get("httpd",
- "vhost_global_handlers",""), "\\s*,\\s*",[{return, list}]),
+ VHostGlobals = re:split("_utils, _uuids, _session, _users", "\\s*,\\s*",
+ [{return, list}]),
%% build vhosts matching rules
VHosts = make_vhosts(),
%% build vhosts handler fun
DefaultVHostFun = "{couch_httpd_vhost, redirect_to_vhost}",
- Fun = couch_httpd:make_arity_2_fun(config:get("httpd",
- "redirect_vhost_handler", DefaultVHostFun)),
+ Fun = couch_httpd:make_arity_2_fun(DefaultVHostFun),
{VHostGlobals, VHosts, Fun}.
%% cheaply determine if there are any virtual hosts
%% configured at all.
vhost_enabled() ->
- case {config:get("httpd", "vhost_global_handlers"),
- config:get("vhosts")} of
- {undefined, []} ->
+ case config:get("vhosts") of
+ [] ->
false;
_ ->
true
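
vhost_enabled/0 now keys only off the [vhosts] section, since the global-handler list and redirect handler are no longer configurable. Virtual hosts themselves are still declared the usual way, e.g. (hypothetical mapping):

    [vhosts]
    example.com = /database
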
diff --git a/src/couch/src/couch_os_daemons.erl b/src/couch/src/couch_os_daemons.erl
deleted file mode 100644
index cd019dbb5..000000000
--- a/src/couch/src/couch_os_daemons.erl
+++ /dev/null
@@ -1,394 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
--module(couch_os_daemons).
--behaviour(gen_server).
--vsn(1).
--behaviour(config_listener).
-
--export([start_link/0, info/0, info/1]).
-
--export([init/1, terminate/2, code_change/3]).
--export([handle_call/3, handle_cast/2, handle_info/2]).
-
-% config_listener api
--export([handle_config_change/5, handle_config_terminate/3]).
-
--include_lib("couch/include/couch_db.hrl").
-
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
--define(PORT_OPTIONS, [stream, {line, 1024}, binary, exit_status, hide]).
--define(RELISTEN_DELAY, 5000).
-
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-info() ->
- info([]).
-
-info(Options) ->
- gen_server:call(?MODULE, {daemon_info, Options}).
-
-init(_) ->
- process_flag(trap_exit, true),
- ok = config:listen_for_changes(?MODULE, nil),
- Table = ets:new(?MODULE, [protected, set, {keypos, #daemon.port}]),
- reload_daemons(Table),
- {ok, Table}.
-
-terminate(_Reason, Table) ->
- [stop_port(D) || D <- ets:tab2list(Table)],
- ok.
-
-handle_call({daemon_info, Options}, _From, Table) when is_list(Options) ->
- case lists:member(table, Options) of
- true ->
- {reply, {ok, ets:tab2list(Table)}, Table};
- _ ->
- {reply, {ok, Table}, Table}
- end;
-handle_call(Msg, From, Table) ->
- couch_log:error("Unknown call message to ~p from ~p: ~p",
- [?MODULE, From, Msg]),
- {stop, error, Table}.
-
-handle_cast({config_change, Sect, Key}, Table) ->
- restart_daemons(Table, Sect, Key),
- case Sect of
- "os_daemons" -> reload_daemons(Table);
- _ -> ok
- end,
- {noreply, Table};
-handle_cast(stop, Table) ->
- {stop, normal, Table};
-handle_cast(Msg, Table) ->
- couch_log:error("Unknown cast message to ~p: ~p", [?MODULE, Msg]),
- {stop, error, Table}.
-
-handle_info({'EXIT', Port, Reason}, Table) ->
- case ets:lookup(Table, Port) of
- [] ->
- couch_log:info("Port ~p exited after stopping: ~p~n",
- [Port, Reason]);
- [#daemon{status=stopping}] ->
- true = ets:delete(Table, Port);
- [#daemon{name=Name, status=restarting}=D] ->
- couch_log:info("Daemon ~p restarting after config change.", [Name]),
- true = ets:delete(Table, Port),
- {ok, Port2} = start_port(D#daemon.cmd),
- true = ets:insert(Table, D#daemon{
- port=Port2, status=running, kill=undefined, buf=[]
- });
- [#daemon{name=Name, status=halted}] ->
- couch_log:error("Halted daemon process: ~p", [Name]);
- [D] ->
- couch_log:error("Invalid port state at exit: ~p", [D])
- end,
- {noreply, Table};
-handle_info({Port, closed}, Table) ->
- handle_info({Port, {exit_status, closed}}, Table);
-handle_info({Port, {exit_status, Status}}, Table) ->
- case ets:lookup(Table, Port) of
- [] ->
- couch_log:error("Unknown port ~p exiting ~p", [Port, Status]),
- {stop, {error, unknown_port_died, Status}, Table};
- [#daemon{name=Name, status=restarting}=D] ->
- couch_log:info("Daemon ~p restarting after config change.", [Name]),
- true = ets:delete(Table, Port),
- {ok, Port2} = start_port(D#daemon.cmd),
- true = ets:insert(Table, D#daemon{
- port=Port2, status=running, kill=undefined, buf=[]
- }),
- {noreply, Table};
- [#daemon{status=stopping}=D] ->
- % The configuration changed and this daemon is no
- % longer needed.
- couch_log:debug("Port ~p shut down.", [D#daemon.name]),
- true = ets:delete(Table, Port),
- {noreply, Table};
- [D] ->
- % Port died for unknown reason. Check to see if it's
- % died too many times or if we should boot it back up.
- case should_halt([os:timestamp() | D#daemon.errors]) of
- {true, _} ->
- % Halting the process. We won't try and reboot
- % until the configuration changes.
- Fmt = "Daemon ~p halted with exit_status ~p",
- couch_log:error(Fmt, [D#daemon.name, Status]),
- D2 = D#daemon{status=halted, errors=nil, buf=nil},
- true = ets:insert(Table, D2),
- {noreply, Table};
- {false, Errors} ->
-            % We're guessing it was a random error; this daemon
-            % has behaved, so we'll give it another chance.
- Fmt = "Daemon ~p is being rebooted after exit_status ~p",
- couch_log:info(Fmt, [D#daemon.name, Status]),
- true = ets:delete(Table, Port),
- {ok, Port2} = start_port(D#daemon.cmd),
- true = ets:insert(Table, D#daemon{
- port=Port2, status=running, kill=undefined,
- errors=Errors, buf=[]
- }),
- {noreply, Table}
- end;
- _Else ->
- throw(error)
- end;
-handle_info({Port, {data, {noeol, Data}}}, Table) ->
- [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
- true = ets:insert(Table, D#daemon{buf=[Data | Buf]}),
- {noreply, Table};
-handle_info({Port, {data, {eol, Data}}}, Table) ->
- [#daemon{buf=Buf}=D] = ets:lookup(Table, Port),
- Line = lists:reverse(Buf, Data),
- % The first line echoed back is the kill command
- % for when we go to get rid of the port. Lines after
- % that are considered part of the stdio API.
- case D#daemon.kill of
- undefined ->
- true = ets:insert(Table, D#daemon{kill=?b2l(Line), buf=[]});
- _Else ->
- D2 = case (catch ?JSON_DECODE(Line)) of
- {invalid_json, Rejected} ->
- couch_log:error("Ignoring OS daemon request: ~p",
- [Rejected]),
- D;
- JSON ->
- {ok, D3} = handle_port_message(D, JSON),
- D3
- end,
- true = ets:insert(Table, D2#daemon{buf=[]})
- end,
- {noreply, Table};
-handle_info({Port, Error}, Table) ->
-    couch_log:error("Unexpected message from port ~p: ~p", [Port, Error]),
- stop_port(Port),
- [D] = ets:lookup(Table, Port),
- true = ets:insert(Table, D#daemon{status=restarting, buf=nil}),
- {noreply, Table};
-handle_info(restart_config_listener, State) ->
- ok = config:listen_for_changes(?MODULE, nil),
- {noreply, State};
-handle_info(Msg, Table) ->
- couch_log:error("Unexpected info message to ~p: ~p", [?MODULE, Msg]),
- {stop, error, Table}.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-
-handle_config_change(Section, Key, _, _, _) ->
- gen_server:cast(?MODULE, {config_change, Section, Key}),
- {ok, nil}.
-
-handle_config_terminate(_, stop, _) ->
- ok;
-handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener).
-
-
-% Internal API
-
-%
-% Port management helpers
-%
-
-start_port(Command) ->
- start_port(Command, []).
-
-start_port(Command, EnvPairs) ->
- PrivDir = couch_util:priv_dir(),
- Spawnkiller = "\"" ++ filename:join(PrivDir, "couchspawnkillable") ++ "\"",
- Opts = case lists:keytake(env, 1, ?PORT_OPTIONS) of
- false ->
- ?PORT_OPTIONS ++ [ {env,EnvPairs} ];
- {value, {env,OldPairs}, SubOpts} ->
- AllPairs = lists:keymerge(1, EnvPairs, OldPairs),
- SubOpts ++ [ {env,AllPairs} ]
- end,
- Port = open_port({spawn, Spawnkiller ++ " " ++ Command}, Opts),
- {ok, Port}.
-
-
-stop_port(#daemon{port=Port, kill=undefined}=D) ->
- couch_log:error("Stopping daemon without a kill command: ~p",
- [D#daemon.name]),
- catch port_close(Port);
-stop_port(#daemon{port=Port}=D) ->
- couch_log:debug("Stopping daemon: ~p", [D#daemon.name]),
- os:cmd(D#daemon.kill),
- catch port_close(Port).
-
-
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section]) ->
- KVs = config:get(Section),
- Data = lists:map(fun({K, V}) -> {?l2b(K), ?l2b(V)} end, KVs),
- Json = iolist_to_binary(?JSON_ENCODE({Data})),
- port_command(Port, <<Json/binary, "\n">>),
- {ok, Daemon};
-handle_port_message(#daemon{port=Port}=Daemon, [<<"get">>, Section, Key]) ->
- Value = case config:get(Section, Key, undefined) of
- undefined -> null;
- String -> ?l2b(String)
- end,
- Json = iolist_to_binary(?JSON_ENCODE(Value)),
- port_command(Port, <<Json/binary, "\n">>),
- {ok, Daemon};
-handle_port_message(Daemon, [<<"register">>, Sec]) when is_binary(Sec) ->
- Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [{?b2l(Sec)}]),
- {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(Daemon, [<<"register">>, Sec, Key])
- when is_binary(Sec) andalso is_binary(Key) ->
- Pattern = {?b2l(Sec), ?b2l(Key)},
- Patterns = lists:usort(Daemon#daemon.cfg_patterns ++ [Pattern]),
- {ok, Daemon#daemon{cfg_patterns=Patterns}};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg]) ->
- handle_log_message(Name, Msg, <<"info">>),
- {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, [<<"log">>, Msg, {Opts}]) ->
- Level = couch_util:get_value(<<"level">>, Opts, <<"info">>),
- handle_log_message(Name, Msg, Level),
- {ok, Daemon};
-handle_port_message(#daemon{name=Name}=Daemon, Else) ->
- couch_log:error("Daemon ~p made invalid request: ~p", [Name, Else]),
- {ok, Daemon}.
-
-
-handle_log_message(Name, Msg, _Level) when not is_binary(Msg) ->
- couch_log:error("Invalid log message from daemon ~p: ~p", [Name, Msg]);
-handle_log_message(Name, Msg, <<"debug">>) ->
- couch_log:debug("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"info">>) ->
- couch_log:info("Daemon ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, <<"error">>) ->
- couch_log:error("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]);
-handle_log_message(Name, Msg, Level) ->
- couch_log:error("Invalid log level from daemon: ~p", [Level]),
- couch_log:info("Daemon: ~p :: ~s", [Name, ?b2l(Msg)]).
-
-%
-% Daemon management helpers
-%
-
-reload_daemons(Table) ->
- % List of daemons we want to have running.
- Configured = lists:sort(config:get("os_daemons")),
-
- % Remove records for daemons that were halted.
- MSpecHalted = #daemon{name='$1', cmd='$2', status=halted, _='_'},
- Halted = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecHalted)]),
- ok = stop_os_daemons(Table, find_to_stop(Configured, Halted, [])),
-
- % Stop daemons that are running
- % Start newly configured daemons
- MSpecRunning = #daemon{name='$1', cmd='$2', status=running, _='_'},
- Running = lists:sort([{N, C} || [N, C] <- ets:match(Table, MSpecRunning)]),
- ok = stop_os_daemons(Table, find_to_stop(Configured, Running, [])),
- ok = boot_os_daemons(Table, find_to_boot(Configured, Running, [])),
- ok.
-
-
-restart_daemons(Table, Sect, Key) ->
- restart_daemons(Table, Sect, Key, ets:first(Table)).
-
-restart_daemons(_, _, _, '$end_of_table') ->
- ok;
-restart_daemons(Table, Sect, Key, Port) ->
- [D] = ets:lookup(Table, Port),
- HasSect = lists:member({Sect}, D#daemon.cfg_patterns),
- HasKey = lists:member({Sect, Key}, D#daemon.cfg_patterns),
- case HasSect or HasKey of
- true ->
- stop_port(D),
- D2 = D#daemon{status=restarting, buf=nil},
- true = ets:insert(Table, D2);
- _ ->
- ok
- end,
- restart_daemons(Table, Sect, Key, ets:next(Table, Port)).
-
-
-stop_os_daemons(_Table, []) ->
- ok;
-stop_os_daemons(Table, [{Name, Cmd} | Rest]) ->
- [[Port]] = ets:match(Table, #daemon{port='$1', name=Name, cmd=Cmd, _='_'}),
- [D] = ets:lookup(Table, Port),
- case D#daemon.status of
- halted ->
- ets:delete(Table, Port);
- _ ->
- stop_port(D),
- D2 = D#daemon{status=stopping, errors=nil, buf=nil},
- true = ets:insert(Table, D2)
- end,
- stop_os_daemons(Table, Rest).
-
-boot_os_daemons(_Table, []) ->
- ok;
-boot_os_daemons(Table, [{Name, Cmd} | Rest]) ->
- {ok, Port} = start_port(Cmd),
- true = ets:insert(Table, #daemon{port=Port, name=Name, cmd=Cmd}),
- boot_os_daemons(Table, Rest).
-
-% Elements unique to the configured set need to be booted.
-find_to_boot([], _Rest, Acc) ->
- % Nothing else configured.
- Acc;
-find_to_boot([D | R1], [D | R2], Acc) ->
- % Elements are equal, daemon already running.
- find_to_boot(R1, R2, Acc);
-find_to_boot([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
- find_to_boot(R1, A2, [D1 | Acc]);
-find_to_boot(A1, [_ | R2], Acc) ->
- find_to_boot(A1, R2, Acc);
-find_to_boot(Rest, [], Acc) ->
- % No more candidates for already running. Boot all.
- Rest ++ Acc.
-
-% Elements unique to the running set need to be killed.
-find_to_stop([], Rest, Acc) ->
- % The rest haven't been found, so they must all
- % be ready to die.
- Rest ++ Acc;
-find_to_stop([D | R1], [D | R2], Acc) ->
- % Elements are equal, daemon already running.
- find_to_stop(R1, R2, Acc);
-find_to_stop([D1 | R1], [D2 | _]=A2, Acc) when D1 < D2 ->
- find_to_stop(R1, A2, Acc);
-find_to_stop(A1, [D2 | R2], Acc) ->
- find_to_stop(A1, R2, [D2 | Acc]);
-find_to_stop(_, [], Acc) ->
- % No more running daemons to worry about.
- Acc.
-
-should_halt(Errors) ->
- RetryTimeCfg = config:get("os_daemon_settings", "retry_time", "5"),
- RetryTime = list_to_integer(RetryTimeCfg),
-
- Now = os:timestamp(),
- RecentErrors = lists:filter(fun(Time) ->
- timer:now_diff(Now, Time) =< RetryTime * 1000000
- end, Errors),
-
- RetryCfg = config:get("os_daemon_settings", "max_retries", "3"),
- Retries = list_to_integer(RetryCfg),
-
- {length(RecentErrors) >= Retries, RecentErrors}.
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index 04101f240..d2a198e82 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -372,12 +372,54 @@ new_proc(Client) ->
end,
exit(Resp).
+get_env_for_spec(Spec, Target) ->
+    % scan os:getenv() for entries named Spec ++ Target and return their values
+ lists:filtermap(fun(VarName) ->
+ SpecStr = Spec ++ Target,
+ case string:tokens(VarName, "=") of
+ [SpecStr, Cmd] -> {true, Cmd};
+ _Else -> false
+ end
+ end, os:getenv()).
+
+get_query_server(LangStr) ->
+    % look for COUCHDB_QUERY_SERVER_<LANGSTR> in the environment;
+    % if it exists, return its value, else undefined
+ UpperLangString = string:to_upper(LangStr),
+ case get_env_for_spec("COUCHDB_QUERY_SERVER_", UpperLangString) of
+ [] -> undefined;
+ [Command] -> Command
+ end.
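+
+% Usage sketch (the value below is an assumed example, not a shipped default):
+% with
+%   COUCHDB_QUERY_SERVER_JAVASCRIPT="/opt/couchdb/bin/couchjs main.js"
+% exported in the environment, get_query_server("javascript") returns that
+% command string; a language with no matching variable yields undefined.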
+
+native_query_server_enabled() ->
+    % 1. [native_query_servers] enable_erlang_query_server = true | false
+    % 2. if [native_query_servers] erlang == {couch_native_process, start_link, []},
+    %    treat it as enabled as well
+ NativeEnabled = config:get_boolean("native_query_servers", "enable_erlang_query_server", false),
+ NativeLegacyConfig = config:get("native_query_servers", "erlang", ""),
+ NativeLegacyEnabled = NativeLegacyConfig =:= "{couch_native_process, start_link, []}",
+
+ NativeEnabled orelse NativeLegacyEnabled.
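+
+% Example ini configuration (assumed values, shown only for illustration):
+%
+%   [native_query_servers]
+%   enable_erlang_query_server = true
+%
+% or, via the legacy form that is still honored:
+%
+%   [native_query_servers]
+%   erlang = {couch_native_process, start_link, []}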
+
+get_native_query_server("query") -> % mango query server
+ "{mango_native_proc, start_link, []}";
+get_native_query_server("erlang") -> % erlang query server
+ case native_query_server_enabled() of
+ true -> "{couch_native_process, start_link, []}";
+ _Else -> undefined
+ end;
+get_native_query_server(LangStr) ->
+    % same as above, but COUCHDB_NATIVE_QUERY_SERVER_<LANGSTR>
+ UpperLangString = string:uppercase(LangStr),
+ case get_env_for_spec("COUCHDB_NATIVE_QUERY_SERVER_", UpperLangString) of
+ [] -> undefined;
+ [Command] -> Command
+ end.
new_proc_int(From, Lang) when is_binary(Lang) ->
LangStr = binary_to_list(Lang),
- case config:get("query_servers", LangStr) of
+ case get_query_server(LangStr) of
undefined ->
- case config:get("native_query_servers", LangStr) of
+ case get_native_query_server(LangStr) of
undefined ->
gen_server:reply(From, {unknown_query_language, Lang});
SpecStr ->
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index 0c3b7aa5a..9b424dc6a 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -26,18 +26,42 @@ init([]) ->
worker,
dynamic}
],
+ Daemons = [
+ {index_server, {couch_index_server, start_link, []}},
+ {query_servers, {couch_proc_manager, start_link, []}},
+ {vhosts, {couch_httpd_vhost, start_link, []}},
+ {httpd, {couch_httpd, start_link, []}},
+ {uuids, {couch_uuids, start, []}},
+ {auth_cache, {couch_auth_cache, start_link, []}},
+ {compaction_daemon, {couch_compaction_daemon, start_link, []}}
+ ],
+
+ MaybeHttps = case https_enabled() of
+ true -> [{httpsd, {chttpd, start_link, [https]}}];
+ _False -> []
+ end,
+
Children = SecondarySupervisors ++ [
begin
- {ok, {Module, Fun, Args}} = couch_util:parse_term(SpecStr),
+ {Module, Fun, Args} = Spec,
- {list_to_atom(Name),
+ {Name,
{Module, Fun, Args},
permanent,
brutal_kill,
worker,
[Module]}
end
- || {Name, SpecStr}
- <- config:get("daemons"), SpecStr /= ""],
+ || {Name, Spec}
+ <- Daemons ++ MaybeHttps, Spec /= ""],
{ok, {{one_for_one, 50, 3600},
couch_epi:register_service(couch_db_epi, Children)}}.
+
+https_enabled() ->
+ % 1. [ssl] enable = true | false
+ % 2. if [daemons] httpsd == {chttpd, start_link, [https]} -> pretend true as well
+ SSLEnabled = config:get_boolean("ssl", "enable", false),
+ LegacySSL = config:get("daemons", "httpsd"),
+ LegacySSLEnabled = LegacySSL =:= "{chttpd, start_link, [https]}",
+
+ SSLEnabled orelse LegacySSLEnabled.
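+
+% Example ini configuration (assumed values, shown only for illustration):
+%
+%   [ssl]
+%   enable = true
+%
+% or the legacy daemons entry, which is still honored:
+%
+%   [daemons]
+%   httpsd = {chttpd, start_link, [https]}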
diff --git a/src/couch/test/couch_bt_engine_upgrade_tests.erl b/src/couch/test/couch_bt_engine_upgrade_tests.erl
index 1d2a86d71..8025528fb 100644
--- a/src/couch/test/couch_bt_engine_upgrade_tests.erl
+++ b/src/couch/test/couch_bt_engine_upgrade_tests.erl
@@ -66,12 +66,13 @@ t_upgrade_without_purge_req() ->
% db with zero purge entries
DbName = <<"db_without_purge_req">>,
+ ?assertEqual(6, get_disk_version_from_header(DbName)),
{ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
?assertEqual(0, couch_db:get_purge_seq(Db)),
couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
end),
?assertEqual([], UpgradedPurged),
-
+ ?assertEqual(7, get_disk_version_from_header(DbName)),
{ok, Rev} = save_doc(
DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
),
@@ -104,10 +105,12 @@ t_upgrade_with_1_purge_req() ->
% with a single purge entry
DbName = <<"db_with_1_purge_req">>,
+ ?assertEqual(6, get_disk_version_from_header(DbName)),
{ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
?assertEqual(1, couch_db:get_purge_seq(Db)),
couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
end),
+ ?assertEqual(7, get_disk_version_from_header(DbName)),
?assertEqual([{1, <<"doc1">>}], UpgradedPurged),
{ok, Rev} = save_doc(
@@ -142,10 +145,12 @@ t_upgrade_with_N_purge_req() ->
% with two docs that have been purged
DbName = <<"db_with_2_purge_req">>,
+ ?assertEqual(6, get_disk_version_from_header(DbName)),
{ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
?assertEqual(2, couch_db:get_purge_seq(Db)),
couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
end),
+ ?assertEqual(7, get_disk_version_from_header(DbName)),
?assertEqual([{2, <<"doc2">>}], UpgradedPurged),
{ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}),
@@ -179,10 +184,12 @@ t_upgrade_with_1_purge_req_for_2_docs() ->
% with one purge req for Doc1 and another purge req for Doc 2 and Doc3
DbName = <<"db_with_1_purge_req_for_2_docs">>,
+ ?assertEqual(6, get_disk_version_from_header(DbName)),
{ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
?assertEqual(3, couch_db:get_purge_seq(Db)),
couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
end),
+ ?assertEqual(7, get_disk_version_from_header(DbName)),
?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged),
{ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}),
@@ -218,3 +225,13 @@ save_doc(DbName, Json) ->
fold_fun({PSeq, _UUID, Id, _Revs}, Acc) ->
{ok, [{PSeq, Id} | Acc]}.
+
+
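+% Reads the header straight from the .couch file rather than through
+% couch_db, so the tests above can observe the on-disk version both before
+% the upgrade (6) and after it has been written back (7).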
+get_disk_version_from_header(DbFileName) ->
+ DbDir = config:get("couchdb", "database_dir"),
+ DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]),
+ {ok, Fd} = couch_file:open(DbFilePath, []),
+ {ok, Header} = couch_file:read_header(Fd),
+    DiskVersion = couch_bt_engine_header:disk_version(Header),
+    couch_file:close(Fd),
+    DiskVersion.
diff --git a/src/couch/test/couchdb_http_proxy_tests.erl b/src/couch/test/couchdb_http_proxy_tests.erl
deleted file mode 100644
index f60ba3b08..000000000
--- a/src/couch/test/couchdb_http_proxy_tests.erl
+++ /dev/null
@@ -1,456 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_http_proxy_tests).
-
--compile(tuple_calls).
-
--include_lib("couch/include/couch_eunit.hrl").
-
--record(req, {method=get, path="", headers=[], body="", opts=[]}).
-
--define(CONFIG_FIXTURE_TEMP,
- begin
- FileName = filename:join([?TEMPDIR, ?tempfile() ++ ".ini"]),
- {ok, Fd} = file:open(FileName, write),
- ok = file:truncate(Fd),
- ok = file:close(Fd),
- FileName
- end).
--define(TIMEOUT, 5000).
-
-
-start() ->
-    % we have to write any config changes to a temp ini file so we don't lose
-    % them when the supervisor kills all children after reaching the restart
-    % threshold (each httpd_global_handlers change causes a couch_httpd restart)
- Ctx = test_util:start_couch(?CONFIG_CHAIN ++ [?CONFIG_FIXTURE_TEMP], []),
- % 49151 is IANA Reserved, let's assume no one is listening there
- test_util:with_process_restart(couch_httpd, fun() ->
- config:set("httpd_global_handlers", "_error",
- "{couch_httpd_proxy, handle_proxy_req, <<\"http://127.0.0.1:49151/\">>}"
- )
- end),
- Ctx.
-
-setup() ->
- {ok, Pid} = test_web:start_link(),
- Value = lists:flatten(io_lib:format(
- "{couch_httpd_proxy, handle_proxy_req, ~p}",
- [list_to_binary(proxy_url())])),
- test_util:with_process_restart(couch_httpd, fun() ->
- config:set("httpd_global_handlers", "_test", Value)
- end),
- Pid.
-
-teardown(Pid) ->
- test_util:stop_sync_throw(Pid, fun() ->
- test_web:stop()
- end, {timeout, test_web_stop}, ?TIMEOUT).
-
-http_proxy_test_() ->
- {
- "HTTP Proxy handler tests",
- {
- setup,
- fun start/0, fun test_util:stop_couch/1,
- {
- foreach,
- fun setup/0, fun teardown/1,
- [
- fun should_proxy_basic_request/1,
- fun should_return_alternative_status/1,
- fun should_respect_trailing_slash/1,
- fun should_proxy_headers/1,
- fun should_proxy_host_header/1,
- fun should_pass_headers_back/1,
- fun should_use_same_protocol_version/1,
- fun should_proxy_body/1,
- fun should_proxy_body_back/1,
- fun should_proxy_chunked_body/1,
- fun should_proxy_chunked_body_back/1,
- fun should_rewrite_location_header/1,
- fun should_not_rewrite_external_locations/1,
- fun should_rewrite_relative_location/1,
- fun should_refuse_connection_to_backend/1
- ]
- }
-
- }
- }.
-
-
-should_proxy_basic_request(_) ->
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/" = Req:get(path),
- 0 = Req:get(body_length),
- <<>> = Req:recv_body(),
- {ok, {200, [{"Content-Type", "text/plain"}], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- ?_test(check_request(#req{}, Remote, Local)).
-
-should_return_alternative_status(_) ->
- Remote = fun(Req) ->
- "/alternate_status" = Req:get(path),
- {ok, {201, [], "ok"}}
- end,
- Local = fun
- ({ok, "201", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{path = "/alternate_status"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_respect_trailing_slash(_) ->
- Remote = fun(Req) ->
- "/trailing_slash/" = Req:get(path),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{path="/trailing_slash/"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_headers(_) ->
- Remote = fun(Req) ->
- "/passes_header" = Req:get(path),
- "plankton" = Req:get_header_value("X-CouchDB-Ralph"),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- path="/passes_header",
- headers=[{"X-CouchDB-Ralph", "plankton"}]
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_host_header(_) ->
- Remote = fun(Req) ->
- "/passes_host_header" = Req:get(path),
- "www.google.com" = Req:get_header_value("Host"),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- path="/passes_host_header",
- headers=[{"Host", "www.google.com"}]
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_pass_headers_back(_) ->
- Remote = fun(Req) ->
- "/passes_header_back" = Req:get(path),
- {ok, {200, [{"X-CouchDB-Plankton", "ralph"}], "ok"}}
- end,
- Local = fun
- ({ok, "200", Headers, "ok"}) ->
- lists:member({"X-CouchDB-Plankton", "ralph"}, Headers);
- (_) ->
- false
- end,
- Req = #req{path="/passes_header_back"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_use_same_protocol_version(_) ->
- Remote = fun(Req) ->
- "/uses_same_version" = Req:get(path),
- {1, 0} = Req:get(version),
- {ok, {200, [], "ok"}}
- end,
- Local = fun
- ({ok, "200", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- path="/uses_same_version",
- opts=[{http_vsn, {1, 0}}]
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_body(_) ->
- Remote = fun(Req) ->
- 'PUT' = Req:get(method),
- "/passes_body" = Req:get(path),
- <<"Hooray!">> = Req:recv_body(),
- {ok, {201, [], "ok"}}
- end,
- Local = fun
- ({ok, "201", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- method=put,
- path="/passes_body",
- body="Hooray!"
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_body_back(_) ->
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/passes_eof_body" = Req:get(path),
- {raw, {200, [{"Connection", "close"}], BodyChunks}}
- end,
- Local = fun
- ({ok, "200", _, "foobarbazinga"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{path="/passes_eof_body"},
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_chunked_body(_) ->
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- Remote = fun(Req) ->
- 'POST' = Req:get(method),
- "/passes_chunked_body" = Req:get(path),
- RecvBody = fun
- ({Length, Chunk}, [Chunk | Rest]) ->
- Length = size(Chunk),
- Rest;
- ({0, []}, []) ->
- ok
- end,
- ok = Req:stream_body(1024 * 1024, RecvBody, BodyChunks),
- {ok, {201, [], "ok"}}
- end,
- Local = fun
- ({ok, "201", _, "ok"}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{
- method=post,
- path="/passes_chunked_body",
- headers=[{"Transfer-Encoding", "chunked"}],
- body=chunked_body(BodyChunks)
- },
- ?_test(check_request(Req, Remote, Local)).
-
-should_proxy_chunked_body_back(_) ->
- ?_test(begin
- Remote = fun(Req) ->
- 'GET' = Req:get(method),
- "/passes_chunked_body_back" = Req:get(path),
- BodyChunks = [<<"foo">>, <<"bar">>, <<"bazinga">>],
- {chunked, {200, [{"Transfer-Encoding", "chunked"}], BodyChunks}}
- end,
- Req = #req{
- path="/passes_chunked_body_back",
- opts=[{stream_to, self()}]
- },
-
- Resp = check_request(Req, Remote, no_local),
- ?assertMatch({ibrowse_req_id, _}, Resp),
- {_, ReqId} = Resp,
-
- % Grab headers from response
- receive
- {ibrowse_async_headers, ReqId, "200", Headers} ->
- ?assertEqual("chunked",
- proplists:get_value("Transfer-Encoding", Headers)),
- ibrowse:stream_next(ReqId)
- after 1000 ->
- throw({error, timeout})
- end,
-
- ?assertEqual(<<"foobarbazinga">>, recv_body(ReqId, [])),
- ?assertEqual(was_ok, test_web:check_last())
- end).
-
-should_refuse_connection_to_backend(_) ->
- Local = fun
- ({ok, "500", _, _}) ->
- true;
- (_) ->
- false
- end,
- Req = #req{opts=[{url, server_url("/_error")}]},
- ?_test(check_request(Req, no_remote, Local)).
-
-should_rewrite_location_header(_) ->
- {
- "Testing location header rewrites",
- do_rewrite_tests([
- {"Location", proxy_url() ++ "/foo/bar",
- server_url() ++ "/foo/bar"},
- {"Content-Location", proxy_url() ++ "/bing?q=2",
- server_url() ++ "/bing?q=2"},
- {"Uri", proxy_url() ++ "/zip#frag",
- server_url() ++ "/zip#frag"},
- {"Destination", proxy_url(),
- server_url() ++ "/"}
- ])
- }.
-
-should_not_rewrite_external_locations(_) ->
- {
- "Testing no rewrite of external locations",
- do_rewrite_tests([
- {"Location", external_url() ++ "/search",
- external_url() ++ "/search"},
- {"Content-Location", external_url() ++ "/s?q=2",
- external_url() ++ "/s?q=2"},
- {"Uri", external_url() ++ "/f#f",
- external_url() ++ "/f#f"},
- {"Destination", external_url() ++ "/f?q=2#f",
- external_url() ++ "/f?q=2#f"}
- ])
- }.
-
-should_rewrite_relative_location(_) ->
- {
- "Testing relative rewrites",
- do_rewrite_tests([
- {"Location", "/foo",
- server_url() ++ "/foo"},
- {"Content-Location", "bar",
- server_url() ++ "/bar"},
- {"Uri", "/zing?q=3",
- server_url() ++ "/zing?q=3"},
- {"Destination", "bing?q=stuff#yay",
- server_url() ++ "/bing?q=stuff#yay"}
- ])
- }.
-
-
-do_rewrite_tests(Tests) ->
- lists:map(fun({Header, Location, Url}) ->
- should_rewrite_header(Header, Location, Url)
- end, Tests).
-
-should_rewrite_header(Header, Location, Url) ->
- Remote = fun(Req) ->
- "/rewrite_test" = Req:get(path),
- {ok, {302, [{Header, Location}], "ok"}}
- end,
- Local = fun
- ({ok, "302", Headers, "ok"}) ->
- ?assertEqual(Url, couch_util:get_value(Header, Headers)),
- true;
- (E) ->
- ?debugFmt("~p", [E]),
- false
- end,
- Req = #req{path="/rewrite_test"},
- {Header, ?_test(check_request(Req, Remote, Local))}.
-
-
-server_url() ->
- server_url("/_test").
-
-server_url(Resource) ->
- Addr = config:get("httpd", "bind_address"),
- Port = integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- lists:concat(["http://", Addr, ":", Port, Resource]).
-
-proxy_url() ->
- "http://127.0.0.1:" ++ integer_to_list(test_web:get_port()).
-
-external_url() ->
- "https://google.com".
-
-check_request(Req, Remote, Local) ->
- case Remote of
- no_remote ->
- ok;
- _ ->
- test_web:set_assert(Remote)
- end,
- Url = case proplists:lookup(url, Req#req.opts) of
- none ->
- server_url() ++ Req#req.path;
- {url, DestUrl} ->
- DestUrl
- end,
- Opts = [{headers_as_is, true} | Req#req.opts],
-    Resp = ibrowse:send_req(
- Url, Req#req.headers, Req#req.method, Req#req.body, Opts
- ),
- %?debugFmt("ibrowse response: ~p", [Resp]),
- case Local of
- no_local ->
- ok;
- _ ->
- ?assert(Local(Resp))
- end,
- case {Remote, Local} of
- {no_remote, _} ->
- ok;
- {_, no_local} ->
- ok;
- _ ->
- ?assertEqual(was_ok, test_web:check_last())
- end,
- Resp.
-
-chunked_body(Chunks) ->
- chunked_body(Chunks, []).
-
-chunked_body([], Acc) ->
- iolist_to_binary(lists:reverse(Acc, "0\r\n\r\n"));
-chunked_body([Chunk | Rest], Acc) ->
- Size = to_hex(size(Chunk)),
- chunked_body(Rest, ["\r\n", Chunk, "\r\n", Size | Acc]).
-
-to_hex(Val) ->
- to_hex(Val, []).
-
-to_hex(0, Acc) ->
- Acc;
-to_hex(Val, Acc) ->
- to_hex(Val div 16, [hex_char(Val rem 16) | Acc]).
-
-hex_char(V) when V < 10 -> $0 + V;
-hex_char(V) -> $A + V - 10.
-
-recv_body(ReqId, Acc) ->
- receive
- {ibrowse_async_response, ReqId, Data} ->
- recv_body(ReqId, [Data | Acc]);
- {ibrowse_async_response_end, ReqId} ->
- iolist_to_binary(lists:reverse(Acc));
- Else ->
- throw({error, unexpected_mesg, Else})
- after ?TIMEOUT ->
- throw({error, timeout})
- end.
diff --git a/src/couch/test/couchdb_os_daemons_tests.erl b/src/couch/test/couchdb_os_daemons_tests.erl
deleted file mode 100644
index 1728314bb..000000000
--- a/src/couch/test/couchdb_os_daemons_tests.erl
+++ /dev/null
@@ -1,259 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couchdb_os_daemons_tests).
-
-%% tests are UNIX-specific, will not function under Windows
--ifdef(WINDOWS).
--undef(TEST).
--define(NOTEST, 1).
--endif.
-
--include_lib("couch/include/couch_eunit.hrl").
-
-%% keep in sync with couchdb/couch_os_daemons.erl
--record(daemon, {
- port,
- name,
- cmd,
- kill,
- status=running,
- cfg_patterns=[],
- errors=[],
- buf=[]
-}).
-
--define(DAEMON_CONFIGER, "os_daemon_configer.escript").
--define(DAEMON_LOOPER, "os_daemon_looper.escript").
--define(DAEMON_BAD_PERM, "os_daemon_bad_perm.sh").
--define(DAEMON_CAN_REBOOT, "os_daemon_can_reboot.sh").
--define(DAEMON_DIE_ON_BOOT, "os_daemon_die_on_boot.sh").
--define(DAEMON_DIE_QUICKLY, "os_daemon_die_quickly.sh").
--define(TRIES, 40).
--define(TRY_DELAY_MS, 100).
--define(TIMEOUT, 10000).
--define(CONFIG_TIMEOUT, 1000).
-
-
-setup(DName) ->
- Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]),
- {ok, OsDPid} = couch_os_daemons:start_link(),
- config:set("os_daemons", DName,
- filename:join([?FIXTURESDIR, DName]), false),
- % Set configuration option to be used by configuration_reader_test_
- % This will be used in os_daemon_configer.escript:test_get_cfg2
- config:set("uuids", "algorithm","sequential", false),
- config:set("os_daemon_settings", "max_retries", "2", false),
- ensure_n_daemons_are_alive(1),
- {Ctx, OsDPid}.
-
-teardown(_, {Ctx, OsDPid}) ->
- try
- test_util:stop_sync_throw(OsDPid, fun() ->
- exit(OsDPid, shutdown)
- end, {timeout, os_daemon_stop}, ?TIMEOUT)
- catch
- {timeout, os_daemon_stop} ->
- Msg = "~nWARNING: OS daemons test stop ~p msec timeout exceeded~n",
- io:format(standard_error, Msg, [?TIMEOUT]),
- exit(OsDPid, kill)
- end,
- test_util:stop(Ctx).
-
-
-os_daemons_test_() ->
- {
- "OS Daemons tests",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{?DAEMON_LOOPER, Fun} || Fun <- [
- fun should_check_daemon/2,
- fun should_check_daemon_table_form/2,
- fun should_clean_tables_on_daemon_remove/2,
- fun should_spawn_multiple_daemons/2,
- fun should_keep_alive_one_daemon_on_killing_other/2
- ]]
- }
- }.
-
-configuration_reader_test_() ->
- {
- "OS Daemon requests CouchDB configuration",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{?DAEMON_CONFIGER,
- fun should_read_write_config_settings_by_daemon/2}]
-
- }
- }.
-
-error_test_() ->
- {
- "OS Daemon process error tests",
- {
- foreachx,
- fun setup/1, fun teardown/2,
- [{?DAEMON_BAD_PERM, fun should_fail_due_to_lack_of_permissions/2},
- {?DAEMON_DIE_ON_BOOT, fun should_die_on_boot/2},
- {?DAEMON_DIE_QUICKLY, fun should_die_quickly/2},
- {?DAEMON_CAN_REBOOT, fun should_not_being_halted/2}]
- }
- }.
-
-
-should_check_daemon(DName, _) ->
- ?_test(begin
- {ok, [D]} = couch_os_daemons:info([table]),
- check_daemon(D, DName)
- end).
-
-should_check_daemon_table_form(DName, _) ->
- ?_test(begin
- {ok, Tab} = couch_os_daemons:info(),
- [D] = ets:tab2list(Tab),
- check_daemon(D, DName)
- end).
-
-should_clean_tables_on_daemon_remove(DName, _) ->
- ?_test(begin
- config:delete("os_daemons", DName, false),
- {ok, Tab2} = couch_os_daemons:info(),
- ?_assertEqual([], ets:tab2list(Tab2))
- end).
-
-should_spawn_multiple_daemons(DName, _) ->
- ?_test(begin
- config:set("os_daemons", "bar",
- filename:join([?FIXTURESDIR, DName]), false),
- config:set("os_daemons", "baz",
- filename:join([?FIXTURESDIR, DName]), false),
- ensure_n_daemons_are_alive(3), % DName, "bar" and "baz"
- {ok, Daemons} = couch_os_daemons:info([table]),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, Daemons),
- {ok, Tab} = couch_os_daemons:info(),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, ets:tab2list(Tab))
- end).
-
-should_keep_alive_one_daemon_on_killing_other(DName, _) ->
- ?_test(begin
- config:set("os_daemons", "bar",
- filename:join([?FIXTURESDIR, DName]), false),
- ensure_n_daemons_are_alive(2), % DName and "bar"
- {ok, Daemons} = couch_os_daemons:info([table]),
- lists:foreach(fun(D) ->
- check_daemon(D)
- end, Daemons),
-
- config:delete("os_daemons", "bar", false),
-        ensure_n_daemons_are_alive(1), % DName only, "bar" should be dead
- {ok, [D2]} = couch_os_daemons:info([table]),
- check_daemon(D2, DName),
-
- {ok, Tab} = couch_os_daemons:info(),
- [T] = ets:tab2list(Tab),
- check_daemon(T, DName)
- end).
-
-should_read_write_config_settings_by_daemon(DName, _) ->
- ?_test(begin
-        % have to wait until the daemon runs all of its tests
-        % see the daemon's script for more info
- timer:sleep(?CONFIG_TIMEOUT),
- {ok, [D]} = couch_os_daemons:info([table]),
- check_daemon(D, DName)
- end).
-
-should_fail_due_to_lack_of_permissions(DName, _) ->
- ?_test(should_halts(DName, 1000)).
-
-should_die_on_boot(DName, _) ->
- ?_test(should_halts(DName, 2000)).
-
-should_die_quickly(DName, _) ->
- ?_test(should_halts(DName, 4000)).
-
-should_not_being_halted(DName, _) ->
- ?_test(begin
- timer:sleep(1000),
- {ok, [D1]} = couch_os_daemons:info([table]),
- check_daemon(D1, DName, 0),
-
- % Should reboot every two seconds. We're at 1s, so wait
- % until 3s to be in the middle of the next invocation's
- % life span.
-
- timer:sleep(2000),
- {ok, [D2]} = couch_os_daemons:info([table]),
- check_daemon(D2, DName, 1),
-
- % If the kill command changed, that means we rebooted the process.
- ?assertNotEqual(D1#daemon.kill, D2#daemon.kill)
- end).
-
-should_halts(DName, Time) ->
- timer:sleep(Time),
- {ok, [D]} = couch_os_daemons:info([table]),
- check_dead(D, DName),
- config:delete("os_daemons", DName, false).
-
-check_daemon(D) ->
- check_daemon(D, D#daemon.name).
-
-check_daemon(D, Name) ->
- check_daemon(D, Name, 0).
-
-check_daemon(D, Name, Errs) ->
- ?assert(is_port(D#daemon.port)),
- ?assertEqual(Name, D#daemon.name),
- ?assertNotEqual(undefined, D#daemon.kill),
- ?assertEqual(running, D#daemon.status),
- ?assertEqual(Errs, length(D#daemon.errors)),
- ?assertEqual([], D#daemon.buf).
-
-check_dead(D, Name) ->
- ?assert(is_port(D#daemon.port)),
- ?assertEqual(Name, D#daemon.name),
- ?assertNotEqual(undefined, D#daemon.kill),
- ?assertEqual(halted, D#daemon.status),
- ?assertEqual(nil, D#daemon.errors),
- ?assertEqual(nil, D#daemon.buf).
-
-daemons() ->
- {ok, Daemons} = couch_os_daemons:info([table]),
- Daemons.
-
-ensure_n_daemons_are_alive(NumDaemons) ->
- retry(fun() -> length(daemons()) == NumDaemons end, "spawning"),
- retry(fun() ->
- lists:all(fun(D) -> D#daemon.kill =/= undefined end, daemons())
- end, "waiting for kill flag").
-
-retry(Pred, FailReason) ->
- retry(Pred, ?TRIES, FailReason).
-
-retry(_Pred, 0, FailReason) ->
- erlang:error({assertion_failed,[{module, ?MODULE}, {line, ?LINE},
- {reason, "Timed out: " ++ FailReason}]});
-retry(Pred, N, FailReason) ->
- case Pred() of
- true ->
- ok;
- false ->
- timer:sleep(?TRY_DELAY_MS),
- retry(Pred, N - 1, FailReason)
- end.
diff --git a/src/couch/test/couchdb_os_proc_pool.erl b/src/couch/test/couchdb_os_proc_pool.erl
index 65ae5c54c..69f8051ad 100644
--- a/src/couch/test/couchdb_os_proc_pool.erl
+++ b/src/couch/test/couchdb_os_proc_pool.erl
@@ -206,8 +206,7 @@ should_reduce_pool_on_idle_os_procs() ->
setup_config() ->
- MFA = "{couch_native_process, start_link, []}",
- config:set("native_query_servers", "test_lang", MFA, false),
+ config:set("native_query_servers", "enable_erlang_query_server", "true", false),
config:set("query_server_config", "os_process_limit", "3", false),
config:set("query_server_config", "os_process_soft_limit", "2", false),
ok = confirm_config("os_process_soft_limit", "2").
@@ -235,7 +234,7 @@ spawn_client() ->
Parent = self(),
Ref = make_ref(),
Pid = spawn(fun() ->
- Proc = couch_query_servers:get_os_process(<<"test_lang">>),
+ Proc = couch_query_servers:get_os_process(<<"erlang">>),
loop(Parent, Ref, Proc)
end),
{Pid, Ref}.
@@ -245,7 +244,7 @@ spawn_client(DDocId) ->
Ref = make_ref(),
Pid = spawn(fun() ->
DDocKey = {DDocId, <<"1-abcdefgh">>},
- DDoc = #doc{body={[{<<"language">>, <<"test_lang">>}]}},
+ DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}},
Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey),
loop(Parent, Ref, Proc)
end),
diff --git a/src/couch/test/couchdb_update_conflicts_tests.erl b/src/couch/test/couchdb_update_conflicts_tests.erl
index 09c2834a8..e92c73856 100644
--- a/src/couch/test/couchdb_update_conflicts_tests.erl
+++ b/src/couch/test/couchdb_update_conflicts_tests.erl
@@ -17,6 +17,7 @@
-define(i2l(I), integer_to_list(I)).
-define(DOC_ID, <<"foobar">>).
+-define(LOCAL_DOC_ID, <<"_local/foobar">>).
-define(NUM_CLIENTS, [100, 500, 1000, 2000, 5000, 10000]).
-define(TIMEOUT, 20000).
@@ -52,7 +53,7 @@ view_indexes_cleanup_test_() ->
fun start/0, fun test_util:stop_couch/1,
[
concurrent_updates(),
- couchdb_188()
+ bulk_docs_updates()
]
}
}.
@@ -68,13 +69,17 @@ concurrent_updates()->
}
}.
-couchdb_188()->
+bulk_docs_updates()->
{
- "COUCHDB-188",
+ "Bulk docs updates",
{
foreach,
fun setup/0, fun teardown/1,
- [fun should_bulk_create_delete_doc/1]
+ [
+ fun should_bulk_create_delete_doc/1,
+ fun should_bulk_create_local_doc/1,
+ fun should_ignore_invalid_local_doc/1
+ ]
}
}.
@@ -91,6 +96,12 @@ should_concurrently_update_doc(NumClients, {DbName, InitRev})->
should_bulk_create_delete_doc({DbName, InitRev})->
?_test(bulk_delete_create(DbName, InitRev)).
+should_bulk_create_local_doc({DbName, _})->
+ ?_test(bulk_create_local_doc(DbName)).
+
+should_ignore_invalid_local_doc({DbName, _})->
+ ?_test(ignore_invalid_local_doc(DbName)).
+
concurrent_doc_update(NumClients, DbName, InitRev) ->
Clients = lists:map(
@@ -157,10 +168,10 @@ ensure_in_single_revision_leaf(DbName) ->
[{ok, Doc2}] = Leaves,
?assertEqual(Doc, Doc2).
-
+
bulk_delete_create(DbName, InitRev) ->
{ok, Db} = couch_db:open_int(DbName, []),
-
+
DeletedDoc = couch_doc:from_json_obj({[
{<<"_id">>, ?DOC_ID},
{<<"_rev">>, InitRev},
@@ -176,7 +187,7 @@ bulk_delete_create(DbName, InitRev) ->
?assertEqual(2, length([ok || {ok, _} <- Results])),
[{ok, Rev1}, {ok, Rev2}] = Results,
-
+
{ok, Db2} = couch_db:open_int(DbName, []),
{ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
@@ -214,6 +225,45 @@ bulk_delete_create(DbName, InitRev) ->
?assertEqual(3, element(1, Rev2)).
+bulk_create_local_doc(DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+
+ LocalDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, ?LOCAL_DOC_ID},
+ {<<"_rev">>, <<"0-1">>}
+ ]}),
+
+ {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
+ [], replicated_changes),
+ ok = couch_db:close(Db),
+ ?assertEqual([], Results),
+
+ {ok, Db2} = couch_db:open_int(DbName, []),
+ {ok, LocalDoc1} = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
+ ok = couch_db:close(Db2),
+ ?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id),
+ ?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs).
+
+
+ignore_invalid_local_doc(DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+
+ LocalDoc = couch_doc:from_json_obj({[
+ {<<"_id">>, ?LOCAL_DOC_ID},
+ {<<"_rev">>, <<"0-abcdef">>}
+ ]}),
+
+ {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
+ [], replicated_changes),
+ ok = couch_db:close(Db),
+ ?assertEqual([], Results),
+
+ {ok, Db2} = couch_db:open_int(DbName, []),
+ Result2 = couch_db:open_doc_int(Db2, ?LOCAL_DOC_ID, []),
+ ok = couch_db:close(Db2),
+ ?assertEqual({not_found, missing}, Result2).
+
+
spawn_client(DbName, Doc) ->
spawn(fun() ->
{ok, Db} = couch_db:open_int(DbName, []),
diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl
index 5be3619f2..4d81f184f 100644
--- a/src/couch_log/src/couch_log_formatter.erl
+++ b/src/couch_log/src/couch_log_formatter.erl
@@ -56,23 +56,33 @@ format(Level, Pid, Msg) ->
}.
-format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
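+% Formatting an unexpected event can itself crash (for instance a badmatch
+% when a report carries a different number of args than a clause expects),
+% so wrap do_format/1 and report the failure as an error entry instead.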
+format(Event) ->
+ try
+ do_format(Event)
+ catch
+ Tag:Err ->
+ Msg = "Encountered error ~w when formatting ~w",
+ format(error, self(), Msg, [{Tag, Err}, Event])
+ end.
+
+
+do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
%% gen_server terminate
- [Name, LastMsg, State, Reason] = Args,
+ [Name, LastMsg, State, Reason | Extra] = Args,
MsgFmt = "gen_server ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p",
- MsgArgs = [Name, format_reason(Reason), LastMsg, State],
+ " last msg: ~p~n state: ~p~n extra: ~p",
+ MsgArgs = [Name, format_reason(Reason), LastMsg, State, Extra],
format(error, Pid, MsgFmt, MsgArgs);
-format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
+do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
%% gen_fsm terminate
- [Name, LastMsg, StateName, State, Reason] = Args,
+ [Name, LastMsg, StateName, State, Reason | Extra] = Args,
MsgFmt = "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p",
- MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State],
+ " last msg: ~p~n state: ~p~n extra: ~p",
+ MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State, Extra],
format(error, Pid, MsgFmt, MsgArgs);
-format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
+do_format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
%% gen_event handler terminate
[ID, Name, LastMsg, State, Reason] = Args,
MsgFmt = "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
@@ -80,20 +90,20 @@ format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
MsgArgs = [ID, Name, format_reason(Reason), LastMsg, State],
format(error, Pid, MsgFmt, MsgArgs);
-format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) ->
+do_format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) ->
% These messages are for whenever any process exits due
% to a throw or error. We intercept here to remove the
% extra newlines.
NewMsg = lists:sublist(Msg, length(Msg) - 1),
format(error, emulator, NewMsg);
-format({error, _GL, {Pid, Fmt, Args}}) ->
+do_format({error, _GL, {Pid, Fmt, Args}}) ->
format(error, Pid, Fmt, Args);
-format({error_report, _GL, {Pid, std_error, D}}) ->
+do_format({error_report, _GL, {Pid, std_error, D}}) ->
format(error, Pid, print_silly_list(D));
-format({error_report, _GL, {Pid, supervisor_report, D}}) ->
+do_format({error_report, _GL, {Pid, supervisor_report, D}}) ->
case lists:sort(D) of
[{errorContext, Ctx}, {offender, Off},
{reason, Reason}, {supervisor, Name}] ->
@@ -111,20 +121,20 @@ format({error_report, _GL, {Pid, supervisor_report, D}}) ->
format(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D))
end;
-format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) ->
+do_format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) ->
Msg = "CRASH REPORT " ++ format_crash_report(Report, Neighbors),
format(error, Pid, Msg);
-format({warning_msg, _GL, {Pid, Fmt, Args}}) ->
+do_format({warning_msg, _GL, {Pid, Fmt, Args}}) ->
format(warning, Pid, Fmt, Args);
-format({warning_report, _GL, {Pid, std_warning, Report}}) ->
+do_format({warning_report, _GL, {Pid, std_warning, Report}}) ->
format(warning, Pid, print_silly_list(Report));
-format({info_msg, _GL, {Pid, Fmt, Args}}) ->
+do_format({info_msg, _GL, {Pid, Fmt, Args}}) ->
format(info, Pid, Fmt, Args);
-format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
+do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
case lists:sort(D) of
[{application, App}, {exited, Reason}, {type, _Type}] ->
MsgFmt = "Application ~w exited with reason: ~s",
@@ -133,10 +143,10 @@ format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
format(info, Pid, print_silly_list(D))
end;
-format({info_report, _GL, {Pid, std_info, D}}) ->
+do_format({info_report, _GL, {Pid, std_info, D}}) ->
format(info, Pid, "~w", [D]);
-format({info_report, _GL, {Pid, progress, D}}) ->
+do_format({info_report, _GL, {Pid, progress, D}}) ->
case lists:sort(D) of
[{application, App}, {started_at, Node}] ->
MsgFmt = "Application ~w started on node ~w",
@@ -150,7 +160,7 @@ format({info_report, _GL, {Pid, progress, D}}) ->
format(info, Pid, "PROGRESS REPORT " ++ print_silly_list(D))
end;
-format(Event) ->
+do_format(Event) ->
format(warning, self(), "Unexpected error_logger event ~w", [Event]).
diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl
index 083f5fc33..6219a36e9 100644
--- a/src/couch_log/src/couch_log_sup.erl
+++ b/src/couch_log/src/couch_log_sup.erl
@@ -26,7 +26,7 @@ start_link() ->
init([]) ->
ok = couch_log_config:init(),
- {ok, {{one_for_one, 1, 1}, children()}}.
+ {ok, {{one_for_one, 10, 10}, children()}}.
children() ->
diff --git a/src/couch_log/test/couch_log_formatter_test.erl b/src/couch_log/test/couch_log_formatter_test.erl
index a8f69b221..795efcf29 100644
--- a/src/couch_log/test/couch_log_formatter_test.erl
+++ b/src/couch_log/test/couch_log_formatter_test.erl
@@ -37,6 +37,29 @@ format_reason_test() ->
?assertEqual(Formatted, lists:flatten(Entry#log_entry.msg)).
+crashing_formatting_test() ->
+ Pid = self(),
+ Event = {
+ error,
+ erlang:group_leader(),
+ {
+ Pid,
+ "** Generic server and some stuff",
+ [a_gen_server, {foo, bar}, server_state] % not enough args!
+ }
+ },
+ ?assertMatch(
+ #log_entry{
+ level = error,
+ pid = Pid
+ },
+ do_format(Event)
+ ),
+ do_matches(do_format(Event), [
+ "Encountered error {error,{badmatch"
+ ]).
+
+
gen_server_error_test() ->
Pid = self(),
Event = {
@@ -59,7 +82,35 @@ gen_server_error_test() ->
"gen_server a_gen_server terminated",
"with reason: some_reason",
"last msg: {foo,bar}",
- "state: server_state"
+ "state: server_state",
+ "extra: \\[\\]"
+ ]).
+
+
+gen_server_error_with_extra_args_test() ->
+ Pid = self(),
+ Event = {
+ error,
+ erlang:group_leader(),
+ {
+ Pid,
+ "** Generic server and some stuff",
+ [a_gen_server, {foo, bar}, server_state, some_reason, sad, args]
+ }
+ },
+ ?assertMatch(
+ #log_entry{
+ level = error,
+ pid = Pid
+ },
+ do_format(Event)
+ ),
+ do_matches(do_format(Event), [
+ "gen_server a_gen_server terminated",
+ "with reason: some_reason",
+ "last msg: {foo,bar}",
+ "state: server_state",
+ "extra: \\[sad,args\\]"
]).
@@ -85,7 +136,35 @@ gen_fsm_error_test() ->
"gen_fsm a_gen_fsm in state state_name",
"with reason: barf",
"last msg: {ohai,there}",
- "state: curr_state"
+ "state: curr_state",
+ "extra: \\[\\]"
+ ]).
+
+
+gen_fsm_error_with_extra_args_test() ->
+ Pid = self(),
+ Event = {
+ error,
+ erlang:group_leader(),
+ {
+ Pid,
+ "** State machine did a thing",
+ [a_gen_fsm, {ohai,there}, state_name, curr_state, barf, sad, args]
+ }
+ },
+ ?assertMatch(
+ #log_entry{
+ level = error,
+ pid = Pid
+ },
+ do_format(Event)
+ ),
+ do_matches(do_format(Event), [
+ "gen_fsm a_gen_fsm in state state_name",
+ "with reason: barf",
+ "last msg: {ohai,there}",
+ "state: curr_state",
+ "extra: \\[sad,args\\]"
]).
diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
index 3ef11805f..9a069cec0 100644
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ b/src/couch_mrview/src/couch_mrview_compactor.erl
@@ -86,7 +86,9 @@ compact(State) ->
{type, view_compaction},
{database, DbName},
{design_document, IdxName},
- {progress, 0}
+ {progress, 0},
+ {changes_done, 0},
+ {total_changes, TotalChanges}
]),
BufferSize0 = config:get(
diff --git a/src/couch_replicator/src/couch_replicator.hrl b/src/couch_replicator/src/couch_replicator.hrl
index d46c34720..2a5b7c8c8 100644
--- a/src/couch_replicator/src/couch_replicator.hrl
+++ b/src/couch_replicator/src/couch_replicator.hrl
@@ -22,7 +22,8 @@
view = nil :: any() | '_',
doc_id :: any() | '_',
db_name = null :: null | binary() | '_',
- start_time = {0, 0, 0} :: erlang:timestamp() | '_'
+ start_time = {0, 0, 0} :: erlang:timestamp() | '_',
+ stats = couch_replicator_stats:new() :: orddict:orddict() | '_'
}).
-type rep_id() :: {string(), string()}.
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index 762ef18fe..e3dbede83 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -40,7 +40,8 @@
health_threshold/0,
jobs/0,
job/1,
- restart_job/1
+ restart_job/1,
+ update_job_stats/2
]).
%% config_listener callbacks
@@ -215,6 +216,11 @@ restart_job(JobId) ->
end.
+-spec update_job_stats(job_id(), term()) -> ok.
+update_job_stats(JobId, Stats) ->
+ gen_server:cast(?MODULE, {update_job_stats, JobId, Stats}).
+
+
%% gen_server functions
init(_) ->
@@ -283,6 +289,16 @@ handle_cast({set_interval, Interval}, State) when is_integer(Interval),
couch_log:notice("~p: interval set to ~B", [?MODULE, Interval]),
{noreply, State#state{interval = Interval}};
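+% Jobs push their latest stats here so the scheduler's #rep{} copy stays
+% current; a job that is stopped and later restarted is initialized from
+% that copy, which is how counters survive between runs.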
+handle_cast({update_job_stats, JobId, Stats}, State) ->
+ case rep_state(JobId) of
+ nil ->
+ ok;
+ #rep{} = Rep ->
+ NewRep = Rep#rep{stats = Stats},
+ true = ets:update_element(?MODULE, JobId, {#job.rep, NewRep})
+ end,
+ {noreply, State};
+
handle_cast(UnexpectedMsg, State) ->
    couch_log:error("~p: received unexpected cast ~p", [?MODULE, UnexpectedMsg]),
{noreply, State}.
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index 1467d9f30..f669d464d 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -118,7 +118,6 @@ do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
source_name = SourceName,
target_name = TargetName,
start_seq = {_Ts, StartSeq},
- committed_seq = {_, CommittedSeq},
highest_seq_done = {_, HighestSeq},
checkpoint_interval = CheckpointInterval
} = State = init_state(Rep),
@@ -159,16 +158,9 @@ do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
{source, ?l2b(SourceName)},
{target, ?l2b(TargetName)},
{continuous, get_value(continuous, Options, false)},
- {revisions_checked, 0},
- {missing_revisions_found, 0},
- {docs_read, 0},
- {docs_written, 0},
- {changes_pending, get_pending_count(State)},
- {doc_write_failures, 0},
{source_seq, HighestSeq},
- {checkpointed_source_seq, CommittedSeq},
{checkpoint_interval, CheckpointInterval}
- ]),
+ ] ++ rep_stats(State)),
couch_task_status:set_update_frequency(1000),
% Until OTP R14B03:
@@ -582,7 +574,8 @@ init_state(Rep) ->
source = Src0, target = Tgt,
options = Options, user_ctx = UserCtx,
type = Type, view = View,
- start_time = StartTime
+ start_time = StartTime,
+ stats = Stats
} = Rep,
% Adjust minimum number of http source connections to 2 to avoid deadlock
Src = adjust_maxconn(Src0, BaseId),
@@ -631,7 +624,8 @@ init_state(Rep) ->
checkpoint_interval = get_value(checkpoint_interval, Options,
?DEFAULT_CHECKPOINT_INTERVAL),
type = Type,
- view = View
+ view = View,
+ stats = Stats
},
State#rep_state{timer = start_timer(State)}.
@@ -983,6 +977,7 @@ update_task(State) ->
current_through_seq = {_, ThroughSeq},
highest_seq_done = {_, HighestSeq}
} = State,
+ update_scheduler_job_stats(State),
couch_task_status:update(
rep_stats(State) ++ [
{source_seq, HighestSeq},
@@ -990,6 +985,11 @@ update_task(State) ->
]).
+update_scheduler_job_stats(#rep_state{rep_details = Rep, stats = Stats}) ->
+ JobId = Rep#rep.id,
+ couch_replicator_scheduler:update_job_stats(JobId, Stats).
+
+
rep_stats(State) ->
#rep_state{
committed_seq = {_, CommittedSeq},
diff --git a/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl b/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl
new file mode 100644
index 000000000..3b7377b78
--- /dev/null
+++ b/src/couch_replicator/test/couch_replicator_retain_stats_between_job_runs.erl
@@ -0,0 +1,160 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_replicator_retain_stats_between_job_runs).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch_replicator/src/couch_replicator.hrl").
+
+-define(DELAY, 500).
+-define(TIMEOUT, 60000).
+-define(i2l(I), integer_to_list(I)).
+-define(io2b(Io), iolist_to_binary(Io)).
+
+
+setup() ->
+ Ctx = test_util:start_couch([couch_replicator]),
+ Source = setup_db(),
+ Target = setup_db(),
+ {Ctx, {Source, Target}}.
+
+
+teardown({Ctx, {Source, Target}}) ->
+ teardown_db(Source),
+ teardown_db(Target),
+ ok = application:stop(couch_replicator),
+ ok = test_util:stop_couch(Ctx).
+
+
+stats_retained_test_() ->
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ fun t_stats_retained/1
+ }.
+
+
+t_stats_retained({_Ctx, {Source, Target}}) ->
+ ?_test(begin
+ populate_db(Source, 42),
+ {ok, RepPid, RepId} = replicate(Source, Target),
+ wait_target_in_sync(Source, Target),
+ check_active_tasks(42, 42),
+ reschedule_job(RepPid),
+ check_active_tasks(42, 42),
+ couch_replicator_scheduler:remove_job(RepId)
+ end).
+
+
+setup_db() ->
+ DbName = ?tempdb(),
+ {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
+ ok = couch_db:close(Db),
+ DbName.
+
+
+teardown_db(DbName) ->
+ ok = couch_server:delete(DbName, [?ADMIN_CTX]),
+ ok.
+
+
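+% Stop and restart the job by briefly setting max_jobs to 0: the following
+% reschedule pass stops every running job, and restoring the limit lets the
+% scheduler start it again as a fresh process.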
+reschedule_job(RepPid) ->
+ Ref = erlang:monitor(process, RepPid),
+ gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 0}),
+ couch_replicator_scheduler:reschedule(),
+ receive
+ {'DOWN', Ref, _, _, _} -> ok
+ after ?TIMEOUT ->
+ erlang:error(timeout)
+ end,
+ gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 500}),
+ couch_replicator_scheduler:reschedule().
+
+
+check_active_tasks(DocsRead, DocsWritten) ->
+ RepTask = wait_for_task_status(),
+ ?assertNotEqual(timeout, RepTask),
+ ?assertEqual(DocsRead, couch_util:get_value(docs_read, RepTask)),
+ ?assertEqual(DocsWritten, couch_util:get_value(docs_written, RepTask)).
+
+
+replication_tasks() ->
+ lists:filter(fun(P) ->
+ couch_util:get_value(type, P) =:= replication
+ end, couch_task_status:all()).
+
+
+wait_for_task_status() ->
+ test_util:wait(fun() ->
+ case replication_tasks() of
+ [] -> wait;
+ [RepTask] -> RepTask
+ end
+ end).
+
+
+populate_db(DbName, DocCount) ->
+ {ok, Db} = couch_db:open_int(DbName, []),
+ Docs = lists:foldl(
+ fun(DocIdCounter, Acc) ->
+ Id = ?io2b(["doc", ?i2l(DocIdCounter)]),
+ Doc = #doc{id = Id, body = {[]}},
+ [Doc | Acc]
+ end,
+ [], lists:seq(1, DocCount)),
+ {ok, _} = couch_db:update_docs(Db, Docs, []),
+ ok = couch_db:close(Db).
+
+
+wait_target_in_sync(Source, Target) ->
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
+ ok = couch_db:close(SourceDb),
+ SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
+ wait_target_in_sync_loop(SourceDocCount, Target, 300).
+
+
+wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
+ erlang:error({assertion_failed, [
+ {module, ?MODULE}, {line, ?LINE},
+ {reason, "Could not get source and target databases in sync"}
+ ]});
+
+wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
+ {ok, Target} = couch_db:open_int(TargetName, []),
+ {ok, TargetInfo} = couch_db:get_db_info(Target),
+ ok = couch_db:close(Target),
+ TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
+ case TargetDocCount == DocCount of
+ true ->
+ true;
+ false ->
+ ok = timer:sleep(?DELAY),
+ wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
+ end.
+
+
+replicate(Source, Target) ->
+ SrcUrl = couch_replicator_test_helper:db_url(Source),
+ TgtUrl = couch_replicator_test_helper:db_url(Target),
+ RepObject = {[
+ {<<"source">>, SrcUrl},
+ {<<"target">>, TgtUrl},
+ {<<"continuous">>, true}
+ ]},
+ {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
+ ok = couch_replicator_scheduler:add_job(Rep),
+ couch_replicator_scheduler:reschedule(),
+ Pid = couch_replicator_test_helper:get_pid(Rep#rep.id),
+ {ok, Pid, Rep#rep.id}.
diff --git a/src/mango/Makefile b/src/mango/Makefile
index 1b2a50452..59f4a29c2 100644
--- a/src/mango/Makefile
+++ b/src/mango/Makefile
@@ -46,7 +46,7 @@ test:
# target: pip-install - Installs required Python packages
pip-install:
pip install nose requests
- pip install hypothesis
+ pip install hypothesis==3.79.0
.PHONY: venv
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 174381e4a..b3a7f4080 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -229,6 +229,9 @@ view_cb({row, Row}, #mrargs{extra = Options} = Acc) ->
doc = couch_util:get_value(doc, Row)
},
case ViewRow#view_row.doc of
+ null ->
+ put(mango_docs_examined, get(mango_docs_examined) + 1),
+ maybe_send_mango_ping();
undefined ->
ViewRow2 = ViewRow#view_row{
value = couch_util:get_value(value, Row)
@@ -427,7 +430,10 @@ doc_member(Cursor, RowProps) ->
match_doc(Selector, Doc, ExecutionStats1);
Else ->
Else
- end
+ end;
+ null ->
+ ExecutionStats1 = mango_execution_stats:incr_docs_examined(ExecutionStats),
+ {no_match, null, {execution_stats, ExecutionStats1}}
end.
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl
index 6150e1d19..4c536f871 100644
--- a/src/mango/src/mango_native_proc.erl
+++ b/src/mango/src/mango_native_proc.erl
@@ -89,11 +89,11 @@ handle_call({prompt, [<<"add_fun">>, IndexInfo]}, _From, St) ->
handle_call({prompt, [<<"map_doc">>, Doc]}, _From, St) ->
{reply, map_doc(St, mango_json:to_binary(Doc)), St};
-handle_call({prompt, [<<"reduce">>, _, _]}, _From, St) ->
- {reply, null, St};
+handle_call({prompt, [<<"reduce">>, RedSrcs, _]}, _From, St) ->
+ {reply, [true, [null || _ <- RedSrcs]], St};
-handle_call({prompt, [<<"rereduce">>, _, _]}, _From, St) ->
- {reply, null, St};
+handle_call({prompt, [<<"rereduce">>, RedSrcs, _]}, _From, St) ->
+ {reply, [true, [null || _ <- RedSrcs]], St};
handle_call({prompt, [<<"index_doc">>, Doc]}, _From, St) ->
Vals = case index_doc(St, mango_json:to_binary(Doc)) of
diff --git a/src/mango/test/01-index-crud-test.py b/src/mango/test/01-index-crud-test.py
index cf5b91865..f57db39af 100644
--- a/src/mango/test/01-index-crud-test.py
+++ b/src/mango/test/01-index-crud-test.py
@@ -13,8 +13,24 @@
import random
import mango
+import copy
import unittest
+DOCS = [
+ {
+ "_id": "1",
+ "name": "Jimi",
+ "age": 10,
+ "cars": 1
+ },
+ {
+ "_id": "2",
+ "name": "kate",
+ "age": 8,
+ "cars": 0
+ }
+]
+
class IndexCrudTests(mango.DbPerClass):
def setUp(self):
self.db.recreate()
@@ -271,6 +287,25 @@ class IndexCrudTests(mango.DbPerClass):
except Exception as e:
self.assertEqual(e.response.status_code, 500)
+ def test_out_of_sync(self):
+ self.db.save_docs(copy.deepcopy(DOCS))
+ self.db.create_index(["age"], name="age")
+
+ selector = {
+ "age": {
+ "$gt": 0
+ },
+ }
+ docs = self.db.find(selector,
+ use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4")
+ self.assertEqual(len(docs), 2)
+
+ self.db.delete_doc("1")
+
+ docs1 = self.db.find(selector, update=False,
+ use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4")
+ self.assertEqual(len(docs1), 1)
+
@unittest.skipUnless(mango.has_text_service(), "requires text service")
class IndexCrudTextTests(mango.DbPerClass):
diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py
index bc12bbc68..59486c861 100644
--- a/src/mango/test/mango.py
+++ b/src/mango/test/mango.py
@@ -113,6 +113,12 @@ class Database(object):
r.raise_for_status()
return r.json()
+ def delete_doc(self, docid):
+ r = self.sess.get(self.path(docid))
+ r.raise_for_status()
+ original_rev = r.json()['_rev']
+ self.sess.delete(self.path(docid), params={"rev": original_rev})
+
def ddoc_info(self, ddocid):
r = self.sess.get(self.path([ddocid, "_info"]))
r.raise_for_status()
diff --git a/test/elixir/.gitignore b/test/elixir/.gitignore
new file mode 100644
index 000000000..2e39defe6
--- /dev/null
+++ b/test/elixir/.gitignore
@@ -0,0 +1,2 @@
+_build/
+deps/
diff --git a/test/elixir/Makefile b/test/elixir/Makefile
new file mode 100644
index 000000000..bfcf017d5
--- /dev/null
+++ b/test/elixir/Makefile
@@ -0,0 +1,2 @@
+all:
+ mix test --trace
diff --git a/test/elixir/README.md b/test/elixir/README.md
new file mode 100644
index 000000000..b1b745af3
--- /dev/null
+++ b/test/elixir/README.md
@@ -0,0 +1,104 @@
+# Elixir Test Suite
+
+A proof of concept for porting the JS test suite to Elixir.
+
+Currently, the basics.js suite has been partially ported.
+
+To run the suite:
+
+```
+mix deps.get
+mix test --trace
+```
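+
+To run a single file, or a single test within a file, the standard
+`mix test` filters should work as well. This assumes a CouchDB node is
+already listening on port 15984, which is where `lib/couch.ex` points:
+
+```
+# run one suite
+mix test test/basics_test.exs --trace
+# run only the test starting on a given line (the :12 is an example)
+mix test test/basics_test.exs:12 --trace
+```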
+
+# Tests to port
+
+X means done, - means partially done
+
+ - [X] Port all_docs.js
+ - [ ] Port attachment_names.js
+ - [ ] Port attachment_paths.js
+ - [ ] Port attachment_ranges.js
+ - [ ] Port attachments.js
+ - [ ] Port attachments_multipart.js
+ - [ ] Port attachment_views.js
+ - [ ] Port auth_cache.js
+ - [X] Port basics.js
+ - [X] Port batch_save.js
+ - [X] Port bulk_docs.js
+ - [X] Port changes.js
+ - [ ] Port coffee.js
+ - [X] Port compact.js
+ - [X] Port config.js
+ - [X] Port conflicts.js
+ - [ ] Port cookie_auth.js
+ - [ ] Port copy_doc.js
+ - [ ] Port delayed_commits.js
+ - [ ] Port design_docs.js
+ - [ ] Port design_options.js
+ - [ ] Port design_paths.js
+ - [ ] Port erlang_views.js
+ - [ ] Port etags_head.js
+ - [ ] Port etags_views.js
+ - [ ] Port form_submit.js
+ - [ ] Port http.js
+ - [ ] Port invalid_docids.js
+ - [ ] Port jsonp.js
+ - [ ] Port large_docs.js
+ - [ ] Port list_views.js
+ - [ ] Port lorem_b64.txt
+ - [ ] Port lorem.txt
+ - [ ] Port lots_of_docs.js
+ - [ ] Port method_override.js
+ - [ ] Port multiple_rows.js
+ - [ ] Port proxyauth.js
+ - [ ] Port purge.js
+ - [ ] Port reader_acl.js
+ - [ ] Port recreate_doc.js
+ - [ ] Port reduce_builtin.js
+ - [ ] Port reduce_false.js
+ - [ ] Port reduce_false_temp.js
+ - [X] Port reduce.js
+ - [-] Port replication.js
+ - [ ] Port replicator_db_bad_rep_id.js
+ - [ ] Port replicator_db_by_doc_id.js
+ - [ ] Port replicator_db_compact_rep_db.js
+ - [ ] Port replicator_db_continuous.js
+ - [ ] Port replicator_db_credential_delegation.js
+ - [ ] Port replicator_db_field_validation.js
+ - [ ] Port replicator_db_filtered.js
+ - [ ] Port replicator_db_identical_continuous.js
+ - [ ] Port replicator_db_identical.js
+ - [ ] Port replicator_db_invalid_filter.js
+ - [ ] Port replicator_db_security.js
+ - [ ] Port replicator_db_simple.js
+ - [ ] Port replicator_db_successive.js
+ - [ ] Port replicator_db_survives.js
+ - [ ] Port replicator_db_swap_rep_db.js
+ - [ ] Port replicator_db_update_security.js
+ - [ ] Port replicator_db_user_ctx.js
+ - [ ] Port replicator_db_write_auth.js
+ - [ ] Port rev_stemming.js
+ - [X] Port rewrite.js
+ - [ ] Port rewrite_js.js
+ - [ ] Port security_validation.js
+ - [ ] Port show_documents.js
+ - [ ] Port stats.js
+ - [ ] Port update_documents.js
+ - [ ] Port users_db.js
+ - [ ] Port users_db_security.js
+ - [ ] Port utf8.js
+ - [X] Port uuids.js
+ - [X] Port view_collation.js
+ - [ ] Port view_collation_raw.js
+ - [ ] Port view_compaction.js
+ - [ ] Port view_conflicts.js
+ - [ ] Port view_errors.js
+ - [ ] Port view_include_docs.js
+ - [ ] Port view_multi_key_all_docs.js
+ - [ ] Port view_multi_key_design.js
+ - [ ] Port view_multi_key_temp.js
+ - [ ] Port view_offsets.js
+ - [ ] Port view_pagination.js
+ - [ ] Port view_sandboxing.js
+ - [ ] Port view_update_seq.js
diff --git a/test/elixir/config/config.exs b/test/elixir/config/config.exs
new file mode 100644
index 000000000..966ae83bb
--- /dev/null
+++ b/test/elixir/config/config.exs
@@ -0,0 +1,30 @@
+# This file is responsible for configuring your application
+# and its dependencies with the aid of the Mix.Config module.
+use Mix.Config
+
+# This configuration is loaded before any dependency and is restricted
+# to this project. If another project depends on this project, this
+# file won't be loaded nor affect the parent project. For this reason,
+# if you want to provide default values for your application for
+# 3rd-party users, it should be done in your "mix.exs" file.
+
+# You can configure your application as:
+#
+# config :foo, key: :value
+#
+# and access this configuration in your application as:
+#
+# Application.get_env(:foo, :key)
+#
+# You can also configure a 3rd-party app:
+#
+# config :logger, level: :info
+#
+
+# It is also possible to import configuration files, relative to this
+# directory. For example, you can emulate configuration per environment
+# by uncommenting the line below and defining dev.exs, test.exs and such.
+# Configuration from the imported file will override the ones defined
+# here (which is why it is important to import them last).
+#
+# import_config "#{Mix.env}.exs"
diff --git a/test/elixir/config/test.exs b/test/elixir/config/test.exs
new file mode 100644
index 000000000..4b28ea99b
--- /dev/null
+++ b/test/elixir/config/test.exs
@@ -0,0 +1,3 @@
+config :logger,
+ backends: [:console],
+ compile_time_purge_level: :debug
diff --git a/test/elixir/lib/couch.ex b/test/elixir/lib/couch.ex
new file mode 100644
index 000000000..6ae702eae
--- /dev/null
+++ b/test/elixir/lib/couch.ex
@@ -0,0 +1,171 @@
+defmodule Couch.Session do
+ @enforce_keys [:cookie]
+ defstruct [:cookie]
+
+ def new(cookie) do
+ %Couch.Session{cookie: cookie}
+ end
+
+ def logout(sess) do
+ headers = [
+ "Content-Type": "application/x-www-form-urlencoded",
+ "X-CouchDB-WWW-Authenticate": "Cookie",
+ "Cookie": sess.cookie
+ ]
+ Couch.delete!("/_session", headers: headers)
+ end
+
+ def get(sess, url, opts \\ []), do: go(sess, :get, url, opts)
+ def get!(sess, url, opts \\ []), do: go!(sess, :get, url, opts)
+ def put(sess, url, opts \\ []), do: go(sess, :put, url, opts)
+ def put!(sess, url, opts \\ []), do: go!(sess, :put, url, opts)
+ def post(sess, url, opts \\ []), do: go(sess, :post, url, opts)
+ def post!(sess, url, opts \\ []), do: go!(sess, :post, url, opts)
+ def delete(sess, url, opts \\ []), do: go(sess, :delete, url, opts)
+ def delete!(sess, url, opts \\ []), do: go!(sess, :delete, url, opts)
+
+ # Skipping head/patch/options for YAGNI. Feel free to add
+ # if the need arises.
+
+ def go(%Couch.Session{} = sess, method, url, opts) do
+ opts = Keyword.merge(opts, [cookie: sess.cookie])
+ Couch.request(method, url, opts)
+ end
+
+ def go!(%Couch.Session{} = sess, method, url, opts) do
+ opts = Keyword.merge(opts, [cookie: sess.cookie])
+ Couch.request!(method, url, opts)
+ end
+end
+
+
+defmodule Couch do
+ use HTTPotion.Base
+
+ @moduledoc """
+ CouchDB library to power test suite.
+ """
+
+ def process_url(url) do
+ "http://localhost:15984" <> url
+ end
+
+ def process_request_headers(headers, options) do
+ headers = Keyword.put(headers, :"User-Agent", "couch-potion")
+ headers = if headers[:"Content-Type"] do
+ headers
+ else
+ Keyword.put(headers, :"Content-Type", "application/json")
+ end
+ case Keyword.get options, :cookie do
+ nil ->
+ headers
+ cookie ->
+ Keyword.put headers, :"Cookie", cookie
+ end
+ end
+
+
+ def process_options(options) do
+
+ if Keyword.get(options, :cookie) == nil do
+ headers = Keyword.get(options, :headers, [])
+ if headers[:basic_auth] != nil or headers[:authorization] != nil do
+ options
+ else
+ Keyword.put(options, :basic_auth, {"adm", "pass"})
+ end
+ else
+ options
+ end
+ end
+
+ def process_request_body(body) do
+ if is_map(body) do
+ :jiffy.encode(body)
+ else
+ body
+ end
+ end
+
+ def process_response_body(headers, body) do
+ if String.match?(headers[:"Content-Type"], ~r/application\/json/) do
+ body |> IO.iodata_to_binary |> :jiffy.decode([:return_maps])
+ else
+ process_response_body(body)
+ end
+ end
+
+ def login(userinfo) do
+ [user, pass] = String.split(userinfo, ":", [parts: 2])
+ login(user, pass)
+ end
+
+ def login(user, pass) do
+ resp = Couch.post("/_session", body: %{:username => user, :password => pass})
+ true = resp.body["ok"]
+ cookie = resp.headers[:'set-cookie']
+ [token | _] = String.split(cookie, ";")
+ %Couch.Session{cookie: token}
+ end
+
+ # HACK: this is here until this commit lands in a release
+ # https://github.com/myfreeweb/httpotion/commit/f3fa2f0bc3b9b400573942b3ba4628b48bc3c614
+ def handle_response(response) do
+ case response do
+ { :ok, status_code, headers, body, _ } ->
+ processed_headers = process_response_headers(headers)
+ %HTTPotion.Response{
+ status_code: process_status_code(status_code),
+ headers: processed_headers,
+ body: process_response_body(processed_headers, body)
+ }
+ { :ok, status_code, headers, body } ->
+ processed_headers = process_response_headers(headers)
+ %HTTPotion.Response{
+ status_code: process_status_code(status_code),
+ headers: processed_headers,
+ body: process_response_body(processed_headers, body)
+ }
+ { :ibrowse_req_id, id } ->
+ %HTTPotion.AsyncResponse{ id: id }
+ { :error, { :conn_failed, { :error, reason }}} ->
+ %HTTPotion.ErrorResponse{ message: error_to_string(reason)}
+ { :error, :conn_failed } ->
+ %HTTPotion.ErrorResponse{ message: "conn_failed"}
+ { :error, reason } ->
+ %HTTPotion.ErrorResponse{ message: error_to_string(reason)}
+ end
+ end
+
+ # Another HACK: Until we can get process_request_headers/2 merged
+ # upstream.
+ @spec process_arguments(atom, String.t, [{atom(), any()}]) :: %{}
+ defp process_arguments(method, url, options) do
+ options = process_options(options)
+
+ body = Keyword.get(options, :body, "")
+ headers = Keyword.merge Application.get_env(:httpotion, :default_headers, []), Keyword.get(options, :headers, [])
+ timeout = Keyword.get(options, :timeout, Application.get_env(:httpotion, :default_timeout, 5000))
+ ib_options = Keyword.merge Application.get_env(:httpotion, :default_ibrowse, []), Keyword.get(options, :ibrowse, [])
+ follow_redirects = Keyword.get(options, :follow_redirects, Application.get_env(:httpotion, :default_follow_redirects, false))
+
+ ib_options = if stream_to = Keyword.get(options, :stream_to), do: Keyword.put(ib_options, :stream_to, spawn(__MODULE__, :transformer, [stream_to, method, url, options])), else: ib_options
+ ib_options = if user_password = Keyword.get(options, :basic_auth) do
+ {user, password} = user_password
+ Keyword.put(ib_options, :basic_auth, { to_charlist(user), to_charlist(password) })
+ else
+ ib_options
+ end
+
+ %{
+ method: method,
+ url: url |> to_string |> process_url(options) |> to_charlist,
+ body: body |> process_request_body,
+ headers: headers |> process_request_headers(options) |> Enum.map(fn ({k, v}) -> { to_charlist(k), to_charlist(v) } end),
+ timeout: timeout,
+ ib_options: ib_options,
+ follow_redirects: follow_redirects
+ }
+ end
+end
diff --git a/test/elixir/mix.exs b/test/elixir/mix.exs
new file mode 100644
index 000000000..9b0f642dd
--- /dev/null
+++ b/test/elixir/mix.exs
@@ -0,0 +1,30 @@
+defmodule Foo.Mixfile do
+ use Mix.Project
+
+ def project do
+ [
+ app: :foo,
+ version: "0.1.0",
+ elixir: "~> 1.5",
+ start_permanent: Mix.env == :prod,
+ deps: deps()
+ ]
+ end
+
+ # Run "mix help compile.app" to learn about applications.
+ def application do
+ [
+ extra_applications: [:logger]
+ ]
+ end
+
+ # Run "mix help deps" to learn about dependencies.
+ defp deps do
+ [
+ # {:dep_from_hexpm, "~> 0.3.0"},
+ {:httpotion, "~> 3.0"},
+ {:jiffy, "~> 0.14.11"}
+ # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"},
+ ]
+ end
+end
diff --git a/test/elixir/mix.lock b/test/elixir/mix.lock
new file mode 100644
index 000000000..0723e9474
--- /dev/null
+++ b/test/elixir/mix.lock
@@ -0,0 +1,3 @@
+%{"httpotion": {:hex, :httpotion, "3.0.3", "17096ea1a7c0b2df74509e9c15a82b670d66fc4d66e6ef584189f63a9759428d", [], [{:ibrowse, "~> 4.4", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"},
+ "ibrowse": {:hex, :ibrowse, "4.4.0", "2d923325efe0d2cb09b9c6a047b2835a5eda69d8a47ed6ff8bc03628b764e991", [], [], "hexpm"},
+ "jiffy": {:hex, :jiffy, "0.14.11", "919a87d491c5a6b5e3bbc27fafedc3a0761ca0b4c405394f121f582fd4e3f0e5", [], [], "hexpm"}}
diff --git a/test/elixir/run b/test/elixir/run
new file mode 100755
index 000000000..66a5947b7
--- /dev/null
+++ b/test/elixir/run
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+cd "$(dirname "$0")"
+mix deps.get
+mix test --trace
diff --git a/test/elixir/test/all_docs_test.exs b/test/elixir/test/all_docs_test.exs
new file mode 100644
index 000000000..2fd608dc6
--- /dev/null
+++ b/test/elixir/test/all_docs_test.exs
@@ -0,0 +1,132 @@
+defmodule AllDocsTest do
+ use CouchTestCase
+
+ @moduletag :all_docs
+
+ @moduledoc """
+ Test CouchDB _all_docs
+ This is a port of the all_docs.js suite
+ """
+
+ # TODO: do we need to bring this in?
+ # var db = new CouchDB(db_name, {"X-Couch-Full-Commit":"false"}, {w: 3});
+
+ @tag :with_db
+ test "All Docs tests", context do
+ db_name = context[:db_name]
+ resp1 = Couch.post("/#{db_name}", [body: %{:_id => "0", :a => 1, :b => 1}]).body
+ resp2 = Couch.post("/#{db_name}", [body: %{:_id => "3", :a => 4, :b => 16}]).body
+ resp3 = Couch.post("/#{db_name}", [body: %{:_id => "1", :a => 2, :b => 4}]).body
+ resp4 = Couch.post("/#{db_name}", [body: %{:_id => "2", :a => 3, :b => 9}]).body
+
+ assert resp1["ok"]
+ assert resp2["ok"]
+ assert resp3["ok"]
+ assert resp4["ok"]
+
+ revs = [resp1["rev"], resp2["rev"], resp3["rev"], resp4["rev"]]
+
+ # Check _all_docs
+ resp = Couch.get("/#{db_name}/_all_docs").body
+ rows = resp["rows"]
+ assert resp["total_rows"] == length(rows)
+ Enum.each(rows, fn row ->
+ assert row["id"] >= "0" && row["id"] <= "4"
+ end)
+
+ # Check _all_docs with descending=true
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{:descending => true}).body
+ rows = resp["rows"]
+ assert resp["total_rows"] == length(rows)
+
+ # Check _all_docs offset
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "\"2\""}).body
+ assert resp["offset"] == 2
+
+ # Confirm that queries may assume raw collation
+ resp = Couch.get("/#{db_name}/_all_docs", query: %{
+ :startkey => "\"org.couchdb.user:\"",
+ :endkey => "\"org.couchdb.user;\""
+ })
+ assert length(resp.body["rows"]) == 0
+
+ # Check that all docs show up in the changes feed; order can vary
+ resp = Couch.get("/#{db_name}/_changes").body
+ Enum.each(resp["results"], fn row ->
+ assert Enum.member?(revs, hd(row["changes"])["rev"]), "doc #{row["id"]} should be in changes"
+ end)
+
+ # Check that deletions also show up right
+ doc1 = Couch.get("/#{db_name}/1").body
+ assert Couch.delete("/#{db_name}/1", query: %{:rev => doc1["_rev"]}).body["ok"]
+ changes = Couch.get("/#{db_name}/_changes").body["results"]
+ assert length(changes) == 4
+ deleted = Enum.filter(changes, fn row -> row["deleted"] end)
+ assert length(deleted) == 1
+ assert hd(deleted)["id"] == "1"
+
+ # (remember old seq)
+ orig_doc = Enum.find(changes, fn row -> row["id"] == "3" end)
+ # Perform an update
+ doc3 = Couch.get("/#{db_name}/3").body
+ doc3 = Map.put(doc3, :updated, "totally")
+ assert Couch.put("/#{db_name}/3", body: doc3).body["ok"]
+
+ # The update should make doc id 3 have another seq num (not necessarily higher or the last, though)
+ changes = Couch.get("/#{db_name}/_changes").body["results"]
+ assert length(changes) == 4
+ updated_doc = Enum.find(changes, fn row -> row["id"] == "3" end)
+ assert orig_doc["seq"] != updated_doc["seq"], "seq num should be different"
+
+ # Ok, now let's see what happens with include docs
+ changes = Couch.get("/#{db_name}/_changes", query: %{:include_docs => true}).body["results"]
+ assert length(changes) == 4
+ updated_doc = Enum.find(changes, fn row -> row["id"] == doc3["_id"] end)
+ assert updated_doc["doc"]["updated"] == "totally"
+
+ deleted_doc = Enum.find(changes, fn row -> row["deleted"] end)
+ assert deleted_doc["doc"]["_deleted"]
+
+ # Test _all_docs with keys
+ rows = Couch.post("/#{db_name}/_all_docs", query: %{:include_docs => true}, body: %{:keys => ["1"]}).body["rows"]
+ row = hd(rows)
+ assert length(rows) == 1
+ assert row["key"] == "1"
+ assert row["id"] == "1"
+ assert row["value"]["deleted"]
+ assert row["doc"] == :null
+
+ # Add conflicts
+ conflicted_doc1 = %{:_id => "3", :_rev => "2-aa01552213fafa022e6167113ed01087", :value => "X"}
+ conflicted_doc2 = %{:_id => "3", :_rev => "2-ff01552213fafa022e6167113ed01087", :value => "Z"}
+ assert Couch.put("/#{db_name}/3", query: %{:new_edits => false}, body: conflicted_doc1).body["ok"]
+ assert Couch.put("/#{db_name}/3", query: %{:new_edits => false}, body: conflicted_doc2).body["ok"]
+
+ win_rev = Couch.get("/#{db_name}/3").body
+ changes = Couch.get("/#{db_name}/_changes", query: %{:include_docs => true, :conflicts => true, :style => "all_docs"}).body["results"]
+
+ doc3 = Enum.find(changes, fn row -> row["id"] == "3" end)
+ assert doc3["id"] == "3"
+ assert length(doc3["changes"]) == 3
+ assert win_rev["_rev"] == hd(doc3["changes"])["rev"]
+ assert is_list(doc3["doc"]["_conflicts"])
+ assert length(doc3["doc"]["_conflicts"]) == 2
+
+ rows = Couch.get("/#{db_name}/_all_docs", query: %{:include_docs => true, :conflicts => true}).body["rows"]
+ assert length(rows) == 3
+ change = hd(tl(tl(rows)))
+ assert change["key"] == "3"
+ assert change["id"] == "3"
+ assert change["value"]["rev"] == win_rev["_rev"]
+ assert change["doc"]["_rev"] == win_rev["_rev"]
+ assert change["doc"]["_id"] == "3"
+ assert is_list(change["doc"]["_conflicts"])
+ assert length(change["doc"]["_conflicts"]) == 2
+
+ # Test that _all_docs collates sanely
+ assert Couch.post("/#{db_name}", body: %{:_id => "Z", :foo => "Z"}).body["ok"]
+ assert Couch.post("/#{db_name}", body: %{:_id => "a", :foo => "a"}).body["ok"]
+ rows = Couch.get("/#{db_name}/_all_docs", query: %{:startkey => "\"Z\"", :endkey => "\"Z\""}).body["rows"]
+ assert length(rows) == 1
+ end
+end
diff --git a/test/elixir/test/basics_test.exs b/test/elixir/test/basics_test.exs
new file mode 100644
index 000000000..c71d05e6b
--- /dev/null
+++ b/test/elixir/test/basics_test.exs
@@ -0,0 +1,282 @@
+defmodule BasicsTest do
+ use CouchTestCase
+
+ @moduletag :basics
+
+ @moduledoc """
+ Test CouchDB basics.
+ This is a port of the basics.js suite
+ """
+
+ test "Session contains adm context" do
+ userCtx = Couch.get("/_session").body["userCtx"]
+ assert userCtx["name"] == "adm", "Should have adm user context"
+ assert userCtx["roles"] == ["_admin"], "Should have _admin role"
+ end
+
+ test "Welcome endpoint" do
+ assert Couch.get("/").body["couchdb"] == "Welcome", "Should say welcome"
+ end
+
+ @tag :with_db
+ test "PUT on existing DB should return 412 instead of 500", context do
+ db_name = context[:db_name]
+ resp = Couch.put("/#{db_name}")
+ assert resp.status_code == 412
+ refute resp.body["ok"]
+ end
+
+ @tag :with_db_name
+ test "Creating a new DB should return location header", context do
+ db_name = context[:db_name]
+ {:ok, resp} = create_db(db_name)
+ msg = "Should return Location header for new db"
+ assert String.ends_with?(resp.headers["location"], db_name), msg
+ {:ok, _} = delete_db(db_name)
+ end
+
+ @tag :with_db_name
+ test "Creating a new DB with slashes should return Location header (COUCHDB-411)", context do
+ db_name = context[:db_name] <> "%2Fwith_slashes"
+ {:ok, resp} = create_db(db_name)
+ msg = "Should return Location header for new db"
+ assert String.ends_with?(resp.headers["location"], db_name), msg
+ {:ok, _} = delete_db(db_name)
+ end
+
+ @tag :with_db
+ test "Created database has appropriate db info name", context do
+ db_name = context[:db_name]
+ assert Couch.get("/#{db_name}").body["db_name"] == db_name, "Get correct database name"
+ end
+
+ @tag :with_db
+ test "Database should be in _all_dbs", context do
+ assert context[:db_name] in Couch.get("/_all_dbs").body, "Db name in _all_dbs"
+ end
+
+ @tag :with_db
+ test "Empty database should have zero docs", context do
+ assert Couch.get("/#{context[:db_name]}").body["doc_count"] == 0, "Empty doc count in empty db"
+ end
+
+ @tag :with_db
+ test "Create a document and save it to the database", context do
+ resp = Couch.post("/#{context[:db_name]}", [body: %{:_id => "0", :a => 1, :b => 1}])
+ assert resp.status_code == 201, "Should be 201 created"
+ assert resp.body["id"], "Id should be present"
+ assert resp.body["rev"], "Rev should be present"
+
+ resp2 = Couch.get("/#{context[:db_name]}/#{resp.body["id"]}")
+ assert resp2.body["_id"] == resp.body["id"], "Ids should match"
+ assert resp2.body["_rev"] == resp.body["rev"], "Revs should match"
+ end
+
+ @tag :with_db
+ test "Revs info status is good", context do
+ db_name = context[:db_name]
+ {:ok, _} = create_doc(db_name, sample_doc_foo())
+ resp = Couch.get("/#{db_name}/foo", [query: %{:revs_info => true}])
+ assert hd(resp.body["_revs_info"])["status"] == "available", "Revs info is available"
+ end
+
+ @tag :with_db
+ test "Make sure you can do a seq=true option", context do
+ db_name = context[:db_name]
+ {:ok, _} = create_doc(db_name, sample_doc_foo())
+ resp = Couch.get("/#{db_name}/foo", [query: %{:local_seq => true}])
+ assert resp.body["_local_seq"] == 1, "Local seq value == 1"
+ end
+
+ @tag :with_db
+ test "Can create several documents", context do
+ db_name = context[:db_name]
+ assert Couch.post("/#{db_name}", [body: %{:_id => "1", :a => 2, :b => 4}]).body["ok"]
+ assert Couch.post("/#{db_name}", [body: %{:_id => "2", :a => 3, :b => 9}]).body["ok"]
+ assert Couch.post("/#{db_name}", [body: %{:_id => "3", :a => 4, :b => 16}]).body["ok"]
+ retry_until(fn ->
+ Couch.get("/#{db_name}").body["doc_count"] == 3
+ end)
+ end
+
+ @tag :pending
+ @tag :with_db
+ test "Regression test for COUCHDB-954", context do
+ db_name = context[:db_name]
+ doc = %{:_id => "COUCHDB-954", :a => 1}
+
+ resp1 = Couch.post("/#{db_name}", [body: doc])
+ assert resp1.body["ok"]
+ old_rev = resp1.body["rev"]
+
+ doc = Map.put(doc, :_rev, old_rev)
+ resp2 = Couch.post("/#{db_name}", [body: doc])
+ assert resp2.body["ok"]
+ _new_rev = resp2.body["rev"]
+
+ # TODO: enable chunked encoding
+ #resp3 = Couch.get("/#{db_name}/COUCHDB-954", [query: %{:open_revs => "[#{old_rev}, #{new_rev}]"}])
+ #assert length(resp3.body) == 2, "Should get two revisions back"
+ #resp3 = Couch.get("/#{db_name}/COUCHDB-954", [query: %{:open_revs => "[#{old_rev}]", :latest => true}])
+ #assert resp3.body["_rev"] == new_rev
+ end
+
+ @tag :with_db
+ test "Simple map functions", context do
+ db_name = context[:db_name]
+ map_fun = "function(doc) { if (doc.a==4) { emit(null, doc.b); } }"
+ red_fun = "function(keys, values) { return sum(values); }"
+ map_doc = %{:views => %{:baz => %{:map => map_fun}}}
+ red_doc = %{:views => %{:baz => %{:map => map_fun, :reduce => red_fun}}}
+
+ # Bootstrap database and ddoc
+ assert Couch.post("/#{db_name}", [body: %{:_id => "0", :a => 1, :b => 1}]).body["ok"]
+ assert Couch.post("/#{db_name}", [body: %{:_id => "1", :a => 2, :b => 4}]).body["ok"]
+ assert Couch.post("/#{db_name}", [body: %{:_id => "2", :a => 3, :b => 9}]).body["ok"]
+ assert Couch.post("/#{db_name}", [body: %{:_id => "3", :a => 4, :b => 16}]).body["ok"]
+ assert Couch.put("/#{db_name}/_design/foo", [body: map_doc]).body["ok"]
+ assert Couch.put("/#{db_name}/_design/bar", [body: red_doc]).body["ok"]
+ assert Couch.get("/#{db_name}").body["doc_count"] == 6
+
+ # Initial view query test
+ resp = Couch.get("/#{db_name}/_design/foo/_view/baz")
+ assert resp.body["total_rows"] == 1
+ assert hd(resp.body["rows"])["value"] == 16
+
+ # Modified doc and test for updated view results
+ doc0 = Couch.get("/#{db_name}/0").body
+ doc0 = Map.put(doc0, :a, 4)
+ assert Couch.put("/#{db_name}/0", [body: doc0]).body["ok"]
+ retry_until(fn ->
+ Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
+ end)
+
+ # Write 2 more docs and test for updated view results
+ assert Couch.post("/#{db_name}", [body: %{:a => 3, :b => 9}]).body["ok"]
+ assert Couch.post("/#{db_name}", [body: %{:a => 4, :b => 16}]).body["ok"]
+ retry_until(fn ->
+ Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 3
+ end)
+ assert Couch.get("/#{db_name}").body["doc_count"] == 8
+
+ # Test reduce function
+ resp = Couch.get("/#{db_name}/_design/bar/_view/baz")
+ assert hd(resp.body["rows"])["value"] == 33
+
+ # Delete doc and test for updated view results
+ doc0 = Couch.get("/#{db_name}/0").body
+ assert Couch.delete("/#{db_name}/0?rev=#{doc0["_rev"]}").body["ok"]
+ retry_until(fn ->
+ Couch.get("/#{db_name}/_design/foo/_view/baz").body["total_rows"] == 2
+ end)
+ assert Couch.get("/#{db_name}").body["doc_count"] == 7
+ assert Couch.get("/#{db_name}/0").status_code == 404
+ refute Couch.get("/#{db_name}/0?rev=#{doc0["_rev"]}").status_code == 404
+ end
+
+ @tag :with_db
+ test "POST doc response has a Location header", context do
+ db_name = context[:db_name]
+ resp = Couch.post("/#{db_name}", [body: %{:foo => :bar}])
+ assert resp.body["ok"]
+ loc = resp.headers["Location"]
+ assert loc, "should have a Location header"
+ locs = Enum.reverse(String.split(loc, "/"))
+ assert hd(locs) == resp.body["id"]
+ assert hd(tl(locs)) == db_name
+ end
+
+ @tag :with_db
+ test "POST doc with an _id field isn't overwritten by uuid", context do
+ db_name = context[:db_name]
+ resp = Couch.post("/#{db_name}", [body: %{:_id => "oppossum", :yar => "matey"}])
+ assert resp.body["ok"]
+ assert resp.body["id"] == "oppossum"
+ assert Couch.get("/#{db_name}/oppossum").body["yar"] == "matey"
+ end
+
+ @tag :pending
+ @tag :with_db
+ test "PUT doc has a Location header", context do
+ db_name = context[:db_name]
+ resp = Couch.put("/#{db_name}/newdoc", [body: %{:a => 1}])
+ assert String.ends_with?(resp.headers["location"], "/#{db_name}/newdoc")
+ # TODO: make protocol check use defined protocol value
+ assert String.starts_with?(resp.headers["location"], "http")
+ end
+
+ @tag :with_db
+ test "DELETE'ing a non-existent doc should 404", context do
+ db_name = context[:db_name]
+ assert Couch.delete("/#{db_name}/doc-does-not-exist").status_code == 404
+ end
+
+ @tag :with_db
+ test "Check for invalid document members", context do
+ db_name = context[:db_name]
+ bad_docs = [
+ {:goldfish, %{:_zing => 4}},
+ {:zebrafish, %{:_zoom => "hello"}},
+ {:mudfish, %{:zane => "goldfish", :_fan => "something smells delicious"}},
+ {:tastyfish, %{:_bing => %{"wha?" => "soda can"}}}
+ ]
+
+ Enum.each(bad_docs, fn {id, doc} ->
+ resp = Couch.put("/#{db_name}/#{id}", [body: doc])
+ assert resp.status_code == 400
+ assert resp.body["error"] == "doc_validation"
+
+ resp = Couch.post("/#{db_name}", [body: doc])
+ assert resp.status_code == 400
+ assert resp.body["error"] == "doc_validation"
+ end)
+ end
+
+ @tag :with_db
+ test "PUT error when body not an object", context do
+ db_name = context[:db_name]
+ resp = Couch.put("/#{db_name}/bar", [body: "[]"])
+ assert resp.status_code == 400
+ assert resp.body["error"] == "bad_request"
+ assert resp.body["reason"] == "Document must be a JSON object"
+ end
+
+ @tag :with_db
+ test "_bulk_docs POST error when body not an object", context do
+ db_name = context[:db_name]
+ resp = Couch.post("/#{db_name}/_bulk_docs", [body: "[]"])
+ assert resp.status_code == 400
+ assert resp.body["error"] == "bad_request"
+ assert resp.body["reason"] == "Request body must be a JSON object"
+ end
+
+ @tag :with_db
+ test "_all_docs POST error when multi-get is not a {'key': [...]} structure", context do
+ db_name = context[:db_name]
+ resp = Couch.post("/#{db_name}/_all_docs", [body: "[]"])
+ assert resp.status_code == 400
+ assert resp.body["error"] == "bad_request"
+ assert resp.body["reason"] == "Request body must be a JSON object"
+
+ resp = Couch.post("/#{db_name}/_all_docs", [body: %{:keys => 1}])
+ assert resp.status_code == 400
+ assert resp.body["error"] == "bad_request"
+ assert resp.body["reason"] == "`keys` body member must be an array."
+ end
+
+ @tag :with_db
+ test "oops, the doc id got lost in code nirwana", context do
+ db_name = context[:db_name]
+ resp = Couch.delete("/#{db_name}/?rev=foobarbaz")
+ assert resp.status_code == 400, "should return a bad request"
+ assert resp.body["error"] == "bad_request"
+ assert resp.body["reason"] == "You tried to DELETE a database with a ?=rev parameter. Did you mean to DELETE a document instead?"
+ end
+
+ @tag :pending
+ @tag :with_db
+ test "On restart, a request for creating an already existing db can not override", _context do
+ # TODO
+ assert true
+ end
+end
diff --git a/test/elixir/test/batch_save_test.exs b/test/elixir/test/batch_save_test.exs
new file mode 100644
index 000000000..4863a8d03
--- /dev/null
+++ b/test/elixir/test/batch_save_test.exs
@@ -0,0 +1,42 @@
+defmodule BatchSaveTest do
+ use CouchTestCase
+
+ @moduletag :batch_save
+
+ @moduledoc """
+ Test CouchDB batch save
+ This is a port of batch_save.js
+ """
+
+ @doc_count 100
+
+ @tag :with_db
+ test "batch put", context do
+ path_fun = &("/#{&1}/#{&2}")
+ run(&Couch.put/2, path_fun, context[:db_name], @doc_count)
+ end
+
+ @tag :with_db
+ test "batch post", context do
+ path_fun = fn(db_name, _) -> "/#{db_name}" end
+ run(&Couch.post/2, path_fun, context[:db_name], @doc_count)
+ end
+
+ @tag :with_db
+ test "batch put with identical doc ids", context do
+ path_fun = fn(db_name, _) -> "/#{db_name}/foo" end
+ run(&Couch.put/2, path_fun, context[:db_name], 1)
+ end
+
+ defp run(req_fun, path_fun, db_name, expected_doc_count) do
+ for i <- 1..@doc_count do
+ opts = [body: %{a: i, b: i}, query: %{batch: "ok"}]
+ resp = req_fun.(path_fun.(db_name, i), opts)
+ assert resp.body["ok"] and resp.status_code == 202
+ end
+ retry_until(fn ->
+ Couch.get("/#{db_name}").body["doc_count"] == expected_doc_count
+ end)
+ end
+
+end
diff --git a/test/elixir/test/bulk_docs_test.exs b/test/elixir/test/bulk_docs_test.exs
new file mode 100644
index 000000000..f6676a6b8
--- /dev/null
+++ b/test/elixir/test/bulk_docs_test.exs
@@ -0,0 +1,152 @@
+defmodule BulkDocsTest do
+ use CouchTestCase
+
+ @moduletag :bulk_docs
+
+ @moduledoc """
+ Test CouchDB bulk docs
+ This is a port of bulk_docs.js
+ """
+
+ @doc_range 1..5
+
+ @tag :with_db
+ test "bulk docs can create, update, & delete many docs per request", ctx do
+ db = ctx[:db_name]
+ docs = create_docs(@doc_range)
+ resp = bulk_post(docs, db)
+ assert revs_start_with(resp.body, "1-")
+ docs = rev(docs, resp.body)
+ # Modify each doc's `string` field and re-post
+ docs = Enum.map(docs, fn(doc = %{string: string}) ->
+ %{doc | string: string <> ".00"}
+ end)
+ resp = bulk_post(docs, db)
+ assert revs_start_with(resp.body, "2-")
+ docs = rev(docs, resp.body)
+ # Confirm changes were applied for each doc
+ assert Enum.all?(docs, fn(doc) ->
+ String.ends_with?(
+ Couch.get("/#{db}/#{doc._id}").body["string"], ".00")
+ end)
+ docs = Enum.map(docs, &Map.put(&1, :_deleted, true))
+ resp = bulk_post(docs, db)
+ assert revs_start_with(resp.body, "3-")
+ # Confirm docs were deleted
+ assert Enum.all?(docs, fn(doc) ->
+ resp = Couch.get("/#{db}/#{doc._id}")
+ assert resp.status_code == 404
+ assert resp.body["error"] == "not_found"
+ assert resp.body["reason"] == "deleted"
+ end)
+ end
+
+ @tag :with_db
+ test "bulk docs can detect conflicts", ctx do
+ db = ctx[:db_name]
+ docs = create_docs(@doc_range)
+ resp = bulk_post(docs, db)
+ assert revs_start_with(resp.body, "1-")
+ docs = rev(docs, resp.body)
+ # Update just the first doc to create a conflict in subsequent bulk update
+ doc = hd(docs)
+ resp = Couch.put("/#{db}/#{doc._id}", [body: doc])
+ assert resp.status_code == 201
+ # Attempt to delete all docs
+ docs = Enum.map(docs, fn(doc) -> Map.put(doc, :_deleted, true) end)
+ resp = bulk_post(docs, db)
+ # Confirm first doc not updated, and result has no rev field
+ res = hd(resp.body)
+ assert res["id"] == "1" and res["error"] == "conflict"
+ assert Map.get(res, "rev") == nil
+ # Confirm other docs updated normally
+ assert revs_start_with(tl(resp.body), "2-")
+ end
+
+ @tag :with_db
+ test "bulk docs supplies `id` if not provided in doc", ctx do
+ docs = [%{foo: "bar"}]
+ res = hd(bulk_post(docs, ctx[:db_name]).body)
+ assert res["id"]
+ assert res["rev"]
+ end
+
+ @tag :with_db
+ test "bulk docs raises error for `all_or_nothing` option", ctx do
+ opts = [body: %{docs: create_docs(@doc_range), all_or_nothing: true}]
+ resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", opts)
+ assert resp.status_code == 417
+ assert Enum.all?(resp.body, &(Map.get(&1, "error") == "not_implemented"))
+ expected_reason = "all_or_nothing is not supported"
+ assert Enum.all?(resp.body, &(Map.get(&1, "reason") == expected_reason))
+ end
+
+ @tag :with_db
+ test "bulk docs raises conflict error for combined update & delete", ctx do
+ db = ctx[:db_name]
+ doc = %{_id: "id", val: "val"}
+ resp = Couch.put("/#{db}/#{doc._id}", [body: doc])
+ doc = rev(doc, resp.body)
+ update = %{doc | val: "newval"}
+ delete = Map.put(doc, :_deleted, true)
+ body = bulk_post([update, delete], db).body
+ assert Enum.count(body, &(Map.get(&1, "error") == "conflict")) == 1
+ assert Enum.count(body, &(Map.get(&1, "rev"))) == 1
+ end
+
+ @tag :with_db
+ test "bulk docs raises error for missing `docs` parameter", ctx do
+ docs = [%{foo: "bar"}]
+ resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", [body: %{doc: docs}])
+ assert_bad_request(resp, "POST body must include `docs` parameter.")
+ end
+
+ @tag :with_db
+ test "bulk docs raises error for invlaid `docs` parameter", ctx do
+ resp = Couch.post(
+ "/#{ctx[:db_name]}/_bulk_docs", [body: %{docs: "foo"}])
+ assert_bad_request(resp, "`docs` parameter must be an array.")
+ end
+
+ @tag :with_db
+ test "bulk docs raises error for invlaid `new_edits` parameter", ctx do
+ opts = [body: %{docs: [], new_edits: 0}]
+ resp = Couch.post("/#{ctx[:db_name]}/_bulk_docs", opts)
+ assert_bad_request(resp, "`new_edits` parameter must be a boolean.")
+ end
+
+ @tag :with_db
+ test "bulk docs emits conflict error for duplicate doc `_id`s", ctx do
+ docs = [
+ %{_id: "0", a: 0},
+ %{_id: "1", a: 1},
+ %{_id: "1", a: 2},
+ %{_id: "3", a: 3}]
+ rows = bulk_post(docs, ctx[:db_name]).body
+ assert Enum.at(rows, 1)["id"] == "1"
+ assert Enum.at(rows, 1)["ok"]
+ assert Enum.at(rows, 2)["error"] == "conflict"
+ end
+
+
+ defp bulk_post(docs, db) do
+ resp = Couch.post("/#{db}/_bulk_docs", [body: %{docs: docs}])
+ assert resp.status_code == 201 and length(resp.body) == length(docs),
+ """
+ Expected 201 and the same number of response rows as in request, but got
+ #{pretty_inspect resp}
+ """
+ resp
+ end
+
+ defp revs_start_with(rows, prefix) do
+ Enum.all?(rows, fn(%{"rev" => rev}) -> String.starts_with?(rev, prefix) end)
+ end
+
+ defp assert_bad_request(resp, reason) do
+ assert resp.status_code == 400
+ assert resp.body["error"] == "bad_request"
+ assert resp.body["reason"] == reason
+ end
+
+end
diff --git a/test/elixir/test/compact_test.exs b/test/elixir/test/compact_test.exs
new file mode 100644
index 000000000..8ee20fa61
--- /dev/null
+++ b/test/elixir/test/compact_test.exs
@@ -0,0 +1,83 @@
+defmodule CompactTest do
+ use CouchTestCase
+
+ @moduletag :compact
+
+ @moduledoc """
+ Test CouchDB compaction
+ This is a port of compact.js
+ """
+
+ @att_doc_id "att_doc"
+ @att_name "foo.txt"
+ @att_plaintext "This is plain text"
+
+ @tag :with_db
+ test "compaction reduces size of deleted docs", context do
+ db = context[:db_name]
+ docs = populate(db)
+ info = get_info(db)
+ orig_data_size = info["data_size"]
+ orig_disk_size = info["disk_size"]
+ start_time = info["instance_start_time"]
+ assert is_integer(orig_data_size) and is_integer(orig_disk_size)
+ assert orig_data_size < orig_disk_size
+
+ delete(db, docs)
+ deleted_data_size = get_info(db)["data_size"]
+ assert deleted_data_size > orig_data_size
+
+ compact(db)
+ assert get_info(db)["instance_start_time"] == start_time
+ assert_attachment_available(db)
+ info = get_info(db)
+ final_data_size = info["data_size"]
+ final_disk_size = info["disk_size"]
+ assert final_data_size < final_disk_size
+ assert is_integer(final_data_size) and is_integer(final_disk_size)
+ assert final_data_size < deleted_data_size
+ end
+
+
+ defp assert_attachment_available(db) do
+ resp = Couch.get("/#{db}/#{@att_doc_id}/#{@att_name}")
+ assert resp.body == @att_plaintext
+ assert resp.headers["content-type"] == "text/plain"
+ assert Couch.get("/#{db}").body["doc_count"] == 1
+ end
+
+ defp populate(db) do
+ docs = create_docs(0..19)
+ resp = Couch.post("/#{db}/_bulk_docs", [body: %{docs: docs}])
+ assert resp.status_code == 201
+ docs = rev(docs, resp.body)
+ doc = %{
+ _id: "#{@att_doc_id}",
+ _attachments: %{
+ "#{@att_name}": %{
+ content_type: "text/plain",
+ data: Base.encode64(@att_plaintext)}}}
+ resp = Couch.put("/#{db}/#{doc._id}", [body: doc])
+ assert resp.status_code == 201
+ docs
+ end
+
+ defp delete(db, docs) do
+ docs = Enum.map(docs, &Map.put(&1, :_deleted, true))
+ resp = Couch.post("/#{db}/_bulk_docs", [body: %{docs: docs}])
+ assert resp.status_code == 201
+ assert Couch.post("/#{db}/_ensure_full_commit").body["ok"] == true
+ end
+
+ defp compact(db) do
+ assert Couch.post("/#{db}/_compact").status_code == 202
+ retry_until(fn() ->
+ Couch.get("/#{db}").body["compact_running"] == false
+ end, 200, 20_000)
+ end
+
+ defp get_info(db) do
+ Couch.get("/#{db}").body
+ end
+
+end
diff --git a/test/elixir/test/config_test.exs b/test/elixir/test/config_test.exs
new file mode 100644
index 000000000..1931a21f1
--- /dev/null
+++ b/test/elixir/test/config_test.exs
@@ -0,0 +1,149 @@
+defmodule ConfigTest do
+ use CouchTestCase
+
+ @moduletag :config
+
+ @moduledoc """
+ Test CouchDB config API
+ This is a port of the config.js suite
+ """
+
+ setup do
+ # TODO: switch this to _local when that's landed
+ config_url = "/_node/node1@127.0.0.1/_config"
+ resp = Couch.get(config_url)
+ assert resp.status_code == 200
+ {:ok, config: resp.body, config_url: config_url}
+ end
+
+ def set_config(context, section, key, val) do
+ set_config(context, section, key, val, 200)
+ end
+
+ def set_config(context, section, key, val, status_assert) do
+ url = "#{context[:config_url]}/#{section}/#{key}"
+ headers = ["X-Couch-Persist": "false"]
+ resp = Couch.put(url, headers: headers, body: :jiffy.encode(val))
+ if status_assert do
+ assert resp.status_code == status_assert
+ end
+ resp.body
+ end
+
+ def get_config(context, section) do
+ get_config(context, section, nil, 200)
+ end
+
+ def get_config(context, section, key) do
+ get_config(context, section, key, 200)
+ end
+
+ def get_config(context, section, key, status_assert) do
+ url = if key do
+ "#{context[:config_url]}/#{section}/#{key}"
+ else
+ "#{context[:config_url]}/#{section}"
+ end
+ resp = Couch.get(url)
+ if status_assert do
+ assert resp.status_code == status_assert
+ end
+ resp.body
+ end
+
+ def delete_config(context, section, key) do
+ delete_config(context, section, key, 200)
+ end
+
+ def delete_config(context, section, key, status_assert) do
+ url = "#{context[:config_url]}/#{section}/#{key}"
+ resp = Couch.delete(url)
+ if status_assert do
+ assert resp.status_code == status_assert
+ end
+ end
+
+ # TODO: port server_port tests from config.js
+ @tag :pending
+ test "CouchDB respects configured protocols"
+
+ test "Standard config options are present", context do
+ assert context[:config]["couchdb"]["database_dir"]
+ assert context[:config]["daemons"]["httpd"]
+ assert context[:config]["httpd_global_handlers"]["_config"]
+ assert context[:config]["log"]["level"]
+ assert context[:config]["query_servers"]["javascript"]
+ end
+
+ test "Settings can be altered with undefined whitelist allowing any change", context do
+ refute context["config"]["httpd"]["config_whitelist"], "Default whitelist is empty"
+ set_config(context, "test", "foo", "bar")
+ assert get_config(context, "test")["foo"] == "bar"
+ assert get_config(context, "test", "foo") == "bar"
+ end
+
+ test "Server-side password hashing, and raw updates disabling that", context do
+ plain_pass = "s3cret"
+ set_config(context, "admins", "administrator", plain_pass)
+ assert Couch.login("administrator", plain_pass)
+ hash_pass = get_config(context, "admins", "administrator")
+ assert Regex.match?(~r/^-pbkdf2-/, hash_pass) or Regex.match?(~r/^-hashed-/, hash_pass)
+ delete_config(context, "admins", "administrator")
+ assert Couch.delete("/_session").body["ok"]
+ end
+
+ @tag :pending
+ test "PORT `BUGGED` ?raw tests from config.js"
+
+ test "Non-term whitelist values allow further modification of the whitelist", context do
+ val = "!This is an invalid Erlang term!"
+ set_config(context, "httpd", "config_whitelist", val)
+ assert val == get_config(context, "httpd", "config_whitelist")
+ delete_config(context, "httpd", "config_whitelist")
+ end
+
+ test "Non-list whitelist values allow further modification of the whitelist", context do
+ val = "{[yes, a_valid_erlang_term, but_unfortunately, not_a_list]}"
+ set_config(context, "httpd", "config_whitelist", val)
+ assert val == get_config(context, "httpd", "config_whitelist")
+ delete_config(context, "httpd", "config_whitelist")
+ end
+
+ test "Keys not in the whitelist may not be modified", context do
+ val = "[{httpd,config_whitelist}, {test,foo}]"
+ set_config(context, "httpd", "config_whitelist", val)
+ assert val == get_config(context, "httpd", "config_whitelist")
+ set_config(context, "test", "foo", "PUT to whitelisted config variable")
+ delete_config(context, "test", "foo")
+ end
+
+ test "Non-2-tuples in the whitelist are ignored", context do
+ val = "[{httpd,config_whitelist}, these, {are}, {nOt, 2, tuples}, [so], [they, will], [all, become, noops], {test,foo}]"
+ set_config(context, "httpd", "config_whitelist", val)
+ assert val == get_config(context, "httpd", "config_whitelist")
+ set_config(context, "test", "foo", "PUT to whitelisted config variable")
+ delete_config(context, "test", "foo")
+ end
+
+ test "Atoms, binaries, and strings suffice as whitelist sections and keys.", context do
+ vals = ["{test,foo}", "{\"test\",\"foo\"}", "{<<\"test\">>,<<\"foo\">>}"]
+ Enum.each(vals, fn pair ->
+ set_config(context, "httpd", "config_whitelist", "[{httpd,config_whitelist}, #{pair}")
+ pair_format = case String.at(pair, 1) do
+ "t" -> "tuple"
+ "\"" -> "string"
+ "<" -> "binary"
+ end
+ set_config(context, "test", "foo", "PUT with #{pair_format}")
+ delete_config(context, "test", "foo")
+ end)
+ delete_config(context, "httpd", "config_whitelist")
+ end
+
+ test "Blacklist is functional", context do
+ sections = ["daemons", "external", "httpd_design_handlers", "httpd_db_handlers", "native_query_servers", "os_daemons", "query_servers"]
+ Enum.each(sections, fn section ->
+ set_config(context, section, "wohali", "rules", 403)
+ end)
+ end
+end
diff --git a/test/elixir/test/conflicts_test.exs b/test/elixir/test/conflicts_test.exs
new file mode 100644
index 000000000..a27bd6fb6
--- /dev/null
+++ b/test/elixir/test/conflicts_test.exs
@@ -0,0 +1,102 @@
+defmodule RevisionTest do
+ use CouchTestCase
+
+ @moduletag :conflicts
+
+ @moduledoc """
+ Test CouchDB conflicts
+ This is a port of conflicts.js
+ (but is arguably more focused on revisions than conflicts)
+ """
+
+ setup context do
+ # Generate a doc with _rev field for each test
+ doc = %{_id: "doc-1", a: 1, b: 1}
+ doc = rev(doc, put(context[:db_name], doc))
+ %{doc: doc}
+ end
+
+ @tag :with_db
+ test "multiple updates with same _rev raise conflict errors", context do
+ db = context[:db_name]
+ doc = context[:doc]
+ doc2 = %{doc | a: 2, b: 2} # doc and doc2 have same _rev
+ _doc = rev(doc, put(db, doc)) # doc updated with new _rev
+ assert_conflict Couch.put("/#{db}/#{doc2._id}", [body: doc2])
+
+ resp = Couch.get("/#{db}/_changes")
+ assert length(resp.body["results"]) == 1
+
+ doc2 = Map.delete(doc2, :_rev)
+ assert_conflict(Couch.put("/#{db}/#{doc2._id}", [body: doc2]))
+ end
+
+ @tag :with_db
+ test "mismatched rev in body and query string returns error", context do
+ db = context[:db_name]
+ doc = context[:doc]
+ resp = Couch.put("/#{db}/#{doc._id}?rev=1-foobar", [body: doc])
+ expected_reason = "Document rev from request body and query string " <>
+ "have different values"
+ assert_bad_request(resp, expected_reason)
+ end
+
+ @tag :with_db
+ test "mismatched rev in body and etag returns error", context do
+ opts = [body: context[:doc], headers: [{:"If-Match", "1-foobar"}]]
+ resp = Couch.put("/#{context[:db_name]}/foobar", opts)
+ expected_reason = "Document rev and etag have different values"
+ assert_bad_request(resp, expected_reason)
+ end
+
+ @tag :with_db
+ test "`new_edits: false` prevents bulk updates (COUCHDB-1178)", context do
+ db = context[:db_name]
+
+ ddoc = %{_id: "_design/couchdb-1178", validate_doc_update: "function(){}"}
+ assert put(db, ddoc)["ok"] == true
+
+ r0 = %{_id: "doc", val: "r0"}
+ r1 = %{_id: "doc", val: "r1", _rev: "1-47f3268e7546965196b57572099f4372"}
+ r2 = %{_id: "doc", val: "r2", _rev: "2-1d8171ab3a91475cfece749291e6f897"}
+ r3 = %{_id: "doc", val: "r3", _rev: "3-3fb0a342d2ce092fdcc77856dbe8a2ef"}
+ assert put(db, r0)["ok"] == true
+ assert put(db, r1)["ok"] == true
+ assert put(db, r2)["ok"] == true
+ # N.b. that we *do not* put r3
+
+ expected = %{
+ "_id" => "doc",
+ "_rev" => r3._rev,
+ "_revisions" => %{
+ "ids" => (for r <- [r3._rev, r2._rev, r1._rev], do: suffix(r)),
+ "start" => 3},
+ "val" => r2.val}
+ assert Couch.get("/#{db}/doc?revs=true").body == expected
+
+ opts = [body: %{docs: [r3, r2, r1], new_edits: false}]
+ assert Couch.post("/#{db}/_bulk_docs", opts).body == []
+ end
+
+
+ defp put(db, doc) do
+ Couch.put("/#{db}/#{doc._id}", [body: doc]).body
+ end
+
+ defp suffix(rev) do
+ hd(tl(String.split(rev, "-")))
+ end
+
+ defp assert_conflict(resp) do
+ assert resp.status_code == 409
+ assert resp.body["error"] == "conflict"
+ assert resp.body["reason"] == "Document update conflict."
+ end
+
+ defp assert_bad_request(resp, reason) do
+ assert resp.status_code == 400
+ assert resp.body["error"] == "bad_request"
+ assert resp.body["reason"] == reason
+ end
+
+end
diff --git a/test/elixir/test/data/lorem.txt b/test/elixir/test/data/lorem.txt
new file mode 100644
index 000000000..0ef85bab8
--- /dev/null
+++ b/test/elixir/test/data/lorem.txt
@@ -0,0 +1,103 @@
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus nunc sapien, porta id pellentesque at, elementum et felis. Curabitur condimentum ante in metus iaculis quis congue diam commodo. Donec eleifend ante sed nulla dapibus convallis. Ut cursus aliquam neque, vel porttitor tellus interdum ut. Sed pharetra lacinia adipiscing. In tristique tristique felis non tincidunt. Nulla auctor mauris a velit cursus ultricies. In at libero quis justo consectetur laoreet. Nullam id ultrices nunc. Donec non turpis nulla, eu lacinia ante. Nunc eu orci et turpis pretium venenatis. Nam molestie, lacus at dignissim elementum, ante libero consectetur libero, ut lacinia lacus urna et purus. Nullam lorem ipsum, dapibus vel ullamcorper a, malesuada a metus. Sed porta adipiscing magna, quis pulvinar purus mattis fringilla. Integer pellentesque sapien in neque tristique ac iaculis libero ultricies. Ut eget pharetra purus.
+
+Nulla in convallis tellus. Proin tincidunt suscipit vulputate. Suspendisse potenti. Nullam tristique justo mi, a tristique ligula. Duis convallis aliquam iaculis. Nulla dictum fringilla congue. Suspendisse ac leo lectus, ac aliquam justo. Ut porttitor commodo mi sed luctus. Nulla at enim lorem. Nunc eu justo sapien, a blandit odio. Curabitur faucibus sollicitudin dolor, id lacinia sem auctor in. Donec varius nunc at lectus sagittis nec luctus arcu pharetra. Nunc sed metus justo. Cras vel mauris diam. Ut feugiat felis eget neque pharetra vestibulum consectetur massa facilisis. Quisque consectetur luctus nisi quis tincidunt. Vivamus cursus cursus quam non blandit. Pellentesque et velit lacus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+In et dolor vitae orci adipiscing congue. Aliquam gravida nibh at nisl gravida molestie. Curabitur a bibendum sapien. Aliquam tincidunt, nulla nec pretium lobortis, odio augue tincidunt arcu, a lobortis odio sem ut purus. Donec accumsan mattis nunc vitae lacinia. Suspendisse potenti. Integer commodo nisl quis nibh interdum non fringilla dui sodales. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. In hac habitasse platea dictumst. Etiam ullamcorper, mi id feugiat bibendum, purus neque cursus mauris, id sodales quam nisi id velit. Sed lectus leo, tincidunt vel rhoncus imperdiet, blandit in leo. Integer quis magna nulla. Donec vel nisl magna, ut rhoncus dui. Aliquam gravida, nulla nec eleifend luctus, neque nibh pharetra ante, quis egestas elit metus a mi. Nunc nec augue quam. Morbi tincidunt tristique varius. Suspendisse iaculis elit feugiat magna pellentesque ultricies. Vestibulum aliquam tortor non ante ullamcorper fringilla. Donec iaculis mi quis mauris ornare vestibulum.
+
+In a magna nisi, a ultricies massa. Donec elit neque, viverra non tempor quis, fringilla in metus. Integer odio odio, euismod vitae mollis sed, sodales eget libero. Donec nec massa in felis ornare pharetra at nec tellus. Nunc lorem dolor, pretium vel auctor in, volutpat vitae felis. Maecenas rhoncus, orci vel blandit euismod, turpis erat tincidunt ante, elementum adipiscing nisl urna in nisi. Phasellus sagittis, enim sed accumsan consequat, urna augue lobortis erat, non malesuada quam metus sollicitudin ante. In leo purus, dignissim quis varius vel, pellentesque et nibh. In sed tortor iaculis libero mollis pellentesque id vitae lectus. In hac habitasse platea dictumst. Phasellus mauris enim, posuere eget luctus ac, iaculis et quam. Vivamus et nibh diam, elementum egestas tellus. Aenean vulputate malesuada est. Sed posuere porta diam a sodales. Proin eu sem non velit facilisis venenatis sed a turpis.
+
+Pellentesque sed risus a ante vulputate lobortis sit amet eu nisl. Suspendisse ut eros mi, a rhoncus lacus. Curabitur fermentum vehicula tellus, a ornare mi condimentum vel. Integer molestie volutpat viverra. Integer posuere euismod venenatis. Proin ac mauris sed nulla pharetra porttitor. Duis vel dui in risus sodales auctor sit amet non enim. Maecenas mollis lacus at ligula faucibus sodales. Cras vel neque arcu. Sed tincidunt tortor pretium nisi interdum quis dictum arcu laoreet. Morbi pretium ultrices feugiat. Maecenas convallis augue nec felis malesuada malesuada scelerisque mauris placerat. Sed at magna enim, at fringilla dolor. Quisque ut mattis dui. Praesent consectetur ante viverra nisi blandit pharetra. Quisque metus elit, dignissim vitae fermentum sit amet, fringilla imperdiet odio. Cras eget purus eget tellus feugiat luctus a ac purus. Cras vitae nisl vel augue rhoncus porttitor sit amet quis lorem. Donec interdum pellentesque adipiscing. Phasellus neque libero, aliquam in mattis vitae, consectetur adipiscing nibh.
+
+Donec nec nulla urna, ac sagittis lectus. Suspendisse non elit sed mi auctor facilisis vitae et lectus. Fusce ac vulputate mauris. Morbi condimentum ultrices metus, et accumsan purus malesuada at. Maecenas lobortis ante sed massa dictum vitae venenatis elit commodo. Proin tellus eros, adipiscing sed dignissim vitae, tempor eget ante. Aenean id tellus nec magna cursus pharetra vitae vel enim. Morbi vestibulum pharetra est in vulputate. Aliquam vitae metus arcu, id aliquet nulla. Phasellus ligula est, hendrerit nec iaculis ut, volutpat vel eros. Suspendisse vitae urna turpis, placerat adipiscing diam. Phasellus feugiat vestibulum neque eu dapibus. Nulla facilisi. Duis tortor felis, euismod sit amet aliquet in, volutpat nec turpis. Mauris rhoncus ipsum ut purus eleifend ut lobortis lectus dapibus. Quisque non erat lorem. Vivamus posuere imperdiet iaculis. Ut ligula lacus, eleifend at tempor id, auctor eu leo.
+
+Donec mi enim, laoreet pulvinar mollis eu, malesuada viverra nunc. In vitae metus vitae neque tempor dapibus. Maecenas tincidunt purus a felis aliquam placerat. Nulla facilisi. Suspendisse placerat pharetra mattis. Integer tempor malesuada justo at tempus. Maecenas vehicula lorem a sapien bibendum vel iaculis risus feugiat. Pellentesque diam erat, dapibus et pellentesque quis, molestie ut massa. Vivamus iaculis interdum massa id bibendum. Quisque ut mauris dui, sit amet varius elit. Vestibulum elit lorem, rutrum non consectetur ut, laoreet nec nunc. Donec nec mauris ante. Curabitur ut est sed odio pharetra laoreet. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur purus risus, laoreet sed porta id, sagittis vel ipsum. Maecenas nibh diam, cursus et varius sit amet, fringilla sed magna. Nullam id neque eu leo faucibus mollis. Duis nec adipiscing mauris. Suspendisse sollicitudin, enim eu pulvinar commodo, erat augue ultrices mi, a tristique magna sem non libero.
+
+Sed in metus nulla. Praesent nec adipiscing sapien. Donec laoreet, velit non rutrum vestibulum, ligula neque adipiscing turpis, at auctor sapien elit ut massa. Nullam aliquam, enim vel posuere rutrum, justo erat laoreet est, vel fringilla lacus nisi non lectus. Etiam lectus nunc, laoreet et placerat at, venenatis quis libero. Praesent in placerat elit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Pellentesque fringilla augue eu nibh placerat dictum. Nunc porttitor tristique diam, eu aliquam enim aliquet vel. Aliquam lacinia interdum ipsum, in posuere metus luctus vel. Vivamus et nisl a eros semper elementum. Donec venenatis orci at diam tristique sollicitudin. In eu eros sed odio rutrum luctus non nec tellus.
+
+Nulla nec felis elit. Nullam in ipsum in ipsum consequat fringilla quis vel tortor. Phasellus non massa nisi, sit amet aliquam urna. Sed fermentum nibh vitae lacus tincidunt nec tincidunt massa bibendum. Etiam elit dui, facilisis sit amet vehicula nec, iaculis at sapien. Ut at massa id dui ultrices volutpat ut ac libero. Fusce ipsum mi, bibendum a lacinia et, pulvinar eget mauris. Proin faucibus urna ut lorem elementum vulputate. Duis quam leo, malesuada non euismod ut, blandit facilisis mauris. Suspendisse sit amet magna id velit tincidunt aliquet nec eu dolor. Curabitur bibendum lorem vel felis tempus dapibus. Aliquam erat volutpat. Aenean cursus tortor nec dui aliquet porta. Aenean commodo iaculis suscipit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Quisque sit amet ornare elit. Nam ligula risus, vestibulum nec mattis in, condimentum ac ante. Donec fringilla, justo et ultrices faucibus, tellus est volutpat massa, vitae commodo sapien diam non risus. Vivamus at arcu gravida purus mollis feugiat.
+
+Nulla a turpis quis sapien commodo dignissim eu quis justo. Maecenas eu lorem odio, ut hendrerit velit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin facilisis porttitor ullamcorper. Praesent mollis dignissim massa, laoreet aliquet velit pellentesque non. Nunc facilisis convallis tristique. Mauris porttitor ante at tellus convallis placerat. Morbi aliquet nisi ac nisl pulvinar id dictum nisl mollis. Sed ornare sem et risus placerat lobortis id eget elit. Integer consequat, magna id suscipit pharetra, nulla velit suscipit orci, ut interdum augue augue quis quam. Fusce pretium aliquet vulputate. Mauris blandit dictum molestie. Proin nulla nibh, bibendum eu placerat at, tincidunt ac nisl. Nullam vulputate metus ut libero rutrum ultricies. Nunc sit amet dui mauris. Suspendisse adipiscing lacus in augue eleifend mollis.
+
+Duis pretium ultrices mattis. Nam euismod risus a erat lacinia bibendum. Morbi massa tortor, consectetur id eleifend id, pellentesque vel tortor. Praesent urna lorem, porttitor at condimentum vitae, luctus eget elit. Maecenas fringilla quam convallis est hendrerit viverra. Etiam vehicula, sapien non pulvinar adipiscing, nisi massa vestibulum est, id interdum mauris velit eu est. Vestibulum est arcu, facilisis at ultricies non, vulputate id sapien. Vestibulum ipsum metus, pharetra nec pellentesque id, facilisis id sapien. Donec rutrum odio et lacus ultricies ullamcorper. Integer sed est ut mi posuere tincidunt quis non leo. Morbi tellus justo, ultricies sit amet ultrices quis, facilisis vitae magna. Donec ligula metus, pellentesque non tristique ac, vestibulum sed erat. Aliquam erat volutpat.
+
+Nam dignissim, nisl eget consequat euismod, sem lectus auctor orci, ut porttitor lacus dui ac neque. In hac habitasse platea dictumst. Fusce egestas porta facilisis. In hac habitasse platea dictumst. Mauris cursus rhoncus risus ac euismod. Quisque vitae risus a tellus venenatis convallis. Curabitur laoreet sapien eu quam luctus lobortis. Vivamus sollicitudin sodales dolor vitae sodales. Suspendisse pharetra laoreet aliquet. Maecenas ullamcorper orci vel tortor luctus iaculis ut vitae metus. Vestibulum ut arcu ac tellus mattis eleifend eget vehicula elit.
+
+In sed feugiat eros. Donec bibendum ullamcorper diam, eu faucibus mauris dictum sed. Duis tincidunt justo in neque accumsan dictum. Maecenas in rutrum sapien. Ut id feugiat lacus. Nulla facilisi. Nunc ac lorem id quam varius cursus a et elit. Aenean posuere libero eu tortor vehicula ut ullamcorper odio consequat. Sed in dignissim dui. Curabitur iaculis tempor quam nec placerat. Aliquam venenatis nibh et justo iaculis lacinia. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque tempus magna sed mi aliquet eget varius odio congue.
+
+Integer sem sem, semper in vestibulum vitae, lobortis quis erat. Duis ante lectus, fermentum sed tempor sit amet, placerat sit amet sem. Mauris congue tincidunt ipsum. Ut viverra, lacus vel varius pharetra, purus enim pulvinar ipsum, non pellentesque enim justo non erat. Fusce ipsum orci, ultrices sed pellentesque at, hendrerit laoreet enim. Nunc blandit mollis pretium. Ut mollis, nulla aliquam sodales vestibulum, libero lorem tempus tortor, a pellentesque nibh elit a ipsum. Phasellus fermentum ligula at neque adipiscing sollicitudin. Suspendisse id ipsum arcu. Sed tincidunt placerat viverra. Donec libero augue, porttitor sit amet varius eget, rutrum nec lacus. Proin blandit orci sit amet diam dictum id porttitor risus iaculis. Integer lacinia feugiat leo, vitae auctor turpis eleifend vel. Suspendisse lorem quam, pretium id bibendum sed, viverra vitae tortor. Nullam ultricies libero eu risus convallis eget ullamcorper nisi elementum. Mauris nulla elit, bibendum id vulputate vitae, imperdiet rutrum lorem. Curabitur eget dignissim orci. Sed semper tellus ipsum, at blandit dui. Integer dapibus facilisis sodales. Vivamus sollicitudin varius est, quis ornare justo cursus id.
+
+Nunc vel ullamcorper mi. Suspendisse potenti. Nunc et urna a augue scelerisque ultrices non quis mi. In quis porttitor elit. Aenean quis erat nulla, a venenatis tellus. Fusce vestibulum nisi sed leo adipiscing dignissim. Nunc interdum, lorem et lacinia vestibulum, quam est mattis magna, sit amet volutpat elit augue at libero. Cras gravida dui quis velit lobortis condimentum et eleifend ligula. Phasellus ac metus quam, id venenatis mi. Aliquam ut turpis ac tellus dapibus dapibus eu in mi. Quisque eget nibh eros. Fusce consectetur leo velit.
+
+Vestibulum semper egestas mauris. Morbi vestibulum sem sem. Aliquam venenatis, felis sed eleifend porta, mauris diam semper arcu, sit amet ultricies est sapien sit amet libero. Vestibulum dui orci, ornare condimentum mollis nec, molestie ac eros. Proin vitae mollis velit. Praesent eget felis mi. Maecenas eu vulputate nisi. Vestibulum varius, arcu in ultricies vestibulum, nibh leo sagittis odio, ut bibendum nisl mi nec diam. Integer at enim feugiat nulla semper bibendum ut a velit. Proin at nisi ut lorem aliquam varius eget quis elit. Nullam nec odio vel lectus congue consequat adipiscing ac mi. Fusce vitae laoreet libero. Curabitur sit amet sem neque, nec posuere enim. Curabitur at massa a sem gravida iaculis nec et nibh. Sed vitae dui vitae leo tincidunt pretium a aliquam erat. Suspendisse ultricies odio at metus tempor in pellentesque arcu ultricies.
+
+Sed aliquam mattis quam, in vulputate sapien ultrices in. Pellentesque quis velit sed dui hendrerit cursus. Pellentesque non nunc lacus, a semper metus. Fusce euismod velit quis diam suscipit consequat. Praesent commodo accumsan neque. Proin viverra, ipsum non tristique ultrices, velit velit facilisis lorem, vel rutrum neque eros ac nisi. Suspendisse felis massa, faucibus in volutpat ac, dapibus et odio. Pellentesque id tellus sit amet risus ultricies ullamcorper non nec sapien. Nam placerat viverra ullamcorper. Nam placerat porttitor sapien nec pulvinar. Curabitur vel odio sit amet odio accumsan aliquet vitae a lectus. Pellentesque lobortis viverra consequat. Mauris elementum cursus nulla, sit amet hendrerit justo dictum sed. Maecenas diam odio, fringilla ac congue quis, adipiscing ut elit.
+
+Aliquam lorem eros, pharetra nec egestas vitae, mattis nec risus. Mauris arcu massa, sodales eget gravida sed, viverra vitae turpis. Ut ligula urna, euismod ac tincidunt eu, faucibus sed felis. Praesent mollis, ipsum quis rhoncus dignissim, odio sem venenatis nulla, at consequat felis augue vel erat. Nam fermentum feugiat volutpat. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Etiam vitae dui in nisi adipiscing ultricies non eu justo. Donec tristique ultricies adipiscing. Nulla sodales, nunc a tristique elementum, erat neque egestas nisl, at hendrerit orci sapien sed libero. Vivamus a mauris turpis, quis laoreet ipsum. Nunc nec mi et nisl pellentesque scelerisque. Vivamus volutpat, justo tristique lacinia condimentum, erat justo ultrices urna, elementum viverra eros augue non libero. Sed mollis mollis arcu, at fermentum diam suscipit quis.
+
+Etiam sit amet nibh justo, posuere volutpat nunc. Morbi pellentesque neque in orci volutpat eu scelerisque lorem dictum. Mauris mollis iaculis est, nec sagittis sapien consequat id. Nunc nec malesuada odio. Duis quis suscipit odio. Mauris purus dui, sodales id mattis sit amet, posuere in arcu. Phasellus porta elementum convallis. Maecenas at orci et mi vulputate sollicitudin in in turpis. Pellentesque cursus adipiscing neque sit amet commodo. Fusce ut mi eu lectus porttitor volutpat et nec felis.
+
+Curabitur scelerisque eros quis nisl viverra vel ultrices velit vestibulum. Sed lobortis pulvinar sapien ac venenatis. Sed ante nibh, rhoncus eget dictum in, mollis ut nisi. Phasellus facilisis mi non lorem tristique non eleifend sem fringilla. Integer ut augue est. In venenatis tincidunt scelerisque. Etiam ante dui, posuere quis malesuada vitae, malesuada a arcu. Aenean faucibus venenatis sapien, ut facilisis nisi blandit vel. Aenean ac lorem eu sem fermentum placerat. Proin neque purus, aliquet ut tincidunt ut, convallis sit amet eros. Phasellus vehicula ullamcorper enim non vehicula. Etiam porta odio ut ipsum adipiscing egestas id a odio. Pellentesque blandit, sapien ut pulvinar interdum, mi nulla hendrerit elit, in tempor diam enim a urna. In tellus odio, ornare sed condimentum a, mattis eu augue.
+
+Fusce hendrerit porttitor euismod. Donec malesuada egestas turpis, et ultricies felis elementum vitae. Nullam in sem nibh. Nullam ultricies hendrerit justo sit amet lobortis. Sed tincidunt, mauris at ornare laoreet, sapien purus elementum elit, nec porttitor nisl purus et erat. Donec felis nisi, rutrum ullamcorper gravida ac, tincidunt sit amet urna. Proin vel justo vitae eros sagittis bibendum a ut nibh. Phasellus sodales laoreet tincidunt. Maecenas odio massa, condimentum id aliquet ut, rhoncus vel lectus. Duis pharetra consectetur sapien. Phasellus posuere ultricies massa, non rhoncus risus aliquam tempus.
+
+Praesent venenatis magna id sem dictum eu vehicula ipsum vulputate. Sed a convallis sapien. Sed justo dolor, rhoncus vel rutrum mattis, sollicitudin ut risus. Nullam sit amet convallis est. Etiam non tincidunt ligula. Fusce suscipit pretium elit at ullamcorper. Quisque sollicitudin, diam id interdum porta, metus ipsum volutpat libero, id venenatis felis orci non velit. Suspendisse potenti. Mauris rutrum, tortor sit amet pellentesque tincidunt, erat quam ultricies odio, id aliquam elit leo nec leo. Pellentesque justo eros, rutrum at feugiat nec, porta et tellus. Aenean eget metus lectus.
+
+Praesent euismod, turpis quis laoreet consequat, neque ante imperdiet quam, ac semper tortor nibh in nulla. Integer scelerisque eros vehicula urna lacinia ac facilisis mauris accumsan. Phasellus at mauris nibh. Curabitur enim ante, rutrum sed adipiscing hendrerit, pellentesque non augue. In hac habitasse platea dictumst. Nam tempus euismod massa a dictum. Donec sit amet justo ac diam ultricies ultricies. Sed tincidunt erat quis quam tempus vel interdum erat rhoncus. In hac habitasse platea dictumst. Vestibulum vehicula varius sem eget interdum. Cras bibendum leo nec felis venenatis sed pharetra sem feugiat. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed quam orci, mollis eget sagittis accumsan, vulputate sit amet dui. Praesent eu elementum arcu.
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum nisl metus, hendrerit ut laoreet sed, consectetur at purus. Duis interdum congue lobortis. Nullam sed massa porta felis eleifend consequat sit amet nec metus. Aliquam placerat dictum erat at eleifend. Vestibulum libero ante, ullamcorper a porttitor suscipit, accumsan vel nisi. Donec et magna neque. Nam elementum ultrices justo, eget sollicitudin sapien imperdiet eget. Nullam auctor dictum nunc, at feugiat odio vestibulum a. Sed erat nulla, viverra hendrerit commodo id, ullamcorper ac orci. Phasellus pellentesque feugiat suscipit. Etiam egestas fermentum enim. Etiam gravida interdum tellus ac laoreet. Morbi mattis aliquet eros, non tempor erat ullamcorper in. Etiam pulvinar interdum turpis ac vehicula. Sed quam justo, accumsan id consectetur a, aliquet sed leo. Aenean vitae blandit mauris.
+
+In sed eros augue, non rutrum odio. Etiam vitae dui neque, in tristique massa. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Maecenas dictum elit at lectus tempor non pharetra nisl hendrerit. Sed sed quam eu lectus ultrices malesuada tincidunt a est. Nam vel eros risus. Maecenas eros elit, blandit fermentum tempor eget, lobortis id diam. Vestibulum lacinia lacus vitae magna volutpat eu dignissim eros convallis. Vivamus ac velit tellus, a congue neque. Integer mi nulla, varius non luctus in, dictum sit amet sem. Ut laoreet, sapien sit amet scelerisque porta, purus sapien vestibulum nibh, sed luctus libero massa ac elit. Donec iaculis odio eget odio sagittis nec venenatis lorem blandit.
+
+Aliquam imperdiet tellus posuere justo vehicula sed vestibulum ante tristique. Fusce feugiat faucibus purus nec molestie. Nulla tempor neque id magna iaculis quis sollicitudin eros semper. Praesent viverra sagittis luctus. Morbi sit amet magna sed odio gravida varius. Ut nisi libero, vulputate feugiat pretium tempus, egestas sit amet justo. Pellentesque consequat tempor nisi in lobortis. Sed fermentum convallis dui ac sollicitudin. Integer auctor augue eget tellus tempus fringilla. Proin nec dolor sapien, nec tristique nibh. Aliquam a velit at mi mattis aliquet.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Aliquam ultrices erat non turpis auctor id ornare mauris sagittis. Quisque porttitor, tellus ut convallis sagittis, mi libero feugiat tellus, rhoncus placerat ipsum tortor id risus. Donec tincidunt feugiat leo. Cras id mi neque, eu malesuada eros. Ut molestie magna quis libero placerat malesuada. Aliquam erat volutpat. Aliquam non mauris lorem, in adipiscing metus. Donec eget ipsum in elit commodo ornare bibendum a nibh. Vivamus odio erat, placerat ac vestibulum eget, malesuada ut nisi. Etiam suscipit sollicitudin leo semper sollicitudin. Sed rhoncus risus sit amet sem eleifend dictum pretium sapien egestas. Nulla at urna nunc, vel aliquet leo. Praesent ultricies, mi eu pretium lobortis, erat nibh euismod leo, sit amet gravida sapien eros et turpis. Donec lacinia venenatis lectus, non lacinia mi hendrerit sit amet. Integer sed felis vel orci aliquam pulvinar. Phasellus et risus id erat euismod tincidunt. Sed luctus tempor nisi, nec tempor ipsum elementum eget. Integer nisl tortor, viverra in dapibus at, mattis ac erat. Curabitur nec dui lectus.
+
+Phasellus suscipit, tortor eu varius fringilla, sapien magna egestas risus, ut suscipit dui mauris quis velit. Cras a sapien quis sapien hendrerit tristique a sit amet elit. Pellentesque dui arcu, malesuada et sodales sit amet, dapibus vel quam. Sed non adipiscing ligula. Ut vulputate purus at nisl posuere sodales. Maecenas diam velit, tincidunt id mattis eu, aliquam ac nisi. Maecenas pretium, augue a sagittis suscipit, leo ligula eleifend dolor, mollis feugiat odio augue non eros. Pellentesque scelerisque orci pretium quam mollis at lobortis dui facilisis. Morbi congue metus id tortor porta fringilla. Sed lorem mi, molestie fermentum sagittis at, gravida a nisi. Donec eu vestibulum velit. In viverra, enim eu elementum sodales, enim odio dapibus urna, eget commodo nisl mauris ut odio. Curabitur nec enim nulla. In nec elit ipsum. Nunc in massa suscipit magna elementum faucibus in nec ipsum. Nullam suscipit malesuada elementum. Etiam sed mi in nibh ultricies venenatis nec pharetra magna. In purus ante, rhoncus vel placerat sed, fermentum sit amet dui. Sed at sodales velit.
+
+Duis suscipit pellentesque pellentesque. Praesent porta lobortis cursus. Quisque sagittis velit non tellus bibendum at sollicitudin lacus aliquet. Sed nibh risus, blandit a aliquet eget, vehicula et est. Suspendisse facilisis bibendum aliquam. Fusce consectetur convallis erat, eget mollis diam fermentum sollicitudin. Quisque tincidunt porttitor pretium. Nullam id nisl et urna vulputate dapibus. Donec quis lorem urna. Quisque id justo nec nunc blandit convallis. Nunc volutpat, massa sollicitudin adipiscing vestibulum, massa urna congue lectus, sit amet ultricies augue orci convallis turpis. Nulla at lorem elit. Nunc tristique, quam facilisis commodo porttitor, lacus ligula accumsan nisi, et laoreet justo ante vitae eros. Curabitur sed augue arcu. Phasellus porttitor vestibulum felis, ut consectetur arcu tempor non. In justo risus, semper et suscipit id, ullamcorper at urna. Quisque tincidunt, urna nec aliquam tristique, nibh odio faucibus augue, in ornare enim turpis accumsan dolor. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse sodales varius turpis eu fermentum.
+
+Morbi ultricies diam eget massa posuere lobortis. Aliquam volutpat pellentesque enim eu porttitor. Donec lacus felis, consectetur a pretium vitae, bibendum non enim. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Etiam ut nibh a quam pellentesque auctor ut id velit. Duis lacinia justo eget mi placerat bibendum. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec velit tortor, tempus nec tristique id, aliquet sit amet turpis. Praesent et neque nec magna porta fringilla. Morbi id egestas eros. Donec semper tincidunt ullamcorper. Phasellus tempus lacinia hendrerit. Quisque faucibus pretium neque non convallis. Nunc malesuada accumsan rhoncus. Cras lobortis, sem sed fringilla convallis, augue velit semper nisl, commodo varius nisi diam ac leo.
+
+Quisque interdum tellus ac ante posuere ut cursus lorem egestas. Nulla facilisi. Aenean sed massa nec nisi scelerisque vulputate. Etiam convallis consectetur iaculis. Maecenas ac purus ut ante dignissim auctor ac quis lorem. Pellentesque suscipit tincidunt orci. Fusce aliquam dapibus orci, at bibendum ipsum adipiscing eget. Morbi pellentesque hendrerit quam, nec placerat urna vulputate sed. Quisque vel diam lorem. Praesent id diam quis enim elementum rhoncus sagittis eget purus. Quisque fringilla bibendum leo in laoreet. Vestibulum id nibh risus, non elementum metus. Ut a felis diam, non mollis nisl. Cras elit ante, ullamcorper quis iaculis eu, sodales vel est. Curabitur quis lobortis dolor. Aliquam mattis gravida metus pellentesque vulputate.
+
+Ut id augue id dolor luctus euismod et quis velit. Maecenas enim dolor, tempus sit amet hendrerit eu, faucibus vitae neque. Proin sit amet varius elit. Proin varius felis ullamcorper purus dignissim consequat. Cras cursus tempus eros. Nunc ultrices venenatis ullamcorper. Aliquam et feugiat tellus. Phasellus sit amet vestibulum elit. Phasellus ac purus lacus, et accumsan eros. Morbi ultrices, purus a porta sodales, odio metus posuere neque, nec elementum risus turpis sit amet magna. Sed est quam, ultricies at congue adipiscing, lobortis in justo. Proin iaculis dictum nunc, eu laoreet quam varius vitae. Donec sit amet feugiat turpis. Mauris sit amet magna quam, ac consectetur dui. Curabitur eget magna tellus, eu pharetra felis. Donec sit amet tortor nisl. Aliquam et tortor facilisis lacus tincidunt commodo. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Curabitur nunc magna, ultricies id convallis at, ullamcorper vitae massa.
+
+Phasellus viverra iaculis placerat. Nulla consequat dolor sit amet erat dignissim posuere. Nulla lacinia augue vitae mi tempor gravida. Phasellus non tempor tellus. Quisque non enim semper tortor sagittis facilisis. Aliquam urna felis, egestas at posuere nec, aliquet eu nibh. Praesent sed vestibulum enim. Mauris iaculis velit dui, et fringilla enim. Nulla nec nisi orci. Sed volutpat, justo eget fringilla adipiscing, nisl nulla condimentum libero, sed sodales est est et odio. Cras ipsum dui, varius eu elementum consequat, faucibus in leo. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+Ut malesuada molestie eleifend. Curabitur id enim dui, eu tincidunt nibh. Mauris sit amet ante leo. Duis turpis ipsum, bibendum sed mattis sit amet, accumsan quis dolor. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Aenean a imperdiet metus. Quisque sollicitudin felis id neque tempor scelerisque. Donec at orci felis. Vivamus tempus convallis auctor. Donec interdum euismod lobortis. Sed at lacus nec odio dignissim mollis. Sed sapien orci, porttitor tempus accumsan vel, tincidunt nec ante. Nunc rhoncus egestas dapibus. Suspendisse fermentum dictum fringilla. Nullam nisi justo, eleifend a consectetur convallis, porttitor et tortor. Proin vitae lorem non dolor suscipit lacinia eu eget nulla.
+
+Suspendisse egestas, sapien sit amet blandit scelerisque, nulla arcu tristique dui, a porta justo quam vitae arcu. In metus libero, bibendum non volutpat ut, laoreet vel turpis. Nunc faucibus velit eu ipsum commodo nec iaculis eros volutpat. Vivamus congue auctor elit sed suscipit. Duis commodo, libero eu vestibulum feugiat, leo mi dapibus tellus, in placerat nisl dui at est. Vestibulum viverra tristique lorem, ornare egestas erat rutrum a. Nullam at augue massa, ut consectetur ipsum. Pellentesque malesuada, velit ut lobortis sagittis, nisi massa semper odio, malesuada semper purus nisl vel lectus. Nunc dui sem, mattis vitae laoreet vitae, sollicitudin ac leo. Nulla vel fermentum est.
+
+Vivamus in odio a nisi dignissim rhoncus in in lacus. Donec et nisl tortor. Donec sagittis consequat mi, vel placerat tellus convallis id. Aliquam facilisis rutrum nisl sed pretium. Donec et lacinia nisl. Aliquam erat volutpat. Curabitur ac pulvinar tellus. Nullam varius lobortis porta. Cras dapibus, ligula ut porta ultricies, leo lacus viverra purus, quis mollis urna risus eu leo. Nunc malesuada consectetur purus, vel auctor lectus scelerisque posuere. Maecenas dui massa, vestibulum bibendum blandit non, interdum eget mauris. Phasellus est ante, pulvinar at imperdiet quis, imperdiet vel urna. Quisque eget volutpat orci. Quisque et arcu purus, ut faucibus velit.
+
+Praesent sed ipsum urna. Praesent sagittis varius magna, id commodo dolor malesuada ac. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque sit amet nunc eu sem ornare tempor. Mauris id dolor nec erat convallis porta in lobortis nisi. Curabitur hendrerit rhoncus tortor eu hendrerit. Pellentesque eu ante vel elit luctus eleifend quis viverra nulla. Suspendisse odio diam, euismod eu porttitor molestie, sollicitudin sit amet nulla. Sed ante urna, dictum bibendum rhoncus et, blandit nec ante. Suspendisse tortor augue, accumsan quis suscipit id, accumsan sit amet erat. Donec pharetra varius lobortis. Maecenas ipsum diam, faucibus eu tempus id, convallis nec enim. Duis arcu turpis, fringilla nec egestas ut, dignissim tristique nulla. Curabitur suscipit dui non justo ultrices pharetra. Aliquam erat volutpat. Nulla facilisi. Quisque id felis eu sem aliquam fringilla.
+
+Etiam quis augue in tellus consequat eleifend. Aenean dignissim congue felis id elementum. Duis fringilla varius ipsum, nec suscipit leo semper vel. Ut sollicitudin, orci a tincidunt accumsan, diam lectus laoreet lacus, vel fermentum quam est vel eros. Aliquam fringilla sapien ac sapien faucibus convallis. Aliquam id nunc eu justo consequat tincidunt. Quisque nec nisl dui. Phasellus augue lectus, varius vitae auctor vel, rutrum at risus. Vivamus lacinia leo quis neque ultrices nec elementum felis fringilla. Proin vel porttitor lectus.
+
+Curabitur sapien lorem, mollis ut accumsan non, ultricies et metus. Curabitur vel lorem quis sapien fringilla laoreet. Morbi id urna ac orci elementum blandit eget volutpat neque. Pellentesque sem odio, iaculis eu pharetra vitae, cursus in quam. Nulla molestie ligula id massa luctus et pulvinar nisi pulvinar. Nunc fermentum augue a lacus fringilla rhoncus porttitor erat dictum. Nunc sit amet tellus et dui viverra auctor euismod at nisl. In sed congue magna. Proin et tortor ut augue placerat dignissim a eu justo. Morbi porttitor porta lobortis. Pellentesque nibh lacus, adipiscing ut tristique quis, consequat vitae velit. Maecenas ut luctus libero. Vivamus auctor odio et erat semper sagittis. Vivamus interdum velit in risus mattis quis dictum ante rhoncus. In sagittis porttitor eros, at lobortis metus ultrices vel. Curabitur non aliquam nisl. Vestibulum luctus feugiat suscipit. Etiam non lacus vel nulla egestas iaculis id quis risus.
+
+Etiam in auctor urna. Fusce ultricies molestie convallis. In hac habitasse platea dictumst. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Mauris iaculis lorem faucibus purus gravida at convallis turpis sollicitudin. Suspendisse at velit lorem, a fermentum ipsum. Etiam condimentum, dui vel condimentum elementum, sapien sem blandit sapien, et pharetra leo neque et lectus. Nunc viverra urna iaculis augue ultrices ac porttitor lacus dignissim. Aliquam ut turpis dui. Sed eget aliquet felis. In bibendum nibh sit amet sapien accumsan accumsan pharetra magna molestie.
+
+Mauris aliquet urna eget lectus adipiscing at congue turpis consequat. Vivamus tincidunt fermentum risus et feugiat. Nulla molestie ullamcorper nibh sed facilisis. Phasellus et cursus purus. Nam cursus, dui dictum ultrices viverra, erat risus varius elit, eu molestie dui eros quis quam. Aliquam et ante neque, ac consectetur dui. Donec condimentum erat id elit dictum sed accumsan leo sagittis. Proin consequat congue risus, vel tincidunt leo imperdiet eu. Vestibulum malesuada turpis eu metus imperdiet pretium. Aliquam condimentum ultrices nibh, eu semper enim eleifend a. Etiam condimentum nisl quam.
+
+Pellentesque id molestie nisl. Maecenas et lectus at justo molestie viverra sit amet sit amet ligula. Nullam non porttitor magna. Quisque elementum arcu cursus tortor rutrum lobortis. Morbi sit amet lectus vitae enim euismod dignissim eget at neque. Vivamus consequat vehicula dui, vitae auctor augue dignissim in. In tempus sem quis justo tincidunt sit amet auctor turpis lobortis. Pellentesque non est nunc. Vestibulum mollis fringilla interdum. Maecenas ipsum dolor, pharetra id tristique mattis, luctus vitae urna. Ut ullamcorper arcu eget elit convallis mollis. Pellentesque condimentum, massa ac hendrerit tempor, mauris purus blandit justo, et pharetra leo justo a est. Duis arcu augue, facilisis vel dignissim sed, aliquam quis magna. Quisque non consequat dolor. Suspendisse a ultrices leo.
+
+Donec vitae pretium nibh. Maecenas bibendum bibendum diam in placerat. Ut accumsan, mi vitae vestibulum euismod, nunc justo vulputate nisi, non placerat mi urna et diam. Maecenas malesuada lorem ut arcu mattis mollis. Nulla facilisi. Donec est leo, bibendum eu pulvinar in, cursus vel metus. Aliquam erat volutpat. Nullam feugiat porttitor neque in vulputate. Quisque nec mi eu magna consequat cursus non at arcu. Etiam risus metus, sollicitudin et ultrices at, tincidunt sed nunc. Sed eget scelerisque augue. Ut fringilla venenatis sem non eleifend. Nunc mattis, risus sit amet vulputate varius, risus justo egestas mauris, id interdum odio ipsum et nisl. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi id erat odio, nec pulvinar enim.
+
+Curabitur ac fermentum quam. Morbi eu eros sapien, vitae tempus dolor. Mauris vestibulum blandit enim ut venenatis. Aliquam egestas, eros at consectetur tincidunt, lorem augue iaculis est, nec mollis felis arcu in nunc. Sed in odio sed libero pellentesque volutpat vitae a ante. Morbi commodo volutpat tellus, ut viverra purus placerat fermentum. Integer iaculis facilisis arcu, at gravida lorem bibendum at. Aenean id eros eget est sagittis convallis sed et dui. Donec eu pulvinar tellus. Nunc dignissim rhoncus tellus, at pellentesque metus luctus at. Sed ornare aliquam diam, a porttitor leo sollicitudin sed. Nam vitae lectus lacus. Integer adipiscing quam neque, blandit posuere libero. Sed libero nunc, egestas sodales tempus sed, cursus blandit tellus. Vestibulum mi purus, ultricies quis placerat vel, molestie at dui.
+
+Nulla commodo odio justo. Pellentesque non ornare diam. In consectetur sapien ac nunc sagittis malesuada. Morbi ullamcorper tempor erat nec rutrum. Duis ut commodo justo. Cras est orci, consectetur sed interdum sed, scelerisque sit amet nulla. Vestibulum justo nulla, pellentesque a tempus et, dapibus et arcu. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique, eros nec congue adipiscing, ligula sem rhoncus felis, at ornare tellus mauris ac risus. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin mauris dui, tempor fermentum dictum et, cursus a leo. Maecenas nec nisl a tellus pellentesque rhoncus. Nullam ultrices euismod dui eu congue.
+
+In nec tempor risus. In faucibus nisi eget diam dignissim consequat. Donec pulvinar ante nec enim mattis rutrum. Vestibulum leo augue, molestie nec dapibus in, dictum at enim. Integer aliquam, lorem eu vulputate lacinia, mi orci tempor enim, eget mattis ligula magna a magna. Praesent sed erat ut tortor interdum viverra. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla facilisi. Maecenas sit amet lectus lacus. Nunc vitae purus id ligula laoreet condimentum. Duis auctor tortor vel dui pulvinar a facilisis arcu dignissim. In hac habitasse platea dictumst. Donec sollicitudin pellentesque egestas. Sed sed sem justo. Maecenas laoreet hendrerit mauris, ut porttitor lorem iaculis ac. Quisque molestie sem quis lorem tempor rutrum. Phasellus nibh mauris, rhoncus in consectetur non, aliquet eu massa.
+
+Curabitur velit arcu, pretium porta placerat quis, varius ut metus. Vestibulum vulputate tincidunt justo, vitae porttitor lectus imperdiet sit amet. Vivamus enim dolor, sollicitudin ut semper non, ornare ornare dui. Aliquam tempor fermentum sapien eget condimentum. Curabitur laoreet bibendum ante, in euismod lacus lacinia eu. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse potenti. Sed at libero eu tortor tempus scelerisque. Nulla facilisi. Nullam vitae neque id justo viverra rhoncus pretium at libero. Etiam est urna, aliquam vel pulvinar non, ornare vel purus.
+
+Nulla varius, nisi eget condimentum semper, metus est dictum odio, vel mattis risus est sed velit. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nunc non est nec tellus ultricies mattis ut eget velit. Integer condimentum ante id lorem blandit lacinia. Donec vel tortor augue, in condimentum nisi. Pellentesque pellentesque nulla ut nulla porttitor quis sodales enim rutrum. Sed augue risus, euismod a aliquet at, vulputate non libero. Nullam nibh odio, dignissim fermentum pulvinar ac, congue eu mi. Duis tincidunt, nibh id venenatis placerat, diam turpis gravida leo, sit amet mollis massa dolor quis mauris. Vivamus scelerisque sodales arcu et dapibus. Suspendisse potenti. Cras quis tellus arcu, quis laoreet sem. Fusce porttitor, sapien vel tristique sodales, velit leo porta arcu, quis pellentesque nunc metus non odio. Nam arcu libero, ullamcorper ut pharetra non, dignissim et velit. Quisque dolor lorem, vehicula sit amet scelerisque in, varius at nulla. Pellentesque vitae sem eget tortor iaculis pulvinar. Sed nunc justo, euismod gravida pulvinar eget, gravida eget turpis. Cras vel dictum nisi. Nullam nulla libero, gravida sit amet aliquam quis, commodo vitae odio. Cras vitae nibh nec dui placerat semper.
+
+Vivamus at fringilla eros. Vivamus at nisl id massa commodo feugiat quis non massa. Morbi tellus urna, auctor sit amet elementum sed, rutrum non lectus. Nulla feugiat dui in sapien ornare et imperdiet est ornare. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum semper rutrum tempor. Sed in felis nibh, sed aliquam enim. Curabitur ut quam scelerisque velit placerat dictum. Donec eleifend vehicula purus, eu vestibulum sapien rutrum eu. Vivamus in odio vel est vulputate iaculis. Nunc rutrum feugiat pretium.
+
+Maecenas ipsum neque, auctor quis lacinia vitae, euismod ac orci. Donec molestie massa consequat est porta ac porta purus tincidunt. Nam bibendum leo nec lacus mollis non condimentum dolor rhoncus. Nulla ac volutpat lorem. Nullam erat purus, convallis eget commodo id, varius quis augue. Nullam aliquam egestas mi, vel suscipit nisl mattis consequat. Quisque vel egestas sapien. Nunc lorem velit, convallis nec laoreet et, aliquet eget massa. Nam et nibh ac dui vehicula aliquam quis eu augue. Cras vel magna ut elit rhoncus interdum iaculis volutpat nisl. Suspendisse arcu lorem, varius rhoncus tempor id, pulvinar sed tortor. Pellentesque ultricies laoreet odio ac dignissim. Aliquam diam arcu, placerat quis egestas eget, facilisis eu nunc. Mauris vulputate, nisl sit amet mollis interdum, risus tortor ornare orci, sed egestas orci eros non diam. Vestibulum hendrerit, metus quis placerat pellentesque, enim purus faucibus dui, sit amet ultricies lectus ipsum id lorem. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Praesent eget diam odio, eu bibendum elit. In vestibulum orci eu erat tincidunt tristique.
+
+Cras consectetur ante eu turpis placerat sollicitudin. Mauris et lacus tortor, eget pharetra velit. Donec accumsan ultrices tempor. Donec at nibh a elit condimentum dapibus. Integer sit amet vulputate ante. Suspendisse potenti. In sodales laoreet massa vitae lacinia. Morbi vel lacus feugiat arcu vulputate molestie. Aliquam massa magna, ullamcorper accumsan gravida quis, rhoncus pulvinar nulla. Praesent sit amet ipsum diam, sit amet lacinia neque. In et sapien augue. Etiam enim elit, ultrices vel rutrum id, scelerisque non enim.
+
+Proin et egestas neque. Praesent et ipsum dolor. Nunc non varius nisl. Fusce in tortor nisi. Maecenas convallis neque in ligula blandit quis vehicula leo mollis. Pellentesque sagittis blandit leo, dapibus pellentesque leo ultrices ac. Curabitur ac egestas libero. Donec pretium pharetra pretium. Fusce imperdiet, turpis eu aliquam porta, ante elit eleifend risus, luctus auctor arcu ante ut nunc. Vivamus in leo felis, vitae eleifend lacus. Donec tempus aliquam purus porttitor tristique. Suspendisse diam neque, suscipit feugiat fringilla non, eleifend sit nullam.
diff --git a/test/elixir/test/data/lorem_b64.txt b/test/elixir/test/data/lorem_b64.txt
new file mode 100644
index 000000000..8a21d79e6
--- /dev/null
+++ b/test/elixir/test/data/lorem_b64.txt
@@ -0,0 +1 @@
+TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGhhc2VsbHVzIG51bmMgc2FwaWVuLCBwb3J0YSBpZCBwZWxsZW50ZXNxdWUgYXQsIGVsZW1lbnR1bSBldCBmZWxpcy4gQ3VyYWJpdHVyIGNvbmRpbWVudHVtIGFudGUgaW4gbWV0dXMgaWFjdWxpcyBxdWlzIGNvbmd1ZSBkaWFtIGNvbW1vZG8uIERvbmVjIGVsZWlmZW5kIGFudGUgc2VkIG51bGxhIGRhcGlidXMgY29udmFsbGlzLiBVdCBjdXJzdXMgYWxpcXVhbSBuZXF1ZSwgdmVsIHBvcnR0aXRvciB0ZWxsdXMgaW50ZXJkdW0gdXQuIFNlZCBwaGFyZXRyYSBsYWNpbmlhIGFkaXBpc2NpbmcuIEluIHRyaXN0aXF1ZSB0cmlzdGlxdWUgZmVsaXMgbm9uIHRpbmNpZHVudC4gTnVsbGEgYXVjdG9yIG1hdXJpcyBhIHZlbGl0IGN1cnN1cyB1bHRyaWNpZXMuIEluIGF0IGxpYmVybyBxdWlzIGp1c3RvIGNvbnNlY3RldHVyIGxhb3JlZXQuIE51bGxhbSBpZCB1bHRyaWNlcyBudW5jLiBEb25lYyBub24gdHVycGlzIG51bGxhLCBldSBsYWNpbmlhIGFudGUuIE51bmMgZXUgb3JjaSBldCB0dXJwaXMgcHJldGl1bSB2ZW5lbmF0aXMuIE5hbSBtb2xlc3RpZSwgbGFjdXMgYXQgZGlnbmlzc2ltIGVsZW1lbnR1bSwgYW50ZSBsaWJlcm8gY29uc2VjdGV0dXIgbGliZXJvLCB1dCBsYWNpbmlhIGxhY3VzIHVybmEgZXQgcHVydXMuIE51bGxhbSBsb3JlbSBpcHN1bSwgZGFwaWJ1cyB2ZWwgdWxsYW1jb3JwZXIgYSwgbWFsZXN1YWRhIGEgbWV0dXMuIFNlZCBwb3J0YSBhZGlwaXNjaW5nIG1hZ25hLCBxdWlzIHB1bHZpbmFyIHB1cnVzIG1hdHRpcyBmcmluZ2lsbGEuIEludGVnZXIgcGVsbGVudGVzcXVlIHNhcGllbiBpbiBuZXF1ZSB0cmlzdGlxdWUgYWMgaWFjdWxpcyBsaWJlcm8gdWx0cmljaWVzLiBVdCBlZ2V0IHBoYXJldHJhIHB1cnVzLgoKTnVsbGEgaW4gY29udmFsbGlzIHRlbGx1cy4gUHJvaW4gdGluY2lkdW50IHN1c2NpcGl0IHZ1bHB1dGF0ZS4gU3VzcGVuZGlzc2UgcG90ZW50aS4gTnVsbGFtIHRyaXN0aXF1ZSBqdXN0byBtaSwgYSB0cmlzdGlxdWUgbGlndWxhLiBEdWlzIGNvbnZhbGxpcyBhbGlxdWFtIGlhY3VsaXMuIE51bGxhIGRpY3R1bSBmcmluZ2lsbGEgY29uZ3VlLiBTdXNwZW5kaXNzZSBhYyBsZW8gbGVjdHVzLCBhYyBhbGlxdWFtIGp1c3RvLiBVdCBwb3J0dGl0b3IgY29tbW9kbyBtaSBzZWQgbHVjdHVzLiBOdWxsYSBhdCBlbmltIGxvcmVtLiBOdW5jIGV1IGp1c3RvIHNhcGllbiwgYSBibGFuZGl0IG9kaW8uIEN1cmFiaXR1ciBmYXVjaWJ1cyBzb2xsaWNpdHVkaW4gZG9sb3IsIGlkIGxhY2luaWEgc2VtIGF1Y3RvciBpbi4gRG9uZWMgdmFyaXVzIG51bmMgYXQgbGVjdHVzIHNhZ2l0dGlzIG5lYyBsdWN0dXMgYXJjdSBwaGFyZXRyYS4gTnVuYyBzZWQgbWV0dXMganVzdG8uIENyYXMgdmVsIG1hdXJpcyBkaWFtLiBVdCBmZXVnaWF0IGZlbGlzIGVnZXQgbmVxdWUgcGhhcmV0cmEgdmVzdGlidWx1bSBjb25zZWN0ZXR1ciBtYXNzYSBmYWNpbGlzaXMuIFF1aXNxdWUgY29uc2VjdGV0dXIgbHVjdHVzIG5pc2kgcXVpcyB0aW5jaWR1bnQuIFZpdmFtdXMgY3Vyc3VzIGN1cnN1cyBxdWFtIG5vbiBibGFuZGl0LiBQZWxsZW50ZXNxdWUgZXQgdmVsaXQgbGFjdXMuIFBlbGxlbnRlc3F1ZSBoYWJpdGFudCBtb3JiaSB0cmlzdGlxdWUgc2VuZWN0dXMgZXQgbmV0dXMgZXQgbWFsZXN1YWRhIGZhbWVzIGFjIHR1cnBpcyBlZ2VzdGFzLgoKSW4gZXQgZG9sb3Igdml0YWUgb3JjaSBhZGlwaXNjaW5nIGNvbmd1ZS4gQWxpcXVhbSBncmF2aWRhIG5pYmggYXQgbmlzbCBncmF2aWRhIG1vbGVzdGllLiBDdXJhYml0dXIgYSBiaWJlbmR1bSBzYXBpZW4uIEFsaXF1YW0gdGluY2lkdW50LCBudWxsYSBuZWMgcHJldGl1bSBsb2JvcnRpcywgb2RpbyBhdWd1ZSB0aW5jaWR1bnQgYXJjdSwgYSBsb2JvcnRpcyBvZGlvIHNlbSB1dCBwdXJ1cy4gRG9uZWMgYWNjdW1zYW4gbWF0dGlzIG51bmMgdml0YWUgbGFjaW5pYS4gU3VzcGVuZGlzc2UgcG90ZW50aS4gSW50ZWdlciBjb21tb2RvIG5pc2wgcXVpcyBuaWJoIGludGVyZHVtIG5vbiBmcmluZ2lsbGEgZHVpIHNvZGFsZXMuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEgZGljdHVtc3QuIEV0aWFtIHVsbGFtY29ycGVyLCBtaSBpZCBmZXVnaWF0IGJpYmVuZHVtLCBwdXJ1cyBuZXF1ZSBjdXJzdXMgbWF1cmlzLCBpZCBzb2RhbGVzIHF1YW0gbmlzaSBpZCB2ZWxpdC4gU2VkIGxlY3R1cyBsZW8sIHRpbmNpZHVudCB2ZWwgcmhvbmN1cyBpbXBlcmRpZXQsIGJsYW5kaXQgaW4gbGVvLiBJbnRlZ2VyIHF1aXMgbWFnbmEgbnVsbGEuIERvbmVjIHZlbCBuaXNsIG1hZ25hLCB1dCByaG9uY3VzIGR1aS4gQWxpcXVhbSBncmF2aWRhLCBudWxsYSBuZWMgZWxlaWZlbmQgbHVjdHVzLCBuZXF1ZSBuaWJoIHBoYXJldHJhIGFudGUsIHF1aXMgZWdlc3RhcyBlbGl0IG1ldHVzIGEgbWkuIE51bmMgbmVjIGF1Z3VlIHF1YW0uIE1vcmJpIHRpbmNpZHVudCB0cmlzdGlxdWUgdmFyaXVzLiBTdXNwZW5kaXNzZSBpYWN1bGlzIGVsaXQgZmV1Z2lhdCBtYWduYSBwZWxsZW50ZXNxdWUgdWx0cmljaWVzLiBWZXN0aWJ1bHVtIGFsaXF1YW0gdG9ydG9yIG5vbiBhbnRlIHVsbGFtY29ycGVyIGZyaW5naWxsYS4gRG9uZWMgaWFjdWxpcyBtaSBxdWlzIG1hdXJpcyBvcm5hcmUgdmVzdGlidWx1bS4KCkluIGEgbWFnbmEgbmlzaSwgYSB1bHRyaWNpZXMgbWFzc2EuIERvbmVjIGVsaXQgbmVxdWUsIHZpdmVycmEgbm9uIHRlbXBvciBxdWlzLCBmcmluZ2lsbGEgaW4gbWV0dXMuIEludGVnZXIgb2RpbyBvZGlvLCBldWlzbW9kIHZpdGFlIG1vbGxpcyBzZWQsIHNvZGFsZXMgZWdldCBsaWJlcm8uIERvbmVjIG5lYyBtYXNzYSBpbiBmZWxpcyBvcm5hcmUgcGhhcmV0cmEgYXQgbmVjIHRlbGx1cy4gTnVuYyBsb3JlbSBkb2xvciwgcHJldGl1bSB2ZWwgYXVjdG9yIGluLCB2b2x1dHBhdCB2aXRhZSBmZWxpcy4gTWFlY2VuYXMgcmhvbmN1cywgb3JjaSB2ZWwgYmxhbmRpdCBldWlzbW9kLCB0dXJwaXMgZXJhdCB0aW5jaWR1bnQgYW50ZSwgZWxlbWVudHVtIGFkaXBpc2NpbmcgbmlzbCB1cm5hIGluIG5pc2kuIFBoYXNlbGx1cyBzYWdpdHRpcywgZW5pbSBzZWQgYWNjdW1zYW4gY29uc2VxdWF0LCB1cm5hIGF1Z3VlIGxvYm9ydGlzIGVyYXQsIG5vbiBtYWxlc3VhZGEgcXVhbSBtZXR1cyBzb2xsaWNpdHVkaW4gYW50ZS4gSW4gbGVvIHB1cnVzLCBkaWduaXNzaW0gcXVpcyB2YXJpdXMgdmVsLCBwZWxsZW50ZXNxdWUgZXQgbmliaC4gSW4gc2VkIHRvcnRvciBpYWN1bGlzIGxpYmVybyBtb2xsaXMgcGVsbGVudGVzcXVlIGlkIHZpdGFlIGxlY3R1cy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEgZGljdHVtc3QuIFBoYXNlbGx1cyBtYXVyaXMgZW5pbSwgcG9zdWVyZSBlZ2V0IGx1Y3R1cyBhYywgaWFjdWxpcyBldCBxdWFtLiBWaXZhbXVzIGV0IG5pYmggZGlhbSwgZWxlbWVudHVtIGVnZXN0YXMgdGVsbHVzLiBBZW5lYW4gdnVscHV0YXRlIG1hbGVzdWFkYSBlc3QuIFNlZCBwb3N1ZXJlIHBvcnRhIGRpYW0gYSBzb2RhbGVzLiBQcm9pbiBldSBzZW0gbm9uIHZlbGl0IGZhY2lsaXNpcyB2ZW5lbmF0aXMgc2VkIGEgdHVycGlzLgoKUGVsbGVudGVzcXVlIHNlZCByaXN1cyBhIGFudGUgdnVscHV0YXRlIGxvYm9ydGlzIHNpdCBhbWV0IGV1IG5pc2wuIFN1c3BlbmRpc3NlIHV0IGVyb3MgbWksIGEgcmhvbmN1cyBsYWN1cy4gQ3VyYWJpdHVyIGZlcm1lbnR1bSB2ZWhpY3VsYSB0ZWxsdXMsIGEgb3JuYXJlIG1pIGNvbmRpbWVudHVtIHZlbC4gSW50ZWdlciBtb2xlc3RpZSB2b2x1dHBhdCB2aXZlcnJhLiBJbnRlZ2VyIHBvc3VlcmUgZXVpc21vZCB2ZW5lbmF0aXMuIFByb2luIGFjIG1hdXJpcyBzZWQgbnVsbGEgcGhhcmV0cmEgcG9ydHRpdG9yLiBEdWlzIHZlbCBkdWkgaW4gcmlzdXMgc29kYWxlcyBhdWN0b3Igc2l0IGFtZXQgbm9uIGVuaW0uIE1hZWNlbmFzIG1vbGxpcyBsYWN1cyBhdCBsaWd1bGEgZmF1Y2lidXMgc29kYWxlcy4gQ3JhcyB2ZWwgbmVxdWUgYXJjdS4gU2VkIHRpbmNpZHVudCB0b3J0b3IgcHJldGl1bSBuaXNpIGludGVyZHVtIHF1aXMgZGljdHVtIGFyY3UgbGFvcmVldC4gTW9yYmkgcHJldGl1bSB1bHRyaWNlcyBmZXVnaWF0LiBNYWVjZW5hcyBjb252YWxsaXMgYXVndWUgbmVjIGZlbGlzIG1hbGVzdWFkYSBtYWxlc3VhZGEgc2NlbGVyaXNxdWUgbWF1cmlzIHBsYWNlcmF0LiBTZWQgYXQgbWFnbmEgZW5pbSwgYXQgZnJpbmdpbGxhIGRvbG9yLiBRdWlzcXVlIHV0IG1hdHRpcyBkdWkuIFByYWVzZW50IGNvbnNlY3RldHVyIGFudGUgdml2ZXJyYSBuaXNpIGJsYW5kaXQgcGhhcmV0cmEuIFF1aXNxdWUgbWV0dXMgZWxpdCwgZGlnbmlzc2ltIHZpdGFlIGZlcm1lbnR1bSBzaXQgYW1ldCwgZnJpbmdpbGxhIGltcGVyZGlldCBvZGlvLiBDcmFzIGVnZXQgcHVydXMgZWdldCB0ZWxsdXMgZmV1Z2lhdCBsdWN0dXMgYSBhYyBwdXJ1cy4gQ3JhcyB2aXRhZSBuaXNsIHZlbCBhdWd1ZSByaG9uY3VzIHBvcnR0aXRvciBzaXQgYW1ldCBxdWlzIGxvcmVtLiBEb25lYyBpbnRlcmR1bSBwZWxsZW50ZXNxdWUgYWRpcGlzY2luZy4gUGhhc2VsbHVzIG5lcXVlIGxpYmVybywgYWxpcXVhbSBpbiBtYXR0aXMgdml0YWUsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgbmliaC4KCkRvbmVjIG5lYyBudWxsYSB1cm5hLCBhYyBzYWdpdHRpcyBsZWN0dXMuIFN1c3BlbmRpc3NlIG5vbiBlbGl0IHNlZCBtaSBhdWN0b3IgZmFjaWxpc2lzIHZpdGFlIGV0IGxlY3R1cy4gRnVzY2UgYWMgdnVscHV0YXRlIG1hdXJpcy4gTW9yYmkgY29uZGltZW50dW0gdWx0cmljZXMgbWV0dXMsIGV0IGFjY3Vtc2FuIHB1cnVzIG1hbGVzdWFkYSBhdC4gTWFlY2VuYXMgbG9ib3J0aXMgYW50ZSBzZWQgbWFzc2EgZGljdHVtIHZpdGFlIHZlbmVuYXRpcyBlbGl0IGNvbW1vZG8uIFByb2luIHRlbGx1cyBlcm9zLCBhZGlwaXNjaW5nIHNlZCBkaWduaXNzaW0gdml0YWUsIHRlbXBvciBlZ2V0IGFudGUuIEFlbmVhbiBpZCB0ZWxsdXMgbmVjIG1hZ25hIGN1cnN1cyBwaGFyZXRyYSB2aXRhZSB2ZWwgZW5pbS4gTW9yYmkgdmVzdGlidWx1bSBwaGFyZXRyYSBlc3QgaW4gdnVscHV0YXRlLiBBbGlxdWFtIHZpdGFlIG1ldHVzIGFyY3UsIGlkIGFsaXF1ZXQgbnVsbGEuIFBoYXNlbGx1cyBsaWd1bGEgZXN0LCBoZW5kcmVyaXQgbmVjIGlhY3VsaXMgdXQsIHZvbHV0cGF0IHZlbCBlcm9zLiBTdXNwZW5kaXNzZSB2aXRhZSB1cm5hIHR1cnBpcywgcGxhY2VyYXQgYWRpcGlzY2luZyBkaWFtLiBQaGFzZWxsdXMgZmV1Z2lhdCB2ZXN0aWJ1bHVtIG5lcXVlIGV1IGRhcGlidXMuIE51bGxhIGZhY2lsaXNpLiBEdWlzIHRvcnRvciBmZWxpcywgZXVpc21vZCBzaXQgYW1ldCBhbGlxdWV0IGluLCB2b2x1dHBhdCBuZWMgdHVycGlzLiBNYXVyaXMgcmhvbmN1cyBpcHN1bSB1dCBwdXJ1cyBlbGVpZmVuZCB1dCBsb2JvcnRpcyBsZWN0dXMgZGFwaWJ1cy4gUXVpc3F1ZSBub24gZXJhdCBsb3JlbS4gVml2YW11cyBwb3N1ZXJlIGltcGVyZGlldCBpYWN1bGlzLiBVdCBsaWd1bGEgbGFjdXMsIGVsZWlmZW5kIGF0IHRlbXBvciBpZCwgYXVjdG9yIGV1IGxlby4KCkRvbmVjIG1pIGVuaW0sIGxhb3JlZXQgcHVsdmluYXIgbW9sbGlzIGV1LCBtYWxlc3VhZGEgdml2ZXJyYSBudW5jLiBJbiB2aXRhZSBtZXR1cyB2aXRhZSBuZXF1ZSB0ZW1wb3IgZGFwaWJ1cy4gTWFlY2VuYXMgdGluY2lkdW50IHB1cnVzIGEgZmVsaXMgYWxpcXVhbSBwbGFjZXJhdC4gTnVsbGEgZmFjaWxpc2kuIFN1c3BlbmRpc3NlIHBsYWNlcmF0IHBoYXJldHJhIG1hdHRpcy4gSW50ZWdlciB0ZW1wb3IgbWFsZXN1YWRhIGp1c3RvIGF0IHRlbXB1cy4gTWFlY2VuYXMgdmVoaWN1bGEgbG9yZW0gYSBzYXBpZW4gYmliZW5kdW0gdmVsIGlhY3VsaXMgcmlzdXMgZmV1Z2lhdC4gUGVsbGVudGVzcXVlIGRpYW0gZXJhdCwgZGFwaWJ1cyBldCBwZWxsZW50ZXNxdWUgcXVpcywgbW9sZXN0aWUgdXQgbWFzc2EuIFZpdmFtdXMgaWFjdWxpcyBpbnRlcmR1bSBtYXNzYSBpZCBiaWJlbmR1bS4gUXVpc3F1ZSB1dCBtYXVyaXMgZHVpLCBzaXQgYW1ldCB2YXJpdXMgZWxpdC4gVmVzdGlidWx1bSBlbGl0IGxvcmVtLCBydXRydW0gbm9uIGNvbnNlY3RldHVyIHV0LCBsYW9yZWV0IG5lYyBudW5jLiBEb25lYyBuZWMgbWF1cmlzIGFudGUuIEN1cmFiaXR1ciB1dCBlc3Qgc2VkIG9kaW8gcGhhcmV0cmEgbGFvcmVldC4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3VyYWJpdHVyIHB1cnVzIHJpc3VzLCBsYW9yZWV0IHNlZCBwb3J0YSBpZCwgc2FnaXR0aXMgdmVsIGlwc3VtLiBNYWVjZW5hcyBuaWJoIGRpYW0sIGN1cnN1cyBldCB2YXJpdXMgc2l0IGFtZXQsIGZyaW5naWxsYSBzZWQgbWFnbmEuIE51bGxhbSBpZCBuZXF1ZSBldSBsZW8gZmF1Y2lidXMgbW9sbGlzLiBEdWlzIG5lYyBhZGlwaXNjaW5nIG1hdXJpcy4gU3VzcGVuZGlzc2Ugc29sbGljaXR1ZGluLCBlbmltIGV1IHB1bHZpbmFyIGNvbW1vZG8sIGVyYXQgYXVndWUgdWx0cmljZXMgbWksIGEgdHJpc3RpcXVlIG1hZ25hIHNlbSBub24gbGliZXJvLgoKU2VkIGluIG1ldHVzIG51bGxhLiBQcmFlc2VudCBuZWMgYWRpcGlzY2luZyBzYXBpZW4uIERvbmVjIGxhb3JlZXQsIHZlbGl0IG5vbiBydXRydW0gdmVzdGlidWx1bSwgbGlndWxhIG5lcXVlIGFkaXBpc2NpbmcgdHVycGlzLCBhdCBhdWN0b3Igc2FwaWVuIGVsaXQgdXQgbWFzc2EuIE51bGxhbSBhbGlxdWFtLCBlbmltIHZlbCBwb3N1ZXJlIHJ1dHJ1bSwganVzdG8gZXJhdCBsYW9yZWV0IGVzdCwgdmVsIGZyaW5naWxsYSBsYWN1cyBuaXNpIG5vbiBsZWN0dXMuIEV0aWFtIGxlY3R1cyBudW5jLCBsYW9yZWV0IGV0IHBsYWNlcmF0IGF0LCB2ZW5lbmF0aXMgcXVpcyBsaWJlcm8uIFByYWVzZW50IGluIHBsYWNlcmF0IGVsaXQuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gUGVsbGVudGVzcXVlIGZyaW5naWxsYSBhdWd1ZSBldSBuaWJoIHBsYWNlcmF0IGRpY3R1bS4gTnVuYyBwb3J0dGl0b3IgdHJpc3RpcXVlIGRpYW0sIGV1IGFsaXF1YW0gZW5pbSBhbGlxdWV0IHZlbC4gQWxpcXVhbSBsYWNpbmlhIGludGVyZHVtIGlwc3VtLCBpbiBwb3N1ZXJlIG1ldHVzIGx1Y3R1cyB2ZWwuIFZpdmFtdXMgZXQgbmlzbCBhIGVyb3Mgc2VtcGVyIGVsZW1lbnR1bS4gRG9uZWMgdmVuZW5hdGlzIG9yY2kgYXQgZGlhbSB0cmlzdGlxdWUgc29sbGljaXR1ZGluLiBJbiBldSBlcm9zIHNlZCBvZGlvIHJ1dHJ1bSBsdWN0dXMgbm9uIG5lYyB0ZWxsdXMuCgpOdWxsYSBuZWMgZmVsaXMgZWxpdC4gTnVsbGFtIGluIGlwc3VtIGluIGlwc3VtIGNvbnNlcXVhdCBmcmluZ2lsbGEgcXVpcyB2ZWwgdG9ydG9yLiBQaGFzZWxsdXMgbm9uIG1hc3NhIG5pc2ksIHNpdCBhbWV0IGFsaXF1YW0gdXJuYS4gU2VkIGZlcm1lbnR1bSBuaWJoIHZpdGFlIGxhY3VzIHRpbmNpZHVudCBuZWMgdGluY2lkdW50IG1hc3NhIGJpYmVuZHVtLiBFdGlhbSBlbGl0IGR1aSwgZmFjaWxpc2lzIHNpdCBhbWV0IHZlaGljdWxhIG5lYywgaWFjdWxpcyBhdCBzYXBpZW4uIFV0IGF0IG1hc3NhIGlkIGR1aSB1bHRyaWNlcyB2b2x1dHBhdCB1dCBhYyBsaWJlcm8uIEZ1c2NlIGlwc3VtIG1pLCBiaWJlbmR1bSBhIGxhY2luaWEgZXQsIHB1bHZpbmFyIGVnZXQgbWF1cmlzLiBQcm9pbiBmYXVjaWJ1cyB1cm5hIHV0IGxvcmVtIGVsZW1lbnR1bSB2dWxwdXRhdGUuIER1aXMgcXVhbSBsZW8sIG1hbGVzdWFkYSBub24gZXVpc21vZCB1dCwgYmxhbmRpdCBmYWNpbGlzaXMgbWF1cmlzLiBTdXNwZW5kaXNzZSBzaXQgYW1ldCBtYWduYSBpZCB2ZWxpdCB0aW5jaWR1bnQgYWxpcXVldCBuZWMgZXUgZG9sb3IuIEN1cmFiaXR1ciBiaWJlbmR1bSBsb3JlbSB2ZWwgZmVsaXMgdGVtcHVzIGRhcGlidXMuIEFsaXF1YW0gZXJhdCB2b2x1dHBhdC4gQWVuZWFuIGN1cnN1cyB0b3J0b3IgbmVjIGR1aSBhbGlxdWV0IHBvcnRhLiBBZW5lYW4gY29tbW9kbyBpYWN1bGlzIHN1c2NpcGl0LiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgUXVpc3F1ZSBzaXQgYW1ldCBvcm5hcmUgZWxpdC4gTmFtIGxpZ3VsYSByaXN1cywgdmVzdGlidWx1bSBuZWMgbWF0dGlzIGluLCBjb25kaW1lbnR1bSBhYyBhbnRlLiBEb25lYyBmcmluZ2lsbGEsIGp1c3RvIGV0IHVsdHJpY2VzIGZhdWNpYnVzLCB0ZWxsdXMgZXN0IHZvbHV0cGF0IG1hc3NhLCB2aXRhZSBjb21tb2RvIHNhcGllbiBkaWFtIG5vbiByaXN1cy4gVml2YW11cyBhdCBhcmN1IGdyYXZpZGEgcHVydXMgbW9sbGlzIGZldWdpYXQuCgpOdWxsYSBhIHR1cnBpcyBxdWlzIHNhcGllbiBjb21tb2RvIGRpZ25pc3NpbSBldSBxdWlzIGp1c3RvLiBNYWVjZW5hcyBldSBsb3JlbSBvZGlvLCB1dCBoZW5kcmVyaXQgdmVsaXQuIEN1bSBzb2NpaXMgbmF0b3F1ZSBwZW5hdGlidXMgZXQgbWFnbmlzIGRpcyBwYXJ0dXJpZW50IG1vbnRlcywgbmFzY2V0dXIgcmlkaWN1bHVzIG11cy4gUHJvaW4gZmFjaWxpc2lzIHBvcnR0aXRvciB1bGxhbWNvcnBlci4gUHJhZXNlbnQgbW9sbGlzIGRpZ25pc3NpbSBtYXNzYSwgbGFvcmVldCBhbGlxdWV0IHZlbGl0IHBlbGxlbnRlc3F1ZSBub24uIE51bmMgZmFjaWxpc2lzIGNvbnZhbGxpcyB0cmlzdGlxdWUuIE1hdXJpcyBwb3J0dGl0b3IgYW50ZSBhdCB0ZWxsdXMgY29udmFsbGlzIHBsYWNlcmF0LiBNb3JiaSBhbGlxdWV0IG5pc2kgYWMgbmlzbCBwdWx2aW5hciBpZCBkaWN0dW0gbmlzbCBtb2xsaXMuIFNlZCBvcm5hcmUgc2VtIGV0IHJpc3VzIHBsYWNlcmF0IGxvYm9ydGlzIGlkIGVnZXQgZWxpdC4gSW50ZWdlciBjb25zZXF1YXQsIG1hZ25hIGlkIHN1c2NpcGl0IHBoYXJldHJhLCBudWxsYSB2ZWxpdCBzdXNjaXBpdCBvcmNpLCB1dCBpbnRlcmR1bSBhdWd1ZSBhdWd1ZSBxdWlzIHF1YW0uIEZ1c2NlIHByZXRpdW0gYWxpcXVldCB2dWxwdXRhdGUuIE1hdXJpcyBibGFuZGl0IGRpY3R1bSBtb2xlc3RpZS4gUHJvaW4gbnVsbGEgbmliaCwgYmliZW5kdW0gZXUgcGxhY2VyYXQgYXQsIHRpbmNpZHVudCBhYyBuaXNsLiBOdWxsYW0gdnVscHV0YXRlIG1ldHVzIHV0IGxpYmVybyBydXRydW0gdWx0cmljaWVzLiBOdW5jIHNpdCBhbWV0IGR1aSBtYXVyaXMuIFN1c3BlbmRpc3NlIGFkaXBpc2NpbmcgbGFjdXMgaW4gYXVndWUgZWxlaWZlbmQgbW9sbGlzLgoKRHVpcyBwcmV0aXVtIHVsdHJpY2VzIG1hdHRpcy4gTmFtIGV1aXNtb2QgcmlzdXMgYSBlcmF0IGxhY2luaWEgYmliZW5kdW0uIE1vcmJpIG1hc3NhIHRvcnRvciwgY29uc2VjdGV0dXIgaWQgZWxlaWZlbmQgaWQsIHBlbGxlbnRlc3F1ZSB2ZWwgdG9ydG9yLiBQcmFlc2VudCB1cm5hIGxvcmVtLCBwb3J0dGl0b3IgYXQgY29uZGltZW50dW0gdml0YWUsIGx1Y3R1cyBlZ2V0IGVsaXQuIE1hZWNlbmFzIGZyaW5naWxsYSBxdWFtIGNvbnZhbGxpcyBlc3QgaGVuZHJlcml0IHZpdmVycmEuIEV0aWFtIHZlaGljdWxhLCBzYXBpZW4gbm9uIHB1bHZpbmFyIGFkaXBpc2NpbmcsIG5pc2kgbWFzc2EgdmVzdGlidWx1bSBlc3QsIGlkIGludGVyZHVtIG1hdXJpcyB2ZWxpdCBldSBlc3QuIFZlc3RpYnVsdW0gZXN0IGFyY3UsIGZhY2lsaXNpcyBhdCB1bHRyaWNpZXMgbm9uLCB2dWxwdXRhdGUgaWQgc2FwaWVuLiBWZXN0aWJ1bHVtIGlwc3VtIG1ldHVzLCBwaGFyZXRyYSBuZWMgcGVsbGVudGVzcXVlIGlkLCBmYWNpbGlzaXMgaWQgc2FwaWVuLiBEb25lYyBydXRydW0gb2RpbyBldCBsYWN1cyB1bHRyaWNpZXMgdWxsYW1jb3JwZXIuIEludGVnZXIgc2VkIGVzdCB1dCBtaSBwb3N1ZXJlIHRpbmNpZHVudCBxdWlzIG5vbiBsZW8uIE1vcmJpIHRlbGx1cyBqdXN0bywgdWx0cmljaWVzIHNpdCBhbWV0IHVsdHJpY2VzIHF1aXMsIGZhY2lsaXNpcyB2aXRhZSBtYWduYS4gRG9uZWMgbGlndWxhIG1ldHVzLCBwZWxsZW50ZXNxdWUgbm9uIHRyaXN0aXF1ZSBhYywgdmVzdGlidWx1bSBzZWQgZXJhdC4gQWxpcXVhbSBlcmF0IHZvbHV0cGF0LgoKTmFtIGRpZ25pc3NpbSwgbmlzbCBlZ2V0IGNvbnNlcXVhdCBldWlzbW9kLCBzZW0gbGVjdHVzIGF1Y3RvciBvcmNpLCB1dCBwb3J0dGl0b3IgbGFjdXMgZHVpIGFjIG5lcXVlLiBJbiBoYWMgaGFiaXRhc3NlIHBsYXRlYSBkaWN0dW1zdC4gRnVzY2UgZWdlc3RhcyBwb3J0YSBmYWNpbGlzaXMuIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBNYXVyaXMgY3Vyc3VzIHJob25jdXMgcmlzdXMgYWMgZXVpc21vZC4gUXVpc3F1ZSB2aXRhZSByaXN1cyBhIHRlbGx1cyB2ZW5lbmF0aXMgY29udmFsbGlzLiBDdXJhYml0dXIgbGFvcmVldCBzYXBpZW4gZXUgcXVhbSBsdWN0dXMgbG9ib3J0aXMuIFZpdmFtdXMgc29sbGljaXR1ZGluIHNvZGFsZXMgZG9sb3Igdml0YWUgc29kYWxlcy4gU3VzcGVuZGlzc2UgcGhhcmV0cmEgbGFvcmVldCBhbGlxdWV0LiBNYWVjZW5hcyB1bGxhbWNvcnBlciBvcmNpIHZlbCB0b3J0b3IgbHVjdHVzIGlhY3VsaXMgdXQgdml0YWUgbWV0dXMuIFZlc3RpYnVsdW0gdXQgYXJjdSBhYyB0ZWxsdXMgbWF0dGlzIGVsZWlmZW5kIGVnZXQgdmVoaWN1bGEgZWxpdC4KCkluIHNlZCBmZXVnaWF0IGVyb3MuIERvbmVjIGJpYmVuZHVtIHVsbGFtY29ycGVyIGRpYW0sIGV1IGZhdWNpYnVzIG1hdXJpcyBkaWN0dW0gc2VkLiBEdWlzIHRpbmNpZHVudCBqdXN0byBpbiBuZXF1ZSBhY2N1bXNhbiBkaWN0dW0uIE1hZWNlbmFzIGluIHJ1dHJ1bSBzYXBpZW4uIFV0IGlkIGZldWdpYXQgbGFjdXMuIE51bGxhIGZhY2lsaXNpLiBOdW5jIGFjIGxvcmVtIGlkIHF1YW0gdmFyaXVzIGN1cnN1cyBhIGV0IGVsaXQuIEFlbmVhbiBwb3N1ZXJlIGxpYmVybyBldSB0b3J0b3IgdmVoaWN1bGEgdXQgdWxsYW1jb3JwZXIgb2RpbyBjb25zZXF1YXQuIFNlZCBpbiBkaWduaXNzaW0gZHVpLiBDdXJhYml0dXIgaWFjdWxpcyB0ZW1wb3IgcXVhbSBuZWMgcGxhY2VyYXQuIEFsaXF1YW0gdmVuZW5hdGlzIG5pYmggZXQganVzdG8gaWFjdWxpcyBsYWNpbmlhLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gUGVsbGVudGVzcXVlIHRlbXB1cyBtYWduYSBzZWQgbWkgYWxpcXVldCBlZ2V0IHZhcml1cyBvZGlvIGNvbmd1ZS4KCkludGVnZXIgc2VtIHNlbSwgc2VtcGVyIGluIHZlc3RpYnVsdW0gdml0YWUsIGxvYm9ydGlzIHF1aXMgZXJhdC4gRHVpcyBhbnRlIGxlY3R1cywgZmVybWVudHVtIHNlZCB0ZW1wb3Igc2l0IGFtZXQsIHBsYWNlcmF0IHNpdCBhbWV0IHNlbS4gTWF1cmlzIGNvbmd1ZSB0aW5jaWR1bnQgaXBzdW0uIFV0IHZpdmVycmEsIGxhY3VzIHZlbCB2YXJpdXMgcGhhcmV0cmEsIHB1cnVzIGVuaW0gcHVsdmluYXIgaXBzdW0sIG5vbiBwZWxsZW50ZXNxdWUgZW5pbSBqdXN0byBub24gZXJhdC4gRnVzY2UgaXBzdW0gb3JjaSwgdWx0cmljZXMgc2VkIHBlbGxlbnRlc3F1ZSBhdCwgaGVuZHJlcml0IGxhb3JlZXQgZW5pbS4gTnVuYyBibGFuZGl0IG1vbGxpcyBwcmV0aXVtLiBVdCBtb2xsaXMsIG51bGxhIGFsaXF1YW0gc29kYWxlcyB2ZXN0aWJ1bHVtLCBsaWJlcm8gbG9yZW0gdGVtcHVzIHRvcnRvciwgYSBwZWxsZW50ZXNxdWUgbmliaCBlbGl0IGEgaXBzdW0uIFBoYXNlbGx1cyBmZXJtZW50dW0gbGlndWxhIGF0IG5lcXVlIGFkaXBpc2Npbmcgc29sbGljaXR1ZGluLiBTdXNwZW5kaXNzZSBpZCBpcHN1bSBhcmN1LiBTZWQgdGluY2lkdW50IHBsYWNlcmF0IHZpdmVycmEuIERvbmVjIGxpYmVybyBhdWd1ZSwgcG9ydHRpdG9yIHNpdCBhbWV0IHZhcml1cyBlZ2V0LCBydXRydW0gbmVjIGxhY3VzLiBQcm9pbiBibGFuZGl0IG9yY2kgc2l0IGFtZXQgZGlhbSBkaWN0dW0gaWQgcG9ydHRpdG9yIHJpc3VzIGlhY3VsaXMuIEludGVnZXIgbGFjaW5pYSBmZXVnaWF0IGxlbywgdml0YWUgYXVjdG9yIHR1cnBpcyBlbGVpZmVuZCB2ZWwuIFN1c3BlbmRpc3NlIGxvcmVtIHF1YW0sIHByZXRpdW0gaWQgYmliZW5kdW0gc2VkLCB2aXZlcnJhIHZpdGFlIHRvcnRvci4gTnVsbGFtIHVsdHJpY2llcyBsaWJlcm8gZXUgcmlzdXMgY29udmFsbGlzIGVnZXQgdWxsYW1jb3JwZXIgbmlzaSBlbGVtZW50dW0uIE1hdXJpcyBudWxsYSBlbGl0LCBiaWJlbmR1bSBpZCB2dWxwdXRhdGUgdml0YWUsIGltcGVyZGlldCBydXRydW0gbG9yZW0uIEN1cmFiaXR1ciBlZ2V0IGRpZ25pc3NpbSBvcmNpLiBTZWQgc2VtcGVyIHRlbGx1cyBpcHN1bSwgYXQgYmxhbmRpdCBkdWkuIEludGVnZXIgZGFwaWJ1cyBmYWNpbGlzaXMgc29kYWxlcy4gVml2YW11cyBzb2xsaWNpdHVkaW4gdmFyaXVzIGVzdCwgcXVpcyBvcm5hcmUganVzdG8gY3Vyc3VzIGlkLgoKTnVuYyB2ZWwgdWxsYW1jb3JwZXIgbWkuIFN1c3BlbmRpc3NlIHBvdGVudGkuIE51bmMgZXQgdXJuYSBhIGF1Z3VlIHNjZWxlcmlzcXVlIHVsdHJpY2VzIG5vbiBxdWlzIG1pLiBJbiBxdWlzIHBvcnR0aXRvciBlbGl0LiBBZW5lYW4gcXVpcyBlcmF0IG51bGxhLCBhIHZlbmVuYXRpcyB0ZWxsdXMuIEZ1c2NlIHZlc3RpYnVsdW0gbmlzaSBzZWQgbGVvIGFkaXBpc2NpbmcgZGlnbmlzc2ltLiBOdW5jIGludGVyZHVtLCBsb3JlbSBldCBsYWNpbmlhIHZlc3RpYnVsdW0sIHF1YW0gZXN0IG1hdHRpcyBtYWduYSwgc2l0IGFtZXQgdm9sdXRwYXQgZWxpdCBhdWd1ZSBhdCBsaWJlcm8uIENyYXMgZ3JhdmlkYSBkdWkgcXVpcyB2ZWxpdCBsb2JvcnRpcyBjb25kaW1lbnR1bSBldCBlbGVpZmVuZCBsaWd1bGEuIFBoYXNlbGx1cyBhYyBtZXR1cyBxdWFtLCBpZCB2ZW5lbmF0aXMgbWkuIEFsaXF1YW0gdXQgdHVycGlzIGFjIHRlbGx1cyBkYXBpYnVzIGRhcGlidXMgZXUgaW4gbWkuIFF1aXNxdWUgZWdldCBuaWJoIGVyb3MuIEZ1c2NlIGNvbnNlY3RldHVyIGxlbyB2ZWxpdC4KClZlc3RpYnVsdW0gc2VtcGVyIGVnZXN0YXMgbWF1cmlzLiBNb3JiaSB2ZXN0aWJ1bHVtIHNlbSBzZW0uIEFsaXF1YW0gdmVuZW5hdGlzLCBmZWxpcyBzZWQgZWxlaWZlbmQgcG9ydGEsIG1hdXJpcyBkaWFtIHNlbXBlciBhcmN1LCBzaXQgYW1ldCB1bHRyaWNpZXMgZXN0IHNhcGllbiBzaXQgYW1ldCBsaWJlcm8uIFZlc3RpYnVsdW0gZHVpIG9yY2ksIG9ybmFyZSBjb25kaW1lbnR1bSBtb2xsaXMgbmVjLCBtb2xlc3RpZSBhYyBlcm9zLiBQcm9pbiB2aXRhZSBtb2xsaXMgdmVsaXQuIFByYWVzZW50IGVnZXQgZmVsaXMgbWkuIE1hZWNlbmFzIGV1IHZ1bHB1dGF0ZSBuaXNpLiBWZXN0aWJ1bHVtIHZhcml1cywgYXJjdSBpbiB1bHRyaWNpZXMgdmVzdGlidWx1bSwgbmliaCBsZW8gc2FnaXR0aXMgb2RpbywgdXQgYmliZW5kdW0gbmlzbCBtaSBuZWMgZGlhbS4gSW50ZWdlciBhdCBlbmltIGZldWdpYXQgbnVsbGEgc2VtcGVyIGJpYmVuZHVtIHV0IGEgdmVsaXQuIFByb2luIGF0IG5pc2kgdXQgbG9yZW0gYWxpcXVhbSB2YXJpdXMgZWdldCBxdWlzIGVsaXQuIE51bGxhbSBuZWMgb2RpbyB2ZWwgbGVjdHVzIGNvbmd1ZSBjb25zZXF1YXQgYWRpcGlzY2luZyBhYyBtaS4gRnVzY2Ugdml0YWUgbGFvcmVldCBsaWJlcm8uIEN1cmFiaXR1ciBzaXQgYW1ldCBzZW0gbmVxdWUsIG5lYyBwb3N1ZXJlIGVuaW0uIEN1cmFiaXR1ciBhdCBtYXNzYSBhIHNlbSBncmF2aWRhIGlhY3VsaXMgbmVjIGV0IG5pYmguIFNlZCB2aXRhZSBkdWkgdml0YWUgbGVvIHRpbmNpZHVudCBwcmV0aXVtIGEgYWxpcXVhbSBlcmF0LiBTdXNwZW5kaXNzZSB1bHRyaWNpZXMgb2RpbyBhdCBtZXR1cyB0ZW1wb3IgaW4gcGVsbGVudGVzcXVlIGFyY3UgdWx0cmljaWVzLgoKU2VkIGFsaXF1YW0gbWF0dGlzIHF1YW0sIGluIHZ1bHB1dGF0ZSBzYXBpZW4gdWx0cmljZXMgaW4uIFBlbGxlbnRlc3F1ZSBxdWlzIHZlbGl0IHNlZCBkdWkgaGVuZHJlcml0IGN1cnN1cy4gUGVsbGVudGVzcXVlIG5vbiBudW5jIGxhY3VzLCBhIHNlbXBlciBtZXR1cy4gRnVzY2UgZXVpc21vZCB2ZWxpdCBxdWlzIGRpYW0gc3VzY2lwaXQgY29uc2VxdWF0LiBQcmFlc2VudCBjb21tb2RvIGFjY3Vtc2FuIG5lcXVlLiBQcm9pbiB2aXZlcnJhLCBpcHN1bSBub24gdHJpc3RpcXVlIHVsdHJpY2VzLCB2ZWxpdCB2ZWxpdCBmYWNpbGlzaXMgbG9yZW0sIHZlbCBydXRydW0gbmVxdWUgZXJvcyBhYyBuaXNpLiBTdXNwZW5kaXNzZSBmZWxpcyBtYXNzYSwgZmF1Y2lidXMgaW4gdm9sdXRwYXQgYWMsIGRhcGlidXMgZXQgb2Rpby4gUGVsbGVudGVzcXVlIGlkIHRlbGx1cyBzaXQgYW1ldCByaXN1cyB1bHRyaWNpZXMgdWxsYW1jb3JwZXIgbm9uIG5lYyBzYXBpZW4uIE5hbSBwbGFjZXJhdCB2aXZlcnJhIHVsbGFtY29ycGVyLiBOYW0gcGxhY2VyYXQgcG9ydHRpdG9yIHNhcGllbiBuZWMgcHVsdmluYXIuIEN1cmFiaXR1ciB2ZWwgb2RpbyBzaXQgYW1ldCBvZGlvIGFjY3Vtc2FuIGFsaXF1ZXQgdml0YWUgYSBsZWN0dXMuIFBlbGxlbnRlc3F1ZSBsb2JvcnRpcyB2aXZlcnJhIGNvbnNlcXVhdC4gTWF1cmlzIGVsZW1lbnR1bSBjdXJzdXMgbnVsbGEsIHNpdCBhbWV0IGhlbmRyZXJpdCBqdXN0byBkaWN0dW0gc2VkLiBNYWVjZW5hcyBkaWFtIG9kaW8sIGZyaW5naWxsYSBhYyBjb25ndWUgcXVpcywgYWRpcGlzY2luZyB1dCBlbGl0LgoKQWxpcXVhbSBsb3JlbSBlcm9zLCBwaGFyZXRyYSBuZWMgZWdlc3RhcyB2aXRhZSwgbWF0dGlzIG5lYyByaXN1cy4gTWF1cmlzIGFyY3UgbWFzc2EsIHNvZGFsZXMgZWdldCBncmF2aWRhIHNlZCwgdml2ZXJyYSB2aXRhZSB0dXJwaXMuIFV0IGxpZ3VsYSB1cm5hLCBldWlzbW9kIGFjIHRpbmNpZHVudCBldSwgZmF1Y2lidXMgc2VkIGZlbGlzLiBQcmFlc2VudCBtb2xsaXMsIGlwc3VtIHF1aXMgcmhvbmN1cyBkaWduaXNzaW0sIG9kaW8gc2VtIHZlbmVuYXRpcyBudWxsYSwgYXQgY29uc2VxdWF0IGZlbGlzIGF1Z3VlIHZlbCBlcmF0LiBOYW0gZmVybWVudHVtIGZldWdpYXQgdm9sdXRwYXQuIENsYXNzIGFwdGVudCB0YWNpdGkgc29jaW9zcXUgYWQgbGl0b3JhIHRvcnF1ZW50IHBlciBjb251YmlhIG5vc3RyYSwgcGVyIGluY2VwdG9zIGhpbWVuYWVvcy4gRXRpYW0gdml0YWUgZHVpIGluIG5pc2kgYWRpcGlzY2luZyB1bHRyaWNpZXMgbm9uIGV1IGp1c3RvLiBEb25lYyB0cmlzdGlxdWUgdWx0cmljaWVzIGFkaXBpc2NpbmcuIE51bGxhIHNvZGFsZXMsIG51bmMgYSB0cmlzdGlxdWUgZWxlbWVudHVtLCBlcmF0IG5lcXVlIGVnZXN0YXMgbmlzbCwgYXQgaGVuZHJlcml0IG9yY2kgc2FwaWVuIHNlZCBsaWJlcm8uIFZpdmFtdXMgYSBtYXVyaXMgdHVycGlzLCBxdWlzIGxhb3JlZXQgaXBzdW0uIE51bmMgbmVjIG1pIGV0IG5pc2wgcGVsbGVudGVzcXVlIHNjZWxlcmlzcXVlLiBWaXZhbXVzIHZvbHV0cGF0LCBqdXN0byB0cmlzdGlxdWUgbGFjaW5pYSBjb25kaW1lbnR1bSwgZXJhdCBqdXN0byB1bHRyaWNlcyB1cm5hLCBlbGVtZW50dW0gdml2ZXJyYSBlcm9zIGF1Z3VlIG5vbiBsaWJlcm8uIFNlZCBtb2xsaXMgbW9sbGlzIGFyY3UsIGF0IGZlcm1lbnR1bSBkaWFtIHN1c2NpcGl0IHF1aXMuCgpFdGlhbSBzaXQgYW1ldCBuaWJoIGp1c3RvLCBwb3N1ZXJlIHZvbHV0cGF0IG51bmMuIE1vcmJpIHBlbGxlbnRlc3F1ZSBuZXF1ZSBpbiBvcmNpIHZvbHV0cGF0IGV1IHNjZWxlcmlzcXVlIGxvcmVtIGRpY3R1bS4gTWF1cmlzIG1vbGxpcyBpYWN1bGlzIGVzdCwgbmVjIHNhZ2l0dGlzIHNhcGllbiBjb25zZXF1YXQgaWQuIE51bmMgbmVjIG1hbGVzdWFkYSBvZGlvLiBEdWlzIHF1aXMgc3VzY2lwaXQgb2Rpby4gTWF1cmlzIHB1cnVzIGR1aSwgc29kYWxlcyBpZCBtYXR0aXMgc2l0IGFtZXQsIHBvc3VlcmUgaW4gYXJjdS4gUGhhc2VsbHVzIHBvcnRhIGVsZW1lbnR1bSBjb252YWxsaXMuIE1hZWNlbmFzIGF0IG9yY2kgZXQgbWkgdnVscHV0YXRlIHNvbGxpY2l0dWRpbiBpbiBpbiB0dXJwaXMuIFBlbGxlbnRlc3F1ZSBjdXJzdXMgYWRpcGlzY2luZyBuZXF1ZSBzaXQgYW1ldCBjb21tb2RvLiBGdXNjZSB1dCBtaSBldSBsZWN0dXMgcG9ydHRpdG9yIHZvbHV0cGF0IGV0IG5lYyBmZWxpcy4KCkN1cmFiaXR1ciBzY2VsZXJpc3F1ZSBlcm9zIHF1aXMgbmlzbCB2aXZlcnJhIHZlbCB1bHRyaWNlcyB2ZWxpdCB2ZXN0aWJ1bHVtLiBTZWQgbG9ib3J0aXMgcHVsdmluYXIgc2FwaWVuIGFjIHZlbmVuYXRpcy4gU2VkIGFudGUgbmliaCwgcmhvbmN1cyBlZ2V0IGRpY3R1bSBpbiwgbW9sbGlzIHV0IG5pc2kuIFBoYXNlbGx1cyBmYWNpbGlzaXMgbWkgbm9uIGxvcmVtIHRyaXN0aXF1ZSBub24gZWxlaWZlbmQgc2VtIGZyaW5naWxsYS4gSW50ZWdlciB1dCBhdWd1ZSBlc3QuIEluIHZlbmVuYXRpcyB0aW5jaWR1bnQgc2NlbGVyaXNxdWUuIEV0aWFtIGFudGUgZHVpLCBwb3N1ZXJlIHF1aXMgbWFsZXN1YWRhIHZpdGFlLCBtYWxlc3VhZGEgYSBhcmN1LiBBZW5lYW4gZmF1Y2lidXMgdmVuZW5hdGlzIHNhcGllbiwgdXQgZmFjaWxpc2lzIG5pc2kgYmxhbmRpdCB2ZWwuIEFlbmVhbiBhYyBsb3JlbSBldSBzZW0gZmVybWVudHVtIHBsYWNlcmF0LiBQcm9pbiBuZXF1ZSBwdXJ1cywgYWxpcXVldCB1dCB0aW5jaWR1bnQgdXQsIGNvbnZhbGxpcyBzaXQgYW1ldCBlcm9zLiBQaGFzZWxsdXMgdmVoaWN1bGEgdWxsYW1jb3JwZXIgZW5pbSBub24gdmVoaWN1bGEuIEV0aWFtIHBvcnRhIG9kaW8gdXQgaXBzdW0gYWRpcGlzY2luZyBlZ2VzdGFzIGlkIGEgb2Rpby4gUGVsbGVudGVzcXVlIGJsYW5kaXQsIHNhcGllbiB1dCBwdWx2aW5hciBpbnRlcmR1bSwgbWkgbnVsbGEgaGVuZHJlcml0IGVsaXQsIGluIHRlbXBvciBkaWFtIGVuaW0gYSB1cm5hLiBJbiB0ZWxsdXMgb2Rpbywgb3JuYXJlIHNlZCBjb25kaW1lbnR1bSBhLCBtYXR0aXMgZXUgYXVndWUuCgpGdXNjZSBoZW5kcmVyaXQgcG9ydHRpdG9yIGV1aXNtb2QuIERvbmVjIG1hbGVzdWFkYSBlZ2VzdGFzIHR1cnBpcywgZXQgdWx0cmljaWVzIGZlbGlzIGVsZW1lbnR1bSB2aXRhZS4gTnVsbGFtIGluIHNlbSBuaWJoLiBOdWxsYW0gdWx0cmljaWVzIGhlbmRyZXJpdCBqdXN0byBzaXQgYW1ldCBsb2JvcnRpcy4gU2VkIHRpbmNpZHVudCwgbWF1cmlzIGF0IG9ybmFyZSBsYW9yZWV0LCBzYXBpZW4gcHVydXMgZWxlbWVudHVtIGVsaXQsIG5lYyBwb3J0dGl0b3IgbmlzbCBwdXJ1cyBldCBlcmF0LiBEb25lYyBmZWxpcyBuaXNpLCBydXRydW0gdWxsYW1jb3JwZXIgZ3JhdmlkYSBhYywgdGluY2lkdW50IHNpdCBhbWV0IHVybmEuIFByb2luIHZlbCBqdXN0byB2aXRhZSBlcm9zIHNhZ2l0dGlzIGJpYmVuZHVtIGEgdXQgbmliaC4gUGhhc2VsbHVzIHNvZGFsZXMgbGFvcmVldCB0aW5jaWR1bnQuIE1hZWNlbmFzIG9kaW8gbWFzc2EsIGNvbmRpbWVudHVtIGlkIGFsaXF1ZXQgdXQsIHJob25jdXMgdmVsIGxlY3R1cy4gRHVpcyBwaGFyZXRyYSBjb25zZWN0ZXR1ciBzYXBpZW4uIFBoYXNlbGx1cyBwb3N1ZXJlIHVsdHJpY2llcyBtYXNzYSwgbm9uIHJob25jdXMgcmlzdXMgYWxpcXVhbSB0ZW1wdXMuCgpQcmFlc2VudCB2ZW5lbmF0aXMgbWFnbmEgaWQgc2VtIGRpY3R1bSBldSB2ZWhpY3VsYSBpcHN1bSB2dWxwdXRhdGUuIFNlZCBhIGNvbnZhbGxpcyBzYXBpZW4uIFNlZCBqdXN0byBkb2xvciwgcmhvbmN1cyB2ZWwgcnV0cnVtIG1hdHRpcywgc29sbGljaXR1ZGluIHV0IHJpc3VzLiBOdWxsYW0gc2l0IGFtZXQgY29udmFsbGlzIGVzdC4gRXRpYW0gbm9uIHRpbmNpZHVudCBsaWd1bGEuIEZ1c2NlIHN1c2NpcGl0IHByZXRpdW0gZWxpdCBhdCB1bGxhbWNvcnBlci4gUXVpc3F1ZSBzb2xsaWNpdHVkaW4sIGRpYW0gaWQgaW50ZXJkdW0gcG9ydGEsIG1ldHVzIGlwc3VtIHZvbHV0cGF0IGxpYmVybywgaWQgdmVuZW5hdGlzIGZlbGlzIG9yY2kgbm9uIHZlbGl0LiBTdXNwZW5kaXNzZSBwb3RlbnRpLiBNYXVyaXMgcnV0cnVtLCB0b3J0b3Igc2l0IGFtZXQgcGVsbGVudGVzcXVlIHRpbmNpZHVudCwgZXJhdCBxdWFtIHVsdHJpY2llcyBvZGlvLCBpZCBhbGlxdWFtIGVsaXQgbGVvIG5lYyBsZW8uIFBlbGxlbnRlc3F1ZSBqdXN0byBlcm9zLCBydXRydW0gYXQgZmV1Z2lhdCBuZWMsIHBvcnRhIGV0IHRlbGx1cy4gQWVuZWFuIGVnZXQgbWV0dXMgbGVjdHVzLgoKUHJhZXNlbnQgZXVpc21vZCwgdHVycGlzIHF1aXMgbGFvcmVldCBjb25zZXF1YXQsIG5lcXVlIGFudGUgaW1wZXJkaWV0IHF1YW0sIGFjIHNlbXBlciB0b3J0b3IgbmliaCBpbiBudWxsYS4gSW50ZWdlciBzY2VsZXJpc3F1ZSBlcm9zIHZlaGljdWxhIHVybmEgbGFjaW5pYSBhYyBmYWNpbGlzaXMgbWF1cmlzIGFjY3Vtc2FuLiBQaGFzZWxsdXMgYXQgbWF1cmlzIG5pYmguIEN1cmFiaXR1ciBlbmltIGFudGUsIHJ1dHJ1bSBzZWQgYWRpcGlzY2luZyBoZW5kcmVyaXQsIHBlbGxlbnRlc3F1ZSBub24gYXVndWUuIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBOYW0gdGVtcHVzIGV1aXNtb2QgbWFzc2EgYSBkaWN0dW0uIERvbmVjIHNpdCBhbWV0IGp1c3RvIGFjIGRpYW0gdWx0cmljaWVzIHVsdHJpY2llcy4gU2VkIHRpbmNpZHVudCBlcmF0IHF1aXMgcXVhbSB0ZW1wdXMgdmVsIGludGVyZHVtIGVyYXQgcmhvbmN1cy4gSW4gaGFjIGhhYml0YXNzZSBwbGF0ZWEg
ZGljdHVtc3QuIFZlc3RpYnVsdW0gdmVoaWN1bGEgdmFyaXVzIHNlbSBlZ2V0IGludGVyZHVtLiBDcmFzIGJpYmVuZHVtIGxlbyBuZWMgZmVsaXMgdmVuZW5hdGlzIHNlZCBwaGFyZXRyYSBzZW0gZmV1Z2lhdC4gQ3VtIHNvY2lpcyBuYXRvcXVlIHBlbmF0aWJ1cyBldCBtYWduaXMgZGlzIHBhcnR1cmllbnQgbW9udGVzLCBuYXNjZXR1ciByaWRpY3VsdXMgbXVzLiBTZWQgcXVhbSBvcmNpLCBtb2xsaXMgZWdldCBzYWdpdHRpcyBhY2N1bXNhbiwgdnVscHV0YXRlIHNpdCBhbWV0IGR1aS4gUHJhZXNlbnQgZXUgZWxlbWVudHVtIGFyY3UuCgpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBWZXN0aWJ1bHVtIG5pc2wgbWV0dXMsIGhlbmRyZXJpdCB1dCBsYW9yZWV0IHNlZCwgY29uc2VjdGV0dXIgYXQgcHVydXMuIER1aXMgaW50ZXJkdW0gY29uZ3VlIGxvYm9ydGlzLiBOdWxsYW0gc2VkIG1hc3NhIHBvcnRhIGZlbGlzIGVsZWlmZW5kIGNvbnNlcXVhdCBzaXQgYW1ldCBuZWMgbWV0dXMuIEFsaXF1YW0gcGxhY2VyYXQgZGljdHVtIGVyYXQgYXQgZWxlaWZlbmQuIFZlc3RpYnVsdW0gbGliZXJvIGFudGUsIHVsbGFtY29ycGVyIGEgcG9ydHRpdG9yIHN1c2NpcGl0LCBhY2N1bXNhbiB2ZWwgbmlzaS4gRG9uZWMgZXQgbWFnbmEgbmVxdWUuIE5hbSBlbGVtZW50dW0gdWx0cmljZXMganVzdG8sIGVnZXQgc29sbGljaXR1ZGluIHNhcGllbiBpbXBlcmRpZXQgZWdldC4gTnVsbGFtIGF1Y3RvciBkaWN0dW0gbnVuYywgYXQgZmV1Z2lhdCBvZGlvIHZlc3RpYnVsdW0gYS4gU2VkIGVyYXQgbnVsbGEsIHZpdmVycmEgaGVuZHJlcml0IGNvbW1vZG8gaWQsIHVsbGFtY29ycGVyIGFjIG9yY2kuIFBoYXNlbGx1cyBwZWxsZW50ZXNxdWUgZmV1Z2lhdCBzdXNjaXBpdC4gRXRpYW0gZWdlc3RhcyBmZXJtZW50dW0gZW5pbS4gRXRpYW0gZ3JhdmlkYSBpbnRlcmR1bSB0ZWxsdXMgYWMgbGFvcmVldC4gTW9yYmkgbWF0dGlzIGFsaXF1ZXQgZXJvcywgbm9uIHRlbXBvciBlcmF0IHVsbGFtY29ycGVyIGluLiBFdGlhbSBwdWx2aW5hciBpbnRlcmR1bSB0dXJwaXMgYWMgdmVoaWN1bGEuIFNlZCBxdWFtIGp1c3RvLCBhY2N1bXNhbiBpZCBjb25zZWN0ZXR1ciBhLCBhbGlxdWV0IHNlZCBsZW8uIEFlbmVhbiB2aXRhZSBibGFuZGl0IG1hdXJpcy4KCkluIHNlZCBlcm9zIGF1Z3VlLCBub24gcnV0cnVtIG9kaW8uIEV0aWFtIHZpdGFlIGR1aSBuZXF1ZSwgaW4gdHJpc3RpcXVlIG1hc3NhLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgTWFlY2VuYXMgZGljdHVtIGVsaXQgYXQgbGVjdHVzIHRlbXBvciBub24gcGhhcmV0cmEgbmlzbCBoZW5kcmVyaXQuIFNlZCBzZWQgcXVhbSBldSBsZWN0dXMgdWx0cmljZXMgbWFsZXN1YWRhIHRpbmNpZHVudCBhIGVzdC4gTmFtIHZlbCBlcm9zIHJpc3VzLiBNYWVjZW5hcyBlcm9zIGVsaXQsIGJsYW5kaXQgZmVybWVudHVtIHRlbXBvciBlZ2V0LCBsb2JvcnRpcyBpZCBkaWFtLiBWZXN0aWJ1bHVtIGxhY2luaWEgbGFjdXMgdml0YWUgbWFnbmEgdm9sdXRwYXQgZXUgZGlnbmlzc2ltIGVyb3MgY29udmFsbGlzLiBWaXZhbXVzIGFjIHZlbGl0IHRlbGx1cywgYSBjb25ndWUgbmVxdWUuIEludGVnZXIgbWkgbnVsbGEsIHZhcml1cyBub24gbHVjdHVzIGluLCBkaWN0dW0gc2l0IGFtZXQgc2VtLiBVdCBsYW9yZWV0LCBzYXBpZW4gc2l0IGFtZXQgc2NlbGVyaXNxdWUgcG9ydGEsIHB1cnVzIHNhcGllbiB2ZXN0aWJ1bHVtIG5pYmgsIHNlZCBsdWN0dXMgbGliZXJvIG1hc3NhIGFjIGVsaXQuIERvbmVjIGlhY3VsaXMgb2RpbyBlZ2V0IG9kaW8gc2FnaXR0aXMgbmVjIHZlbmVuYXRpcyBsb3JlbSBibGFuZGl0LgoKQWxpcXVhbSBpbXBlcmRpZXQgdGVsbHVzIHBvc3VlcmUganVzdG8gdmVoaWN1bGEgc2VkIHZlc3RpYnVsdW0gYW50ZSB0cmlzdGlxdWUuIEZ1c2NlIGZldWdpYXQgZmF1Y2lidXMgcHVydXMgbmVjIG1vbGVzdGllLiBOdWxsYSB0ZW1wb3IgbmVxdWUgaWQgbWFnbmEgaWFjdWxpcyBxdWlzIHNvbGxpY2l0dWRpbiBlcm9zIHNlbXBlci4gUHJhZXNlbnQgdml2ZXJyYSBzYWdpdHRpcyBsdWN0dXMuIE1vcmJpIHNpdCBhbWV0IG1hZ25hIHNlZCBvZGlvIGdyYXZpZGEgdmFyaXVzLiBVdCBuaXNpIGxpYmVybywgdnVscHV0YXRlIGZldWdpYXQgcHJldGl1bSB0ZW1wdXMsIGVnZXN0YXMgc2l0IGFtZXQganVzdG8uIFBlbGxlbnRlc3F1ZSBjb25zZXF1YXQgdGVtcG9yIG5pc2kgaW4gbG9ib3J0aXMuIFNlZCBmZXJtZW50dW0gY29udmFsbGlzIGR1aSBhYyBzb2xsaWNpdHVkaW4uIEludGVnZXIgYXVjdG9yIGF1Z3VlIGVnZXQgdGVsbHVzIHRlbXB1cyBmcmluZ2lsbGEuIFByb2luIG5lYyBkb2xvciBzYXBpZW4sIG5lYyB0cmlzdGlxdWUgbmliaC4gQWxpcXVhbSBhIHZlbGl0IGF0IG1pIG1hdHRpcyBhbGlxdWV0LgoKUGVsbGVudGVzcXVlIGhhYml0YW50IG1vcmJpIHRyaXN0aXF1ZSBzZW5lY3R1cyBldCBuZXR1cyBldCBtYWxlc3VhZGEgZmFtZXMgYWMgdHVycGlzIGVnZXN0YXMuIEFsaXF1YW0gdWx0cmljZXMgZXJhdCBub24gdHVycGlzIGF1Y3RvciBpZCBvcm5hcmUgbWF1cmlzIHNhZ2l0dGlzLiBRdWlzcXVlIHBvcnR0aXRvciwgdGVsbHV
zIHV0IGNvbnZhbGxpcyBzYWdpdHRpcywgbWkgbGliZXJvIGZldWdpYXQgdGVsbHVzLCByaG9uY3VzIHBsYWNlcmF0IGlwc3VtIHRvcnRvciBpZCByaXN1cy4gRG9uZWMgdGluY2lkdW50IGZldWdpYXQgbGVvLiBDcmFzIGlkIG1pIG5lcXVlLCBldSBtYWxlc3VhZGEgZXJvcy4gVXQgbW9sZXN0aWUgbWFnbmEgcXVpcyBsaWJlcm8gcGxhY2VyYXQgbWFsZXN1YWRhLiBBbGlxdWFtIGVyYXQgdm9sdXRwYXQuIEFsaXF1YW0gbm9uIG1hdXJpcyBsb3JlbSwgaW4gYWRpcGlzY2luZyBtZXR1cy4gRG9uZWMgZWdldCBpcHN1bSBpbiBlbGl0IGNvbW1vZG8gb3JuYXJlIGJpYmVuZHVtIGEgbmliaC4gVml2YW11cyBvZGlvIGVyYXQsIHBsYWNlcmF0IGFjIHZlc3RpYnVsdW0gZWdldCwgbWFsZXN1YWRhIHV0IG5pc2kuIEV0aWFtIHN1c2NpcGl0IHNvbGxpY2l0dWRpbiBsZW8gc2VtcGVyIHNvbGxpY2l0dWRpbi4gU2VkIHJob25jdXMgcmlzdXMgc2l0IGFtZXQgc2VtIGVsZWlmZW5kIGRpY3R1bSBwcmV0aXVtIHNhcGllbiBlZ2VzdGFzLiBOdWxsYSBhdCB1cm5hIG51bmMsIHZlbCBhbGlxdWV0IGxlby4gUHJhZXNlbnQgdWx0cmljaWVzLCBtaSBldSBwcmV0aXVtIGxvYm9ydGlzLCBlcmF0IG5pYmggZXVpc21vZCBsZW8sIHNpdCBhbWV0IGdyYXZpZGEgc2FwaWVuIGVyb3MgZXQgdHVycGlzLiBEb25lYyBsYWNpbmlhIHZlbmVuYXRpcyBsZWN0dXMsIG5vbiBsYWNpbmlhIG1pIGhlbmRyZXJpdCBzaXQgYW1ldC4gSW50ZWdlciBzZWQgZmVsaXMgdmVsIG9yY2kgYWxpcXVhbSBwdWx2aW5hci4gUGhhc2VsbHVzIGV0IHJpc3VzIGlkIGVyYXQgZXVpc21vZCB0aW5jaWR1bnQuIFNlZCBsdWN0dXMgdGVtcG9yIG5pc2ksIG5lYyB0ZW1wb3IgaXBzdW0gZWxlbWVudHVtIGVnZXQuIEludGVnZXIgbmlzbCB0b3J0b3IsIHZpdmVycmEgaW4gZGFwaWJ1cyBhdCwgbWF0dGlzIGFjIGVyYXQuIEN1cmFiaXR1ciBuZWMgZHVpIGxlY3R1cy4KClBoYXNlbGx1cyBzdXNjaXBpdCwgdG9ydG9yIGV1IHZhcml1cyBmcmluZ2lsbGEsIHNhcGllbiBtYWduYSBlZ2VzdGFzIHJpc3VzLCB1dCBzdXNjaXBpdCBkdWkgbWF1cmlzIHF1aXMgdmVsaXQuIENyYXMgYSBzYXBpZW4gcXVpcyBzYXBpZW4gaGVuZHJlcml0IHRyaXN0aXF1ZSBhIHNpdCBhbWV0IGVsaXQuIFBlbGxlbnRlc3F1ZSBkdWkgYXJjdSwgbWFsZXN1YWRhIGV0IHNvZGFsZXMgc2l0IGFtZXQsIGRhcGlidXMgdmVsIHF1YW0uIFNlZCBub24gYWRpcGlzY2luZyBsaWd1bGEuIFV0IHZ1bHB1dGF0ZSBwdXJ1cyBhdCBuaXNsIHBvc3VlcmUgc29kYWxlcy4gTWFlY2VuYXMgZGlhbSB2ZWxpdCwgdGluY2lkdW50IGlkIG1hdHRpcyBldSwgYWxpcXVhbSBhYyBuaXNpLiBNYWVjZW5hcyBwcmV0aXVtLCBhdWd1ZSBhIHNhZ2l0dGlzIHN1c2NpcGl0LCBsZW8gbGlndWxhIGVsZWlmZW5kIGRvbG9yLCBtb2xsaXMgZmV1Z2lhdCBvZGlvIGF1Z3VlIG5vbiBlcm9zLiBQZWxsZW50ZXNxdWUgc2NlbGVyaXNxdWUgb3JjaSBwcmV0aXVtIHF1YW0gbW9sbGlzIGF0IGxvYm9ydGlzIGR1aSBmYWNpbGlzaXMuIE1vcmJpIGNvbmd1ZSBtZXR1cyBpZCB0b3J0b3IgcG9ydGEgZnJpbmdpbGxhLiBTZWQgbG9yZW0gbWksIG1vbGVzdGllIGZlcm1lbnR1bSBzYWdpdHRpcyBhdCwgZ3JhdmlkYSBhIG5pc2kuIERvbmVjIGV1IHZlc3RpYnVsdW0gdmVsaXQuIEluIHZpdmVycmEsIGVuaW0gZXUgZWxlbWVudHVtIHNvZGFsZXMsIGVuaW0gb2RpbyBkYXBpYnVzIHVybmEsIGVnZXQgY29tbW9kbyBuaXNsIG1hdXJpcyB1dCBvZGlvLiBDdXJhYml0dXIgbmVjIGVuaW0gbnVsbGEuIEluIG5lYyBlbGl0IGlwc3VtLiBOdW5jIGluIG1hc3NhIHN1c2NpcGl0IG1hZ25hIGVsZW1lbnR1bSBmYXVjaWJ1cyBpbiBuZWMgaXBzdW0uIE51bGxhbSBzdXNjaXBpdCBtYWxlc3VhZGEgZWxlbWVudHVtLiBFdGlhbSBzZWQgbWkgaW4gbmliaCB1bHRyaWNpZXMgdmVuZW5hdGlzIG5lYyBwaGFyZXRyYSBtYWduYS4gSW4gcHVydXMgYW50ZSwgcmhvbmN1cyB2ZWwgcGxhY2VyYXQgc2VkLCBmZXJtZW50dW0gc2l0IGFtZXQgZHVpLiBTZWQgYXQgc29kYWxlcyB2ZWxpdC4KCkR1aXMgc3VzY2lwaXQgcGVsbGVudGVzcXVlIHBlbGxlbnRlc3F1ZS4gUHJhZXNlbnQgcG9ydGEgbG9ib3J0aXMgY3Vyc3VzLiBRdWlzcXVlIHNhZ2l0dGlzIHZlbGl0IG5vbiB0ZWxsdXMgYmliZW5kdW0gYXQgc29sbGljaXR1ZGluIGxhY3VzIGFsaXF1ZXQuIFNlZCBuaWJoIHJpc3VzLCBibGFuZGl0IGEgYWxpcXVldCBlZ2V0LCB2ZWhpY3VsYSBldCBlc3QuIFN1c3BlbmRpc3NlIGZhY2lsaXNpcyBiaWJlbmR1bSBhbGlxdWFtLiBGdXNjZSBjb25zZWN0ZXR1ciBjb252YWxsaXMgZXJhdCwgZWdldCBtb2xsaXMgZGlhbSBmZXJtZW50dW0gc29sbGljaXR1ZGluLiBRdWlzcXVlIHRpbmNpZHVudCBwb3J0dGl0b3IgcHJldGl1bS4gTnVsbGFtIGlkIG5pc2wgZXQgdXJuYSB2dWxwdXRhdGUgZGFwaWJ1cy4gRG9uZWMgcXVpcyBsb3JlbSB1cm5hLiBRdWlzcXVlIGlkIGp1c3RvIG5lYyBudW5jIGJsYW5kaXQgY29udmFsbGlzLiBOdW5jIHZvbHV0cGF0LCBtYXNzYSBzb2xsaWNpdHVkaW4gYWRpcGlzY2luZyB2ZXN0aWJ1bHVtLCBtYXNzYSB1cm5hIGNvbmd1ZSBsZWN0dXMsIHNpdCBhbWV0IHVsdHJpY2llcyBhdWd1ZSBvcmNpIGNvbnZhbGxpcyB0dXJwaXMuIE51bGxhIGF0IG
xvcmVtIGVsaXQuIE51bmMgdHJpc3RpcXVlLCBxdWFtIGZhY2lsaXNpcyBjb21tb2RvIHBvcnR0aXRvciwgbGFjdXMgbGlndWxhIGFjY3Vtc2FuIG5pc2ksIGV0IGxhb3JlZXQganVzdG8gYW50ZSB2aXRhZSBlcm9zLiBDdXJhYml0dXIgc2VkIGF1Z3VlIGFyY3UuIFBoYXNlbGx1cyBwb3J0dGl0b3IgdmVzdGlidWx1bSBmZWxpcywgdXQgY29uc2VjdGV0dXIgYXJjdSB0ZW1wb3Igbm9uLiBJbiBqdXN0byByaXN1cywgc2VtcGVyIGV0IHN1c2NpcGl0IGlkLCB1bGxhbWNvcnBlciBhdCB1cm5hLiBRdWlzcXVlIHRpbmNpZHVudCwgdXJuYSBuZWMgYWxpcXVhbSB0cmlzdGlxdWUsIG5pYmggb2RpbyBmYXVjaWJ1cyBhdWd1ZSwgaW4gb3JuYXJlIGVuaW0gdHVycGlzIGFjY3Vtc2FuIGRvbG9yLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gU3VzcGVuZGlzc2Ugc29kYWxlcyB2YXJpdXMgdHVycGlzIGV1IGZlcm1lbnR1bS4KCk1vcmJpIHVsdHJpY2llcyBkaWFtIGVnZXQgbWFzc2EgcG9zdWVyZSBsb2JvcnRpcy4gQWxpcXVhbSB2b2x1dHBhdCBwZWxsZW50ZXNxdWUgZW5pbSBldSBwb3J0dGl0b3IuIERvbmVjIGxhY3VzIGZlbGlzLCBjb25zZWN0ZXR1ciBhIHByZXRpdW0gdml0YWUsIGJpYmVuZHVtIG5vbiBlbmltLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gRXRpYW0gdXQgbmliaCBhIHF1YW0gcGVsbGVudGVzcXVlIGF1Y3RvciB1dCBpZCB2ZWxpdC4gRHVpcyBsYWNpbmlhIGp1c3RvIGVnZXQgbWkgcGxhY2VyYXQgYmliZW5kdW0uIEN1bSBzb2NpaXMgbmF0b3F1ZSBwZW5hdGlidXMgZXQgbWFnbmlzIGRpcyBwYXJ0dXJpZW50IG1vbnRlcywgbmFzY2V0dXIgcmlkaWN1bHVzIG11cy4gRG9uZWMgdmVsaXQgdG9ydG9yLCB0ZW1wdXMgbmVjIHRyaXN0aXF1ZSBpZCwgYWxpcXVldCBzaXQgYW1ldCB0dXJwaXMuIFByYWVzZW50IGV0IG5lcXVlIG5lYyBtYWduYSBwb3J0YSBmcmluZ2lsbGEuIE1vcmJpIGlkIGVnZXN0YXMgZXJvcy4gRG9uZWMgc2VtcGVyIHRpbmNpZHVudCB1bGxhbWNvcnBlci4gUGhhc2VsbHVzIHRlbXB1cyBsYWNpbmlhIGhlbmRyZXJpdC4gUXVpc3F1ZSBmYXVjaWJ1cyBwcmV0aXVtIG5lcXVlIG5vbiBjb252YWxsaXMuIE51bmMgbWFsZXN1YWRhIGFjY3Vtc2FuIHJob25jdXMuIENyYXMgbG9ib3J0aXMsIHNlbSBzZWQgZnJpbmdpbGxhIGNvbnZhbGxpcywgYXVndWUgdmVsaXQgc2VtcGVyIG5pc2wsIGNvbW1vZG8gdmFyaXVzIG5pc2kgZGlhbSBhYyBsZW8uCgpRdWlzcXVlIGludGVyZHVtIHRlbGx1cyBhYyBhbnRlIHBvc3VlcmUgdXQgY3Vyc3VzIGxvcmVtIGVnZXN0YXMuIE51bGxhIGZhY2lsaXNpLiBBZW5lYW4gc2VkIG1hc3NhIG5lYyBuaXNpIHNjZWxlcmlzcXVlIHZ1bHB1dGF0ZS4gRXRpYW0gY29udmFsbGlzIGNvbnNlY3RldHVyIGlhY3VsaXMuIE1hZWNlbmFzIGFjIHB1cnVzIHV0IGFudGUgZGlnbmlzc2ltIGF1Y3RvciBhYyBxdWlzIGxvcmVtLiBQZWxsZW50ZXNxdWUgc3VzY2lwaXQgdGluY2lkdW50IG9yY2kuIEZ1c2NlIGFsaXF1YW0gZGFwaWJ1cyBvcmNpLCBhdCBiaWJlbmR1bSBpcHN1bSBhZGlwaXNjaW5nIGVnZXQuIE1vcmJpIHBlbGxlbnRlc3F1ZSBoZW5kcmVyaXQgcXVhbSwgbmVjIHBsYWNlcmF0IHVybmEgdnVscHV0YXRlIHNlZC4gUXVpc3F1ZSB2ZWwgZGlhbSBsb3JlbS4gUHJhZXNlbnQgaWQgZGlhbSBxdWlzIGVuaW0gZWxlbWVudHVtIHJob25jdXMgc2FnaXR0aXMgZWdldCBwdXJ1cy4gUXVpc3F1ZSBmcmluZ2lsbGEgYmliZW5kdW0gbGVvIGluIGxhb3JlZXQuIFZlc3RpYnVsdW0gaWQgbmliaCByaXN1cywgbm9uIGVsZW1lbnR1bSBtZXR1cy4gVXQgYSBmZWxpcyBkaWFtLCBub24gbW9sbGlzIG5pc2wuIENyYXMgZWxpdCBhbnRlLCB1bGxhbWNvcnBlciBxdWlzIGlhY3VsaXMgZXUsIHNvZGFsZXMgdmVsIGVzdC4gQ3VyYWJpdHVyIHF1aXMgbG9ib3J0aXMgZG9sb3IuIEFsaXF1YW0gbWF0dGlzIGdyYXZpZGEgbWV0dXMgcGVsbGVudGVzcXVlIHZ1bHB1dGF0ZS4KClV0IGlkIGF1Z3VlIGlkIGRvbG9yIGx1Y3R1cyBldWlzbW9kIGV0IHF1aXMgdmVsaXQuIE1hZWNlbmFzIGVuaW0gZG9sb3IsIHRlbXB1cyBzaXQgYW1ldCBoZW5kcmVyaXQgZXUsIGZhdWNpYnVzIHZpdGFlIG5lcXVlLiBQcm9pbiBzaXQgYW1ldCB2YXJpdXMgZWxpdC4gUHJvaW4gdmFyaXVzIGZlbGlzIHVsbGFtY29ycGVyIHB1cnVzIGRpZ25pc3NpbSBjb25zZXF1YXQuIENyYXMgY3Vyc3VzIHRlbXB1cyBlcm9zLiBOdW5jIHVsdHJpY2VzIHZlbmVuYXRpcyB1bGxhbWNvcnBlci4gQWxpcXVhbSBldCBmZXVnaWF0IHRlbGx1cy4gUGhhc2VsbHVzIHNpdCBhbWV0IHZlc3RpYnVsdW0gZWxpdC4gUGhhc2VsbHVzIGFjIHB1cnVzIGxhY3VzLCBldCBhY2N1bXNhbiBlcm9zLiBNb3JiaSB1bHRyaWNlcywgcHVydXMgYSBwb3J0YSBzb2RhbGVzLCBvZGlvIG1ldHVzIHBvc3VlcmUgbmVxdWUsIG5lYyBlbGVtZW50dW0gcmlzdXMgdHVycGlzIHNpdCBhbWV0IG1hZ25hLiBTZWQgZXN0IHF1YW0sIHVsdHJpY2llcyBhdCBjb25ndWUgYWRpcGlzY2luZywgbG9ib3J0a
XMgaW4ganVzdG8uIFByb2luIGlhY3VsaXMgZGljdHVtIG51bmMsIGV1IGxhb3JlZXQgcXVhbSB2YXJpdXMgdml0YWUuIERvbmVjIHNpdCBhbWV0IGZldWdpYXQgdHVycGlzLiBNYXVyaXMgc2l0IGFtZXQgbWFnbmEgcXVhbSwgYWMgY29uc2VjdGV0dXIgZHVpLiBDdXJhYml0dXIgZWdldCBtYWduYSB0ZWxsdXMsIGV1IHBoYXJldHJhIGZlbGlzLiBEb25lYyBzaXQgYW1ldCB0b3J0b3IgbmlzbC4gQWxpcXVhbSBldCB0b3J0b3IgZmFjaWxpc2lzIGxhY3VzIHRpbmNpZHVudCBjb21tb2RvLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gQ3VyYWJpdHVyIG51bmMgbWFnbmEsIHVsdHJpY2llcyBpZCBjb252YWxsaXMgYXQsIHVsbGFtY29ycGVyIHZpdGFlIG1hc3NhLgoKUGhhc2VsbHVzIHZpdmVycmEgaWFjdWxpcyBwbGFjZXJhdC4gTnVsbGEgY29uc2VxdWF0IGRvbG9yIHNpdCBhbWV0IGVyYXQgZGlnbmlzc2ltIHBvc3VlcmUuIE51bGxhIGxhY2luaWEgYXVndWUgdml0YWUgbWkgdGVtcG9yIGdyYXZpZGEuIFBoYXNlbGx1cyBub24gdGVtcG9yIHRlbGx1cy4gUXVpc3F1ZSBub24gZW5pbSBzZW1wZXIgdG9ydG9yIHNhZ2l0dGlzIGZhY2lsaXNpcy4gQWxpcXVhbSB1cm5hIGZlbGlzLCBlZ2VzdGFzIGF0IHBvc3VlcmUgbmVjLCBhbGlxdWV0IGV1IG5pYmguIFByYWVzZW50IHNlZCB2ZXN0aWJ1bHVtIGVuaW0uIE1hdXJpcyBpYWN1bGlzIHZlbGl0IGR1aSwgZXQgZnJpbmdpbGxhIGVuaW0uIE51bGxhIG5lYyBuaXNpIG9yY2kuIFNlZCB2b2x1dHBhdCwganVzdG8gZWdldCBmcmluZ2lsbGEgYWRpcGlzY2luZywgbmlzbCBudWxsYSBjb25kaW1lbnR1bSBsaWJlcm8sIHNlZCBzb2RhbGVzIGVzdCBlc3QgZXQgb2Rpby4gQ3JhcyBpcHN1bSBkdWksIHZhcml1cyBldSBlbGVtZW50dW0gY29uc2VxdWF0LCBmYXVjaWJ1cyBpbiBsZW8uIFBlbGxlbnRlc3F1ZSBoYWJpdGFudCBtb3JiaSB0cmlzdGlxdWUgc2VuZWN0dXMgZXQgbmV0dXMgZXQgbWFsZXN1YWRhIGZhbWVzIGFjIHR1cnBpcyBlZ2VzdGFzLgoKVXQgbWFsZXN1YWRhIG1vbGVzdGllIGVsZWlmZW5kLiBDdXJhYml0dXIgaWQgZW5pbSBkdWksIGV1IHRpbmNpZHVudCBuaWJoLiBNYXVyaXMgc2l0IGFtZXQgYW50ZSBsZW8uIER1aXMgdHVycGlzIGlwc3VtLCBiaWJlbmR1bSBzZWQgbWF0dGlzIHNpdCBhbWV0LCBhY2N1bXNhbiBxdWlzIGRvbG9yLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgQWVuZWFuIGEgaW1wZXJkaWV0IG1ldHVzLiBRdWlzcXVlIHNvbGxpY2l0dWRpbiBmZWxpcyBpZCBuZXF1ZSB0ZW1wb3Igc2NlbGVyaXNxdWUuIERvbmVjIGF0IG9yY2kgZmVsaXMuIFZpdmFtdXMgdGVtcHVzIGNvbnZhbGxpcyBhdWN0b3IuIERvbmVjIGludGVyZHVtIGV1aXNtb2QgbG9ib3J0aXMuIFNlZCBhdCBsYWN1cyBuZWMgb2RpbyBkaWduaXNzaW0gbW9sbGlzLiBTZWQgc2FwaWVuIG9yY2ksIHBvcnR0aXRvciB0ZW1wdXMgYWNjdW1zYW4gdmVsLCB0aW5jaWR1bnQgbmVjIGFudGUuIE51bmMgcmhvbmN1cyBlZ2VzdGFzIGRhcGlidXMuIFN1c3BlbmRpc3NlIGZlcm1lbnR1bSBkaWN0dW0gZnJpbmdpbGxhLiBOdWxsYW0gbmlzaSBqdXN0bywgZWxlaWZlbmQgYSBjb25zZWN0ZXR1ciBjb252YWxsaXMsIHBvcnR0aXRvciBldCB0b3J0b3IuIFByb2luIHZpdGFlIGxvcmVtIG5vbiBkb2xvciBzdXNjaXBpdCBsYWNpbmlhIGV1IGVnZXQgbnVsbGEuCgpTdXNwZW5kaXNzZSBlZ2VzdGFzLCBzYXBpZW4gc2l0IGFtZXQgYmxhbmRpdCBzY2VsZXJpc3F1ZSwgbnVsbGEgYXJjdSB0cmlzdGlxdWUgZHVpLCBhIHBvcnRhIGp1c3RvIHF1YW0gdml0YWUgYXJjdS4gSW4gbWV0dXMgbGliZXJvLCBiaWJlbmR1bSBub24gdm9sdXRwYXQgdXQsIGxhb3JlZXQgdmVsIHR1cnBpcy4gTnVuYyBmYXVjaWJ1cyB2ZWxpdCBldSBpcHN1bSBjb21tb2RvIG5lYyBpYWN1bGlzIGVyb3Mgdm9sdXRwYXQuIFZpdmFtdXMgY29uZ3VlIGF1Y3RvciBlbGl0IHNlZCBzdXNjaXBpdC4gRHVpcyBjb21tb2RvLCBsaWJlcm8gZXUgdmVzdGlidWx1bSBmZXVnaWF0LCBsZW8gbWkgZGFwaWJ1cyB0ZWxsdXMsIGluIHBsYWNlcmF0IG5pc2wgZHVpIGF0IGVzdC4gVmVzdGlidWx1bSB2aXZlcnJhIHRyaXN0aXF1ZSBsb3JlbSwgb3JuYXJlIGVnZXN0YXMgZXJhdCBydXRydW0gYS4gTnVsbGFtIGF0IGF1Z3VlIG1hc3NhLCB1dCBjb25zZWN0ZXR1ciBpcHN1bS4gUGVsbGVudGVzcXVlIG1hbGVzdWFkYSwgdmVsaXQgdXQgbG9ib3J0aXMgc2FnaXR0aXMsIG5pc2kgbWFzc2Egc2VtcGVyIG9kaW8sIG1hbGVzdWFkYSBzZW1wZXIgcHVydXMgbmlzbCB2ZWwgbGVjdHVzLiBOdW5jIGR1aSBzZW0sIG1hdHRpcyB2aXRhZSBsYW9yZWV0IHZpdGFlLCBzb2xsaWNpdHVkaW4gYWMgbGVvLiBOdWxsYSB2ZWwgZmVybWVudHVtIGVzdC4KClZpdmFtdXMgaW4gb2RpbyBhIG5pc2kgZGlnbmlzc2ltIHJob25jdXMgaW4gaW4gbGFjdXMuIERvbmVjIGV0IG5pc2wgdG9ydG9yLiBEb25lYyBzYWdpdHRpcyBjb25zZXF1YXQgbWksIHZlbCBwbGFjZXJhdCB0ZWxsdXMgY29udmFsbGlzIGlkLiBBbGlxdWFt
IGZhY2lsaXNpcyBydXRydW0gbmlzbCBzZWQgcHJldGl1bS4gRG9uZWMgZXQgbGFjaW5pYSBuaXNsLiBBbGlxdWFtIGVyYXQgdm9sdXRwYXQuIEN1cmFiaXR1ciBhYyBwdWx2aW5hciB0ZWxsdXMuIE51bGxhbSB2YXJpdXMgbG9ib3J0aXMgcG9ydGEuIENyYXMgZGFwaWJ1cywgbGlndWxhIHV0IHBvcnRhIHVsdHJpY2llcywgbGVvIGxhY3VzIHZpdmVycmEgcHVydXMsIHF1aXMgbW9sbGlzIHVybmEgcmlzdXMgZXUgbGVvLiBOdW5jIG1hbGVzdWFkYSBjb25zZWN0ZXR1ciBwdXJ1cywgdmVsIGF1Y3RvciBsZWN0dXMgc2NlbGVyaXNxdWUgcG9zdWVyZS4gTWFlY2VuYXMgZHVpIG1hc3NhLCB2ZXN0aWJ1bHVtIGJpYmVuZHVtIGJsYW5kaXQgbm9uLCBpbnRlcmR1bSBlZ2V0IG1hdXJpcy4gUGhhc2VsbHVzIGVzdCBhbnRlLCBwdWx2aW5hciBhdCBpbXBlcmRpZXQgcXVpcywgaW1wZXJkaWV0IHZlbCB1cm5hLiBRdWlzcXVlIGVnZXQgdm9sdXRwYXQgb3JjaS4gUXVpc3F1ZSBldCBhcmN1IHB1cnVzLCB1dCBmYXVjaWJ1cyB2ZWxpdC4KClByYWVzZW50IHNlZCBpcHN1bSB1cm5hLiBQcmFlc2VudCBzYWdpdHRpcyB2YXJpdXMgbWFnbmEsIGlkIGNvbW1vZG8gZG9sb3IgbWFsZXN1YWRhIGFjLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gUXVpc3F1ZSBzaXQgYW1ldCBudW5jIGV1IHNlbSBvcm5hcmUgdGVtcG9yLiBNYXVyaXMgaWQgZG9sb3IgbmVjIGVyYXQgY29udmFsbGlzIHBvcnRhIGluIGxvYm9ydGlzIG5pc2kuIEN1cmFiaXR1ciBoZW5kcmVyaXQgcmhvbmN1cyB0b3J0b3IgZXUgaGVuZHJlcml0LiBQZWxsZW50ZXNxdWUgZXUgYW50ZSB2ZWwgZWxpdCBsdWN0dXMgZWxlaWZlbmQgcXVpcyB2aXZlcnJhIG51bGxhLiBTdXNwZW5kaXNzZSBvZGlvIGRpYW0sIGV1aXNtb2QgZXUgcG9ydHRpdG9yIG1vbGVzdGllLCBzb2xsaWNpdHVkaW4gc2l0IGFtZXQgbnVsbGEuIFNlZCBhbnRlIHVybmEsIGRpY3R1bSBiaWJlbmR1bSByaG9uY3VzIGV0LCBibGFuZGl0IG5lYyBhbnRlLiBTdXNwZW5kaXNzZSB0b3J0b3IgYXVndWUsIGFjY3Vtc2FuIHF1aXMgc3VzY2lwaXQgaWQsIGFjY3Vtc2FuIHNpdCBhbWV0IGVyYXQuIERvbmVjIHBoYXJldHJhIHZhcml1cyBsb2JvcnRpcy4gTWFlY2VuYXMgaXBzdW0gZGlhbSwgZmF1Y2lidXMgZXUgdGVtcHVzIGlkLCBjb252YWxsaXMgbmVjIGVuaW0uIER1aXMgYXJjdSB0dXJwaXMsIGZyaW5naWxsYSBuZWMgZWdlc3RhcyB1dCwgZGlnbmlzc2ltIHRyaXN0aXF1ZSBudWxsYS4gQ3VyYWJpdHVyIHN1c2NpcGl0IGR1aSBub24ganVzdG8gdWx0cmljZXMgcGhhcmV0cmEuIEFsaXF1YW0gZXJhdCB2b2x1dHBhdC4gTnVsbGEgZmFjaWxpc2kuIFF1aXNxdWUgaWQgZmVsaXMgZXUgc2VtIGFsaXF1YW0gZnJpbmdpbGxhLgoKRXRpYW0gcXVpcyBhdWd1ZSBpbiB0ZWxsdXMgY29uc2VxdWF0IGVsZWlmZW5kLiBBZW5lYW4gZGlnbmlzc2ltIGNvbmd1ZSBmZWxpcyBpZCBlbGVtZW50dW0uIER1aXMgZnJpbmdpbGxhIHZhcml1cyBpcHN1bSwgbmVjIHN1c2NpcGl0IGxlbyBzZW1wZXIgdmVsLiBVdCBzb2xsaWNpdHVkaW4sIG9yY2kgYSB0aW5jaWR1bnQgYWNjdW1zYW4sIGRpYW0gbGVjdHVzIGxhb3JlZXQgbGFjdXMsIHZlbCBmZXJtZW50dW0gcXVhbSBlc3QgdmVsIGVyb3MuIEFsaXF1YW0gZnJpbmdpbGxhIHNhcGllbiBhYyBzYXBpZW4gZmF1Y2lidXMgY29udmFsbGlzLiBBbGlxdWFtIGlkIG51bmMgZXUganVzdG8gY29uc2VxdWF0IHRpbmNpZHVudC4gUXVpc3F1ZSBuZWMgbmlzbCBkdWkuIFBoYXNlbGx1cyBhdWd1ZSBsZWN0dXMsIHZhcml1cyB2aXRhZSBhdWN0b3IgdmVsLCBydXRydW0gYXQgcmlzdXMuIFZpdmFtdXMgbGFjaW5pYSBsZW8gcXVpcyBuZXF1ZSB1bHRyaWNlcyBuZWMgZWxlbWVudHVtIGZlbGlzIGZyaW5naWxsYS4gUHJvaW4gdmVsIHBvcnR0aXRvciBsZWN0dXMuCgpDdXJhYml0dXIgc2FwaWVuIGxvcmVtLCBtb2xsaXMgdXQgYWNjdW1zYW4gbm9uLCB1bHRyaWNpZXMgZXQgbWV0dXMuIEN1cmFiaXR1ciB2ZWwgbG9yZW0gcXVpcyBzYXBpZW4gZnJpbmdpbGxhIGxhb3JlZXQuIE1vcmJpIGlkIHVybmEgYWMgb3JjaSBlbGVtZW50dW0gYmxhbmRpdCBlZ2V0IHZvbHV0cGF0IG5lcXVlLiBQZWxsZW50ZXNxdWUgc2VtIG9kaW8sIGlhY3VsaXMgZXUgcGhhcmV0cmEgdml0YWUsIGN1cnN1cyBpbiBxdWFtLiBOdWxsYSBtb2xlc3RpZSBsaWd1bGEgaWQgbWFzc2EgbHVjdHVzIGV0IHB1bHZpbmFyIG5pc2kgcHVsdmluYXIuIE51bmMgZmVybWVudHVtIGF1Z3VlIGEgbGFjdXMgZnJpbmdpbGxhIHJob25jdXMgcG9ydHRpdG9yIGVyYXQgZGljdHVtLiBOdW5jIHNpdCBhbWV0IHRlbGx1cyBldCBkdWkgdml2ZXJyYSBhdWN0b3IgZXVpc21vZCBhdCBuaXNsLiBJbiBzZWQgY29uZ3VlIG1hZ25hLiBQcm9pbiBldCB0b3J0b3IgdXQgYXVndWUgcGxhY2VyYXQgZGlnbmlzc2ltIGEgZXUganVzdG8uIE1vcmJpIHBvcnR0aXRvciBwb3J0YSBsb2JvcnRpcy4gUGVsbGVudGVzcXVlIG5pYmggbGFjdXMsIGFkaXBpc2NpbmcgdXQgdHJpc3RpcXVlIHF1aXMsIGNvbnNlcXVhdCB2aXRhZSB2ZWxpdC4gTWFlY2VuYXMgdXQgbHVjdHVzIGxpYmVyby4gVml2YW11cyBhdWN0b3Igb2RpbyB
ldCBlcmF0IHNlbXBlciBzYWdpdHRpcy4gVml2YW11cyBpbnRlcmR1bSB2ZWxpdCBpbiByaXN1cyBtYXR0aXMgcXVpcyBkaWN0dW0gYW50ZSByaG9uY3VzLiBJbiBzYWdpdHRpcyBwb3J0dGl0b3IgZXJvcywgYXQgbG9ib3J0aXMgbWV0dXMgdWx0cmljZXMgdmVsLiBDdXJhYml0dXIgbm9uIGFsaXF1YW0gbmlzbC4gVmVzdGlidWx1bSBsdWN0dXMgZmV1Z2lhdCBzdXNjaXBpdC4gRXRpYW0gbm9uIGxhY3VzIHZlbCBudWxsYSBlZ2VzdGFzIGlhY3VsaXMgaWQgcXVpcyByaXN1cy4KCkV0aWFtIGluIGF1Y3RvciB1cm5hLiBGdXNjZSB1bHRyaWNpZXMgbW9sZXN0aWUgY29udmFsbGlzLiBJbiBoYWMgaGFiaXRhc3NlIHBsYXRlYSBkaWN0dW1zdC4gVmVzdGlidWx1bSBhbnRlIGlwc3VtIHByaW1pcyBpbiBmYXVjaWJ1cyBvcmNpIGx1Y3R1cyBldCB1bHRyaWNlcyBwb3N1ZXJlIGN1YmlsaWEgQ3VyYWU7IE1hdXJpcyBpYWN1bGlzIGxvcmVtIGZhdWNpYnVzIHB1cnVzIGdyYXZpZGEgYXQgY29udmFsbGlzIHR1cnBpcyBzb2xsaWNpdHVkaW4uIFN1c3BlbmRpc3NlIGF0IHZlbGl0IGxvcmVtLCBhIGZlcm1lbnR1bSBpcHN1bS4gRXRpYW0gY29uZGltZW50dW0sIGR1aSB2ZWwgY29uZGltZW50dW0gZWxlbWVudHVtLCBzYXBpZW4gc2VtIGJsYW5kaXQgc2FwaWVuLCBldCBwaGFyZXRyYSBsZW8gbmVxdWUgZXQgbGVjdHVzLiBOdW5jIHZpdmVycmEgdXJuYSBpYWN1bGlzIGF1Z3VlIHVsdHJpY2VzIGFjIHBvcnR0aXRvciBsYWN1cyBkaWduaXNzaW0uIEFsaXF1YW0gdXQgdHVycGlzIGR1aS4gU2VkIGVnZXQgYWxpcXVldCBmZWxpcy4gSW4gYmliZW5kdW0gbmliaCBzaXQgYW1ldCBzYXBpZW4gYWNjdW1zYW4gYWNjdW1zYW4gcGhhcmV0cmEgbWFnbmEgbW9sZXN0aWUuCgpNYXVyaXMgYWxpcXVldCB1cm5hIGVnZXQgbGVjdHVzIGFkaXBpc2NpbmcgYXQgY29uZ3VlIHR1cnBpcyBjb25zZXF1YXQuIFZpdmFtdXMgdGluY2lkdW50IGZlcm1lbnR1bSByaXN1cyBldCBmZXVnaWF0LiBOdWxsYSBtb2xlc3RpZSB1bGxhbWNvcnBlciBuaWJoIHNlZCBmYWNpbGlzaXMuIFBoYXNlbGx1cyBldCBjdXJzdXMgcHVydXMuIE5hbSBjdXJzdXMsIGR1aSBkaWN0dW0gdWx0cmljZXMgdml2ZXJyYSwgZXJhdCByaXN1cyB2YXJpdXMgZWxpdCwgZXUgbW9sZXN0aWUgZHVpIGVyb3MgcXVpcyBxdWFtLiBBbGlxdWFtIGV0IGFudGUgbmVxdWUsIGFjIGNvbnNlY3RldHVyIGR1aS4gRG9uZWMgY29uZGltZW50dW0gZXJhdCBpZCBlbGl0IGRpY3R1bSBzZWQgYWNjdW1zYW4gbGVvIHNhZ2l0dGlzLiBQcm9pbiBjb25zZXF1YXQgY29uZ3VlIHJpc3VzLCB2ZWwgdGluY2lkdW50IGxlbyBpbXBlcmRpZXQgZXUuIFZlc3RpYnVsdW0gbWFsZXN1YWRhIHR1cnBpcyBldSBtZXR1cyBpbXBlcmRpZXQgcHJldGl1bS4gQWxpcXVhbSBjb25kaW1lbnR1bSB1bHRyaWNlcyBuaWJoLCBldSBzZW1wZXIgZW5pbSBlbGVpZmVuZCBhLiBFdGlhbSBjb25kaW1lbnR1bSBuaXNsIHF1YW0uCgpQZWxsZW50ZXNxdWUgaWQgbW9sZXN0aWUgbmlzbC4gTWFlY2VuYXMgZXQgbGVjdHVzIGF0IGp1c3RvIG1vbGVzdGllIHZpdmVycmEgc2l0IGFtZXQgc2l0IGFtZXQgbGlndWxhLiBOdWxsYW0gbm9uIHBvcnR0aXRvciBtYWduYS4gUXVpc3F1ZSBlbGVtZW50dW0gYXJjdSBjdXJzdXMgdG9ydG9yIHJ1dHJ1bSBsb2JvcnRpcy4gTW9yYmkgc2l0IGFtZXQgbGVjdHVzIHZpdGFlIGVuaW0gZXVpc21vZCBkaWduaXNzaW0gZWdldCBhdCBuZXF1ZS4gVml2YW11cyBjb25zZXF1YXQgdmVoaWN1bGEgZHVpLCB2aXRhZSBhdWN0b3IgYXVndWUgZGlnbmlzc2ltIGluLiBJbiB0ZW1wdXMgc2VtIHF1aXMganVzdG8gdGluY2lkdW50IHNpdCBhbWV0IGF1Y3RvciB0dXJwaXMgbG9ib3J0aXMuIFBlbGxlbnRlc3F1ZSBub24gZXN0IG51bmMuIFZlc3RpYnVsdW0gbW9sbGlzIGZyaW5naWxsYSBpbnRlcmR1bS4gTWFlY2VuYXMgaXBzdW0gZG9sb3IsIHBoYXJldHJhIGlkIHRyaXN0aXF1ZSBtYXR0aXMsIGx1Y3R1cyB2aXRhZSB1cm5hLiBVdCB1bGxhbWNvcnBlciBhcmN1IGVnZXQgZWxpdCBjb252YWxsaXMgbW9sbGlzLiBQZWxsZW50ZXNxdWUgY29uZGltZW50dW0sIG1hc3NhIGFjIGhlbmRyZXJpdCB0ZW1wb3IsIG1hdXJpcyBwdXJ1cyBibGFuZGl0IGp1c3RvLCBldCBwaGFyZXRyYSBsZW8ganVzdG8gYSBlc3QuIER1aXMgYXJjdSBhdWd1ZSwgZmFjaWxpc2lzIHZlbCBkaWduaXNzaW0gc2VkLCBhbGlxdWFtIHF1aXMgbWFnbmEuIFF1aXNxdWUgbm9uIGNvbnNlcXVhdCBkb2xvci4gU3VzcGVuZGlzc2UgYSB1bHRyaWNlcyBsZW8uCgpEb25lYyB2aXRhZSBwcmV0aXVtIG5pYmguIE1hZWNlbmFzIGJpYmVuZHVtIGJpYmVuZHVtIGRpYW0gaW4gcGxhY2VyYXQuIFV0IGFjY3Vtc2FuLCBtaSB2aXRhZSB2ZXN0aWJ1bHVtIGV1aXNtb2QsIG51bmMganVzdG8gdnVscHV0YXRlIG5pc2ksIG5vbiBwbGFjZXJhdCBtaSB1cm5hIGV0IGRpYW0uIE1hZWNlbmFzIG1hbGVzdWFkYSBsb3JlbSB1dCBhcmN1IG1hdHRpcyBtb2xsaXMuIE51bGxhIGZhY2lsaXNpLiBEb25lYyBlc3QgbGVvLCBiaWJlbmR1bSBldSBwdWx2aW5hciBpbiwgY3Vyc3VzIHZlbCBtZXR1cy4gQWxpcXVhbSBlcmF0IHZvbHV0cGF0LiBOdWxsYW0gZmV1Z2lhdCBwb3J0dGl0b3IgbmVxdWUgaW4gdnVscHV0YXRlLiBRdWlzcXVlIG5lYyBtaSBldS
BtYWduYSBjb25zZXF1YXQgY3Vyc3VzIG5vbiBhdCBhcmN1LiBFdGlhbSByaXN1cyBtZXR1cywgc29sbGljaXR1ZGluIGV0IHVsdHJpY2VzIGF0LCB0aW5jaWR1bnQgc2VkIG51bmMuIFNlZCBlZ2V0IHNjZWxlcmlzcXVlIGF1Z3VlLiBVdCBmcmluZ2lsbGEgdmVuZW5hdGlzIHNlbSBub24gZWxlaWZlbmQuIE51bmMgbWF0dGlzLCByaXN1cyBzaXQgYW1ldCB2dWxwdXRhdGUgdmFyaXVzLCByaXN1cyBqdXN0byBlZ2VzdGFzIG1hdXJpcywgaWQgaW50ZXJkdW0gb2RpbyBpcHN1bSBldCBuaXNsLiBMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBNb3JiaSBpZCBlcmF0IG9kaW8sIG5lYyBwdWx2aW5hciBlbmltLgoKQ3VyYWJpdHVyIGFjIGZlcm1lbnR1bSBxdWFtLiBNb3JiaSBldSBlcm9zIHNhcGllbiwgdml0YWUgdGVtcHVzIGRvbG9yLiBNYXVyaXMgdmVzdGlidWx1bSBibGFuZGl0IGVuaW0gdXQgdmVuZW5hdGlzLiBBbGlxdWFtIGVnZXN0YXMsIGVyb3MgYXQgY29uc2VjdGV0dXIgdGluY2lkdW50LCBsb3JlbSBhdWd1ZSBpYWN1bGlzIGVzdCwgbmVjIG1vbGxpcyBmZWxpcyBhcmN1IGluIG51bmMuIFNlZCBpbiBvZGlvIHNlZCBsaWJlcm8gcGVsbGVudGVzcXVlIHZvbHV0cGF0IHZpdGFlIGEgYW50ZS4gTW9yYmkgY29tbW9kbyB2b2x1dHBhdCB0ZWxsdXMsIHV0IHZpdmVycmEgcHVydXMgcGxhY2VyYXQgZmVybWVudHVtLiBJbnRlZ2VyIGlhY3VsaXMgZmFjaWxpc2lzIGFyY3UsIGF0IGdyYXZpZGEgbG9yZW0gYmliZW5kdW0gYXQuIEFlbmVhbiBpZCBlcm9zIGVnZXQgZXN0IHNhZ2l0dGlzIGNvbnZhbGxpcyBzZWQgZXQgZHVpLiBEb25lYyBldSBwdWx2aW5hciB0ZWxsdXMuIE51bmMgZGlnbmlzc2ltIHJob25jdXMgdGVsbHVzLCBhdCBwZWxsZW50ZXNxdWUgbWV0dXMgbHVjdHVzIGF0LiBTZWQgb3JuYXJlIGFsaXF1YW0gZGlhbSwgYSBwb3J0dGl0b3IgbGVvIHNvbGxpY2l0dWRpbiBzZWQuIE5hbSB2aXRhZSBsZWN0dXMgbGFjdXMuIEludGVnZXIgYWRpcGlzY2luZyBxdWFtIG5lcXVlLCBibGFuZGl0IHBvc3VlcmUgbGliZXJvLiBTZWQgbGliZXJvIG51bmMsIGVnZXN0YXMgc29kYWxlcyB0ZW1wdXMgc2VkLCBjdXJzdXMgYmxhbmRpdCB0ZWxsdXMuIFZlc3RpYnVsdW0gbWkgcHVydXMsIHVsdHJpY2llcyBxdWlzIHBsYWNlcmF0IHZlbCwgbW9sZXN0aWUgYXQgZHVpLgoKTnVsbGEgY29tbW9kbyBvZGlvIGp1c3RvLiBQZWxsZW50ZXNxdWUgbm9uIG9ybmFyZSBkaWFtLiBJbiBjb25zZWN0ZXR1ciBzYXBpZW4gYWMgbnVuYyBzYWdpdHRpcyBtYWxlc3VhZGEuIE1vcmJpIHVsbGFtY29ycGVyIHRlbXBvciBlcmF0IG5lYyBydXRydW0uIER1aXMgdXQgY29tbW9kbyBqdXN0by4gQ3JhcyBlc3Qgb3JjaSwgY29uc2VjdGV0dXIgc2VkIGludGVyZHVtIHNlZCwgc2NlbGVyaXNxdWUgc2l0IGFtZXQgbnVsbGEuIFZlc3RpYnVsdW0ganVzdG8gbnVsbGEsIHBlbGxlbnRlc3F1ZSBhIHRlbXB1cyBldCwgZGFwaWJ1cyBldCBhcmN1LiBMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldCwgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0LiBNb3JiaSB0cmlzdGlxdWUsIGVyb3MgbmVjIGNvbmd1ZSBhZGlwaXNjaW5nLCBsaWd1bGEgc2VtIHJob25jdXMgZmVsaXMsIGF0IG9ybmFyZSB0ZWxsdXMgbWF1cmlzIGFjIHJpc3VzLiBWZXN0aWJ1bHVtIGFudGUgaXBzdW0gcHJpbWlzIGluIGZhdWNpYnVzIG9yY2kgbHVjdHVzIGV0IHVsdHJpY2VzIHBvc3VlcmUgY3ViaWxpYSBDdXJhZTsgUHJvaW4gbWF1cmlzIGR1aSwgdGVtcG9yIGZlcm1lbnR1bSBkaWN0dW0gZXQsIGN1cnN1cyBhIGxlby4gTWFlY2VuYXMgbmVjIG5pc2wgYSB0ZWxsdXMgcGVsbGVudGVzcXVlIHJob25jdXMuIE51bGxhbSB1bHRyaWNlcyBldWlzbW9kIGR1aSBldSBjb25ndWUuCgpJbiBuZWMgdGVtcG9yIHJpc3VzLiBJbiBmYXVjaWJ1cyBuaXNpIGVnZXQgZGlhbSBkaWduaXNzaW0gY29uc2VxdWF0LiBEb25lYyBwdWx2aW5hciBhbnRlIG5lYyBlbmltIG1hdHRpcyBydXRydW0uIFZlc3RpYnVsdW0gbGVvIGF1Z3VlLCBtb2xlc3RpZSBuZWMgZGFwaWJ1cyBpbiwgZGljdHVtIGF0IGVuaW0uIEludGVnZXIgYWxpcXVhbSwgbG9yZW0gZXUgdnVscHV0YXRlIGxhY2luaWEsIG1pIG9yY2kgdGVtcG9yIGVuaW0sIGVnZXQgbWF0dGlzIGxpZ3VsYSBtYWduYSBhIG1hZ25hLiBQcmFlc2VudCBzZWQgZXJhdCB1dCB0b3J0b3IgaW50ZXJkdW0gdml2ZXJyYS4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gTnVsbGEgZmFjaWxpc2kuIE1hZWNlbmFzIHNpdCBhbWV0IGxlY3R1cyBsYWN1cy4gTnVuYyB2aXRhZSBwdXJ1cyBpZCBsaWd1bGEgbGFvcmVldCBjb25kaW1lbnR1bS4gRHVpcyBhdWN0b3IgdG9ydG9yIHZlbCBkdWkgcHVsdmluYXIgYSBmYWNpbGlzaXMgYXJjdSBkaWduaXNzaW0uIEluIGhhYyBoYWJpdGFzc2UgcGxhdGVhIGRpY3R1bXN0LiBEb25lYyBzb2xsaWNpdHVkaW4gcGVsbGVudGVzcXVlIGVnZXN0YXMuIFNlZCBzZWQgc2VtIGp1c3RvLiBNYWVjZW5hcyBsYW9yZWV0IGhlbmRyZXJpdCBtYXVyaXMsIHV0IHBvcnR0aXRvciBsb3JlbSBpYWN1bGlzIGFjLiBRdWlzcXVlIG1vbGVzdGllIHNlbSBxdWlzIGxvcmVtIHRlbXBvciBydXRydW0uIFBoYXNlb
Gx1cyBuaWJoIG1hdXJpcywgcmhvbmN1cyBpbiBjb25zZWN0ZXR1ciBub24sIGFsaXF1ZXQgZXUgbWFzc2EuCgpDdXJhYml0dXIgdmVsaXQgYXJjdSwgcHJldGl1bSBwb3J0YSBwbGFjZXJhdCBxdWlzLCB2YXJpdXMgdXQgbWV0dXMuIFZlc3RpYnVsdW0gdnVscHV0YXRlIHRpbmNpZHVudCBqdXN0bywgdml0YWUgcG9ydHRpdG9yIGxlY3R1cyBpbXBlcmRpZXQgc2l0IGFtZXQuIFZpdmFtdXMgZW5pbSBkb2xvciwgc29sbGljaXR1ZGluIHV0IHNlbXBlciBub24sIG9ybmFyZSBvcm5hcmUgZHVpLiBBbGlxdWFtIHRlbXBvciBmZXJtZW50dW0gc2FwaWVuIGVnZXQgY29uZGltZW50dW0uIEN1cmFiaXR1ciBsYW9yZWV0IGJpYmVuZHVtIGFudGUsIGluIGV1aXNtb2QgbGFjdXMgbGFjaW5pYSBldS4gUGVsbGVudGVzcXVlIGhhYml0YW50IG1vcmJpIHRyaXN0aXF1ZSBzZW5lY3R1cyBldCBuZXR1cyBldCBtYWxlc3VhZGEgZmFtZXMgYWMgdHVycGlzIGVnZXN0YXMuIFN1c3BlbmRpc3NlIHBvdGVudGkuIFNlZCBhdCBsaWJlcm8gZXUgdG9ydG9yIHRlbXB1cyBzY2VsZXJpc3F1ZS4gTnVsbGEgZmFjaWxpc2kuIE51bGxhbSB2aXRhZSBuZXF1ZSBpZCBqdXN0byB2aXZlcnJhIHJob25jdXMgcHJldGl1bSBhdCBsaWJlcm8uIEV0aWFtIGVzdCB1cm5hLCBhbGlxdWFtIHZlbCBwdWx2aW5hciBub24sIG9ybmFyZSB2ZWwgcHVydXMuCgpOdWxsYSB2YXJpdXMsIG5pc2kgZWdldCBjb25kaW1lbnR1bSBzZW1wZXIsIG1ldHVzIGVzdCBkaWN0dW0gb2RpbywgdmVsIG1hdHRpcyByaXN1cyBlc3Qgc2VkIHZlbGl0LiBDdW0gc29jaWlzIG5hdG9xdWUgcGVuYXRpYnVzIGV0IG1hZ25pcyBkaXMgcGFydHVyaWVudCBtb250ZXMsIG5hc2NldHVyIHJpZGljdWx1cyBtdXMuIE51bmMgbm9uIGVzdCBuZWMgdGVsbHVzIHVsdHJpY2llcyBtYXR0aXMgdXQgZWdldCB2ZWxpdC4gSW50ZWdlciBjb25kaW1lbnR1bSBhbnRlIGlkIGxvcmVtIGJsYW5kaXQgbGFjaW5pYS4gRG9uZWMgdmVsIHRvcnRvciBhdWd1ZSwgaW4gY29uZGltZW50dW0gbmlzaS4gUGVsbGVudGVzcXVlIHBlbGxlbnRlc3F1ZSBudWxsYSB1dCBudWxsYSBwb3J0dGl0b3IgcXVpcyBzb2RhbGVzIGVuaW0gcnV0cnVtLiBTZWQgYXVndWUgcmlzdXMsIGV1aXNtb2QgYSBhbGlxdWV0IGF0LCB2dWxwdXRhdGUgbm9uIGxpYmVyby4gTnVsbGFtIG5pYmggb2RpbywgZGlnbmlzc2ltIGZlcm1lbnR1bSBwdWx2aW5hciBhYywgY29uZ3VlIGV1IG1pLiBEdWlzIHRpbmNpZHVudCwgbmliaCBpZCB2ZW5lbmF0aXMgcGxhY2VyYXQsIGRpYW0gdHVycGlzIGdyYXZpZGEgbGVvLCBzaXQgYW1ldCBtb2xsaXMgbWFzc2EgZG9sb3IgcXVpcyBtYXVyaXMuIFZpdmFtdXMgc2NlbGVyaXNxdWUgc29kYWxlcyBhcmN1IGV0IGRhcGlidXMuIFN1c3BlbmRpc3NlIHBvdGVudGkuIENyYXMgcXVpcyB0ZWxsdXMgYXJjdSwgcXVpcyBsYW9yZWV0IHNlbS4gRnVzY2UgcG9ydHRpdG9yLCBzYXBpZW4gdmVsIHRyaXN0aXF1ZSBzb2RhbGVzLCB2ZWxpdCBsZW8gcG9ydGEgYXJjdSwgcXVpcyBwZWxsZW50ZXNxdWUgbnVuYyBtZXR1cyBub24gb2Rpby4gTmFtIGFyY3UgbGliZXJvLCB1bGxhbWNvcnBlciB1dCBwaGFyZXRyYSBub24sIGRpZ25pc3NpbSBldCB2ZWxpdC4gUXVpc3F1ZSBkb2xvciBsb3JlbSwgdmVoaWN1bGEgc2l0IGFtZXQgc2NlbGVyaXNxdWUgaW4sIHZhcml1cyBhdCBudWxsYS4gUGVsbGVudGVzcXVlIHZpdGFlIHNlbSBlZ2V0IHRvcnRvciBpYWN1bGlzIHB1bHZpbmFyLiBTZWQgbnVuYyBqdXN0bywgZXVpc21vZCBncmF2aWRhIHB1bHZpbmFyIGVnZXQsIGdyYXZpZGEgZWdldCB0dXJwaXMuIENyYXMgdmVsIGRpY3R1bSBuaXNpLiBOdWxsYW0gbnVsbGEgbGliZXJvLCBncmF2aWRhIHNpdCBhbWV0IGFsaXF1YW0gcXVpcywgY29tbW9kbyB2aXRhZSBvZGlvLiBDcmFzIHZpdGFlIG5pYmggbmVjIGR1aSBwbGFjZXJhdCBzZW1wZXIuCgpWaXZhbXVzIGF0IGZyaW5naWxsYSBlcm9zLiBWaXZhbXVzIGF0IG5pc2wgaWQgbWFzc2EgY29tbW9kbyBmZXVnaWF0IHF1aXMgbm9uIG1hc3NhLiBNb3JiaSB0ZWxsdXMgdXJuYSwgYXVjdG9yIHNpdCBhbWV0IGVsZW1lbnR1bSBzZWQsIHJ1dHJ1bSBub24gbGVjdHVzLiBOdWxsYSBmZXVnaWF0IGR1aSBpbiBzYXBpZW4gb3JuYXJlIGV0IGltcGVyZGlldCBlc3Qgb3JuYXJlLiBQZWxsZW50ZXNxdWUgaGFiaXRhbnQgbW9yYmkgdHJpc3RpcXVlIHNlbmVjdHVzIGV0IG5ldHVzIGV0IG1hbGVzdWFkYSBmYW1lcyBhYyB0dXJwaXMgZWdlc3Rhcy4gVmVzdGlidWx1bSBzZW1wZXIgcnV0cnVtIHRlbXBvci4gU2VkIGluIGZlbGlzIG5pYmgsIHNlZCBhbGlxdWFtIGVuaW0uIEN1cmFiaXR1ciB1dCBxdWFtIHNjZWxlcmlzcXVlIHZlbGl0IHBsYWNlcmF0IGRpY3R1bS4gRG9uZWMgZWxlaWZlbmQgdmVoaWN1bGEgcHVydXMsIGV1IHZlc3RpYnVsdW0gc2FwaWVuIHJ1dHJ1bSBldS4gVml2YW11cyBpbiBvZGlvIHZlbCBlc3QgdnVscHV0YXRlIGlhY3VsaXMuIE51bmMgcnV0cnVtIGZldWdpYXQgcHJldGl1bS4KCk1hZWNlbmFzIGlwc3VtIG5lcXVlLCBhdWN0b3IgcXVpcyBsYWNpbmlhIHZpdGFlLCBldWlzbW9kIGFjIG9yY2kuIERvbmVjIG1vbGVzdGllIG1hc3NhIGNvbnNlcXVhdCBlc3QgcG9ydGEgYWMgcG9ydGEgcHVydXMgdGluY2lkdW50LiBOYW0gYmliZW5kdW0gbGVv
IG5lYyBsYWN1cyBtb2xsaXMgbm9uIGNvbmRpbWVudHVtIGRvbG9yIHJob25jdXMuIE51bGxhIGFjIHZvbHV0cGF0IGxvcmVtLiBOdWxsYW0gZXJhdCBwdXJ1cywgY29udmFsbGlzIGVnZXQgY29tbW9kbyBpZCwgdmFyaXVzIHF1aXMgYXVndWUuIE51bGxhbSBhbGlxdWFtIGVnZXN0YXMgbWksIHZlbCBzdXNjaXBpdCBuaXNsIG1hdHRpcyBjb25zZXF1YXQuIFF1aXNxdWUgdmVsIGVnZXN0YXMgc2FwaWVuLiBOdW5jIGxvcmVtIHZlbGl0LCBjb252YWxsaXMgbmVjIGxhb3JlZXQgZXQsIGFsaXF1ZXQgZWdldCBtYXNzYS4gTmFtIGV0IG5pYmggYWMgZHVpIHZlaGljdWxhIGFsaXF1YW0gcXVpcyBldSBhdWd1ZS4gQ3JhcyB2ZWwgbWFnbmEgdXQgZWxpdCByaG9uY3VzIGludGVyZHVtIGlhY3VsaXMgdm9sdXRwYXQgbmlzbC4gU3VzcGVuZGlzc2UgYXJjdSBsb3JlbSwgdmFyaXVzIHJob25jdXMgdGVtcG9yIGlkLCBwdWx2aW5hciBzZWQgdG9ydG9yLiBQZWxsZW50ZXNxdWUgdWx0cmljaWVzIGxhb3JlZXQgb2RpbyBhYyBkaWduaXNzaW0uIEFsaXF1YW0gZGlhbSBhcmN1LCBwbGFjZXJhdCBxdWlzIGVnZXN0YXMgZWdldCwgZmFjaWxpc2lzIGV1IG51bmMuIE1hdXJpcyB2dWxwdXRhdGUsIG5pc2wgc2l0IGFtZXQgbW9sbGlzIGludGVyZHVtLCByaXN1cyB0b3J0b3Igb3JuYXJlIG9yY2ksIHNlZCBlZ2VzdGFzIG9yY2kgZXJvcyBub24gZGlhbS4gVmVzdGlidWx1bSBoZW5kcmVyaXQsIG1ldHVzIHF1aXMgcGxhY2VyYXQgcGVsbGVudGVzcXVlLCBlbmltIHB1cnVzIGZhdWNpYnVzIGR1aSwgc2l0IGFtZXQgdWx0cmljaWVzIGxlY3R1cyBpcHN1bSBpZCBsb3JlbS4gQ2xhc3MgYXB0ZW50IHRhY2l0aSBzb2Npb3NxdSBhZCBsaXRvcmEgdG9ycXVlbnQgcGVyIGNvbnViaWEgbm9zdHJhLCBwZXIgaW5jZXB0b3MgaGltZW5hZW9zLiBQcmFlc2VudCBlZ2V0IGRpYW0gb2RpbywgZXUgYmliZW5kdW0gZWxpdC4gSW4gdmVzdGlidWx1bSBvcmNpIGV1IGVyYXQgdGluY2lkdW50IHRyaXN0aXF1ZS4KCkNyYXMgY29uc2VjdGV0dXIgYW50ZSBldSB0dXJwaXMgcGxhY2VyYXQgc29sbGljaXR1ZGluLiBNYXVyaXMgZXQgbGFjdXMgdG9ydG9yLCBlZ2V0IHBoYXJldHJhIHZlbGl0LiBEb25lYyBhY2N1bXNhbiB1bHRyaWNlcyB0ZW1wb3IuIERvbmVjIGF0IG5pYmggYSBlbGl0IGNvbmRpbWVudHVtIGRhcGlidXMuIEludGVnZXIgc2l0IGFtZXQgdnVscHV0YXRlIGFudGUuIFN1c3BlbmRpc3NlIHBvdGVudGkuIEluIHNvZGFsZXMgbGFvcmVldCBtYXNzYSB2aXRhZSBsYWNpbmlhLiBNb3JiaSB2ZWwgbGFjdXMgZmV1Z2lhdCBhcmN1IHZ1bHB1dGF0ZSBtb2xlc3RpZS4gQWxpcXVhbSBtYXNzYSBtYWduYSwgdWxsYW1jb3JwZXIgYWNjdW1zYW4gZ3JhdmlkYSBxdWlzLCByaG9uY3VzIHB1bHZpbmFyIG51bGxhLiBQcmFlc2VudCBzaXQgYW1ldCBpcHN1bSBkaWFtLCBzaXQgYW1ldCBsYWNpbmlhIG5lcXVlLiBJbiBldCBzYXBpZW4gYXVndWUuIEV0aWFtIGVuaW0gZWxpdCwgdWx0cmljZXMgdmVsIHJ1dHJ1bSBpZCwgc2NlbGVyaXNxdWUgbm9uIGVuaW0uCgpQcm9pbiBldCBlZ2VzdGFzIG5lcXVlLiBQcmFlc2VudCBldCBpcHN1bSBkb2xvci4gTnVuYyBub24gdmFyaXVzIG5pc2wuIEZ1c2NlIGluIHRvcnRvciBuaXNpLiBNYWVjZW5hcyBjb252YWxsaXMgbmVxdWUgaW4gbGlndWxhIGJsYW5kaXQgcXVpcyB2ZWhpY3VsYSBsZW8gbW9sbGlzLiBQZWxsZW50ZXNxdWUgc2FnaXR0aXMgYmxhbmRpdCBsZW8sIGRhcGlidXMgcGVsbGVudGVzcXVlIGxlbyB1bHRyaWNlcyBhYy4gQ3VyYWJpdHVyIGFjIGVnZXN0YXMgbGliZXJvLiBEb25lYyBwcmV0aXVtIHBoYXJldHJhIHByZXRpdW0uIEZ1c2NlIGltcGVyZGlldCwgdHVycGlzIGV1IGFsaXF1YW0gcG9ydGEsIGFudGUgZWxpdCBlbGVpZmVuZCByaXN1cywgbHVjdHVzIGF1Y3RvciBhcmN1IGFudGUgdXQgbnVuYy4gVml2YW11cyBpbiBsZW8gZmVsaXMsIHZpdGFlIGVsZWlmZW5kIGxhY3VzLiBEb25lYyB0ZW1wdXMgYWxpcXVhbSBwdXJ1cyBwb3J0dGl0b3IgdHJpc3RpcXVlLiBTdXNwZW5kaXNzZSBkaWFtIG5lcXVlLCBzdXNjaXBpdCBmZXVnaWF0IGZyaW5naWxsYSBub24sIGVsZWlmZW5kIHNpdCBudWxsYW0uCg== \ No newline at end of file
diff --git a/test/elixir/test/reduce_test.exs b/test/elixir/test/reduce_test.exs
new file mode 100644
index 000000000..bba1658e3
--- /dev/null
+++ b/test/elixir/test/reduce_test.exs
@@ -0,0 +1,415 @@
+defmodule ReduceTest do
+ use CouchTestCase
+
+ @moduletag :views
+
+ @moduledoc """
+ Test CouchDB view reduces
+ This is a port of the reduce.js suite
+ """
+
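+  # summate(n) is the Gauss sum 1 + 2 + ... + n; note that `/` always
+  # returns a float in Elixir, so summate(4) == 10.0.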
+ def summate(n) do
+ (n + 1) * n / 2
+ end
+
+ @tag :with_db
+ test "Basic reduce functions", context do
+ db_name = context[:db_name]
+ view_url = "/#{db_name}/_design/foo/_view/bar"
+ num_docs = 500
+ map = ~s"""
+ function (doc) {
+ emit(doc.integer, doc.integer);
+ emit(doc.integer, doc.integer);
+ };
+ """
+ reduce = "function (keys, values) { return sum(values); };"
+ red_doc = %{:views => %{:bar => %{:map => map, :reduce => reduce}}}
+
+ assert Couch.put("/#{db_name}/_design/foo", [body: red_doc]).body["ok"]
+ docs = make_docs(1..num_docs)
+ assert Couch.post("/#{db_name}/_bulk_docs", [body: %{:docs => docs}, query: %{w: 3}]).status_code == 201
+
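+    # The map function emits each doc's integer twice, so an ungrouped reduce
+    # returns a single row whose value is 2 * (1 + 2 + ... + num_docs).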
+ rows = Couch.get(view_url).body["rows"]
+ assert hd(rows)["value"] == 2 * summate(num_docs)
+
+ query = %{:startkey => 4, :endkey => 4}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 8
+
+ query = %{:startkey => 4, :endkey => 5}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 18
+
+ query = %{:startkey => 4, :endkey => 6}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 30
+
+ query = %{:group => true, :limit => 3}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert Enum.at(rows, 0)["value"] == 2
+ assert Enum.at(rows, 1)["value"] == 4
+ assert Enum.at(rows, 2)["value"] == 6
+
+ half_num_docs = Integer.floor_div(num_docs, 2)
+ max = Integer.floor_div(num_docs, 30) + 1
+ for i <- 1..max, i * 30 + 1 < half_num_docs do
+ i = i * 30 + 1
+ query = %{:startkey => i, :endkey => num_docs - i}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 2 * (summate(num_docs - i) - summate(i - 1))
+ end
+ end
+
+ @tag :with_db
+ test "More complex array key view row testing", context do
+ db_name = context[:db_name]
+ view_url = "/#{db_name}/_design/foo/_view/bar"
+ map = "function (doc) { emit(doc.keys, 1); };"
+ reduce = "function (keys, values) { return sum(values); };"
+ red_doc = %{:views => %{bar: %{map: map, reduce: reduce}}}
+
+ assert Couch.put("/#{db_name}/_design/foo", [body: red_doc]).body["ok"]
+ for i <- 1..5 do
+ for j <- 0..9 do
+ docs = [
+ %{keys: ["a"]},
+ %{keys: ["a"]},
+ %{keys: ["a", "b"]},
+ %{keys: ["a", "b"]},
+ %{keys: ["a", "b", "c"]},
+ %{keys: ["a", "b", "d"]},
+ %{keys: ["a", "c", "d"]},
+ %{keys: ["d"]},
+ %{keys: ["d", "a"]},
+ %{keys: ["d", "b"]},
+ %{keys: ["d", "c"]}
+ ]
+ assert Couch.post("/#{db_name}/_bulk_docs", [body: %{docs: docs}, query: %{w: 3}]).status_code == 201
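+        # Each inner iteration posts the 11 docs above; the leading 1 accounts
+        # for the design doc.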
+        total_docs = 1 + ((i - 1) * 10 * 11) + ((j + 1) * 11)
+ assert Couch.get("/#{db_name}").body["doc_count"] == total_docs
+ end
+
+ # test group by exact key match
+ query = %{group: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
+ assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 20 * i}
+ assert Enum.at(rows, 2) == %{"key" => ["a", "b", "c"], "value" => 10 * i}
+ assert Enum.at(rows, 3) == %{"key" => ["a", "b", "d"], "value" => 10 * i}
+
+ # test group reduce and limit params provide valid json
+ query = %{group: true, limit: 2}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
+ assert length(rows) == 2
+
+ # test group by the first element in the key array
+ query = %{group_level: 2}
+ rows = Couch.get(view_url, query: query).body["rows"]
+      assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
+      assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 40 * i}
+      assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => 10 * i}
+      assert Enum.at(rows, 3) == %{"key" => ["d"], "value" => 10 * i}
+      assert Enum.at(rows, 4) == %{"key" => ["d", "a"], "value" => 10 * i}
+      assert Enum.at(rows, 5) == %{"key" => ["d", "b"], "value" => 10 * i}
+      assert Enum.at(rows, 6) == %{"key" => ["d", "c"], "value" => 10 * i}
+
+ # test endkey with inclusive_end=true
+ query = %{group_level: 2, endkey: ~s(["d"]), inclusive_end: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+      assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
+      assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 40 * i}
+      assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => 10 * i}
+      assert Enum.at(rows, 3) == %{"key" => ["d"], "value" => 10 * i}
+ assert length(rows) == 4
+
+ # test endkey with inclusive_end=false
+ query = %{group_level: 2, endkey: ~s(["d"]), inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+      assert Enum.at(rows, 0) == %{"key" => ["a"], "value" => 20 * i}
+      assert Enum.at(rows, 1) == %{"key" => ["a", "b"], "value" => 40 * i}
+      assert Enum.at(rows, 2) == %{"key" => ["a", "c"], "value" => 10 * i}
+ assert length(rows) == 3
+ end
+ end
+
+ @tag :with_db
+ test "More complex reductions that need to use the combine option", context do
+ db_name = context[:db_name]
+ view_url = "/#{db_name}/_design/foo/_view/bar"
+ map = "function (doc) { emit(doc.val, doc.val); };"
+ reduce = ~s"""
+ function (keys, values, rereduce) {
+ // This computes the standard deviation of the mapped results
+ var stdDeviation=0.0;
+ var count=0;
+ var total=0.0;
+ var sqrTotal=0.0;
+
+ if (!rereduce) {
+      // This is the reduce phase: we are reducing over emitted values from
+ // the map functions.
+ for(var i in values) {
+ total = total + values[i];
+ sqrTotal = sqrTotal + (values[i] * values[i]);
+ }
+ count = values.length;
+ } else {
+      // This is the rereduce phase: we are re-reducing previously
+ // reduced values.
+ for(var i in values) {
+ count = count + values[i].count;
+ total = total + values[i].total;
+ sqrTotal = sqrTotal + values[i].sqrTotal;
+ }
+ }
+
+ var variance = (sqrTotal - ((total * total)/count)) / count;
+ stdDeviation = Math.sqrt(variance);
+
+    // The reduce result contains enough information to be rereduced
+    // with other reduce results.
+ return {"stdDeviation":stdDeviation,"count":count,
+ "total":total,"sqrTotal":sqrTotal};
+ }
+ """
+
+ red_doc = %{:views => %{:bar => %{:map => map, :reduce => reduce}}}
+ assert Couch.put("/#{db_name}/_design/foo", [body: red_doc]).body["ok"]
+
+ Enum.each(1..10, fn _ ->
+ docs = for i <- 1..10, do: %{val: i * 10}
+ assert Couch.post("/#{db_name}/_bulk_docs", [body: %{:docs => docs}, query: %{w: 3}]).status_code == 201
+ end)
+
+ rows = Couch.get(view_url).body["rows"]
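+    # The database now holds 100 docs with values 10, 20, ..., 100 (ten of
+    # each), so the population variance is mean(v^2) - mean(v)^2 =
+    # 3850 - 3025 = 825, and the standard deviation is sqrt(825) = 28.7228132...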
+ assert_in_delta hd(rows)["value"]["stdDeviation"], 28.722813232690143, 0.0000000001
+ end
+
+ @tag :with_db
+ test "Reduce pagination", context do
+ db_name = context[:db_name]
+ view_url = "/#{db_name}/_design/foo/_view/bar"
+ ddoc = %{
+ _id: "_design/foo",
+ language: "javascript",
+ views: %{
+ bar: %{
+ reduce: "_count",
+ map: ~s"""
+ function(doc) {
+ emit(doc.int, doc._id);
+ emit(doc.int + 1, doc._id);
+ emit(doc.int + 2, doc._id);
+ }
+ """
+ }
+ }
+ }
+
+ assert Couch.put("/#{db_name}/_design/foo", [body: ddoc]).body["ok"]
+ docs = for i <- 0..1122, do: %{_id: Integer.to_string(i), int: i}
+ assert Couch.post("/#{db_name}/_bulk_docs", [body: %{:docs => docs}, query: %{w: 3}]).status_code == 201
+
+ rand_val = fn -> :rand.uniform(100000000) end
+
+ # ?group=false tests
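+    # Doc k emits keys k, k+1 and k+2, so an interior key k carries one row
+    # from each of docs k-2, k-1 and k. A three-key range therefore counts
+    # 9 rows, and the *_docid parameters trim rows from the boundary keys.
+    # The random `foobar` parameter is presumably there to keep each request
+    # URL unique; the view engine should ignore it.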
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.()}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 9
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 9
+
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.(), inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 6
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), inclusive_end: false, descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 6
+
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "400"}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 7
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "400", inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 6
+
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "401"}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 8
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "401", inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 7
+
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "402"}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 9
+ query = %{startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "402", inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 8
+
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "398", descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 9
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "398", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 8
+
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "399", descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 8
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "399", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 7
+
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "400", descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 7
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "400", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 6
+
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), startkey_docid: "400", descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 7
+
+ query = %{startkey: 402, endkey: 400, foobar: rand_val.(), startkey_docid: "401", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert hd(rows)["value"] == 5
+
+ # ?group=true tests
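+    # With grouping, each key in the range becomes its own row with value 3;
+    # a startkey_docid/endkey_docid boundary reduces the count for the first
+    # or last key accordingly.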
+ query = %{:group => true, startkey: 400, endkey: 402, foobar: rand_val.()}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 400
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 402
+ assert Enum.at(rows, 2)["value"] == 3
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 400
+ assert Enum.at(rows, 2)["value"] == 3
+
+ query = %{:group => true, startkey: 400, endkey: 402, foobar: rand_val.(), inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 2
+ assert Enum.at(rows, 0)["key"] == 400
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), inclusive_end: false, descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 2
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+
+ query = %{:group => true, startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "401"}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 400
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 402
+ assert Enum.at(rows, 2)["value"] == 2
+
+ query = %{:group => true, startkey: 400, endkey: 402, foobar: rand_val.(), endkey_docid: "400"}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 400
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 402
+ assert Enum.at(rows, 2)["value"] == 1
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), startkey_docid: "401", descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 2
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 400
+ assert Enum.at(rows, 2)["value"] == 3
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), startkey_docid: "400", descending: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 1
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 400
+ assert Enum.at(rows, 2)["value"] == 3
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), startkey_docid: "401", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 2
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 2
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), startkey_docid: "400", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 2
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 1
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "398", descending: true, inclusive_end: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 400
+ assert Enum.at(rows, 2)["value"] == 3
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "399", descending: true, inclusive_end: true}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 400
+ assert Enum.at(rows, 2)["value"] == 2
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "399", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 3
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ assert Enum.at(rows, 2)["key"] == 400
+ assert Enum.at(rows, 2)["value"] == 1
+
+ query = %{:group => true, startkey: 402, endkey: 400, foobar: rand_val.(), endkey_docid: "400", descending: true, inclusive_end: false}
+ rows = Couch.get(view_url, query: query).body["rows"]
+ assert length(rows) == 2
+ assert Enum.at(rows, 0)["key"] == 402
+ assert Enum.at(rows, 0)["value"] == 3
+ assert Enum.at(rows, 1)["key"] == 401
+ assert Enum.at(rows, 1)["value"] == 3
+ end
+end
diff --git a/test/elixir/test/replication_test.exs b/test/elixir/test/replication_test.exs
new file mode 100644
index 000000000..0c8d8e060
--- /dev/null
+++ b/test/elixir/test/replication_test.exs
@@ -0,0 +1,1706 @@
+defmodule ReplicationTest do
+ use CouchTestCase
+
+ @moduledoc """
+  Test CouchDB replication behavior
+  This is a port of the replication.js suite
+ """
+
+ # TODO: Parameterize these
+ @admin_account "adm:pass"
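+  # Each {name, src_prefix, tgt_prefix} tuple below yields one full set of
+  # tests: an empty prefix leaves the endpoint local (a bare db name), while
+  # the URL prefix makes the replicator treat it as a remote endpoint.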
+ @db_pairs_prefixes [
+ {"local-to-local", "", ""},
+ {"remote-to-local", "http://localhost:15984/", ""},
+ {"local-to-remote", "", "http://localhost:15984/"},
+ {"remote-to-remote", "http://localhost:15984/", "http://localhost:15984/"}
+ ]
+
+  # This setting should probably be applied by `make elixir`,
+  # as is done for the JavaScript tests.
+ @moduletag config: [{"replicator", "startup_jitter", "0"}]
+
+ test "source database does not exist" do
+ name = random_db_name()
+ check_not_found(name <> "_src", name <> "_tgt")
+ end
+
+ test "source database not found with path - COUCHDB-317" do
+ name = random_db_name()
+ check_not_found(name <> "_src", name <> "_tgt")
+ end
+
+ test "source database not found with host" do
+ name = random_db_name()
+ url = "http://localhost:15984/" <> name <> "_src"
+ check_not_found(url, name <> "_tgt")
+ end
+
+ def check_not_found(src, tgt) do
+ body = %{:source => src, :target => tgt}
+ resp = Couch.post("/_replicate", body: body)
+ assert resp.body["error"] == "db_not_found"
+ end
+
+ test "replicating attachment without conflict - COUCHDB-885" do
+ name = random_db_name()
+ src_db_name = name <> "_src"
+ tgt_db_name = name <> "_tgt"
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ doc = %{"_id" => "doc1"}
+ [doc] = save_docs(src_db_name, [doc])
+
+ result = replicate(src_db_name, "http://localhost:15984/" <> tgt_db_name)
+ assert result["ok"]
+ assert is_list(result["history"])
+ history = Enum.at(result["history"], 0)
+ assert history["docs_written"] == 1
+ assert history["docs_read"] == 1
+ assert history["doc_write_failures"] == 0
+
+ doc = Map.put(doc, "_attachments", %{
+ "hello.txt" => %{
+ "content_type" => "text/plain",
+ "data" => "aGVsbG8gd29ybGQ=" # base64:encode("hello world")
+ },
+ "foo.dat" => %{
+ "content_type" => "not/compressible",
+ "data" => "aSBhbSBub3QgZ3ppcGVk" # base64:encode("i am not gziped")
+ }
+ })
+ [doc] = save_docs(src_db_name, [doc])
+
+ result = replicate(src_db_name, "http://localhost:15984/" <> tgt_db_name)
+
+ assert result["ok"]
+ assert is_list(result["history"])
+ assert length(result["history"]) == 2
+ history = Enum.at(result["history"], 0)
+ assert history["docs_written"] == 1
+ assert history["docs_read"] == 1
+ assert history["doc_write_failures"] == 0
+
+ query = %{
+ :conflicts => true,
+ :deleted_conflicts => true,
+ :attachments => true,
+ :att_encoding_info => true
+ }
+ opts = [headers: ["Accept": "application/json"], query: query]
+ resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}", opts)
+ assert HTTPotion.Response.success? resp
+ assert is_map(resp.body)
+ refute Map.has_key? resp.body, "_conflicts"
+ refute Map.has_key? resp.body, "_deleted_conflicts"
+
+ atts = resp.body["_attachments"]
+
+ assert atts["hello.txt"]["content_type"] == "text/plain"
+ assert atts["hello.txt"]["data"] == "aGVsbG8gd29ybGQ="
+ assert atts["hello.txt"]["encoding"] == "gzip"
+
+ assert atts["foo.dat"]["content_type"] == "not/compressible"
+ assert atts["foo.dat"]["data"] == "aSBhbSBub3QgZ3ppcGVk"
+ refute Map.has_key? atts["foo.dat"], "encoding"
+ end
+
+ test "replication cancellation" do
+ name = random_db_name()
+ src_db_name = name <> "_src"
+ tgt_db_name = name <> "_tgt"
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ save_docs(src_db_name, make_docs(1..6))
+
+ repl_body = %{:continuous => true, :create_target => true}
+ repl_src = "http://127.0.0.1:15984/" <> src_db_name
+ result = replicate(repl_src, tgt_db_name, body: repl_body)
+
+ assert result["ok"]
+ assert is_binary(result["_local_id"])
+ repl_id = result["_local_id"]
+
+ task = get_task(repl_id, 3_000)
+ assert is_map(task)
+
+ assert task["replication_id"] == repl_id
+ repl_body = %{
+ "replication_id" => repl_id,
+ "cancel": true
+ }
+ result = Couch.post("/_replicate", body: repl_body)
+ assert result.status_code == 200
+
+ wait_for_repl_stop(repl_id)
+
+ assert get_task(repl_id, 0) == :nil
+
+ result = Couch.post("/_replicate", body: repl_body)
+ assert result.status_code == 404
+ end
+
+ @tag user: [name: "joe", password: "erly", roles: ["erlanger"]]
+ test "unauthorized replication cancellation", ctx do
+ name = random_db_name()
+ src_db_name = name <> "_src"
+ tgt_db_name = name <> "_tgt"
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ save_docs(src_db_name, make_docs(1..6))
+
+ repl_src = "http://localhost:15984/" <> src_db_name
+ repl_body = %{"continuous" => true}
+ result = replicate(repl_src, tgt_db_name, body: repl_body)
+
+ assert result["ok"]
+ assert is_binary(result["_local_id"])
+ repl_id = result["_local_id"]
+
+ task = get_task(repl_id, 5_000)
+ assert is_map(task)
+
+ sess = Couch.login(ctx[:userinfo])
+ resp = Couch.Session.get(sess, "/_session")
+ assert resp.body["ok"]
+ assert resp.body["userCtx"]["name"] == "joe"
+
+ repl_body = %{
+ "replication_id" => repl_id,
+ "cancel": true
+ }
+ resp = Couch.Session.post(sess, "/_replicate", body: repl_body)
+ assert resp.status_code == 401
+ assert resp.body["error"] == "unauthorized"
+
+ assert Couch.Session.logout(sess).body["ok"]
+
+ resp = Couch.post("/_replicate", body: repl_body)
+ assert resp.status_code == 200
+ end
+
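+  # Generate the same battery of tests for each {name, src_prefix, tgt_prefix}
+  # pair in @db_pairs_prefixes (combinations of local database names and
+  # remote URLs). Rebinding the module attributes on every iteration lets
+  # each generated test body pick up the prefixes for its own pair at
+  # compile time.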
+ Enum.each(@db_pairs_prefixes, fn {name, src_prefix, tgt_prefix} ->
+ @src_prefix src_prefix
+ @tgt_prefix tgt_prefix
+
+ test "simple #{name} replication - #{name}" do
+ run_simple_repl(@src_prefix, @tgt_prefix)
+ end
+
+ test "replicate with since_seq - #{name}" do
+ run_since_seq_repl(@src_prefix, @tgt_prefix)
+ end
+
+ test "validate_doc_update failure replications - #{name}" do
+ run_vdu_repl(@src_prefix, @tgt_prefix)
+ end
+
+ test "create_target filter option - #{name}" do
+ run_create_target_repl(@src_prefix, @tgt_prefix)
+ end
+
+ test "filtered replications - #{name}" do
+ run_filtered_repl(@src_prefix, @tgt_prefix)
+ end
+
+ test "replication restarts after filter change - COUCHDB-892 - #{name}" do
+ run_filter_changed_repl(@src_prefix, @tgt_prefix)
+ end
+
+ test "replication by doc ids - #{name}" do
+ run_by_id_repl(@src_prefix, @tgt_prefix)
+ end
+
+ test "continuous replication - #{name}" do
+ run_continuous_repl(@src_prefix, @tgt_prefix)
+ end
+
+ @tag config: [
+ {"attachments", "compression_level", "8"},
+ {"attachments", "compressible_types", "text/*"}
+ ]
+ test "compressed attachment replication - #{name}" do
+ run_compressed_att_repl(@src_prefix, @tgt_prefix)
+ end
+
+ @tag user: [name: "joe", password: "erly", roles: ["erlanger"]]
+ test "non-admin user on target - #{name}", ctx do
+ run_non_admin_target_user_repl(@src_prefix, @tgt_prefix, ctx)
+ end
+
+ @tag user: [name: "joe", password: "erly", roles: ["erlanger"]]
+ test "non-admin or reader user on source - #{name}", ctx do
+ run_non_admin_or_reader_source_user_repl(@src_prefix, @tgt_prefix, ctx)
+ end
+ end)
+
+ def run_simple_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ att1_data = get_att1_data()
+ att2_data = get_att2_data()
+
+ ddoc = %{
+ "_id" => "_design/foo",
+ "language" => "javascript",
+ "value" => "ddoc"
+ }
+ docs = make_docs(1..20) ++ [ddoc]
+ docs = save_docs(src_db_name, docs)
+
+ docs = for doc <- docs do
+ if doc["integer"] >= 10 and doc["integer"] < 15 do
+ add_attachment(src_db_name, doc, body: att1_data)
+ else
+ doc
+ end
+ end
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert src_info["doc_count"] == tgt_info["doc_count"]
+
+ assert is_binary(result["session_id"])
+ assert is_list(result["history"])
+ assert length(result["history"]) == 1
+ history = Enum.at(result["history"], 0)
+ assert is_binary(history["start_time"])
+ assert is_binary(history["end_time"])
+ assert history["start_last_seq"] == 0
+ assert history["missing_checked"] == src_info["doc_count"]
+ assert history["missing_found"] == src_info["doc_count"]
+ assert history["docs_read"] == src_info["doc_count"]
+ assert history["docs_written"] == src_info["doc_count"]
+ assert history["doc_write_failures"] == 0
+
+ for doc <- docs do
+ copy = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}").body
+ assert cmp_json(doc, copy)
+
+ if doc["integer"] >= 10 and doc["integer"] < 15 do
+ atts = copy["_attachments"]
+ assert is_map(atts)
+ att = atts["readme.txt"]
+ assert is_map(att)
+ assert att["revpos"] == 2
+ assert String.match?(att["content_type"], ~r/text\/plain/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{copy["_id"]}/readme.txt")
+ assert String.length(resp.body) == String.length(att1_data)
+ assert resp.body == att1_data
+ end
+ end
+
+ # Add one more doc to source and more attachments to existing docs
+ new_doc = %{"_id" => "foo666", "value" => "d"}
+ [new_doc] = save_docs(src_db_name, [new_doc])
+
+ docs = for doc <- docs do
+ if doc["integer"] >= 10 and doc["integer"] < 15 do
+ ctype = "application/binary"
+ opts = [name: "data.dat", body: att2_data, content_type: ctype]
+ add_attachment(src_db_name, doc, opts)
+ else
+ doc
+ end
+ end
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ assert is_binary(result["session_id"])
+ assert is_list(result["history"])
+ assert length(result["history"]) == 2
+ history = Enum.at(result["history"], 0)
+ assert history["session_id"] == result["session_id"]
+ assert is_binary(history["start_time"])
+ assert is_binary(history["end_time"])
+ assert history["missing_checked"] == 6
+ assert history["missing_found"] == 6
+ assert history["docs_read"] == 6
+ assert history["docs_written"] == 6
+ assert history["doc_write_failures"] == 0
+
+ copy = Couch.get!("/#{tgt_db_name}/#{new_doc["_id"]}").body
+ assert copy["_id"] == new_doc["_id"]
+ assert copy["value"] == new_doc["value"]
+
+ for i <- 10..14 do
+ doc = Enum.at(docs, i - 1)
+ copy = Couch.get!("/#{tgt_db_name}/#{i}").body
+ assert cmp_json(doc, copy)
+
+ atts = copy["_attachments"]
+ assert is_map(atts)
+ att = atts["readme.txt"]
+      assert is_map(att)
+ assert att["revpos"] == 2
+ assert String.match?(att["content_type"], ~r/text\/plain/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{i}/readme.txt")
+ assert String.length(resp.body) == String.length(att1_data)
+ assert resp.body == att1_data
+
+ att = atts["data.dat"]
+ assert is_map(att)
+ assert att["revpos"] == 3
+ assert String.match?(att["content_type"], ~r/application\/binary/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{i}/data.dat")
+ assert String.length(resp.body) == String.length(att2_data)
+ assert resp.body == att2_data
+ end
+
+ # Test deletion is replicated
+ del_doc = %{
+ "_id" => "1",
+ "_rev" => Enum.at(docs, 0)["_rev"],
+ "_deleted" => true
+ }
+ [del_doc] = save_docs(src_db_name, [del_doc])
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+ assert tgt_info["doc_del_count"] == src_info["doc_del_count"]
+ assert tgt_info["doc_del_count"] == 1
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 3
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 1
+ assert history["missing_found"] == 1
+ assert history["docs_read"] == 1
+ assert history["docs_written"] == 1
+ assert history["doc_write_failures"] == 0
+
+ resp = Couch.get("/#{tgt_db_name}/#{del_doc["_id"]}")
+ assert resp.status_code == 404
+
+ resp = Couch.get!("/#{tgt_db_name}/_changes")
+ [change] = Enum.filter(resp.body["results"], &(&1["id"] == del_doc["_id"]))
+ assert change["id"] == del_doc["_id"]
+ assert change["deleted"]
+
+ # Test replicating a conflict
+ doc = Couch.get!("/#{src_db_name}/2").body
+ [doc] = save_docs(src_db_name, [Map.put(doc, :value, "white")])
+
+ copy = Couch.get!("/#{tgt_db_name}/2").body
+    save_docs(tgt_db_name, [Map.put(copy, "value", "black")])
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 4
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 1
+ assert history["missing_found"] == 1
+ assert history["docs_read"] == 1
+ assert history["docs_written"] == 1
+ assert history["doc_write_failures"] == 0
+
+ copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
+ assert String.match?(copy["_rev"], ~r/^2-/)
+ assert is_list(copy["_conflicts"])
+ assert length(copy["_conflicts"]) == 1
+ conflict = Enum.at(copy["_conflicts"], 0)
+ assert String.match?(conflict, ~r/^2-/)
+
+ # Re-replicate updated conflict
+ [doc] = save_docs(src_db_name, [Map.put(doc, :value, "yellow")])
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 5
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 1
+ assert history["missing_found"] == 1
+ assert history["docs_read"] == 1
+ assert history["docs_written"] == 1
+ assert history["doc_write_failures"] == 0
+
+ copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
+ assert String.match?(copy["_rev"], ~r/^3-/)
+ assert is_list(copy["_conflicts"])
+ assert length(copy["_conflicts"]) == 1
+ conflict = Enum.at(copy["_conflicts"], 0)
+ assert String.match?(conflict, ~r/^2-/)
+
+ # Resolve the conflict and re-replicate new revision
+ resolve_doc = %{"_id" => "2", "_rev" => conflict, "_deleted" => true}
+ save_docs(tgt_db_name, [resolve_doc])
+ save_docs(src_db_name, [Map.put(doc, :value, "rainbow")])
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 6
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 1
+ assert history["missing_found"] == 1
+ assert history["docs_read"] == 1
+ assert history["docs_written"] == 1
+ assert history["doc_write_failures"] == 0
+
+ copy = Couch.get!("/#{tgt_db_name}/2", query: %{:conflicts => true}).body
+
+ assert String.match?(copy["_rev"], ~r/^4-/)
+ assert not Map.has_key?(copy, "_conflicts")
+
+ # Test that existing revisions are not replicated
+ src_docs = [
+ %{"_id" => "foo1", "value" => 111},
+ %{"_id" => "foo2", "value" => 222},
+ %{"_id" => "foo3", "value" => 333}
+ ]
+ save_docs(src_db_name, src_docs)
+ save_docs(tgt_db_name, Enum.filter(src_docs, &(&1["_id"] != "foo2")))
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 7
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 3
+ assert history["missing_found"] == 1
+ assert history["docs_read"] == 1
+ assert history["docs_written"] == 1
+ assert history["doc_write_failures"] == 0
+
+ docs = [
+ %{"_id" => "foo4", "value" => 444},
+ %{"_id" => "foo5", "value" => 555}
+ ]
+ save_docs(src_db_name, docs)
+ save_docs(tgt_db_name, docs)
+
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 8
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 2
+ assert history["missing_found"] == 0
+ assert history["docs_read"] == 0
+ assert history["docs_written"] == 0
+ assert history["doc_write_failures"] == 0
+
+ # Test nothing to replicate
+ result = replicate(src_prefix <> src_db_name, tgt_prefix <> tgt_db_name)
+ assert result["ok"]
+ assert result["no_changes"]
+ end
+
+ def run_since_seq_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ docs = make_docs(1..5)
+ docs = save_docs(src_db_name, docs)
+
+ changes = get_db_changes(src_db_name)["results"]
+ since_seq = Enum.at(changes, 2)["seq"]
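+    # Starting from the seq of the third change means only the last two
+    # documents are expected to replicate.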
+
+ # TODO: In JS we re-fetch _changes with since_seq, is that
+ # really necessary?
+ expected_ids = for change <- Enum.drop(changes, 3) do
+ change["id"]
+ end
+ assert length(expected_ids) == 2
+
+ cancel_replication(repl_src, repl_tgt)
+ result = replicate(repl_src, repl_tgt, body: %{:since_seq => since_seq})
+ cancel_replication(repl_src, repl_tgt)
+
+ assert result["ok"]
+ assert is_list(result["history"])
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 2
+ assert history["missing_found"] == 2
+ assert history["docs_read"] == 2
+ assert history["docs_written"] == 2
+ assert history["doc_write_failures"] == 0
+
+ Enum.each(docs, fn doc ->
+ result = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
+ if Enum.member?(expected_ids, doc["_id"]) do
+ assert result.status_code < 300
+ assert cmp_json(doc, result.body)
+ else
+ assert result.status_code == 404
+ end
+ end)
+ end
+
+ def run_vdu_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ docs = make_docs(1..7)
+ docs = for doc <- docs do
+ if doc["integer"] == 2 do
+ Map.put(doc, "_attachments", %{
+ "hello.txt" => %{
+ :content_type => "text/plain",
+ :data => "aGVsbG8gd29ybGQ=" # base64:encode("hello world")
+ }
+ })
+ else
+ doc
+ end
+ end
+ docs = save_docs(src_db_name, docs)
+
+ ddoc = %{
+ "_id" => "_design/test",
+ "language" => "javascript",
+ "validate_doc_update" => """
+ function(newDoc, oldDoc, userCtx, secObj) {
+ if((newDoc.integer % 2) !== 0) {
+ throw {forbidden: "I only like multiples of 2."};
+ }
+ }
+ """
+ }
+ [_] = save_docs(tgt_db_name, [ddoc])
+
+ result = replicate(repl_src, repl_tgt)
+ assert result["ok"]
+
+ assert is_list(result["history"])
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 7
+ assert history["missing_found"] == 7
+ assert history["docs_read"] == 7
+ assert history["docs_written"] == 3
+ assert history["doc_write_failures"] == 4
+
+ for doc <- docs do
+ result = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
+ if rem(doc["integer"], 2) == 0 do
+ assert result.status_code < 300
+ assert result.body["integer"] == doc["integer"]
+ else
+ assert result.status_code == 404
+ end
+ end
+ end
+
+ def run_create_target_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+ # tgt_db_name is created by the replication
+
+ docs = make_docs(1..2)
+ save_docs(src_db_name, docs)
+
+ replicate(repl_src, repl_tgt, body: %{:create_target => true})
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
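+    # create_target should give the target the same shard layout as the
+    # source; comparing the per-shard {range, update_seq} pairs decoded from
+    # the clustered update sequences verifies that.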
+ src_shards = seq_to_shards(src_info["update_seq"])
+ tgt_shards = seq_to_shards(tgt_info["update_seq"])
+ assert tgt_shards == src_shards
+ end
+
+ def run_filtered_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ docs = make_docs(1..30)
+ ddoc = %{
+ "_id" => "_design/mydesign",
+ "language" => "javascript",
+ "filters" => %{
+ "myfilter" => """
+ function(doc, req) {
+ var modulus = Number(req.query.modulus);
+ var special = req.query.special;
+ return (doc.integer % modulus === 0) || (doc.string === special);
+ }
+ """
+ }
+ }
+
+ [_ | docs] = save_docs(src_db_name, [ddoc | docs])
+
+ repl_body = %{
+ "filter" => "mydesign/myfilter",
+ "query_params" => %{
+ "modulus" => "2",
+ "special" => "7"
+ }
+ }
+
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+
+ Enum.each(docs, fn doc ->
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
+ if(rem(doc["integer"], 2) == 0 || doc["string"] == "7") do
+ assert resp.status_code < 300
+ assert cmp_json(doc, resp.body)
+ else
+ assert resp.status_code == 404
+ end
+ end)
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 1
+ history = Enum.at(result["history"], 0)
+
+    # We (incorrectly) don't record update sequences for documents that
+    # don't pass the changes feed filter. Historically the last document to
+    # pass was the second-to-last doc, which had an update sequence of 30.
+    # Work applied to avoid conflicts from duplicate IDs breaking
+    # _bulk_docs updates added a sort to that logic, so the last document
+    # to pass now has a doc id of "8" and sits at update_seq 29 (only "9"
+    # and the design doc come after it).
+    #
+    # A future fix ought to record the update sequence of the database
+    # itself. BigCouch has some existing work on this for the clustered
+    # case, because with very few documents passing the filter a single
+    # node ends up rescanning a large portion of the database.
+    #
+    # In a cluster we cannot rely on sequences at all: the same number can
+    # appear more than once (at least for n > 1), and sequences now embed
+    # hashes, so comparing seq == 29 would be a lottery (and stripping the
+    # hashes makes no sense). The JS suite brute-force compared all
+    # attributes of all docs; above we instead verified that excluded docs
+    # did not make it across in any way, so sequence checks are left out.
+
+ # 16 => 15 docs with even integer field + 1 doc with string field "7"
+ assert history["missing_checked"] == 16
+ assert history["missing_found"] == 16
+ assert history["docs_read"] == 16
+ assert history["docs_written"] == 16
+ assert history["doc_write_failures"] == 0
+
+ new_docs = make_docs(50..55)
+ new_docs = save_docs(src_db_name, new_docs)
+
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+
+ Enum.each(new_docs, fn doc ->
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
+ if(rem(doc["integer"], 2) == 0) do
+ assert resp.status_code < 300
+ assert cmp_json(doc, resp.body)
+ else
+ assert resp.status_code == 404
+ end
+ end)
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 2
+ history = Enum.at(result["history"], 0)
+
+ assert history["missing_checked"] == 3
+ assert history["missing_found"] == 3
+ assert history["docs_read"] == 3
+ assert history["docs_written"] == 3
+ assert history["doc_write_failures"] == 0
+ end
+
+ def run_filter_changed_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ filter_fun_1 = """
+ function(doc, req) {
+ if(doc.value < Number(req.query.maxvalue)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+ """
+
+ filter_fun_2 = """
+ function(doc, req) {
+ return true;
+ }
+ """
+
+    docs = [
+      %{"_id" => "foo1", "value" => 1},
+      %{"_id" => "foo2", "value" => 2},
+      %{"_id" => "foo3", "value" => 3},
+      %{"_id" => "foo4", "value" => 4}
+    ]
+    ddoc = %{
+      "_id" => "_design/mydesign",
+      "language" => "javascript",
+      "filters" => %{
+        "myfilter" => filter_fun_1
+      }
+    }
+
+ [ddoc | _] = save_docs(src_db_name, [ddoc | docs])
+
+ repl_body = %{
+ :filter => "mydesign/myfilter",
+ :query_params => %{
+ :maxvalue => "3"
+ }
+ }
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 1
+ history = Enum.at(result["history"], 0)
+ assert history["docs_read"] == 2
+ assert history["docs_written"] == 2
+ assert history["doc_write_failures"] == 0
+
+ resp = Couch.get!("/#{tgt_db_name}/foo1")
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["value"] == 1
+
+ resp = Couch.get!("/#{tgt_db_name}/foo2")
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["value"] == 2
+
+ resp = Couch.get!("/#{tgt_db_name}/foo3")
+ assert resp.status_code == 404
+
+ resp = Couch.get!("/#{tgt_db_name}/foo4")
+ assert resp.status_code == 404
+
+ # Replication should start from scratch after the filter's code changed
+    ddoc = Map.put(ddoc, "filters", %{"myfilter" => filter_fun_2})
+ [_] = save_docs(src_db_name, [ddoc])
+
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+
+ assert is_list(result["history"])
+ assert length(result["history"]) == 1
+ history = Enum.at(result["history"], 0)
+ assert history["docs_read"] == 3
+ assert history["docs_written"] == 3
+ assert history["doc_write_failures"] == 0
+
+ resp = Couch.get!("/#{tgt_db_name}/foo1")
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["value"] == 1
+
+ resp = Couch.get!("/#{tgt_db_name}/foo2")
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["value"] == 2
+
+ resp = Couch.get!("/#{tgt_db_name}/foo3")
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["value"] == 3
+
+ resp = Couch.get!("/#{tgt_db_name}/foo4")
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["value"] == 4
+
+ resp = Couch.get!("/#{tgt_db_name}/_design/mydesign")
+ assert HTTPotion.Response.success?(resp)
+ end
+
+ def run_by_id_repl(src_prefix, tgt_prefix) do
+ target_doc_ids = [
+ %{
+ :initial => ["1", "2", "10"],
+ :after => [],
+ :conflict_id => "2"
+ },
+ %{
+ :initial => ["1", "2"],
+ :after => ["7"],
+ :conflict_id => "1"
+ },
+ %{
+ :initial => ["1", "foo_666", "10"],
+ :after => ["7"],
+ :conflict_id => "10"
+ },
+ %{
+ :initial => ["_design/foo", "8"],
+ :after => ["foo_5"],
+ :conflict_id => "8"
+ },
+ %{
+ :initial => ["_design%2Ffoo", "8"],
+ :after => ["foo_5"],
+ :conflict_id => "8"
+ },
+ %{
+ :initial => [],
+ :after => ["foo_1000", "_design/foo", "1"],
+ :conflict_id => "1"
+ }
+ ]
+
+ Enum.each(target_doc_ids, fn test_data ->
+ run_by_id_repl_impl(src_prefix, tgt_prefix, test_data)
+ end)
+ end
+
+ def run_by_id_repl_impl(src_prefix, tgt_prefix, test_data) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ docs = make_docs(1..10)
+ ddoc = %{
+ "_id" => "_design/foo",
+      "language" => "javascript",
+ "integer" => 1
+ }
+
+ doc_ids = test_data[:initial]
+ num_missing = Enum.count(doc_ids, fn doc_id ->
+ String.starts_with?(doc_id, "foo_")
+ end)
+ total_replicated = length(doc_ids) - num_missing
+
+ [_ | docs] = save_docs(src_db_name, [ddoc | docs])
+
+ repl_body = %{:doc_ids => doc_ids}
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+
+ if(total_replicated == 0) do
+ assert result["no_changes"]
+ else
+ assert is_binary(result["start_time"])
+ assert is_binary(result["end_time"])
+ assert result["docs_read"] == total_replicated
+ assert result["docs_written"] == total_replicated
+ assert result["doc_write_failures"] == 0
+ end
+
+ Enum.each(doc_ids, fn doc_id ->
+ doc_id = URI.decode(doc_id)
+ orig = Couch.get!("/#{src_db_name}/#{doc_id}")
+ copy = Couch.get!("/#{tgt_db_name}/#{doc_id}")
+
+ if(String.starts_with?(doc_id, "foo_")) do
+ assert orig.status_code == 404
+ assert copy.status_code == 404
+ else
+ assert HTTPotion.Response.success?(orig)
+ assert HTTPotion.Response.success?(copy)
+ assert cmp_json(orig.body, copy.body)
+ end
+ end)
+
+ # Be absolutely sure that other docs were not replicated
+ Enum.each(docs, fn doc ->
+ encoded_id = URI.encode_www_form(doc["_id"])
+ copy = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
+ is_doc_id = &(Enum.member?(doc_ids, &1))
+ if(is_doc_id.(doc["_id"]) or is_doc_id.(encoded_id)) do
+ assert HTTPotion.Response.success?(copy)
+ else
+ assert copy.status_code == 404
+ end
+ end)
+
+ tgt_info = get_db_info(tgt_db_name)
+ assert tgt_info["doc_count"] == total_replicated
+
+ doc_ids_after = test_data[:after]
+ num_missing_after = Enum.count(doc_ids_after, fn doc_id ->
+ String.starts_with?(doc_id, "foo_")
+ end)
+
+ repl_body = %{:doc_ids => doc_ids_after}
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+
+ total_replicated_after = length(doc_ids_after) - num_missing_after
+ if(total_replicated_after == 0) do
+ assert result["no_changes"]
+ else
+ assert is_binary(result["start_time"])
+ assert is_binary(result["end_time"])
+ assert result["docs_read"] == total_replicated_after
+ assert result["docs_written"] == total_replicated_after
+ assert result["doc_write_failures"] == 0
+ end
+
+ Enum.each(doc_ids_after, fn doc_id ->
+ orig = Couch.get!("/#{src_db_name}/#{doc_id}")
+ copy = Couch.get!("/#{tgt_db_name}/#{doc_id}")
+
+ if(String.starts_with?(doc_id, "foo_")) do
+ assert orig.status_code == 404
+ assert copy.status_code == 404
+ else
+ assert HTTPotion.Response.success?(orig)
+ assert HTTPotion.Response.success?(copy)
+ assert cmp_json(orig.body, copy.body)
+ end
+ end)
+
+ # Be absolutely sure that other docs were not replicated
+ all_doc_ids = doc_ids ++ doc_ids_after
+ Enum.each(docs, fn doc ->
+ encoded_id = URI.encode_www_form(doc["_id"])
+ copy = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
+ is_doc_id = &(Enum.member?(all_doc_ids, &1))
+ if(is_doc_id.(doc["_id"]) or is_doc_id.(encoded_id)) do
+ assert HTTPotion.Response.success?(copy)
+ else
+ assert copy.status_code == 404
+ end
+ end)
+
+ tgt_info = get_db_info(tgt_db_name)
+ assert tgt_info["doc_count"] == total_replicated + total_replicated_after,
+ "#{inspect test_data}"
+
+ # Update a source document and re-replicate (no conflict introduced)
+ conflict_id = test_data[:conflict_id]
+ doc = Couch.get!("/#{src_db_name}/#{conflict_id}").body
+ assert is_map(doc)
+ doc = Map.put(doc, "integer", 666)
+ [doc] = save_docs(src_db_name, [doc])
+
+ att1 = [
+ name: "readme.txt",
+ body: get_att1_data(),
+ content_type: "text/plain"
+ ]
+ att2 = [
+ name: "data.dat",
+ body: get_att2_data(),
+ content_type: "application/binary"
+ ]
+ doc = add_attachment(src_db_name, doc, att1)
+ doc = add_attachment(src_db_name, doc, att2)
+
+ repl_body = %{:doc_ids => [conflict_id]}
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+
+ assert result["docs_read"] == 1
+ assert result["docs_written"] == 1
+ assert result["doc_write_failures"] == 0
+
+ query = %{"conflicts" => "true"}
+ copy = Couch.get!("/#{tgt_db_name}/#{conflict_id}", query: query)
+ assert HTTPotion.Response.success?(copy)
+ assert copy.body["integer"] == 666
+ assert String.starts_with?(copy.body["_rev"], "4-")
+    assert not Map.has_key?(copy.body, "_conflicts")
+
+ atts = copy.body["_attachments"]
+ assert is_map(atts)
+ assert is_map(atts["readme.txt"])
+ assert atts["readme.txt"]["revpos"] == 3
+ assert String.match?(atts["readme.txt"]["content_type"], ~r/text\/plain/)
+ assert atts["readme.txt"]["stub"]
+
+ att1_data = Couch.get!("/#{tgt_db_name}/#{conflict_id}/readme.txt").body
+ assert String.length(att1_data) == String.length(att1[:body])
+ assert att1_data == att1[:body]
+
+ assert is_map(atts["data.dat"])
+ assert atts["data.dat"]["revpos"] == 4
+ ct_re = ~r/application\/binary/
+ assert String.match?(atts["data.dat"]["content_type"], ct_re)
+ assert atts["data.dat"]["stub"]
+
+ att2_data = Couch.get!("/#{tgt_db_name}/#{conflict_id}/data.dat").body
+ assert String.length(att2_data) == String.length(att2[:body])
+ assert att2_data == att2[:body]
+
+ # Generate a conflict using replication by doc ids
+ orig = Couch.get!("/#{src_db_name}/#{conflict_id}").body
+ orig = Map.update!(orig, "integer", &(&1 + 100))
+ [_] = save_docs(src_db_name, [orig])
+
+ copy = Couch.get!("/#{tgt_db_name}/#{conflict_id}").body
+ copy = Map.update!(copy, "integer", &(&1 + 1))
+ [_] = save_docs(tgt_db_name, [copy])
+
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+ assert result["ok"]
+ assert result["docs_read"] == 1
+ assert result["docs_written"] == 1
+ assert result["doc_write_failures"] == 0
+
+ copy = Couch.get!("/#{tgt_db_name}/#{conflict_id}", query: query).body
+ assert String.match?(copy["_rev"], ~r/^5-/)
+ assert is_list(copy["_conflicts"])
+ assert length(copy["_conflicts"]) == 1
+ conflict_rev = Enum.at(copy["_conflicts"], 0)
+ assert String.match?(conflict_rev, ~r/^5-/)
+ end
+
+ def run_continuous_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ ddoc = %{
+ "_id" => "_design/mydesign",
+ "language" => "javascript",
+ "filters" => %{
+ "myfilter" => "function(doc, req) { return true; }"
+ }
+ }
+ docs = make_docs(1..25)
+ docs = save_docs(src_db_name, docs ++ [ddoc])
+
+ att1_data = get_att1_data()
+
+ docs = for doc <- docs do
+ if doc["integer"] >= 10 and doc["integer"] < 15 do
+ add_attachment(src_db_name, doc)
+ else
+ doc
+ end
+ end
+
+ repl_body = %{:continuous => true}
+ result = replicate(repl_src, repl_tgt, body: repl_body)
+
+ assert result["ok"]
+ assert is_binary(result["_local_id"])
+
+ repl_id = result["_local_id"]
+ task = get_task(repl_id, 30000)
+ assert is_map(task), "Error waiting for replication to start"
+
+ wait_for_repl(src_db_name, repl_id, 26)
+
+ Enum.each(docs, fn doc ->
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
+ assert resp.status_code < 300
+ assert cmp_json(doc, resp.body)
+
+ if doc["integer"] >= 10 and doc["integer"] < 15 do
+ atts = resp.body["_attachments"]
+ assert is_map(atts)
+ att = atts["readme.txt"]
+ assert is_map(att)
+ assert att["revpos"] == 2
+ assert String.match?(att["content_type"], ~r/text\/plain/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}/readme.txt")
+ assert String.length(resp.body) == String.length("some text")
+ assert resp.body == "some text"
+ end
+ end)
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ # Add attachments to more source docs
+ docs = for doc <- docs do
+ is_ddoc = String.starts_with?(doc["_id"], "_design/")
+ case doc["integer"] do
+ n when n >= 10 and n < 15 ->
+ ctype = "application/binary"
+ opts = [name: "data.dat", body: att1_data, content_type: ctype]
+ add_attachment(src_db_name, doc, opts)
+ _ when is_ddoc ->
+ add_attachment(src_db_name, doc)
+ _ ->
+ doc
+ end
+ end
+
+ wait_for_repl(src_db_name, repl_id, 32)
+
+ Enum.each(docs, fn doc ->
+ is_ddoc = String.starts_with?(doc["_id"], "_design/")
+ case doc["integer"] do
+        n when (n >= 10 and n < 15) or is_ddoc ->
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
+ atts = resp.body["_attachments"]
+ assert is_map(atts)
+ att = atts["readme.txt"]
+ assert is_map(att)
+ assert att["revpos"] == 2
+ assert String.match?(att["content_type"], ~r/text\/plain/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}/readme.txt")
+ assert String.length(resp.body) == String.length("some text")
+ assert resp.body == "some text"
+
+ if not is_ddoc do
+ att = atts["data.dat"]
+ assert is_map(att)
+ assert att["revpos"] == 3
+ assert String.match?(att["content_type"], ~r/application\/binary/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}/data.dat")
+ assert String.length(resp.body) == String.length(att1_data)
+ assert resp.body == att1_data
+ end
+ _ ->
+ :ok
+ end
+ end)
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ ddoc = List.last(docs)
+ ctype = "application/binary"
+ opts = [name: "data.dat", body: att1_data, content_type: ctype]
+ add_attachment(src_db_name, ddoc, opts)
+
+ wait_for_repl(src_db_name, repl_id, 33)
+
+ resp = Couch.get("/#{tgt_db_name}/#{ddoc["_id"]}")
+ atts = resp.body["_attachments"]
+ assert is_map(atts)
+ att = atts["readme.txt"]
+ assert is_map(att)
+ assert att["revpos"] == 2
+ assert String.match?(att["content_type"], ~r/text\/plain/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{ddoc["_id"]}/readme.txt")
+ assert String.length(resp.body) == String.length("some text")
+ assert resp.body == "some text"
+
+ att = atts["data.dat"]
+ assert is_map(att)
+ assert att["revpos"] == 3
+ assert String.match?(att["content_type"], ~r/application\/binary/)
+ assert att["stub"]
+
+ resp = Couch.get!("/#{tgt_db_name}/#{ddoc["_id"]}/data.dat")
+ assert String.length(resp.body) == String.length(att1_data)
+ assert resp.body == att1_data
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ # Check creating new normal documents
+ new_docs = make_docs(26..35)
+ new_docs = save_docs(src_db_name, new_docs)
+
+ wait_for_repl(src_db_name, repl_id, 43)
+
+ Enum.each(new_docs, fn doc ->
+ resp = Couch.get!("/#{tgt_db_name}/#{doc["_id"]}")
+ assert resp.status_code < 300
+ assert cmp_json(doc, resp.body)
+ end)
+
+ src_info = get_db_info(src_db_name)
+ tgt_info = get_db_info(tgt_db_name)
+
+ assert tgt_info["doc_count"] == src_info["doc_count"]
+
+ # Delete docs from the source
+
+ doc1 = Enum.at(new_docs, 0)
+ query = %{:rev => doc1["_rev"]}
+ Couch.delete!("/#{src_db_name}/#{doc1["_id"]}", query: query)
+
+ doc2 = Enum.at(new_docs, 6)
+ query = %{:rev => doc2["_rev"]}
+ Couch.delete!("/#{src_db_name}/#{doc2["_id"]}", query: query)
+
+ wait_for_repl(src_db_name, repl_id, 45)
+
+ resp = Couch.get("/#{tgt_db_name}/#{doc1["_id"]}")
+ assert resp.status_code == 404
+ resp = Couch.get("/#{tgt_db_name}/#{doc2["_id"]}")
+ assert resp.status_code == 404
+
+ changes = get_db_changes(tgt_db_name, %{:since => tgt_info["update_seq"]})
+    # Unfortunately there is no way of relying on ordering in a cluster,
+    # but we can assume a length of 2.
+ changes = for change <- changes["results"] do
+ {change["id"], change["deleted"]}
+ end
+ assert Enum.sort(changes) == [{doc1["_id"], true}, {doc2["_id"], true}]
+
+ # Cancel the replication
+ repl_body = %{:continuous => true, :cancel => true}
+ resp = replicate(repl_src, repl_tgt, body: repl_body)
+ assert resp["ok"]
+ assert resp["_local_id"] == repl_id
+
+ doc = %{"_id" => "foobar", "value": 666}
+ [doc] = save_docs(src_db_name, [doc])
+
+ wait_for_repl_stop(repl_id, 30000)
+
+ resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
+ assert resp.status_code == 404
+ end
+
+ def run_compressed_att_repl(src_prefix, tgt_prefix) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ doc = %{"_id" => "foobar"}
+ [doc] = save_docs(src_db_name, [doc])
+
+ att1_data = get_att1_data()
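+    # Build an attachment well over 128 KiB so the effect of gzip
+    # compression (or its absence) is clearly observable end to end.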
+ num_copies = 1 + round(128 * 1024 / String.length(att1_data))
+    big_att = String.duplicate(att1_data, num_copies)
+
+ doc = add_attachment(src_db_name, doc, [body: big_att])
+
+ # Disable attachment compression
+ set_config_raw("attachments", "compression_level", "0")
+
+ result = replicate(repl_src, repl_tgt)
+ assert result["ok"]
+ assert is_list(result["history"])
+ assert length(result["history"]) == 1
+ history = Enum.at(result["history"], 0)
+ assert history["missing_checked"] == 1
+ assert history["missing_found"] == 1
+ assert history["docs_read"] == 1
+ assert history["docs_written"] == 1
+ assert history["doc_write_failures"] == 0
+
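+    # A random token in the query string defeats any HTTP-level caching so
+    # the attachment metadata is read freshly from the server.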
+ token = Enum.random(1..1_000_000)
+ query = %{"att_encoding_info": "true", "bypass_cache": token}
+ resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}", query: query)
+ assert resp.status_code < 300
+ assert is_map(resp.body["_attachments"])
+ att = resp.body["_attachments"]["readme.txt"]
+ assert att["encoding"] == "gzip"
+ assert is_integer(att["length"])
+ assert is_integer(att["encoded_length"])
+ assert att["encoded_length"] < att["length"]
+ end
+
+ def run_non_admin_target_user_repl(src_prefix, tgt_prefix, ctx) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ set_security(tgt_db_name, %{
+ :admins => %{
+ :names => ["superman"],
+ :roles => ["god"]
+ }})
+
+ docs = make_docs(1..6)
+ ddoc = %{"_id" => "_design/foo", "language" => "javascript"}
+ docs = save_docs(src_db_name, [ddoc | docs])
+
+ sess = Couch.login(ctx[:userinfo])
+ resp = Couch.Session.get(sess, "/_session")
+ assert resp.body["ok"]
+ assert resp.body["userCtx"]["name"] == "joe"
+
+ opts = [
+ userinfo: ctx[:userinfo],
+ headers: [cookie: sess.cookie]
+ ]
+ result = replicate(repl_src, repl_tgt, opts)
+
+ assert Couch.Session.logout(sess).body["ok"]
+
+ assert result["ok"]
+ history = Enum.at(result["history"], 0)
+ assert history["docs_read"] == length(docs)
+ assert history["docs_written"] == length(docs) - 1 # ddoc write failed
+ assert history["doc_write_failures"] == 1 # ddoc write failed
+
+ Enum.each(docs, fn doc ->
+ resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
+ if String.starts_with?(doc["_id"], "_design/") do
+ assert resp.status_code == 404
+ else
+ assert HTTPotion.Response.success?(resp)
+ assert cmp_json(doc, resp.body)
+ end
+ end)
+ end
+
+ def run_non_admin_or_reader_source_user_repl(src_prefix, tgt_prefix, ctx) do
+ base_db_name = random_db_name()
+ src_db_name = base_db_name <> "_src"
+ tgt_db_name = base_db_name <> "_tgt"
+ repl_src = src_prefix <> src_db_name
+ repl_tgt = tgt_prefix <> tgt_db_name
+
+ create_db(src_db_name)
+ create_db(tgt_db_name)
+ delete_on_exit [src_db_name, tgt_db_name]
+
+ set_security(tgt_db_name, %{
+ :admins => %{
+ :names => ["superman"],
+ :roles => ["god"]
+ },
+ :readers => %{
+ :names => ["john"],
+ :roles => ["secret"]
+ }
+ })
+
+ docs = make_docs(1..6)
+ ddoc = %{"_id" => "_design/foo", "language" => "javascript"}
+ docs = save_docs(src_db_name, [ddoc | docs])
+
+ sess = Couch.login(ctx[:userinfo])
+ resp = Couch.Session.get(sess, "/_session")
+ assert resp.body["ok"]
+ assert resp.body["userCtx"]["name"] == "joe"
+
+ opts = [
+ userinfo: ctx[:userinfo],
+ headers: [cookie: sess.cookie]
+ ]
+ assert_raise(ExUnit.AssertionError, fn() ->
+ replicate(repl_src, repl_tgt, opts)
+ end)
+
+ assert Couch.Session.logout(sess).body["ok"]
+
+ Enum.each(docs, fn doc ->
+ resp = Couch.get("/#{tgt_db_name}/#{doc["_id"]}")
+ assert resp.status_code == 404
+ end)
+ end
+
+ def get_db_info(db_name) do
+ resp = Couch.get("/#{db_name}")
+ assert HTTPotion.Response.success?(resp)
+ resp.body
+ end
+
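+  # Trigger a replication by POSTing to /_replicate. Credentials (the admin
+  # account by default, or the :userinfo option) are injected into remote
+  # source/target URLs; plain local database names pass through unchanged.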
+ def replicate(src, tgt, options \\ []) do
+ {userinfo, options} = Keyword.pop(options, :userinfo)
+ userinfo = if userinfo == nil do
+ @admin_account
+ else
+ userinfo
+ end
+
+ src = set_user(src, userinfo)
+ tgt = set_user(tgt, userinfo)
+
+ defaults = [headers: [], body: %{}, timeout: 30_000]
+ options = Keyword.merge(defaults, options) |> Enum.into(%{})
+
+ %{body: body} = options
+ body = [source: src, target: tgt] |> Enum.into(body)
+ options = Map.put(options, :body, body)
+
+ resp = Couch.post("/_replicate", Enum.to_list options)
+ assert HTTPotion.Response.success?(resp), "#{inspect resp}"
+ resp.body
+ end
+
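+  # Cancelling a replication that is not running makes /_replicate return
+  # an error, which replicate/3 turns into an assertion failure; rescuing
+  # it makes cancellation effectively idempotent.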
+ def cancel_replication(src, tgt) do
+ body = %{:cancel => true}
+ try do
+ replicate(src, tgt, body: body)
+ rescue
+ ExUnit.AssertionError -> :ok
+ end
+ end
+
+ def get_db_changes(db_name, query \\ %{}) do
+ resp = Couch.get("/#{db_name}/_changes", query: query)
+ assert HTTPotion.Response.success?(resp), "#{inspect resp}"
+ resp.body
+ end
+
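+  # Bulk-save docs with w: 3 so writes are acknowledged by all replicas of
+  # the (assumed three-node) dev cluster before the tests read them back.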
+ def save_docs(db_name, docs) do
+ query = %{w: 3}
+ body = %{docs: docs}
+ resp = Couch.post("/#{db_name}/_bulk_docs", query: query, body: body)
+ assert HTTPotion.Response.success?(resp)
+ for {doc, resp} <- Enum.zip(docs, resp.body) do
+ assert resp["ok"], "Error saving doc: #{doc["_id"]}"
+ Map.put(doc, "_rev", resp["rev"])
+ end
+ end
+
+ def set_security(db_name, sec_props) do
+ resp = Couch.put("/#{db_name}/_security", body: :jiffy.encode(sec_props))
+ assert HTTPotion.Response.success?(resp)
+ assert resp.body["ok"]
+ end
+
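+  # PUT an attachment on a doc, defaulting to a small text file. The doc's
+  # current _rev is passed along when present, and the doc is returned with
+  # its new _rev.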
+ def add_attachment(db_name, doc, att \\ []) do
+ defaults = [
+ name: <<"readme.txt">>,
+ body: <<"some text">>,
+ content_type: "text/plain"
+ ]
+ att = Keyword.merge(defaults, att) |> Enum.into(%{})
+ uri = "/#{db_name}/#{URI.encode(doc["_id"])}/#{att[:name]}"
+ headers = ["Content-Type": att[:content_type]]
+ params = if doc["_rev"] do
+ %{:w => 3, :rev => doc["_rev"]}
+ else
+ %{:w => 3}
+ end
+ resp = Couch.put(uri, headers: headers, query: params, body: att[:body])
+ assert HTTPotion.Response.success?(resp)
+ Map.put(doc, "_rev", resp.body["rev"])
+ end
+
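+  # Poll the active replication task until it has checked the expected
+  # number of revisions and the source changes feed is drained, failing the
+  # test if the timeout expires first.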
+ def wait_for_repl(src_db_name, repl_id, expect_revs_checked) do
+ wait_for_repl(src_db_name, repl_id, expect_revs_checked, 30000)
+ end
+
+ def wait_for_repl(_, _, _, wait_left) when wait_left <= 0 do
+ assert false, "Timeout waiting for replication"
+ end
+
+ def wait_for_repl(src_db_name, repl_id, expect_revs_checked, wait_left) do
+ task = get_task(repl_id, 0)
+ through_seq = task["through_seq"]
+ revs_checked = task["revisions_checked"]
+ changes = get_db_changes(src_db_name, %{:since => through_seq})
+ if length(changes["results"]) > 0 or revs_checked < expect_revs_checked do
+ :timer.sleep(500)
+ wait_for_repl(src_db_name, repl_id, expect_revs_checked, wait_left - 500)
+    else
+      task
+    end
+ end
+
+ def wait_for_repl_stop(repl_id) do
+ wait_for_repl_stop(repl_id, 30000)
+ end
+
+ def wait_for_repl_stop(repl_id, wait_left) when wait_left <= 0 do
+ assert false, "Timeout waiting for replication task to stop: #{repl_id}"
+ end
+
+ def wait_for_repl_stop(repl_id, wait_left) do
+ task = get_task(repl_id, 0)
+ if is_map(task) do
+ :timer.sleep(500)
+ wait_for_repl_stop(repl_id, wait_left - 500)
+ end
+ end
+
+ def get_last_seq(db_name) do
+ body = get_db_changes(db_name, %{:since => "now"})
+ body["last_seq"]
+ end
+
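+  # Fetch the active task for a replication id, retrying every 500 ms for
+  # up to `delay` milliseconds before giving up.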
+ def get_task(repl_id, delay) when delay <= 0 do
+ try_get_task(repl_id)
+ end
+
+ def get_task(repl_id, delay) do
+ case try_get_task(repl_id) do
+ result when is_map(result) ->
+ result
+ _ ->
+ :timer.sleep(500)
+ get_task(repl_id, delay - 500)
+ end
+ end
+
+ def try_get_task(repl_id) do
+ resp = Couch.get("/_active_tasks")
+ assert HTTPotion.Response.success?(resp)
+ assert is_list(resp.body)
+    Enum.find(resp.body, nil, fn task ->
+ task["replication_id"] == repl_id
+ end)
+ end
+
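+  # Inject userinfo into an endpoint only when it is a URL without embedded
+  # credentials; bare local database names are returned unchanged.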
+ def set_user(uri, userinfo) do
+ case URI.parse(uri) do
+ %{scheme: nil} ->
+ uri
+ %{userinfo: nil} = uri ->
+ URI.to_string(Map.put(uri, :userinfo, userinfo))
+ _ ->
+ uri
+ end
+ end
+
+ def get_att1_data do
+ File.read!("test/data/lorem.txt")
+ end
+
+ def get_att2_data do
+ File.read!("test/data/lorem_b64.txt")
+ end
+
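+  # Recursive subset comparison: every key in lhs must appear in rhs with a
+  # matching value, while extra keys in rhs (e.g. _rev) are ignored.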
+ def cmp_json(lhs, rhs) when is_map(lhs) and is_map(rhs) do
+ Enum.reduce_while(lhs, true, fn {k, v}, true ->
+ if Map.has_key?(rhs, k) do
+ if cmp_json(v, rhs[k]) do
+ {:cont, true}
+ else
+ Logger.error "#{inspect lhs} != #{inspect rhs}"
+ {:halt, false}
+ end
+ else
+ Logger.error "#{inspect lhs} != #{inspect rhs}"
+ {:halt, false}
+ end
+ end)
+ end
+
+ def cmp_json(lhs, rhs), do: lhs == rhs
+
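+  # Clustered update sequences look like "<number>-<base64 erlang term>".
+  # The packed term is an implementation detail, but decoding it yields
+  # {node, range, update_seq} entries, from which we keep {range, update_seq}.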
+ def seq_to_shards(seq) do
+ for {_node, range, update_seq} <- decode_seq(seq) do
+ {range, update_seq}
+ end
+ end
+
+ def decode_seq(seq) do
+ seq = String.replace(seq, ~r/\d+-/, "", global: false)
+ :erlang.binary_to_term(Base.url_decode64!(seq, padding: false))
+ end
+
+ def delete_on_exit(db_names) when is_list(db_names) do
+ on_exit(fn ->
+ Enum.each(db_names, fn(name) ->
+ delete_db name
+ end)
+ end)
+ end
+end
diff --git a/test/elixir/test/rewrite_test.exs b/test/elixir/test/rewrite_test.exs
new file mode 100644
index 000000000..7db9d36bc
--- /dev/null
+++ b/test/elixir/test/rewrite_test.exs
@@ -0,0 +1,339 @@
+defmodule RewriteTest do
+ use CouchTestCase
+
+ @moduletag :js_engine
+
+ @moduledoc """
+ Test CouchDB rewrites
+ This is a port of the rewrite.js suite
+ """
+
+ Enum.each(["test_rewrite_suite_db", "test_rewrite_suite_db%2Fwith_slashes"], fn db_name ->
+ @tag with_random_db: db_name
+ @tag config: [
+ {"httpd", "authentication_handlers", "{couch_httpd_auth, special_test_authentication_handler}"},
+ {"httpd", "WWW-Authenticate", "X-Couch-Test-Auth"}
+ ]
+ test "Test basic rewrites on #{db_name}", context do
+ db_name = context[:db_name]
+ ddoc = ~S"""
+ {
+ "_id": "_design/test",
+ "language": "javascript",
+ "_attachments": {
+ "foo.txt": {
+ "content_type":"text/plain",
+ "data": "VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ="
+ }
+ },
+ "rewrites": [
+ {
+ "from": "foo",
+ "to": "foo.txt"
+ },
+ {
+ "from": "foo2",
+ "to": "foo.txt",
+ "method": "GET"
+ },
+ {
+ "from": "hello/:id",
+ "to": "_update/hello/:id",
+ "method": "PUT"
+ },
+ {
+ "from": "/welcome",
+ "to": "_show/welcome"
+ },
+ {
+ "from": "/welcome/:name",
+ "to": "_show/welcome",
+ "query": {
+ "name": ":name"
+ }
+ },
+ {
+ "from": "/welcome2",
+ "to": "_show/welcome",
+ "query": {
+ "name": "user"
+ }
+ },
+ {
+ "from": "/welcome3/:name",
+ "to": "_update/welcome2/:name",
+ "method": "PUT"
+ },
+ {
+ "from": "/welcome3/:name",
+ "to": "_show/welcome2/:name",
+ "method": "GET"
+ },
+ {
+ "from": "/welcome4/*",
+ "to" : "_show/welcome3",
+ "query": {
+ "name": "*"
+ }
+ },
+ {
+ "from": "/welcome5/*",
+ "to" : "_show/*",
+ "query": {
+ "name": "*"
+ }
+ },
+ {
+ "from": "basicView",
+ "to": "_view/basicView"
+ },
+ {
+ "from": "simpleForm/basicView",
+ "to": "_list/simpleForm/basicView"
+ },
+ {
+ "from": "simpleForm/basicViewFixed",
+ "to": "_list/simpleForm/basicView",
+ "query": {
+ "startkey": 3,
+ "endkey": 8
+ }
+ },
+ {
+ "from": "simpleForm/basicViewPath/:start/:end",
+ "to": "_list/simpleForm/basicView",
+ "query": {
+ "startkey": ":start",
+ "endkey": ":end"
+ },
+ "formats": {
+ "start": "int",
+ "end": "int"
+ }
+ },
+ {
+ "from": "simpleForm/complexView",
+ "to": "_list/simpleForm/complexView",
+ "query": {
+ "key": [1, 2]
+ }
+ },
+ {
+ "from": "simpleForm/complexView2",
+ "to": "_list/simpleForm/complexView",
+ "query": {
+ "key": ["test", {}]
+ }
+ },
+ {
+ "from": "simpleForm/complexView3",
+ "to": "_list/simpleForm/complexView",
+ "query": {
+ "key": ["test", ["test", "essai"]]
+ }
+ },
+ {
+ "from": "simpleForm/complexView4",
+ "to": "_list/simpleForm/complexView2",
+ "query": {
+ "key": {"c": 1}
+ }
+ },
+ {
+ "from": "simpleForm/complexView5/:a/:b",
+ "to": "_list/simpleForm/complexView3",
+ "query": {
+ "key": [":a", ":b"]
+ }
+ },
+ {
+ "from": "simpleForm/complexView6",
+ "to": "_list/simpleForm/complexView3",
+ "query": {
+ "key": [":a", ":b"]
+ }
+ },
+ {
+ "from": "simpleForm/complexView7/:a/:b",
+ "to": "_view/complexView3",
+ "query": {
+ "key": [":a", ":b"],
+ "include_docs": ":doc"
+ },
+ "format": {
+ "doc": "bool"
+ }
+
+ },
+ {
+ "from": "/",
+ "to": "_view/basicView"
+ },
+ {
+ "from": "/db/*",
+ "to": "../../*"
+ }
+ ],
+ "lists": {
+ "simpleForm": "function(head, req) {
+ log(\"simpleForm\");
+ send(\"<ul>\");
+ var row, row_number = 0, prevKey, firstKey = null;
+ while (row = getRow()) {
+ row_number += 1;
+ if (!firstKey) firstKey = row.key;
+ prevKey = row.key;
+ send(\"\\n<li>Key: \"+row.key
+ +\" Value: \"+row.value
+ +\" LineNo: \"+row_number+\"</li>\");
+ }
+ return \"</ul><p>FirstKey: \"+ firstKey + \" LastKey: \"+ prevKey+\"</p>\";
+ }"
+ },
+ "shows": {
+ "welcome": "(function(doc,req) {
+ return \"Welcome \" + req.query[\"name\"];
+ })",
+ "welcome2": "(function(doc, req) {
+ return \"Welcome \" + doc.name;
+ })",
+ "welcome3": "(function(doc,req) {
+ return \"Welcome \" + req.query[\"name\"];
+ })"
+ },
+ "updates": {
+ "hello" : "(function(doc, req) {
+ if (!doc) {
+ if (req.id) {
+ return [{
+ _id : req.id
+ }, \"New World\"]
+ }
+ return [null, \"Empty World\"];
+ }
+ doc.world = \"hello\";
+ doc.edited_by = req.userCtx;
+ return [doc, \"hello doc\"];
+ })",
+ "welcome2": "(function(doc, req) {
+ if (!doc) {
+ if (req.id) {
+ return [{
+ _id: req.id,
+ name: req.id
+ }, \"New World\"]
+ }
+ return [null, \"Empty World\"];
+ }
+ return [doc, \"hello doc\"];
+ })"
+ },
+ "views" : {
+ "basicView" : {
+ "map" : "(function(doc) {
+ if (doc.integer) {
+ emit(doc.integer, doc.string);
+ }
+
+ })"
+ },
+ "complexView": {
+ "map": "(function(doc) {
+ if (doc.type == \"complex\") {
+ emit([doc.a, doc.b], doc.string);
+ }
+ })"
+ },
+ "complexView2": {
+ "map": "(function(doc) {
+ if (doc.type == \"complex\") {
+ emit(doc.a, doc.string);
+ }
+ })"
+ },
+ "complexView3": {
+ "map": "(function(doc) {
+ if (doc.type == \"complex\") {
+ emit(doc.b, doc.string);
+ }
+ })"
+ }
+ }
+ }
+ """
+ ddoc = String.replace(ddoc, ~r/[\r\n]+/, "")
+
+ docs1 = make_docs(0..9)
+ docs2 = [
+ %{"a" => 1, "b" => 1, "string" => "doc 1", "type" => "complex"},
+ %{"a" => 1, "b" => 2, "string" => "doc 2", "type" => "complex"},
+ %{"a" => "test", "b" => %{}, "string" => "doc 3", "type" => "complex"},
+ %{"a" => "test", "b" => ["test", "essai"], "string" => "doc 4", "type" => "complex"},
+ %{"a" => %{"c" => 1}, "b" => "", "string" => "doc 5", "type" => "complex"}
+ ]
+
+ assert Couch.put("/#{db_name}/_design/test", [body: ddoc]).body["ok"]
+ assert Couch.post("/#{db_name}/_bulk_docs", [body: %{:docs => docs1}, query: %{w: 3}]).status_code == 201
+ assert Couch.post("/#{db_name}/_bulk_docs", [body: %{:docs => docs2}, query: %{w: 3}]).status_code == 201
+
+ # Test simple rewriting
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite/foo")
+ assert resp.body == "This is a base64 encoded text"
+ assert resp.headers["Content-Type"] == "text/plain"
+
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite/foo2")
+ assert resp.body == "This is a base64 encoded text"
+ assert resp.headers["Content-Type"] == "text/plain"
+
+ # Test POST, hello update world
+ resp = Couch.post("/#{db_name}", [body: %{"word" => "plankton", "name" => "Rusty"}]).body
+ assert resp["ok"]
+ doc_id = resp["id"]
+ assert doc_id
+
+ resp = Couch.put("/#{db_name}/_design/test/_rewrite/hello/#{doc_id}")
+ assert resp.status_code == 201
+ assert resp.body == "hello doc"
+ assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
+
+ assert Couch.get("/#{db_name}/#{doc_id}").body["world"] == "hello"
+
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome?name=user")
+ assert resp.body == "Welcome user"
+
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome/user")
+ assert resp.body == "Welcome user"
+
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome2")
+ assert resp.body == "Welcome user"
+
+ resp = Couch.put("/#{db_name}/_design/test/_rewrite/welcome3/test")
+ assert resp.status_code == 201
+ assert resp.body == "New World"
+ assert String.match?(resp.headers["Content-Type"], ~r/charset=utf-8/)
+
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite/welcome3/test")
+ assert resp.body == "Welcome test"
+
+ # TODO: port the two "bugged" tests from rewrite.js
+
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite/basicView")
+ assert resp.status_code == 200
+ assert resp.body["total_rows"] == 9
+
+ resp = Couch.get("/#{db_name}/_design/test/_rewrite")
+ assert resp.status_code == 200
+ assert resp.body["total_rows"] == 9
+
+ # TODO: port _list function tests and everything below in rewrite.js
+    # This is currently broken because _list functions default to application/json
+ # response bodies and my attempts to change the content-type from within the
+ # _list function have not yet succeeded.
+ #
+ # Test GET with query params
+ # resp = Couch.get("/#{db_name}/_design/test/_rewrite/simpleForm/basicView", query: %{startkey: 3, endkey: 8})
+ # Logger.error("GOT RESP: #{inspect resp.body}")
+ # assert resp.status_code == 200
+ end
+ end)
+end
diff --git a/test/elixir/test/security_validation_test.exs b/test/elixir/test/security_validation_test.exs
new file mode 100644
index 000000000..526f06b2a
--- /dev/null
+++ b/test/elixir/test/security_validation_test.exs
@@ -0,0 +1,310 @@
+defmodule SecurityValidationTest do
+ use CouchTestCase
+
+ @moduletag :security
+
+ @moduledoc """
+ Test CouchDB Security Validations
+ This is a port of the security_validation.js suite
+ """
+
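+  # Basic auth headers used throughout; each value is the Base64 encoding
+  # of "user:password" (e.g. Base.encode64("jerry:mouse")), as noted inline.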
+ @auth_headers %{
+ jerry: [
+ authorization: "Basic amVycnk6bW91c2U=" # jerry:mouse
+ ],
+ tom: [
+ authorization: "Basic dG9tOmNhdA==" # tom:cat
+ ],
+ spike_cat: [
+ authorization: "Basic c3Bpa2U6Y2F0" # spike:cat - which is wrong
+ ]
+ }
+
+ @ddoc %{
+ _id: "_design/test",
+ language: "javascript",
+ validate_doc_update: ~s"""
+ (function (newDoc, oldDoc, userCtx, secObj) {
+ if (secObj.admin_override) {
+ if (userCtx.roles.indexOf('_admin') != -1) {
+ // user is admin, they can do anything
+ return true;
+ }
+ }
+ // docs should have an author field.
+ if (!newDoc._deleted && !newDoc.author) {
+ throw {forbidden:
+ \"Documents must have an author field\"};
+ }
+ if (oldDoc && oldDoc.author != userCtx.name) {
+ throw {unauthorized:
+ \"You are '\" + userCtx.name + \"', not the author '\" + oldDoc.author + \"' of this document. You jerk.\"};
+ }
+ })
+ """
+ }
+
+ setup_all do
+ auth_db_name = random_db_name()
+ {:ok, _} = create_db(auth_db_name)
+ on_exit(fn -> delete_db(auth_db_name) end)
+
+ configs = [
+ {"httpd", "authentication_handlers", "{couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}"},
+ {"couch_httpd_auth", "authentication_db", auth_db_name},
+ {"chttpd_auth", "authentication_db", auth_db_name}
+ ]
+ Enum.each(configs, &set_config/1)
+
+    # Ported comment from security_validation.js:
+    # the special-case handler no longer exists in clusters, so we replicate
+    # its behavior using a "normal" authentication DB, even though the tests
+    # might no longer run universally (which is why the "X-Couch-Test-Auth"
+    # header was introduced). Note this needs to happen INSIDE the configured
+    # server to propagate correctly; at least the tests run in the build.
+ users = [{"tom", "cat"}, {"jerry", "mouse"}, {"spike", "dog"}]
+ Enum.each(users, fn {name, pass} ->
+ doc = %{
+ :_id => "org.couchdb.user:#{name}",
+ :name => name,
+ :roles => [],
+ :password => pass
+ }
+ assert Couch.post("/#{auth_db_name}", body: doc).body["ok"]
+ end)
+
+ {:ok, [auth_db_name: auth_db_name]}
+ end
+
+ @tag :with_db_name
+ test "Saving document using the wrong credentials", context do
+ headers = @auth_headers[:spike_cat] # spike:cat - which is wrong
+ resp = Couch.post("/#{context[:db_name]}", [body: %{foo: 1}, headers: headers])
+ assert resp.body["error"] == "unauthorized"
+ assert resp.status_code == 401
+ end
+
+ test "Force basic login" do
+ headers = @auth_headers[:spike_cat] # spike:cat - which is wrong
+ resp = Couch.get("/_session", [query: %{basic: true}, headers: headers])
+ assert resp.status_code == 401
+ assert resp.body["error"] == "unauthorized"
+ end
+
+ @tag :with_db
+ test "Jerry can save a document normally", context do
+ headers = @auth_headers[:jerry]
+ assert Couch.get("/_session", headers: headers).body["userCtx"]["name"] == "jerry"
+
+ doc = %{_id: "testdoc", foo: 1, author: "jerry"}
+ assert Couch.post("/#{context[:db_name]}", body: doc).body["ok"]
+ end
+
+ @tag :with_db
+ test "Non-admin user cannot save a ddoc", context do
+ headers = @auth_headers[:jerry]
+ resp = Couch.post("/#{context[:db_name]}", [body: @ddoc, headers: headers])
+ assert resp.status_code == 403
+ assert resp.body["error"] == "forbidden"
+ end
+
+ @tag :with_db
+ test "Ddoc writes with admin and replication contexts", context do
+ db_name = context[:db_name]
+ sec_obj = %{admins: %{names: ["jerry"]}}
+
+ assert Couch.put("/#{db_name}/_security", body: sec_obj).body["ok"]
+ assert Couch.post("/#{db_name}", body: @ddoc).body["ok"]
+
+ new_rev = "2-642e20f96624a0aae6025b4dba0c6fb2"
+ ddoc = Map.put(@ddoc, :_rev, new_rev) |> Map.put(:foo, "bar")
+ headers = @auth_headers[:tom]
+ # attempt to save the doc in a replication context, i.e. with ?new_edits=false
+ resp = Couch.put("/#{db_name}/#{ddoc[:_id]}", [body: ddoc, headers: headers, query: %{new_edits: false}])
+ assert resp.status_code == 403
+ assert resp.body["error"] == "forbidden"
+ end
+
+ test "_session API" do
+ headers = @auth_headers[:jerry]
+ resp = Couch.get("/_session", headers: headers)
+ assert resp.body["userCtx"]["name"] == "jerry"
+ assert resp.body["userCtx"]["roles"] == []
+ end
+
+ @tag :with_db
+ test "Author presence and user security", context do
+ db_name = context[:db_name]
+ sec_obj = %{admin_override: false, admins: %{names: ["jerry"]}}
+
+ jerry = @auth_headers[:jerry]
+ tom = @auth_headers[:tom]
+
+ assert Couch.put("/#{db_name}/_security", body: sec_obj).body["ok"]
+ assert Couch.post("/#{db_name}", body: @ddoc).body["ok"]
+
+ resp = Couch.put("/#{db_name}/test_doc", [body: %{foo: 1}, headers: jerry])
+ assert resp.status_code == 403
+ assert resp.body["error"] == "forbidden"
+ assert resp.body["reason"] == "Documents must have an author field"
+
+ # Jerry can write the document
+ assert Couch.put("/#{db_name}/test_doc", [body: %{foo: 1, author: "jerry"}, headers: jerry]).body["ok"]
+
+ test_doc = Couch.get("/#{db_name}/test_doc").body
+
+ # Tom cannot write the document
+ resp = Couch.post("/#{db_name}", [body: %{foo: 1}, headers: tom])
+ assert resp.status_code == 403
+ assert resp.body["error"] == "forbidden"
+
+ # Enable admin override for changing author values
+ assert Couch.put("/#{db_name}/_security", body: %{sec_obj | admin_override: true}).body["ok"]
+
+ # Change owner to Tom
+ test_doc = Map.put(test_doc, "author", "tom")
+ resp = Couch.put("/#{db_name}/test_doc", body: test_doc)
+ assert resp.body["ok"]
+ test_doc = Map.put(test_doc, "_rev", resp.body["rev"])
+
+ # Now Tom can update the document
+ test_doc = Map.put(test_doc, "foo", "asdf")
+ resp = Couch.put("/#{db_name}/test_doc", [body: test_doc, headers: tom])
+ assert resp.body["ok"]
+ test_doc = Map.put(test_doc, "_rev", resp.body["rev"])
+
+ # Jerry can't delete it
+ retry_until(fn() ->
+ opts = [headers: jerry]
+ resp = Couch.delete("/#{db_name}/test_doc?rev=#{test_doc["_rev"]}", opts)
+ resp.status_code == 401 and resp.body["error"] == "unauthorized"
+ end)
+ end
+end
+
+# TODO: port remainder of security_validation.js suite
+# remaining bits reproduced below:
+#
+# // try to do something lame
+# try {
+# db.setDbProperty("_security", ["foo"]);
+# T(false && "can't do this");
+# } catch(e) {}
+#
+# // go back to normal
+# T(db.setDbProperty("_security", {admin_override : false}).ok);
+#
+# // Now delete document
+# T(user2Db.deleteDoc(doc).ok);
+#
+# // now test bulk docs
+# var docs = [{_id:"bahbah",author:"jerry",foo:"bar"},{_id:"fahfah",foo:"baz"}];
+#
+# // Create the docs
+# var results = db.bulkSave(docs);
+#
+# T(results[0].rev)
+# T(results[0].error == undefined)
+# T(results[1].rev === undefined)
+# T(results[1].error == "forbidden")
+#
+# T(db.open("bahbah"));
+# T(db.open("fahfah") == null);
+#
+#
+# // now all or nothing with a failure - no more available on cluster
+#/* var docs = [{_id:"booboo",author:"Damien Katz",foo:"bar"},{_id:"foofoo",foo:"baz"}];
+#
+# // Create the docs
+# var results = db.bulkSave(docs, {all_or_nothing:true});
+#
+# T(results.errors.length == 1);
+# T(results.errors[0].error == "forbidden");
+# T(db.open("booboo") == null);
+# T(db.open("foofoo") == null);
+#*/
+#
+# // Now test replication
+# var AuthHeaders = {"Authorization": "Basic c3Bpa2U6ZG9n"}; // spike
+# adminDbA = new CouchDB("" + db_name + "_a", {"X-Couch-Full-Commit":"false"});
+# adminDbB = new CouchDB("" + db_name + "_b", {"X-Couch-Full-Commit":"false"});
+# var dbA = new CouchDB("" + db_name + "_a", AuthHeaders);
+# var dbB = new CouchDB("" + db_name + "_b", AuthHeaders);
+# // looping does not really add value as the scenario is the same anyway (there's nothing to be gained from it)
+# var A = CouchDB.protocol + CouchDB.host + "/" + db_name + "_a";
+# var B = CouchDB.protocol + CouchDB.host + "/" + db_name + "_b";
+#
+# // (the databases never exist beforehand - and we made sure they're deleted below)
+# //adminDbA.deleteDb();
+# adminDbA.createDb();
+# //adminDbB.deleteDb();
+# adminDbB.createDb();
+#
+# // save and replicate documents that will and will not pass our design
+# // doc validation function.
+# T(dbA.save({_id:"foo1",value:"a",author:"tom"}).ok);
+# T(dbA.save({_id:"foo2",value:"a",author:"spike"}).ok);
+# T(dbA.save({_id:"bad1",value:"a"}).ok);
+#
+# T(CouchDB.replicate(A, B, {headers:AuthHeaders}).ok);
+# T(CouchDB.replicate(B, A, {headers:AuthHeaders}).ok);
+#
+# T(dbA.open("foo1"));
+# T(dbB.open("foo1"));
+# T(dbA.open("foo2"));
+# T(dbB.open("foo2"));
+#
+# // save the design doc to dbA
+# delete designDoc._rev; // clear rev from previous saves
+# T(adminDbA.save(designDoc).ok);
+#
+# // no effect on already saved docs
+# T(dbA.open("bad1"));
+#
+# // Update some docs on dbB. Since the design hasn't replicated, anything
+# // is allowed.
+#
+# // this edit will fail validation on replication to dbA (no author)
+# T(dbB.save({_id:"bad2",value:"a"}).ok);
+#
+# // this edit will fail security on replication to dbA (wrong author
+# // replicating the change)
+# var foo1 = dbB.open("foo1");
+# foo1.value = "b";
+# T(dbB.save(foo1).ok);
+#
+# // this is a legal edit
+# var foo2 = dbB.open("foo2");
+# foo2.value = "b";
+# T(dbB.save(foo2).ok);
+#
+# var results = CouchDB.replicate({"url": B, "headers": AuthHeaders}, {"url": A, "headers": AuthHeaders}, {headers:AuthHeaders});
+# T(results.ok);
+# TEquals(1, results.history[0].docs_written);
+# TEquals(2, results.history[0].doc_write_failures);
+#
+# // bad2 should not be on dbA
+# T(dbA.open("bad2") == null);
+#
+# // The edit to foo1 should not have replicated.
+# T(dbA.open("foo1").value == "a");
+#
+# // The edit to foo2 should have replicated.
+# T(dbA.open("foo2").value == "b");
+# });
+#
+# // cleanup
+# db.deleteDb();
+# if(adminDbA){
+# adminDbA.deleteDb();
+# }
+# if(adminDbB){
+# adminDbB.deleteDb();
+# }
+# authDb.deleteDb();
+# // have to clean up authDb on the backside :(
+# var req = CouchDB.newXhr();
+# req.open("DELETE", "http://127.0.0.1:15986/" + authDb_name, false);
+# req.send("");
+# CouchDB.maybeThrowError(req);
+#};
diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs
new file mode 100644
index 000000000..e8f394345
--- /dev/null
+++ b/test/elixir/test/test_helper.exs
@@ -0,0 +1,237 @@
+ExUnit.configure(exclude: [pending: true])
+ExUnit.start()
+
+defmodule CouchTestCase do
+ use ExUnit.Case
+
+ defmacro __using__(_opts) do
+ quote do
+ require Logger
+ use ExUnit.Case
+
+ setup context do
+ setup_funs = [
+ &set_db_context/1,
+ &set_config_context/1,
+ &set_user_context/1
+ ]
+ context = Enum.reduce(setup_funs, context, fn setup_fun, acc ->
+ setup_fun.(acc)
+ end)
+ {:ok, context}
+ end
+
+ def set_db_context(context) do
+ context = case context do
+ %{:with_db_name => true} ->
+ Map.put(context, :db_name, random_db_name())
+ %{:with_db_name => db_name} when is_binary(db_name) ->
+ Map.put(context, :db_name, db_name)
+ %{:with_random_db => db_name} when is_binary(db_name) ->
+ context
+ |> Map.put(:db_name, random_db_name(db_name))
+ |> Map.put(:with_db, true)
+ %{:with_db => true} ->
+ Map.put(context, :db_name, random_db_name())
+ %{:with_db => db_name} when is_binary(db_name) ->
+ Map.put(context, :db_name, db_name)
+ _ ->
+ context
+ end
+
+ if Map.has_key? context, :with_db do
+ {:ok, _} = create_db(context[:db_name])
+ on_exit(fn -> delete_db(context[:db_name]) end)
+ end
+
+ context
+ end
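+
+ # Illustrative tag usage for the clauses above:
+ #   @tag :with_db                  # create (and delete) a random db
+ #   @tag with_db: "db_name"        # create (and delete) this exact db
+ #   @tag :with_db_name             # assign a random :db_name without creating it
+ #   @tag with_random_db: "prefix"  # create (and delete) a random db with this prefix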
+
+ def set_config_context(context) do
+ if is_list(context[:config]) do
+ Enum.each(context[:config], fn cfg ->
+ set_config(cfg)
+ end)
+ end
+ context
+ end
+
+ def set_user_context(context) do
+ case Map.get(context, :user) do
+ nil ->
+ context
+ user when is_list(user) ->
+ user = create_user(user)
+ on_exit(fn ->
+ query = %{:rev => user["_rev"]}
+ resp = Couch.delete("/_users/#{user["_id"]}", query: query)
+ assert HTTPotion.Response.success? resp
+ end)
+ context = Map.put(context, :user, user)
+ userinfo = user["name"] <> ":" <> user["password"]
+ Map.put(context, :userinfo, userinfo)
+ end
+ end
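+
+ # Illustrative tag driving the function above:
+ #   @tag user: [name: "jerry", password: "mouse", roles: []]
+ # creates (or updates) the user for the test and deletes it on exit.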
+
+ def random_db_name do
+ random_db_name("random-test-db")
+ end
+
+ def random_db_name(prefix) do
+ time = :erlang.monotonic_time()
+ umi = :erlang.unique_integer([:monotonic])
+ "#{prefix}-#{time}-#{umi}"
+ end
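+
+ # Illustrative (values made up): random_db_name("view") might return
+ # "view--576460752303423420-17"; the monotonic time plus unique integer
+ # keeps names unique within a test run.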
+
+ def set_config({section, key, value}) do
+ existing = set_config_raw(section, key, value)
+ on_exit(fn ->
+ Enum.each(existing, fn {node, prev_value} ->
+ if prev_value != "" do
+ url = "/_node/#{node}/_config/#{section}/#{key}"
+ headers = ["X-Couch-Persist": "false"]
+ body = :jiffy.encode(prev_value)
+ resp = Couch.put(url, headers: headers, body: body)
+ assert resp.status_code == 200
+ else
+ url = "/_node/#{node}/_config/#{section}/#{key}"
+ headers = ["X-Couch-Persist": "false"]
+ resp = Couch.delete(url, headers: headers)
+ assert resp.status_code == 200
+ end
+ end)
+ end)
+ end
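+
+ # Illustrative usage: set_config({"uuids", "algorithm", "sequential"})
+ # sets the value on every node and registers an on_exit hook that restores
+ # (or deletes) the previous value when the test finishes.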
+
+ def set_config_raw(section, key, value) do
+ resp = Couch.get("/_membership")
+ Enum.map(resp.body["all_nodes"], fn node ->
+ url = "/_node/#{node}/_config/#{section}/#{key}"
+ headers = ["X-Couch-Persist": "false"]
+ body = :jiffy.encode(value)
+ resp = Couch.put(url, headers: headers, body: body)
+ assert resp.status_code == 200
+ {node, resp.body}
+ end)
+ end
+
+ def create_user(user) do
+ required = [:name, :password, :roles]
+ Enum.each(required, fn key ->
+ assert Keyword.has_key?(user, key), "User missing key: #{key}"
+ end)
+
+ name = Keyword.get(user, :name)
+ password = Keyword.get(user, :password)
+ roles = Keyword.get(user, :roles)
+
+ assert is_binary(name), "User name must be a string"
+ assert is_binary(password), "User password must be a string"
+ assert is_list(roles), "Roles must be a list of strings"
+ Enum.each(roles, fn role ->
+ assert is_binary(role), "Roles must be a list of strings"
+ end)
+
+ user_doc = %{
+ "_id" => "org.couchdb.user:" <> name,
+ "type" => "user",
+ "name" => name,
+ "roles" => roles,
+ "password" => password
+ }
+ resp = Couch.get("/_users/#{user_doc["_id"]}")
+ user_doc = case resp.status_code do
+ 404 ->
+ user_doc
+ sc when sc >= 200 and sc < 300 ->
+ Map.put(user_doc, "_rev", resp.body["_rev"])
+ end
+ resp = Couch.post("/_users", body: user_doc)
+ assert HTTPotion.Response.success? resp
+ assert resp.body["ok"]
+ Map.put(user_doc, "_rev", resp.body["rev"])
+ end
+
+ def create_db(db_name) do
+ resp = Couch.put("/#{db_name}")
+ assert resp.status_code == 201
+ assert resp.body == %{"ok" => true}
+ {:ok, resp}
+ end
+
+ def delete_db(db_name) do
+ resp = Couch.delete("/#{db_name}")
+ assert resp.status_code == 200
+ assert resp.body == %{"ok" => true}
+ {:ok, resp}
+ end
+
+ def create_doc(db_name, body) do
+ resp = Couch.post("/#{db_name}", [body: body])
+ assert resp.status_code == 201
+ assert resp.body["ok"]
+ {:ok, resp}
+ end
+
+ def sample_doc_foo do
+ %{
+ "_id": "foo",
+ "bar": "baz"
+ }
+ end
+
+ # Generate range of docs with strings as keys
+ def make_docs(id_range) do
+ for id <- id_range, str_id = Integer.to_string(id) do
+ %{"_id" => str_id, "integer" => id, "string" => str_id}
+ end
+ end
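+
+ # Illustrative: make_docs(1..2) yields
+ #   [%{"_id" => "1", "integer" => 1, "string" => "1"},
+ #    %{"_id" => "2", "integer" => 2, "string" => "2"}]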
+
+ # Generate range of docs with atoms as keys, which are more
+ # idiomatic, and are encoded by jiffy to binaries
+ def create_docs(id_range) do
+ for id <- id_range, str_id = Integer.to_string(id) do
+ %{_id: str_id, integer: id, string: str_id}
+ end
+ end
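+
+ # Illustrative: create_docs(1..1) yields [%{_id: "1", integer: 1, string: "1"}]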
+
+ def retry_until(condition, sleep \\ 100, timeout \\ 5000) do
+ retry_until(condition, now(:ms), sleep, timeout)
+ end
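+
+ # Illustrative usage (see e.g. the security validation tests):
+ #   retry_until(fn -> Couch.get("/db/doc").status_code == 200 end)
+ # polls the condition every `sleep` ms (default 100) until it is truthy,
+ # raising "timed out" after `timeout` ms (default 5000).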
+
+ defp retry_until(condition, start, sleep, timeout) do
+ if now(:ms) > start + timeout do
+ raise "timed out"
+ else
+ if condition.() do
+ :ok
+ else
+ :timer.sleep(sleep)
+ retry_until(condition, start, sleep, timeout)
+ end
+ end
+ end
+
+ defp now(:ms) do
+ # convert directly to milliseconds; the retry/timeout math above expects ms
+ :erlang.system_time(:millisecond)
+ end
+
+ @spec rev(map(), map()) :: map()
+ def rev(doc = %{_id: id}, %{"id" => id, "rev" => rev}) do
+ Map.put(doc, :_rev, rev)
+ end
+
+ @spec rev([map()], [map()]) :: [map()]
+ def rev(docs, rows) when length(docs) == length(rows) do
+ for {doc, row} <- Enum.zip(docs, rows), do: rev(doc, row)
+ end
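+
+ # Illustrative: stamp new revisions onto docs after a bulk save:
+ #   docs = rev(docs, Couch.post("/#{db_name}/_bulk_docs", body: %{docs: docs}).body)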
+
+ def pretty_inspect(resp) do
+ opts = [
+ pretty: true, width: 20, limit: :infinity, printable_limit: :infinity]
+ inspect(resp, opts)
+ end
+
+ end
+ end
+end
diff --git a/test/elixir/test/uuids_test.exs b/test/elixir/test/uuids_test.exs
new file mode 100644
index 000000000..3eda45824
--- /dev/null
+++ b/test/elixir/test/uuids_test.exs
@@ -0,0 +1,94 @@
+defmodule UUIDsTest do
+ use CouchTestCase
+
+ @moduledoc """
+ Test CouchDB UUIDs API
+ This is a port of the uuids.js suite
+ """
+
+ test "cache busting headers are set" do
+ resp = Couch.get("/_uuids")
+ assert resp.status_code == 200
+ assert Regex.match?(~r/no-cache/, resp.headers["Cache-Control"])
+ assert resp.headers["Pragma"] == "no-cache"
+ assert String.length(resp.headers["ETag"]) > 0
+ end
+
+ test "can return single uuid" do
+ resp = Couch.get("/_uuids")
+ assert resp.status_code == 200
+ [uuid1] = resp.body["uuids"]
+
+ resp = Couch.get("/_uuids", query: %{:count => 1})
+ assert resp.status_code == 200
+ [uuid2] = resp.body["uuids"]
+
+ assert uuid1 != uuid2
+ end
+
+ test "no duplicates in 1,000 UUIDs" do
+ resp = Couch.get("/_uuids", query: %{:count => 1000})
+ assert resp.status_code == 200
+ uuids = resp.body["uuids"]
+
+ assert length(Enum.uniq(uuids)) == length(uuids)
+ end
+
+ test "Method Not Allowed error on POST" do
+ resp = Couch.post("/_uuids", query: %{:count => 1000})
+ assert resp.status_code == 405
+ end
+
+ test "Bad Request error when exceeding max UUID count" do
+ resp = Couch.get("/_uuids", query: %{:count => 1001})
+ assert resp.status_code == 400
+ end
+
+ @tag config: [
+ {"uuids", "algorithm", "sequential"}
+ ]
+ test "sequential uuids are sequential" do
+ resp = Couch.get("/_uuids", query: %{:count => 1000})
+ assert resp.status_code == 200
+ Enum.reduce(resp.body["uuids"], fn curr, acc ->
+ assert String.length(curr) == 32
+ assert acc < curr
+ curr
+ end)
+ end
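+
+ # ("sequential" UUIDs combine a random 26-hex-char prefix with an
+ # incrementing 6-char suffix, hence the ordering asserted above.)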
+
+ @tag config: [
+ {"uuids", "algorithm", "utc_random"}
+ ]
+ test "utc_random uuids are roughly random" do
+ resp = Couch.get("/_uuids", query: %{:count => 1000})
+ assert resp.status_code == 200
+ uuids = resp.body["uuids"]
+
+ assert String.length(Enum.at(uuids, 1)) == 32
+
+ # Assert no collisions
+ assert length(Enum.uniq(uuids)) == length(uuids)
+
+ # Assert rough ordering of UUIDs
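+ # (utc_random UUIDs put the hex-encoded microseconds since the Unix epoch
+ # in their first 14 characters, so comparing slices 0..13 compares timestamps)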
+ u1 = String.slice(Enum.at(uuids, 1), 0..13)
+ u2 = String.slice(Enum.at(uuids, -1), 0..13)
+ assert u1 < u2
+ end
+
+ @utc_id_suffix "frog"
+ @tag config: [
+ {"uuids", "algorithm", "utc_id"},
+ {"uuids", "utc_id_suffix", @utc_id_suffix}
+ ]
+ test "utc_id uuids are correct" do
+ resp = Couch.get("/_uuids", query: %{:count => 10})
+ assert resp.status_code == 200
+ Enum.reduce(resp.body["uuids"], fn curr, acc ->
+ assert String.length(curr) == 14 + String.length(@utc_id_suffix)
+ assert String.slice(curr, 14..-1) == @utc_id_suffix
+ assert curr > acc
+ curr
+ end)
+ end
+end
diff --git a/test/elixir/test/view_collation_test.exs b/test/elixir/test/view_collation_test.exs
new file mode 100644
index 000000000..10aec2f22
--- /dev/null
+++ b/test/elixir/test/view_collation_test.exs
@@ -0,0 +1,133 @@
+defmodule ViewCollationTest do
+ use CouchTestCase
+
+ @moduledoc """
+ Test CouchDB View Collation Behavior
+ This is a port of the view_collation.js suite
+ """
+
+ @values [
+ # Special values sort before all other types
+ :null,
+ :false,
+ :true,
+
+ # Then numbers
+ 1,
+ 2,
+ 3.0,
+ 4,
+
+ # Then text, case sensitive
+ "a",
+ "A",
+ "aa",
+ "b",
+ "B",
+ "ba",
+ "bb",
+
+ # Then arrays, compared element by element until different.
+ # Longer arrays sort after their prefixes
+ ["a"],
+ ["b"],
+ ["b", "c"],
+ ["b", "c", "a"],
+ ["b", "d"],
+ ["b", "d", "e"],
+
+ # Then objects, compared each key value in the list until different.
+ # Larger objects sort after their subset objects
+ {[a: 1]},
+ {[a: 2]},
+ {[b: 1]},
+ {[b: 2]},
+ # Member order does matter for collation
+ {[b: 2, a: 1]},
+ {[b: 2, c: 2]}
+ ]
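+
+ # Note: the {[key: value]} tuples are jiffy's EJSON representation of JSON
+ # objects; the wrapped proplist preserves member order, which is what lets
+ # the last two values exercise order-sensitive object collation.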
+
+ setup_all do
+ db_name = random_db_name()
+ {:ok, _} = create_db(db_name)
+ on_exit(fn -> delete_db(db_name) end)
+
+ {docs, _} = Enum.flat_map_reduce(@values, 1, fn value, idx ->
+ doc = %{:_id => Integer.to_string(idx), :foo => value}
+ {[doc], idx + 1}
+ end)
+ resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs})
+ Enum.each(resp.body, &(assert &1["ok"]))
+
+ map_fun = "function(doc) { emit(doc.foo, null); }"
+ map_doc = %{:views => %{:foo => %{:map => map_fun}}}
+ resp = Couch.put("/#{db_name}/_design/foo", body: map_doc)
+ assert resp.body["ok"]
+
+ {:ok, [db_name: db_name]}
+ end
+
+ test "ascending collation order", context do
+ resp = Couch.get(url(context))
+ pairs = Enum.zip(resp.body["rows"], @values)
+ Enum.each(pairs, fn {row, value} ->
+ assert row["key"] == convert(value)
+ end)
+ end
+
+ test "descending collation order", context do
+ resp = Couch.get(url(context), query: %{"descending" => "true"})
+ pairs = Enum.zip(resp.body["rows"], Enum.reverse(@values))
+ Enum.each(pairs, fn {row, value} ->
+ assert row["key"] == convert(value)
+ end)
+ end
+
+ test "key query option", context do
+ Enum.each(@values, fn value ->
+ resp = Couch.get(url(context), query: %{:key => :jiffy.encode(value)})
+ assert length(resp.body["rows"]) == 1
+ assert Enum.at(resp.body["rows"], 0)["key"] == convert(value)
+ end)
+ end
+
+ test "inclusive_end=true", context do
+ query = %{:endkey => :jiffy.encode("b"), :inclusive_end => true}
+ resp = Couch.get(url(context), query: query)
+ assert Enum.at(resp.body["rows"], -1)["key"] == "b"
+
+ query = Map.put(query, :descending, true)
+ resp = Couch.get(url(context), query: query)
+ assert Enum.at(resp.body["rows"], -1)["key"] == "b"
+ end
+
+ test "inclusive_end=false", context do
+ query = %{:endkey => :jiffy.encode("b"), :inclusive_end => false}
+ resp = Couch.get(url(context), query: query)
+ assert Enum.at(resp.body["rows"], -1)["key"] == "aa"
+
+ query = Map.put(query, :descending, true)
+ resp = Couch.get(url(context), query: query)
+ assert Enum.at(resp.body["rows"], -1)["key"] == "B"
+
+ query = %{
+ :endkey => :jiffy.encode("b"),
+ :endkey_docid => 11,
+ :inclusive_end => false
+ }
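+ # doc "11" is the one that emits key "b" (see @values), so excluding it via
+ # endkey_docid pushes the last row back to "aa"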
+ resp = Couch.get(url(context), query: query)
+ assert Enum.at(resp.body["rows"], -1)["key"] == "aa"
+
+ query = Map.put(query, :endkey_docid, 12)
+ resp = Couch.get(url(context), query: query)
+ assert Enum.at(resp.body["rows"], -1)["key"] == "b"
+ end
+
+ def url(context) do
+ "/#{context[:db_name]}/_design/foo/_view/foo"
+ end
+
+ def convert(value) do
+ :jiffy.decode(:jiffy.encode(value), [:return_maps])
+ end
+end
\ No newline at end of file
diff --git a/test/javascript/run b/test/javascript/run
index 11f9faee2..ec12431b0 100755
--- a/test/javascript/run
+++ b/test/javascript/run
@@ -146,7 +146,7 @@ def main():
sys.stderr.write("======================================================="
+ os.linesep)
sys.stderr.write("JavaScript tests complete." + os.linesep)
- sys.stderr.write(" Failed: {}. Skipped or passed: {}.".format(
+ sys.stderr.write(" Failed: {0}. Skipped or passed: {1}.".format(
failed, passed) + os.linesep)
exit(failed > 0)
diff --git a/test/javascript/tests/config.js b/test/javascript/tests/config.js
index 8c7ce9917..a9dce637f 100644
--- a/test/javascript/tests/config.js
+++ b/test/javascript/tests/config.js
@@ -50,10 +50,7 @@ couchTests.config = function(debug) {
}
T(config.couchdb.database_dir);
- T(config.daemons.httpd);
- T(config.httpd_global_handlers._config);
T(config.log.level);
- T(config.query_servers.javascript);
// test that settings can be altered, and that an undefined whitelist allows any change
TEquals(undefined, config.httpd.config_whitelist, "Default whitelist is empty");