author     Bessenyei Balázs Donát <bessbd@apache.org>  2021-06-24 16:46:03 +0200
committer  GitHub <noreply@github.com>                 2021-06-24 16:46:03 +0200
commit     15cbb5502503c6e4d786bf0a85b7c225ccd14f71 (patch)
tree       abf352dc14b982de6d6fa54ad5e5b6f58470b8d7
parent     30c09b644669e97631d72d06ee883d8601254945 (diff)
Reformat src files with erlfmt (#3568)
-rw-r--r--  .gitignore | 1
-rw-r--r--  Makefile | 16
-rw-r--r--  Makefile.win | 16
-rw-r--r--  README-DEV.rst | 14
-rwxr-xr-x  configure | 59
-rw-r--r--  dev/__init__.py | 5
-rw-r--r--  dev/format_all.py | 80
-rw-r--r--  dev/format_check.py | 64
-rw-r--r--  dev/format_lib.py | 42
-rw-r--r--  src/aegis/src/aegis.erl | 44
-rw-r--r--  src/aegis/src/aegis_app.erl | 3
-rw-r--r--  src/aegis/src/aegis_key_manager.erl | 8
-rw-r--r--  src/aegis/src/aegis_keywrap.erl | 87
-rw-r--r--  src/aegis/src/aegis_noop_key_manager.erl | 5
-rw-r--r--  src/aegis/src/aegis_server.erl | 94
-rw-r--r--  src/aegis/src/aegis_sup.erl | 3
-rw-r--r--  src/chttpd/src/chttpd.erl | 1202
-rw-r--r--  src/chttpd/src/chttpd_auth.erl | 20
-rw-r--r--  src/chttpd/src/chttpd_auth_cache.erl | 196
-rw-r--r--  src/chttpd/src/chttpd_auth_request.erl | 117
-rw-r--r--  src/chttpd/src/chttpd_changes.erl | 611
-rw-r--r--  src/chttpd/src/chttpd_cors.erl | 279
-rw-r--r--  src/chttpd/src/chttpd_db.erl | 2674
-rw-r--r--  src/chttpd/src/chttpd_epi.erl | 2
-rw-r--r--  src/chttpd/src/chttpd_external.erl | 162
-rw-r--r--  src/chttpd/src/chttpd_handlers.erl | 31
-rw-r--r--  src/chttpd/src/chttpd_httpd_handlers.erl | 272
-rw-r--r--  src/chttpd/src/chttpd_misc.erl | 215
-rw-r--r--  src/chttpd/src/chttpd_node.erl | 196
-rw-r--r--  src/chttpd/src/chttpd_plugin.erl | 7
-rw-r--r--  src/chttpd/src/chttpd_prefer_header.erl | 27
-rw-r--r--  src/chttpd/src/chttpd_show.erl | 92
-rw-r--r--  src/chttpd/src/chttpd_stats.erl | 49
-rw-r--r--  src/chttpd/src/chttpd_sup.erl | 42
-rw-r--r--  src/chttpd/src/chttpd_test_util.erl | 1
-rw-r--r--  src/chttpd/src/chttpd_util.erl | 79
-rw-r--r--  src/chttpd/src/chttpd_view.erl | 242
-rw-r--r--  src/chttpd/src/chttpd_xframe_options.erl | 48
-rw-r--r--  src/couch/src/couch.erl | 25
-rw-r--r--  src/couch/src/couch_att.erl | 388
-rw-r--r--  src/couch/src/couch_auth_cache.erl | 56
-rw-r--r--  src/couch/src/couch_base32.erl | 167
-rw-r--r--  src/couch/src/couch_db_epi.erl | 1
-rw-r--r--  src/couch/src/couch_debug.erl | 146
-rw-r--r--  src/couch/src/couch_doc.erl | 452
-rw-r--r--  src/couch/src/couch_drv.erl | 38
-rw-r--r--  src/couch/src/couch_ejson_compare.erl | 87
-rw-r--r--  src/couch/src/couch_ejson_size.erl | 57
-rw-r--r--  src/couch/src/couch_flags.erl | 27
-rw-r--r--  src/couch/src/couch_flags_config.erl | 126
-rw-r--r--  src/couch/src/couch_hotp.erl | 11
-rw-r--r--  src/couch/src/couch_httpd.erl | 777
-rw-r--r--  src/couch/src/couch_httpd_auth.erl | 681
-rw-r--r--  src/couch/src/couch_httpd_external.erl | 160
-rw-r--r--  src/couch/src/couch_httpd_multipart.erl | 353
-rw-r--r--  src/couch/src/couch_httpd_vhost.erl | 238
-rw-r--r--  src/couch/src/couch_io_logger.erl | 12
-rw-r--r--  src/couch/src/couch_key_tree.erl | 295
-rw-r--r--  src/couch/src/couch_native_process.erl | 299
-rw-r--r--  src/couch/src/couch_os_process.erl | 171
-rw-r--r--  src/couch/src/couch_partition.erl | 43
-rw-r--r--  src/couch/src/couch_passwords.erl | 151
-rw-r--r--  src/couch/src/couch_primary_sup.erl | 21
-rw-r--r--  src/couch/src/couch_proc_manager.erl | 321
-rw-r--r--  src/couch/src/couch_query_servers.erl | 589
-rw-r--r--  src/couch/src/couch_rand.erl | 3
-rw-r--r--  src/couch/src/couch_secondary_sup.erl | 49
-rw-r--r--  src/couch/src/couch_server.erl | 37
-rw-r--r--  src/couch/src/couch_sup.erl | 137
-rw-r--r--  src/couch/src/couch_totp.erl | 13
-rw-r--r--  src/couch/src/couch_util.erl | 375
-rw-r--r--  src/couch/src/couch_uuids.erl | 21
-rw-r--r--  src/couch/src/couch_work_queue.erl | 113
-rw-r--r--  src/couch/src/test_request.erl | 1
-rw-r--r--  src/couch/src/test_util.erl | 112
-rw-r--r--  src/couch_epi/src/couch_epi.erl | 88
-rw-r--r--  src/couch_epi/src/couch_epi_codechange_monitor.erl | 10
-rw-r--r--  src/couch_epi/src/couch_epi_codegen.erl | 45
-rw-r--r--  src/couch_epi/src/couch_epi_data.erl | 11
-rw-r--r--  src/couch_epi/src/couch_epi_data_gen.erl | 173
-rw-r--r--  src/couch_epi/src/couch_epi_functions.erl | 10
-rw-r--r--  src/couch_epi/src/couch_epi_functions_gen.erl | 330
-rw-r--r--  src/couch_epi/src/couch_epi_module_keeper.erl | 31
-rw-r--r--  src/couch_epi/src/couch_epi_plugin.erl | 190
-rw-r--r--  src/couch_epi/src/couch_epi_sup.erl | 59
-rw-r--r--  src/couch_epi/src/couch_epi_util.erl | 2
-rw-r--r--  src/couch_eval/src/couch_eval.erl | 43
-rw-r--r--  src/couch_expiring_cache/src/couch_expiring_cache.erl | 47
-rw-r--r--  src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl | 82
-rw-r--r--  src/couch_expiring_cache/src/couch_expiring_cache_server.erl | 28
-rw-r--r--  src/couch_jobs/src/couch_jobs.erl | 140
-rw-r--r--  src/couch_jobs/src/couch_jobs_activity_monitor.erl | 122
-rw-r--r--  src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl | 16
-rw-r--r--  src/couch_jobs/src/couch_jobs_app.erl | 4
-rw-r--r--  src/couch_jobs/src/couch_jobs_fdb.erl | 232
-rw-r--r--  src/couch_jobs/src/couch_jobs_notifier.erl | 247
-rw-r--r--  src/couch_jobs/src/couch_jobs_notifier_sup.erl | 16
-rw-r--r--  src/couch_jobs/src/couch_jobs_pending.erl | 16
-rw-r--r--  src/couch_jobs/src/couch_jobs_server.erl | 74
-rw-r--r--  src/couch_jobs/src/couch_jobs_sup.erl | 4
-rw-r--r--  src/couch_jobs/src/couch_jobs_type_monitor.erl | 16
-rw-r--r--  src/couch_jobs/src/couch_jobs_util.erl | 28
-rw-r--r--  src/couch_js/src/couch_js.erl | 32
-rw-r--r--  src/couch_js/src/couch_js_app.erl | 7
-rw-r--r--  src/couch_js/src/couch_js_io_logger.erl | 12
-rw-r--r--  src/couch_js/src/couch_js_native_process.erl | 340
-rw-r--r--  src/couch_js/src/couch_js_os_process.erl | 197
-rw-r--r--  src/couch_js/src/couch_js_proc_manager.erl | 329
-rw-r--r--  src/couch_js/src/couch_js_query_servers.erl | 510
-rw-r--r--  src/couch_js/src/couch_js_sup.erl | 4
-rw-r--r--  src/couch_lib/src/couch_lib_parse.erl | 40
-rw-r--r--  src/couch_log/src/couch_log.erl | 11
-rw-r--r--  src/couch_log/src/couch_log_app.erl | 1
-rw-r--r--  src/couch_log/src/couch_log_config.erl | 49
-rw-r--r--  src/couch_log/src/couch_log_config_dyn.erl | 2
-rw-r--r--  src/couch_log/src/couch_log_error_logger_h.erl | 9
-rw-r--r--  src/couch_log/src/couch_log_formatter.erl | 287
-rw-r--r--  src/couch_log/src/couch_log_monitor.erl | 13
-rw-r--r--  src/couch_log/src/couch_log_server.erl | 27
-rw-r--r--  src/couch_log/src/couch_log_sup.erl | 12
-rw-r--r--  src/couch_log/src/couch_log_trunc_io.erl | 873
-rw-r--r--  src/couch_log/src/couch_log_trunc_io_fmt.erl | 433
-rw-r--r--  src/couch_log/src/couch_log_util.erl | 170
-rw-r--r--  src/couch_log/src/couch_log_writer.erl | 24
-rw-r--r--  src/couch_log/src/couch_log_writer_file.erl | 11
-rw-r--r--  src/couch_log/src/couch_log_writer_journald.erl | 24
-rw-r--r--  src/couch_log/src/couch_log_writer_stderr.erl | 5
-rw-r--r--  src/couch_log/src/couch_log_writer_syslog.erl | 169
-rw-r--r--  src/couch_prometheus/src/couch_prometheus_http.erl | 54
-rw-r--r--  src/couch_prometheus/src/couch_prometheus_server.erl | 73
-rw-r--r--  src/couch_prometheus/src/couch_prometheus_sup.erl | 3
-rw-r--r--  src/couch_prometheus/src/couch_prometheus_util.erl | 71
-rw-r--r--  src/couch_replicator/src/couch_replicator.erl | 217
-rw-r--r--  src/couch_replicator/src/couch_replicator_api_wrap.erl | 967
-rw-r--r--  src/couch_replicator/src/couch_replicator_auth.erl | 11
-rw-r--r--  src/couch_replicator/src/couch_replicator_auth_noop.erl | 8
-rw-r--r--  src/couch_replicator/src/couch_replicator_auth_session.erl | 259
-rw-r--r--  src/couch_replicator/src/couch_replicator_changes_reader.erl | 137
-rw-r--r--  src/couch_replicator/src/couch_replicator_connection.erl | 171
-rw-r--r--  src/couch_replicator/src/couch_replicator_docs.erl | 205
-rw-r--r--  src/couch_replicator/src/couch_replicator_epi.erl | 10
-rw-r--r--  src/couch_replicator/src/couch_replicator_fabric2_plugin.erl | 7
-rw-r--r--  src/couch_replicator/src/couch_replicator_filters.erl | 128
-rw-r--r--  src/couch_replicator/src/couch_replicator_httpc.erl | 365
-rw-r--r--  src/couch_replicator/src/couch_replicator_httpc_pool.erl | 94
-rw-r--r--  src/couch_replicator/src/couch_replicator_httpd.erl | 104
-rw-r--r--  src/couch_replicator/src/couch_replicator_ids.erl | 277
-rw-r--r--  src/couch_replicator/src/couch_replicator_job.erl | 904
-rw-r--r--  src/couch_replicator/src/couch_replicator_job_server.erl | 106
-rw-r--r--  src/couch_replicator/src/couch_replicator_jobs.erl | 45
-rw-r--r--  src/couch_replicator/src/couch_replicator_parse.erl | 811
-rw-r--r--  src/couch_replicator/src/couch_replicator_rate_limiter.erl | 69
-rw-r--r--  src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl | 12
-rw-r--r--  src/couch_replicator/src/couch_replicator_stats.erl | 34
-rw-r--r--  src/couch_replicator/src/couch_replicator_sup.erl | 21
-rw-r--r--  src/couch_replicator/src/couch_replicator_utils.erl | 144
-rw-r--r--  src/couch_replicator/src/couch_replicator_worker.erl | 570
-rw-r--r--  src/couch_replicator/src/json_stream_parse.erl | 391
-rw-r--r--  src/couch_stats/src/couch_stats.erl | 14
-rw-r--r--  src/couch_stats/src/couch_stats_aggregator.erl | 46
-rw-r--r--  src/couch_stats/src/couch_stats_httpd.erl | 60
-rw-r--r--  src/couch_stats/src/couch_stats_process_tracker.erl | 6
-rw-r--r--  src/couch_stats/src/couch_stats_sup.erl | 16
-rw-r--r--  src/couch_tests/src/couch_tests.erl | 69
-rw-r--r--  src/couch_tests/src/couch_tests_combinatorics.erl | 5
-rw-r--r--  src/couch_views/src/couch_views.erl | 185
-rw-r--r--  src/couch_views/src/couch_views_app.erl | 5
-rw-r--r--  src/couch_views/src/couch_views_batch.erl | 43
-rw-r--r--  src/couch_views/src/couch_views_batch_impl.erl | 115
-rw-r--r--  src/couch_views/src/couch_views_ddoc.erl | 16
-rw-r--r--  src/couch_views/src/couch_views_encoding.erl | 59
-rw-r--r--  src/couch_views/src/couch_views_epi.erl | 10
-rw-r--r--  src/couch_views/src/couch_views_fabric2_plugin.erl | 5
-rw-r--r--  src/couch_views/src/couch_views_fdb.erl | 102
-rw-r--r--  src/couch_views/src/couch_views_http.erl | 242
-rw-r--r--  src/couch_views/src/couch_views_http_util.erl | 273
-rw-r--r--  src/couch_views/src/couch_views_indexer.erl | 456
-rw-r--r--  src/couch_views/src/couch_views_jobs.erl | 20
-rw-r--r--  src/couch_views/src/couch_views_plugin.erl | 8
-rw-r--r--  src/couch_views/src/couch_views_reader.erl | 191
-rw-r--r--  src/couch_views/src/couch_views_server.erl | 23
-rw-r--r--  src/couch_views/src/couch_views_sup.erl | 30
-rw-r--r--  src/couch_views/src/couch_views_trees.erl | 433
-rw-r--r--  src/couch_views/src/couch_views_updater.erl | 103
-rw-r--r--  src/couch_views/src/couch_views_util.erl | 218
-rw-r--r--  src/couch_views/src/couch_views_validate.erl | 361
-rw-r--r--  src/ctrace/src/ctrace.erl | 132
-rw-r--r--  src/ctrace/src/ctrace_config.erl | 117
-rw-r--r--  src/ctrace/src/ctrace_dsl.erl | 58
-rw-r--r--  src/ctrace/src/ctrace_sup.erl | 2
-rw-r--r--  src/ebtree/src/ebtree.erl | 1153
-rw-r--r--  src/fabric/src/fabric2_active_tasks.erl | 39
-rw-r--r--  src/fabric/src/fabric2_app.erl | 3
-rw-r--r--  src/fabric/src/fabric2_db.erl | 1722
-rw-r--r--  src/fabric/src/fabric2_db_expiration.erl | 91
-rw-r--r--  src/fabric/src/fabric2_db_plugin.erl | 26
-rw-r--r--  src/fabric/src/fabric2_events.erl | 42
-rw-r--r--  src/fabric/src/fabric2_fdb.erl | 1282
-rw-r--r--  src/fabric/src/fabric2_index.erl | 77
-rw-r--r--  src/fabric/src/fabric2_node_types.erl | 6
-rw-r--r--  src/fabric/src/fabric2_server.erl | 150
-rw-r--r--  src/fabric/src/fabric2_sup.erl | 3
-rw-r--r--  src/fabric/src/fabric2_txids.erl | 50
-rw-r--r--  src/fabric/src/fabric2_users_db.erl | 172
-rw-r--r--  src/fabric/src/fabric2_util.erl | 195
-rw-r--r--  src/jwtf/src/jwtf.erl | 83
-rw-r--r--  src/jwtf/src/jwtf_keystore.erl | 35
-rw-r--r--  src/jwtf/src/jwtf_sup.erl | 2
-rw-r--r--  src/mango/src/mango_crud.erl | 56
-rw-r--r--  src/mango/src/mango_cursor.erl | 139
-rw-r--r--  src/mango/src/mango_cursor_special.erl | 4
-rw-r--r--  src/mango/src/mango_cursor_view.erl | 125
-rw-r--r--  src/mango/src/mango_doc.erl | 394
-rw-r--r--  src/mango/src/mango_epi.erl | 2
-rw-r--r--  src/mango/src/mango_error.erl | 54
-rw-r--r--  src/mango/src/mango_eval.erl | 97
-rw-r--r--  src/mango/src/mango_execution_stats.erl | 20
-rw-r--r--  src/mango/src/mango_fields.erl | 30
-rw-r--r--  src/mango/src/mango_httpd.erl | 199
-rw-r--r--  src/mango/src/mango_httpd_handlers.erl | 15
-rw-r--r--  src/mango/src/mango_idx.erl | 216
-rw-r--r--  src/mango/src/mango_idx_special.erl | 33
-rw-r--r--  src/mango/src/mango_idx_view.erl | 212
-rw-r--r--  src/mango/src/mango_json.erl | 11
-rw-r--r--  src/mango/src/mango_json_bookmark.erl | 25
-rw-r--r--  src/mango/src/mango_opts.erl | 35
-rw-r--r--  src/mango/src/mango_plugin.erl | 3
-rw-r--r--  src/mango/src/mango_selector.erl | 639
-rw-r--r--  src/mango/src/mango_selector_text.erl | 191
-rw-r--r--  src/mango/src/mango_sort.erl | 7
-rw-r--r--  src/mango/src/mango_sup.erl | 3
-rw-r--r--  src/mango/src/mango_util.erl | 140
232 files changed, 19614 insertions, 17171 deletions
diff --git a/.gitignore b/.gitignore
index 719294101..ba6dac99c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,6 +53,7 @@ src/mango/src/mango_cursor_text.nocompile
src/docs/
src/emilio/
src/erlfdb/
+src/erlfmt/
src/ets_lru/
src/excoveralls/
src/fauxton/
diff --git a/Makefile b/Makefile
index fc9011707..adac5d10f 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,7 @@
include version.mk
REBAR?=$(shell echo `pwd`/bin/rebar)
+ERLFMT?=$(shell echo `pwd`/bin/erlfmt)
# Handle the following scenarios:
# 1. When building from a tarball, use version.mk.
@@ -160,6 +161,7 @@ endif
.PHONY: check
check: all
@$(MAKE) emilio
+ @$(MAKE) erlfmt-check
@$(MAKE) eunit
@$(MAKE) elixir-suite
@$(MAKE) exunit
@@ -209,6 +211,12 @@ soak-eunit: couch
emilio:
@bin/emilio -c emilio.config src/ | bin/warnings_in_scope -s 3 || exit 0
+erlfmt-check:
+ ERLFMT_PATH=$(ERLFMT) python3 dev/format_check.py
+
+erlfmt-format:
+ ERLFMT_PATH=$(ERLFMT) python3 dev/format_all.py
+
.venv/bin/black:
@python3 -m venv .venv
@.venv/bin/pip3 install black || touch .venv/bin/black
@@ -219,16 +227,16 @@ python-black: .venv/bin/black
echo "Python formatter not supported on Python < 3.6; check results on a newer platform"
@python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black --check \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux/*.py dev/run src/mango/test/*.py src/docs/src/conf.py src/docs/ext/*.py .
+ --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/erlfmt|src/rebar/pr2relnotes.py|src/fauxton" \
+ build-aux/*.py dev/run dev/format_*.py src/mango/test/*.py src/docs/src/conf.py src/docs/ext/*.py .
python-black-update: .venv/bin/black
@python3 -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
echo "Python formatter not supported on Python < 3.6; check results on a newer platform"
@python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux/*.py dev/run src/mango/test/*.py src/docs/src/conf.py src/docs/ext/*.py .
+ --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/erlfmt|src/rebar/pr2relnotes.py|src/fauxton" \
+ build-aux/*.py dev/run dev/format_*.py src/mango/test/*.py src/docs/src/conf.py src/docs/ext/*.py .
.PHONY: elixir
elixir: export MIX_ENV=integration
diff --git a/Makefile.win b/Makefile.win
index 5240da377..4add8e47e 100644
--- a/Makefile.win
+++ b/Makefile.win
@@ -18,6 +18,7 @@ include version.mk
SHELL=cmd.exe
REBAR=bin\rebar.cmd
+ERLFMT=bin/erlfmt
MAKE=make -f Makefile.win
# REBAR?=$(shell where rebar.cmd)
@@ -132,6 +133,7 @@ fauxton: share\www
# target: check - Test everything
check: all python-black
@$(MAKE) emilio
+ @$(MAKE) erlfmt-check
@$(MAKE) eunit
@$(MAKE) mango-test
@$(MAKE) elixir
@@ -176,6 +178,12 @@ just-eunit:
emilio:
@bin\emilio -c emilio.config src\ | python.exe bin\warnings_in_scope -s 3 || exit 0
+erlfmt-check:
+ ERLFMT_PATH=bin\erlfmt python3 dev\format_check.py
+
+erlfmt-format:
+ ERLFMT_PATH=bin\erlfmt python3 dev\format_all.py
+
.venv/bin/black:
@python.exe -m venv .venv
@.venv\Scripts\pip3.exe install black || copy /b .venv\Scripts\black.exe +,,
@@ -186,16 +194,16 @@ python-black: .venv/bin/black
echo 'Python formatter not supported on Python < 3.6; check results on a newer platform'
@python.exe -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
.venv\Scripts\black.exe --check \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux dev\run src\mango\test src\docs\src\conf.py src\docs\ext .
+ --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/erlfmt|src/rebar/pr2relnotes.py|src/fauxton" \
+ build-aux dev\run dev\format_*.py src\mango\test src\docs\src\conf.py src\docs\ext .
python-black-update: .venv/bin/black
@python.exe -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \
echo 'Python formatter not supported on Python < 3.6; check results on a newer platform'
@python.exe -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \
.venv\Scripts\black.exe \
- --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \
- build-aux dev\run src\mango\test src\docs\src\conf.py src\docs\ext .
+ --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/erlfmt|src/rebar/pr2relnotes.py|src/fauxton" \
+ build-aux dev\run dev\format_*.py src\mango\test src\docs\src\conf.py src\docs\ext .
.PHONY: elixir
elixir: export MIX_ENV=integration
diff --git a/README-DEV.rst b/README-DEV.rst
index f4031b767..eb271c149 100644
--- a/README-DEV.rst
+++ b/README-DEV.rst
@@ -127,6 +127,20 @@ ignore their build and avoid any issues with their dependencies.
See ``./configure --help`` for more information.
+Developing
+----------
+
+Formatting
+~~~~~~~~~~
+
+The ``erl`` files in ``src`` are formatted using erlfmt_. The checks are run
+for every PR in the CI. To run the checks locally, run ``make erlfmt-check``.
+To format the ``erl`` files in ``src``, run ``make erlfmt-format``.
+To use ``erlfmt`` for specific files only, use the executable ``bin/erlfmt``
+that is installed by ``configure``.
+
+.. _erlfmt: https://github.com/WhatsApp/erlfmt
+
Testing
-------
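
The README-DEV section above describes the workflow these targets enable. As a rough illustration only (not part of this commit), the two entry points can be driven from the repository root as below, assuming ./configure has already installed bin/erlfmt:

    # Hypothetical driver mirroring `make erlfmt-check` and `make erlfmt-format`:
    # it only sets ERLFMT_PATH and invokes the new dev scripts.
    import os
    import subprocess

    env = dict(os.environ, ERLFMT_PATH="bin/erlfmt")
    subprocess.run(["python3", "dev/format_check.py"], env=env, check=True)  # verify only
    subprocess.run(["python3", "dev/format_all.py"], env=env, check=True)   # rewrite in place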
diff --git a/configure b/configure
index 07f02e802..adceffb60 100755
--- a/configure
+++ b/configure
@@ -53,6 +53,8 @@ Options:
--spidermonkey-version VSN specify the version of SpiderMonkey to use (defaults to $SM_VSN)
--skip-deps do not update erlang dependencies
--rebar=PATH use rebar by specified path (version >=2.6.0 && <3.0 required)
+ --rebar3=PATH use rebar3 by specified path
+ --erlfmt=PATH use erlfmt by specified path
EOF
}
@@ -135,6 +137,28 @@ parse_opts() {
fi
;;
+ --rebar3)
+ if [ -x "$2" ]; then
+ eval REBAR3=$2
+ shift 2
+ continue
+ else
+ printf 'ERROR: "--rebar3" requires valid path to executable.\n' >&2
+ exit 1
+ fi
+ ;;
+
+ --erlfmt)
+ if [ -x "$2" ]; then
+ eval ERLFMT=$2
+ shift 2
+ continue
+ else
+ printf 'ERROR: "--erlfmt" requires valid path to executable.\n' >&2
+ exit 1
+ fi
+ ;;
+
--user|-u)
if [ -n "$2" ]; then
eval COUCHDB_USER=$2
@@ -265,6 +289,31 @@ install_local_rebar() {
fi
}
+install_local_rebar3() {
+ if [ ! -x "${rootdir}/bin/rebar3" ]; then
+ if [ ! -d "${rootdir}/src/rebar3" ]; then
+ git clone --depth 1 https://github.com/erlang/rebar3.git ${rootdir}/src/rebar3
+ fi
+ cd src/rebar3
+ ./bootstrap
+ mv ${rootdir}/src/rebar3/rebar3 ${rootdir}/bin/rebar3
+ cd ../..
+ fi
+}
+
+install_local_erlfmt() {
+ if [ ! -x "${rootdir}/bin/erlfmt" ]; then
+ if [ ! -d "${rootdir}/src/erlfmt" ]; then
+ git clone --depth 1 https://github.com/WhatsApp/erlfmt.git ${rootdir}/src/erlfmt
+ fi
+ cd "${rootdir}"/src/erlfmt
+ ${REBAR3} as release escriptize
+ mv ${rootdir}/src/erlfmt/_build/release/bin/erlfmt ${rootdir}/bin/erlfmt
+ ${REBAR3} clean
+ cd ../..
+ fi
+}
+
install_local_emilio() {
if [ ! -x "${rootdir}/bin/emilio" ]; then
if [ ! -d "${rootdir}/src/emilio" ]; then
@@ -282,6 +331,16 @@ if [ -z "${REBAR}" ]; then
REBAR=${rootdir}/bin/rebar
fi
+if [ -z "${REBAR3}" ]; then
+ install_local_rebar3
+ REBAR3=${rootdir}/bin/rebar3
+fi
+
+if [ -z "${ERLFMT}" ]; then
+ install_local_erlfmt
+ ERLFMT=${rootdir}/bin/erlfmt
+fi
+
install_local_emilio
# only update dependencies, when we are not in a release tarball
diff --git a/dev/__init__.py b/dev/__init__.py
new file mode 100644
index 000000000..47f814fa2
--- /dev/null
+++ b/dev/__init__.py
@@ -0,0 +1,5 @@
+# For relative imports to work in Python 3.6
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.realpath(__file__)))
diff --git a/dev/format_all.py b/dev/format_all.py
new file mode 100644
index 000000000..cf42fdcec
--- /dev/null
+++ b/dev/format_all.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+"""Erlang formatter for CouchDB
+Warning: this file needs to run from the CouchDB repo root.
+USAGE: ERLFMT_PATH=<path_to_erlfmt> python3 dev/format_all.py
+"""
+
+import os
+import subprocess
+
+from format_lib import get_source_paths
+
+
+def get_hashes():
+ hashes = {}
+ for item in get_source_paths():
+ if item["is_source_path"]:
+ beam_path = f"{item['dirname']}/ebin/{item['filename']}.beam"
+ hashes[item["raw_path"]] = subprocess.run(
+ ["md5sum", beam_path], encoding="utf-8", capture_output=True
+ ).stdout
+ else:
+ # command = ["erl",
+ # "-eval",
+ # "{ok, _, Binary} = compile:file(\"" + item['raw_path'] +
+ # "\", [binary, no_line_info, deterministic])," +
+ # "erlang:display(crypto:hash(md5, Binary)), halt().",
+ # "-noshell"]
+ # hashes[item['raw_path']] = subprocess.run(command, encoding="utf-8",
+ # capture_output=True).stdout
+ pass
+ return hashes
+
+
+if __name__ == "__main__":
+ print("Cleaning...")
+ subprocess.run(["make", "clean"], encoding="utf-8", stdout=subprocess.PIPE)
+ print("Compiling...")
+ subprocess.run(
+ ["bin/rebar", "compile"],
+ encoding="utf-8",
+ stdout=subprocess.PIPE,
+ env={"ERL_OPTS": "no_line_info"},
+ )
+ os.chdir("src")
+ print("Getting previous hashes...")
+ prev = get_hashes()
+ for key in prev.keys():
+ subprocess.run(
+ [os.environ["ERLFMT_PATH"], "-w", key],
+ encoding="utf-8",
+ stdout=subprocess.PIPE,
+ )
+ os.chdir("..")
+ subprocess.run(
+ ["bin/rebar", "compile"],
+ encoding="utf-8",
+ stdout=subprocess.PIPE,
+ env={"ERL_OPTS": "no_line_info"},
+ )
+ os.chdir("src")
+ print("Getting post hashes...")
+ post = get_hashes()
+ if prev == post:
+ print("Hashes match")
+ else:
+ print("Hash mismatch")
+ print("Diff: ", set(prev.items()) ^ set(post.items()))
diff --git a/dev/format_check.py b/dev/format_check.py
new file mode 100644
index 000000000..de099516f
--- /dev/null
+++ b/dev/format_check.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+"""Erlang formatter for CouchDB
+Warning: this file needs to run from the CouchDB repo root.
+USAGE: ERLFMT_PATH=<path_to_erlfmt> python3 dev/format_check.py
+"""
+
+import os
+import subprocess
+import sys
+
+from format_lib import get_source_paths
+
+FILTERED_LINES = [
+ "Checking formatting...",
+ "[warn] Code style issues found in the above file(s). Forgot to run erlfmt?",
+ "",
+]
+
+if __name__ == "__main__":
+ os.chdir("src")
+ failed_checks = 0
+ for item in get_source_paths():
+ if item["is_source_path"]:
+ run_result = subprocess.run(
+ [
+ os.environ["ERLFMT_PATH"],
+ "-c",
+ "--verbose",
+ # We have some long lines and erlfmt doesn't forcefully wrap
+ # them all. We should decrease this over time
+ "--print-width=167",
+ item["raw_path"],
+ ],
+ encoding="utf-8",
+ capture_output=True,
+ )
+ if run_result.returncode != 0:
+ # erlfmt sometimes returns a non-zero status code with no
+ # actual errors. This is a workaround
+ stderr_lines = [
+ line
+ for line in run_result.stderr.split("\n")
+ if line not in FILTERED_LINES
+ and not line.startswith("Formatting ")
+ and not line.startswith("[warn] ")
+ ]
+ if len(stderr_lines) > 0:
+ print("\n".join(stderr_lines), file=sys.stderr)
+ failed_checks += 1
+ os.chdir("..")
+ sys.exit(failed_checks)
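
Two details of dev/format_check.py above are worth noting: it exits with the number of files that failed, so any nonzero count fails the make target, and it filters erlfmt's stderr because erlfmt can return a nonzero status while emitting only progress lines. A hedged single-file example of the same invocation, assuming ERLFMT_PATH is exported and the working directory is src/ as the script arranges:

    import os
    import subprocess

    # same flags as format_check.py; --print-width=167 tolerates existing long lines
    result = subprocess.run(
        [os.environ["ERLFMT_PATH"], "-c", "--verbose", "--print-width=167",
         "chttpd/src/chttpd.erl"],
        encoding="utf-8",
        capture_output=True,
    )
    # a nonzero code here may still be benign; see the stderr filtering above
    print(result.returncode)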
diff --git a/dev/format_lib.py b/dev/format_lib.py
new file mode 100644
index 000000000..269311414
--- /dev/null
+++ b/dev/format_lib.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+"""Erlang formatter lib for CouchDB
+Warning: this file is not meant to be executed manually
+"""
+
+import pathlib
+import re
+import subprocess
+
+
+def get_source_paths():
+ for item in subprocess.run(
+ ["git", "ls-files"], encoding="utf-8", capture_output=True
+ ).stdout.split("\n"):
+ item_path = pathlib.Path(item)
+ if item_path.suffix != ".erl":
+ continue
+
+ regex_result = re.search(r"([^/]+?)/src/([^/]+?).erl", item)
+ result_dict = {
+ "raw_path": item,
+ "item_path": item_path,
+ "is_source_path": regex_result is not None,
+ }
+ if result_dict["is_source_path"]:
+ result_dict.update(
+ {"dirname": regex_result.group(1), "filename": regex_result.group(2)}
+ )
+ yield result_dict
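
The generator above classifies every tracked .erl file: paths matching <app>/src/<module>.erl are flagged as source paths and carry the dirname/filename pair later used to locate the compiled beam. A hedged example of one yielded item, assuming dev/ is on sys.path and the working directory is src/ (the callers chdir there first, so `git ls-files` returns relative paths):

    # illustration only: run with cwd == src/ and dev/ importable
    from format_lib import get_source_paths

    for item in get_source_paths():
        if item["raw_path"] == "chttpd/src/chttpd.erl":
            assert item["is_source_path"]
            assert (item["dirname"], item["filename"]) == ("chttpd", "chttpd")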
diff --git a/src/aegis/src/aegis.erl b/src/aegis/src/aegis.erl
index f2da69faf..fe96e8573 100644
--- a/src/aegis/src/aegis.erl
+++ b/src/aegis/src/aegis.erl
@@ -13,10 +13,8 @@
-module(aegis).
-include_lib("fabric/include/fabric2.hrl").
-
-define(WRAPPED_KEY, {?DB_AEGIS, 1}).
-
-export([
key_manager/0,
init_db/2,
@@ -32,56 +30,52 @@
key_manager() ->
?AEGIS_KEY_MANAGER.
-
init_db(#{} = Db, Options) ->
Db#{
is_encrypted => aegis_server:init_db(Db, Options)
}.
-
open_db(#{} = Db) ->
Db#{
is_encrypted => aegis_server:open_db(Db)
}.
-
get_db_info(#{is_encrypted := IsEncrypted} = Db) ->
- KeyManagerInfo = case erlang:function_exported(?AEGIS_KEY_MANAGER, get_db_info, 1) of
- true ->
- ?AEGIS_KEY_MANAGER:get_db_info(Db);
- false ->
- []
- end,
+ KeyManagerInfo =
+ case erlang:function_exported(?AEGIS_KEY_MANAGER, get_db_info, 1) of
+ true ->
+ ?AEGIS_KEY_MANAGER:get_db_info(Db);
+ false ->
+ []
+ end,
[{enabled, IsEncrypted}, {key_manager, {KeyManagerInfo}}].
-
encrypt(#{} = _Db, _Key, <<>>) ->
<<>>;
-
encrypt(#{is_encrypted := false}, _Key, Value) when is_binary(Value) ->
Value;
-
-encrypt(#{is_encrypted := true} = Db, Key, Value)
- when is_binary(Key), is_binary(Value) ->
+encrypt(#{is_encrypted := true} = Db, Key, Value) when
+ is_binary(Key), is_binary(Value)
+->
aegis_server:encrypt(Db, Key, Value).
-
decrypt(#{} = Db, Rows) when is_list(Rows) ->
- lists:map(fun({Key, Value}) ->
- {Key, decrypt(Db, Key, Value)}
- end, Rows).
+ lists:map(
+ fun({Key, Value}) ->
+ {Key, decrypt(Db, Key, Value)}
+ end,
+ Rows
+ ).
decrypt(#{} = _Db, _Key, <<>>) ->
<<>>;
-
decrypt(#{is_encrypted := false}, _Key, Value) when is_binary(Value) ->
Value;
-
-decrypt(#{is_encrypted := true} = Db, Key, Value)
- when is_binary(Key), is_binary(Value) ->
+decrypt(#{is_encrypted := true} = Db, Key, Value) when
+ is_binary(Key), is_binary(Value)
+->
aegis_server:decrypt(Db, Key, Value).
-
wrap_fold_fun(Db, Fun) when is_function(Fun, 2) ->
fun({Key, Value}, Acc) ->
Fun({Key, decrypt(Db, Key, Value)}, Acc)
diff --git a/src/aegis/src/aegis_app.erl b/src/aegis/src/aegis_app.erl
index 4a5a11f0c..c52e51281 100644
--- a/src/aegis/src/aegis_app.erl
+++ b/src/aegis/src/aegis_app.erl
@@ -14,13 +14,10 @@
-behaviour(application).
-
-export([start/2, stop/1]).
-
start(_StartType, _StartArgs) ->
aegis_sup:start_link().
-
stop(_State) ->
ok.
diff --git a/src/aegis/src/aegis_key_manager.erl b/src/aegis/src/aegis_key_manager.erl
index 4426c4f10..d35f78a89 100644
--- a/src/aegis/src/aegis_key_manager.erl
+++ b/src/aegis/src/aegis_key_manager.erl
@@ -12,19 +12,15 @@
-module(aegis_key_manager).
-
-
-callback init_db(
Db :: #{},
- DbOptions :: list()) -> {ok, binary()} | false.
-
+ DbOptions :: list()
+) -> {ok, binary()} | false.
-callback open_db(Db :: #{}) -> {ok, binary()} | false.
-
-callback get_db_info(Db :: #{}) -> list().
-
-optional_callbacks([
get_db_info/1
]).
diff --git a/src/aegis/src/aegis_keywrap.erl b/src/aegis/src/aegis_keywrap.erl
index 58c7668e8..3e9a9d8c0 100644
--- a/src/aegis/src/aegis_keywrap.erl
+++ b/src/aegis/src/aegis_keywrap.erl
@@ -21,8 +21,9 @@
-define(ICV1, 16#A6A6A6A6A6A6A6A6).
-spec key_wrap(WrappingKey :: binary(), KeyToWrap :: binary()) -> binary().
-key_wrap(WrappingKey, KeyToWrap)
- when is_binary(WrappingKey), bit_size(KeyToWrap) rem 64 == 0 ->
+key_wrap(WrappingKey, KeyToWrap) when
+ is_binary(WrappingKey), bit_size(KeyToWrap) rem 64 == 0
+->
N = bit_size(KeyToWrap) div 64,
wrap(WrappingKey, <<?ICV1:64>>, KeyToWrap, 1, 6 * N).
@@ -33,10 +34,10 @@ wrap(WrappingKey, A, R, T, End) ->
<<MSB_B:64, LSB_B:64>> = ?aes_ecb_encrypt(WrappingKey, <<A/binary, R1:64>>),
wrap(WrappingKey, <<(MSB_B bxor T):64>>, <<Rest/binary, LSB_B:64>>, T + 1, End).
-
-spec key_unwrap(WrappingKey :: binary(), KeyToUnwrap :: binary()) -> binary() | fail.
-key_unwrap(WrappingKey, KeyToUnwrap)
- when is_binary(WrappingKey), bit_size(KeyToUnwrap) rem 64 == 0 ->
+key_unwrap(WrappingKey, KeyToUnwrap) when
+ is_binary(WrappingKey), bit_size(KeyToUnwrap) rem 64 == 0
+->
N = (bit_size(KeyToUnwrap) div 64),
<<A:64, R/binary>> = KeyToUnwrap,
case unwrap(WrappingKey, <<A:64>>, R, 6 * (N - 1)) of
@@ -50,48 +51,66 @@ unwrap(_WrappingKey, A, R, 0) ->
<<A/binary, R/binary>>;
unwrap(WrappingKey, <<A:64>>, R, T) ->
RestSize = bit_size(R) - 64,
- <<Rest:RestSize, R2: 64>> = R,
+ <<Rest:RestSize, R2:64>> = R,
<<MSB_B:64, LSB_B:64>> = ?aes_ecb_decrypt(WrappingKey, <<(A bxor T):64, R2:64>>),
unwrap(WrappingKey, <<MSB_B:64>>, <<LSB_B:64, Rest:RestSize>>, T - 1).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
wrap_test_() ->
[
- %% 128 KEK / 128 DATA
- test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F:128>>,
- <<16#00112233445566778899AABBCCDDEEFF:128>>,
- <<16#1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5:192>>),
- %% 192 KEK / 128 DATA
- test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F1011121314151617:192>>,
- <<16#00112233445566778899AABBCCDDEEFF:128>>,
- <<16#96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D:192>>),
- %% 256 KEK / 128 DATA
- test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
- <<16#00112233445566778899AABBCCDDEEFF:128>>,
- <<16#64E8C3F9CE0F5BA263E9777905818A2A93C8191E7D6E8AE7:192>>),
- %% 192 KEK / 192 DATA
- test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F1011121314151617:192>>,
- <<16#00112233445566778899AABBCCDDEEFF0001020304050607:192>>,
- <<16#031D33264E15D33268F24EC260743EDCE1C6C7DDEE725A936BA814915C6762D2:256>>),
- %% 256 KEK / 192 DATA
- test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
- <<16#00112233445566778899AABBCCDDEEFF0001020304050607:192>>,
- <<16#A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1:256>>),
- %% 256 KEK / 256 DATA
- test_wrap_unwrap(<<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
- <<16#00112233445566778899AABBCCDDEEFF000102030405060708090A0B0C0D0E0F:256>>,
- <<16#28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD21:320>>)].
+ %% 128 KEK / 128 DATA
+ test_wrap_unwrap(
+ <<16#000102030405060708090A0B0C0D0E0F:128>>,
+ <<16#00112233445566778899AABBCCDDEEFF:128>>,
+ <<16#1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5:192>>
+ ),
+ %% 192 KEK / 128 DATA
+ test_wrap_unwrap(
+ <<16#000102030405060708090A0B0C0D0E0F1011121314151617:192>>,
+ <<16#00112233445566778899AABBCCDDEEFF:128>>,
+ <<16#96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D:192>>
+ ),
+ %% 256 KEK / 128 DATA
+ test_wrap_unwrap(
+ <<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
+ <<16#00112233445566778899AABBCCDDEEFF:128>>,
+ <<16#64E8C3F9CE0F5BA263E9777905818A2A93C8191E7D6E8AE7:192>>
+ ),
+ %% 192 KEK / 192 DATA
+ test_wrap_unwrap(
+ <<16#000102030405060708090A0B0C0D0E0F1011121314151617:192>>,
+ <<16#00112233445566778899AABBCCDDEEFF0001020304050607:192>>,
+ <<16#031D33264E15D33268F24EC260743EDCE1C6C7DDEE725A936BA814915C6762D2:256>>
+ ),
+ %% 256 KEK / 192 DATA
+ test_wrap_unwrap(
+ <<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
+ <<16#00112233445566778899AABBCCDDEEFF0001020304050607:192>>,
+ <<16#A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1:256>>
+ ),
+ %% 256 KEK / 256 DATA
+ test_wrap_unwrap(
+ <<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
+ <<16#00112233445566778899AABBCCDDEEFF000102030405060708090A0B0C0D0E0F:256>>,
+ <<
+ 16#28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD21:320
+ >>
+ )
+ ].
test_wrap_unwrap(WrappingKey, KeyToWrap, ExpectedWrappedKey) ->
- [?_assertEqual(ExpectedWrappedKey, key_wrap(WrappingKey, KeyToWrap)),
- ?_assertEqual(KeyToWrap, key_unwrap(WrappingKey, key_wrap(WrappingKey, KeyToWrap)))].
+ [
+ ?_assertEqual(ExpectedWrappedKey, key_wrap(WrappingKey, KeyToWrap)),
+ ?_assertEqual(KeyToWrap, key_unwrap(WrappingKey, key_wrap(WrappingKey, KeyToWrap)))
+ ].
fail_test() ->
KEK = <<16#000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F:256>>,
- CipherText = <<16#28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD20:320>>,
+ CipherText = <<
+ 16#28C9F404C4B810F4CBCCB35CFB87F8263F5786E2D80ED326CBC7F0E71A99F43BFB988B9B7A02DD20:320
+ >>,
?assertEqual(fail, key_unwrap(KEK, CipherText)).
-endif.
diff --git a/src/aegis/src/aegis_noop_key_manager.erl b/src/aegis/src/aegis_noop_key_manager.erl
index 2b61f1d29..95732a35d 100644
--- a/src/aegis/src/aegis_noop_key_manager.erl
+++ b/src/aegis/src/aegis_noop_key_manager.erl
@@ -12,20 +12,15 @@
-module(aegis_noop_key_manager).
-
-behaviour(aegis_key_manager).
-
-export([
init_db/2,
open_db/1
]).
-
-
init_db(#{} = _Db, _Options) ->
false.
-
open_db(#{} = _Db) ->
false.
diff --git a/src/aegis/src/aegis_server.erl b/src/aegis/src/aegis_server.erl
index 508e4531a..efc99b48b 100644
--- a/src/aegis/src/aegis_server.erl
+++ b/src/aegis/src/aegis_server.erl
@@ -16,11 +16,9 @@
-vsn(1).
-
-include("aegis.hrl").
-include_lib("kernel/include/logger.hrl").
-
%% aegis_server API
-export([
start_link/0,
@@ -40,8 +38,6 @@
code_change/3
]).
-
-
-define(KEY_CHECK, aegis_key_check).
-define(INIT_TIMEOUT, 60000).
-define(TIMEOUT, 10000).
@@ -49,16 +45,14 @@
-define(CACHE_MAX_AGE_SEC, 1800).
-define(CACHE_EXPIRATION_CHECK_SEC, 10).
-define(LAST_ACCESSED_INACTIVITY_SEC, 10).
--define(CACHE_DELETION_GRACE_SEC, 5). % Keep in cache after expiration
-
+% Keep in cache after expiration
+-define(CACHE_DELETION_GRACE_SEC, 5).
-record(entry, {uuid, encryption_key, counter, last_accessed, expires_at}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-spec init_db(Db :: #{}, Options :: list()) -> boolean().
init_db(#{uuid := UUID} = Db, Options) ->
sensitive(fun() ->
@@ -71,7 +65,6 @@ init_db(#{uuid := UUID} = Db, Options) ->
end
end).
-
-spec open_db(Db :: #{}) -> boolean().
open_db(#{} = Db) ->
sensitive(fun() ->
@@ -83,7 +76,6 @@ open_db(#{} = Db) ->
end
end).
-
-spec encrypt(Db :: #{}, Key :: binary(), Value :: binary()) -> binary().
encrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
#{
@@ -95,7 +87,7 @@ encrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
case gen_server:call(?MODULE, {encrypt, Db, Key, Value}) of
CipherText when is_binary(CipherText) ->
CipherText;
- {error, {_Tag, {_C_FileName,_LineNumber}, _Desc} = Reason} ->
+ {error, {_Tag, {_C_FileName, _LineNumber}, _Desc} = Reason} ->
?LOG_ERROR(#{what => encrypt_failure, details => Reason}),
couch_log:error("aegis encryption failure: ~p ", [Reason]),
erlang:error(decryption_failed);
@@ -109,7 +101,6 @@ encrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
end)
end.
-
-spec decrypt(Db :: #{}, Key :: binary(), Value :: binary()) -> binary().
decrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
#{
@@ -121,7 +112,7 @@ decrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
case gen_server:call(?MODULE, {decrypt, Db, Key, Value}) of
PlainText when is_binary(PlainText) ->
PlainText;
- {error, {_Tag, {_C_FileName,_LineNumber}, _Desc} = Reason} ->
+ {error, {_Tag, {_C_FileName, _LineNumber}, _Desc} = Reason} ->
?LOG_ERROR(#{what => decrypt_failure, details => Reason}),
couch_log:error("aegis decryption failure: ~p ", [Reason]),
erlang:error(decryption_failed);
@@ -135,14 +126,15 @@ decrypt(#{} = Db, Key, Value) when is_binary(Key), is_binary(Value) ->
end)
end.
-
%% gen_server functions
init([]) ->
process_flag(sensitive, true),
Cache = ets:new(?MODULE, [set, private, {keypos, #entry.uuid}]),
- ByAccess = ets:new(?MODULE,
- [ordered_set, private, {keypos, #entry.counter}]),
+ ByAccess = ets:new(
+ ?MODULE,
+ [ordered_set, private, {keypos, #entry.counter}]
+ ),
ets:new(?KEY_CHECK, [named_table, protected, {read_concurrency, true}]),
erlang:send_after(0, self(), maybe_remove_expired),
@@ -154,11 +146,9 @@ init([]) ->
},
{ok, St, ?INIT_TIMEOUT}.
-
terminate(_Reason, _St) ->
ok.
-
handle_call({insert_key, UUID, DbKey}, _From, #{cache := Cache} = St) ->
case ets:lookup(Cache, UUID) of
[#entry{uuid = UUID} = Entry] ->
@@ -168,44 +158,34 @@ handle_call({insert_key, UUID, DbKey}, _From, #{cache := Cache} = St) ->
end,
NewSt = insert(St, UUID, DbKey),
{reply, ok, NewSt, ?TIMEOUT};
-
handle_call({encrypt, Db, Key, Value}, From, St) ->
handle_crypto_call(fun do_encrypt/4, Db, Key, Value, From, St);
-
handle_call({decrypt, Db, Key, Value}, From, St) ->
handle_crypto_call(fun do_decrypt/4, Db, Key, Value, From, St);
-
handle_call(_Msg, _From, St) ->
{noreply, St}.
-
handle_cast({accessed, UUID}, St) ->
NewSt = bump_last_accessed(St, UUID),
{noreply, NewSt};
-
-
handle_cast(_Msg, St) ->
{noreply, St}.
-
handle_info(maybe_remove_expired, St) ->
remove_expired_entries(St),
CheckInterval = erlang:convert_time_unit(
- expiration_check_interval(), second, millisecond),
+ expiration_check_interval(), second, millisecond
+ ),
erlang:send_after(CheckInterval, self(), maybe_remove_expired),
{noreply, St};
-
handle_info(_Msg, St) ->
{noreply, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
%% private functions
-
handle_crypto_call(DoCryptoOp, Db, Key, Value, From, St) ->
#{uuid := UUID} = Db,
case lookup(St, UUID) of
@@ -214,9 +194,7 @@ handle_crypto_call(DoCryptoOp, Db, Key, Value, From, St) ->
{ok, DbKey} ->
erlang:spawn(fun() ->
process_flag(sensitive, true),
- try
- DoCryptoOp(DbKey, Db, Key, Value)
- of
+ try DoCryptoOp(DbKey, Db, Key, Value) of
Resp ->
gen_server:reply(From, Resp)
catch
@@ -227,7 +205,6 @@ handle_crypto_call(DoCryptoOp, Db, Key, Value, From, St) ->
{noreply, St, ?TIMEOUT}
end.
-
do_open_db(#{uuid := UUID} = Db) ->
case ?AEGIS_KEY_MANAGER:open_db(Db) of
{ok, DbKey} ->
@@ -237,20 +214,19 @@ do_open_db(#{uuid := UUID} = Db) ->
false
end.
-
do_encrypt(DbKey, #{uuid := UUID}, Key, Value) ->
EncryptionKey = crypto:strong_rand_bytes(32),
<<WrappedKey:320>> = aegis_keywrap:key_wrap(DbKey, EncryptionKey),
{CipherText, <<CipherTag:128>>} =
?aes_gcm_encrypt(
- EncryptionKey,
- <<0:96>>,
- <<UUID/binary, 0:8, Key/binary>>,
- Value),
+ EncryptionKey,
+ <<0:96>>,
+ <<UUID/binary, 0:8, Key/binary>>,
+ Value
+ ),
<<1:8, WrappedKey:320, CipherTag:128, CipherText/binary>>.
-
do_decrypt(DbKey, #{uuid := UUID}, Key, Value) ->
case Value of
<<1:8, WrappedKey:320, CipherTag:128, CipherText/binary>> ->
@@ -259,21 +235,22 @@ do_decrypt(DbKey, #{uuid := UUID}, Key, Value) ->
erlang:error(decryption_failed);
DecryptionKey ->
Decrypted =
- ?aes_gcm_decrypt(
- DecryptionKey,
- <<0:96>>,
- <<UUID/binary, 0:8, Key/binary>>,
- CipherText,
- <<CipherTag:128>>),
- if Decrypted /= error -> Decrypted; true ->
- erlang:error(decryption_failed)
+ ?aes_gcm_decrypt(
+ DecryptionKey,
+ <<0:96>>,
+ <<UUID/binary, 0:8, Key/binary>>,
+ CipherText,
+ <<CipherTag:128>>
+ ),
+ if
+ Decrypted /= error -> Decrypted;
+ true -> erlang:error(decryption_failed)
end
end;
_ ->
erlang:error(not_ciphertext)
end.
-
is_key_fresh(UUID) ->
Now = fabric2_util:now(sec),
@@ -282,7 +259,6 @@ is_key_fresh(UUID) ->
_ -> false
end.
-
%% cache functions
insert(St, UUID, DbKey) ->
@@ -321,7 +297,6 @@ insert(St, UUID, DbKey) ->
St#{counter := Counter + 1}.
-
lookup(#{cache := Cache}, UUID) ->
case ets:lookup(Cache, UUID) of
[#entry{uuid = UUID, encryption_key = DbKey} = Entry] ->
@@ -331,7 +306,6 @@ lookup(#{cache := Cache}, UUID) ->
{error, not_found}
end.
-
delete(St, #entry{uuid = UUID} = Entry) ->
#{
cache := Cache,
@@ -342,7 +316,6 @@ delete(St, #entry{uuid = UUID} = Entry) ->
true = ets:delete_object(Cache, Entry),
true = ets:delete_object(ByAccess, Entry).
-
maybe_bump_last_accessed(#entry{last_accessed = LastAccessed} = Entry) ->
case fabric2_util:now(sec) > LastAccessed + ?LAST_ACCESSED_INACTIVITY_SEC of
true ->
@@ -351,7 +324,6 @@ maybe_bump_last_accessed(#entry{last_accessed = LastAccessed} = Entry) ->
ok
end.
-
bump_last_accessed(St, UUID) ->
#{
cache := Cache,
@@ -359,7 +331,6 @@ bump_last_accessed(St, UUID) ->
counter := Counter
} = St,
-
[#entry{counter = OldCounter} = Entry0] = ets:lookup(Cache, UUID),
Entry = Entry0#entry{
@@ -374,7 +345,6 @@ bump_last_accessed(St, UUID) ->
St#{counter := Counter + 1}.
-
remove_expired_entries(St) ->
#{
cache := Cache,
@@ -393,25 +363,21 @@ remove_expired_entries(St) ->
Count = ets:select_delete(Cache, CacheExpired),
Count = ets:select_delete(ByAccess, CacheExpired).
-
-
max_age() ->
config:get_integer("aegis", "cache_max_age_sec", ?CACHE_MAX_AGE_SEC).
-
expiration_check_interval() ->
config:get_integer(
- "aegis", "cache_expiration_check_sec", ?CACHE_EXPIRATION_CHECK_SEC).
-
+ "aegis", "cache_expiration_check_sec", ?CACHE_EXPIRATION_CHECK_SEC
+ ).
cache_limit() ->
config:get_integer("aegis", "cache_limit", ?CACHE_LIMIT).
-
cache_deletion_grace() ->
config:get_integer(
- "aegis", "cache_deletion_grace_sec", ?CACHE_DELETION_GRACE_SEC).
-
+ "aegis", "cache_deletion_grace_sec", ?CACHE_DELETION_GRACE_SEC
+ ).
sensitive(Fun) when is_function(Fun, 0) ->
OldValue = process_flag(sensitive, true),
diff --git a/src/aegis/src/aegis_sup.erl b/src/aegis/src/aegis_sup.erl
index 6d3ee83d8..4d7e2c48b 100644
--- a/src/aegis/src/aegis_sup.erl
+++ b/src/aegis/src/aegis_sup.erl
@@ -16,7 +16,6 @@
-vsn(1).
-
-export([
start_link/0
]).
@@ -25,11 +24,9 @@
init/1
]).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
Flags = #{
strategy => one_for_one,
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index e232ff448..1f32e8260 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -18,30 +18,66 @@
-include_lib("chttpd/include/chttpd.hrl").
-include_lib("kernel/include/logger.hrl").
--export([start_link/0, start_link/1, start_link/2,
- stop/0, handle_request/1, handle_request_int/1,
- primary_header_value/2, header_value/2, header_value/3, qs_value/2,
- qs_value/3, qs/1, qs_json_value/3, path/1, absolute_uri/2, body_length/1,
- verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4,
- error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
- doc_etag/1, make_etag/1, etag_respond/3, etag_match/2,
- partition/1, serve_file/3, serve_file/4,
- server_header/0, start_chunked_response/3,send_chunk/2,last_chunk/1,
- start_response_length/4, send/2, start_json_response/2,
- start_json_response/3, end_json_response/1, send_response/4,
+-export([
+ start_link/0, start_link/1, start_link/2,
+ stop/0,
+ handle_request/1,
+ handle_request_int/1,
+ primary_header_value/2,
+ header_value/2, header_value/3,
+ qs_value/2,
+ qs_value/3,
+ qs/1,
+ qs_json_value/3,
+ path/1,
+ absolute_uri/2,
+ body_length/1,
+ verify_is_server_admin/1,
+ unquote/1,
+ quote/1,
+ recv/2,
+ recv_chunked/4,
+ error_info/1,
+ parse_form/1,
+ json_body/1,
+ json_body_obj/1,
+ body/1,
+ doc_etag/1,
+ make_etag/1,
+ etag_respond/3,
+ etag_match/2,
+ partition/1,
+ serve_file/3, serve_file/4,
+ server_header/0,
+ start_chunked_response/3,
+ send_chunk/2,
+ last_chunk/1,
+ start_response_length/4,
+ send/2,
+ start_json_response/2,
+ start_json_response/3,
+ end_json_response/1,
+ send_response/4,
send_response_no_cors/4,
- send_method_not_allowed/2, send_error/2, send_error/4, send_redirect/2,
- send_chunked_error/2, send_json/2,send_json/3,send_json/4,
- validate_ctype/2]).
+ send_method_not_allowed/2,
+ send_error/2, send_error/4,
+ send_redirect/2,
+ send_chunked_error/2,
+ send_json/2, send_json/3, send_json/4,
+ validate_ctype/2
+]).
-export([authenticate_request/3]).
--export([start_delayed_json_response/2, start_delayed_json_response/3,
- start_delayed_json_response/4,
+-export([
+ start_delayed_json_response/2, start_delayed_json_response/3, start_delayed_json_response/4,
start_delayed_chunked_response/3, start_delayed_chunked_response/4,
- send_delayed_chunk/2, send_delayed_last_chunk/1,
- send_delayed_error/2, end_delayed_json_response/1,
- get_delayed_req/1]).
+ send_delayed_chunk/2,
+ send_delayed_last_chunk/1,
+ send_delayed_error/2,
+ end_delayed_json_response/1,
+ get_delayed_req/1
+]).
-export([
chunked_response_buffer_size/0,
@@ -54,8 +90,8 @@
code,
headers,
chunks,
- resp=nil,
- buffer_response=false
+ resp = nil,
+ buffer_response = false
}).
-define(DEFAULT_SERVER_OPTIONS, "[{recbuf, undefined}]").
@@ -66,23 +102,28 @@ start_link() ->
start_link(http) ->
Port = config:get("chttpd", "port", "5984"),
start_link(?MODULE, [{port, Port}]);
-
start_link(https) ->
Port = config:get("ssl", "port", "6984"),
{ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", "undefined")),
{ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", "undefined")),
- {ok, SecureRenegotiate} = couch_util:parse_term(config:get("ssl", "secure_renegotiate", "undefined")),
+ {ok, SecureRenegotiate} = couch_util:parse_term(
+ config:get("ssl", "secure_renegotiate", "undefined")
+ ),
ServerOpts0 =
- [{cacertfile, config:get("ssl", "cacert_file", undefined)},
- {keyfile, config:get("ssl", "key_file", undefined)},
- {certfile, config:get("ssl", "cert_file", undefined)},
- {password, config:get("ssl", "password", undefined)},
- {secure_renegotiate, SecureRenegotiate},
- {versions, Versions},
- {ciphers, Ciphers}],
-
- case (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
- couch_util:get_value(certfile, ServerOpts0) == undefined) of
+ [
+ {cacertfile, config:get("ssl", "cacert_file", undefined)},
+ {keyfile, config:get("ssl", "key_file", undefined)},
+ {certfile, config:get("ssl", "cert_file", undefined)},
+ {password, config:get("ssl", "password", undefined)},
+ {secure_renegotiate, SecureRenegotiate},
+ {versions, Versions},
+ {ciphers, Ciphers}
+ ],
+
+ case
+ (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
+ couch_util:get_value(certfile, ServerOpts0) == undefined)
+ of
true ->
io:format("SSL enabled but PEM certificates are missing.", []),
throw({error, missing_certs});
@@ -90,41 +131,53 @@ start_link(https) ->
ok
end,
- ServerOpts = [Opt || {_, V}=Opt <- ServerOpts0, V /= undefined],
-
- ClientOpts = case config:get("ssl", "verify_ssl_certificates", "false") of
- "false" ->
- [];
- "true" ->
- FailIfNoPeerCert = case config:get("ssl", "fail_if_no_peer_cert", "false") of
- "false" -> false;
- "true" -> true
- end,
- [{depth, list_to_integer(config:get("ssl",
- "ssl_certificate_max_depth", "1"))},
- {fail_if_no_peer_cert, FailIfNoPeerCert},
- {verify, verify_peer}] ++
- case config:get("ssl", "verify_fun", undefined) of
- undefined -> [];
- SpecStr ->
- [{verify_fun, couch_httpd:make_arity_3_fun(SpecStr)}]
- end
- end,
+ ServerOpts = [Opt || {_, V} = Opt <- ServerOpts0, V /= undefined],
+
+ ClientOpts =
+ case config:get("ssl", "verify_ssl_certificates", "false") of
+ "false" ->
+ [];
+ "true" ->
+ FailIfNoPeerCert =
+ case config:get("ssl", "fail_if_no_peer_cert", "false") of
+ "false" -> false;
+ "true" -> true
+ end,
+ [
+ {depth,
+ list_to_integer(
+ config:get(
+ "ssl",
+ "ssl_certificate_max_depth",
+ "1"
+ )
+ )},
+ {fail_if_no_peer_cert, FailIfNoPeerCert},
+ {verify, verify_peer}
+ ] ++
+ case config:get("ssl", "verify_fun", undefined) of
+ undefined -> [];
+ SpecStr -> [{verify_fun, couch_httpd:make_arity_3_fun(SpecStr)}]
+ end
+ end,
SslOpts = ServerOpts ++ ClientOpts,
Options0 =
- [{port, Port},
- {ssl, true},
- {ssl_opts, SslOpts}],
+ [
+ {port, Port},
+ {ssl, true},
+ {ssl_opts, SslOpts}
+ ],
CustomServerOpts = get_server_options("httpsd"),
Options = merge_server_options(Options0, CustomServerOpts),
start_link(https, Options).
start_link(Name, Options) ->
- IP = case config:get("chttpd", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
+ IP =
+ case config:get("chttpd", "bind_address", "any") of
+ "any" -> any;
+ Else -> Else
+ end,
ok = couch_httpd:validate_bind_address(IP),
% Ensure uuid is set so that concurrent replications
@@ -135,19 +188,21 @@ start_link(Name, Options) ->
set_auth_handlers(),
- Options1 = Options ++ [
- {loop, fun ?MODULE:handle_request/1},
- {name, Name},
- {ip, IP}
- ],
+ Options1 =
+ Options ++
+ [
+ {loop, fun ?MODULE:handle_request/1},
+ {name, Name},
+ {ip, IP}
+ ],
ServerOpts = get_server_options("chttpd"),
Options2 = merge_server_options(Options1, ServerOpts),
case mochiweb_http:start(Options2) of
- {ok, Pid} ->
- {ok, Pid};
- {error, Reason} ->
- io:format("Failure to start Mochiweb: ~s~n", [Reason]),
- {error, Reason}
+ {ok, Pid} ->
+ {ok, Pid};
+ {error, Reason} ->
+ io:format("Failure to start Mochiweb: ~s~n", [Reason]),
+ {error, Reason}
end.
get_server_options(Module) ->
@@ -174,8 +229,11 @@ handle_request(MochiReq0) ->
handle_request_int(MochiReq) ->
Begin = os:timestamp(),
- SocketOptsCfg = config:get("chttpd",
- "socket_options", ?DEFAULT_SOCKET_OPTIONS),
+ SocketOptsCfg = config:get(
+ "chttpd",
+ "socket_options",
+ ?DEFAULT_SOCKET_OPTIONS
+ ),
{ok, SocketOpts} = couch_util:parse_term(SocketOptsCfg),
ok = mochiweb_socket:setopts(MochiReq:get(socket), SocketOpts),
@@ -185,55 +243,73 @@ handle_request_int(MochiReq) ->
{"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
% get requested path
- RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
- undefined ->
- case MochiReq:get_header_value("x-couchdb-requested-path") of
- undefined -> RawUri;
- R -> R
- end;
- P -> P
- end,
+ RequestedPath =
+ case MochiReq:get_header_value("x-couchdb-vhost-path") of
+ undefined ->
+ case MochiReq:get_header_value("x-couchdb-requested-path") of
+ undefined -> RawUri;
+ R -> R
+ end;
+ P ->
+ P
+ end,
Peer = MochiReq:get(peer),
Method1 =
- case MochiReq:get(method) of
- % already an atom
- Meth when is_atom(Meth) -> Meth;
-
- % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
- % possible (if any module references the atom, then it's existing).
- Meth -> couch_util:to_existing_atom(Meth)
- end,
+ case MochiReq:get(method) of
+ % already an atom
+ Meth when is_atom(Meth) -> Meth;
+ % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
+ % possible (if any module references the atom, then it's existing).
+ Meth -> couch_util:to_existing_atom(Meth)
+ end,
increment_method_stats(Method1),
% allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
- Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "COPY"]) of
- true ->
- couch_log:notice("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
- case Method1 of
- 'POST' ->
- ?LOG_NOTICE(#{
- what => http_method_override,
- result => ok,
- new_method => MethodOverride
- }),
- couch_util:to_existing_atom(MethodOverride);
- _ ->
- % Ignore X-HTTP-Method-Override when the original verb isn't POST.
- % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
- % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
- Method1
- end;
- _ -> Method1
- end,
+ Method2 =
+ case
+ lists:member(MethodOverride, [
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "DELETE",
+ "TRACE",
+ "CONNECT",
+ "COPY"
+ ])
+ of
+ true ->
+ couch_log:notice("MethodOverride: ~s (real method was ~s)", [
+ MethodOverride,
+ Method1
+ ]),
+ case Method1 of
+ 'POST' ->
+ ?LOG_NOTICE(#{
+ what => http_method_override,
+ result => ok,
+ new_method => MethodOverride
+ }),
+ couch_util:to_existing_atom(MethodOverride);
+ _ ->
+ % Ignore X-HTTP-Method-Override when the original verb isn't POST.
+ % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
+ % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
+ Method1
+ end;
+ _ ->
+ Method1
+ end,
% alias HEAD to GET as mochiweb takes care of stripping the body
- Method = case Method2 of
- 'HEAD' -> 'GET';
- Other -> Other
- end,
+ Method =
+ case Method2 of
+ 'HEAD' -> 'GET';
+ Other -> Other
+ end,
Nonce = couch_util:to_hex(crypto:strong_rand_bytes(5)),
logger:set_process_metadata(#{request_id => Nonce}),
@@ -245,10 +321,14 @@ handle_request_int(MochiReq) ->
original_method = Method1,
nonce = Nonce,
method = Method,
- path_parts = [list_to_binary(chttpd:unquote(Part))
- || Part <- string:tokens(Path, "/")],
- requested_path_parts = [?l2b(unquote(Part))
- || Part <- string:tokens(RequestedPath, "/")]
+ path_parts = [
+ list_to_binary(chttpd:unquote(Part))
+ || Part <- string:tokens(Path, "/")
+ ],
+ requested_path_parts = [
+ ?l2b(unquote(Part))
+ || Part <- string:tokens(RequestedPath, "/")
+ ]
},
% put small token on heap to keep requests synced to backend calls
@@ -260,12 +340,13 @@ handle_request_int(MochiReq) ->
maybe_trace_fdb(MochiReq:get_header_value("x-couchdb-fdb-trace")),
- {HttpReq2, Response} = case before_request(HttpReq0) of
- {ok, HttpReq1} ->
- process_request(HttpReq1);
- {error, Response0} ->
- {HttpReq0, Response0}
- end,
+ {HttpReq2, Response} =
+ case before_request(HttpReq0) of
+ {ok, HttpReq1} ->
+ process_request(HttpReq1);
+ {error, Response0} ->
+ {HttpReq0, Response0}
+ end,
{Status, Code, Reason, Resp} = split_response(Response),
@@ -296,17 +377,19 @@ before_request(HttpReq) ->
{ok, HttpReq1} = chttpd_plugin:before_request(HttpReq),
chttpd_stats:init(HttpReq1),
{ok, HttpReq1}
- catch Tag:Error:Stack ->
- {error, catch_error(HttpReq, Tag, Error, Stack)}
+ catch
+ Tag:Error:Stack ->
+ {error, catch_error(HttpReq, Tag, Error, Stack)}
end.
after_request(HttpReq, HttpResp0) ->
{ok, HttpResp1} =
try
chttpd_plugin:after_request(HttpReq, HttpResp0)
- catch _Tag:Error:Stack ->
- send_error(HttpReq, {Error, nil, Stack}),
- {ok, HttpResp0#httpd_resp{status = aborted}}
+ catch
+ _Tag:Error:Stack ->
+ send_error(HttpReq, {Error, nil, Stack}),
+ {ok, HttpResp0#httpd_resp{status = aborted}}
end,
HttpResp2 = update_stats(HttpReq, HttpResp1),
chttpd_stats:report(HttpResp2),
@@ -317,7 +400,7 @@ process_request(#httpd{mochi_req = MochiReq} = HttpReq) ->
HandlerKey =
case HttpReq#httpd.path_parts of
[] -> <<>>;
- [Key|_] -> ?l2b(quote(Key))
+ [Key | _] -> ?l2b(quote(Key))
end,
RawUri = MochiReq:get(raw_path),
@@ -327,31 +410,37 @@ process_request(#httpd{mochi_req = MochiReq} = HttpReq) ->
check_request_uri_length(RawUri),
check_url_encoding(RawUri),
case chttpd_cors:maybe_handle_preflight_request(HttpReq) of
- not_preflight ->
- case chttpd_auth:authenticate(HttpReq, fun authenticate_request/1) of
- #httpd{} = Req ->
- handle_req_after_auth(HandlerKey, Req);
+ not_preflight ->
+ case chttpd_auth:authenticate(HttpReq, fun authenticate_request/1) of
+ #httpd{} = Req ->
+ handle_req_after_auth(HandlerKey, Req);
+ Response ->
+ {HttpReq, Response}
+ end;
Response ->
{HttpReq, Response}
- end;
- Response ->
- {HttpReq, Response}
end
- catch Tag:Error:Stack ->
- {HttpReq, catch_error(HttpReq, Tag, Error, Stack)}
+ catch
+ Tag:Error:Stack ->
+ {HttpReq, catch_error(HttpReq, Tag, Error, Stack)}
end.
handle_req_after_auth(HandlerKey, HttpReq) ->
#httpd{user_ctx = #user_ctx{name = User}} = HttpReq,
ctrace:tag(#{user => User}),
try
- HandlerFun = chttpd_handlers:url_handler(HandlerKey,
- fun chttpd_db:handle_request/1),
- AuthorizedReq = chttpd_auth:authorize(possibly_hack(HttpReq),
- fun chttpd_auth_request:authorize_request/1),
+ HandlerFun = chttpd_handlers:url_handler(
+ HandlerKey,
+ fun chttpd_db:handle_request/1
+ ),
+ AuthorizedReq = chttpd_auth:authorize(
+ possibly_hack(HttpReq),
+ fun chttpd_auth_request:authorize_request/1
+ ),
{AuthorizedReq, HandlerFun(AuthorizedReq)}
- catch Tag:Error:Stack ->
- {HttpReq, catch_error(HttpReq, Tag, Error, Stack)}
+ catch
+ Tag:Error:Stack ->
+ {HttpReq, catch_error(HttpReq, Tag, Error, Stack)}
end.
catch_error(_HttpReq, throw, {http_head_abort, Resp}, _Stack) ->
@@ -377,7 +466,8 @@ catch_error(HttpReq, exit, {mochiweb_recv_error, E}, _Stack) ->
Peer,
Method,
MochiReq:get(raw_path),
- E]),
+ E
+ ]),
exit(normal);
catch_error(HttpReq, exit, {uri_too_long, _}, _Stack) ->
send_error(HttpReq, request_uri_too_long);
@@ -397,12 +487,13 @@ catch_error(HttpReq, Tag, Error, Stack) ->
% TODO improve logging and metrics collection for client disconnects
case {Tag, Error, Stack} of
{exit, normal, [{mochiweb_request, send, _, _} | _]} ->
- exit(normal); % Client disconnect (R15+)
+ % Client disconnect (R15+)
+ exit(normal);
_Else ->
send_error(HttpReq, {Error, nil, Stack})
end.
-split_response({ok, #delayed_resp{resp=Resp}}) ->
+split_response({ok, #delayed_resp{resp = Resp}}) ->
{ok, Resp:get(code), undefined, Resp};
split_response({ok, Resp}) ->
{ok, Resp:get(code), undefined, Resp};
@@ -443,38 +534,50 @@ maybe_log(#httpd{} = HttpReq, #httpd_resp{should_log = true} = HttpResp) ->
% - client port
% - timers: connection, request, time to first byte, ...
% - response size
- %
- ?LOG_NOTICE(#{
- method => Method,
- path => RawUri,
- code => Code,
- user => User,
- % req_size => MochiReq:get(body_length),
- src => #{ip4 => Peer},
- duration => RequestTime
- }, #{domain => [chttpd_access_log]}),
- couch_log:notice("~s ~s ~s ~s ~s ~B ~p ~B", [Host, Peer, User,
- Method, RawUri, Code, Status, RequestTime]);
+ %
+ ?LOG_NOTICE(
+ #{
+ method => Method,
+ path => RawUri,
+ code => Code,
+ user => User,
+ % req_size => MochiReq:get(body_length),
+ src => #{ip4 => Peer},
+ duration => RequestTime
+ },
+ #{domain => [chttpd_access_log]}
+ ),
+ couch_log:notice("~s ~s ~s ~s ~s ~B ~p ~B", [
+ Host,
+ Peer,
+ User,
+ Method,
+ RawUri,
+ Code,
+ Status,
+ RequestTime
+ ]);
maybe_log(_HttpReq, #httpd_resp{should_log = false}) ->
ok.
-
%% HACK: replication currently handles two forms of input, #db{} style
%% and #http_db style. We need a third that makes use of fabric. #db{}
%% works fine for replicating the dbs and nodes database because they
%% aren't sharded. So for now when a local db is specified as the source or
%% the target, it's hacked to make it a full url and treated as a remote.
-possibly_hack(#httpd{path_parts=[<<"_replicate">>]}=Req) ->
+possibly_hack(#httpd{path_parts = [<<"_replicate">>]} = Req) ->
{Props0} = chttpd:json_body_obj(Req),
Props1 = fix_uri(Req, Props0, <<"source">>),
Props2 = fix_uri(Req, Props1, <<"target">>),
- Req#httpd{req_body={Props2}};
+ Req#httpd{req_body = {Props2}};
possibly_hack(Req) ->
Req.
check_request_uri_length(Uri) ->
- check_request_uri_length(Uri,
- chttpd_util:get_chttpd_config("max_uri_length")).
+ check_request_uri_length(
+ Uri,
+ chttpd_util:get_chttpd_config("max_uri_length")
+ ).
check_request_uri_length(_Uri, undefined) ->
ok;
@@ -497,24 +600,24 @@ check_url_encoding([_ | Rest]) ->
fix_uri(Req, Props, Type) ->
case replication_uri(Type, Props) of
- undefined ->
- Props;
- Uri0 ->
- case is_http(Uri0) of
- true ->
+ undefined ->
Props;
- false ->
- Uri = make_uri(Req, quote(Uri0)),
- [{Type,Uri}|proplists:delete(Type,Props)]
- end
+ Uri0 ->
+ case is_http(Uri0) of
+ true ->
+ Props;
+ false ->
+ Uri = make_uri(Req, quote(Uri0)),
+ [{Type, Uri} | proplists:delete(Type, Props)]
+ end
end.
replication_uri(Type, PostProps) ->
case couch_util:get_value(Type, PostProps) of
- {Props} ->
- couch_util:get_value(<<"url">>, Props);
- Else ->
- Else
+ {Props} ->
+ couch_util:get_value(<<"url">>, Props);
+ Else ->
+ Else
end.
is_http(<<"http://", _/binary>>) ->
@@ -526,13 +629,19 @@ is_http(_) ->
make_uri(Req, Raw) ->
Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- Url = list_to_binary(["http://", config:get("httpd", "bind_address"),
- ":", Port, "/", Raw]),
+ Url = list_to_binary([
+ "http://",
+ config:get("httpd", "bind_address"),
+ ":",
+ Port,
+ "/",
+ Raw
+ ]),
Headers = [
- {<<"authorization">>, ?l2b(header_value(Req,"authorization",""))},
+ {<<"authorization">>, ?l2b(header_value(Req, "authorization", ""))},
{<<"cookie">>, ?l2b(extract_cookie(Req))}
],
- {[{<<"url">>,Url}, {<<"headers">>,{Headers}}]}.
+ {[{<<"url">>, Url}, {<<"headers">>, {Headers}}]}.
extract_cookie(#httpd{mochi_req = MochiReq}) ->
case MochiReq:get_cookie_value("AuthSession") of
@@ -543,6 +652,7 @@ extract_cookie(#httpd{mochi_req = MochiReq}) ->
end.
%%% end hack
+%% erlfmt-ignore
set_auth_handlers() ->
AuthenticationDefault = "{chttpd_auth, cookie_authentication_handler},
{chttpd_auth, default_authentication_handler}",
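Side note on the annotation above: erlfmt-ignore tells erlfmt to skip formatting the next form, which keeps the hand-written layout of the multi-line default handler string.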
@@ -577,20 +687,21 @@ authenticate_request(Req) ->
authenticate_request(#httpd{} = Req0, AuthModule, AuthFuns) ->
Req = Req0#httpd{
auth_module = AuthModule,
- authentication_handlers = AuthFuns},
+ authentication_handlers = AuthFuns
+ },
authenticate_request(Req, AuthFuns).
% Try authentication handlers in order until one returns a result
-authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthFuns) ->
+authenticate_request(#httpd{user_ctx = #user_ctx{}} = Req, _AuthFuns) ->
Req;
-authenticate_request(#httpd{} = Req, [{Name, AuthFun}|Rest]) ->
+authenticate_request(#httpd{} = Req, [{Name, AuthFun} | Rest]) ->
authenticate_request(maybe_set_handler(AuthFun(Req), Name), Rest);
-authenticate_request(#httpd{} = Req, [AuthFun|Rest]) ->
+authenticate_request(#httpd{} = Req, [AuthFun | Rest]) ->
authenticate_request(AuthFun(Req), Rest);
authenticate_request(Response, _AuthFuns) ->
Response.
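A sketch of the iteration described by the comment above, assuming the default handlers configured by set_auth_handlers/0 (the binary names are illustrative): each fun receives the #httpd{} record, and the first one that attaches a #user_ctx{} stops the chain.

    AuthFuns = [
        {<<"cookie">>, fun chttpd_auth:cookie_authentication_handler/1},
        {<<"default">>, fun chttpd_auth:default_authentication_handler/1}
    ],
    authenticate_request(Req, AuthFuns).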
-maybe_set_handler(#httpd{user_ctx=#user_ctx{} = UserCtx} = Req, Name) ->
+maybe_set_handler(#httpd{user_ctx = #user_ctx{} = UserCtx} = Req, Name) ->
Req#httpd{user_ctx = UserCtx#user_ctx{handler = Name}};
maybe_set_handler(Else, _) ->
Else.
@@ -603,16 +714,16 @@ increment_method_stats(Method) ->
partition(Path) ->
mochiweb_util:partition(Path, "/").
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
+header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_header_value(Key).
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+header_value(#httpd{mochi_req = MochiReq}, Key, Default) ->
case MochiReq:get_header_value(Key) of
- undefined -> Default;
- Value -> Value
+ undefined -> Default;
+ Value -> Value
end.
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+primary_header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_primary_header_value(Key).
serve_file(Req, RelativePath, DocumentRoot) ->
@@ -640,44 +751,52 @@ qs(#httpd{mochi_req = MochiReq, qs = undefined}) ->
qs(#httpd{qs = QS}) ->
QS.
-path(#httpd{mochi_req=MochiReq}) ->
+path(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(path).
-absolute_uri(#httpd{mochi_req=MochiReq, absolute_uri = undefined}, Path) ->
+absolute_uri(#httpd{mochi_req = MochiReq, absolute_uri = undefined}, Path) ->
XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"),
- Host = case MochiReq:get_header_value(XHost) of
- undefined ->
- case MochiReq:get_header_value("Host") of
- undefined ->
- {ok, {Address, Port}} = case MochiReq:get(socket) of
- {ssl, SslSocket} -> ssl:sockname(SslSocket);
- Socket -> inet:sockname(Socket)
- end,
- inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
- Value1 ->
- Value1
- end;
- Value -> Value
- end,
+ "x_forwarded_host", "X-Forwarded-Host"
+ ),
+ Host =
+ case MochiReq:get_header_value(XHost) of
+ undefined ->
+ case MochiReq:get_header_value("Host") of
+ undefined ->
+ {ok, {Address, Port}} =
+ case MochiReq:get(socket) of
+ {ssl, SslSocket} -> ssl:sockname(SslSocket);
+ Socket -> inet:sockname(Socket)
+ end,
+ inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
+ Value1 ->
+ Value1
+ end;
+ Value ->
+ Value
+ end,
XSsl = chttpd_util:get_chttpd_config("x_forwarded_ssl", "X-Forwarded-Ssl"),
- Scheme = case MochiReq:get_header_value(XSsl) of
- "on" -> "https";
- _ ->
- XProto = chttpd_util:get_chttpd_config(
- "x_forwarded_proto", "X-Forwarded-Proto"),
- case MochiReq:get_header_value(XProto) of
- % Restrict to "https" and "http" schemes only
- "https" -> "https";
- _ ->
- case MochiReq:get(scheme) of
- https ->
- "https";
- http ->
- "http"
- end
- end
- end,
+ Scheme =
+ case MochiReq:get_header_value(XSsl) of
+ "on" ->
+ "https";
+ _ ->
+ XProto = chttpd_util:get_chttpd_config(
+ "x_forwarded_proto", "X-Forwarded-Proto"
+ ),
+ case MochiReq:get_header_value(XProto) of
+ % Restrict to "https" and "http" schemes only
+ "https" ->
+ "https";
+ _ ->
+ case MochiReq:get(scheme) of
+ https ->
+ "https";
+ http ->
+ "http"
+ end
+ end
+ end,
Scheme ++ "://" ++ Host ++ Path;
absolute_uri(#httpd{absolute_uri = URI}, Path) ->
URI ++ Path.
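Worked example for absolute_uri/2 above (header values hypothetical): with X-Forwarded-Host: db.example.com and X-Forwarded-Proto: https set by a proxy, absolute_uri(Req, "/mydb") returns "https://db.example.com/mydb"; absent those headers, the host falls back to the Host header or the socket address, and the scheme to the connection's own scheme.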
@@ -688,27 +807,28 @@ unquote(UrlEncodedString) ->
quote(UrlDecodedString) ->
mochiweb_util:quote_plus(UrlDecodedString).
-parse_form(#httpd{mochi_req=MochiReq}) ->
+parse_form(#httpd{mochi_req = MochiReq}) ->
mochiweb_multipart:parse_form(MochiReq).
-recv(#httpd{mochi_req=MochiReq}, Len) ->
+recv(#httpd{mochi_req = MochiReq}, Len) ->
MochiReq:recv(Len).
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
+recv_chunked(#httpd{mochi_req = MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
% Fun is called once with each chunk
% Fun({Length, Binary}, State)
% called with Length == 0 on the last time.
MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
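A hypothetical usage sketch for recv_chunked/4 following the contract documented above (all names are illustrative): the accumulator collects chunks and the final zero-length call returns them in order.

    ChunkFun = fun({0, _}, Acc) -> lists:reverse(Acc);
                  ({_Len, Bin}, Acc) -> [Bin | Acc]
               end,
    chttpd:recv_chunked(Req, 65536, ChunkFun, []).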
-body_length(#httpd{mochi_req=MochiReq}) ->
+body_length(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(body_length).
-body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
+body(#httpd{mochi_req = MochiReq, req_body = ReqBody}) ->
case ReqBody of
undefined ->
% Maximum size of document PUT request body (4GB)
MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296),
+ "max_http_request_size", 4294967296
+ ),
Begin = os:timestamp(),
try
MochiReq:recv_body(MaxSize)
@@ -723,38 +843,35 @@ body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
validate_ctype(Req, Ctype) ->
couch_httpd:validate_ctype(Req, Ctype).
-json_body(#httpd{req_body=undefined} = Httpd) ->
+json_body(#httpd{req_body = undefined} = Httpd) ->
case body(Httpd) of
undefined ->
throw({bad_request, "Missing request body"});
Body ->
?JSON_DECODE(maybe_decompress(Httpd, Body))
end;
-
-json_body(#httpd{req_body=ReqBody}) ->
+json_body(#httpd{req_body = ReqBody}) ->
ReqBody.
json_body_obj(Httpd) ->
case json_body(Httpd) of
{Props} -> {Props};
- _Else ->
- throw({bad_request, "Request body must be a JSON object"})
+ _Else -> throw({bad_request, "Request body must be a JSON object"})
end.
-
-doc_etag(#doc{id=Id, body=Body, revs={Start, [DiskRev|_]}}) ->
+doc_etag(#doc{id = Id, body = Body, revs = {Start, [DiskRev | _]}}) ->
couch_httpd:doc_etag(Id, Body, {Start, DiskRev}).
make_etag(Term) ->
<<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
- list_to_binary(io_lib:format("\"~.36B\"",[SigInt])).
+ list_to_binary(io_lib:format("\"~.36B\"", [SigInt])).
etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
etag_match(Req, binary_to_list(CurrentEtag));
-
etag_match(Req, CurrentEtag) ->
EtagsToMatch0 = string:tokens(
- chttpd:header_value(Req, "If-None-Match", ""), ", "),
+ chttpd:header_value(Req, "If-None-Match", ""), ", "
+ ),
EtagsToMatch = lists:map(fun strip_weak_prefix/1, EtagsToMatch0),
lists:member(CurrentEtag, EtagsToMatch).
@@ -765,27 +882,27 @@ strip_weak_prefix(Etag) ->
etag_respond(Req, CurrentEtag, RespFun) ->
case etag_match(Req, CurrentEtag) of
- true ->
- % the client has this in their cache.
- Headers = [{"ETag", CurrentEtag}],
- chttpd:send_response(Req, 304, Headers, <<>>);
- false ->
- % Run the function.
- RespFun()
+ true ->
+ % the client has this in their cache.
+ Headers = [{"ETag", CurrentEtag}],
+ chttpd:send_response(Req, 304, Headers, <<>>);
+ false ->
+ % Run the function.
+ RespFun()
end.
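A hedged usage sketch for etag_respond/3 (the handler and value names are invented): derive an ETag from the state that determines the response, then let etag_respond/3 answer 304 when the client cache is current.

    handle_widget_req(Req, Value) ->
        Etag = chttpd:make_etag(Value),
        chttpd:etag_respond(Req, Etag, fun() ->
            chttpd:send_json(Req, 200, [{"ETag", Etag}], {[{<<"value">>, Value}]})
        end).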
-verify_is_server_admin(#httpd{user_ctx=#user_ctx{roles=Roles}}) ->
+verify_is_server_admin(#httpd{user_ctx = #user_ctx{roles = Roles}}) ->
case lists:member(<<"_admin">>, Roles) of
- true -> ok;
- false -> throw({unauthorized, <<"You are not a server admin.">>})
+ true -> ok;
+ false -> throw({unauthorized, <<"You are not a server admin.">>})
end.
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers0, Length) ->
+start_response_length(#httpd{mochi_req = MochiReq} = Req, Code, Headers0, Length) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, Length, start_response_length),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
@@ -793,12 +910,12 @@ send(Resp, Data) ->
Resp:send(Data),
{ok, Resp}.
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
+start_chunked_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, chunked, respond),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
@@ -806,7 +923,8 @@ send_chunk({remote, _Pid, _Ref} = Resp, Data) ->
couch_httpd:send_chunk(Resp, Data);
send_chunk(Resp, Data) ->
case iolist_size(Data) of
- 0 -> ok; % do nothing
+ % do nothing
+ 0 -> ok;
_ -> Resp:write_chunk(Data)
end,
{ok, Resp}.
@@ -824,8 +942,14 @@ send_response_no_cors(Req, Code, Headers0, Body) ->
couch_httpd:send_response_no_cors(Req, Code, Headers1, Body).
send_method_not_allowed(Req, Methods) ->
- send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>,
- ?l2b("Only " ++ Methods ++ " allowed"), []).
+ send_error(
+ Req,
+ 405,
+ [{"Allow", Methods}],
+ <<"method_not_allowed">>,
+ ?l2b("Only " ++ Methods ++ " allowed"),
+ []
+ ).
send_json(Req, Value) ->
send_json(Req, 200, Value).
@@ -847,15 +971,12 @@ start_json_response(Req, Code, Headers0) ->
end_json_response(Resp) ->
couch_httpd:end_json_response(Resp).
-
start_delayed_json_response(Req, Code) ->
start_delayed_json_response(Req, Code, []).
-
start_delayed_json_response(Req, Code, Headers) ->
start_delayed_json_response(Req, Code, Headers, "").
-
start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
{ok, #delayed_resp{
start_fun = fun start_json_response/3,
@@ -863,13 +984,12 @@ start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
code = Code,
headers = Headers,
chunks = [FirstChunk],
- buffer_response = buffer_response(Req)}}.
-
+ buffer_response = buffer_response(Req)
+ }}.
start_delayed_chunked_response(Req, Code, Headers) ->
start_delayed_chunked_response(Req, Code, Headers, "").
-
start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
{ok, #delayed_resp{
start_fun = fun start_chunked_response/3,
@@ -877,34 +997,30 @@ start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
code = Code,
headers = Headers,
chunks = [FirstChunk],
- buffer_response = buffer_response(Req)}}.
+ buffer_response = buffer_response(Req)
+ }}.
-
-send_delayed_chunk(#delayed_resp{buffer_response=false}=DelayedResp, Chunk) ->
- {ok, #delayed_resp{resp=Resp}=DelayedResp1} =
+send_delayed_chunk(#delayed_resp{buffer_response = false} = DelayedResp, Chunk) ->
+ {ok, #delayed_resp{resp = Resp} = DelayedResp1} =
start_delayed_response(DelayedResp),
{ok, Resp} = send_chunk(Resp, Chunk),
{ok, DelayedResp1};
-
-send_delayed_chunk(#delayed_resp{buffer_response=true}=DelayedResp, Chunk) ->
+send_delayed_chunk(#delayed_resp{buffer_response = true} = DelayedResp, Chunk) ->
#delayed_resp{chunks = Chunks} = DelayedResp,
{ok, DelayedResp#delayed_resp{chunks = [Chunk | Chunks]}}.
-
send_delayed_last_chunk(Req) ->
send_delayed_chunk(Req, []).
-
-send_delayed_error(#delayed_resp{req=Req,resp=nil}=DelayedResp, Reason) ->
+send_delayed_error(#delayed_resp{req = Req, resp = nil} = DelayedResp, Reason) ->
{Code, ErrorStr, ReasonStr} = error_info(Reason),
{ok, Resp} = send_error(Req, Code, ErrorStr, ReasonStr),
- {ok, DelayedResp#delayed_resp{resp=Resp}};
-send_delayed_error(#delayed_resp{resp=Resp, req=Req}, Reason) ->
+ {ok, DelayedResp#delayed_resp{resp = Resp}};
+send_delayed_error(#delayed_resp{resp = Resp, req = Req}, Reason) ->
update_timeout_stats(Reason, Req),
log_error_with_stack_trace(Reason),
throw({http_abort, Resp, Reason}).
-
close_delayed_json_object(Resp, Buffer, Terminator, 0) ->
% Use a separate chunk to close the streamed array to maintain strict
% compatibility with earlier versions. See COUCHDB-2724
@@ -913,13 +1029,11 @@ close_delayed_json_object(Resp, Buffer, Terminator, 0) ->
close_delayed_json_object(Resp, Buffer, Terminator, _Threshold) ->
send_delayed_chunk(Resp, [Buffer | Terminator]).
-
-end_delayed_json_response(#delayed_resp{buffer_response=false}=DelayedResp) ->
- {ok, #delayed_resp{resp=Resp}} =
+end_delayed_json_response(#delayed_resp{buffer_response = false} = DelayedResp) ->
+ {ok, #delayed_resp{resp = Resp}} =
start_delayed_response(DelayedResp),
end_json_response(Resp);
-
-end_delayed_json_response(#delayed_resp{buffer_response=true}=DelayedResp) ->
+end_delayed_json_response(#delayed_resp{buffer_response = true} = DelayedResp) ->
#delayed_resp{
start_fun = StartFun,
req = Req,
@@ -928,36 +1042,37 @@ end_delayed_json_response(#delayed_resp{buffer_response=true}=DelayedResp) ->
chunks = Chunks
} = DelayedResp,
{ok, Resp} = StartFun(Req, Code, Headers),
- lists:foreach(fun
- ([]) -> ok;
- (Chunk) -> send_chunk(Resp, Chunk)
- end, lists:reverse(Chunks)),
+ lists:foreach(
+ fun
+ ([]) -> ok;
+ (Chunk) -> send_chunk(Resp, Chunk)
+ end,
+ lists:reverse(Chunks)
+ ),
end_json_response(Resp).
-
-get_delayed_req(#delayed_resp{req=#httpd{mochi_req=MochiReq}}) ->
+get_delayed_req(#delayed_resp{req = #httpd{mochi_req = MochiReq}}) ->
MochiReq;
get_delayed_req(Resp) ->
Resp:get(request).
-start_delayed_response(#delayed_resp{resp=nil}=DelayedResp) ->
+start_delayed_response(#delayed_resp{resp = nil} = DelayedResp) ->
#delayed_resp{
- start_fun=StartFun,
- req=Req,
- code=Code,
- headers=Headers,
- chunks=[FirstChunk]
- }=DelayedResp,
+ start_fun = StartFun,
+ req = Req,
+ code = Code,
+ headers = Headers,
+ chunks = [FirstChunk]
+ } = DelayedResp,
{ok, Resp} = StartFun(Req, Code, Headers),
case FirstChunk of
"" -> ok;
_ -> {ok, Resp} = send_chunk(Resp, FirstChunk)
end,
- {ok, DelayedResp#delayed_resp{resp=Resp}};
-start_delayed_response(#delayed_resp{}=DelayedResp) ->
+ {ok, DelayedResp#delayed_resp{resp = Resp}};
+start_delayed_response(#delayed_resp{} = DelayedResp) ->
{ok, DelayedResp}.
-
buffer_response(Req) ->
case chttpd:qs_value(Req, "buffer_response") of
"false" ->
@@ -968,9 +1083,8 @@ buffer_response(Req) ->
config:get_boolean("chttpd", "buffer_response", false)
end.
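Usage note for buffer_response/1 above: the per-request query parameter wins, and the [chttpd] buffer_response configuration key applies only when the parameter is absent. Assuming the semantics implied by the clause shown, ?buffer_response=false forces the streamed (unbuffered) behaviour for that one request regardless of the configured default.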
-
error_info({erlfdb_error, ErrorCode}) ->
- ErrorDesc = erlfdb:get_error_string(ErrorCode),
+ ErrorDesc = erlfdb:get_error_string(ErrorCode),
Reason = ?l2b(io_lib:format("code: ~B, desc: ~s", [ErrorCode, ErrorDesc])),
{500, erlfdb_error, Reason};
error_info({Error, Reason}) when is_list(Reason) ->
@@ -1001,8 +1115,10 @@ error_info({conflict, _}) ->
{409, <<"conflict">>, <<"Document update conflict.">>};
error_info({partition_overflow, DocId}) ->
Descr = <<
- "Partition limit exceeded due to update on '", DocId/binary, "'"
- >>,
+ "Partition limit exceeded due to update on '",
+ DocId/binary,
+ "'"
+ >>,
{403, <<"partition_overflow">>, Descr};
error_info({{not_found, missing}, {_, _}}) ->
{409, <<"not_found">>, <<"missing_rev">>};
@@ -1013,8 +1129,10 @@ error_info({forbidden, Msg}) ->
error_info({unauthorized, Msg}) ->
{401, <<"unauthorized">>, Msg};
error_info(file_exists) ->
- {412, <<"file_exists">>, <<"The database could not be "
- "created, the file already exists.">>};
+ {412, <<"file_exists">>, <<
+ "The database could not be "
+ "created, the file already exists."
+ >>};
error_info({error, {nodedown, Reason}}) ->
{412, <<"nodedown">>, Reason};
error_info({maintenance_mode, Node}) ->
@@ -1030,16 +1148,17 @@ error_info({bad_ctype, Reason}) ->
error_info(requested_range_not_satisfiable) ->
{416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
error_info({error, {illegal_database_name, Name}}) ->
- Message = <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
- "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
- "are allowed. Must begin with a letter.">>,
+ Message =
+ <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
+ "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
+ "are allowed. Must begin with a letter.">>,
{400, <<"illegal_database_name">>, Message};
error_info({illegal_docid, Reason}) ->
{400, <<"illegal_docid">>, Reason};
error_info({illegal_partition, Reason}) ->
{400, <<"illegal_partition">>, Reason};
-error_info({_DocID,{illegal_docid,DocID}}) ->
- {400, <<"illegal_docid">>,DocID};
+error_info({_DocID, {illegal_docid, DocID}}) ->
+ {400, <<"illegal_docid">>, DocID};
error_info({error, {database_name_too_long, DbName}}) ->
{400, <<"database_name_too_long">>,
<<"At least one path segment of `", DbName/binary, "` is too long.">>};
@@ -1060,18 +1179,24 @@ error_info({request_entity_too_large, {bulk_get, Max}}) when is_integer(Max) ->
error_info({request_entity_too_large, DocID}) ->
{413, <<"document_too_large">>, DocID};
error_info({error, security_migration_updates_disabled}) ->
- {503, <<"security_migration">>, <<"Updates to security docs are disabled during "
- "security migration.">>};
+ {503, <<"security_migration">>, <<
+ "Updates to security docs are disabled during "
+ "security migration."
+ >>};
error_info(all_workers_died) ->
- {503, <<"service unvailable">>, <<"Nodes are unable to service this "
- "request due to overloading or maintenance mode.">>};
+ {503, <<"service unvailable">>, <<
+ "Nodes are unable to service this "
+ "request due to overloading or maintenance mode."
+ >>};
error_info(not_implemented) ->
{501, <<"not_implemented">>, <<"this feature is not yet implemented">>};
error_info({disabled, Reason}) ->
{501, <<"disabled">>, Reason};
error_info(timeout) ->
- {500, <<"timeout">>, <<"The request could not be processed in a reasonable"
- " amount of time.">>};
+ {500, <<"timeout">>, <<
+ "The request could not be processed in a reasonable"
+ " amount of time."
+ >>};
error_info(decryption_failed) ->
{500, <<"decryption_failed">>, <<"Decryption failed">>};
error_info(not_ciphertext) ->
@@ -1079,8 +1204,12 @@ error_info(not_ciphertext) ->
error_info({service_unavailable, Reason}) ->
{503, <<"service unavailable">>, Reason};
error_info({unknown_eval_api_language, Language}) ->
- {400, <<"unknown_eval_api_language">>, <<"unsupported language in design"
- " doc: `", Language/binary, "`">>};
+ {400, <<"unknown_eval_api_language">>, <<
+ "unsupported language in design"
+ " doc: `",
+ Language/binary,
+ "`"
+ >>};
error_info({timeout, _Reason}) ->
error_info(timeout);
error_info({Error, null}) ->
@@ -1106,61 +1235,83 @@ maybe_handle_error(Error) ->
{500, <<"unknown_error">>, couch_util:to_binary(Error)}
end.
-
-error_headers(#httpd{mochi_req=MochiReq}=Req, 401=Code, ErrorStr, ReasonStr) ->
+error_headers(#httpd{mochi_req = MochiReq} = Req, 401 = Code, ErrorStr, ReasonStr) ->
% this is where the basic auth popup is triggered
case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
- undefined ->
- case chttpd_util:get_chttpd_config("WWW-Authenticate") of
undefined ->
- % If the client is a browser and the basic auth popup isn't turned on
- % redirect to the session page.
- case ErrorStr of
- <<"unauthorized">> ->
- case chttpd_util:get_chttpd_auth_config(
- "authentication_redirect", "/_utils/session.html") of
- undefined -> {Code, []};
- AuthRedirect ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true ->
- % send the browser popup header no matter what if we are require_valid_user
- {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
- false ->
- case MochiReq:accepts_content_type("application/json") of
- true ->
- {Code, []};
- false ->
- case MochiReq:accepts_content_type("text/html") of
- true ->
- % Redirect to the path the user requested, not
- % the one that is used internally.
- UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
+ case chttpd_util:get_chttpd_config("WWW-Authenticate") of
+ undefined ->
+ % If the client is a browser and the basic auth popup isn't turned on,
+ % redirect to the session page.
+ case ErrorStr of
+ <<"unauthorized">> ->
+ case
+ chttpd_util:get_chttpd_auth_config(
+ "authentication_redirect", "/_utils/session.html"
+ )
+ of
undefined ->
- MochiReq:get(path);
- VHostPath ->
- VHostPath
- end,
- RedirectLocation = lists:flatten([
- AuthRedirect,
- "?return=", couch_util:url_encode(UrlReturnRaw),
- "&reason=", couch_util:url_encode(ReasonStr)
- ]),
- {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
- false ->
- {Code, []}
- end
- end
- end
- end;
- _Else ->
- {Code, []}
+ {Code, []};
+ AuthRedirect ->
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true ->
+ % always send the browser popup header when require_valid_user is set
+ {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
+ false ->
+ case
+ MochiReq:accepts_content_type("application/json")
+ of
+ true ->
+ {Code, []};
+ false ->
+ case
+ MochiReq:accepts_content_type("text/html")
+ of
+ true ->
+ % Redirect to the path the user requested, not
+ % the one that is used internally.
+ UrlReturnRaw =
+ case
+ MochiReq:get_header_value(
+ "x-couchdb-vhost-path"
+ )
+ of
+ undefined ->
+ MochiReq:get(path);
+ VHostPath ->
+ VHostPath
+ end,
+ RedirectLocation = lists:flatten([
+ AuthRedirect,
+ "?return=",
+ couch_util:url_encode(UrlReturnRaw),
+ "&reason=",
+ couch_util:url_encode(ReasonStr)
+ ]),
+ {302, [
+ {"Location",
+ absolute_uri(
+ Req, RedirectLocation
+ )}
+ ]};
+ false ->
+ {Code, []}
+ end
+ end
+ end
+ end;
+ _Else ->
+ {Code, []}
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
end;
Type ->
{Code, [{"WWW-Authenticate", Type}]}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
end;
error_headers(_, Code, _, _) ->
{Code, []}.
@@ -1177,18 +1328,32 @@ send_error(#httpd{} = Req, Code, ErrorStr, ReasonStr) ->
send_error(Req, Code, [], ErrorStr, ReasonStr, []).
send_error(Req, Code, Headers, ErrorStr, ReasonStr, []) ->
- Return = send_json(Req, Code, Headers,
- {[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]}),
+ Return = send_json(
+ Req,
+ Code,
+ Headers,
+ {[
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ ]}
+ ),
span_error(Code, ErrorStr, ReasonStr, []),
Return;
send_error(Req, Code, Headers, ErrorStr, ReasonStr, Stack) ->
log_error_with_stack_trace({ErrorStr, ReasonStr, Stack}),
- Return = send_json(Req, Code, [stack_trace_id(Stack) | Headers],
- {[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr} |
- case Stack of [] -> []; _ -> [{<<"ref">>, stack_hash(Stack)}] end
- ]}),
+ Return = send_json(
+ Req,
+ Code,
+ [stack_trace_id(Stack) | Headers],
+ {[
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ | case Stack of
+ [] -> [];
+ _ -> [{<<"ref">>, stack_hash(Stack)}]
+ end
+ ]}
+ ),
span_error(Code, ErrorStr, ReasonStr, Stack),
Return.
@@ -1199,20 +1364,27 @@ update_timeout_stats(timeout, #httpd{requested_path_parts = PathParts}) ->
update_timeout_stats(_, _) ->
ok.
-update_timeout_stats([_, <<"_partition">>, _, <<"_design">>, _,
- <<"_view">> | _]) ->
+update_timeout_stats([
+ _,
+ <<"_partition">>,
+ _,
+ <<"_design">>,
+ _,
+ <<"_view">>
+ | _
+]) ->
couch_stats:increment_counter([couchdb, httpd, partition_view_timeouts]);
-update_timeout_stats([_, <<"_partition">>, _, <<"_find">>| _]) ->
+update_timeout_stats([_, <<"_partition">>, _, <<"_find">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, partition_find_timeouts]);
-update_timeout_stats([_, <<"_partition">>, _, <<"_explain">>| _]) ->
+update_timeout_stats([_, <<"_partition">>, _, <<"_explain">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, partition_explain_timeouts]);
update_timeout_stats([_, <<"_partition">>, _, <<"_all_docs">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, partition_all_docs_timeouts]);
update_timeout_stats([_, <<"_design">>, _, <<"_view">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, view_timeouts]);
-update_timeout_stats([_, <<"_find">>| _]) ->
+update_timeout_stats([_, <<"_find">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, find_timeouts]);
-update_timeout_stats([_, <<"_explain">>| _]) ->
+update_timeout_stats([_, <<"_explain">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, explain_timeouts]);
update_timeout_stats([_, <<"_all_docs">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, all_docs_timeouts]);
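Worked example of the clause ordering above: a timeout under /db/_partition/p/_design/d/_view/v yields the path parts

    [<<"db">>, <<"_partition">>, <<"p">>, <<"_design">>, <<"d">>, <<"_view">>, <<"v">>]

so the first clause matches and partition_view_timeouts is incremented, while a plain /db/_design/d/_view/v falls through to the view_timeouts clause.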
@@ -1223,17 +1395,21 @@ update_timeout_stats(_) ->
send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
send_chunk(Resp, Reason),
send_chunk(Resp, []);
-
send_chunked_error(Resp, Error) ->
Stack = json_stack(Error),
log_error_with_stack_trace(Error),
{Code, ErrorStr, ReasonStr} = error_info(Error),
- JsonError = {[{<<"code">>, Code},
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr} |
- case Stack of [] -> []; _ -> [{<<"ref">>, stack_hash(Stack)}] end
- ]},
- send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
+ JsonError =
+ {[
+ {<<"code">>, Code},
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ | case Stack of
+ [] -> [];
+ _ -> [{<<"ref">>, stack_hash(Stack)}]
+ end
+ ]},
+ send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(JsonError), $\n])),
send_chunk(Resp, []).
send_redirect(Req, Path) ->
@@ -1261,32 +1437,43 @@ json_stack({_Error, _Reason, Stack}) when is_list(Stack) ->
json_stack(_) ->
[].
-json_stack_item({M,F,A}) ->
+json_stack_item({M, F, A}) ->
list_to_binary(io_lib:format("~s:~s/~B", [M, F, json_stack_arity(A)]));
-json_stack_item({M,F,A,L}) ->
+json_stack_item({M, F, A, L}) ->
case proplists:get_value(line, L) of
- undefined -> json_stack_item({M,F,A});
- Line -> list_to_binary(io_lib:format("~s:~s/~B L~B",
- [M, F, json_stack_arity(A), Line]))
+ undefined ->
+ json_stack_item({M, F, A});
+ Line ->
+ list_to_binary(
+ io_lib:format(
+ "~s:~s/~B L~B",
+ [M, F, json_stack_arity(A), Line]
+ )
+ )
end;
json_stack_item(_) ->
<<"bad entry in stacktrace">>.
json_stack_arity(A) ->
- if is_integer(A) -> A; is_list(A) -> length(A); true -> 0 end.
+ if
+ is_integer(A) -> A;
+ is_list(A) -> length(A);
+ true -> 0
+ end.
maybe_decompress(Httpd, Body) ->
case header_value(Httpd, "Content-Encoding", "identity") of
- "gzip" ->
- try
- zlib:gunzip(Body)
- catch error:data_error ->
- throw({bad_request, "Request body is not properly gzipped."})
- end;
- "identity" ->
- Body;
- Else ->
- throw({bad_ctype, [Else, " is not a supported content encoding."]})
+ "gzip" ->
+ try
+ zlib:gunzip(Body)
+ catch
+ error:data_error ->
+ throw({bad_request, "Request body is not properly gzipped."})
+ end;
+ "identity" ->
+ Body;
+ Else ->
+ throw({bad_ctype, [Else, " is not a supported content encoding."]})
end.
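To illustrate the gzip branch above (the payload is hypothetical), the round trip relies on the standard zlib module:

    Compressed = zlib:gzip(<<"{\"ok\":true}">>),
    <<"{\"ok\":true}">> = zlib:gunzip(Compressed).

A body arriving with any Content-Encoding other than gzip or identity is rejected with bad_ctype, as the last clause shows.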
log_error_with_stack_trace({bad_request, _, _}) ->
@@ -1299,8 +1486,16 @@ log_error_with_stack_trace({Error, Reason, Stack}) ->
hash => stack_hash(Stack),
stacktrace => Stack
}),
- EFmt = if is_binary(Error) -> "~s"; true -> "~w" end,
- RFmt = if is_binary(Reason) -> "~s"; true -> "~w" end,
+ EFmt =
+ if
+ is_binary(Error) -> "~s";
+ true -> "~w"
+ end,
+ RFmt =
+ if
+ is_binary(Reason) -> "~s";
+ true -> "~w"
+ end,
Fmt = "req_err(~w) " ++ EFmt ++ " : " ++ RFmt ++ "~n ~p",
couch_log:error(Fmt, [stack_hash(Stack), Error, Reason, Stack]);
log_error_with_stack_trace(_) ->
@@ -1325,9 +1520,10 @@ chunked_response_buffer_size() ->
chttpd_util:get_chttpd_config_integer("chunked_response_buffer", 1490).
basic_headers(Req, Headers0) ->
- Headers = Headers0
- ++ server_header()
- ++ couch_httpd_auth:cookie_auth_header(Req, Headers0),
+ Headers =
+ Headers0 ++
+ server_header() ++
+ couch_httpd_auth:cookie_auth_header(Req, Headers0),
Headers1 = chttpd_cors:headers(Req, Headers),
Headers2 = chttpd_xframe_options:header(Req, Headers1),
Headers3 = [reqid(), timing() | Headers2],
@@ -1377,26 +1573,33 @@ start_span(Req) ->
path_parts = PathParts
} = Req,
{OperationName, ExtraTags} = get_action(Req),
- Path = case PathParts of
- [] -> <<"">>;
- [_ | _] -> filename:join(PathParts)
- end,
+ Path =
+ case PathParts of
+ [] -> <<"">>;
+ [_ | _] -> filename:join(PathParts)
+ end,
{IsExternalSpan, RootOptions} = root_span_options(MochiReq),
- Tags = maps:merge(#{
- peer => Peer,
- 'http.method' => Method,
- nonce => Nonce,
- 'http.url' => MochiReq:get(raw_path),
- path_parts => Path,
- 'span.kind' => <<"server">>,
- component => <<"couchdb.chttpd">>,
- external => IsExternalSpan
- }, ExtraTags),
-
- ctrace:start_span(OperationName, [
- {tags, Tags},
- {time, Begin}
- ] ++ RootOptions).
+ Tags = maps:merge(
+ #{
+ peer => Peer,
+ 'http.method' => Method,
+ nonce => Nonce,
+ 'http.url' => MochiReq:get(raw_path),
+ path_parts => Path,
+ 'span.kind' => <<"server">>,
+ component => <<"couchdb.chttpd">>,
+ external => IsExternalSpan
+ },
+ ExtraTags
+ ),
+
+ ctrace:start_span(
+ OperationName,
+ [
+ {tags, Tags},
+ {time, Begin}
+ ] ++ RootOptions
+ ).
root_span_options(MochiReq) ->
case get_trace_headers(MochiReq) of
@@ -1420,8 +1623,9 @@ parse_span_id(Hex) ->
to_int(Hex, N) when length(Hex) =:= N ->
try
list_to_integer(Hex, 16)
- catch error:badarg ->
- undefined
+ catch
+ error:badarg ->
+ undefined
end.
get_trace_headers(MochiReq) ->
@@ -1442,20 +1646,21 @@ get_trace_headers(MochiReq) ->
];
_ ->
[undefined, undefined, undefined]
- end
+ end
end.
get_action(#httpd{} = Req) ->
try
chttpd_handlers:handler_info(Req)
- catch Tag:Error ->
- ?LOG_ERROR(#{
- what => tracing_configuration_failure,
- tag => Tag,
- details => Error
- }),
- couch_log:error("Cannot set tracing action ~p:~p", [Tag, Error]),
- {undefined, #{}}
+ catch
+ Tag:Error ->
+ ?LOG_ERROR(#{
+ what => tracing_configuration_failure,
+ tag => Tag,
+ details => Error
+ }),
+ couch_log:error("Cannot set tracing action ~p:~p", [Tag, Error]),
+ {undefined, #{}}
end.
span_ok(#httpd_resp{code = Code}) ->
@@ -1501,52 +1706,83 @@ check_url_encoding_pass_test_() ->
check_url_encoding_fail_test_() ->
[
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%?rev=1-abcdefgh")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%2")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/user%2Fdbname%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/user%2Fdbname/doc_id%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/%2")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2%3A")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%%3Ae")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2g")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%g2"))
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname/doc_id%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname/doc_id%?rev=1-abcdefgh")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%2")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname/doc_id%2")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/user%2Fdbname%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/user%2Fdbname/doc_id%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/%2")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%2%3A")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%%3Ae")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%2g")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%g2")
+ )
].
log_format_test() ->
?assertEqual(
"127.0.0.1:15984 127.0.0.1 undefined "
"GET /_cluster_setup 201 ok 10000",
- test_log_request("/_cluster_setup", undefined)),
+ test_log_request("/_cluster_setup", undefined)
+ ),
?assertEqual(
"127.0.0.1:15984 127.0.0.1 user_foo "
"GET /_all_dbs 201 ok 10000",
- test_log_request("/_all_dbs", #user_ctx{name = <<"user_foo">>})),
+ test_log_request("/_all_dbs", #user_ctx{name = <<"user_foo">>})
+ ),
%% Utf8Name = unicode:characters_to_binary(Something),
- Utf8User = <<227,130,136,227,129,134,227,129,147,227,129,157>>,
+ Utf8User = <<227, 130, 136, 227, 129, 134, 227, 129, 147, 227, 129, 157>>,
?assertEqual(
"127.0.0.1:15984 127.0.0.1 %E3%82%88%E3%81%86%E3%81%93%E3%81%9D "
"GET /_all_dbs 201 ok 10000",
- test_log_request("/_all_dbs", #user_ctx{name = Utf8User})),
+ test_log_request("/_all_dbs", #user_ctx{name = Utf8User})
+ ),
ok.
test_log_request(RawPath, UserCtx) ->
@@ -1554,14 +1790,14 @@ test_log_request(RawPath, UserCtx) ->
MochiReq = mochiweb_request:new(socket, [], 'POST', RawPath, version, Headers),
Req = #httpd{
mochi_req = MochiReq,
- begin_ts = {1458,588713,124003},
+ begin_ts = {1458, 588713, 124003},
original_method = 'GET',
peer = "127.0.0.1",
nonce = "nonce",
user_ctx = UserCtx
},
Resp = #httpd_resp{
- end_ts = {1458,588723,124303},
+ end_ts = {1458, 588723, 124303},
code = 201,
status = ok
},
@@ -1575,14 +1811,20 @@ test_log_request(RawPath, UserCtx) ->
handle_req_after_auth_test() ->
Headers = mochiweb_headers:make([{"HOST", "127.0.0.1:15984"}]),
- MochiReq = mochiweb_request:new(socket, [], 'PUT', "/newdb", version,
- Headers),
+ MochiReq = mochiweb_request:new(
+ socket,
+ [],
+ 'PUT',
+ "/newdb",
+ version,
+ Headers
+ ),
UserCtx = #user_ctx{name = <<"retain_user">>},
Roles = [<<"_reader">>],
AuthorizedCtx = #user_ctx{name = <<"retain_user">>, roles = Roles},
Req = #httpd{
mochi_req = MochiReq,
- begin_ts = {1458,588713,124003},
+ begin_ts = {1458, 588713, 124003},
original_method = 'PUT',
peer = "127.0.0.1",
nonce = "nonce",
@@ -1592,18 +1834,22 @@ handle_req_after_auth_test() ->
ok = meck:new(chttpd_handlers, [passthrough]),
ok = meck:new(chttpd_auth, [passthrough]),
ok = meck:expect(chttpd_handlers, url_handler, fun(_Key, _Fun) ->
- fun(_Req) -> handled_authorized_req end
+ fun(_Req) -> handled_authorized_req end
end),
ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
AuthorizedReq
end),
- ?assertEqual({AuthorizedReq, handled_authorized_req},
- handle_req_after_auth(foo_key, Req)),
+ ?assertEqual(
+ {AuthorizedReq, handled_authorized_req},
+ handle_req_after_auth(foo_key, Req)
+ ),
ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
meck:exception(throw, {http_abort, resp, some_reason})
end),
- ?assertEqual({Req, {aborted, resp, some_reason}},
- handle_req_after_auth(foo_key, Req)),
+ ?assertEqual(
+ {Req, {aborted, resp, some_reason}},
+ handle_req_after_auth(foo_key, Req)
+ ),
ok = meck:unload(chttpd_handlers),
ok = meck:unload(chttpd_auth).
diff --git a/src/chttpd/src/chttpd_auth.erl b/src/chttpd/src/chttpd_auth.erl
index ffae78171..20b5a05f1 100644
--- a/src/chttpd/src/chttpd_auth.erl
+++ b/src/chttpd/src/chttpd_auth.erl
@@ -27,7 +27,6 @@
-define(SERVICE_ID, chttpd_auth).
-
%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------
@@ -38,7 +37,6 @@ authenticate(HttpReq, Default) ->
authorize(HttpReq, Default) ->
maybe_handle(authorize, [HttpReq], Default).
-
%% ------------------------------------------------------------------
%% Default callbacks
%% ------------------------------------------------------------------
@@ -55,17 +53,20 @@ proxy_authentication_handler(Req) ->
jwt_authentication_handler(Req) ->
couch_httpd_auth:jwt_authentication_handler(Req).
-party_mode_handler(#httpd{method='POST', path_parts=[<<"_session">>]} = Req) ->
+party_mode_handler(#httpd{method = 'POST', path_parts = [<<"_session">>]} = Req) ->
% See #1947 - users should always be able to attempt a login
- Req#httpd{user_ctx=#user_ctx{}};
-party_mode_handler(#httpd{path_parts=[<<"_up">>]} = Req) ->
+ Req#httpd{user_ctx = #user_ctx{}};
+party_mode_handler(#httpd{path_parts = [<<"_up">>]} = Req) ->
RequireValidUser = config:get_boolean("chttpd", "require_valid_user", false),
- RequireValidUserExceptUp = config:get_boolean("chttpd", "require_valid_user_except_for_up", false),
+ RequireValidUserExceptUp = config:get_boolean(
+ "chttpd", "require_valid_user_except_for_up", false
+ ),
require_valid_user(Req, RequireValidUser andalso not RequireValidUserExceptUp);
-
party_mode_handler(Req) ->
RequireValidUser = config:get_boolean("chttpd", "require_valid_user", false),
- RequireValidUserExceptUp = config:get_boolean("chttpd", "require_valid_user_except_for_up", false),
+ RequireValidUserExceptUp = config:get_boolean(
+ "chttpd", "require_valid_user_except_for_up", false
+ ),
require_valid_user(Req, RequireValidUser orelse RequireValidUserExceptUp).
require_valid_user(_Req, true) ->
@@ -75,13 +76,12 @@ require_valid_user(Req, false) ->
[] ->
Req#httpd{user_ctx = ?ADMIN_USER};
_ ->
- Req#httpd{user_ctx=#user_ctx{}}
+ Req#httpd{user_ctx = #user_ctx{}}
end.
handle_session_req(Req) ->
couch_httpd_auth:handle_session_req(Req, chttpd_auth_cache).
-
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index 88ffb7ade..e0e8aed28 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -15,8 +15,14 @@
-behaviour(config_listener).
-export([start_link/0, get_user_creds/2, update_user_creds/3]).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-export([listen_for_changes/1, changes_callback/2]).
-export([handle_config_change/5, handle_config_terminate/3]).
@@ -29,7 +35,7 @@
-record(state, {
changes_pid,
- last_seq="0"
+ last_seq = "0"
}).
%% public functions
@@ -40,18 +46,21 @@ start_link() ->
get_user_creds(Req, UserName) when is_list(UserName) ->
get_user_creds(Req, ?l2b(UserName));
get_user_creds(_Req, UserName) when is_binary(UserName) ->
- Resp = case couch_auth_cache:get_admin(UserName) of
- nil ->
- get_from_cache(UserName);
- Props ->
- case get_from_cache(UserName) of
- nil ->
- Props;
- UserProps when is_list(UserProps) ->
- couch_auth_cache:add_roles(Props,
- couch_util:get_value(<<"roles">>, UserProps))
- end
- end,
+ Resp =
+ case couch_auth_cache:get_admin(UserName) of
+ nil ->
+ get_from_cache(UserName);
+ Props ->
+ case get_from_cache(UserName) of
+ nil ->
+ Props;
+ UserProps when is_list(UserProps) ->
+ couch_auth_cache:add_roles(
+ Props,
+ couch_util:get_value(<<"roles">>, UserProps)
+ )
+ end
+ end,
maybe_validate_user_creds(Resp).
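Illustrative walk-through of the merge above (the documents are hypothetical): for a user defined both as a config admin and in the authentication db, couch_auth_cache:add_roles/2 folds the roles of the cached user doc into the admin properties, so creds carrying [<<"_admin">>] plus a cached doc with [<<"accounting">>] yield both roles before maybe_validate_user_creds/1 runs.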
update_user_creds(_Req, UserDoc, _Ctx) ->
@@ -128,35 +137,38 @@ handle_call(reinit_cache, _From, State) ->
self() ! {start_listener, 0},
{reply, ok, State#state{changes_pid = undefined}};
-
handle_call(_Call, _From, State) ->
{noreply, State}.
handle_cast(_Msg, State) ->
{noreply, State}.
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
- Seq = case Reason of
- {seq, EndSeq} ->
- EndSeq;
- {database_does_not_exist, _} ->
- ?LOG_NOTICE(#{
- what => changes_listener_died,
- reason => database_does_not_exist,
- details => "create the _users database to silence this notice"
- }),
- couch_log:notice("~p changes listener died because the _users database does not exist. Create the database to silence this notice.", [?MODULE]),
- 0;
- _ ->
- ?LOG_NOTICE(#{
- what => changes_listener_died,
- reason => Reason
- }),
- couch_log:notice("~p changes listener died ~r", [?MODULE, Reason]),
- 0
- end,
+handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid = Pid} = State) ->
+ Seq =
+ case Reason of
+ {seq, EndSeq} ->
+ EndSeq;
+ {database_does_not_exist, _} ->
+ ?LOG_NOTICE(#{
+ what => changes_listener_died,
+ reason => database_does_not_exist,
+ details => "create the _users database to silence this notice"
+ }),
+ couch_log:notice(
+ "~p changes listener died because the _users database does not exist. Create the database to silence this notice.",
+ [?MODULE]
+ ),
+ 0;
+ _ ->
+ ?LOG_NOTICE(#{
+ what => changes_listener_died,
+ reason => Reason
+ }),
+ couch_log:notice("~p changes listener died ~r", [?MODULE, Reason]),
+ 0
+ end,
erlang:send_after(5000, self(), {start_listener, Seq}),
- {noreply, State#state{last_seq=Seq}};
+ {noreply, State#state{last_seq = Seq}};
handle_info({start_listener, Seq}, State) ->
{noreply, State#state{changes_pid = spawn_changes(Seq)}};
handle_info(restart_config_listener, State) ->
@@ -170,7 +182,7 @@ terminate(_Reason, #state{changes_pid = Pid}) when is_pid(Pid) ->
terminate(_Reason, _State) ->
ok.
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
%% private functions
@@ -214,7 +226,6 @@ changes_callback({timeout, _ResponseType}, Acc) ->
changes_callback({error, _}, EndSeq) ->
exit({seq, EndSeq}).
-
handle_config_change("chttpd_auth", "authentication_db", _DbName, _, _) ->
{ok, gen_server:call(?MODULE, reinit_cache, infinity)};
handle_config_change(_, _, _, _, _) ->
@@ -226,36 +237,35 @@ handle_config_terminate(_Server, _Reason, _State) ->
Dst = whereis(?MODULE),
erlang:send_after(?RELISTEN_DELAY, Dst, restart_config_listener).
-
load_user_from_db(UserName) ->
{ok, Db} = fabric2_db:open(dbname(), [?ADMIN_CTX]),
try fabric2_db:open_doc(Db, docid(UserName), [conflicts]) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- Props;
- _Else ->
- ?LOG_DEBUG(#{
- what => missing_user_document,
- user => UserName
- }),
- couch_log:debug("no record of user ~s", [UserName]),
- nil
- catch error:database_does_not_exist ->
- nil
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ Props;
+ _Else ->
+ ?LOG_DEBUG(#{
+ what => missing_user_document,
+ user => UserName
+ }),
+ couch_log:debug("no record of user ~s", [UserName]),
+ nil
+ catch
+ error:database_does_not_exist ->
+ nil
end.
-
ensure_auth_db() ->
try
fabric2_db:open(dbname(), [?ADMIN_CTX])
- catch error:database_does_not_exist ->
- case fabric2_db:create(dbname(), [?ADMIN_CTX]) of
- {ok, _} -> ok;
- {error, file_exists} -> ok
- end
+ catch
+ error:database_does_not_exist ->
+ case fabric2_db:create(dbname(), [?ADMIN_CTX]) of
+ {ok, _} -> ok;
+ {error, file_exists} -> ok
+ end
end.
-
dbname() ->
DbNameStr = config:get("chttpd_auth", "authentication_db", "_users"),
iolist_to_binary(DbNameStr).
@@ -268,30 +278,37 @@ username(<<"org.couchdb.user:", UserName/binary>>) ->
ensure_auth_ddoc_exists(Db, DDocId) ->
case fabric2_db:open_doc(Db, DDocId) of
- {not_found, _Reason} ->
- {ok, AuthDesign} = couch_auth_cache:auth_design_doc(DDocId),
- update_doc_ignoring_conflict(Db, AuthDesign);
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
- ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
- ok;
- _ ->
- Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
- {<<"validate_doc_update">>,
- ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
- NewDoc = couch_doc:from_json_obj({Props1}),
- update_doc_ignoring_conflict(Db, NewDoc)
- end;
- {error, Reason} ->
- ?LOG_NOTICE(#{
- what => ensure_auth_ddoc_exists_failure,
- db => dbname(),
- docid => DDocId,
- details => Reason
- }),
- couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [dbname(), DDocId, Reason]),
- ok
+ {not_found, _Reason} ->
+ {ok, AuthDesign} = couch_auth_cache:auth_design_doc(DDocId),
+ update_doc_ignoring_conflict(Db, AuthDesign);
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
+ ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
+ ok;
+ _ ->
+ Props1 = lists:keyreplace(
+ <<"validate_doc_update">>,
+ 1,
+ Props,
+ {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
+ ),
+ NewDoc = couch_doc:from_json_obj({Props1}),
+ update_doc_ignoring_conflict(Db, NewDoc)
+ end;
+ {error, Reason} ->
+ ?LOG_NOTICE(#{
+ what => ensure_auth_ddoc_exists_failure,
+ db => dbname(),
+ docid => DDocId,
+ details => Reason
+ }),
+ couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [
+ dbname(),
+ DDocId,
+ Reason
+ ]),
+ ok
end,
ok.
@@ -308,15 +325,18 @@ maybe_validate_user_creds(nil) ->
% throws if UserCreds includes a _conflicts member
% returns UserCreds otherwise
maybe_validate_user_creds(UserCreds) ->
- AllowConflictedUserDocs = config:get_boolean("chttpd_auth", "allow_conflicted_user_docs", false),
+ AllowConflictedUserDocs = config:get_boolean(
+ "chttpd_auth", "allow_conflicted_user_docs", false
+ ),
case {couch_util:get_value(<<"_conflicts">>, UserCreds), AllowConflictedUserDocs} of
{undefined, _} ->
{ok, UserCreds, nil};
{_, true} ->
{ok, UserCreds, nil};
{_ConflictList, false} ->
- throw({unauthorized,
- <<"User document conflicts must be resolved before the document",
- " is used for authentication purposes.">>
- })
+ throw(
+ {unauthorized,
+ <<"User document conflicts must be resolved before the document",
+ " is used for authentication purposes.">>}
+ )
end.
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index 3f6f97602..93275a13c 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -14,104 +14,103 @@
-export([authorize_request/1]).
-include_lib("couch/include/couch_db.hrl").
-authorize_request(#httpd{auth=Auth, user_ctx=Ctx} = Req) ->
+authorize_request(#httpd{auth = Auth, user_ctx = Ctx} = Req) ->
try
- authorize_request_int(Req)
+ authorize_request_int(Req)
catch
- throw:{forbidden, Msg} ->
- case {Auth, Ctx} of
- {{cookie_auth_failed, {Error, Reason}}, _} ->
- throw({forbidden, {Error, Reason}});
- {_, #user_ctx{name=null}} ->
- throw({unauthorized, Msg});
- {_, _} ->
- throw({forbidden, Msg})
- end
+ throw:{forbidden, Msg} ->
+ case {Auth, Ctx} of
+ {{cookie_auth_failed, {Error, Reason}}, _} ->
+ throw({forbidden, {Error, Reason}});
+ {_, #user_ctx{name = null}} ->
+ throw({unauthorized, Msg});
+ {_, _} ->
+ throw({forbidden, Msg})
+ end
end.
-authorize_request_int(#httpd{path_parts=[]}=Req) ->
+authorize_request_int(#httpd{path_parts = []} = Req) ->
Req;
-authorize_request_int(#httpd{path_parts=[<<"favicon.ico">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"favicon.ico">> | _]} = Req) ->
Req;
-authorize_request_int(#httpd{path_parts=[<<"_all_dbs">>|_]}=Req) ->
- case config:get_boolean("chttpd", "admin_only_all_dbs", true) of
- true -> require_admin(Req);
- false -> Req
- end;
-authorize_request_int(#httpd{path_parts=[<<"_dbs_info">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_all_dbs">> | _]} = Req) ->
+ case config:get_boolean("chttpd", "admin_only_all_dbs", true) of
+ true -> require_admin(Req);
+ false -> Req
+ end;
+authorize_request_int(#httpd{path_parts = [<<"_dbs_info">> | _]} = Req) ->
Req;
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='PUT'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>], method = 'PUT'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='DELETE'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>], method = 'DELETE'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>,<<"_all_docs">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>, <<"_all_docs">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>,<<"_changes">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>, <<"_changes">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">> | _]} = Req) ->
db_authorization_check(Req);
-authorize_request_int(#httpd{path_parts=[<<"_reshard">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_reshard">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>], method='PUT'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>], method = 'PUT'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>], method='DELETE'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>], method = 'DELETE'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>,<<"_all_docs">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>, <<"_all_docs">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>,<<"_changes">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>, <<"_changes">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">> | _]} = Req) ->
db_authorization_check(Req);
-authorize_request_int(#httpd{path_parts=[<<"_", _/binary>>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_", _/binary>> | _]} = Req) ->
server_authorization_check(Req);
-authorize_request_int(#httpd{path_parts=[_DbName], method='PUT'}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName], method = 'PUT'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName], method='DELETE'}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName], method = 'DELETE'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_compact">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_compact">> | _]} = Req) ->
require_db_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_view_cleanup">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_view_cleanup">>]} = Req) ->
require_db_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_sync_shards">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_sync_shards">>]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_purge">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_purge">>]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_purged_infos_limit">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_purged_infos_limit">>]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName | _]} = Req) ->
db_authorization_check(Req).
-
-server_authorization_check(#httpd{path_parts=[<<"_up">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_up">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_uuids">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_uuids">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_session">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_session">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_replicate">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_replicate">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_stats">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_stats">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_active_tasks">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_active_tasks">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_dbs_info">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_dbs_info">>]} = Req) ->
Req;
-server_authorization_check(#httpd{method=Method, path_parts=[<<"_utils">>|_]}=Req)
- when Method =:= 'HEAD' orelse Method =:= 'GET' ->
+server_authorization_check(#httpd{method = Method, path_parts = [<<"_utils">> | _]} = Req) when
+ Method =:= 'HEAD' orelse Method =:= 'GET'
+->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_node">>,_ , <<"_stats">>|_]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_stats">> | _]} = Req) ->
require_metrics(Req);
-server_authorization_check(#httpd{path_parts=[<<"_node">>,_ , <<"_system">>|_]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_system">> | _]} = Req) ->
require_metrics(Req);
-server_authorization_check(#httpd{path_parts=[<<"_", _/binary>>|_]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_", _/binary>> | _]} = Req) ->
require_admin(Req).
-db_authorization_check(#httpd{path_parts=[_DbName|_]}=Req) ->
+db_authorization_check(#httpd{path_parts = [_DbName | _]} = Req) ->
% Db authorization checks are performed in fabric before every FDB operation
Req.
-
-require_metrics(#httpd{user_ctx=#user_ctx{roles=UserRoles}}=Req) ->
+require_metrics(#httpd{user_ctx = #user_ctx{roles = UserRoles}} = Req) ->
IsAdmin = lists:member(<<"_admin">>, UserRoles),
IsMetrics = lists:member(<<"_metrics">>, UserRoles),
case {IsAdmin, IsMetrics} of
@@ -124,15 +123,15 @@ require_admin(Req) ->
ok = couch_httpd:verify_is_server_admin(Req),
Req.
-require_db_admin(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
+require_db_admin(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req) ->
{ok, Db} = fabric2_db:open(DbName, [{user_ctx, Ctx}]),
Sec = fabric2_db:get_security(Db),
- case is_db_admin(Ctx,Sec) of
+ case is_db_admin(Ctx, Sec) of
true -> Req;
- false -> throw({unauthorized, <<"You are not a server or db admin.">>})
+ false -> throw({unauthorized, <<"You are not a server or db admin.">>})
end.
-is_db_admin(#user_ctx{name=UserName,roles=UserRoles}, {Security}) ->
+is_db_admin(#user_ctx{name = UserName, roles = UserRoles}, {Security}) ->
{Admins} = couch_util:get_value(<<"admins">>, Security, {[]}),
Names = couch_util:get_value(<<"names">>, Admins, []),
Roles = couch_util:get_value(<<"roles">>, Admins, []),
diff --git a/src/chttpd/src/chttpd_changes.erl b/src/chttpd/src/chttpd_changes.erl
index 29ead3d39..0d3ce396f 100644
--- a/src/chttpd/src/chttpd_changes.erl
+++ b/src/chttpd/src/chttpd_changes.erl
@@ -65,64 +65,82 @@ handle_db_changes(Args0, Req, Db0) ->
DbName = fabric2_db:name(Db0),
StartListenerFun = fun() ->
fabric2_events:link_listener(
- ?MODULE, handle_db_event, self(), [{dbname, DbName}]
- )
+ ?MODULE, handle_db_event, self(), [{dbname, DbName}]
+ )
end,
Start = fun() ->
- StartSeq = case Dir =:= rev orelse Since =:= now of
- true -> fabric2_db:get_update_seq(Db0);
- false -> Since
- end,
+ StartSeq =
+ case Dir =:= rev orelse Since =:= now of
+ true -> fabric2_db:get_update_seq(Db0);
+ false -> Since
+ end,
{Db0, StartSeq}
end,
% begin timer to deal with heartbeat when filter function fails
case Args#changes_args.heartbeat of
- undefined ->
- erlang:erase(last_changes_heartbeat);
- Val when is_integer(Val); Val =:= true ->
- put(last_changes_heartbeat, os:timestamp())
+ undefined ->
+ erlang:erase(last_changes_heartbeat);
+ Val when is_integer(Val); Val =:= true ->
+ put(last_changes_heartbeat, os:timestamp())
end,
case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
- true ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- {ok, Listener} = StartListenerFun(),
-
- {Db, StartSeq} = Start(),
- UserAcc2 = start_sending_changes(Callback, UserAcc),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
- <<"">>, Timeout, TimeoutFun),
- try
- keep_sending_changes(
- Args#changes_args{dir=fwd},
- Acc0,
- true)
- after
- fabric2_events:stop_listener(Listener),
- get_rest_updated(ok) % clean out any remaining update messages
+ true ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ {ok, Listener} = StartListenerFun(),
+
+ {Db, StartSeq} = Start(),
+ UserAcc2 = start_sending_changes(Callback, UserAcc),
+ {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+ Acc0 = build_acc(
+ Args,
+ Callback,
+ UserAcc2,
+ Db,
+ StartSeq,
+ <<"">>,
+ Timeout,
+ TimeoutFun
+ ),
+ try
+ keep_sending_changes(
+ Args#changes_args{dir = fwd},
+ Acc0,
+ true
+ )
+ after
+ fabric2_events:stop_listener(Listener),
+ % clean out any remaining update messages
+ get_rest_updated(ok)
+ end
+ end;
+ false ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ UserAcc2 = start_sending_changes(Callback, UserAcc),
+ {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+ {Db, StartSeq} = Start(),
+ Acc0 = build_acc(
+ Args#changes_args{feed = "normal"},
+ Callback,
+ UserAcc2,
+ Db,
+ StartSeq,
+ <<>>,
+ Timeout,
+ TimeoutFun
+ ),
+ {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
+ send_changes(
+ Acc0,
+ Dir,
+ true
+ ),
+ end_sending_changes(Callback, UserAcc3, LastSeq)
end
- end;
- false ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- UserAcc2 = start_sending_changes(Callback, UserAcc),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- {Db, StartSeq} = Start(),
- Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
- UserAcc2, Db, StartSeq, <<>>,
- Timeout, TimeoutFun),
- {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
- send_changes(
- Acc0,
- Dir,
- true),
- end_sending_changes(Callback, UserAcc3, LastSeq)
- end
end.
-
handle_db_event(_DbName, updated, Parent) ->
Parent ! updated,
{ok, Parent};
@@ -132,7 +150,6 @@ handle_db_event(_DbName, deleted, Parent) ->
handle_db_event(_DbName, _Event, Parent) ->
{ok, Parent}.
-
handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
case Msg of
{index_commit, DDocId} ->
@@ -149,20 +166,20 @@ get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 2) ->
get_callback_acc(Callback) when is_function(Callback, 1) ->
{fun(Ev, _) -> Callback(Ev) end, ok}.
-
configure_filter(Filter, _Style, _Req, _Db) when is_tuple(Filter) ->
% Filter has already been configured
Filter;
configure_filter("_doc_ids", Style, Req, _Db) ->
{doc_ids, Style, get_doc_ids(Req)};
configure_filter("_selector", Style, Req, _Db) ->
- {selector, Style, get_selector_and_fields(Req)};
+ {selector, Style, get_selector_and_fields(Req)};
configure_filter("_design", Style, _Req, _Db) ->
{design_docs, Style};
configure_filter("_view", Style, Req, Db) ->
ViewName = get_view_qs(Req),
- if ViewName /= "" -> ok; true ->
- throw({bad_request, "`view` filter parameter is not provided."})
+ if
+ ViewName /= "" -> ok;
+ true -> throw({bad_request, "`view` filter parameter is not provided."})
end,
ViewNameParts = string:tokens(ViewName, "/"),
case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
@@ -200,7 +217,6 @@ configure_filter(FilterName, Style, Req, Db) ->
throw({bad_request, Msg})
end.
-
filter(Db, Change, {default, Style}) ->
apply_style(Db, Change, Style);
filter(Db, Change, {doc_ids, Style, DocIds}) ->
@@ -212,8 +228,10 @@ filter(Db, Change, {doc_ids, Style, DocIds}) ->
end;
filter(Db, Change, {selector, Style, {Selector, _Fields}}) ->
Docs = open_revs(Db, Change, Style),
- Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
- || Doc <- Docs],
+ Passes = [
+ mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
+ || Doc <- Docs
+ ],
filter_revs(Passes, Docs);
filter(Db, Change, {design_docs, Style}) ->
case maps:get(id, Change) of
@@ -227,17 +245,17 @@ filter(Db, Change, {view, Style, DDoc, VName}) ->
{ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
filter_revs(Passes, Docs);
filter(Db, Change, {custom, Style, Req0, DDoc, FName}) ->
- Req = case Req0 of
- {json_req, _} -> Req0;
- #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
- end,
+ Req =
+ case Req0 of
+ {json_req, _} -> Req0;
+ #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
+ end,
Docs = open_revs(Db, Change, Style),
{ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
filter_revs(Passes, Docs);
filter(Db, Change, Filter) ->
erlang:error({filter_error, Db, Change, Filter}).
-
get_view_qs({json_req, {Props}}) ->
{Query} = couch_util:get_value(<<"query">>, Props, {[]}),
binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
@@ -246,42 +264,43 @@ get_view_qs(Req) ->
get_doc_ids({json_req, {Props}}) ->
check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='POST'}=Req) ->
+get_doc_ids(#httpd{method = 'POST'} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
{Props} = couch_httpd:json_body_obj(Req),
check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='GET'}=Req) ->
+get_doc_ids(#httpd{method = 'GET'} = Req) ->
DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
check_docids(DocIds);
get_doc_ids(_) ->
throw({bad_request, no_doc_ids_provided}).
-
get_selector_and_fields({json_req, {Props}}) ->
Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
{Selector, Fields};
-get_selector_and_fields(#httpd{method='POST'}=Req) ->
+get_selector_and_fields(#httpd{method = 'POST'} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
- get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
+ get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
get_selector_and_fields(_) ->
throw({bad_request, "Selector must be specified in POST payload"}).
-
check_docids(DocIds) when is_list(DocIds) ->
- lists:foreach(fun
- (DocId) when not is_binary(DocId) ->
- Msg = "`doc_ids` filter parameter is not a list of doc ids.",
- throw({bad_request, Msg});
- (_) -> ok
- end, DocIds),
+ lists:foreach(
+ fun
+ (DocId) when not is_binary(DocId) ->
+ Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+ throw({bad_request, Msg});
+ (_) ->
+ ok
+ end,
+ DocIds
+ ),
DocIds;
check_docids(_) ->
Msg = "`doc_ids` filter parameter is not a list of doc ids.",
throw({bad_request, Msg}).
-
-check_selector(Selector={_}) ->
+check_selector(Selector = {_}) ->
try
mango_selector:normalize(Selector)
catch
@@ -292,7 +311,6 @@ check_selector(Selector={_}) ->
check_selector(_Selector) ->
throw({bad_request, "Selector error: expected a JSON object"}).
-
check_fields(nil) ->
nil;
check_fields(Fields) when is_list(Fields) ->
@@ -307,18 +325,15 @@ check_fields(Fields) when is_list(Fields) ->
check_fields(_Fields) ->
throw({bad_request, "Selector error: fields must be JSON array"}).
-
open_ddoc(Db, DDocId) ->
case fabric2_db:open_doc(Db, DDocId, [ejson_body, ?ADMIN_CTX]) of
{ok, _} = Resp -> Resp;
Else -> throw(Else)
end.
-
-check_member_exists(#doc{body={Props}}, Path) ->
+check_member_exists(#doc{body = {Props}}, Path) ->
couch_util:get_nested_json_value({Props}, Path).
-
apply_style(_Db, Change, main_only) ->
#{rev_id := RevId} = Change,
[{[{<<"rev">>, couch_doc:rev_to_str(RevId)}]}];
@@ -326,18 +341,20 @@ apply_style(Db, Change, all_docs) ->
% We have to fetch all revs for this row
#{id := DocId} = Change,
{ok, Resps} = fabric2_db:open_doc_revs(Db, DocId, all, [deleted]),
- lists:flatmap(fun(Resp) ->
- case Resp of
- {ok, #doc{revs = {Pos, [Rev | _]}}} ->
- [{[{<<"rev">>, couch_doc:rev_to_str({Pos, Rev})}]}];
- _ ->
- []
- end
- end, Resps);
+ lists:flatmap(
+ fun(Resp) ->
+ case Resp of
+ {ok, #doc{revs = {Pos, [Rev | _]}}} ->
+ [{[{<<"rev">>, couch_doc:rev_to_str({Pos, Rev})}]}];
+ _ ->
+ []
+ end
+ end,
+ Resps
+ );
apply_style(Db, Change, Style) ->
erlang:error({changes_apply_style, Db, Change, Style}).
-
open_revs(Db, Change, Style) ->
#{id := DocId} = Change,
Options = [deleted, conflicts],
@@ -350,22 +367,24 @@ open_revs(Db, Change, Style) ->
{ok, Docs} = fabric2_db:open_doc_revs(Db, DocId, all, Options),
[Doc || {ok, Doc} <- Docs]
end
- catch _:_ ->
-        % We didn't log this before; should we now?
- []
+ catch
+ _:_ ->
+            % We didn't log this before; should we now?
+ []
end.
-
filter_revs(Passes, Docs) ->
- lists:flatmap(fun
- ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
- RevStr = couch_doc:rev_to_str({RevPos, RevId}),
- Change = {[{<<"rev">>, RevStr}]},
- [Change];
- (_) ->
- []
- end, lists:zip(Passes, Docs)).
-
+ lists:flatmap(
+ fun
+ ({true, #doc{revs = {RevPos, [RevId | _]}}}) ->
+ RevStr = couch_doc:rev_to_str({RevPos, RevId}),
+ Change = {[{<<"rev">>, RevStr}]},
+ [Change];
+ (_) ->
+ []
+ end,
+ lists:zip(Passes, Docs)
+ ).
get_changes_timeout(Args, Callback) ->
#changes_args{
@@ -374,24 +393,24 @@ get_changes_timeout(Args, Callback) ->
feed = ResponseType
} = Args,
DefaultTimeout = chttpd_util:get_chttpd_config_integer(
- "changes_timeout", 60000),
+ "changes_timeout", 60000
+ ),
case Heartbeat of
- undefined ->
- case Timeout of
undefined ->
- {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
- infinity ->
- {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+ case Timeout of
+ undefined ->
+ {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
+ infinity ->
+ {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+ _ ->
+ {lists:min([DefaultTimeout, Timeout]), fun(UserAcc) -> {stop, UserAcc} end}
+ end;
+ true ->
+ {DefaultTimeout, fun(UserAcc) -> Callback({timeout, ResponseType}, UserAcc) end};
_ ->
- {lists:min([DefaultTimeout, Timeout]),
- fun(UserAcc) -> {stop, UserAcc} end}
- end;
- true ->
- {DefaultTimeout,
- fun(UserAcc) -> Callback({timeout, ResponseType}, UserAcc) end};
- _ ->
- {lists:min([DefaultTimeout, Heartbeat]),
- fun(UserAcc) -> Callback({timeout, ResponseType}, UserAcc) end}
+ {lists:min([DefaultTimeout, Heartbeat]), fun(UserAcc) ->
+ Callback({timeout, ResponseType}, UserAcc)
+ end}
end.
start_sending_changes(Callback, UserAcc) ->
@@ -421,8 +440,8 @@ build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) -
conflicts = Conflicts,
timeout = Timeout,
timeout_fun = TimeoutFun,
- aggregation_results=[],
- aggregation_kvs=[]
+ aggregation_results = [],
+ aggregation_kvs = []
}.
send_changes(Acc, Dir, FirstRound) ->
@@ -440,37 +459,43 @@ send_changes(Acc, Dir, FirstRound) ->
fabric2_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
end.
-
can_optimize(true, {doc_ids, _Style, DocIds}) ->
- MaxDocIds = config:get_integer("couchdb",
- "changes_doc_ids_optimization_threshold", 100),
- if length(DocIds) =< MaxDocIds ->
- {true, fun send_changes_doc_ids/6};
- true ->
- false
+ MaxDocIds = config:get_integer(
+ "couchdb",
+ "changes_doc_ids_optimization_threshold",
+ 100
+ ),
+ if
+ length(DocIds) =< MaxDocIds ->
+ {true, fun send_changes_doc_ids/6};
+ true ->
+ false
end;
can_optimize(true, {design_docs, _Style}) ->
{true, fun send_changes_design_docs/6};
can_optimize(_, _) ->
false.
-
send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
Results = fabric2_db:get_full_doc_infos(Db, DocIds),
- FullInfos = lists:foldl(fun
- (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
- (not_found, Acc) -> Acc
- end, [], Results),
+ FullInfos = lists:foldl(
+ fun
+ (#full_doc_info{} = FDI, Acc) -> [FDI | Acc];
+ (not_found, Acc) -> Acc
+ end,
+ [],
+ Results
+ ),
send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
FoldFun = fun(FDI, Acc) ->
case FDI of
{row, Row} ->
DocId = proplists:get_value(id, Row),
{ok, [fabric2_db:get_full_doc_info(Db, DocId) | Acc]};
- _ -> {ok, Acc}
+ _ ->
+ {ok, Acc}
end
end,
Opts = [
@@ -481,61 +506,73 @@ send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
{ok, FullInfos} = fabric2_db:fold_docs(Db, FoldFun, [], Opts),
send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
- FoldFun = case Dir of
- fwd -> fun lists:foldl/3;
- rev -> fun lists:foldr/3
- end,
- GreaterFun = case Dir of
- fwd -> fun(A, B) -> A > B end;
- rev -> fun(A, B) -> A =< B end
- end,
- DocInfos = lists:foldl(fun(FDI, Acc) ->
- DI = couch_doc:to_doc_info(FDI),
- case GreaterFun(DI#doc_info.high_seq, StartSeq) of
- true -> [DI | Acc];
- false -> Acc
- end
- end, [], FullDocInfos),
- SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
- FinalAcc = try
- FoldFun(fun(DocInfo, Acc) ->
- % Kinda gross that we're munging this back to a map
- % that will then have to re-read and rebuild the FDI
- % for all_docs style. But c'est la vie.
- #doc_info{
- id = DocId,
- high_seq = Seq,
- revs = [#rev_info{rev = Rev, deleted = Deleted} | _]
- } = DocInfo,
- Change = #{
- id => DocId,
- sequence => Seq,
- rev_id => Rev,
- deleted => Deleted
- },
- case Fun(Change, Acc) of
- {ok, NewAcc} ->
- NewAcc;
- {stop, NewAcc} ->
- throw({stop, NewAcc})
+ FoldFun =
+ case Dir of
+ fwd -> fun lists:foldl/3;
+ rev -> fun lists:foldr/3
+ end,
+ GreaterFun =
+ case Dir of
+ fwd -> fun(A, B) -> A > B end;
+ rev -> fun(A, B) -> A =< B end
+ end,
+ DocInfos = lists:foldl(
+ fun(FDI, Acc) ->
+ DI = couch_doc:to_doc_info(FDI),
+ case GreaterFun(DI#doc_info.high_seq, StartSeq) of
+ true -> [DI | Acc];
+ false -> Acc
end
- end, Acc0, SortedDocInfos)
- catch
- {stop, Acc} -> Acc
- end,
+ end,
+ [],
+ FullDocInfos
+ ),
+ SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
+ FinalAcc =
+ try
+ FoldFun(
+ fun(DocInfo, Acc) ->
+ % Kinda gross that we're munging this back to a map
+ % that will then have to re-read and rebuild the FDI
+ % for all_docs style. But c'est la vie.
+ #doc_info{
+ id = DocId,
+ high_seq = Seq,
+ revs = [#rev_info{rev = Rev, deleted = Deleted} | _]
+ } = DocInfo,
+ Change = #{
+ id => DocId,
+ sequence => Seq,
+ rev_id => Rev,
+ deleted => Deleted
+ },
+ case Fun(Change, Acc) of
+ {ok, NewAcc} ->
+ NewAcc;
+ {stop, NewAcc} ->
+ throw({stop, NewAcc})
+ end
+ end,
+ Acc0,
+ SortedDocInfos
+ )
+ catch
+ {stop, Acc} -> Acc
+ end,
case Dir of
fwd ->
- FinalAcc0 = case element(1, FinalAcc) of
- changes_acc -> % we came here via couch_http or internal call
- FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)}
- end,
+ FinalAcc0 =
+ case element(1, FinalAcc) of
+ % we came here via couch_http or internal call
+ changes_acc ->
+ FinalAcc#changes_acc{seq = fabric2_db:get_update_seq(Db)}
+ end,
{ok, FinalAcc0};
- rev -> {ok, FinalAcc}
+ rev ->
+ {ok, FinalAcc}
end.
-
keep_sending_changes(Args, Acc0, FirstRound) ->
#changes_args{
feed = ResponseType,
@@ -546,39 +583,50 @@ keep_sending_changes(Args, Acc0, FirstRound) ->
{ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
#changes_acc{
- db = Db, callback = Callback,
- timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
- prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
+ db = Db,
+ callback = Callback,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun,
+ seq = EndSeq,
+ prepend = Prepend2,
+ user_acc = UserAcc2,
+ limit = NewLimit
} = maybe_upgrade_changes_acc(ChangesAcc),
- if Limit > NewLimit, ResponseType == "longpoll" ->
- end_sending_changes(Callback, UserAcc2, EndSeq);
- true ->
- {Go, UserAcc3} = notify_waiting_for_updates(Callback, UserAcc2),
- if Go /= ok -> end_sending_changes(Callback, UserAcc3, EndSeq); true ->
- case wait_updated(Timeout, TimeoutFun, UserAcc3) of
- {updated, UserAcc4} ->
- UserCtx = fabric2_db:get_user_ctx(Db),
- DbOptions1 = [{user_ctx, UserCtx} | DbOptions],
- case fabric2_db:open(fabric2_db:name(Db), DbOptions1) of
- {ok, Db2} ->
- ?MODULE:keep_sending_changes(
- Args#changes_args{limit=NewLimit},
- ChangesAcc#changes_acc{
- db = Db2,
- user_acc = UserAcc4,
- seq = EndSeq,
- prepend = Prepend2,
- timeout = Timeout,
- timeout_fun = TimeoutFun},
- false);
- _Else ->
- end_sending_changes(Callback, UserAcc3, EndSeq)
- end;
- {stop, UserAcc4} ->
- end_sending_changes(Callback, UserAcc4, EndSeq)
+ if
+ Limit > NewLimit, ResponseType == "longpoll" ->
+ end_sending_changes(Callback, UserAcc2, EndSeq);
+ true ->
+ {Go, UserAcc3} = notify_waiting_for_updates(Callback, UserAcc2),
+ if
+ Go /= ok ->
+ end_sending_changes(Callback, UserAcc3, EndSeq);
+ true ->
+ case wait_updated(Timeout, TimeoutFun, UserAcc3) of
+ {updated, UserAcc4} ->
+ UserCtx = fabric2_db:get_user_ctx(Db),
+ DbOptions1 = [{user_ctx, UserCtx} | DbOptions],
+ case fabric2_db:open(fabric2_db:name(Db), DbOptions1) of
+ {ok, Db2} ->
+ ?MODULE:keep_sending_changes(
+ Args#changes_args{limit = NewLimit},
+ ChangesAcc#changes_acc{
+ db = Db2,
+ user_acc = UserAcc4,
+ seq = EndSeq,
+ prepend = Prepend2,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun
+ },
+ false
+ );
+ _Else ->
+ end_sending_changes(Callback, UserAcc3, EndSeq)
+ end;
+ {stop, UserAcc4} ->
+ end_sending_changes(Callback, UserAcc4, EndSeq)
+ end
end
- end
end.
notify_waiting_for_updates(Callback, UserAcc) ->
@@ -600,65 +648,71 @@ changes_enumerator(Change, Acc) ->
Results0 = filter(Db, Change, Filter),
Results = [Result || Result <- Results0, Result /= null],
Seq = maps:get(sequence, Change),
- Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
- case Results of
- [] ->
- {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
- case Done of
- stop ->
- {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
- ok ->
- {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
- end;
- _ ->
- ChangesRow = changes_row(Results, Change, Acc),
- {UserGo, UserAcc2} = Callback({change, ChangesRow}, UserAcc),
- RealGo = case UserGo of
- ok -> Go;
- stop -> stop
+ Go =
+ if
+ (Limit =< 1) andalso Results =/= [] -> stop;
+ true -> ok
end,
- reset_heartbeat(),
- {RealGo, Acc#changes_acc{
- seq = Seq,
- user_acc = UserAcc2,
- limit = Limit - 1
- }}
+ case Results of
+ [] ->
+ {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+ case Done of
+ stop ->
+ {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
+ ok ->
+ {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
+ end;
+ _ ->
+ ChangesRow = changes_row(Results, Change, Acc),
+ {UserGo, UserAcc2} = Callback({change, ChangesRow}, UserAcc),
+ RealGo =
+ case UserGo of
+ ok -> Go;
+ stop -> stop
+ end,
+ reset_heartbeat(),
+ {RealGo, Acc#changes_acc{
+ seq = Seq,
+ user_acc = UserAcc2,
+ limit = Limit - 1
+ }}
end.
-
changes_row(Results, Change, Acc) ->
#{
id := Id,
sequence := Seq,
deleted := Del
} = Change,
- {[
- {<<"seq">>, Seq},
- {<<"id">>, Id},
- {<<"changes">>, Results}
- ] ++ deleted_item(Del) ++ maybe_get_changes_doc(Change, Acc)}.
+ {
+ [
+ {<<"seq">>, Seq},
+ {<<"id">>, Id},
+ {<<"changes">>, Results}
+ ] ++ deleted_item(Del) ++ maybe_get_changes_doc(Change, Acc)
+ }.
-maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
+maybe_get_changes_doc(Value, #changes_acc{include_docs = true} = Acc) ->
#changes_acc{
db = Db,
doc_options = DocOpts0,
conflicts = Conflicts,
filter = Filter
} = Acc,
- OpenOpts = case Conflicts of
- true -> [deleted, conflicts];
- false -> [deleted]
- end,
- DocOpts1 = case Conflicts of
- true -> [conflicts | DocOpts0];
- false -> DocOpts0
- end,
+ OpenOpts =
+ case Conflicts of
+ true -> [deleted, conflicts];
+ false -> [deleted]
+ end,
+ DocOpts1 =
+ case Conflicts of
+ true -> [conflicts | DocOpts0];
+ false -> DocOpts0
+ end,
load_doc(Db, Value, OpenOpts, DocOpts1, Filter);
-
maybe_get_changes_doc(_Value, _Acc) ->
[].
-
load_doc(Db, Value, Opts, DocOpts, Filter) ->
case load_doc(Db, Value, Opts) of
null ->
@@ -667,7 +721,6 @@ load_doc(Db, Value, Opts, DocOpts, Filter) ->
[{doc, doc_to_json(Doc, DocOpts, Filter)}]
end.
-
load_doc(Db, Change, Opts) ->
#{
id := Id,
@@ -680,68 +733,66 @@ load_doc(Db, Change, Opts) ->
null
end.
-
-doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
- when Fields =/= nil ->
+doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}}) when
+ Fields =/= nil
+->
mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
doc_to_json(Doc, DocOpts, _Filter) ->
couch_doc:to_json_obj(Doc, DocOpts).
-
deleted_item(true) -> [{<<"deleted">>, true}];
deleted_item(_) -> [].
% waits for an updated msg; if there are multiple msgs, collects them.
wait_updated(Timeout, TimeoutFun, UserAcc) ->
receive
- updated ->
- get_rest_updated(UserAcc);
- deleted ->
- {stop, UserAcc}
+ updated ->
+ get_rest_updated(UserAcc);
+ deleted ->
+ {stop, UserAcc}
after Timeout ->
{Go, UserAcc2} = TimeoutFun(UserAcc),
case Go of
- ok ->
- ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
- stop ->
- {stop, UserAcc2}
+ ok ->
+ ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
+ stop ->
+ {stop, UserAcc2}
end
end.
get_rest_updated(UserAcc) ->
receive
- updated ->
- get_rest_updated(UserAcc)
+ updated ->
+ get_rest_updated(UserAcc)
after 0 ->
{updated, UserAcc}
end.
reset_heartbeat() ->
case get(last_changes_heartbeat) of
- undefined ->
- ok;
- _ ->
- put(last_changes_heartbeat, os:timestamp())
+ undefined ->
+ ok;
+ _ ->
+ put(last_changes_heartbeat, os:timestamp())
end.
maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
Before = get(last_changes_heartbeat),
case Before of
- undefined ->
- {ok, Acc};
- _ ->
- Now = os:timestamp(),
- case timer:now_diff(Now, Before) div 1000 >= Timeout of
- true ->
- {StopOrGo, Acc2} = TimeoutFun(Acc),
- put(last_changes_heartbeat, Now),
- {StopOrGo, Acc2};
- false ->
- {ok, Acc}
- end
+ undefined ->
+ {ok, Acc};
+ _ ->
+ Now = os:timestamp(),
+ case timer:now_diff(Now, Before) div 1000 >= Timeout of
+ true ->
+ {StopOrGo, Acc2} = TimeoutFun(Acc),
+ put(last_changes_heartbeat, Now),
+ {StopOrGo, Acc2};
+ false ->
+ {ok, Acc}
+ end
end.
-
maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
Acc;
maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
diff --git a/src/chttpd/src/chttpd_cors.erl b/src/chttpd/src/chttpd_cors.erl
index a2cf16720..e907e7110 100644
--- a/src/chttpd/src/chttpd_cors.erl
+++ b/src/chttpd/src/chttpd_cors.erl
@@ -12,7 +12,6 @@
-module(chttpd_cors).
-
-export([
maybe_handle_preflight_request/1,
maybe_handle_preflight_request/2,
@@ -24,15 +23,13 @@
get_cors_config/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("chttpd/include/chttpd_cors.hrl").
-include_lib("kernel/include/logger.hrl").
-
%% http://www.w3.org/TR/cors/#resource-preflight-requests
-maybe_handle_preflight_request(#httpd{method=Method}) when Method /= 'OPTIONS' ->
+maybe_handle_preflight_request(#httpd{method = Method}) when Method /= 'OPTIONS' ->
not_preflight;
maybe_handle_preflight_request(Req) ->
case maybe_handle_preflight_request(Req, get_cors_config(Req)) of
@@ -42,8 +39,7 @@ maybe_handle_preflight_request(Req) ->
chttpd:send_response_no_cors(Req, 204, PreflightHeaders, <<>>)
end.
-
-maybe_handle_preflight_request(#httpd{}=Req, Config) ->
+maybe_handle_preflight_request(#httpd{} = Req, Config) ->
case is_cors_enabled(Config) of
true ->
case preflight_request(Req, Config) of
@@ -68,7 +64,6 @@ maybe_handle_preflight_request(#httpd{}=Req, Config) ->
not_preflight
end.
-
preflight_request(Req, Config) ->
case get_origin(Req) of
undefined ->
@@ -103,70 +98,85 @@ preflight_request(Req, Config) ->
end
end.
-
handle_preflight_request(Req, Config, Origin) ->
case chttpd:header_value(Req, "Access-Control-Request-Method") of
- undefined ->
- %% If there is no Access-Control-Request-Method header
- %% or if parsing failed, do not set any additional headers
- %% and terminate this set of steps. The request is outside
- %% the scope of this specification.
- %% http://www.w3.org/TR/cors/#resource-preflight-requests
- not_preflight;
- Method ->
- SupportedMethods = get_origin_config(Config, Origin,
- <<"allow_methods">>, ?SUPPORTED_METHODS),
-
- SupportedHeaders = get_origin_config(Config, Origin,
- <<"allow_headers">>, ?SUPPORTED_HEADERS),
-
-
- %% get max age
- MaxAge = couch_util:get_value(<<"max_age">>, Config,
- ?CORS_DEFAULT_MAX_AGE),
-
- PreflightHeaders0 = maybe_add_credentials(Config, Origin, [
- {"Access-Control-Allow-Origin", binary_to_list(Origin)},
- {"Access-Control-Max-Age", MaxAge},
- {"Access-Control-Allow-Methods",
- string:join(SupportedMethods, ", ")}]),
-
- case lists:member(Method, SupportedMethods) of
- true ->
-            %% method ok, check headers
- AccessHeaders = chttpd:header_value(Req,
- "Access-Control-Request-Headers"),
- {FinalReqHeaders, ReqHeaders} = case AccessHeaders of
- undefined -> {"", []};
- "" -> {"", []};
- Headers ->
-                %% transform the header list into something we
-                %% can check; make sure everything is a
-                %% list
- RH = [to_lower(H)
- || H <- split_headers(Headers)],
- {Headers, RH}
- end,
- %% check if headers are supported
- case ReqHeaders -- SupportedHeaders of
- [] ->
- PreflightHeaders = PreflightHeaders0 ++
- [{"Access-Control-Allow-Headers",
- FinalReqHeaders}],
- {ok, PreflightHeaders};
- _ ->
- not_preflight
- end;
- false ->
- %% If method is not a case-sensitive match for any of
-            %% the values in the list of methods, do not set any additional
- %% headers and terminate this set of steps.
+ undefined ->
+ %% If there is no Access-Control-Request-Method header
+ %% or if parsing failed, do not set any additional headers
+ %% and terminate this set of steps. The request is outside
+ %% the scope of this specification.
%% http://www.w3.org/TR/cors/#resource-preflight-requests
- not_preflight
- end
+ not_preflight;
+ Method ->
+ SupportedMethods = get_origin_config(
+ Config,
+ Origin,
+ <<"allow_methods">>,
+ ?SUPPORTED_METHODS
+ ),
+
+ SupportedHeaders = get_origin_config(
+ Config,
+ Origin,
+ <<"allow_headers">>,
+ ?SUPPORTED_HEADERS
+ ),
+
+ %% get max age
+ MaxAge = couch_util:get_value(
+ <<"max_age">>,
+ Config,
+ ?CORS_DEFAULT_MAX_AGE
+ ),
+
+ PreflightHeaders0 = maybe_add_credentials(Config, Origin, [
+ {"Access-Control-Allow-Origin", binary_to_list(Origin)},
+ {"Access-Control-Max-Age", MaxAge},
+ {"Access-Control-Allow-Methods", string:join(SupportedMethods, ", ")}
+ ]),
+
+ case lists:member(Method, SupportedMethods) of
+ true ->
+                    %% method ok, check headers
+ AccessHeaders = chttpd:header_value(
+ Req,
+ "Access-Control-Request-Headers"
+ ),
+ {FinalReqHeaders, ReqHeaders} =
+ case AccessHeaders of
+ undefined ->
+ {"", []};
+ "" ->
+ {"", []};
+ Headers ->
+                            %% transform the header list into something we
+                            %% can check; make sure everything is a
+                            %% list
+ RH = [
+ to_lower(H)
+ || H <- split_headers(Headers)
+ ],
+ {Headers, RH}
+ end,
+ %% check if headers are supported
+ case ReqHeaders -- SupportedHeaders of
+ [] ->
+ PreflightHeaders =
+ PreflightHeaders0 ++
+ [{"Access-Control-Allow-Headers", FinalReqHeaders}],
+ {ok, PreflightHeaders};
+ _ ->
+ not_preflight
+ end;
+ false ->
+ %% If method is not a case-sensitive match for any of
+                    %% the values in the list of methods, do not set any additional
+ %% headers and terminate this set of steps.
+ %% http://www.w3.org/TR/cors/#resource-preflight-requests
+ not_preflight
+ end
end.
-
headers(Req, RequestHeaders) ->
case get_origin(Req) of
undefined ->
@@ -179,7 +189,6 @@ headers(Req, RequestHeaders) ->
headers(Req, RequestHeaders, Origin, get_cors_config(Req))
end.
-
headers(_Req, RequestHeaders, undefined, _Config) ->
RequestHeaders;
headers(Req, RequestHeaders, Origin, Config) when is_list(Origin) ->
@@ -190,13 +199,13 @@ headers(Req, RequestHeaders, Origin, Config) ->
AcceptedOrigins = get_accepted_origins(Req, Config),
CorsHeaders = handle_headers(Config, Origin, AcceptedOrigins),
ExposedCouchHeaders = couch_util:get_value(
- <<"exposed_headers">>, Config, ?COUCH_HEADERS),
+ <<"exposed_headers">>, Config, ?COUCH_HEADERS
+ ),
maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders);
false ->
RequestHeaders
end.
-
maybe_apply_headers([], RequestHeaders, _ExposedCouchHeaders) ->
RequestHeaders;
maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders) ->
@@ -207,67 +216,64 @@ maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders) ->
%% need to be exposed.
%% return: RequestHeaders ++ CorsHeaders ++ ACEH
- ExposedHeaders0 = simple_headers([K || {K,_V} <- RequestHeaders]),
+ ExposedHeaders0 = simple_headers([K || {K, _V} <- RequestHeaders]),
%% If Content-Type is not in ExposedHeaders, and the Content-Type
%% is not a member of ?SIMPLE_CONTENT_TYPE_VALUES, then add it
%% into the list of ExposedHeaders
ContentType = proplists:get_value("content-type", ExposedHeaders0),
- IncludeContentType = case ContentType of
- undefined ->
- false;
- _ ->
- lists:member(string:to_lower(ContentType), ?SIMPLE_CONTENT_TYPE_VALUES)
+ IncludeContentType =
+ case ContentType of
+ undefined ->
+ false;
+ _ ->
+ lists:member(string:to_lower(ContentType), ?SIMPLE_CONTENT_TYPE_VALUES)
end,
- ExposedHeaders = case IncludeContentType of
- false ->
- ["content-type" | lists:delete("content-type", ExposedHeaders0)];
- true ->
- ExposedHeaders0
+ ExposedHeaders =
+ case IncludeContentType of
+ false ->
+ ["content-type" | lists:delete("content-type", ExposedHeaders0)];
+ true ->
+ ExposedHeaders0
end,
%% ExposedCouchHeaders may get added later, so expose them by default
- ACEH = [{"Access-Control-Expose-Headers",
- string:join(ExposedHeaders ++ ExposedCouchHeaders, ", ")}],
+ ACEH = [
+ {"Access-Control-Expose-Headers", string:join(ExposedHeaders ++ ExposedCouchHeaders, ", ")}
+ ],
CorsHeaders ++ RequestHeaders ++ ACEH.
-
simple_headers(Headers) ->
LCHeaders = [to_lower(H) || H <- Headers],
lists:filter(fun(H) -> lists:member(H, ?SIMPLE_HEADERS) end, LCHeaders).
-
to_lower(String) when is_binary(String) ->
to_lower(?b2l(String));
to_lower(String) ->
string:to_lower(String).
-
handle_headers(_Config, _Origin, []) ->
[];
handle_headers(Config, Origin, AcceptedOrigins) ->
AcceptAll = lists:member(<<"*">>, AcceptedOrigins),
case AcceptAll orelse lists:member(Origin, AcceptedOrigins) of
- true ->
- make_cors_header(Config, Origin);
- false ->
- %% If the value of the Origin header is not a
- %% case-sensitive match for any of the values
-            %% in the list of origins, do not set any additional
- %% headers and terminate this set of steps.
- %% http://www.w3.org/TR/cors/#resource-requests
- []
+ true ->
+ make_cors_header(Config, Origin);
+ false ->
+ %% If the value of the Origin header is not a
+ %% case-sensitive match for any of the values
+            %% in the list of origins, do not set any additional
+ %% headers and terminate this set of steps.
+ %% http://www.w3.org/TR/cors/#resource-requests
+ []
end.
-
make_cors_header(Config, Origin) ->
Headers = [{"Access-Control-Allow-Origin", binary_to_list(Origin)}],
maybe_add_credentials(Config, Origin, Headers).
-
%% util
-
maybe_add_credentials(Config, Origin, Headers) ->
case allow_credentials(Config, Origin) of
false ->
@@ -276,13 +282,15 @@ maybe_add_credentials(Config, Origin, Headers) ->
Headers ++ [{"Access-Control-Allow-Credentials", "true"}]
end.
-
allow_credentials(_Config, <<"*">>) ->
false;
allow_credentials(Config, Origin) ->
- get_origin_config(Config, Origin, <<"allow_credentials">>,
- ?CORS_DEFAULT_ALLOW_CREDENTIALS).
-
+ get_origin_config(
+ Config,
+ Origin,
+ <<"allow_credentials">>,
+ ?CORS_DEFAULT_ALLOW_CREDENTIALS
+ ).
get_cors_config(#httpd{cors_config = undefined, mochi_req = MochiReq}) ->
Host = couch_httpd_vhost:host(MochiReq),
@@ -290,24 +298,27 @@ get_cors_config(#httpd{cors_config = undefined, mochi_req = MochiReq}) ->
EnableCors = chttpd_util:get_chttpd_config_boolean("enable_cors", false),
AllowCredentials = cors_config(Host, "credentials", "false") =:= "true",
- AllowHeaders = case cors_config(Host, "headers", undefined) of
- undefined ->
- ?SUPPORTED_HEADERS;
- AllowHeaders0 ->
- [to_lower(H) || H <- split_list(AllowHeaders0)]
- end,
- AllowMethods = case cors_config(Host, "methods", undefined) of
- undefined ->
- ?SUPPORTED_METHODS;
- AllowMethods0 ->
- split_list(AllowMethods0)
- end,
- ExposedHeaders = case cors_config(Host, "exposed_headers", undefined) of
- undefined ->
- ?COUCH_HEADERS;
- ExposedHeaders0 ->
- [to_lower(H) || H <- split_list(ExposedHeaders0)]
- end,
+ AllowHeaders =
+ case cors_config(Host, "headers", undefined) of
+ undefined ->
+ ?SUPPORTED_HEADERS;
+ AllowHeaders0 ->
+ [to_lower(H) || H <- split_list(AllowHeaders0)]
+ end,
+ AllowMethods =
+ case cors_config(Host, "methods", undefined) of
+ undefined ->
+ ?SUPPORTED_METHODS;
+ AllowMethods0 ->
+ split_list(AllowMethods0)
+ end,
+ ExposedHeaders =
+ case cors_config(Host, "exposed_headers", undefined) of
+ undefined ->
+ ?COUCH_HEADERS;
+ ExposedHeaders0 ->
+ [to_lower(H) || H <- split_list(ExposedHeaders0)]
+ end,
MaxAge = cors_config(Host, "max_age", ?CORS_DEFAULT_MAX_AGE),
Origins0 = binary_split_list(cors_config(Host, "origins", [])),
Origins = [{O, {[]}} || O <- Origins0],
@@ -323,25 +334,24 @@ get_cors_config(#httpd{cors_config = undefined, mochi_req = MochiReq}) ->
get_cors_config(#httpd{cors_config = Config}) ->
Config.
-
cors_config(Host, Key, Default) ->
- config:get(cors_section(Host), Key,
- config:get("cors", Key, Default)).
-
+ config:get(
+ cors_section(Host),
+ Key,
+ config:get("cors", Key, Default)
+ ).
cors_section(HostValue) ->
HostPort = maybe_strip_scheme(HostValue),
Host = hd(string:tokens(HostPort, ":")),
"cors:" ++ Host.
-
maybe_strip_scheme(Host) ->
case string:str(Host, "://") of
0 -> Host;
N -> string:substr(Host, N + 3)
end.
-
is_cors_enabled(Config) ->
case get(disable_couch_httpd_cors) of
undefined ->
@@ -351,7 +361,6 @@ is_cors_enabled(Config) ->
end,
couch_util:get_value(<<"enable_cors">>, Config, false).
-
%% Get a list of {Origin, OriginConfig} tuples
%% ie: get_origin_configs(Config) ->
%% [
@@ -369,7 +378,6 @@ get_origin_configs(Config) ->
{Origins} = couch_util:get_value(<<"origins">>, Config, {[]}),
Origins.
-
%% Get config for an individual Origin
%% ie: get_origin_config(Config, <<"http://foo.com">>) ->
%% [
@@ -381,15 +389,16 @@ get_origin_config(Config, Origin) ->
{OriginConfig} = couch_util:get_value(Origin, OriginConfigs, {[]}),
OriginConfig.
-
%% Get config of a single key for an individual Origin
%% ie: get_origin_config(Config, <<"http://foo.com">>, <<"allow_methods">>, [])
%% [<<"POST">>]
get_origin_config(Config, Origin, Key, Default) ->
OriginConfig = get_origin_config(Config, Origin),
- couch_util:get_value(Key, OriginConfig,
- couch_util:get_value(Key, Config, Default)).
-
+ couch_util:get_value(
+ Key,
+ OriginConfig,
+ couch_util:get_value(Key, Config, Default)
+ ).
get_origin(Req) ->
case chttpd:header_value(Req, "Origin") of
@@ -399,18 +408,14 @@ get_origin(Req) ->
?l2b(Origin)
end.
-
get_accepted_origins(_Req, Config) ->
- lists:map(fun({K,_V}) -> K end, get_origin_configs(Config)).
-
+ lists:map(fun({K, _V}) -> K end, get_origin_configs(Config)).
split_list(S) ->
re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
binary_split_list(S) ->
[list_to_binary(E) || E <- split_list(S)].
-
split_headers(H) ->
- re:split(H, ",\\s*", [{return,list}, trim]).
+ re:split(H, ",\\s*", [{return, list}, trim]).
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 4a7b631f9..df609bb5e 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -18,17 +18,34 @@
-include_lib("couch_views/include/couch_views.hrl").
-include_lib("kernel/include/logger.hrl").
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
- db_req/2, couch_doc_open/4,handle_changes_req/2,
+-export([
+ handle_request/1,
+ handle_compact_req/2,
+ handle_design_req/2,
+ db_req/2,
+ couch_doc_open/4,
+ handle_changes_req/2,
update_doc_result_to_json/1, update_doc_result_to_json/2,
- handle_design_info_req/3, handle_view_cleanup_req/2,
- update_doc/4, http_code_from_status/1]).
-
--import(chttpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,end_json_response/1,
- start_chunked_response/3, absolute_uri/2, send/2,
- start_response_length/4]).
+ handle_design_info_req/3,
+ handle_view_cleanup_req/2,
+ update_doc/4,
+ http_code_from_status/1
+]).
+
+-import(
+ chttpd,
+ [
+ send_json/2, send_json/3, send_json/4,
+ send_method_not_allowed/2,
+ start_json_response/2,
+ send_chunk/2,
+ end_json_response/1,
+ start_chunked_response/3,
+ absolute_uri/2,
+ send/2,
+ start_response_length/4
+ ]
+).
-record(doc_query_args, {
options = [],
@@ -51,99 +68,110 @@
include_docs
}).
--define(IS_ALL_DOCS(T), (
- T == <<"_all_docs">>
- orelse T == <<"_local_docs">>
- orelse T == <<"_design_docs">>)).
+-define(IS_ALL_DOCS(T),
+ (T == <<"_all_docs">> orelse
+ T == <<"_local_docs">> orelse
+ T == <<"_design_docs">>)
+).
--define(IS_MANGO(T), (
- T == <<"_index">>
- orelse T == <<"_find">>
- orelse T == <<"_explain">>)).
+-define(IS_MANGO(T),
+ (T == <<"_index">> orelse
+ T == <<"_find">> orelse
+ T == <<"_explain">>)
+).
% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method}=Req)->
+handle_request(#httpd{path_parts = [DbName | RestParts], method = Method} = Req) ->
case {Method, RestParts} of
- {'PUT', []} ->
- create_db_req(Req, DbName);
- {'DELETE', []} ->
- % if we get ?rev=... the user is using a faulty script where the
- % document id is empty by accident. Let them recover safely.
- case chttpd:qs_value(Req, "rev", false) of
- false -> delete_db_req(Req, DbName);
- _Rev -> throw({bad_request,
- "You tried to DELETE a database with a ?=rev parameter. "
- ++ "Did you mean to DELETE a document instead?"})
- end;
- {_, []} ->
- do_db_req(Req, fun db_req/2);
- {_, [SecondPart|_]} ->
- Handler = chttpd_handlers:db_handler(SecondPart, fun db_req/2),
- do_db_req(Req, Handler)
+ {'PUT', []} ->
+ create_db_req(Req, DbName);
+ {'DELETE', []} ->
+ % if we get ?rev=... the user is using a faulty script where the
+ % document id is empty by accident. Let them recover safely.
+ case chttpd:qs_value(Req, "rev", false) of
+ false ->
+ delete_db_req(Req, DbName);
+ _Rev ->
+ throw(
+ {bad_request,
+ "You tried to DELETE a database with a ?=rev parameter. " ++
+ "Did you mean to DELETE a document instead?"}
+ )
+ end;
+ {_, []} ->
+ do_db_req(Req, fun db_req/2);
+ {_, [SecondPart | _]} ->
+ Handler = chttpd_handlers:db_handler(SecondPart, fun db_req/2),
+ do_db_req(Req, Handler)
end.
-handle_changes_req(#httpd{method='POST'}=Req, Db) ->
+handle_changes_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
fabric2_fdb:transactional(Db, fun(TxDb) ->
handle_changes_req_tx(Req, TxDb)
end);
-handle_changes_req(#httpd{method='GET'}=Req, Db) ->
+handle_changes_req(#httpd{method = 'GET'} = Req, Db) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
handle_changes_req_tx(Req, TxDb)
end);
-handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
+handle_changes_req(#httpd{path_parts = [_, <<"_changes">>]} = Req, _Db) ->
send_method_not_allowed(Req, "GET,POST,HEAD").
-handle_changes_req_tx(#httpd{}=Req, Db) ->
+handle_changes_req_tx(#httpd{} = Req, Db) ->
ChangesArgs = parse_changes_query(Req),
ChangesFun = chttpd_changes:handle_db_changes(ChangesArgs, Req, Db),
Max = chttpd:chunked_response_buffer_size(),
case ChangesArgs#changes_args.feed of
- "normal" ->
- Acc0 = #cacc{
- feed = normal,
- mochi = Req,
- threshold = Max
- },
- ChangesFun({fun changes_callback/2, Acc0});
- Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
- couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
- Acc0 = #cacc{
- feed = list_to_atom(Feed),
- mochi = Req,
- threshold = Max,
- include_docs = ChangesArgs#changes_args.include_docs
- },
- try
- ChangesFun({fun changes_callback/2, Acc0})
- after
- couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
- end;
- _ ->
- Msg = <<"Supported `feed` types: normal, continuous, live, longpoll, eventsource">>,
- throw({bad_request, Msg})
+ "normal" ->
+ Acc0 = #cacc{
+ feed = normal,
+ mochi = Req,
+ threshold = Max
+ },
+ ChangesFun({fun changes_callback/2, Acc0});
+ Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
+ couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
+ Acc0 = #cacc{
+ feed = list_to_atom(Feed),
+ mochi = Req,
+ threshold = Max,
+ include_docs = ChangesArgs#changes_args.include_docs
+ },
+ try
+ ChangesFun({fun changes_callback/2, Acc0})
+ after
+ couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
+ end;
+ _ ->
+ Msg = <<"Supported `feed` types: normal, continuous, live, longpoll, eventsource">>,
+ throw({bad_request, Msg})
end.
% callbacks for continuous feed (newline-delimited JSON Objects)
changes_callback(start, #cacc{feed = continuous} = Acc) ->
{ok, Resp} = chttpd:start_delayed_json_response(Acc#cacc.mochi, 200),
{ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, Change}, #cacc{feed = continuous,
- include_docs = IncludeDocs} = Acc) ->
+changes_callback(
+ {change, Change},
+ #cacc{
+ feed = continuous,
+ include_docs = IncludeDocs
+ } = Acc
+) ->
incr_stats_changes_feed(IncludeDocs),
Data = [?JSON_ENCODE(Change) | "\n"],
Len = iolist_size(Data),
maybe_flush_changes_feed(Acc, Data, Len);
changes_callback({stop, EndSeq, Pending}, #cacc{feed = continuous} = Acc) ->
#cacc{mochi = Resp, buffer = Buf} = Acc,
- Row = {[
- {<<"last_seq">>, EndSeq},
- {<<"pending">>, Pending}
- ]},
+ Row =
+ {[
+ {<<"last_seq">>, EndSeq},
+ {<<"pending">>, Pending}
+ ]},
Data = [Buf, ?JSON_ENCODE(Row) | "\n"],
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Data),
chttpd:end_delayed_json_response(Resp1);
-
% callbacks for eventsource feed (newline-delimited eventsource Objects)
changes_callback(start, #cacc{feed = eventsource} = Acc) ->
#cacc{mochi = Req} = Acc,
@@ -153,13 +181,18 @@ changes_callback(start, #cacc{feed = eventsource} = Acc) ->
],
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200, Headers),
{ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, {ChangeProp}=Change},
- #cacc{feed = eventsource, include_docs = IncludeDocs} = Acc) ->
+changes_callback(
+ {change, {ChangeProp} = Change},
+ #cacc{feed = eventsource, include_docs = IncludeDocs} = Acc
+) ->
incr_stats_changes_feed(IncludeDocs),
Seq = proplists:get_value(seq, ChangeProp),
Chunk = [
- "data: ", ?JSON_ENCODE(Change),
- "\n", "id: ", ?JSON_ENCODE(Seq),
+ "data: ",
+ ?JSON_ENCODE(Change),
+ "\n",
+ "id: ",
+ ?JSON_ENCODE(Seq),
"\n\n"
],
Len = iolist_size(Chunk),
@@ -173,7 +206,6 @@ changes_callback({stop, _EndSeq}, #cacc{feed = eventsource} = Acc) ->
#cacc{mochi = Resp, buffer = Buf} = Acc,
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Buf),
chttpd:end_delayed_json_response(Resp1);
-
% callbacks for longpoll and normal (single JSON Object)
changes_callback(start, #cacc{feed = normal} = Acc) ->
#cacc{mochi = Req} = Acc,
@@ -201,7 +233,6 @@ changes_callback({stop, EndSeq, Pending}, Acc) ->
],
{ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, Terminator, Max),
chttpd:end_delayed_json_response(Resp1);
-
changes_callback(waiting_for_updates, #cacc{buffer = []} = Acc) ->
#cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
case ChunksSent > 0 of
@@ -222,10 +253,11 @@ changes_callback(waiting_for_updates, Acc) ->
}};
changes_callback({timeout, ResponseType}, Acc) ->
#cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
- Chunk = case ResponseType of
- "eventsource" -> "event: heartbeat\ndata: \n\n";
- _ -> "\n"
- end,
+ Chunk =
+ case ResponseType of
+ "eventsource" -> "event: heartbeat\ndata: \n\n";
+ _ -> "\n"
+ end,
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
{ok, Acc#cacc{mochi = Resp1, chunks_sent = ChunksSent + 1}};
changes_callback({error, Reason}, #cacc{mochi = #httpd{}} = Acc) ->
@@ -237,11 +269,12 @@ changes_callback({error, Reason}, #cacc{feed = normal, responding = false} = Acc
changes_callback({error, Reason}, Acc) ->
chttpd:send_delayed_error(Acc#cacc.mochi, Reason).
-maybe_flush_changes_feed(#cacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
+maybe_flush_changes_feed(#cacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
+ Size > 0 andalso (Size + Len) > Max
+->
#cacc{buffer = Buffer, mochi = Resp} = Acc,
{ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- {ok, Acc#cacc{prepend = ",\r\n", buffer = Data, bufsize=Len, mochi = R1}};
+ {ok, Acc#cacc{prepend = ",\r\n", buffer = Data, bufsize = Len, mochi = R1}};
maybe_flush_changes_feed(Acc0, Data, Len) ->
#cacc{buffer = Buf, bufsize = Size, chunks_sent = ChunksSent} = Acc0,
Acc = Acc0#cacc{
@@ -254,18 +287,18 @@ maybe_flush_changes_feed(Acc0, Data, Len) ->
incr_stats_changes_feed(IncludeDocs) ->
chttpd_stats:incr_rows(),
- if not IncludeDocs -> ok; true ->
- chttpd_stats:incr_reads()
+ if
+ not IncludeDocs -> ok;
+ true -> chttpd_stats:incr_reads()
end.
% Return the same response as if a compaction succeeded even though _compaction
% isn't a valid operation in CouchDB >= 4.x anymore. This is mostly to avoid
% breaking existing user scripts which may periodically call this endpoint. In
% the future this endpoint will return a 410 response, and then it will be removed.
-handle_compact_req(#httpd{method='POST'}=Req, _Db) ->
+handle_compact_req(#httpd{method = 'POST'} = Req, _Db) ->
chttpd:validate_ctype(Req, "application/json"),
send_json(Req, 202, {[{ok, true}]});
-
handle_compact_req(Req, _Db) ->
send_method_not_allowed(Req, "POST").
@@ -273,35 +306,40 @@ handle_view_cleanup_req(Req, Db) ->
ok = fabric2_index:cleanup(Db),
send_json(Req, 202, {[{ok, true}]}).
-handle_design_req(#httpd{
- path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
- }=Req, Db) ->
+handle_design_req(
+ #httpd{
+ path_parts = [_DbName, _Design, Name, <<"_", _/binary>> = Action | _Rest]
+ } = Req,
+ Db
+) ->
case fabric2_db:open_doc(Db, <<"_design/", Name/binary>>) of
- {ok, DDoc} ->
- Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
- Handler(Req, Db, DDoc);
- Error ->
- throw(Error)
+ {ok, DDoc} ->
+ Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
+ Handler(Req, Db, DDoc);
+ Error ->
+ throw(Error)
end;
-
handle_design_req(Req, Db) ->
db_req(Req, Db).
-bad_action_req(#httpd{path_parts=[_, _, Name|FileNameParts]}=Req, Db, _DDoc) ->
- db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts).
+bad_action_req(#httpd{path_parts = [_, _, Name | FileNameParts]} = Req, Db, _DDoc) ->
+ db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts).
-handle_design_info_req(#httpd{method='GET'}=Req, Db, #doc{} = DDoc) ->
+handle_design_info_req(#httpd{method = 'GET'} = Req, Db, #doc{} = DDoc) ->
[_, _, Name, _] = Req#httpd.path_parts,
{ok, GroupInfoList} = couch_views:get_info(Db, DDoc),
- send_json(Req, 200, {[
- {name, Name},
- {view_index, {GroupInfoList}}
- ]});
-
+ send_json(
+ Req,
+ 200,
+ {[
+ {name, Name},
+ {view_index, {GroupInfoList}}
+ ]}
+ );
handle_design_info_req(Req, _Db, _DDoc) ->
send_method_not_allowed(Req, "GET").
-create_db_req(#httpd{user_ctx=Ctx}=Req, DbName) ->
+create_db_req(#httpd{user_ctx = Ctx} = Req, DbName) ->
couch_httpd:verify_is_server_admin(Req),
DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
case fabric2_db:create(DbName, [{user_ctx, Ctx}]) of
@@ -313,7 +351,7 @@ create_db_req(#httpd{user_ctx=Ctx}=Req, DbName) ->
throw(Error)
end.
-delete_db_req(#httpd{user_ctx=Ctx}=Req, DbName) ->
+delete_db_req(#httpd{user_ctx = Ctx} = Req, DbName) ->
couch_httpd:verify_is_server_admin(Req),
case fabric2_db:delete(DbName, [{user_ctx, Ctx}]) of
ok ->
@@ -322,174 +360,201 @@ delete_db_req(#httpd{user_ctx=Ctx}=Req, DbName) ->
throw(Error)
end.
-do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
+do_db_req(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req, Fun) ->
Options = [{user_ctx, Ctx}, {interactive, true}],
{ok, Db} = fabric2_db:open(DbName, Options),
Fun(Req, Db).
-db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_DbName]} = Req, Db) ->
% measure the time required to generate the etag; see if it's worth it
T0 = os:timestamp(),
{ok, DbInfo} = fabric2_db:get_db_info(Db),
DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST', path_parts=[DbName]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [DbName]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
Doc0 = chttpd:json_body(Req),
Doc1 = couch_doc:from_json_obj_validate(Doc0, fabric2_db:name(Db)),
validate_attachment_names(Doc1),
- Doc2 = case Doc1#doc.id of
- <<"">> ->
- Doc1#doc{id=couch_uuids:new(), revs={0, []}};
- _ ->
- Doc1
- end,
+ Doc2 =
+ case Doc1#doc.id of
+ <<"">> ->
+ Doc1#doc{id = couch_uuids:new(), revs = {0, []}};
+ _ ->
+ Doc1
+ end,
Doc3 = read_att_data(Doc2),
DocId = Doc3#doc.id,
case chttpd:qs_value(Req, "batch") of
- "ok" ->
- % async_batching
- spawn(fun() ->
- case catch(fabric2_db:update_doc(Db, Doc3, [])) of
- {ok, _} ->
- chttpd_stats:incr_writes(),
- ok;
- {accepted, _} ->
- chttpd_stats:incr_writes(),
- ok;
- Error ->
- ?LOG_DEBUG(#{
- what => async_update_error,
- db => DbName,
- docid => DocId,
- details => Error
- }),
- couch_log:debug("Batch doc error (~s): ~p",[DocId, Error])
+ "ok" ->
+ % async_batching
+ spawn(fun() ->
+ case catch (fabric2_db:update_doc(Db, Doc3, [])) of
+ {ok, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ {accepted, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ Error ->
+ ?LOG_DEBUG(#{
+ what => async_update_error,
+ db => DbName,
+ docid => DocId,
+ details => Error
+ }),
+ couch_log:debug("Batch doc error (~s): ~p", [DocId, Error])
end
end),
- send_json(Req, 202, [], {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- DocUrl = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
- $/, couch_util:url_encode(DocId)]),
- case fabric2_db:update_doc(Db, Doc3, []) of
- {ok, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
- end,
- send_json(Req, HttpCode, [{"Location", DocUrl}], {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]})
+ send_json(
+ Req,
+ 202,
+ [],
+ {[
+ {ok, true},
+ {id, DocId}
+ ]}
+ );
+ _Normal ->
+ % normal
+ DocUrl = absolute_uri(Req, [
+ $/,
+ couch_util:url_encode(DbName),
+ $/,
+ couch_util:url_encode(DocId)
+ ]),
+ case fabric2_db:update_doc(Db, Doc3, []) of
+ {ok, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
+ end,
+ send_json(
+ Req,
+ HttpCode,
+ [{"Location", DocUrl}],
+ {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]}
+ )
end;
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_DbName]} = Req, _Db) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST', path_parts=[_DbName, <<"_ensure_full_commit">>]
- }=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_DbName, <<"_ensure_full_commit">>]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
#{db_prefix := <<_/binary>>} = Db,
- send_json(Req, 201, {[
- {ok, true},
- {instance_start_time, <<"0">>}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
+ send_json(
+ Req,
+ 201,
+ {[
+ {ok, true},
+ {instance_start_time, <<"0">>}
+ ]}
+ );
+db_req(#httpd{path_parts = [_, <<"_ensure_full_commit">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>]} = Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
chttpd:validate_ctype(Req, "application/json"),
{JsonProps} = chttpd:json_body_obj(Req),
- DocsArray = case couch_util:get_value(<<"docs">>, JsonProps) of
- undefined ->
- throw({bad_request, <<"POST body must include `docs` parameter.">>});
- DocsArray0 when not is_list(DocsArray0) ->
- throw({bad_request, <<"`docs` parameter must be an array.">>});
- DocsArray0 ->
- DocsArray0
- end,
+ DocsArray =
+ case couch_util:get_value(<<"docs">>, JsonProps) of
+ undefined ->
+ throw({bad_request, <<"POST body must include `docs` parameter.">>});
+ DocsArray0 when not is_list(DocsArray0) ->
+ throw({bad_request, <<"`docs` parameter must be an array.">>});
+ DocsArray0 ->
+ DocsArray0
+ end,
MaxDocs = config:get_integer("couchdb", "max_bulk_docs_count", 10000),
case length(DocsArray) =< MaxDocs of
true -> ok;
false -> throw({request_entity_too_large, {bulk_docs, MaxDocs}})
end,
couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
- Options = case chttpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- [full_commit];
- "false" ->
- [delay_commit];
- _ ->
- []
- end,
+ Options =
+ case chttpd:header_value(Req, "X-Couch-Full-Commit") of
+ "true" ->
+ [full_commit];
+ "false" ->
+ [delay_commit];
+ _ ->
+ []
+ end,
DbName = fabric2_db:name(Db),
- Docs = lists:map(fun(JsonObj) ->
- Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
- validate_attachment_names(Doc),
- case Doc#doc.id of
- <<>> -> Doc#doc{id = couch_uuids:new()};
- _ -> Doc
- end
- end, DocsArray),
- case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
- true ->
- Options2 =
- case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
- true -> [all_or_nothing|Options];
- _ -> Options
+ Docs = lists:map(
+ fun(JsonObj) ->
+ Doc = couch_doc:from_json_obj_validate(JsonObj, DbName),
+ validate_attachment_names(Doc),
+ case Doc#doc.id of
+ <<>> -> Doc#doc{id = couch_uuids:new()};
+ _ -> Doc
+ end
end,
- case fabric2_db:update_docs(Db, Docs, Options2) of
- {ok, Results} ->
- % output the results
- chttpd_stats:incr_writes(length(Results)),
- DocResults = lists:zipwith(fun update_doc_result_to_json/2,
- Docs, Results),
- send_json(Req, 201, DocResults);
- {accepted, Results} ->
- % output the results
- chttpd_stats:incr_writes(length(Results)),
- DocResults = lists:zipwith(fun update_doc_result_to_json/2,
- Docs, Results),
- send_json(Req, 202, DocResults);
- {aborted, Errors} ->
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 417, ErrorsJson)
- end;
- false ->
- case fabric2_db:update_docs(Db, Docs, [replicated_changes|Options]) of
- {ok, Errors} ->
- chttpd_stats:incr_writes(length(Docs)),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 201, ErrorsJson);
- {accepted, Errors} ->
- chttpd_stats:incr_writes(length(Docs)),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 202, ErrorsJson)
- end;
- _ ->
- throw({bad_request, <<"`new_edits` parameter must be a boolean.">>})
+ DocsArray
+ ),
+ case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
+ true ->
+ Options2 =
+ case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
+ true -> [all_or_nothing | Options];
+ _ -> Options
+ end,
+ case fabric2_db:update_docs(Db, Docs, Options2) of
+ {ok, Results} ->
+ % output the results
+ chttpd_stats:incr_writes(length(Results)),
+ DocResults = lists:zipwith(
+ fun update_doc_result_to_json/2,
+ Docs,
+ Results
+ ),
+ send_json(Req, 201, DocResults);
+ {accepted, Results} ->
+ % output the results
+ chttpd_stats:incr_writes(length(Results)),
+ DocResults = lists:zipwith(
+ fun update_doc_result_to_json/2,
+ Docs,
+ Results
+ ),
+ send_json(Req, 202, DocResults);
+ {aborted, Errors} ->
+ ErrorsJson =
+ lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 417, ErrorsJson)
+ end;
+ false ->
+ case fabric2_db:update_docs(Db, Docs, [replicated_changes | Options]) of
+ {ok, Errors} ->
+ chttpd_stats:incr_writes(length(Docs)),
+ ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 201, ErrorsJson);
+ {accepted, Errors} ->
+ chttpd_stats:incr_writes(length(Docs)),
+ ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 202, ErrorsJson)
+ end;
+ _ ->
+ throw({bad_request, <<"`new_edits` parameter must be a boolean.">>})
end;
-
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_bulk_docs">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-
-db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
- mochi_req=MochiReq}=Req, Db) ->
+db_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, <<"_bulk_get">>],
+ mochi_req = MochiReq
+ } = Req,
+ Db
+) ->
couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
couch_httpd:validate_ctype(Req, "application/json"),
{JsonProps} = chttpd:json_body_obj(Req),
@@ -505,7 +570,7 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
#doc_query_args{
options = Options
} = bulk_get_parse_doc_query(Req),
- AcceptJson = MochiReq:accepts_content_type("application/json"),
+ AcceptJson = MochiReq:accepts_content_type("application/json"),
AcceptMixedMp = MochiReq:accepts_content_type("multipart/mixed"),
AcceptRelatedMp = MochiReq:accepts_content_type("multipart/related"),
AcceptMp = not AcceptJson andalso (AcceptMixedMp orelse AcceptRelatedMp),
@@ -513,116 +578,145 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
false ->
{ok, Resp} = start_json_response(Req, 200),
send_chunk(Resp, <<"{\"results\": [">>),
- lists:foldl(fun(Doc, Sep) ->
- {DocId, Results, Options1} = bulk_get_open_doc_revs(Db, Doc,
- Options),
- bulk_get_send_docs_json(Resp, DocId, Results, Options1, Sep),
- <<",">>
- end, <<"">>, Docs),
+ lists:foldl(
+ fun(Doc, Sep) ->
+ {DocId, Results, Options1} = bulk_get_open_doc_revs(
+ Db,
+ Doc,
+ Options
+ ),
+ bulk_get_send_docs_json(Resp, DocId, Results, Options1, Sep),
+ <<",">>
+ end,
+ <<"">>,
+ Docs
+ ),
send_chunk(Resp, <<"]}">>),
end_json_response(Resp);
true ->
OuterBoundary = bulk_get_multipart_boundary(),
- MpType = case AcceptMixedMp of
- true ->
- "multipart/mixed";
- _ ->
- "multipart/related"
- end,
- CType = {"Content-Type", MpType ++ "; boundary=\"" ++
- ?b2l(OuterBoundary) ++ "\""},
+ MpType =
+ case AcceptMixedMp of
+ true ->
+ "multipart/mixed";
+ _ ->
+ "multipart/related"
+ end,
+ CType =
+ {
+ "Content-Type",
+ MpType ++
+ "; boundary=\"" ++
+ ?b2l(OuterBoundary) ++
+ "\""
+ },
{ok, Resp} = start_chunked_response(Req, 200, [CType]),
- lists:foldl(fun(Doc, _Pre) ->
- case bulk_get_open_doc_revs(Db, Doc, Options) of
- {_, {ok, []}, _Options1} ->
- ok;
- {_, {ok, Results}, Options1} ->
- send_docs_multipart_bulk_get(Results, Options1,
- OuterBoundary, Resp);
- {DocId, {error, {RevId, Error, Reason}}, _Options1} ->
- Json = ?JSON_ENCODE({[
- {<<"id">>, DocId},
- {<<"rev">>, RevId},
- {<<"error">>, Error},
- {<<"reason">>, Reason}
- ]}),
- couch_httpd:send_chunk(Resp,[
- <<"\r\n--", OuterBoundary/binary>>,
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json
- ])
- end
- end, <<"">>, Docs),
+ lists:foldl(
+ fun(Doc, _Pre) ->
+ case bulk_get_open_doc_revs(Db, Doc, Options) of
+ {_, {ok, []}, _Options1} ->
+ ok;
+ {_, {ok, Results}, Options1} ->
+ send_docs_multipart_bulk_get(
+ Results,
+ Options1,
+ OuterBoundary,
+ Resp
+ );
+ {DocId, {error, {RevId, Error, Reason}}, _Options1} ->
+ Json = ?JSON_ENCODE(
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, RevId},
+ {<<"error">>, Error},
+ {<<"reason">>, Reason}
+ ]}
+ ),
+ couch_httpd:send_chunk(Resp, [
+ <<"\r\n--", OuterBoundary/binary>>,
+ <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json
+ ])
+ end
+ end,
+ <<"">>,
+ Docs
+ ),
case Docs of
[] ->
ok;
_ ->
- couch_httpd:send_chunk(Resp, <<"\r\n", "--", OuterBoundary/binary, "--\r\n">>)
+ couch_httpd:send_chunk(
+ Resp, <<"\r\n", "--", OuterBoundary/binary, "--\r\n">>
+ )
end,
couch_httpd:last_chunk(Resp)
end
end;
-db_req(#httpd{path_parts=[_, <<"_bulk_get">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_bulk_get">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
+db_req(#httpd{method = 'GET', path_parts = [_, OP]} = Req, Db) when ?IS_ALL_DOCS(OP) ->
case chttpd:qs_json_value(Req, "keys", nil) of
- Keys when is_list(Keys) ->
- all_docs_view(Req, Db, Keys, OP);
- nil ->
- all_docs_view(Req, Db, undefined, OP);
- _ ->
- throw({bad_request, "`keys` parameter must be an array."})
+ Keys when is_list(Keys) ->
+ all_docs_view(Req, Db, Keys, OP);
+ nil ->
+ all_docs_view(Req, Db, undefined, OP);
+ _ ->
+ throw({bad_request, "`keys` parameter must be an array."})
end;
-
-db_req(#httpd{method='POST',
- path_parts=[_, OP, <<"queries">>]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
+db_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, OP, <<"queries">>]
+ } = Req,
+ Db
+) when ?IS_ALL_DOCS(OP) ->
Props = chttpd:json_body_obj(Req),
case couch_views_util:get_view_queries(Props) of
undefined ->
- throw({bad_request,
- <<"POST body must include `queries` parameter.">>});
+ throw({bad_request, <<"POST body must include `queries` parameter.">>});
Queries ->
multi_all_docs_view(Req, Db, OP, Queries)
end;
-
-db_req(#httpd{path_parts=[_, OP, <<"queries">>]}=Req,
- _Db) when ?IS_ALL_DOCS(OP) ->
+db_req(
+ #httpd{path_parts = [_, OP, <<"queries">>]} = Req,
+ _Db
+) when ?IS_ALL_DOCS(OP) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
+db_req(#httpd{method = 'POST', path_parts = [_, OP]} = Req, Db) when ?IS_ALL_DOCS(OP) ->
chttpd:validate_ctype(Req, "application/json"),
{Fields} = chttpd:json_body_obj(Req),
case couch_util:get_value(<<"keys">>, Fields, nil) of
- Keys when is_list(Keys) ->
- all_docs_view(Req, Db, Keys, OP);
- nil ->
- all_docs_view(Req, Db, undefined, OP);
- _ ->
- throw({bad_request, "`keys` body member must be an array."})
+ Keys when is_list(Keys) ->
+ all_docs_view(Req, Db, Keys, OP);
+ nil ->
+ all_docs_view(Req, Db, undefined, OP);
+ _ ->
+ throw({bad_request, "`keys` body member must be an array."})
end;
-
-db_req(#httpd{path_parts=[_,OP]}=Req, _Db) when ?IS_ALL_DOCS(OP) ->
+db_req(#httpd{path_parts = [_, OP]} = Req, _Db) when ?IS_ALL_DOCS(OP) ->
send_method_not_allowed(Req, "GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_missing_revs">>]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = chttpd:json_body_obj(Req),
case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
{error, Reason} ->
chttpd:send_error(Req, Reason);
{ok, Results} ->
- Results2 = [{Id, couch_doc:revs_to_strs(Revs)} ||
- {Id, Revs, _} <- Results],
- send_json(Req, {[
- {missing_revs, {Results2}}
- ]})
+ Results2 = [
+ {Id, couch_doc:revs_to_strs(Revs)}
+ || {Id, Revs, _} <- Results
+ ],
+ send_json(
+ Req,
+ {[
+ {missing_revs, {Results2}}
+ ]}
+ )
end;
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_missing_revs">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_revs_diff">>]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = chttpd:json_body_obj(Req),
case fabric2_db:get_missing_revs(Db, JsonDocIdRevs) of
@@ -630,23 +724,28 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
chttpd:send_error(Req, Reason);
{ok, Results} ->
Results2 =
- lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
- {Id,
- {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
- if PossibleAncestors == [] ->
- [];
- true ->
- [{possible_ancestors,
- couch_doc:revs_to_strs(PossibleAncestors)}]
- end}}
- end, Results),
+ lists:map(
+ fun({Id, MissingRevs, PossibleAncestors}) ->
+ {Id, {
+ [{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
+ if
+ PossibleAncestors == [] ->
+ [];
+ true ->
+ [
+ {possible_ancestors,
+ couch_doc:revs_to_strs(PossibleAncestors)}
+ ]
+ end
+ }}
+ end,
+ Results
+ ),
send_json(Req, {Results2})
end;
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_revs_diff">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method = 'PUT',path_parts = [_, <<"_security">>]} = Req, Db) ->
+db_req(#httpd{method = 'PUT', path_parts = [_, <<"_security">>]} = Req, Db) ->
validate_security_can_be_edited(fabric2_db:name(Db)),
SecObj = chttpd:json_body(Req),
case fabric2_db:set_security(Db, SecObj) of
@@ -655,56 +754,48 @@ db_req(#httpd{method = 'PUT',path_parts = [_, <<"_security">>]} = Req, Db) ->
Else ->
throw(Else)
end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_, <<"_security">>]} = Req, Db) ->
send_json(Req, fabric2_db:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_security">>]} = Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
+db_req(#httpd{method = 'PUT', path_parts = [_, <<"_revs_limit">>]} = Req, Db) ->
Limit = chttpd:json_body(Req),
ok = fabric2_db:set_revs_limit(Db, Limit),
send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_, <<"_revs_limit">>]} = Req, Db) ->
send_json(Req, fabric2_db:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_revs_limit">>]} = Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-
% Special case to enable using an unencoded slash in the URL of design docs,
% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{method='GET', mochi_req=MochiReq, path_parts=[_DbName, <<"_design/", _/binary>> | _]}=Req, _Db) ->
+db_req(
+ #httpd{
+ method = 'GET',
+ mochi_req = MochiReq,
+ path_parts = [_DbName, <<"_design/", _/binary>> | _]
+ } = Req,
+ _Db
+) ->
[Head | Tail] = re:split(MochiReq:get(raw_path), "_design%2F", [{return, list}, caseless]),
chttpd:send_redirect(Req, Head ++ "_design/" ++ Tail);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
- db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
- db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
+db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name]} = Req, Db) ->
+ db_doc_req(Req, Db, <<"_design/", Name/binary>>);
+db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name | FileNameParts]} = Req, Db) ->
+ db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts);
% Special case to allow for accessing local documents without %2F
% encoding the docid. Throws out requests that don't have the second
% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">>]}, _Db) ->
throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local/">>]}, _Db) ->
throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">>, Name]} = Req, Db) ->
db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">> | _Rest]}, _Db) ->
throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
+db_req(#httpd{path_parts = [_, DocId]} = Req, Db) ->
db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
+db_req(#httpd{path_parts = [_, DocId | FileNameParts]} = Req, Db) ->
db_attachment_req(Req, Db, DocId, FileNameParts).
multi_all_docs_view(Req, Db, OP, Queries) ->
@@ -716,9 +807,8 @@ multi_all_docs_view(Req, Db, OP, Queries) ->
paginate_multi_all_docs_view(Req, Db, OP, Args, Queries)
end.
-
stream_multi_all_docs_view(Req, Db, OP, Args0, Queries) ->
- Args1 = Args0#mrargs{view_type=map},
+ Args1 = Args0#mrargs{view_type = map},
ArgQueries = chttpd_view:parse_queries(Req, Args1, Queries, fun(QArgs) ->
set_namespace(OP, QArgs)
end),
@@ -732,20 +822,23 @@ stream_multi_all_docs_view(Req, Db, OP, Args0, Queries) ->
threshold = Max,
prepend = "\r\n"
},
- VAcc1 = lists:foldl(fun
- (#mrargs{keys = undefined} = ArgsIn, Acc0) ->
- send_all_docs(Db, ArgsIn, Acc0);
- (#mrargs{keys = Keys} = ArgsIn, Acc0) when is_list(Keys) ->
- Acc1 = send_all_docs_keys(Db, ArgsIn, Acc0),
- {ok, Acc2} = view_cb(complete, Acc1),
- Acc2
- end, VAcc0, ArgQueries),
+ VAcc1 = lists:foldl(
+ fun
+ (#mrargs{keys = undefined} = ArgsIn, Acc0) ->
+ send_all_docs(Db, ArgsIn, Acc0);
+ (#mrargs{keys = Keys} = ArgsIn, Acc0) when is_list(Keys) ->
+ Acc1 = send_all_docs_keys(Db, ArgsIn, Acc0),
+ {ok, Acc2} = view_cb(complete, Acc1),
+ Acc2
+ end,
+ VAcc0,
+ ArgQueries
+ ),
{ok, Resp1} = chttpd:send_delayed_chunk(VAcc1#vacc.resp, "\r\n]}"),
chttpd:end_delayed_json_response(Resp1).
-
paginate_multi_all_docs_view(Req, Db, OP, Args0, Queries) ->
- Args1 = Args0#mrargs{view_type=map},
+ Args1 = Args0#mrargs{view_type = map},
ArgQueries = chttpd_view:parse_queries(Req, Args1, Queries, fun(QArgs) ->
set_namespace(OP, QArgs)
end),
@@ -757,13 +850,17 @@ paginate_multi_all_docs_view(Req, Db, OP, Args0, Queries) ->
UpdateSeq = fabric2_db:get_update_seq(Db),
EtagTerm = {Parts, UpdateSeq, Args0},
Response = couch_views_http:paginated(
- Req, EtagTerm, PageSize, ArgQueries, KeyFun,
+ Req,
+ EtagTerm,
+ PageSize,
+ ArgQueries,
+ KeyFun,
fun(Args) ->
all_docs_paginated_cb(Db, Args)
- end),
+ end
+ ),
chttpd:send_json(Req, Response).
-
all_docs_view(Req, Db, Keys, OP) ->
Args = couch_views_http:parse_body_and_query(Req, Keys),
case couch_views_util:is_paginated(Args) of
@@ -774,7 +871,7 @@ all_docs_view(Req, Db, Keys, OP) ->
end.
stream_all_docs_view(Req, Db, Args0, OP) ->
- Args1 = Args0#mrargs{view_type=map},
+ Args1 = Args0#mrargs{view_type = map},
Args2 = couch_views_util:validate_args(Args1),
Args3 = set_namespace(OP, Args2),
Max = chttpd:chunked_response_buffer_size(),
@@ -793,9 +890,8 @@ stream_all_docs_view(Req, Db, Args0, OP) ->
{ok, VAcc2#vacc.resp}
end.
-
paginate_all_docs_view(Req, Db, Args0, OP) ->
- Args1 = Args0#mrargs{view_type=map},
+ Args1 = Args0#mrargs{view_type = map},
Args2 = chttpd_view:validate_args(Req, Args1),
Args3 = set_namespace(OP, Args2),
KeyFun = fun({Props}) ->
@@ -805,167 +901,180 @@ paginate_all_docs_view(Req, Db, Args0, OP) ->
UpdateSeq = fabric2_db:get_update_seq(Db),
EtagTerm = {Parts, UpdateSeq, Args3},
Response = couch_views_http:paginated(
- Req, EtagTerm, Args3, KeyFun,
+ Req,
+ EtagTerm,
+ Args3,
+ KeyFun,
fun(Args) ->
all_docs_paginated_cb(Db, Args)
- end),
+ end
+ ),
chttpd:send_json(Req, Response).
-
all_docs_paginated_cb(Db, Args) ->
- #vacc{meta=MetaMap, buffer=Items} = case Args#mrargs.keys of
- undefined ->
- send_all_docs(Db, Args, #vacc{paginated=true});
- Keys when is_list(Keys) ->
- send_all_docs_keys(Db, Args, #vacc{paginated=true})
- end,
+ #vacc{meta = MetaMap, buffer = Items} =
+ case Args#mrargs.keys of
+ undefined ->
+ send_all_docs(Db, Args, #vacc{paginated = true});
+ Keys when is_list(Keys) ->
+ send_all_docs_keys(Db, Args, #vacc{paginated = true})
+ end,
{MetaMap, Items}.
-
send_all_docs(Db, #mrargs{keys = undefined} = Args, VAcc0) ->
Opts0 = fabric2_util:all_docs_view_opts(Args),
NS = couch_util:get_value(namespace, Opts0),
- FoldFun = case NS of
- <<"_all_docs">> -> fold_docs;
- <<"_design">> -> fold_design_docs;
- <<"_local">> -> fold_local_docs
- end,
- Opts = case couch_views_util:is_paginated(Args) of
- false ->
- Opts0 ++ [{restart_tx, true}];
- true ->
- Opts0
- end,
+ FoldFun =
+ case NS of
+ <<"_all_docs">> -> fold_docs;
+ <<"_design">> -> fold_design_docs;
+ <<"_local">> -> fold_local_docs
+ end,
+ Opts =
+ case couch_views_util:is_paginated(Args) of
+ false ->
+ Opts0 ++ [{restart_tx, true}];
+ true ->
+ Opts0
+ end,
ViewCb = fun view_cb/2,
Acc = {iter, Db, Args, VAcc0},
{ok, {iter, _, _, VAcc1}} = fabric2_db:FoldFun(Db, ViewCb, Acc, Opts),
VAcc1.
-
send_all_docs_keys(Db, #mrargs{} = Args, VAcc0) ->
Keys = apply_args_to_keylist(Args, Args#mrargs.keys),
NS = couch_util:get_value(namespace, Args#mrargs.extra),
TotalRows = fabric2_db:get_doc_count(Db, NS),
- Meta = case Args#mrargs.update_seq of
- true ->
- UpdateSeq = fabric2_db:get_update_seq(Db),
- [{update_seq, UpdateSeq}];
- false ->
- []
- end ++ [{total, TotalRows}, {offset, null}],
+ Meta =
+ case Args#mrargs.update_seq of
+ true ->
+ UpdateSeq = fabric2_db:get_update_seq(Db),
+ [{update_seq, UpdateSeq}];
+ false ->
+ []
+ end ++ [{total, TotalRows}, {offset, null}],
{ok, VAcc1} = view_cb({meta, Meta}, VAcc0),
- DocOpts = case Args#mrargs.conflicts of
- true -> [conflicts | Args#mrargs.doc_options];
- _ -> Args#mrargs.doc_options
- end,
+ DocOpts =
+ case Args#mrargs.conflicts of
+ true -> [conflicts | Args#mrargs.doc_options];
+ _ -> Args#mrargs.doc_options
+ end,
IncludeDocs = Args#mrargs.include_docs,
OpenOpts = [deleted | DocOpts],
CB = fun(DocId, Doc, Acc) ->
- Row0 = case Doc of
- {not_found, missing} ->
- #view_row{key = DocId};
- {ok, #doc{deleted = true, revs = Revs}} ->
- {RevPos, [RevId | _]} = Revs,
- Value = {[
- {rev, couch_doc:rev_to_str({RevPos, RevId})},
- {deleted, true}
- ]},
- DocValue = if not IncludeDocs -> undefined; true ->
- null
- end,
- #view_row{
- key = DocId,
- id = DocId,
- value = Value,
- doc = DocValue
- };
- {ok, #doc{revs = Revs} = Doc0} ->
- {RevPos, [RevId | _]} = Revs,
- Value = {[
- {rev, couch_doc:rev_to_str({RevPos, RevId})}
- ]},
- DocValue = if not IncludeDocs -> undefined; true ->
- couch_doc:to_json_obj(Doc0, DocOpts)
- end,
- #view_row{
- key = DocId,
- id = DocId,
- value = Value,
- doc = DocValue
- }
- end,
+ Row0 =
+ case Doc of
+ {not_found, missing} ->
+ #view_row{key = DocId};
+ {ok, #doc{deleted = true, revs = Revs}} ->
+ {RevPos, [RevId | _]} = Revs,
+ Value =
+ {[
+ {rev, couch_doc:rev_to_str({RevPos, RevId})},
+ {deleted, true}
+ ]},
+ DocValue =
+ if
+ not IncludeDocs -> undefined;
+ true -> null
+ end,
+ #view_row{
+ key = DocId,
+ id = DocId,
+ value = Value,
+ doc = DocValue
+ };
+ {ok, #doc{revs = Revs} = Doc0} ->
+ {RevPos, [RevId | _]} = Revs,
+ Value =
+ {[
+ {rev, couch_doc:rev_to_str({RevPos, RevId})}
+ ]},
+ DocValue =
+ if
+ not IncludeDocs -> undefined;
+ true -> couch_doc:to_json_obj(Doc0, DocOpts)
+ end,
+ #view_row{
+ key = DocId,
+ id = DocId,
+ value = Value,
+ doc = DocValue
+ }
+ end,
Row1 = couch_views_http:transform_row(Row0),
view_cb(Row1, Acc)
end,
{ok, VAcc2} = fabric2_db:fold_docs(Db, Keys, CB, VAcc1, OpenOpts),
VAcc2.
-
apply_args_to_keylist(Args, Keys0) ->
- Keys1 = case Args#mrargs.direction of
- fwd -> Keys0;
- _ -> lists:reverse(Keys0)
- end,
- Keys2 = case Args#mrargs.skip < length(Keys1) of
- true -> lists:nthtail(Args#mrargs.skip, Keys1);
- false -> []
- end,
+ Keys1 =
+ case Args#mrargs.direction of
+ fwd -> Keys0;
+ _ -> lists:reverse(Keys0)
+ end,
+ Keys2 =
+ case Args#mrargs.skip < length(Keys1) of
+ true -> lists:nthtail(Args#mrargs.skip, Keys1);
+ false -> []
+ end,
case Args#mrargs.limit < length(Keys2) of
true -> lists:sublist(Keys2, Args#mrargs.limit);
false -> Keys2
end.
-
view_cb({row, Row}, {iter, Db, Args, VAcc}) ->
- NewRow = case lists:keymember(doc, 1, Row) of
- true ->
- chttpd_stats:incr_reads(),
- Row;
- false when Args#mrargs.include_docs ->
- {id, DocId} = lists:keyfind(id, 1, Row),
- chttpd_stats:incr_reads(),
- DocOpts = case Args#mrargs.conflicts of
- true -> [conflicts | Args#mrargs.doc_options];
- _ -> Args#mrargs.doc_options
- end,
- OpenOpts = [deleted | DocOpts],
- DocMember = case fabric2_db:open_doc(Db, DocId, OpenOpts) of
- {not_found, missing} ->
- [];
- {ok, #doc{deleted = true}} ->
- [{doc, null}];
- {ok, #doc{} = Doc} ->
- [{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
- end,
- Row ++ DocMember;
- _ ->
- Row
- end,
+ NewRow =
+ case lists:keymember(doc, 1, Row) of
+ true ->
+ chttpd_stats:incr_reads(),
+ Row;
+ false when Args#mrargs.include_docs ->
+ {id, DocId} = lists:keyfind(id, 1, Row),
+ chttpd_stats:incr_reads(),
+ DocOpts =
+ case Args#mrargs.conflicts of
+ true -> [conflicts | Args#mrargs.doc_options];
+ _ -> Args#mrargs.doc_options
+ end,
+ OpenOpts = [deleted | DocOpts],
+ DocMember =
+ case fabric2_db:open_doc(Db, DocId, OpenOpts) of
+ {not_found, missing} ->
+ [];
+ {ok, #doc{deleted = true}} ->
+ [{doc, null}];
+ {ok, #doc{} = Doc} ->
+ [{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
+ end,
+ Row ++ DocMember;
+ _ ->
+ Row
+ end,
chttpd_stats:incr_rows(),
{Go, NewVAcc} = couch_views_http:view_cb({row, NewRow}, VAcc),
{Go, {iter, Db, Args, NewVAcc}};
-
view_cb(Msg, {iter, Db, Args, VAcc}) ->
{Go, NewVAcc} = couch_views_http:view_cb(Msg, VAcc),
{Go, {iter, Db, Args, NewVAcc}};
-
view_cb(Msg, Acc) ->
couch_views_http:view_cb(Msg, Acc).
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'DELETE'} = Req, Db, DocId) ->
% check for the existence of the doc to handle the 404 case.
couch_doc_open(Db, DocId, nil, []),
case chttpd:qs_value(Req, "rev") of
- undefined ->
- Body = {[{<<"_deleted">>,true}]};
- Rev ->
- Body = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}
+ undefined ->
+ Body = {[{<<"_deleted">>, true}]};
+ Rev ->
+ Body = {[{<<"_rev">>, ?l2b(Rev)}, {<<"_deleted">>, true}]}
end,
Doc = couch_doc_from_req(Req, Db, DocId, Body),
send_updated_doc(Req, Db, DocId, Doc);
-
-db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
#doc_query_args{
rev = Rev,
open_revs = Revs,
@@ -973,306 +1082,382 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
atts_since = AttsSince
} = parse_doc_query(Req),
case Revs of
- [] ->
- Options2 =
- if AttsSince /= nil ->
- [{atts_since, AttsSince}, attachments | Options];
- true -> Options
- end,
- Doc = couch_doc_open(Db, DocId, Rev, Options2),
- send_doc(Req, Doc, Options2);
- _ ->
- case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
- {ok, []} when Revs == all ->
- chttpd:send_error(Req, {not_found, missing});
- {ok, Results} ->
- chttpd_stats:incr_reads(length(Results)),
- case MochiReq:accepts_content_type("multipart/mixed") of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, "["),
- % We loop through the docs. The first time through the separator
- % is the empty string, then a comma on subsequent iterations.
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- {{not_found, missing}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- "," % AccSeparator now has a comma
- end,
- "", Results),
- send_chunk(Resp, "]"),
- end_json_response(Resp);
- true ->
- send_docs_multipart(Req, Results, Options)
- end;
- {error, Error} ->
- chttpd:send_error(Req, Error)
- end
+ [] ->
+ Options2 =
+ if
+ AttsSince /= nil ->
+ [{atts_since, AttsSince}, attachments | Options];
+ true ->
+ Options
+ end,
+ Doc = couch_doc_open(Db, DocId, Rev, Options2),
+ send_doc(Req, Doc, Options2);
+ _ ->
+ case fabric2_db:open_doc_revs(Db, DocId, Revs, Options) of
+ {ok, []} when Revs == all ->
+ chttpd:send_error(Req, {not_found, missing});
+ {ok, Results} ->
+ chttpd_stats:incr_reads(length(Results)),
+ case MochiReq:accepts_content_type("multipart/mixed") of
+ false ->
+ {ok, Resp} = start_json_response(Req, 200),
+ send_chunk(Resp, "["),
+ % We loop through the docs. The first time through the separator
+ % is the empty string, then a comma on subsequent iterations.
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc = couch_doc:to_json_obj(Doc, Options),
+ Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ {{not_found, missing}, RevId} ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ % AccSeparator now has a comma
+ ","
+ end,
+ "",
+ Results
+ ),
+ send_chunk(Resp, "]"),
+ end_json_response(Resp);
+ true ->
+ send_docs_multipart(Req, Results, Options)
+ end;
+ {error, Error} ->
+ chttpd:send_error(Req, Error)
+ end
end;
-
-db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'POST'} = Req, Db, DocId) ->
couch_httpd:validate_referer(Req),
fabric2_db:validate_docid(DocId),
chttpd:validate_ctype(Req, "multipart/form-data"),
Form = couch_httpd:parse_form(Req),
case proplists:is_defined("_doc", Form) of
- true ->
- Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
- Doc = couch_doc_from_req(Req, Db, DocId, Json);
- false ->
- Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
- Doc = case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} ->
- chttpd_stats:incr_reads(),
- Doc0;
- {error, Error} ->
- throw(Error)
- end
+ true ->
+ Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
+ Doc = couch_doc_from_req(Req, Db, DocId, Json);
+ false ->
+ Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
+ Doc =
+ case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
+ {ok, [{ok, Doc0}]} ->
+ chttpd_stats:incr_reads(),
+ Doc0;
+ {error, Error} ->
+ throw(Error)
+ end
end,
UpdatedAtts = [
couch_att:new([
{name, validate_attachment_name(Name)},
{type, list_to_binary(ContentType)},
{data, Content}
- ]) ||
- {Name, {ContentType, _}, Content} <-
- proplists:get_all_values("_attachments", Form)
+ ])
+ || {Name, {ContentType, _}, Content} <-
+ proplists:get_all_values("_attachments", Form)
],
- #doc{atts=OldAtts} = Doc,
+ #doc{atts = OldAtts} = Doc,
OldAtts2 = lists:flatmap(
fun(Att) ->
OldName = couch_att:fetch(name, Att),
case [1 || A <- UpdatedAtts, couch_att:fetch(name, A) == OldName] of
- [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
- _ -> [] % the attachment was in the UpdatedAtts, drop it
+ % the attachment wasn't in the UpdatedAtts, return it
+ [] -> [Att];
+ % the attachment was in the UpdatedAtts, drop it
+ _ -> []
end
- end, OldAtts),
+ end,
+ OldAtts
+ ),
NewDoc = Doc#doc{
atts = UpdatedAtts ++ OldAtts2
},
NewDoc1 = read_att_data(NewDoc),
case fabric2_db:update_doc(Db, NewDoc1, []) of
- {ok, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
+ {ok, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
end,
- send_json(Req, HttpCode, [{"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}], {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]});
-
-db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
+ send_json(
+ Req,
+ HttpCode,
+ [{"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}],
+ {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]}
+ );
+db_doc_req(#httpd{method = 'PUT'} = Req, Db, DocId) ->
#doc_query_args{
update_type = UpdateType
} = parse_doc_query(Req),
DbName = fabric2_db:name(Db),
fabric2_db:validate_docid(DocId),
- Loc = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
- $/, couch_util:url_encode(DocId)]),
+ Loc = absolute_uri(Req, [
+ $/,
+ couch_util:url_encode(DbName),
+ $/,
+ couch_util:url_encode(DocId)
+ ]),
RespHeaders = [{"Location", Loc}],
case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
- ("multipart/related;" ++ _) = ContentType ->
- couch_httpd:check_max_request_length(Req),
- couch_httpd_multipart:num_mp_writers(1),
- {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType,
- fun() -> receive_request_data(Req) end),
- Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
- try
- Result = send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType),
- WaitFun(),
- Result
- catch throw:Err ->
- % Document rejected by a validate_doc_update function.
- couch_httpd_multipart:abort_multipart_stream(Parser),
- throw(Err)
- end;
- _Else ->
- case chttpd:qs_value(Req, "batch") of
- "ok" ->
- % batch
- Doc0 = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
- Doc = read_att_data(Doc0),
- spawn(fun() ->
- case catch(fabric2_db:update_doc(Db, Doc, [])) of
- {ok, _} ->
- chttpd_stats:incr_writes(),
- ok;
- {accepted, _} ->
- chttpd_stats:incr_writes(),
- ok;
- Error ->
- ?LOG_NOTICE(#{
- what => async_update_error,
- db => DbName,
- docid => DocId,
- details => Error
- }),
- couch_log:notice("Batch doc error (~s): ~p",[DocId, Error])
- end
- end),
- send_json(Req, 202, [], {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- Body = chttpd:json_body(Req),
- Doc = couch_doc_from_req(Req, Db, DocId, Body),
- send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
- end
+ ("multipart/related;" ++ _) = ContentType ->
+ couch_httpd:check_max_request_length(Req),
+ couch_httpd_multipart:num_mp_writers(1),
+ {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
+ ContentType,
+ fun() -> receive_request_data(Req) end
+ ),
+ Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
+ try
+ Result = send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType),
+ WaitFun(),
+ Result
+ catch
+ throw:Err ->
+ % Document rejected by a validate_doc_update function.
+ couch_httpd_multipart:abort_multipart_stream(Parser),
+ throw(Err)
+ end;
+ _Else ->
+ case chttpd:qs_value(Req, "batch") of
+ "ok" ->
+ % batch
+ Doc0 = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
+ Doc = read_att_data(Doc0),
+ spawn(fun() ->
+ case catch (fabric2_db:update_doc(Db, Doc, [])) of
+ {ok, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ {accepted, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ Error ->
+ ?LOG_NOTICE(#{
+ what => async_update_error,
+ db => DbName,
+ docid => DocId,
+ details => Error
+ }),
+ couch_log:notice("Batch doc error (~s): ~p", [DocId, Error])
+ end
+ end),
+ send_json(
+ Req,
+ 202,
+ [],
+ {[
+ {ok, true},
+ {id, DocId}
+ ]}
+ );
+ _Normal ->
+ % normal
+ Body = chttpd:json_body(Req),
+ Doc = couch_doc_from_req(Req, Db, DocId, Body),
+ send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
+ end
end;
-
-db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
+db_doc_req(#httpd{method = 'COPY'} = Req, Db, SourceDocId) ->
SourceRev =
- case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
- missing_rev -> nil;
- Rev -> Rev
- end,
+ case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
+ missing_rev -> nil;
+ Rev -> Rev
+ end,
{TargetDocId0, TargetRevs} = chttpd_util:parse_copy_destination_header(Req),
TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
- case fabric2_db:update_doc(Db,
- Doc#doc{id=TargetDocId, revs=TargetRevs}, []) of
- {ok, NewTargetRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewTargetRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
+ case
+ fabric2_db:update_doc(
+ Db,
+ Doc#doc{id = TargetDocId, revs = TargetRevs},
+ []
+ )
+ of
+ {ok, NewTargetRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, NewTargetRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
end,
% respond
DbName = fabric2_db:name(Db),
{PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}),
- Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)),
- send_json(Req, HttpCode,
- [{"Location", Loc},
- {"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
- {PartRes});
-
+ Loc = absolute_uri(
+ Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)
+ ),
+ send_json(
+ Req,
+ HttpCode,
+ [
+ {"Location", Loc},
+ {"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}
+ ],
+ {PartRes}
+ );
db_doc_req(Req, _Db, _DocId) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
send_doc(Req, Doc, Options) ->
case Doc#doc.meta of
- [] ->
- DiskEtag = couch_httpd:doc_etag(Doc),
- % output etag only when we have no meta
- chttpd:etag_respond(Req, DiskEtag, fun() ->
- send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
- end);
- _ ->
- send_doc_efficiently(Req, Doc, [], Options)
+ [] ->
+ DiskEtag = couch_httpd:doc_etag(Doc),
+ % output etag only when we have no meta
+ chttpd:etag_respond(Req, DiskEtag, fun() ->
+ send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
+ end);
+ _ ->
+ send_doc_efficiently(Req, Doc, [], Options)
end.
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req=MochiReq}=Req, #doc{atts=Atts}=Doc, Headers, Options) ->
+send_doc_efficiently(Req, #doc{atts = []} = Doc, Headers, Options) ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req, #doc{atts = Atts} = Doc, Headers, Options) ->
case lists:member(attachments, Options) of
- true ->
- Refs = monitor_attachments(Atts),
- try
- case MochiReq:accepts_content_type("multipart/related") of
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
true ->
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
- [attachments, follows, att_encoding_info | Options])),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary,JsonBytes, Atts, true),
- CType = {"Content-Type", ContentType},
- {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
- couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
- fun(Data) -> couch_httpd:send(Resp, Data) end, true)
- end
- after
- demonitor_refs(Refs)
- end;
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
+ Refs = monitor_attachments(Atts),
+ try
+ case MochiReq:accepts_content_type("multipart/related") of
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+ true ->
+ Boundary = couch_uuids:random(),
+ JsonBytes = ?JSON_ENCODE(
+ couch_doc:to_json_obj(
+ Doc,
+ [attachments, follows, att_encoding_info | Options]
+ )
+ ),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary, JsonBytes, Atts, true
+ ),
+ CType = {"Content-Type", ContentType},
+ {ok, Resp} = start_response_length(Req, 200, [CType | Headers], Len),
+ couch_doc:doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> couch_httpd:send(Resp, Data) end,
+ true
+ )
+ end
+ after
+ demonitor_refs(Refs)
+ end;
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
end.
send_docs_multipart_bulk_get(Results, Options0, OuterBoundary, Resp) ->
InnerBoundary = bulk_get_multipart_boundary(),
Options = [attachments, follows, att_encoding_info | Options0],
lists:foreach(
- fun({ok, #doc{id=Id, revs=Revs, atts=Atts}=Doc}) ->
- Refs = monitor_attachments(Doc#doc.atts),
- try
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>),
- case Atts of
- [] ->
- couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: application/json\r\n\r\n">>);
- _ ->
- lists:foreach(fun(Header) -> couch_httpd:send_chunk(Resp, Header) end,
- bulk_get_multipart_headers(Revs, Id, InnerBoundary))
- end,
- couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data)
- end, true)
- after
- demonitor_refs(Refs)
- end;
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"rev">>, RevStr},
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, <<"missing">>}]}),
- couch_httpd:send_chunk(Resp,
- [<<"\r\n--", OuterBoundary/binary>>,
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json])
- end, Results).
+ fun
+ ({ok, #doc{id = Id, revs = Revs, atts = Atts} = Doc}) ->
+ Refs = monitor_attachments(Doc#doc.atts),
+ try
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>),
+ case Atts of
+ [] ->
+ couch_httpd:send_chunk(
+ Resp, <<"\r\nContent-Type: application/json\r\n\r\n">>
+ );
+ _ ->
+ lists:foreach(
+ fun(Header) -> couch_httpd:send_chunk(Resp, Header) end,
+ bulk_get_multipart_headers(Revs, Id, InnerBoundary)
+ )
+ end,
+ couch_doc:doc_to_multi_part_stream(
+ InnerBoundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> couch_httpd:send_chunk(Resp, Data) end,
+ true
+ )
+ after
+ demonitor_refs(Refs)
+ end;
+ ({{not_found, missing}, RevId}) ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE(
+ {[
+ {<<"rev">>, RevStr},
+ {<<"error">>, <<"not_found">>},
+ {<<"reason">>, <<"missing">>}
+ ]}
+ ),
+ couch_httpd:send_chunk(
+ Resp,
+ [
+ <<"\r\n--", OuterBoundary/binary>>,
+ <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json
+ ]
+ )
+ end,
+ Results
+ ).
send_docs_multipart(Req, Results, Options1) ->
OuterBoundary = couch_uuids:random(),
InnerBoundary = couch_uuids:random(),
Options = [attachments, follows, att_encoding_info | Options1],
- CType = {"Content-Type",
- "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
+ CType = {"Content-Type", "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 200, [CType]),
chttpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
lists:foreach(
- fun({ok, #doc{atts=Atts}=Doc}) ->
- Refs = monitor_attachments(Doc#doc.atts),
- try
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
- InnerBoundary, JsonBytes, Atts, true),
- chttpd:send_chunk(Resp, <<"\r\nContent-Type: ",
- ContentType/binary, "\r\n\r\n">>),
- couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> chttpd:send_chunk(Resp, Data)
- end, true),
- chttpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
- after
- demonitor_refs(Refs)
- end;
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- chttpd:send_chunk(Resp,
- [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json,
- <<"\r\n--", OuterBoundary/binary>>])
- end, Results),
+ fun
+ ({ok, #doc{atts = Atts} = Doc}) ->
+ Refs = monitor_attachments(Doc#doc.atts),
+ try
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+ {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
+ InnerBoundary, JsonBytes, Atts, true
+ ),
+ chttpd:send_chunk(
+ Resp, <<"\r\nContent-Type: ", ContentType/binary, "\r\n\r\n">>
+ ),
+ couch_doc:doc_to_multi_part_stream(
+ InnerBoundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> chttpd:send_chunk(Resp, Data) end,
+ true
+ ),
+ chttpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
+ after
+ demonitor_refs(Refs)
+ end;
+ ({{not_found, missing}, RevId}) ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
+ chttpd:send_chunk(
+ Resp,
+ [
+ <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json,
+ <<"\r\n--", OuterBoundary/binary>>
+ ]
+ )
+ end,
+ Results
+ ),
chttpd:send_chunk(Resp, <<"--">>),
chttpd:last_chunk(Resp).
@@ -1281,7 +1466,7 @@ bulk_get_multipart_headers({0, []}, Id, Boundary) ->
<<"\r\nX-Doc-Id: ", Id/binary>>,
<<"\r\nContent-Type: multipart/related; boundary=", Boundary/binary, "\r\n\r\n">>
];
-bulk_get_multipart_headers({Start, [FirstRevId|_]}, Id, Boundary) ->
+bulk_get_multipart_headers({Start, [FirstRevId | _]}, Id, Boundary) ->
RevStr = couch_doc:rev_to_str({Start, FirstRevId}),
[
<<"\r\nX-Doc-Id: ", Id/binary>>,
@@ -1302,9 +1487,17 @@ receive_request_data(Req, Len) when Len == chunked ->
self() ! {chunk, Ref, Binary}
end,
couch_httpd:recv_chunked(Req, 4096, ChunkFun, ok),
- GetChunk = fun GC() -> receive {chunk, Ref, Binary} -> {Binary, GC} end end,
- {receive {chunk, Ref, Binary} -> Binary end, GetChunk};
-
+ GetChunk = fun GC() ->
+ receive
+ {chunk, Ref, Binary} -> {Binary, GC}
+ end
+ end,
+ {
+ receive
+ {chunk, Ref, Binary} -> Binary
+ end,
+ GetChunk
+ };
receive_request_data(Req, LenLeft) when LenLeft > 0 ->
Len = erlang:min(4096, LenLeft),
Data = chttpd:recv(Req, Len),
@@ -1313,11 +1506,15 @@ receive_request_data(_Req, _) ->
throw(<<"expected more data">>).
update_doc_result_to_json({{Id, Rev}, Error}) ->
- {_Code, Err, Msg} = chttpd:error_info(Error),
- {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
- {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
+ {_Code, Err, Msg} = chttpd:error_info(Error),
+ {[
+ {id, Id},
+ {rev, couch_doc:rev_to_str(Rev)},
+ {error, Err},
+ {reason, Msg}
+ ]}.
+
+update_doc_result_to_json(#doc{id = DocId}, Result) ->
update_doc_result_to_json(DocId, Result);
update_doc_result_to_json(DocId, {ok, NewRev}) ->
{[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
@@ -1335,19 +1532,29 @@ send_updated_doc(Req, Db, DocId, Json) ->
send_updated_doc(Req, Db, DocId, Doc, Headers) ->
send_updated_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
-send_updated_doc(#httpd{} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
- Headers, UpdateType) ->
+send_updated_doc(
+ #httpd{} = Req,
+ Db,
+ DocId,
+ #doc{deleted = Deleted} = Doc,
+ Headers,
+ UpdateType
+) ->
Options =
case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- [full_commit, UpdateType];
- "false" ->
- [delay_commit, UpdateType];
- _ ->
- [UpdateType]
+ "true" ->
+ [full_commit, UpdateType];
+ "false" ->
+ [delay_commit, UpdateType];
+ _ ->
+ [UpdateType]
end,
- {Status, {etag, Etag}, Body} = update_doc(Db, DocId,
- #doc{deleted=Deleted}=Doc, Options),
+ {Status, {etag, Etag}, Body} = update_doc(
+ Db,
+ DocId,
+ #doc{deleted = Deleted} = Doc,
+ Options
+ ),
HttpCode = http_code_from_status(Status),
ResponseHeaders = [{"ETag", Etag} | Headers],
send_json(Req, HttpCode, ResponseHeaders, Body).
@@ -1362,341 +1569,422 @@ http_code_from_status(Status) ->
200
end.
-update_doc(Db, DocId, #doc{deleted=Deleted, body=DocBody}=Doc0, Options) ->
+update_doc(Db, DocId, #doc{deleted = Deleted, body = DocBody} = Doc0, Options) ->
Doc = read_att_data(Doc0),
case fabric2_db:update_doc(Db, Doc, Options) of
- {ok, NewRev} ->
- Accepted = false;
- {accepted, NewRev} ->
- Accepted = true
+ {ok, NewRev} ->
+ Accepted = false;
+ {accepted, NewRev} ->
+ Accepted = true
end,
Etag = couch_httpd:doc_etag(DocId, DocBody, NewRev),
- Status = case {Accepted, Deleted} of
- {true, _} ->
- accepted;
- {false, true} ->
- ok;
- {false, false} ->
- created
- end,
+ Status =
+ case {Accepted, Deleted} of
+ {true, _} ->
+ accepted;
+ {false, true} ->
+ ok;
+ {false, false} ->
+ created
+ end,
NewRevStr = couch_doc:rev_to_str(NewRev),
Body = {[{ok, true}, {id, DocId}, {rev, NewRevStr}]},
{Status, {etag, Etag}, Body}.
-couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs} = Doc) ->
+couch_doc_from_req(Req, _Db, DocId, #doc{revs = Revs} = Doc) ->
validate_attachment_names(Doc),
- Rev = case chttpd:qs_value(Req, "rev") of
- undefined ->
- undefined;
- QSRev ->
- couch_doc:parse_rev(QSRev)
- end,
+ Rev =
+ case chttpd:qs_value(Req, "rev") of
+ undefined ->
+ undefined;
+ QSRev ->
+ couch_doc:parse_rev(QSRev)
+ end,
Revs2 =
- case Revs of
- {Start, [RevId|_]} ->
- if Rev /= undefined andalso Rev /= {Start, RevId} ->
- throw({bad_request, "Document rev from request body and query "
- "string have different values"});
- true ->
- case extract_header_rev(Req, {Start, RevId}) of
- missing_rev -> {0, []};
- _ -> Revs
- end
- end;
- _ ->
- case extract_header_rev(Req, Rev) of
- missing_rev -> {0, []};
- {Pos, RevId2} -> {Pos, [RevId2]}
- end
- end,
- Doc#doc{id=DocId, revs=Revs2};
+ case Revs of
+ {Start, [RevId | _]} ->
+ if
+ Rev /= undefined andalso Rev /= {Start, RevId} ->
+ throw(
+ {bad_request,
+ "Document rev from request body and query "
+ "string have different values"}
+ );
+ true ->
+ case extract_header_rev(Req, {Start, RevId}) of
+ missing_rev -> {0, []};
+ _ -> Revs
+ end
+ end;
+ _ ->
+ case extract_header_rev(Req, Rev) of
+ missing_rev -> {0, []};
+ {Pos, RevId2} -> {Pos, [RevId2]}
+ end
+ end,
+ Doc#doc{id = DocId, revs = Revs2};
couch_doc_from_req(Req, Db, DocId, Json) ->
Doc = couch_doc:from_json_obj_validate(Json, fabric2_db:name(Db)),
couch_doc_from_req(Req, Db, DocId, Doc).
-
% Useful for debugging
% couch_doc_open(Db, DocId) ->
% couch_doc_open(Db, DocId, nil, []).
couch_doc_open(Db, DocId, Rev, Options) ->
case Rev of
- nil -> % open most recent rev
- case fabric2_db:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- chttpd_stats:incr_reads(),
- Doc;
- Error ->
- throw(Error)
- end;
- _ -> % open a specific rev (deletions come back as stubs)
- case fabric2_db:open_doc_revs(Db, DocId, [Rev], Options) of
- {ok, [{ok, Doc}]} ->
- chttpd_stats:incr_reads(),
- Doc;
- {ok, [{{not_found, missing}, Rev}]} ->
- throw(not_found);
- {ok, [Else]} ->
- throw(Else);
- {error, Error} ->
- throw(Error)
- end
- end.
+ % open most recent rev
+ nil ->
+ case fabric2_db:open_doc(Db, DocId, Options) of
+ {ok, Doc} ->
+ chttpd_stats:incr_reads(),
+ Doc;
+ Error ->
+ throw(Error)
+ end;
+ % open a specific rev (deletions come back as stubs)
+ _ ->
+ case fabric2_db:open_doc_revs(Db, DocId, [Rev], Options) of
+ {ok, [{ok, Doc}]} ->
+ chttpd_stats:incr_reads(),
+ Doc;
+ {ok, [{{not_found, missing}, Rev}]} ->
+ throw(not_found);
+ {ok, [Else]} ->
+ throw(Else);
+ {error, Error} ->
+ throw(Error)
+ end
+ end.
% Attachment request handlers
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
- FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
+db_attachment_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId, FileNameParts) ->
+ FileName = list_to_binary(
+ mochiweb_util:join(
+ lists:map(
+ fun binary_to_list/1,
+ FileNameParts
+ ),
+ "/"
+ )
+ ),
#doc_query_args{
- rev=Rev,
- options=Options
+ rev = Rev,
+ options = Options
} = parse_doc_query(Req),
#doc{
- atts=Atts
+ atts = Atts
} = Doc = couch_doc_open(Db, DocId, Rev, Options),
case [A || A <- Atts, couch_att:fetch(name, A) == FileName] of
- [] ->
- throw({not_found, "Document is missing attachment"});
- [Att] ->
- [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch([type, encoding, disk_len, att_len, md5], Att),
- Refs = monitor_attachments(Att),
- try
- Etag = case Md5 of
- <<>> -> chttpd:doc_etag(Doc);
- _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
- end,
- ReqAcceptsAttEnc = lists:member(
- atom_to_list(Enc),
- couch_httpd:accepted_encodings(Req)
- ),
- Headers = [
- {"ETag", Etag},
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", binary_to_list(Type)}
- ] ++ case ReqAcceptsAttEnc of
- true when Enc =/= identity ->
- % RFC 2616 says that the 'identity' encoding should not be used in
- % the Content-Encoding header
- [{"Content-Encoding", atom_to_list(Enc)}];
- _ ->
- []
- end ++ case Enc of
- identity ->
- [{"Accept-Ranges", "bytes"}];
- _ ->
- [{"Accept-Ranges", "none"}]
- end,
- Len = case {Enc, ReqAcceptsAttEnc} of
- {identity, _} ->
- % stored and served in identity form
- DiskLen;
- {_, false} when DiskLen =/= AttLen ->
- % Stored encoded, but client doesn't accept the encoding we used,
- % so we need to decode on the fly. DiskLen is the identity length
- % of the attachment.
- DiskLen;
- {_, true} ->
- % Stored and served encoded. AttLen is the encoded length.
- AttLen;
- _ ->
- % We received an encoded attachment and stored it as such, so we
- % don't know the identity length. The client doesn't accept the
- % encoding, and since we cannot serve a correct Content-Length
- % header we'll fall back to a chunked response.
- undefined
- end,
- AttFun = case ReqAcceptsAttEnc of
- false ->
- fun couch_att:foldl_decode/3;
- true ->
- fun couch_att:foldl/3
- end,
- chttpd:etag_respond(
- Req,
- Etag,
- fun() ->
- case Len of
- undefined ->
- {ok, Resp} = start_chunked_response(Req, 200, Headers),
- AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- couch_httpd:last_chunk(Resp);
- _ ->
- Ranges = parse_ranges(MochiReq:get(range), Len),
- case {Enc, Ranges} of
- {identity, [{From, To}]} ->
- Headers1 = [{"Content-Range", make_content_range(From, To, Len)}]
- ++ Headers,
- {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
- {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
- send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ [] ->
+ throw({not_found, "Document is missing attachment"});
+ [Att] ->
+ [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch(
+ [type, encoding, disk_len, att_len, md5], Att
+ ),
+ Refs = monitor_attachments(Att),
+ try
+ Etag =
+ case Md5 of
+ <<>> -> chttpd:doc_etag(Doc);
+ _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
+ end,
+ ReqAcceptsAttEnc = lists:member(
+ atom_to_list(Enc),
+ couch_httpd:accepted_encodings(Req)
+ ),
+ Headers =
+ [
+ {"ETag", Etag},
+ {"Cache-Control", "must-revalidate"},
+ {"Content-Type", binary_to_list(Type)}
+ ] ++
+ case ReqAcceptsAttEnc of
+ true when Enc =/= identity ->
+ % RFC 2616 says that the 'identity' encoding should not be used in
+ % the Content-Encoding header
+ [{"Content-Encoding", atom_to_list(Enc)}];
+ _ ->
+ []
+ end ++
+ case Enc of
+ identity ->
+ [{"Accept-Ranges", "bytes"}];
+ _ ->
+ [{"Accept-Ranges", "none"}]
+ end,
+ Len =
+ case {Enc, ReqAcceptsAttEnc} of
+ {identity, _} ->
+ % stored and served in identity form
+ DiskLen;
+ {_, false} when DiskLen =/= AttLen ->
+ % Stored encoded, but client doesn't accept the encoding we used,
+ % so we need to decode on the fly. DiskLen is the identity length
+ % of the attachment.
+ DiskLen;
+ {_, true} ->
+ % Stored and served encoded. AttLen is the encoded length.
+ AttLen;
_ ->
- Headers1 = Headers ++
- if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
- [{"Content-MD5", base64:encode(couch_att:fetch(md5, Att))}];
- true ->
- []
- end,
- {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
- AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ % We received an encoded attachment and stored it as such, so we
+ % don't know the identity length. The client doesn't accept the
+ % encoding, and since we cannot serve a correct Content-Length
+ % header we'll fall back to a chunked response.
+ undefined
+ end,
+ AttFun =
+ case ReqAcceptsAttEnc of
+ false ->
+ fun couch_att:foldl_decode/3;
+ true ->
+ fun couch_att:foldl/3
+ end,
+ chttpd:etag_respond(
+ Req,
+ Etag,
+ fun() ->
+ case Len of
+ undefined ->
+ {ok, Resp} = start_chunked_response(Req, 200, Headers),
+ AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+ couch_httpd:last_chunk(Resp);
+ _ ->
+ Ranges = parse_ranges(MochiReq:get(range), Len),
+ case {Enc, Ranges} of
+ {identity, [{From, To}]} ->
+ Headers1 =
+ [{"Content-Range", make_content_range(From, To, Len)}] ++
+ Headers,
+ {ok, Resp} = start_response_length(
+ Req, 206, Headers1, To - From + 1
+ ),
+ couch_att:range_foldl(
+ Att,
+ From,
+ To + 1,
+ fun(Seg, _) -> send(Resp, Seg) end,
+ {ok, Resp}
+ );
+ {identity, Ranges} when
+ is_list(Ranges) andalso length(Ranges) < 10
+ ->
+ send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ _ ->
+ Headers1 =
+ Headers ++
+ if
+ Enc =:= identity orelse
+ ReqAcceptsAttEnc =:= true ->
+ [
+ {"Content-MD5",
+ base64:encode(
+ couch_att:fetch(md5, Att)
+ )}
+ ];
+ true ->
+ []
+ end,
+ {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
+ AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ end
+ end
end
- end
+ )
+ after
+ demonitor_refs(Refs)
end
- )
- after
- demonitor_refs(Refs)
- end
end;
-
-
-db_attachment_req(#httpd{method=Method}=Req, Db, DocId, FileNameParts)
- when (Method == 'PUT') or (Method == 'DELETE') ->
+db_attachment_req(#httpd{method = Method} = Req, Db, DocId, FileNameParts) when
+ (Method == 'PUT') or (Method == 'DELETE')
+->
#httpd{
mochi_req = MochiReq
} = Req,
FileName = validate_attachment_name(
- mochiweb_util:join(
- lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
+ mochiweb_util:join(
+ lists:map(
+ fun binary_to_list/1,
+ FileNameParts
+ ),
+ "/"
+ )
+ ),
- NewAtt = case Method of
- 'DELETE' ->
- [];
- _ ->
- MimeType = case chttpd:header_value(Req,"Content-Type") of
- % We could throw an error here or guess by the FileName.
- % Currently, just giving it a default.
- undefined -> <<"application/octet-stream">>;
- CType -> list_to_binary(CType)
- end,
- Data = case chttpd:body_length(Req) of
- undefined ->
- <<"">>;
- {unknown_transfer_encoding, Unknown} ->
- exit({unknown_transfer_encoding, Unknown});
- chunked ->
- fun(MaxChunkSize, ChunkFun, InitState) ->
- chttpd:recv_chunked(
- Req, MaxChunkSize, ChunkFun, InitState
- )
- end;
- 0 ->
- <<"">>;
- Length when is_integer(Length) ->
- Expect = case chttpd:header_value(Req, "expect") of
+ NewAtt =
+ case Method of
+ 'DELETE' ->
+ [];
+ _ ->
+ MimeType =
+ case chttpd:header_value(Req, "Content-Type") of
+ % We could throw an error here or guess by the FileName.
+ % Currently, just giving it a default.
+ undefined -> <<"application/octet-stream">>;
+ CType -> list_to_binary(CType)
+ end,
+ Data =
+ case chttpd:body_length(Req) of
undefined ->
- undefined;
- Value when is_list(Value) ->
- string:to_lower(Value)
+ <<"">>;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ fun(MaxChunkSize, ChunkFun, InitState) ->
+ chttpd:recv_chunked(
+ Req, MaxChunkSize, ChunkFun, InitState
+ )
+ end;
+ 0 ->
+ <<"">>;
+ Length when is_integer(Length) ->
+ Expect =
+ case chttpd:header_value(Req, "expect") of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _Else ->
+ ok
+ end,
+ fun() -> chttpd:recv(Req, 0) end;
+ Length ->
+ exit({length_not_integer, Length})
end,
- case Expect of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _Else ->
- ok
+ ContentLen =
+ case couch_httpd:header_value(Req, "Content-Length") of
+ undefined -> undefined;
+ CL -> list_to_integer(CL)
end,
- fun() -> chttpd:recv(Req, 0) end;
- Length ->
- exit({length_not_integer, Length})
- end,
- ContentLen = case couch_httpd:header_value(Req,"Content-Length") of
- undefined -> undefined;
- CL -> list_to_integer(CL)
- end,
- ContentEnc = string:to_lower(string:strip(
- couch_httpd:header_value(Req, "Content-Encoding", "identity")
- )),
- Encoding = case ContentEnc of
- "identity" ->
- identity;
- "gzip" ->
- gzip;
- _ ->
- throw({
- bad_ctype,
- "Only gzip and identity content-encodings are supported"
- })
- end,
- [couch_att:new([
- {name, FileName},
- {type, MimeType},
- {data, Data},
- {att_len, ContentLen},
- {md5, get_md5_header(Req)},
- {encoding, Encoding}
- ])]
- end,
+ ContentEnc = string:to_lower(
+ string:strip(
+ couch_httpd:header_value(Req, "Content-Encoding", "identity")
+ )
+ ),
+ Encoding =
+ case ContentEnc of
+ "identity" ->
+ identity;
+ "gzip" ->
+ gzip;
+ _ ->
+ throw({
+ bad_ctype,
+ "Only gzip and identity content-encodings are supported"
+ })
+ end,
+ [
+ couch_att:new([
+ {name, FileName},
+ {type, MimeType},
+ {data, Data},
+ {att_len, ContentLen},
+ {md5, get_md5_header(Req)},
+ {encoding, Encoding}
+ ])
+ ]
+ end,
- Doc = case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
- missing_rev -> % make the new doc
- if Method =/= 'DELETE' -> ok; true ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, [])
- end,
- fabric2_db:validate_docid(DocId),
- #doc{id=DocId};
- Rev ->
- case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} ->
- chttpd_stats:incr_reads(),
- Doc0;
- {ok, [Error]} ->
- throw(Error);
- {error, Error} ->
- throw(Error)
- end
- end,
+ Doc =
+ case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
+ % make the new doc
+ missing_rev ->
+ if
+ Method =/= 'DELETE' ->
+ ok;
+ true ->
+ % check for the existence of the doc to handle the 404 case.
+ couch_doc_open(Db, DocId, nil, [])
+ end,
+ fabric2_db:validate_docid(DocId),
+ #doc{id = DocId};
+ Rev ->
+ case fabric2_db:open_doc_revs(Db, DocId, [Rev], []) of
+ {ok, [{ok, Doc0}]} ->
+ chttpd_stats:incr_reads(),
+ Doc0;
+ {ok, [Error]} ->
+ throw(Error);
+ {error, Error} ->
+ throw(Error)
+ end
+ end,
- #doc{atts=Atts} = Doc,
+ #doc{atts = Atts} = Doc,
DocEdited0 = Doc#doc{
atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
},
DocEdited = read_att_data(DocEdited0),
case fabric2_db:update_doc(Db, DocEdited, []) of
- {ok, UpdatedRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, UpdatedRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
+ {ok, UpdatedRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, UpdatedRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
end,
erlang:put(mochiweb_request_recv, true),
DbName = fabric2_db:name(Db),
- {Status, Headers} = case Method of
- 'DELETE' ->
- {200, []};
- _ ->
- {HttpCode, [{"Location", absolute_uri(Req, [$/, DbName, $/, couch_util:url_encode(DocId), $/,
- couch_util:url_encode(FileName)])}]}
+ {Status, Headers} =
+ case Method of
+ 'DELETE' ->
+ {200, []};
+ _ ->
+ {HttpCode, [
+ {"Location",
+ absolute_uri(Req, [
+ $/,
+ DbName,
+ $/,
+ couch_util:url_encode(DocId),
+ $/,
+ couch_util:url_encode(FileName)
+ ])}
+ ]}
end,
- send_json(Req,Status, Headers, {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(UpdatedRev)}
- ]});
-
+ send_json(
+ Req,
+ Status,
+ Headers,
+ {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(UpdatedRev)}
+ ]}
+ );
db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
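% An end-to-end sketch of the PUT path above, assuming a plain
% (non-chunked) upload to an existing document:
%
%   %% PUT /db/doc1/file.txt?rev=1-abc HTTP/1.1
%   %% Content-Type: text/plain          -> MimeType = <<"text/plain">>
%   %% Content-Length: 11                -> Data = fun() -> chttpd:recv(Req, 0) end
%   %% (no Content-Encoding header)      -> Encoding = identity
%   %%
%   %% => 201 {"ok":true,"id":"doc1","rev":"2-..."}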
send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
Boundary = couch_uuids:random(),
- CType = {"Content-Type",
- "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
+ CType = {"Content-Type", "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 206, [CType]),
couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
- lists:foreach(fun({From, To}) ->
- ContentRange = make_content_range(From, To, Len),
- couch_httpd:send_chunk(Resp,
- <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
- "Content-Range: ", ContentRange/binary, "\r\n",
- "\r\n">>),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
- end, Ranges),
+ lists:foreach(
+ fun({From, To}) ->
+ ContentRange = make_content_range(From, To, Len),
+ couch_httpd:send_chunk(
+ Resp,
+ <<"\r\nContent-Type: ", ContentType/binary, "\r\n", "Content-Range: ",
+ ContentRange/binary, "\r\n", "\r\n">>
+ ),
+ couch_att:range_foldl(
+ Att,
+ From,
+ To + 1,
+ fun(Seg, _) -> send_chunk(Resp, Seg) end,
+ {ok, Resp}
+ ),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
+ end,
+ Ranges
+ ),
couch_httpd:send_chunk(Resp, <<"--">>),
couch_httpd:last_chunk(Resp),
{ok, Resp}.
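% A sketch of the multipart/byteranges body the function above streams for
% two ranges of a 100-byte attachment, assuming a boundary of "abc" (the
% real boundary is a random UUID) and the usual "bytes From-To/Len" form
% from make_content_range/3:
%
%   --abc
%   Content-Type: application/octet-stream
%   Content-Range: bytes 0-9/100
%
%   <bytes 0..9>
%   --abc
%   Content-Type: application/octet-stream
%   Content-Range: bytes 90-99/100
%
%   <bytes 90..99>
%   --abc--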
@@ -1710,18 +1998,21 @@ parse_ranges(Ranges, Len) ->
parse_ranges([], _Len, Acc) ->
lists:reverse(Acc);
-parse_ranges([{0, none}|_], _Len, _Acc) ->
+parse_ranges([{0, none} | _], _Len, _Acc) ->
undefined;
-parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
+parse_ranges([{From, To} | _], _Len, _Acc) when
+ is_integer(From) andalso is_integer(To) andalso To < From
+->
throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc)
- when is_integer(To) andalso To >= Len ->
- parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
+parse_ranges([{From, To} | Rest], Len, Acc) when
+ is_integer(To) andalso To >= Len
+->
+ parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{none, To} | Rest], Len, Acc) ->
parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
+parse_ranges([{From, none} | Rest], Len, Acc) ->
parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
+parse_ranges([{From, To} | Rest], Len, Acc) ->
parse_ranges(Rest, Len, [{From, To}] ++ Acc).
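% A usage sketch for the clauses above, assuming parse_ranges/2 were
% exported (it is module-internal):
%
%   %% Suffix range: the last 500 bytes of a 10000-byte attachment.
%   [{9500, 9999}] = parse_ranges([{none, 500}], 10000),
%   %% An open-ended range is clamped to the end of the attachment.
%   [{9500, 9999}] = parse_ranges([{9500, none}], 10000),
%   %% {0, none} covers the whole attachment, so range handling is skipped.
%   undefined = parse_ranges([{0, none}], 10000),
%   %% An inverted range throws requested_range_not_satisfiable.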
make_content_range(From, To, Len) ->
@@ -1754,116 +2045,130 @@ parse_doc_query({Key, Value}, Args) ->
case {Key, Value} of
{"attachments", "true"} ->
Options = [attachments | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"meta", "true"} ->
Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"revs", "true"} ->
Options = [revs | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"local_seq", "true"} ->
Options = [local_seq | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"revs_info", "true"} ->
Options = [revs_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"conflicts", "true"} ->
Options = [conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"deleted", "true"} ->
Options = [deleted | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"deleted_conflicts", "true"} ->
Options = [deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"rev", Rev} ->
- Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
+ Args#doc_query_args{rev = couch_doc:parse_rev(Rev)};
{"open_revs", "all"} ->
- Args#doc_query_args{open_revs=all};
+ Args#doc_query_args{open_revs = all};
{"open_revs", RevsJsonStr} ->
JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
+ Args#doc_query_args{open_revs = couch_doc:parse_revs(JsonArray)};
{"latest", "true"} ->
Options = [latest | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"atts_since", RevsJsonStr} ->
JsonArray = ?JSON_DECODE(RevsJsonStr),
Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
{"new_edits", "false"} ->
- Args#doc_query_args{update_type=replicated_changes};
+ Args#doc_query_args{update_type = replicated_changes};
{"new_edits", "true"} ->
- Args#doc_query_args{update_type=interactive_edit};
+ Args#doc_query_args{update_type = interactive_edit};
{"att_encoding_info", "true"} ->
Options = [att_encoding_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"r", R} ->
- Options = [{r,R} | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Options = [{r, R} | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
{"w", W} ->
- Options = [{w,W} | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- _Else -> % unknown key value pair, ignore.
+ Options = [{w, W} | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ % unknown key value pair, ignore.
+ _Else ->
Args
end.
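% A folding sketch, assuming parse_doc_query/2 were exported: each query
% string pair folds into the #doc_query_args{} accumulator in turn.
%
%   Args = lists:foldl(
%       fun parse_doc_query/2,
%       #doc_query_args{},
%       [{"rev", "1-967a00dff5e02add41819138abb3284d"}, {"attachments", "true"}]
%   ),
%   %% Args#doc_query_args.rev now holds the parsed rev and
%   %% Args#doc_query_args.options includes the attachments flag.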
parse_changes_query(Req) ->
erlang:erase(changes_seq_interval),
- ChangesArgs = lists:foldl(fun({Key, Value}, Args) ->
- case {string:to_lower(Key), Value} of
- {"feed", "live"} ->
- %% sugar for continuous
- Args#changes_args{feed="continuous"};
- {"feed", _} ->
- Args#changes_args{feed=Value};
- {"descending", "true"} ->
- Args#changes_args{dir=rev};
- {"since", _} ->
- Args#changes_args{since=parse_since_seq(Value)};
- {"last-event-id", _} ->
- Args#changes_args{since=Value};
- {"limit", _} ->
- Args#changes_args{limit=list_to_integer(Value)};
- {"style", _} ->
- Args#changes_args{style=list_to_existing_atom(Value)};
- {"heartbeat", "true"} ->
- Args#changes_args{heartbeat=true};
- {"heartbeat", _} ->
- try list_to_integer(Value) of
- HeartbeatInteger when HeartbeatInteger > 0 ->
- Args#changes_args{heartbeat=HeartbeatInteger};
- _ ->
- throw({bad_request, <<"The heartbeat value should be a positive integer (in milliseconds).">>})
- catch error:badarg ->
- throw({bad_request, <<"Invalid heartbeat value. Expecting a positive integer value (in milliseconds).">>})
- end;
- {"timeout", _} ->
- Args#changes_args{timeout=list_to_integer(Value)};
- {"include_docs", "true"} ->
- Args#changes_args{include_docs=true};
- {"conflicts", "true"} ->
- Args#changes_args{conflicts=true};
- {"attachments", "true"} ->
- Options = [attachments | Args#changes_args.doc_options],
- Args#changes_args{doc_options=Options};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#changes_args.doc_options],
- Args#changes_args{doc_options=Options};
- {"filter", _} ->
- Args#changes_args{filter=Value};
- {"seq_interval", _} ->
- try list_to_integer(Value) of
- V when V > 0 ->
- erlang:put(changes_seq_interval, V),
- Args;
- _ ->
- throw({bad_request, invalid_seq_interval})
- catch error:badarg ->
- throw({bad_request, invalid_seq_interval})
- end;
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #changes_args{}, chttpd:qs(Req)),
+ ChangesArgs = lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {string:to_lower(Key), Value} of
+ {"feed", "live"} ->
+ %% sugar for continuous
+ Args#changes_args{feed = "continuous"};
+ {"feed", _} ->
+ Args#changes_args{feed = Value};
+ {"descending", "true"} ->
+ Args#changes_args{dir = rev};
+ {"since", _} ->
+ Args#changes_args{since = parse_since_seq(Value)};
+ {"last-event-id", _} ->
+ Args#changes_args{since = Value};
+ {"limit", _} ->
+ Args#changes_args{limit = list_to_integer(Value)};
+ {"style", _} ->
+ Args#changes_args{style = list_to_existing_atom(Value)};
+ {"heartbeat", "true"} ->
+ Args#changes_args{heartbeat = true};
+ {"heartbeat", _} ->
+ try list_to_integer(Value) of
+ HeartbeatInteger when HeartbeatInteger > 0 ->
+ Args#changes_args{heartbeat = HeartbeatInteger};
+ _ ->
+ throw(
+ {bad_request,
+ <<"The heartbeat value should be a positive integer (in milliseconds).">>}
+ )
+ catch
+ error:badarg ->
+ throw(
+ {bad_request,
+ <<"Invalid heartbeat value. Expecting a positive integer value (in milliseconds).">>}
+ )
+ end;
+ {"timeout", _} ->
+ Args#changes_args{timeout = list_to_integer(Value)};
+ {"include_docs", "true"} ->
+ Args#changes_args{include_docs = true};
+ {"conflicts", "true"} ->
+ Args#changes_args{conflicts = true};
+ {"attachments", "true"} ->
+ Options = [attachments | Args#changes_args.doc_options],
+ Args#changes_args{doc_options = Options};
+ {"att_encoding_info", "true"} ->
+ Options = [att_encoding_info | Args#changes_args.doc_options],
+ Args#changes_args{doc_options = Options};
+ {"filter", _} ->
+ Args#changes_args{filter = Value};
+ {"seq_interval", _} ->
+ try list_to_integer(Value) of
+ V when V > 0 ->
+ erlang:put(changes_seq_interval, V),
+ Args;
+ _ ->
+ throw({bad_request, invalid_seq_interval})
+ catch
+ error:badarg ->
+ throw({bad_request, invalid_seq_interval})
+ end;
+ % unknown key value pair, ignore.
+ _Else ->
+ Args
+ end
+ end,
+ #changes_args{},
+ chttpd:qs(Req)
+ ),
%% if it's an EventSource request with a Last-event-ID header
%% that should override the `since` query string, since it's
%% probably the browser reconnecting.
@@ -1873,19 +2178,16 @@ parse_changes_query(Req) ->
undefined ->
ChangesArgs;
Value ->
- ChangesArgs#changes_args{since=Value}
+ ChangesArgs#changes_args{since = Value}
end;
_ ->
ChangesArgs
end.
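% The resulting record for a typical request, as a sketch: a query string
% of ?feed=live&heartbeat=5000&include_docs=true folds into
%
%   #changes_args{
%       %% "live" is sugar for continuous
%       feed = "continuous",
%       %% validated as a positive integer (milliseconds)
%       heartbeat = 5000,
%       include_docs = true
%   }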
-
parse_since_seq(<<"now">>) ->
now;
-
parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 30 ->
throw({bad_request, url_encoded_since_seq});
-
parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 2 ->
% We have implicitly allowed the since seq to either be
% JSON encoded or a "raw" string. Here we just remove the
@@ -1895,51 +2197,54 @@ parse_since_seq(Seq) when is_binary(Seq), size(Seq) > 2 ->
<<"\"", S:SeqSize/binary, "\"">> -> S;
S -> S
end;
-
parse_since_seq(Seq) when is_binary(Seq) ->
Seq;
-
parse_since_seq(Seq) when is_list(Seq) ->
parse_since_seq(iolist_to_binary(Seq)).
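% Behavior sketch for the clauses above:
%
%   now = parse_since_seq(<<"now">>),
%   %% A short JSON-quoted seq has its surrounding quotes stripped.
%   <<"5-abc">> = parse_since_seq(<<"\"5-abc\"">>),
%   %% Anything over 30 bytes is assumed to be url-encoded junk and throws
%   %% {bad_request, url_encoded_since_seq}.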
-
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
+extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) ->
extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
extract_header_rev(Req, ExplicitRev) ->
- Etag = case chttpd:header_value(Req, "If-Match") of
- undefined -> undefined;
- Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
- end,
+ Etag =
+ case chttpd:header_value(Req, "If-Match") of
+ undefined -> undefined;
+ Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
+ end,
case {ExplicitRev, Etag} of
- {undefined, undefined} -> missing_rev;
- {_, undefined} -> ExplicitRev;
- {undefined, _} -> Etag;
- _ when ExplicitRev == Etag -> Etag;
- _ ->
- throw({bad_request, "Document rev and etag have different values"})
+ {undefined, undefined} -> missing_rev;
+ {_, undefined} -> ExplicitRev;
+ {undefined, _} -> Etag;
+ _ when ExplicitRev == Etag -> Etag;
+ _ -> throw({bad_request, "Document rev and etag have different values"})
end.
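% The rev may arrive as a ?rev= query parameter or as an If-Match header;
% when both are present they must agree. Outcome sketch:
%
%   %% neither given            -> missing_rev
%   %% exactly one given        -> that rev
%   %% both given and equal     -> that rev
%   %% both given but different -> throws {bad_request,
%   %%     "Document rev and etag have different values"}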
validate_security_can_be_edited(DbName) ->
UserDbName = ?l2b(config:get("chttpd_auth", "authentication_db", "_users")),
- CanEditUserSecurityObject = config:get("couchdb","users_db_security_editable","false"),
- case {DbName,CanEditUserSecurityObject} of
- {UserDbName,"false"} ->
+ CanEditUserSecurityObject = config:get("couchdb", "users_db_security_editable", "false"),
+ case {DbName, CanEditUserSecurityObject} of
+ {UserDbName, "false"} ->
Msg = "You can't edit the security object of the user database.",
throw({forbidden, Msg});
- {_,_} -> ok
+ {_, _} ->
+ ok
end.
validate_attachment_names(Doc) ->
- lists:foreach(fun(Att) ->
- Name = couch_att:fetch(name, Att),
- validate_attachment_name(Name)
- end, Doc#doc.atts).
+ lists:foreach(
+ fun(Att) ->
+ Name = couch_att:fetch(name, Att),
+ validate_attachment_name(Name)
+ end,
+ Doc#doc.atts
+ ).
validate_attachment_name(Name) when is_list(Name) ->
validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",Rest/binary>>) ->
- throw({bad_request, <<"Attachment name '_", Rest/binary,
- "' starts with prohibited character '_'">>});
+validate_attachment_name(<<"_", Rest/binary>>) ->
+ throw(
+ {bad_request,
+ <<"Attachment name '_", Rest/binary, "' starts with prohibited character '_'">>}
+ );
validate_attachment_name(Name) ->
case couch_util:validate_utf8(Name) of
true -> Name;
@@ -1948,30 +2253,33 @@ validate_attachment_name(Name) ->
-spec monitor_attachments(couch_att:att() | [couch_att:att()]) -> [reference()].
monitor_attachments(Atts) when is_list(Atts) ->
- lists:foldl(fun(Att, Monitors) ->
- case couch_att:fetch(data, Att) of
- {Fd, _} ->
- [monitor(process, Fd) | Monitors];
- {loc, _, _, _} ->
- Monitors;
- stub ->
- Monitors;
- Else ->
- ?LOG_ERROR(#{
- what => malformed_attachment_data,
- attachment => Att
- }),
- couch_log:error("~p from couch_att:fetch(data, ~p)", [Else, Att]),
- Monitors
- end
- end, [], Atts);
+ lists:foldl(
+ fun(Att, Monitors) ->
+ case couch_att:fetch(data, Att) of
+ {Fd, _} ->
+ [monitor(process, Fd) | Monitors];
+ {loc, _, _, _} ->
+ Monitors;
+ stub ->
+ Monitors;
+ Else ->
+ ?LOG_ERROR(#{
+ what => malformed_attachment_data,
+ attachment => Att
+ }),
+ couch_log:error("~p from couch_att:fetch(data, ~p)", [Else, Att]),
+ Monitors
+ end
+ end,
+ [],
+ Atts
+ );
monitor_attachments(Att) ->
monitor_attachments([Att]).
demonitor_refs(Refs) when is_list(Refs) ->
[demonitor(Ref) || Ref <- Refs].
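% Pairing sketch: callers monitor the attachment fds for the duration of a
% request and demonitor in an after clause, as in the GET handler above
% (stream_attachments/2 is a hypothetical request body):
%
%   Refs = monitor_attachments(Atts),
%   try
%       stream_attachments(Req, Atts)
%   after
%       demonitor_refs(Refs)
%   end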
-
set_namespace(<<"_local_docs">>, Args) ->
set_namespace(<<"_local">>, Args);
set_namespace(<<"_design_docs">>, Args) ->
@@ -1979,25 +2287,27 @@ set_namespace(<<"_design_docs">>, Args) ->
set_namespace(NS, #mrargs{} = Args) ->
couch_views_util:set_extra(Args, namespace, NS).
-
%% /db/_bulk_get stuff
bulk_get_parse_doc_query(Req) ->
- lists:foldl(fun({Key, Value}, Args) ->
- ok = validate_query_param(Key),
- parse_doc_query({Key, Value}, Args)
- end, #doc_query_args{}, chttpd:qs(Req)).
-
+ lists:foldl(
+ fun({Key, Value}, Args) ->
+ ok = validate_query_param(Key),
+ parse_doc_query({Key, Value}, Args)
+ end,
+ #doc_query_args{},
+ chttpd:qs(Req)
+ ).
-validate_query_param("open_revs"=Key) ->
+validate_query_param("open_revs" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("new_edits"=Key) ->
+validate_query_param("new_edits" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("w"=Key) ->
+validate_query_param("w" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("rev"=Key) ->
+validate_query_param("rev" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("atts_since"=Key) ->
+validate_query_param("atts_since" = Key) ->
throw_bad_query_param(Key);
validate_query_param(_) ->
ok.
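% The net effect for _bulk_get: per-document fields such as rev and
% atts_since must travel in the request body, never the query string.
% For example, POST /db/_bulk_get?rev=1-abc is rejected up front with
%
%   400 {"error": "bad_request",
%        "reason": "\"rev\" query parameter is not acceptable"}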
@@ -2008,11 +2318,9 @@ throw_bad_query_param(Key) when is_binary(Key) ->
Msg = <<"\"", Key/binary, "\" query parameter is not acceptable">>,
throw({bad_request, Msg}).
-
bulk_get_open_doc_revs(Db, {Props}, Options) ->
bulk_get_open_doc_revs1(Db, Props, Options, {}).
-
bulk_get_open_doc_revs1(Db, Props, Options, {}) ->
case couch_util:get_value(<<"id">>, Props) of
undefined ->
@@ -2022,8 +2330,9 @@ bulk_get_open_doc_revs1(Db, Props, Options, {}) ->
try
fabric2_db:validate_docid(DocId),
bulk_get_open_doc_revs1(Db, Props, Options, {DocId})
- catch throw:{Error, Reason} ->
- {DocId, {error, {null, Error, Reason}}, Options}
+ catch
+ throw:{Error, Reason} ->
+ {DocId, {error, {null, Error, Reason}}, Options}
end
end;
bulk_get_open_doc_revs1(Db, Props, Options, {DocId}) ->
@@ -2032,10 +2341,8 @@ bulk_get_open_doc_revs1(Db, Props, Options, {DocId}) ->
case parse_field(<<"rev">>, RevStr) of
{error, {RevStr, Error, Reason}} ->
{DocId, {error, {RevStr, Error, Reason}}, Options};
-
{ok, undefined} ->
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, all});
-
{ok, Rev} ->
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, [Rev]})
end;
@@ -2045,10 +2352,8 @@ bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs}) ->
case parse_field(<<"atts_since">>, AttsSinceStr) of
{error, {BadAttsSinceRev, Error, Reason}} ->
{DocId, {error, {BadAttsSinceRev, Error, Reason}}, Options};
-
{ok, []} ->
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options});
-
{ok, RevList} ->
Options1 = [{atts_since, RevList}, attachments | Options],
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options1})
@@ -2066,7 +2371,6 @@ bulk_get_open_doc_revs1(Db, Props, _, {DocId, Revs, Options}) ->
{DocId, Else, Options}
end.
-
parse_field(<<"rev">>, undefined) ->
{ok, undefined};
parse_field(<<"rev">>, Value) ->
@@ -2074,7 +2378,7 @@ parse_field(<<"rev">>, Value) ->
Rev = couch_doc:parse_rev(Value),
{ok, Rev}
catch
- throw:{bad_request=Error, Reason} ->
+ throw:{bad_request = Error, Reason} ->
{error, {Value, Error, Reason}}
end;
parse_field(<<"atts_since">>, undefined) ->
@@ -2086,18 +2390,16 @@ parse_field(<<"atts_since">>, Value) when is_list(Value) ->
parse_field(<<"atts_since">>, Value) ->
    {error, {Value, bad_request, <<"atts_since value must be an array of revs.">>}}.
-
parse_atts_since([], Acc) ->
{ok, lists:reverse(Acc)};
parse_atts_since([RevStr | Rest], Acc) ->
case parse_field(<<"rev">>, RevStr) of
{ok, Rev} ->
parse_atts_since(Rest, [Rev | Acc]);
- {error, _}=Error ->
+ {error, _} = Error ->
Error
end.
-
bulk_get_send_docs_json(Resp, DocId, Results, Options, Sep) ->
Id = ?JSON_ENCODE(DocId),
send_chunk(Resp, [Sep, <<"{\"id\": ">>, Id, <<", \"docs\": [">>]),
@@ -2109,26 +2411,36 @@ bulk_get_send_docs_json1(Resp, DocId, {error, {Rev, Error, Reason}}, _) ->
bulk_get_send_docs_json1(_Resp, _DocId, {ok, []}, _) ->
ok;
bulk_get_send_docs_json1(Resp, DocId, {ok, Docs}, Options) ->
- lists:foldl(fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, [AccSeparator, Json]);
- {{Error, Reason}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = bulk_get_json_error(DocId, RevStr, Error, Reason),
- send_chunk(Resp, [AccSeparator, Json])
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc = couch_doc:to_json_obj(Doc, Options),
+ Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+ send_chunk(Resp, [AccSeparator, Json]);
+ {{Error, Reason}, RevId} ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = bulk_get_json_error(DocId, RevStr, Error, Reason),
+ send_chunk(Resp, [AccSeparator, Json])
+ end,
+ <<",">>
end,
- <<",">>
- end, <<"">>, Docs).
+ <<"">>,
+ Docs
+ ).
bulk_get_json_error(DocId, Rev, Error, Reason) ->
- ?JSON_ENCODE({[{error, {[{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, Error},
- {<<"reason">>, Reason}]}}]}).
-
+ ?JSON_ENCODE(
+ {[
+ {error,
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"error">>, Error},
+ {<<"reason">>, Reason}
+ ]}}
+ ]}
+ ).
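% Encoded shape of one per-revision error entry in a _bulk_get response,
% as a sketch:
%
%   {"error": {"id": "doc1",
%              "rev": "1-abc",
%              "error": "not_found",
%              "reason": "missing"}}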
read_att_data(#doc{} = Doc) ->
#doc{atts = Atts} = Doc,
diff --git a/src/chttpd/src/chttpd_epi.erl b/src/chttpd/src/chttpd_epi.erl
index ffbd87a07..5536c9e4d 100644
--- a/src/chttpd/src/chttpd_epi.erl
+++ b/src/chttpd/src/chttpd_epi.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(chttpd_epi).
-behaviour(couch_epi_plugin).
@@ -33,7 +32,6 @@ providers() ->
{chttpd_handlers, chttpd_httpd_handlers}
].
-
services() ->
[
{chttpd_auth, chttpd_auth},
diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl
index c0a2a9fe0..3beaeec6a 100644
--- a/src/chttpd/src/chttpd_external.erl
+++ b/src/chttpd/src/chttpd_external.erl
@@ -18,7 +18,7 @@
-export([json_req_obj_fields/0, json_req_obj/2, json_req_obj/3, json_req_obj/4]).
-export([default_or_content_type/2, parse_external_response/1]).
--import(chttpd,[send_error/4]).
+-import(chttpd, [send_error/4]).
-include_lib("couch/include/couch_db.hrl").
@@ -33,9 +33,23 @@ json_req_obj(Req, Db, DocId, Fields) when is_list(Fields) ->
{[{Field, json_req_obj_field(Field, Req, Db, DocId)} || Field <- Fields]}.
json_req_obj_fields() ->
- [<<"info">>, <<"uuid">>, <<"id">>, <<"method">>, <<"requested_path">>,
- <<"path">>, <<"raw_path">>, <<"query">>, <<"headers">>, <<"body">>,
- <<"peer">>, <<"form">>, <<"cookie">>, <<"userCtx">>, <<"secObj">>].
+ [
+ <<"info">>,
+ <<"uuid">>,
+ <<"id">>,
+ <<"method">>,
+ <<"requested_path">>,
+ <<"path">>,
+ <<"raw_path">>,
+ <<"query">>,
+ <<"headers">>,
+ <<"body">>,
+ <<"peer">>,
+ <<"form">>,
+ <<"cookie">>,
+ <<"userCtx">>,
+ <<"secObj">>
+ ].
json_req_obj_field(<<"info">>, #httpd{}, Db, _DocId) ->
{ok, Info} = fabric2_db:get_db_info(Db),
@@ -44,49 +58,53 @@ json_req_obj_field(<<"uuid">>, #httpd{}, _Db, _DocId) ->
couch_uuids:new();
json_req_obj_field(<<"id">>, #httpd{}, _Db, DocId) ->
DocId;
-json_req_obj_field(<<"method">>, #httpd{method=Method}, _Db, _DocId) ->
+json_req_obj_field(<<"method">>, #httpd{method = Method}, _Db, _DocId) ->
Method;
-json_req_obj_field(<<"requested_path">>, #httpd{requested_path_parts=Path}, _Db, _DocId) ->
+json_req_obj_field(<<"requested_path">>, #httpd{requested_path_parts = Path}, _Db, _DocId) ->
Path;
-json_req_obj_field(<<"path">>, #httpd{path_parts=Path}, _Db, _DocId) ->
+json_req_obj_field(<<"path">>, #httpd{path_parts = Path}, _Db, _DocId) ->
Path;
-json_req_obj_field(<<"raw_path">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"raw_path">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
?l2b(Req:get(raw_path));
-json_req_obj_field(<<"query">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"query">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
json_query_keys(to_json_terms(Req:parse_qs()));
-json_req_obj_field(<<"headers">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"headers">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
Headers = Req:get(headers),
Hlist = mochiweb_headers:to_list(Headers),
to_json_terms(Hlist);
-json_req_obj_field(<<"body">>, #httpd{req_body=undefined, mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"body">>, #httpd{req_body = undefined, mochi_req = Req}, _Db, _DocId) ->
MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296),
+ "max_http_request_size", 4294967296
+ ),
try
Req:recv_body(MaxSize)
- catch exit:normal ->
- exit({bad_request, <<"Invalid request body">>})
+ catch
+ exit:normal ->
+ exit({bad_request, <<"Invalid request body">>})
end;
-json_req_obj_field(<<"body">>, #httpd{req_body=Body}, _Db, _DocId) ->
+json_req_obj_field(<<"body">>, #httpd{req_body = Body}, _Db, _DocId) ->
Body;
-json_req_obj_field(<<"peer">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"peer">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
?l2b(Req:get(peer));
-json_req_obj_field(<<"form">>, #httpd{mochi_req=Req, method=Method}=HttpReq, Db, DocId) ->
+json_req_obj_field(<<"form">>, #httpd{mochi_req = Req, method = Method} = HttpReq, Db, DocId) ->
Body = json_req_obj_field(<<"body">>, HttpReq, Db, DocId),
- ParsedForm = case Req:get_primary_header_value("content-type") of
- "application/x-www-form-urlencoded" ++ _ when Method =:= 'POST' orelse Method =:= 'PUT' ->
- mochiweb_util:parse_qs(Body);
- _ ->
- []
- end,
+ ParsedForm =
+ case Req:get_primary_header_value("content-type") of
+ "application/x-www-form-urlencoded" ++ _ when
+ Method =:= 'POST' orelse Method =:= 'PUT'
+ ->
+ mochiweb_util:parse_qs(Body);
+ _ ->
+ []
+ end,
to_json_terms(ParsedForm);
-json_req_obj_field(<<"cookie">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"cookie">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
to_json_terms(Req:parse_cookie());
json_req_obj_field(<<"userCtx">>, #httpd{}, Db, _DocId) ->
json_user_ctx(Db);
json_req_obj_field(<<"secObj">>, #httpd{user_ctx = #user_ctx{}}, Db, _DocId) ->
fabric2_db:get_security(Db).
-
json_user_ctx(Db) ->
Ctx = fabric2_db:get_user_ctx(Db),
{[
@@ -95,7 +113,6 @@ json_user_ctx(Db) ->
{<<"roles">>, Ctx#user_ctx.roles}
]}.
-
to_json_terms(Data) ->
to_json_terms(Data, []).
to_json_terms([], Acc) ->
@@ -110,15 +127,15 @@ json_query_keys({Json}) ->
json_query_keys([], Acc) ->
{lists:reverse(Acc)};
json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"descending">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"descending">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"descending">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([Term | Rest], Acc) ->
- json_query_keys(Rest, [Term|Acc]).
+ json_query_keys(Rest, [Term | Acc]).
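% Only the key-like parameters are JSON-decoded before the query reaches an
% external process; everything else passes through untouched. For example:
%
%   json_query_keys({[{<<"startkey">>, <<"[\"a\"]">>}, {<<"limit">>, <<"10">>}]})
%   %% => {[{<<"startkey">>, [<<"a">>]}, {<<"limit">>, <<"10">>}]}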
send_external_response(Req, Response) ->
#extern_resp_args{
@@ -130,48 +147,59 @@ send_external_response(Req, Response) ->
} = parse_external_response(Response),
Headers1 = default_or_content_type(CType, Headers0),
case Json of
- nil ->
- chttpd:send_response(Req, Code, Headers1, Data);
- Json ->
- chttpd:send_json(Req, Code, Headers1, Json)
+ nil ->
+ chttpd:send_response(Req, Code, Headers1, Data);
+ Json ->
+ chttpd:send_json(Req, Code, Headers1, Json)
end.
parse_external_response({Response}) ->
- lists:foldl(fun({Key,Value}, Args) ->
- case {Key, Value} of
- {"", _} ->
- Args;
- {<<"code">>, Value} ->
- Args#extern_resp_args{code=Value};
- {<<"stop">>, true} ->
- Args#extern_resp_args{stop=true};
- {<<"json">>, Value} ->
- Args#extern_resp_args{
- json=Value,
- ctype="application/json"};
- {<<"body">>, Value} ->
- Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
- {<<"base64">>, Value} ->
- Args#extern_resp_args{
- data=base64:decode(Value),
- ctype="application/binary"
- };
- {<<"headers">>, {Headers}} ->
- NewHeaders = lists:map(fun({Header, HVal}) ->
- {couch_util:to_list(Header), couch_util:to_list(HVal)}
- end, Headers),
- Args#extern_resp_args{headers=NewHeaders};
- _ -> % unknown key
- Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
- throw({external_response_error, Msg})
+ lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {Key, Value} of
+ {"", _} ->
+ Args;
+ {<<"code">>, Value} ->
+ Args#extern_resp_args{code = Value};
+ {<<"stop">>, true} ->
+ Args#extern_resp_args{stop = true};
+ {<<"json">>, Value} ->
+ Args#extern_resp_args{
+ json = Value,
+ ctype = "application/json"
+ };
+ {<<"body">>, Value} ->
+ Args#extern_resp_args{data = Value, ctype = "text/html; charset=utf-8"};
+ {<<"base64">>, Value} ->
+ Args#extern_resp_args{
+ data = base64:decode(Value),
+ ctype = "application/binary"
+ };
+ {<<"headers">>, {Headers}} ->
+ NewHeaders = lists:map(
+ fun({Header, HVal}) ->
+ {couch_util:to_list(Header), couch_util:to_list(HVal)}
+ end,
+ Headers
+ ),
+ Args#extern_resp_args{headers = NewHeaders};
+ % unknown key
+ _ ->
+ Msg = lists:flatten(
+ io_lib:format("Invalid data from external server: ~p", [{Key, Value}])
+ ),
+ throw({external_response_error, Msg})
end
- end, #extern_resp_args{}, Response).
+ end,
+ #extern_resp_args{},
+ Response
+ ).
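% A minimal external response object and the record it folds into, as a
% sketch:
%
%   parse_external_response({[
%       {<<"code">>, 200},
%       {<<"json">>, {[{<<"ok">>, true}]}},
%       {<<"headers">>, {[{<<"X-Hello">>, <<"world">>}]}}
%   ]})
%   %% => #extern_resp_args{code = 200,
%   %%                      json = {[{<<"ok">>, true}]},
%   %%                      ctype = "application/json",
%   %%                      headers = [{"X-Hello", "world"}]}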
default_or_content_type(DefaultContentType, Headers) ->
IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
case lists:any(IsContentType, Headers) of
- false ->
- [{"Content-Type", DefaultContentType} | Headers];
- true ->
- Headers
+ false ->
+ [{"Content-Type", DefaultContentType} | Headers];
+ true ->
+ Headers
end.
diff --git a/src/chttpd/src/chttpd_handlers.erl b/src/chttpd/src/chttpd_handlers.erl
index d46875d75..790638830 100644
--- a/src/chttpd/src/chttpd_handlers.erl
+++ b/src/chttpd/src/chttpd_handlers.erl
@@ -45,21 +45,22 @@ handler_info(HttpReq) ->
Default = {'unknown.unknown', #{}},
try
select(collect(handler_info, [Method, PathParts, HttpReq]), Default)
- catch Type:Reason:Stack ->
- ?LOG_ERROR(#{
- what => handler_info_failure,
- result => Type,
- details => Reason,
- stack => Stack
- }),
- couch_log:error("~s :: handler_info failure for ~p : ~p:~p :: ~p", [
+ catch
+ Type:Reason:Stack ->
+ ?LOG_ERROR(#{
+ what => handler_info_failure,
+ result => Type,
+ details => Reason,
+ stack => Stack
+ }),
+ couch_log:error("~s :: handler_info failure for ~p : ~p:~p :: ~p", [
?MODULE,
get(nonce),
Type,
Reason,
Stack
]),
- Default
+ Default
end.
%% ------------------------------------------------------------------
@@ -84,9 +85,9 @@ select(Handlers, _Default) ->
do_select([], Acc) ->
Acc;
-do_select([{override, Handler}|_], _Acc) ->
+do_select([{override, Handler} | _], _Acc) ->
[Handler];
-do_select([{default, _}|Rest], Acc) ->
+do_select([{default, _} | Rest], Acc) ->
do_select(Rest, Acc);
do_select([Handler], Acc) ->
[Handler | Acc];
@@ -100,14 +101,16 @@ select_override_test() ->
?assertEqual(selected, select([{override, selected}, foo], default)),
?assertEqual(selected, select([foo, {override, selected}], default)),
?assertEqual(selected, select([{override, selected}, {override, bar}], default)),
- ?assertError({badmatch,[bar, foo]}, select([foo, bar], default)).
+ ?assertError({badmatch, [bar, foo]}, select([foo, bar], default)).
select_default_override_test() ->
?assertEqual(selected, select([{default, new_default}, selected], old_default)),
?assertEqual(selected, select([selected, {default, new_default}], old_default)),
?assertEqual(selected, select([{default, selected}], old_default)),
?assertEqual(selected, select([], selected)),
- ?assertEqual(selected,
- select([{default, new_default}, {override, selected}, bar], old_default)).
+ ?assertEqual(
+ selected,
+ select([{default, new_default}, {override, selected}, bar], old_default)
+ ).
-endif.
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index e5374b1b6..c8a399c7b 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -22,135 +22,108 @@
not_implemented/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
-url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
-url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
-url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
-url_handler(<<"_deleted_dbs">>) -> fun chttpd_misc:handle_deleted_dbs_req/1;
-url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
-url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
-url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
-url_handler(<<"_node">>) -> fun chttpd_node:handle_node_req/1;
+url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
+url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
+url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
+url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
+url_handler(<<"_deleted_dbs">>) -> fun chttpd_misc:handle_deleted_dbs_req/1;
+url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
+url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
+url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
+url_handler(<<"_node">>) -> fun chttpd_node:handle_node_req/1;
url_handler(<<"_reload_query_servers">>) -> fun chttpd_misc:handle_reload_query_servers_req/1;
-url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
-url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
-url_handler(<<"_session">>) -> fun chttpd_auth:handle_session_req/1;
-url_handler(<<"_up">>) -> fun chttpd_misc:handle_up_req/1;
-url_handler(<<"_membership">>) -> fun ?MODULE:not_supported/1;
-url_handler(<<"_reshard">>) -> fun ?MODULE:not_supported/1;
-url_handler(<<"_db_updates">>) -> fun ?MODULE:not_implemented/1;
-url_handler(<<"_cluster_setup">>) -> fun ?MODULE:not_implemented/1;
+url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
+url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
+url_handler(<<"_session">>) -> fun chttpd_auth:handle_session_req/1;
+url_handler(<<"_up">>) -> fun chttpd_misc:handle_up_req/1;
+url_handler(<<"_membership">>) -> fun ?MODULE:not_supported/1;
+url_handler(<<"_reshard">>) -> fun ?MODULE:not_supported/1;
+url_handler(<<"_db_updates">>) -> fun ?MODULE:not_implemented/1;
+url_handler(<<"_cluster_setup">>) -> fun ?MODULE:not_implemented/1;
url_handler(_) -> no_match.
db_handler(<<"_view_cleanup">>) -> fun chttpd_db:handle_view_cleanup_req/2;
-db_handler(<<"_compact">>) -> fun chttpd_db:handle_compact_req/2;
-db_handler(<<"_design">>) -> fun chttpd_db:handle_design_req/2;
-db_handler(<<"_partition">>) -> fun ?MODULE:not_implemented/2;
-db_handler(<<"_temp_view">>) -> fun ?MODULE:not_supported/2;
-db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
-db_handler(<<"_purge">>) -> fun ?MODULE:not_implemented/2;
+db_handler(<<"_compact">>) -> fun chttpd_db:handle_compact_req/2;
+db_handler(<<"_design">>) -> fun chttpd_db:handle_design_req/2;
+db_handler(<<"_partition">>) -> fun ?MODULE:not_implemented/2;
+db_handler(<<"_temp_view">>) -> fun ?MODULE:not_supported/2;
+db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
+db_handler(<<"_purge">>) -> fun ?MODULE:not_implemented/2;
db_handler(<<"_purged_infos_limit">>) -> fun ?MODULE:not_implemented/2;
-db_handler(<<"_shards">>) -> fun ?MODULE:not_supported/2;
-db_handler(<<"_sync_shards">>) -> fun ?MODULE:not_supported/2;
+db_handler(<<"_shards">>) -> fun ?MODULE:not_supported/2;
+db_handler(<<"_sync_shards">>) -> fun ?MODULE:not_supported/2;
db_handler(_) -> no_match.
-design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
-design_handler(<<"_show">>) -> fun ?MODULE:not_supported/3;
-design_handler(<<"_list">>) -> fun ?MODULE:not_supported/3;
-design_handler(<<"_update">>) -> fun chttpd_show:handle_doc_update_req/3;
-design_handler(<<"_info">>) -> fun chttpd_db:handle_design_info_req/3;
+design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
+design_handler(<<"_show">>) -> fun ?MODULE:not_supported/3;
+design_handler(<<"_list">>) -> fun ?MODULE:not_supported/3;
+design_handler(<<"_update">>) -> fun chttpd_show:handle_doc_update_req/3;
+design_handler(<<"_info">>) -> fun chttpd_db:handle_design_info_req/3;
design_handler(<<"_rewrite">>) -> fun ?MODULE:not_supported/3;
design_handler(_) -> no_match.
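% Dispatch sketch: the first path segment selects a handler fun, and
% no_match lets the request fall through to the generic db/doc handlers:
%
%   Handler = url_handler(<<"_uuids">>),
%   %% Handler =:= fun chttpd_misc:handle_uuids_req/1
%   no_match = url_handler(<<"some-db-name">>)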
-
handler_info('GET', [], _) ->
{'welcome_message.read', #{}};
-
handler_info('GET', [<<"_active_tasks">>], _) ->
{'active_tasks.read', #{}};
-
handler_info('GET', [<<"_all_dbs">>], _) ->
{'all_dbs.read', #{}};
-
handler_info('GET', [<<"_deleted_dbs">>], _) ->
{'account-deleted-dbs.read', #{}};
-
handler_info('POST', [<<"_deleted_dbs">>], _) ->
{'account-deleted-dbs.undelete', #{}};
-
handler_info('DELETE', [<<"_deleted_dbs">>, Db], _) ->
{'account-deleted-dbs.delete', #{'db.name' => Db}};
-
handler_info('POST', [<<"_dbs_info">>], _) ->
{'dbs_info.read', #{}};
-
handler_info('GET', [<<"_node">>, <<"_local">>], _) ->
{'node.name.read', #{}};
-
handler_info(Method, [<<"_node">>, <<"_local">> | Rest], HttpReq) ->
handler_info(Method, [<<"_node">>, node() | Rest], HttpReq);
-
handler_info('GET', [<<"_node">>, Node, <<"_config">>], _) ->
{'node.config.all.read', #{node => Node}};
-
handler_info('GET', [<<"_node">>, Node, <<"_config">>, Section], _) ->
{'node.config.section.read', #{node => Node, 'config.section' => Section}};
-
handler_info('GET', [<<"_node">>, Node, <<"_config">>, Section, Key], _) ->
{'node.config.key.read', #{
node => Node,
'config.section' => Section,
'config.key' => Key
}};
-
handler_info('PUT', [<<"_node">>, Node, <<"_config">>, Section, Key], _) ->
{'node.config.key.write', #{
node => Node,
'config.section' => Section,
'config.key' => Key
}};
-
handler_info('DELETE', [<<"_node">>, Node, <<"_config">>, Section, Key], _) ->
{'node.config.key.delete', #{
node => Node,
'config.section' => Section,
'config.key' => Key
}};
-
handler_info('GET', [<<"_node">>, Node, <<"_stats">> | Path], _) ->
{'node.stats.read', #{node => Node, 'stat.path' => Path}};
-
handler_info('GET', [<<"_node">>, Node, <<"_system">>], _) ->
{'node.system.read', #{node => Node}};
-
handler_info('POST', [<<"_node">>, Node, <<"_restart">>], _) ->
{'node.restart.execute', #{node => Node}};
-
handler_info('POST', [<<"_reload_query_servers">>], _) ->
{'query_servers.reload', #{}};
-
handler_info('POST', [<<"_replicate">>], _) ->
{'replication.create', #{}};
-
handler_info('GET', [<<"_scheduler">>, <<"jobs">>], _) ->
{'replication.jobs.read', #{}};
-
handler_info('GET', [<<"_scheduler">>, <<"jobs">>, JobId], _) ->
{'replication.job.read', #{'job.id' => JobId}};
-
handler_info('GET', [<<"_scheduler">>, <<"docs">>], _) ->
{'replication.docs.read', #{'db.name' => <<"_replicator">>}};
-
handler_info('GET', [<<"_scheduler">>, <<"docs">>, Db], _) ->
{'replication.docs.read', #{'db.name' => Db}};
-
handler_info('GET', [<<"_scheduler">>, <<"docs">>, Db, DocId], _) ->
{'replication.doc.read', #{'db.name' => Db, 'doc.id' => DocId}};
-
handler_info('GET', [<<"_scheduler">>, <<"docs">> | Path], _) ->
case lists:splitwith(fun(Elem) -> Elem /= <<"_replicator">> end, Path) of
{_, [<<"_replicator">>]} ->
@@ -165,111 +138,87 @@ handler_info('GET', [<<"_scheduler">>, <<"docs">> | Path], _) ->
_ ->
no_match
end;
-
handler_info('GET', [<<"_session">>], _) ->
{'session.read', #{}};
-
handler_info('POST', [<<"_session">>], _) ->
{'session.create', #{}};
-
handler_info('DELETE', [<<"_session">>], _) ->
{'session.delete', #{}};
-
handler_info('GET', [<<"_up">>], _) ->
{'health.read', #{}};
-
handler_info('GET', [<<"_utils">> | Path], _) ->
{'utils.read', #{'file.path' => filename:join(Path)}};
-
handler_info('GET', [<<"_uuids">>], _) ->
{'uuids.read', #{}};
-
handler_info('GET', [<<"favicon.ico">>], _) ->
{'favicon.ico.read', #{}};
-
-
-handler_info(Method, [<<"_", _/binary>> = Part| Rest], Req) ->
+handler_info(Method, [<<"_", _/binary>> = Part | Rest], Req) ->
% Maybe bail here so that we don't trample over a
% different url_handler plugin. However, we continue
% on for known system databases.
- DbName = case Part of
- <<"_dbs">> -> '_dbs';
- <<"_metadata">> -> '_metadata';
- <<"_nodes">> -> '_nodes';
- <<"_replicator">> -> '_replicator';
- <<"_users">> -> '_users';
- _ -> no_match
- end,
- if DbName == no_match -> no_match; true ->
- handler_info(Method, [DbName | Rest], Req)
+ DbName =
+ case Part of
+ <<"_dbs">> -> '_dbs';
+ <<"_metadata">> -> '_metadata';
+ <<"_nodes">> -> '_nodes';
+ <<"_replicator">> -> '_replicator';
+ <<"_users">> -> '_users';
+ _ -> no_match
+ end,
+ if
+ DbName == no_match -> no_match;
+ true -> handler_info(Method, [DbName | Rest], Req)
end;
-
handler_info('GET', [Db], _) ->
{'db.info.read', #{'db.name' => Db}};
-
handler_info('PUT', [Db], _) ->
{'db.create', #{'db.name' => Db}};
-
handler_info('POST', [Db], _) ->
{'db.doc.write', #{'db.name' => Db}};
-
handler_info('DELETE', [Db], _) ->
{'db.delete', #{'db.name' => Db}};
-
handler_info(M, [Db, <<"_all_docs">>], _) when M == 'GET'; M == 'POST' ->
{'db.all_docs.read', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_all_docs">>, <<"queries">>], _) ->
{'db.all_docs.read', #{'db.name' => Db, multi => true}};
-
handler_info('POST', [Db, <<"_bulk_docs">>], _) ->
{'db.docs.write', #{'db.name' => Db, bulk => true}};
-
handler_info('POST', [Db, <<"_bulk_get">>], _) ->
{'db.docs.read', #{'db.name' => Db, bulk => true}};
-
handler_info('GET', [Db, <<"_changes">>], _) ->
{'db.changes.read', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_changes">>], _) ->
{'db.changes.read', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_compact">>], _) ->
{'db.compact.execute', #{'db.name' => Db}};
-
handler_info('GET', [Db, <<"_design">>, Name], _) ->
{'db.design.doc.read', #{'db.name' => Db, 'design.id' => Name}};
-
handler_info('POST', [Db, <<"_design">>, Name], _) ->
{'db.design.doc.write', #{'db.name' => Db, 'design.id' => Name}};
-
handler_info('PUT', [Db, <<"_design">>, Name], _) ->
{'db.design.doc.write', #{'db.name' => Db, 'design.id' => Name}};
-
handler_info('COPY', [Db, <<"_design">>, Name], Req) ->
{'db.design.doc.write', #{
'db.name' => Db,
'design.id' => get_copy_destination(Req),
'copy.source.doc.id' => <<"_design/", Name/binary>>
}};
-
handler_info('DELETE', [Db, <<"_design">>, Name], _) ->
{'db.design.doc.delete', #{'db.name' => Db, 'design.id' => Name}};
-
handler_info('GET', [Db, <<"_design">>, Name, <<"_info">>], _) ->
{'db.design.info.read', #{'db.name' => Db, 'design.id' => Name}};
-
-handler_info(M, [Db, <<"_design">>, Name, <<"_list">>, List, View], _)
- when M == 'GET'; M == 'POST'; M == 'OPTIONS' ->
+handler_info(M, [Db, <<"_design">>, Name, <<"_list">>, List, View], _) when
+ M == 'GET'; M == 'POST'; M == 'OPTIONS'
+->
{'db.design.list.read', #{
'db.name' => Db,
'design.id' => Name,
'design.list.name' => List,
'design.view.name' => View
}};
-
-handler_info(M, [Db, <<"_design">>, Name, <<"_list">>, List, Design, View], _)
- when M == 'GET'; M == 'POST'; M == 'OPTIONS' ->
+handler_info(M, [Db, <<"_design">>, Name, <<"_list">>, List, Design, View], _) when
+ M == 'GET'; M == 'POST'; M == 'OPTIONS'
+->
{'db.design.list.read', #{
'db.name' => Db,
'design.id' => Name,
@@ -277,14 +226,12 @@ handler_info(M, [Db, <<"_design">>, Name, <<"_list">>, List, Design, View], _)
'design.view.source.id' => Design,
'design.view.name' => View
}};
-
handler_info(_, [Db, <<"_design">>, Name, <<"_rewrite">> | Path], _) ->
{'db.design.rewrite.execute', #{
'db.name' => Db,
'design.id' => Name,
'rewrite.path' => filename:join(Path)
}};
-
handler_info(_, [Db, <<"_design">>, Name, <<"_show">>, Show, DocId], _) ->
{'db.design.show.execute', #{
'db.name' => Db,
@@ -292,242 +239,203 @@ handler_info(_, [Db, <<"_design">>, Name, <<"_show">>, Show, DocId], _) ->
'design.show.name' => Show,
'design.show.doc.id' => DocId
}};
-
handler_info(_, [Db, <<"_design">>, Name, <<"_update">>, Update | Rest], _) ->
BaseTags = #{
'db.name' => Db,
'design.id' => Name,
'design.update.name' => Update
},
- Tags = case Rest of
- [] ->
- BaseTags;
- _ ->
- DocId = filename:join(Rest),
- maps:put('design.update.doc.id', DocId, BaseTags)
- end,
+ Tags =
+ case Rest of
+ [] ->
+ BaseTags;
+ _ ->
+ DocId = filename:join(Rest),
+ maps:put('design.update.doc.id', DocId, BaseTags)
+ end,
{'db.design.update.execute', Tags};
-
handler_info('POST', [Db, <<"_design">>, Name, <<"_view">>, View, <<"queries">>], _) ->
{'db.design.view.multi.read', #{
'db.name' => Db,
'design.id' => Name,
'design.view.name' => View
}};
-
-handler_info(M, [Db, <<"_design">>, Name, <<"_view">>, View], _)
- when M == 'GET'; M == 'POST' ->
+handler_info(M, [Db, <<"_design">>, Name, <<"_view">>, View], _) when
+ M == 'GET'; M == 'POST'
+->
{'db.design.view.read', #{
'db.name' => Db,
'design.id' => Name,
'design.view.name' => View
}};
-
handler_info(_, [_Db, <<"_design">>, _Name, <<"_", _/binary>> | _], _) ->
% Bail here so that we don't treat a plugin
% design handler in place of a design attachment
no_match;
-
handler_info('GET', [Db, <<"_design">>, Name | Path], _) ->
{'db.design.doc.attachment.read', #{
'db.name' => Db,
'design.id' => Name,
'attachment.name' => filename:join(Path)
}};
-
handler_info('PUT', [Db, <<"_design">>, Name | Path], _) ->
{'db.design.doc.attachment.write', #{
'db.name' => Db,
'design.id' => Name,
'attachment.name' => filename:join(Path)
}};
-
handler_info('DELETE', [Db, <<"_design">>, Name | Path], _) ->
{'db.design.doc.attachment.delete', #{
'db.name' => Db,
'design.id' => Name,
'attachment.name' => filename:join(Path)
}};
-
handler_info(_, [Db, <<"_design/", Name/binary>> | Rest], Req) ->
% Recurse if someone sent us `_design%2Fname`
chttpd_handlers:handler_info(Req#httpd{
path_parts = [Db, <<"_design">>, Name | Rest]
});
-
handler_info(M, [Db, <<"_design_docs">>], _) when M == 'GET'; M == 'POST' ->
{'db.design_docs.read', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_design_docs">>, <<"queries">>], _) ->
{'db.design_docs.read', #{'db.name' => Db, multi => true}};
-
handler_info('POST', [Db, <<"_ensure_full_commit">>], _) ->
{'db.ensure_full_commit.execute', #{'db.name' => Db}};
-
handler_info('GET', [Db, <<"_local">>, Name], _) ->
{'db.local.doc.read', #{'db.name' => Db, 'local.id' => Name}};
-
handler_info('POST', [Db, <<"_local">>, Name], _) ->
{'db.local.doc.write', #{'db.name' => Db, 'local.id' => Name}};
-
handler_info('PUT', [Db, <<"_local">>, Name], _) ->
{'db.local.doc.write', #{'db.name' => Db, 'local.id' => Name}};
-
handler_info('COPY', [Db, <<"_local">>, Name], Req) ->
{'db.local.doc.write', #{
'db.name' => Db,
'local.id' => get_copy_destination(Req),
'copy.source.doc.id' => <<"_local/", Name/binary>>
}};
-
handler_info('DELETE', [Db, <<"_local">>, Name], _) ->
{'db.local.doc.delete', #{'db.name' => Db, 'local.id' => Name}};
-
handler_info(_, [Db, <<"_local">>, Name | _Path], _) ->
{'db.local.doc.invalid_attachment_req', #{
'db.name' => Db,
'local.id' => Name
}};
-
handler_info(M, [Db, <<"_local_docs">>], _) when M == 'GET'; M == 'POST' ->
{'db.local_docs.read', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_local_docs">>, <<"queries">>], _) ->
{'db.local_docs.read', #{'db.name' => Db, multi => true}};
-
handler_info('POST', [Db, <<"_missing_revs">>], _) ->
{'db.docs.missing_revs.execute', #{'db.name' => Db}};
-
handler_info('GET', [Db, <<"_partition">>, Partition], _) ->
{'db.partition.info.read', #{'db.name' => Db, partition => Partition}};
-
handler_info(_, [Db, <<"_partition">>, Partition | Rest], Req) ->
- NewPath = case Rest of
- [<<"_all_docs">> | _] ->
- [Db | Rest];
- [<<"_index">> | _] ->
- [Db | Rest];
- [<<"_find">> | _] ->
- [Db | Rest];
- [<<"_explain">> | _] ->
- [Db | Rest];
- [<<"_design">>, _Name, <<"_", _/binary>> | _] ->
- [Db | Rest];
- _ ->
- no_match
- end,
- if NewPath == no_match -> no_match; true ->
- {OpName, Tags} = chttpd_handlers:handler_info(Req#httpd{
- path_parts = NewPath
- }),
- NewOpName = case atom_to_list(OpName) of
- "db." ++ Name -> list_to_atom("db.partition." ++ Name);
- Else -> list_to_atom(Else ++ ".partition")
+ NewPath =
+ case Rest of
+ [<<"_all_docs">> | _] ->
+ [Db | Rest];
+ [<<"_index">> | _] ->
+ [Db | Rest];
+ [<<"_find">> | _] ->
+ [Db | Rest];
+ [<<"_explain">> | _] ->
+ [Db | Rest];
+ [<<"_design">>, _Name, <<"_", _/binary>> | _] ->
+ [Db | Rest];
+ _ ->
+ no_match
end,
- {NewOpName, maps:put(partition, Partition, Tags)}
+ if
+ NewPath == no_match ->
+ no_match;
+ true ->
+ {OpName, Tags} = chttpd_handlers:handler_info(Req#httpd{
+ path_parts = NewPath
+ }),
+ NewOpName =
+ case atom_to_list(OpName) of
+ "db." ++ Name -> list_to_atom("db.partition." ++ Name);
+ Else -> list_to_atom(Else ++ ".partition")
+ end,
+ {NewOpName, maps:put(partition, Partition, Tags)}
end;
-
handler_info('POST', [Db, <<"_purge">>], _) ->
{'db.docs.purge', #{'db.name' => Db}};
-
handler_info('GET', [Db, <<"_purged_infos_limit">>], _) ->
{'db.purged_infos_limit.read', #{'db.name' => Db}};
-
handler_info('PUT', [Db, <<"_purged_infos_limit">>], _) ->
{'db.purged_infos_limit.write', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_revs_diff">>], _) ->
{'db.docs.revs_diff.execute', #{'db.name' => Db}};
-
handler_info('GET', [Db, <<"_revs_limit">>], _) ->
{'db.revs_limit.read', #{'db.name' => Db}};
-
handler_info('PUT', [Db, <<"_revs_limit">>], _) ->
{'db.revs_limit.write', #{'db.name' => Db}};
-
handler_info('GET', [Db, <<"_security">>], _) ->
{'db.security.read', #{'db.name' => Db}};
-
handler_info('PUT', [Db, <<"_security">>], _) ->
{'db.security.write', #{'db.name' => Db}};
-
handler_info(_, [Db, <<"_view_cleanup">>], _) ->
{'views.cleanup.execute', #{'db.name' => Db}};
-
handler_info(_, [_Db, <<"_", _/binary>> | _], _) ->
% Bail here for other possible db_handlers
no_match;
-
handler_info('GET', [Db, DocId], _) ->
{'db.doc.read', #{'db.name' => Db, 'doc.id' => DocId}};
-
handler_info('POST', [Db, DocId], _) ->
    {'db.doc.write', #{'db.name' => Db, 'doc.id' => DocId}};
-
handler_info('PUT', [Db, DocId], _) ->
    {'db.doc.write', #{'db.name' => Db, 'doc.id' => DocId}};
-
handler_info('COPY', [Db, DocId], Req) ->
{'db.doc.write', #{
'db.name' => Db,
'doc.id' => get_copy_destination(Req),
'copy.source.doc.id' => DocId
}};
-
handler_info('DELETE', [Db, DocId], _) ->
{'db.doc.delete', #{'db.name' => Db, 'doc.id' => DocId}};
-
handler_info('GET', [Db, DocId | Path], _) ->
{'db.doc.attachment.read', #{
'db.name' => Db,
'doc.id' => DocId,
'attachment.name' => filename:join(Path)
}};
-
handler_info('PUT', [Db, DocId | Path], _) ->
{'db.doc.attachment.write', #{
'db.name' => Db,
'doc.id' => DocId,
'attachment.name' => filename:join(Path)
}};
-
handler_info('DELETE', [Db, DocId | Path], _) ->
{'db.doc.attachment.delete', #{
'db.name' => Db,
'doc.id' => DocId,
'attachment.name' => filename:join(Path)
}};
-
handler_info(_, _, _) ->
no_match.
-
get_copy_destination(Req) ->
try
{DocIdStr, _} = chttpd_util:parse_copy_destination_header(Req),
list_to_binary(mochiweb_util:unquote(DocIdStr))
- catch _:_ ->
- unknown
+ catch
+ _:_ ->
+ unknown
end.
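% COPY requests carry the target doc id in the Destination header; any
% parse failure degrades to the atom unknown instead of erroring. Sketch:
%
%   %% COPY /db/doc1  with  Destination: doc2
%   %% => get_copy_destination(Req) =:= <<"doc2">>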
-
not_supported(#httpd{} = Req, Db, _DDoc) ->
not_supported(Req, Db).
-
not_supported(#httpd{} = Req, _Db) ->
not_supported(Req).
-
not_supported(#httpd{} = Req) ->
Msg = <<"resource is not supported in CouchDB >= 4.x">>,
chttpd:send_error(Req, 410, gone, Msg).
-
not_implemented(#httpd{} = Req, _Db) ->
not_implemented(Req).
-
not_implemented(#httpd{} = Req) ->
Msg = <<"resource is not implemented">>,
chttpd:send_error(Req, 501, not_implemented, Msg).
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index f8e47a2cb..5ed7e6052 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -32,9 +32,15 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_views/include/couch_views.hrl").
--import(chttpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- send_chunk/2,start_chunked_response/3]).
+-import(
+ chttpd,
+ [
+ send_json/2, send_json/3, send_json/4,
+ send_method_not_allowed/2,
+ send_chunk/2,
+ start_chunked_response/3
+ ]
+).
-define(MAX_DB_NUM_FOR_DBS_INFO, 100).
@@ -43,19 +49,21 @@
handle_welcome_req(Req) ->
handle_welcome_req(Req, <<"Welcome">>).
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
- send_json(Req, {[
- {couchdb, WelcomeMessage},
- {version, list_to_binary(couch_server:get_version())},
- {git_sha, list_to_binary(couch_server:get_git_sha())},
- {uuid, couch_server:get_uuid()},
- {features, get_features()}
- ] ++ case config:get("vendor") of
- [] ->
- [];
- Properties ->
- [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
- end
+handle_welcome_req(#httpd{method = 'GET'} = Req, WelcomeMessage) ->
+ send_json(Req, {
+ [
+ {couchdb, WelcomeMessage},
+ {version, list_to_binary(couch_server:get_version())},
+ {git_sha, list_to_binary(couch_server:get_git_sha())},
+ {uuid, couch_server:get_uuid()},
+ {features, get_features()}
+ ] ++
+ case config:get("vendor") of
+ [] ->
+ [];
+ Properties ->
+ [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
+ end
});
handle_welcome_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
@@ -66,7 +74,7 @@ get_features() ->
handle_favicon_req(Req) ->
handle_favicon_req(Req, get_docroot()).
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+handle_favicon_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
{DateNow, TimeNow} = calendar:universal_time(),
DaysNow = calendar:date_to_gregorian_days(DateNow),
DaysWhenExpires = DaysNow + 365,
@@ -83,32 +91,33 @@ handle_favicon_req(Req, _) ->
handle_utils_dir_req(Req) ->
handle_utils_dir_req(Req, get_docroot()).
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+handle_utils_dir_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
"/" ++ UrlPath = chttpd:path(Req),
case chttpd:partition(UrlPath) of
- {_ActionKey, "/", RelativePath} ->
- % GET /_utils/path or GET /_utils/
- CachingHeaders = [{"Cache-Control", "private, must-revalidate"}],
- EnableCsp = config:get("csp", "enable", "true"),
- Headers = maybe_add_csp_headers(CachingHeaders, EnableCsp),
- chttpd:serve_file(Req, RelativePath, DocumentRoot, Headers);
- {_ActionKey, "", _RelativePath} ->
- % GET /_utils
- RedirectPath = chttpd:path(Req) ++ "/",
- chttpd:send_redirect(Req, RedirectPath)
+ {_ActionKey, "/", RelativePath} ->
+ % GET /_utils/path or GET /_utils/
+ CachingHeaders = [{"Cache-Control", "private, must-revalidate"}],
+ EnableCsp = config:get("csp", "enable", "true"),
+ Headers = maybe_add_csp_headers(CachingHeaders, EnableCsp),
+ chttpd:serve_file(Req, RelativePath, DocumentRoot, Headers);
+ {_ActionKey, "", _RelativePath} ->
+ % GET /_utils
+ RedirectPath = chttpd:path(Req) ++ "/",
+ chttpd:send_redirect(Req, RedirectPath)
end;
handle_utils_dir_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
maybe_add_csp_headers(Headers, "true") ->
- DefaultValues = "child-src 'self' data: blob:; default-src 'self'; img-src 'self' data:; font-src 'self'; "
- "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
+ DefaultValues =
+ "child-src 'self' data: blob:; default-src 'self'; img-src 'self' data:; font-src 'self'; "
+ "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
Value = config:get("csp", "header_value", DefaultValues),
[{"Content-Security-Policy", Value} | Headers];
maybe_add_csp_headers(Headers, _) ->
Headers.
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
+handle_all_dbs_req(#httpd{method = 'GET'} = Req) ->
#mrargs{
start_key = StartKey,
end_key = EndKey,
@@ -127,32 +136,32 @@ handle_all_dbs_req(#httpd{method='GET'}=Req) ->
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200, []),
Callback = fun all_dbs_callback/2,
- Acc = #vacc{req=Req,resp=Resp},
+ Acc = #vacc{req = Req, resp = Resp},
{ok, Acc1} = fabric2_db:list_dbs(Callback, Acc, Options),
{ok, Acc1#vacc.resp};
handle_all_dbs_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
+all_dbs_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
- {ok, Acc#vacc{resp=Resp1}};
-all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
+ {ok, Acc#vacc{resp = Resp1}};
+all_dbs_callback({row, Row}, #vacc{resp = Resp0} = Acc) ->
Prepend = couch_views_http_util:prepend_val(Acc),
DbName = couch_util:get_value(id, Row),
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
- {ok, Acc#vacc{prepend=",", resp=Resp1}};
-all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
+ {ok, Acc#vacc{prepend = ",", resp = Resp1}};
+all_dbs_callback(complete, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
{ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp=Resp2}};
-all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
+ {ok, Acc#vacc{resp = Resp2}};
+all_dbs_callback({error, Reason}, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
- {ok, Acc#vacc{resp=Resp1}}.
+ {ok, Acc#vacc{resp = Resp1}}.
handle_dbs_info_req(#httpd{method = 'GET'} = Req) ->
ok = chttpd:verify_is_server_admin(Req),
send_db_infos(Req, list_dbs_info);
-handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
+handle_dbs_info_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
chttpd:validate_ctype(Req, "application/json"),
Props = chttpd:json_body_obj(Req),
Keys = couch_views_util:get_view_keys(Props),
@@ -160,35 +169,44 @@ handle_dbs_info_req(#httpd{method='POST', user_ctx=UserCtx}=Req) ->
undefined -> throw({bad_request, "`keys` member must exist."});
_ -> ok
end,
- MaxNumber = config:get_integer("chttpd",
- "max_db_number_for_dbs_info_req", ?MAX_DB_NUM_FOR_DBS_INFO),
+ MaxNumber = config:get_integer(
+ "chttpd",
+ "max_db_number_for_dbs_info_req",
+ ?MAX_DB_NUM_FOR_DBS_INFO
+ ),
case length(Keys) =< MaxNumber of
true -> ok;
false -> throw({bad_request, too_many_keys})
end,
{ok, Resp} = chttpd:start_json_response(Req, 200),
send_chunk(Resp, "["),
- lists:foldl(fun(DbName, AccSeparator) ->
- try
- {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
- {ok, Info} = fabric2_db:get_db_info(Db),
- Json = ?JSON_ENCODE({[{key, DbName}, {info, {Info}}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- catch error:database_does_not_exist ->
- ErrJson = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
- send_chunk(Resp, AccSeparator ++ ErrJson)
+ lists:foldl(
+ fun(DbName, AccSeparator) ->
+ try
+ {ok, Db} = fabric2_db:open(DbName, [{user_ctx, UserCtx}]),
+ {ok, Info} = fabric2_db:get_db_info(Db),
+ Json = ?JSON_ENCODE({[{key, DbName}, {info, {Info}}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ catch
+ error:database_does_not_exist ->
+ ErrJson = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
+ send_chunk(Resp, AccSeparator ++ ErrJson)
+ end,
+ % AccSeparator now has a comma
+ ","
end,
- "," % AccSeparator now has a comma
- end, "", Keys),
+ "",
+ Keys
+ ),
send_chunk(Resp, "]"),
chttpd:end_json_response(Resp);
handle_dbs_info_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD,POST").
-handle_deleted_dbs_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+handle_deleted_dbs_req(#httpd{method = 'GET', path_parts = [_]} = Req) ->
ok = chttpd:verify_is_server_admin(Req),
send_db_infos(Req, list_deleted_dbs_info);
-handle_deleted_dbs_req(#httpd{method='POST', user_ctx=Ctx, path_parts=[_]}=Req) ->
+handle_deleted_dbs_req(#httpd{method = 'POST', user_ctx = Ctx, path_parts = [_]} = Req) ->
couch_httpd:verify_is_server_admin(Req),
chttpd:validate_ctype(Req, "application/json"),
GetJSON = fun(Key, Props, Default) ->
@@ -218,16 +236,17 @@ handle_deleted_dbs_req(#httpd{method='POST', user_ctx=Ctx, path_parts=[_]}=Req)
Error ->
throw(Error)
end;
-handle_deleted_dbs_req(#httpd{path_parts = PP}=Req) when length(PP) == 1 ->
+handle_deleted_dbs_req(#httpd{path_parts = PP} = Req) when length(PP) == 1 ->
send_method_not_allowed(Req, "GET,HEAD,POST");
-handle_deleted_dbs_req(#httpd{method='DELETE', user_ctx=Ctx, path_parts=[_, DbName]}=Req) ->
+handle_deleted_dbs_req(#httpd{method = 'DELETE', user_ctx = Ctx, path_parts = [_, DbName]} = Req) ->
couch_httpd:verify_is_server_admin(Req),
- TS = case ?JSON_DECODE(couch_httpd:qs_value(Req, "timestamp", "null")) of
- null ->
- throw({bad_request, "`timestamp` parameter is not provided."});
- TS0 ->
- TS0
- end,
+ TS =
+ case ?JSON_DECODE(couch_httpd:qs_value(Req, "timestamp", "null")) of
+ null ->
+ throw({bad_request, "`timestamp` parameter is not provided."});
+ TS0 ->
+ TS0
+ end,
case fabric2_db:delete(DbName, [{user_ctx, Ctx}, {deleted_at, TS}]) of
ok ->
send_json(Req, 200, {[{ok, true}]});
@@ -236,7 +255,7 @@ handle_deleted_dbs_req(#httpd{method='DELETE', user_ctx=Ctx, path_parts=[_, DbNa
Error ->
throw(Error)
end;
-handle_deleted_dbs_req(#httpd{path_parts = PP}=Req) when length(PP) == 2 ->
+handle_deleted_dbs_req(#httpd{path_parts = PP} = Req) when length(PP) == 2 ->
send_method_not_allowed(Req, "HEAD,DELETE");
handle_deleted_dbs_req(Req) ->
chttpd:send_error(Req, not_found).
@@ -287,14 +306,14 @@ dbs_info_callback({error, Reason}, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
{ok, Acc#vacc{resp = Resp1}}.
-handle_task_status_req(#httpd{method='GET'}=Req) ->
+handle_task_status_req(#httpd{method = 'GET'} = Req) ->
ok = chttpd:verify_is_server_admin(Req),
ActiveTasks = fabric2_active_tasks:get_active_tasks(),
send_json(Req, ActiveTasks);
handle_task_status_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-handle_replicate_req(#httpd{method='POST', user_ctx=Ctx, req_body=PostBody} = Req) ->
+handle_replicate_req(#httpd{method = 'POST', user_ctx = Ctx, req_body = PostBody} = Req) ->
chttpd:validate_ctype(Req, "application/json"),
%% see HACK in chttpd.erl about replication
case couch_replicator:replicate(PostBody, Ctx) of
@@ -306,41 +325,43 @@ handle_replicate_req(#httpd{method='POST', user_ctx=Ctx, req_body=PostBody} = Re
send_json(Req, maps:merge(#{<<"ok">> => true}, JsonResults));
{ok, stopped} ->
send_json(Req, 200, {[{ok, stopped}]});
- {error, not_found=Error} ->
+ {error, not_found = Error} ->
chttpd:send_error(Req, Error);
{error, #{<<"error">> := Err, <<"reason">> := Reason}} when
- is_binary(Err), is_binary(Reason) ->
+ is_binary(Err), is_binary(Reason)
+ ->
% Safe to use binary_to_atom since this is only built
% from couch_replicator_jobs:error_info/1
chttpd:send_error(Req, {binary_to_atom(Err, utf8), Reason});
- {error, {_, _}=Error} ->
+ {error, {_, _} = Error} ->
chttpd:send_error(Req, Error);
- {_, _}=Error ->
+ {_, _} = Error ->
chttpd:send_error(Req, Error)
end;
handle_replicate_req(Req) ->
send_method_not_allowed(Req, "POST").
-
-handle_reload_query_servers_req(#httpd{method='POST'}=Req) ->
+handle_reload_query_servers_req(#httpd{method = 'POST'} = Req) ->
chttpd:validate_ctype(Req, "application/json"),
ok = couch_proc_manager:reload(),
send_json(Req, 200, {[{ok, true}]});
handle_reload_query_servers_req(Req) ->
send_method_not_allowed(Req, "POST").
-handle_uuids_req(#httpd{method='GET'}=Req) ->
+handle_uuids_req(#httpd{method = 'GET'} = Req) ->
Max = config:get_integer("uuids", "max_count", 1000),
- Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
- N when N > Max ->
- throw({bad_request, <<"count parameter too large">>});
- N when N < 0 ->
- throw({bad_request, <<"count must be a positive integer">>});
- N -> N
- catch
- error:badarg ->
- throw({bad_request, <<"count must be a positive integer">>})
- end,
+ Count =
+ try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
+ N when N > Max ->
+ throw({bad_request, <<"count parameter too large">>});
+ N when N < 0 ->
+ throw({bad_request, <<"count must be a positive integer">>});
+ N ->
+ N
+ catch
+ error:badarg ->
+ throw({bad_request, <<"count must be a positive integer">>})
+ end,
UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
Etag = couch_httpd:make_etag(UUIDs),
couch_httpd:etag_respond(Req, Etag, fun() ->
@@ -357,21 +378,21 @@ handle_uuids_req(#httpd{method='GET'}=Req) ->
handle_uuids_req(Req) ->
send_method_not_allowed(Req, "GET").
-handle_up_req(#httpd{method='GET'} = Req) ->
+handle_up_req(#httpd{method = 'GET'} = Req) ->
case config:get("couchdb", "maintenance_mode") of
- "true" ->
- send_json(Req, 404, {[{status, maintenance_mode}]});
- "nolb" ->
- send_json(Req, 404, {[{status, nolb}]});
- _ ->
- try
- fabric2_db:list_dbs([{limit, 0}]),
- send_json(Req, 200, {[{status, ok}]})
- catch error:{timeout, _} ->
- send_json(Req, 404, {[{status, backend_unavailable}]})
- end
+ "true" ->
+ send_json(Req, 404, {[{status, maintenance_mode}]});
+ "nolb" ->
+ send_json(Req, 404, {[{status, nolb}]});
+ _ ->
+ try
+ fabric2_db:list_dbs([{limit, 0}]),
+ send_json(Req, 200, {[{status, ok}]})
+ catch
+ error:{timeout, _} ->
+ send_json(Req, 404, {[{status, backend_unavailable}]})
+ end
end;
-
handle_up_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
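
The recurring change in this file is erlfmt's try/catch layout: the clause after catch moves onto its own line as Class:Reason -> Body. A minimal standalone sketch of the resulting shape (hypothetical module, not part of the patch):

    -module(fmt_example).
    -export([safe_count/1]).

    %% erlfmt puts each catch clause on its own line under `catch`.
    safe_count(Str) ->
        try list_to_integer(Str) of
            N when N >= 0 -> N;
            _ -> 0
        catch
            error:badarg ->
                0
        end.
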
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index 54e0e489e..e35781f51 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -21,52 +21,70 @@
-include_lib("couch/include/couch_db.hrl").
--import(chttpd,
- [send_json/2,send_json/3,send_method_not_allowed/2,
- send_chunk/2,start_chunked_response/3]).
+-import(
+ chttpd,
+ [
+ send_json/2, send_json/3,
+ send_method_not_allowed/2,
+ send_chunk/2,
+ start_chunked_response/3
+ ]
+).
% Node-specific request handler (_config and _stats)
% Support _local meaning this node
-handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, <<"_local">>]} = Req) ->
send_json(Req, 200, {[{name, node()}]});
-handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
- handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
+handle_node_req(#httpd{path_parts = [A, <<"_local">> | Rest]} = Req) ->
+ handle_node_req(Req#httpd{path_parts = [A, node()] ++ Rest});
% GET /_node/$node/_config
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
- Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end, dict:new(), call_node(Node, config, all, [])),
- KVs = dict:fold(fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end, [], Grouped),
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>]} = Req) ->
+ Grouped = lists:foldl(
+ fun({{Section, Key}, Value}, Acc) ->
+ case dict:is_key(Section, Acc) of
+ true ->
+ dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+ false ->
+ dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+ end
+ end,
+ dict:new(),
+ call_node(Node, config, all, [])
+ ),
+ KVs = dict:fold(
+ fun(Section, Values, Acc) ->
+ [{list_to_binary(Section), {Values}} | Acc]
+ end,
+ [],
+ Grouped
+ ),
send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>]} = Req) ->
send_method_not_allowed(Req, "GET");
% POST /_node/$node/_config/_reload - Flushes unpersisted config values from RAM
-handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_config">>, <<"_reload">>]}=Req) ->
+handle_node_req(
+ #httpd{method = 'POST', path_parts = [_, Node, <<"_config">>, <<"_reload">>]} = Req
+) ->
case call_node(Node, config, reload, []) of
ok ->
send_json(Req, 200, {[{ok, true}]});
{error, Reason} ->
chttpd:send_error(Req, {bad_request, Reason})
end;
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, <<"_reload">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, <<"_reload">>]} = Req) ->
send_method_not_allowed(Req, "POST");
% GET /_node/$node/_config/Section
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
- KVs = [{list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- call_node(Node, config, get, [Section])],
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>, Section]} = Req) ->
+ KVs = [
+ {list_to_binary(Key), list_to_binary(Value)}
+ || {Key, Value} <- call_node(Node, config, get, [Section])
+ ],
send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section]} = Req) ->
send_method_not_allowed(Req, "GET");
% PUT /_node/$node/_config/Section/Key
% "value"
-handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+handle_node_req(#httpd{method = 'PUT', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req) ->
couch_util:check_config_blacklist(Section),
Value = couch_util:trim(chttpd:json_body(Req)),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
@@ -80,34 +98,36 @@ handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section
chttpd:send_error(Req, {bad_request, Reason})
end;
% GET /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req) ->
case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
+ undefined ->
+ throw({not_found, unknown_config_value});
+ Value ->
+ send_json(Req, 200, list_to_binary(Value))
end;
% DELETE /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+handle_node_req(
+ #httpd{method = 'DELETE', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req
+) ->
couch_util:check_config_blacklist(Section),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- case call_node(Node, config, delete, [Section, Key, Persist]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end
+ undefined ->
+ throw({not_found, unknown_config_value});
+ OldValue ->
+ case call_node(Node, config, delete, [Section, Key, Persist]) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ {error, Reason} ->
+ chttpd:send_error(Req, {bad_request, Reason})
+ end
end;
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section, _Key]} = Req) ->
send_method_not_allowed(Req, "GET,PUT,DELETE");
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section, _Key | _]} = Req) ->
chttpd:send_error(Req, not_found);
% GET /_node/$node/_stats
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_stats">> | Path]} = Req) ->
flush(Node, Req),
Stats0 = call_node(Node, couch_stats, fetch, []),
Stats = couch_stats_httpd:transform_stats(Stats0),
@@ -115,44 +135,45 @@ handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=
EJSON0 = couch_stats_httpd:to_ejson(Nested),
EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
chttpd:send_json(Req, EJSON1);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_stats">>]} = Req) ->
send_method_not_allowed(Req, "GET");
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_prometheus">>]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_prometheus">>]} = Req) ->
Metrics = call_node(Node, couch_prometheus_server, scrape, []),
Version = call_node(Node, couch_prometheus_server, version, []),
- Type = "text/plain; version=" ++ Version,
+ Type = "text/plain; version=" ++ Version,
Header = [{<<"Content-Type">>, ?l2b(Type)}],
chttpd:send_response(Req, 200, Header, Metrics);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_prometheus">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_prometheus">>]} = Req) ->
send_method_not_allowed(Req, "GET");
% GET /_node/$node/_system
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_system">>]} = Req) ->
Stats = call_node(Node, chttpd_node, get_stats, []),
EJSON = couch_stats_httpd:to_ejson(Stats),
send_json(Req, EJSON);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_system">>]} = Req) ->
send_method_not_allowed(Req, "GET");
% POST /_node/$node/_restart
-handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
+handle_node_req(#httpd{method = 'POST', path_parts = [_, Node, <<"_restart">>]} = Req) ->
call_node(Node, init, restart, []),
send_json(Req, 200, {[{ok, true}]});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_restart">>]} = Req) ->
send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{path_parts=[_, _Node | _PathParts]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node | _PathParts]} = Req) ->
    % Local (backend) dbs are not supported any more
chttpd_httpd_handlers:not_supported(Req);
-handle_node_req(#httpd{path_parts=[_]}=Req) ->
+handle_node_req(#httpd{path_parts = [_]} = Req) ->
chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
handle_node_req(Req) ->
chttpd:send_error(Req, not_found).
call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
- Node1 = try
- list_to_existing_atom(?b2l(Node0))
- catch
- error:badarg ->
- throw({not_found, <<"no such node: ", Node0/binary>>})
- end,
+ Node1 =
+ try
+ list_to_existing_atom(?b2l(Node0))
+ catch
+ error:badarg ->
+ throw({not_found, <<"no such node: ", Node0/binary>>})
+ end,
call_node(Node1, Mod, Fun, Args);
call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
case rpc:call(Node, Mod, Fun, Args) of
@@ -172,10 +193,25 @@ flush(Node, Req) ->
end.
get_stats() ->
- Other = erlang:memory(system) - lists:sum([X || {_,X} <-
- erlang:memory([atom, code, binary, ets])]),
- Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
- processes_used, binary, code, ets])],
+ Other =
+ erlang:memory(system) -
+ lists:sum([
+ X
+ || {_, X} <-
+ erlang:memory([atom, code, binary, ets])
+ ]),
+ Memory = [
+ {other, Other}
+ | erlang:memory([
+ atom,
+ atom_used,
+ processes,
+ processes_used,
+ binary,
+ code,
+ ets
+ ])
+ ],
{NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
{{input, Input}, {output, Output}} = statistics(io),
{CF, CDU} = db_pid_stats(),
@@ -221,13 +257,15 @@ db_pid_stats(Mod, Candidates) ->
{Mod, init, 1} ->
case proplists:get_value(message_queue_len, PI) of
undefined -> Acc;
- Len -> [Len|Acc]
+ Len -> [Len | Acc]
end;
- _ ->
+ _ ->
Acc
end
end
- end, [], Candidates
+ end,
+ [],
+ Candidates
),
format_pid_stats(Mailboxes).
@@ -246,17 +284,23 @@ format_pid_stats(Mailboxes) ->
].
get_distribution_stats() ->
- lists:map(fun({Node, Socket}) ->
- {ok, Stats} = inet:getstat(Socket),
- {Node, {Stats}}
- end, erlang:system_info(dist_ctrl)).
+ lists:map(
+ fun({Node, Socket}) ->
+ {ok, Stats} = inet:getstat(Socket),
+ {Node, {Stats}}
+ end,
+ erlang:system_info(dist_ctrl)
+ ).
message_queues(Registered) ->
- lists:map(fun(Name) ->
- Type = message_queue_len,
- {Type, Length} = process_info(whereis(Name), Type),
- {Name, Length}
- end, Registered).
+ lists:map(
+ fun(Name) ->
+ Type = message_queue_len,
+ {Type, Length} = process_info(whereis(Name), Type),
+ {Name, Length}
+ end,
+ Registered
+ ).
%% Workaround for https://bugs.erlang.org/browse/ERL-1355
run_queues() ->
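
call_node/4 above resolves a binary node name with list_to_existing_atom so request input cannot grow the atom table. The same guard as a standalone sketch, written with binary_to_existing_atom (hypothetical module, not part of the patch):

    -module(node_lookup).
    -export([to_node/1]).

    %% Only resolve names whose atom already exists; unknown names
    %% fail instead of creating a new atom.
    to_node(Bin) when is_binary(Bin) ->
        try
            binary_to_existing_atom(Bin, utf8)
        catch
            error:badarg ->
                {error, no_such_node}
        end.
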
diff --git a/src/chttpd/src/chttpd_plugin.erl b/src/chttpd/src/chttpd_plugin.erl
index 7ab458170..fdf20c576 100644
--- a/src/chttpd/src/chttpd_plugin.erl
+++ b/src/chttpd/src/chttpd_plugin.erl
@@ -48,7 +48,12 @@ before_response(HttpReq0, Code0, Headers0, Value0) ->
before_serve_file(Req0, Code0, Headers0, RelativePath0, DocumentRoot0) ->
[HttpReq, Code, Headers, RelativePath, DocumentRoot] =
with_pipe(before_serve_file, [
- Req0, Code0, Headers0, RelativePath0, DocumentRoot0]),
+ Req0,
+ Code0,
+ Headers0,
+ RelativePath0,
+ DocumentRoot0
+ ]),
{ok, {HttpReq, Code, Headers, RelativePath, DocumentRoot}}.
%% ------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_prefer_header.erl b/src/chttpd/src/chttpd_prefer_header.erl
index 12677a5a4..0eeb4a5a3 100644
--- a/src/chttpd/src/chttpd_prefer_header.erl
+++ b/src/chttpd/src/chttpd_prefer_header.erl
@@ -18,42 +18,39 @@
maybe_return_minimal/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-define(DEFAULT_PREFER_MINIMAL,
"Cache-Control, Content-Length, Content-Range, "
- "Content-Type, ETag, Server, Transfer-Encoding, Vary").
-
+ "Content-Type, ETag, Server, Transfer-Encoding, Vary"
+).
maybe_return_minimal(#httpd{mochi_req = MochiReq}, Headers) ->
case get_prefer_header(MochiReq) of
- "return=minimal" ->
+ "return=minimal" ->
filter_headers(Headers, get_header_list());
- _ ->
+ _ ->
Headers
end.
-
get_prefer_header(Req) ->
case Req:get_header_value("Prefer") of
Value when is_list(Value) ->
string:to_lower(Value);
- undefined ->
+ undefined ->
undefined
end.
-
filter_headers(Headers, IncludeList) ->
- lists:filter(fun({HeaderName, _}) ->
- lists:member(HeaderName, IncludeList)
- end, Headers).
-
+ lists:filter(
+ fun({HeaderName, _}) ->
+ lists:member(HeaderName, IncludeList)
+ end,
+ Headers
+ ).
get_header_list() ->
split_list(config:get("chttpd", "prefer_minimal", ?DEFAULT_PREFER_MINIMAL)).
-
split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
+ re:split(S, "\\s*,\\s*", [trim, {return, list}]).
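
split_list/1 above turns the prefer_minimal setting into a header whitelist. A shell-style sketch of what that re:split call yields (input string illustrative):

    1> re:split("Cache-Control, ETag ,Vary", "\\s*,\\s*", [trim, {return, list}]).
    ["Cache-Control","ETag","Vary"]
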
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index fb7124712..fded59ac4 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -17,14 +17,13 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-
maybe_open_doc(Db, DocId, Options) ->
case fabric:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- chttpd_stats:incr_reads(),
- Doc;
- {not_found, _} ->
- nil
+ {ok, Doc} ->
+ chttpd_stats:incr_reads(),
+ Doc;
+ {not_found, _} ->
+ nil
end.
% /db/_design/foo/update/bar/docid
@@ -33,19 +32,25 @@ maybe_open_doc(Db, DocId, Options) ->
% % anything but GET
% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName]
- }=Req, Db, DDoc) ->
+handle_doc_update_req(
+ #httpd{
+ path_parts = [_, _, _, _, UpdateName]
+ } = Req,
+ Db,
+ DDoc
+) ->
send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName | DocIdParts]
- }=Req, Db, DDoc) ->
+handle_doc_update_req(
+ #httpd{
+ path_parts = [_, _, _, _, UpdateName | DocIdParts]
+ } = Req,
+ Db,
+ DDoc
+) ->
DocId = ?l2b(string:join([?b2l(P) || P <- DocIdParts], "/")),
Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
Doc = maybe_open_doc(Db, DocId, Options),
send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-
handle_doc_update_req(Req, _Db, _DDoc) ->
chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
@@ -56,33 +61,34 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
JsonDoc = couch_query_servers:json_doc(Doc),
Cmd = [<<"updates">>, UpdateName],
UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
- JsonResp = case UpdateResp of
- [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
- case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
- "true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
- _ ->
- Options = [{user_ctx, Req#httpd.user_ctx}]
- end,
- NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
- fabric2_db:validate_docid(NewDoc#doc.id),
- {UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
- chttpd_stats:incr_writes(),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- case {UpdateResult, NewRev} of
- {ok, _} ->
- Code = 201;
- {accepted, _} ->
- Code = 202
- end,
- {JsonResp1} = apply_headers(JsonResp0, [
- {<<"X-Couch-Update-NewRev">>, NewRevStr},
- {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
- ]),
- {[{<<"code">>, Code} | JsonResp1]};
- [<<"up">>, _Other, {JsonResp0}] ->
- {[{<<"code">>, 200} | JsonResp0]}
- end,
+ JsonResp =
+ case UpdateResp of
+ [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
+ case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
+ "true" ->
+ Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
+ _ ->
+ Options = [{user_ctx, Req#httpd.user_ctx}]
+ end,
+ NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
+ fabric2_db:validate_docid(NewDoc#doc.id),
+ {UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
+ chttpd_stats:incr_writes(),
+ NewRevStr = couch_doc:rev_to_str(NewRev),
+ case {UpdateResult, NewRev} of
+ {ok, _} ->
+ Code = 201;
+ {accepted, _} ->
+ Code = 202
+ end,
+ {JsonResp1} = apply_headers(JsonResp0, [
+ {<<"X-Couch-Update-NewRev">>, NewRevStr},
+ {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
+ ]),
+ {[{<<"code">>, Code} | JsonResp1]};
+ [<<"up">>, _Other, {JsonResp0}] ->
+ {[{<<"code">>, 200} | JsonResp0]}
+ end,
% todo set location field
chttpd_external:send_external_response(Req, JsonResp).
@@ -98,14 +104,14 @@ json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
json_apply_field({Key, NewValue}, [], Acc) ->
% end of list, add ours
- {[{Key, NewValue}|Acc]}.
+ {[{Key, NewValue} | Acc]}.
apply_headers(JsonResp, []) ->
JsonResp;
apply_headers(JsonResp, NewHeaders) ->
case couch_util:get_value(<<"headers">>, JsonResp) of
undefined ->
- {[{<<"headers">>, {NewHeaders}}| JsonResp]};
+ {[{<<"headers">>, {NewHeaders}} | JsonResp]};
JsonHeaders ->
Headers = apply_headers1(JsonHeaders, NewHeaders),
NewKV = {<<"headers">>, Headers},
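
json_apply_field/3 above replaces any existing header key before adding the new pair. The same effect, compressed into one clause (hypothetical module; ordering differs from the recursive form):

    -module(json_field).
    -export([apply_field/2]).

    %% Drop any existing Key from the EJSON object, then prepend the
    %% new pair.
    apply_field({Key, Value}, {Props}) ->
        {[{Key, Value} | lists:keydelete(Key, 1, Props)]}.
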
diff --git a/src/chttpd/src/chttpd_stats.erl b/src/chttpd/src/chttpd_stats.erl
index 18622783c..47b283008 100644
--- a/src/chttpd/src/chttpd_stats.erl
+++ b/src/chttpd/src/chttpd_stats.erl
@@ -30,7 +30,6 @@
update_interval/1
]).
-
-record(st, {
reads = 0,
writes = 0,
@@ -41,18 +40,23 @@
request
}).
-
-define(KEY, chttpd_stats).
-define(INTERVAL_IN_SEC, 60).
init(Request) ->
Reporter = config:get("chttpd", "stats_reporter"),
Time = erlang:monotonic_time(second),
- Interval = config:get_integer("chttpd", "stats_reporting_interval",
- ?INTERVAL_IN_SEC),
- put(?KEY, #st{reporter = Reporter, last_report_ts = Time,
- interval = Interval, request = Request}).
-
+ Interval = config:get_integer(
+ "chttpd",
+ "stats_reporting_interval",
+ ?INTERVAL_IN_SEC
+ ),
+ put(?KEY, #st{
+ reporter = Reporter,
+ last_report_ts = Time,
+ interval = Interval,
+ request = Request
+ }).
report(HttpResp) ->
try
@@ -62,21 +66,20 @@ report(HttpResp) ->
_ ->
ok
end
- catch T:R:S ->
- ?LOG_ERROR(#{
- what => stats_report_failure,
- tag => T,
- details => R,
- stacktrace => S
- }),
- Fmt = "Failed to report chttpd request stats: ~p:~p ~p",
- couch_log:error(Fmt, [T, R, S])
+ catch
+ T:R:S ->
+ ?LOG_ERROR(#{
+ what => stats_report_failure,
+ tag => T,
+ details => R,
+ stacktrace => S
+ }),
+ Fmt = "Failed to report chttpd request stats: ~p:~p ~p",
+ couch_log:error(Fmt, [T, R, S])
end.
-
report(_HttpResp, #st{reporter = undefined}) ->
ok;
-
report(HttpResp, #st{reporter = Reporter} = St) ->
Mod = list_to_existing_atom(Reporter),
#st{
@@ -87,31 +90,24 @@ report(HttpResp, #st{reporter = Reporter} = St) ->
} = St,
Mod:report(HttpReq, HttpResp, Reads, Writes, Rows).
-
incr_reads() ->
incr(#st.reads, 1).
-
incr_reads(N) when is_integer(N), N >= 0 ->
incr(#st.reads, N).
-
incr_writes() ->
incr(#st.writes, 1).
-
incr_writes(N) when is_integer(N), N >= 0 ->
incr(#st.writes, N).
-
incr_rows() ->
incr(#st.rows, 1).
-
incr_rows(N) when is_integer(N), N >= 0 ->
incr(#st.rows, N).
-
incr(Idx, Count) ->
case get(?KEY) of
#st{} = St ->
@@ -123,7 +119,6 @@ incr(Idx, Count) ->
ok
end.
-
maybe_report_intermittent(State) ->
#st{last_report_ts = LastTime, interval = Interval} = State,
CurrentTime = erlang:monotonic_time(second),
@@ -144,7 +139,6 @@ maybe_report_intermittent(State) ->
ok
end.
-
update_interval(Interval) ->
case get(?KEY) of
#st{} = St ->
@@ -153,7 +147,6 @@ update_interval(Interval) ->
ok
end.
-
reset_stats(State, NewTime) ->
put(?KEY, State#st{
reads = 0,
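
incr/2 above indexes the #st record positionally: #st.reads evaluates to a tuple index, so one helper can bump any counter field with setelement/3. A self-contained sketch of that record-index trick (hypothetical module, not part of the patch):

    -module(rec_incr).
    -export([demo/0]).

    -record(st, {reads = 0, writes = 0}).

    %% #st.reads is the 1-based position of the field in the record
    %% tuple, so setelement/3 can update it generically.
    incr(Idx, Count, #st{} = St) ->
        setelement(Idx, St, element(Idx, St) + Count).

    demo() ->
        St = incr(#st.reads, 3, #st{}),
        %% => 3
        St#st.reads.
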
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index b65564879..d8c6a125a 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -30,17 +30,17 @@
-define(DEFAULT_SERVER_OPTIONS, "[{recbuf, undefined}]").
start_link() ->
- Arg = case fabric2_node_types:is_type(api_frontend) of
- true -> normal;
- false -> disabled
- end,
- supervisor:start_link({local,?MODULE}, ?MODULE, Arg).
+ Arg =
+ case fabric2_node_types:is_type(api_frontend) of
+ true -> normal;
+ false -> disabled
+ end,
+ supervisor:start_link({local, ?MODULE}, ?MODULE, Arg).
init(disabled) ->
?LOG_NOTICE(#{what => http_api_disabled}),
couch_log:notice("~p : api_frontend disabled", [?MODULE]),
{ok, {{one_for_one, 3, 10}, []}};
-
init(normal) ->
Children = [
{
@@ -53,13 +53,11 @@ init(normal) ->
},
?CHILD(chttpd, worker),
?CHILD(chttpd_auth_cache, worker),
- {chttpd_auth_cache_lru,
- {ets_lru, start_link, [chttpd_auth_cache_lru, lru_opts()]},
- permanent, 5000, worker, [ets_lru]}
+ {chttpd_auth_cache_lru, {ets_lru, start_link, [chttpd_auth_cache_lru, lru_opts()]},
+ permanent, 5000, worker, [ets_lru]}
],
- {ok, {{one_for_one, 3, 10},
- couch_epi:register_service(chttpd_epi, Children)}}.
+ {ok, {{one_for_one, 3, 10}, couch_epi:register_service(chttpd_epi, Children)}}.
handle_config_change("chttpd", "bind_address", Value, _, Settings) ->
maybe_replace(bind_address, Value, Settings);
@@ -80,17 +78,21 @@ settings() ->
{bind_address, config:get("chttpd", "bind_address")},
{port, config:get("chttpd", "port")},
{backlog, config:get_integer("chttpd", "backlog", ?DEFAULT_BACKLOG)},
- {server_options, config:get("chttpd",
- "server_options", ?DEFAULT_SERVER_OPTIONS)}
+ {server_options,
+ config:get(
+ "chttpd",
+ "server_options",
+ ?DEFAULT_SERVER_OPTIONS
+ )}
].
maybe_replace(Key, Value, Settings) ->
case couch_util:get_value(Key, Settings) of
- Value ->
- {ok, Settings};
- _ ->
- chttpd:stop(),
- {ok, lists:keyreplace(Key, 1, Settings, {Key, Value})}
+ Value ->
+ {ok, Settings};
+ _ ->
+ chttpd:stop(),
+ {ok, lists:keyreplace(Key, 1, Settings, {Key, Value})}
end.
lru_opts() ->
@@ -114,5 +116,7 @@ append_if_set({Key, Value}, Opts) ->
}),
couch_log:error(
"The value for `~s` should be string convertable "
- "to integer which is >= 0 (got `~p`)", [Key, Value]),
+ "to integer which is >= 0 (got `~p`)",
+ [Key, Value]
+ ),
Opts.
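
The {one_for_one, 3, 10} tuple above is the legacy supervisor flags form: one_for_one strategy, at most 3 restarts within any 10-second window. The equivalent map form, as a sketch (hypothetical module, not part of the patch):

    -module(sup_flags_example).
    -behaviour(supervisor).
    -export([start_link/0, init/1]).

    start_link() ->
        supervisor:start_link({local, ?MODULE}, ?MODULE, []).

    %% Same restart policy as chttpd_sup above, with no children.
    init([]) ->
        SupFlags = #{strategy => one_for_one, intensity => 3, period => 10},
        {ok, {SupFlags, []}}.
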
diff --git a/src/chttpd/src/chttpd_test_util.erl b/src/chttpd/src/chttpd_test_util.erl
index 8930a5a5e..d905a8d88 100644
--- a/src/chttpd/src/chttpd_test_util.erl
+++ b/src/chttpd/src/chttpd_test_util.erl
@@ -16,7 +16,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-
start_couch() ->
start_couch(?CONFIG_CHAIN).
diff --git a/src/chttpd/src/chttpd_util.erl b/src/chttpd/src/chttpd_util.erl
index 5608c46d3..07106e9c7 100644
--- a/src/chttpd/src/chttpd_util.erl
+++ b/src/chttpd/src/chttpd_util.erl
@@ -12,7 +12,6 @@
-module(chttpd_util).
-
-export([
parse_copy_destination_header/1,
get_chttpd_config/1,
@@ -25,62 +24,68 @@
get_chttpd_auth_config_boolean/2
]).
-
parse_copy_destination_header(Req) ->
case couch_httpd:header_value(Req, "Destination") of
- undefined ->
- throw({bad_request, "Destination header is mandatory for COPY."});
- Destination ->
- case re:run(Destination, "^https?://", [{capture, none}]) of
- match ->
- throw({bad_request, "Destination URL must be relative."});
- nomatch ->
- % see if ?rev=revid got appended to the Destination header
- case re:run(Destination, "\\?", [{capture, none}]) of
- nomatch ->
- {list_to_binary(Destination), {0, []}};
- match ->
- [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
- [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- {list_to_binary(DocId), {Pos, [RevId]}}
+ undefined ->
+ throw({bad_request, "Destination header is mandatory for COPY."});
+ Destination ->
+ case re:run(Destination, "^https?://", [{capture, none}]) of
+ match ->
+ throw({bad_request, "Destination URL must be relative."});
+ nomatch ->
+ % see if ?rev=revid got appended to the Destination header
+ case re:run(Destination, "\\?", [{capture, none}]) of
+ nomatch ->
+ {list_to_binary(Destination), {0, []}};
+ match ->
+ [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+ [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ {list_to_binary(DocId), {Pos, [RevId]}}
+ end
end
- end
end.
-
get_chttpd_config(Key) ->
config:get("chttpd", Key, config:get("httpd", Key)).
-
get_chttpd_config(Key, Default) ->
config:get("chttpd", Key, config:get("httpd", Key, Default)).
-
get_chttpd_config_integer(Key, Default) ->
- config:get_integer("chttpd", Key,
- config:get_integer("httpd", Key, Default)).
-
+ config:get_integer(
+ "chttpd",
+ Key,
+ config:get_integer("httpd", Key, Default)
+ ).
get_chttpd_config_boolean(Key, Default) ->
- config:get_boolean("chttpd", Key,
- config:get_boolean("httpd", Key, Default)).
-
+ config:get_boolean(
+ "chttpd",
+ Key,
+ config:get_boolean("httpd", Key, Default)
+ ).
get_chttpd_auth_config(Key) ->
config:get("chttpd_auth", Key, config:get("couch_httpd_auth", Key)).
-
get_chttpd_auth_config(Key, Default) ->
- config:get("chttpd_auth", Key,
- config:get("couch_httpd_auth", Key, Default)).
-
+ config:get(
+ "chttpd_auth",
+ Key,
+ config:get("couch_httpd_auth", Key, Default)
+ ).
get_chttpd_auth_config_integer(Key, Default) ->
- config:get_integer("chttpd_auth", Key,
- config:get_integer("couch_httpd_auth", Key, Default)).
-
+ config:get_integer(
+ "chttpd_auth",
+ Key,
+ config:get_integer("couch_httpd_auth", Key, Default)
+ ).
get_chttpd_auth_config_boolean(Key, Default) ->
- config:get_boolean("chttpd_auth", Key,
- config:get_boolean("couch_httpd_auth", Key, Default)).
+ config:get_boolean(
+ "chttpd_auth",
+ Key,
+ config:get_boolean("couch_httpd_auth", Key, Default)
+ ).
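
All of the helpers above share one fallback shape: read the new section first, fall back to the legacy section, then to a literal default. A sketch of that shape with the lookup passed in as a fun, so it runs without the config application (hypothetical module, not part of the patch):

    -module(cfg_fallback).
    -export([get_integer/3]).

    %% Mirrors get_chttpd_config_integer/2: "chttpd" wins over the
    %% legacy "httpd" section, which wins over Default.
    get_integer(Lookup, Key, Default) ->
        Lookup("chttpd", Key, Lookup("httpd", Key, Default)).
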
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index e0001da67..b77872e22 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -33,28 +33,30 @@ multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
paginate_multi_query_view(Req, Db, DDoc, ViewName, Args, Queries)
end.
-
stream_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
- {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
+ {ok, #mrst{views = Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
Args1 = couch_views_util:set_view_type(Args0, ViewName, Views),
ArgQueries = parse_queries(Req, Args1, Queries, fun(QueryArg) ->
couch_views_util:set_view_type(QueryArg, ViewName, Views)
end),
- VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
+ VAcc0 = #vacc{db = Db, req = Req, prepend = "\r\n"},
FirstChunk = "{\"results\":[",
{ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, [], FirstChunk),
- VAcc1 = VAcc0#vacc{resp=Resp0},
- VAcc2 = lists:foldl(fun(Args, Acc0) ->
- Fun = fun view_cb/2,
- {ok, Acc1} = couch_views:query(Db, DDoc, ViewName, Fun, Acc0, Args),
- Acc1
- end, VAcc1, ArgQueries),
+ VAcc1 = VAcc0#vacc{resp = Resp0},
+ VAcc2 = lists:foldl(
+ fun(Args, Acc0) ->
+ Fun = fun view_cb/2,
+ {ok, Acc1} = couch_views:query(Db, DDoc, ViewName, Fun, Acc0, Args),
+ Acc1
+ end,
+ VAcc1,
+ ArgQueries
+ ),
{ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
chttpd:end_delayed_json_response(Resp1).
-
paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
- {ok, #mrst{views=Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
+ {ok, #mrst{views = Views}} = couch_views_util:ddoc_to_mrst(Db, DDoc),
ArgQueries = parse_queries(Req, Args0, Queries, fun(QueryArg) ->
couch_views_util:set_view_type(QueryArg, ViewName, Views)
end),
@@ -66,15 +68,20 @@ paginate_multi_query_view(Req, Db, DDoc, ViewName, Args0, Queries) ->
UpdateSeq = fabric2_db:get_update_seq(Db),
EtagTerm = {Parts, UpdateSeq, Args0},
Response = couch_views_http:paginated(
- Req, EtagTerm, PageSize, ArgQueries, KeyFun,
+ Req,
+ EtagTerm,
+ PageSize,
+ ArgQueries,
+ KeyFun,
fun(Args) ->
- {ok, #vacc{meta=MetaMap, buffer=Items}} = couch_views:query(
- Db, DDoc, ViewName, fun view_cb/2, #vacc{paginated=true}, Args),
- {MetaMap, Items}
- end),
+ {ok, #vacc{meta = MetaMap, buffer = Items}} = couch_views:query(
+ Db, DDoc, ViewName, fun view_cb/2, #vacc{paginated = true}, Args
+ ),
+ {MetaMap, Items}
+ end
+ ),
chttpd:send_json(Req, Response).
-
design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys) ->
Args = couch_views_http_util:parse_body_and_query(Req, Props, Keys),
fabric_query_view(Db, Req, DDoc, ViewName, Args).
@@ -83,7 +90,6 @@ design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
Args = couch_views_http:parse_params(Req, Keys),
fabric_query_view(Db, Req, DDoc, ViewName, Args).
-
fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
case couch_views_util:is_paginated(Args) of
false ->
@@ -92,15 +98,13 @@ fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
paginate_fabric_query_view(Db, Req, DDoc, ViewName, Args)
end.
-
stream_fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
Max = chttpd:chunked_response_buffer_size(),
Fun = fun view_cb/2,
- VAcc = #vacc{db=Db, req=Req, threshold=Max},
+ VAcc = #vacc{db = Db, req = Req, threshold = Max},
{ok, Resp} = couch_views:query(Db, DDoc, ViewName, Fun, VAcc, Args),
{ok, Resp#vacc.resp}.
-
paginate_fabric_query_view(Db, Req, DDoc, ViewName, Args0) ->
KeyFun = fun({Props}) ->
{couch_util:get_value(id, Props), couch_util:get_value(key, Props)}
@@ -109,13 +113,17 @@ paginate_fabric_query_view(Db, Req, DDoc, ViewName, Args0) ->
UpdateSeq = fabric2_db:get_update_seq(Db),
ETagTerm = {Parts, UpdateSeq, Args0},
Response = couch_views_http:paginated(
- Req, ETagTerm, Args0, KeyFun,
+ Req,
+ ETagTerm,
+ Args0,
+ KeyFun,
fun(Args) ->
- VAcc0 = #vacc{paginated=true},
+ VAcc0 = #vacc{paginated = true},
{ok, VAcc1} = couch_views:query(Db, DDoc, ViewName, fun view_cb/2, VAcc0, Args),
- #vacc{meta=Meta, buffer=Items} = VAcc1,
+ #vacc{meta = Meta, buffer = Items} = VAcc1,
{Meta, Items}
- end),
+ end
+ ),
chttpd:send_json(Req, Response).
view_cb({row, Row} = Msg, Acc) ->
@@ -125,46 +133,59 @@ view_cb({row, Row} = Msg, Acc) ->
end,
chttpd_stats:incr_rows(),
couch_views_http:view_cb(Msg, Acc);
-
view_cb(Msg, Acc) ->
couch_views_http:view_cb(Msg, Acc).
-
-handle_view_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ViewName, <<"queries">>]}=Req, Db, DDoc) ->
+handle_view_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, _, _, _, ViewName, <<"queries">>]
+ } = Req,
+ Db,
+ DDoc
+) ->
chttpd:validate_ctype(Req, "application/json"),
Props = couch_httpd:json_body_obj(Req),
case couch_views_util:get_view_queries(Props) of
undefined ->
- throw({bad_request,
- <<"POST body must include `queries` parameter.">>});
+ throw({bad_request, <<"POST body must include `queries` parameter.">>});
Queries ->
multi_query_view(Req, Db, DDoc, ViewName, Queries)
end;
-
-handle_view_req(#httpd{path_parts=[_, _, _, _, _, <<"queries">>]}=Req,
- _Db, _DDoc) ->
+handle_view_req(
+ #httpd{path_parts = [_, _, _, _, _, <<"queries">>]} = Req,
+ _Db,
+ _DDoc
+) ->
chttpd:send_method_not_allowed(Req, "POST");
-
-handle_view_req(#httpd{method='GET',
- path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
+handle_view_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_, _, _, _, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) ->
couch_stats:increment_counter([couchdb, httpd, view_reads]),
Keys = chttpd:qs_json_value(Req, "keys", undefined),
design_doc_view(Req, Db, DDoc, ViewName, Keys);
-
-handle_view_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
+handle_view_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, _, _, _, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) ->
chttpd:validate_ctype(Req, "application/json"),
Props = couch_httpd:json_body_obj(Req),
assert_no_queries_param(couch_views_util:get_view_queries(Props)),
Keys = couch_views_util:get_view_keys(Props),
couch_stats:increment_counter([couchdb, httpd, view_reads]),
design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys);
-
handle_view_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
% See https://github.com/apache/couchdb/issues/2168
assert_no_queries_param(undefined) ->
ok;
@@ -174,92 +195,112 @@ assert_no_queries_param(_) ->
"The `queries` parameter is no longer supported at this endpoint"
}).
-
validate_args(Req, #mrargs{page_size = PageSize} = Args) when is_integer(PageSize) ->
MaxPageSize = max_page_size(Req),
couch_views_util:validate_args(Args, [{page_size, MaxPageSize}]);
-
validate_args(_Req, #mrargs{} = Args) ->
couch_views_util:validate_args(Args, []).
-
-max_page_size(#httpd{path_parts=[_Db, <<"_all_docs">>, <<"queries">>]}) ->
+max_page_size(#httpd{path_parts = [_Db, <<"_all_docs">>, <<"queries">>]}) ->
config:get_integer(
- "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
-
-max_page_size(#httpd{path_parts=[_Db, <<"_all_docs">>]}) ->
+ "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE
+ );
+max_page_size(#httpd{path_parts = [_Db, <<"_all_docs">>]}) ->
config:get_integer(
- "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
-
-max_page_size(#httpd{path_parts=[_Db, <<"_local_docs">>, <<"queries">>]}) ->
+ "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE
+ );
+max_page_size(#httpd{path_parts = [_Db, <<"_local_docs">>, <<"queries">>]}) ->
config:get_integer(
- "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
-
-max_page_size(#httpd{path_parts=[_Db, <<"_local_docs">>]}) ->
+ "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE
+ );
+max_page_size(#httpd{path_parts = [_Db, <<"_local_docs">>]}) ->
config:get_integer(
- "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
-
-max_page_size(#httpd{path_parts=[_Db, <<"_design_docs">>, <<"queries">>]}) ->
+ "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE
+ );
+max_page_size(#httpd{path_parts = [_Db, <<"_design_docs">>, <<"queries">>]}) ->
config:get_integer(
- "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
-
-max_page_size(#httpd{path_parts=[_Db, <<"_design_docs">>]}) ->
+ "request_limits", "_all_docs/queries", ?DEFAULT_ALL_DOCS_PAGE_SIZE
+ );
+max_page_size(#httpd{path_parts = [_Db, <<"_design_docs">>]}) ->
config:get_integer(
- "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE);
-
-max_page_size(#httpd{path_parts=[
- _Db, <<"_design">>, _DDocName, <<"_view">>, _View, <<"queries">>]}) ->
+ "request_limits", "_all_docs", ?DEFAULT_ALL_DOCS_PAGE_SIZE
+ );
+max_page_size(#httpd{
+ path_parts = [
+ _Db,
+ <<"_design">>,
+ _DDocName,
+ <<"_view">>,
+ _View,
+ <<"queries">>
+ ]
+}) ->
config:get_integer(
- "request_limits", "_view/queries", ?DEFAULT_VIEWS_PAGE_SIZE);
-
-max_page_size(#httpd{path_parts=[
- _Db, <<"_design">>, _DDocName, <<"_view">>, _View]}) ->
+ "request_limits", "_view/queries", ?DEFAULT_VIEWS_PAGE_SIZE
+ );
+max_page_size(#httpd{
+ path_parts = [
+ _Db,
+ <<"_design">>,
+ _DDocName,
+ <<"_view">>,
+ _View
+ ]
+}) ->
config:get_integer(
- "request_limits", "_view", ?DEFAULT_VIEWS_PAGE_SIZE).
-
+ "request_limits", "_view", ?DEFAULT_VIEWS_PAGE_SIZE
+ ).
-parse_queries(Req, #mrargs{page_size = PageSize} = Args0, Queries, Fun)
- when is_integer(PageSize) ->
+parse_queries(Req, #mrargs{page_size = PageSize} = Args0, Queries, Fun) when
+ is_integer(PageSize)
+->
MaxPageSize = max_page_size(Req),
- if length(Queries) < PageSize -> ok; true ->
- throw({
- query_parse_error,
- <<"Provided number of queries is more than given page_size">>
- })
+ if
+ length(Queries) < PageSize ->
+ ok;
+ true ->
+ throw({
+ query_parse_error,
+ <<"Provided number of queries is more than given page_size">>
+ })
end,
couch_views_util:validate_args(Fun(Args0), [{page_size, MaxPageSize}]),
Args = Args0#mrargs{page_size = undefined},
- lists:map(fun({Query}) ->
- Args1 = couch_views_http:parse_params(Query, undefined, Args, [decoded]),
- if not is_integer(Args1#mrargs.page_size) -> ok; true ->
- throw({
- query_parse_error,
- <<"You cannot specify `page_size` inside the query">>
- })
+ lists:map(
+ fun({Query}) ->
+ Args1 = couch_views_http:parse_params(Query, undefined, Args, [decoded]),
+ if
+ not is_integer(Args1#mrargs.page_size) ->
+ ok;
+ true ->
+ throw({
+ query_parse_error,
+ <<"You cannot specify `page_size` inside the query">>
+ })
+ end,
+ Args2 = maybe_set_page_size(Args1, MaxPageSize),
+ couch_views_util:validate_args(Fun(Args2), [{page_size, MaxPageSize}])
end,
- Args2 = maybe_set_page_size(Args1, MaxPageSize),
- couch_views_util:validate_args(Fun(Args2), [{page_size, MaxPageSize}])
- end, Queries);
-
+ Queries
+ );
parse_queries(_Req, #mrargs{} = Args, Queries, Fun) ->
- lists:map(fun({Query}) ->
- Args1 = couch_views_http:parse_params(Query, undefined, Args, [decoded]),
- couch_views_util:validate_args(Fun(Args1))
- end, Queries).
-
+ lists:map(
+ fun({Query}) ->
+ Args1 = couch_views_http:parse_params(Query, undefined, Args, [decoded]),
+ couch_views_util:validate_args(Fun(Args1))
+ end,
+ Queries
+ ).
maybe_set_page_size(#mrargs{page_size = undefined} = Args, MaxPageSize) ->
Args#mrargs{page_size = MaxPageSize};
-
maybe_set_page_size(#mrargs{} = Args, _MaxPageSize) ->
Args.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
check_multi_query_reduce_view_overrides_test_() ->
{
setup,
@@ -276,7 +317,6 @@ check_multi_query_reduce_view_overrides_test_() ->
}
}.
-
t_check_include_docs_throw_validation_error() ->
?_test(begin
Req = #httpd{qs = []},
@@ -286,7 +326,6 @@ t_check_include_docs_throw_validation_error() ->
?assertThrow(Throw, multi_query_view(Req, Db, ddoc, <<"v">>, [Query]))
end).
-
t_check_user_can_override_individual_query_type() ->
?_test(begin
Req = #httpd{qs = []},
@@ -296,7 +335,6 @@ t_check_user_can_override_individual_query_type() ->
?assertEqual(1, meck:num_calls(chttpd, start_delayed_json_response, '_'))
end).
-
setup_all() ->
Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
meck:expect(couch_views_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
@@ -305,11 +343,9 @@ setup_all() ->
meck:expect(chttpd, send_delayed_chunk, 2, {ok, resp}),
meck:expect(chttpd, end_delayed_json_response, 1, ok).
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([
chttpd,
@@ -317,9 +353,7 @@ setup() ->
couch_views_util
]).
-
teardown(_) ->
ok.
-
-endif.
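
parse_queries/4 above rejects a batch whose length reaches page_size before touching the individual queries. The same guard as a standalone sketch (hypothetical module and error message, not part of the patch):

    -module(page_guard).
    -export([check/2]).

    %% Accept the batch only when it fits inside one page.
    check(Queries, PageSize) when is_list(Queries), is_integer(PageSize) ->
        if
            length(Queries) < PageSize ->
                ok;
            true ->
                throw({query_parse_error, <<"too many queries for page_size">>})
        end.
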
diff --git a/src/chttpd/src/chttpd_xframe_options.erl b/src/chttpd/src/chttpd_xframe_options.erl
index 2a43617fa..15865057b 100644
--- a/src/chttpd/src/chttpd_xframe_options.erl
+++ b/src/chttpd/src/chttpd_xframe_options.erl
@@ -21,7 +21,6 @@
-define(SAMEORIGIN, "SAMEORIGIN").
-define(ALLOWFROM, "ALLOW-FROM ").
-
-include_lib("couch/include/couch_db.hrl").
% X-Frame-Options protects against clickjacking by limiting whether a response can be used in a
@@ -30,33 +29,28 @@
header(Req, Headers) ->
header(Req, Headers, get_xframe_config(Req)).
-
-
header(Req, Headers, Config) ->
case lists:keyfind(enabled, 1, Config) of
- {enabled, true} ->
+ {enabled, true} ->
generate_xframe_header(Req, Headers, Config);
- _ ->
+ _ ->
Headers
end.
-
-
generate_xframe_header(Req, Headers, Config) ->
- XframeOption = case lists:keyfind(same_origin, 1, Config) of
- {same_origin, true} ->
- ?SAMEORIGIN;
- _ ->
- check_host(Req, Config)
- end,
- [{"X-Frame-Options", XframeOption } | Headers].
-
-
+ XframeOption =
+ case lists:keyfind(same_origin, 1, Config) of
+ {same_origin, true} ->
+ ?SAMEORIGIN;
+ _ ->
+ check_host(Req, Config)
+ end,
+ [{"X-Frame-Options", XframeOption} | Headers].
check_host(#httpd{mochi_req = MochiReq} = Req, Config) ->
Host = couch_httpd_vhost:host(MochiReq),
case Host of
- [] ->
+ [] ->
?DENY;
Host ->
FullHost = chttpd:absolute_uri(Req, ""),
@@ -66,18 +60,18 @@ check_host(#httpd{mochi_req = MochiReq} = Req, Config) ->
true -> ?ALLOWFROM ++ FullHost;
false -> ?DENY
end
- end.
-
-
+ end.
get_xframe_config(#httpd{xframe_config = undefined}) ->
EnableXFrame = chttpd_util:get_chttpd_config_boolean(
- "enable_xframe_options", false),
+ "enable_xframe_options", false
+ ),
SameOrigin = config:get("x_frame_options", "same_origin", "false") =:= "true",
- AcceptedHosts = case config:get("x_frame_options", "hosts") of
- undefined -> [];
- Hosts -> split_list(Hosts)
- end,
+ AcceptedHosts =
+ case config:get("x_frame_options", "hosts") of
+ undefined -> [];
+ Hosts -> split_list(Hosts)
+ end,
[
{enabled, EnableXFrame},
{same_origin, SameOrigin},
@@ -86,15 +80,11 @@ get_xframe_config(#httpd{xframe_config = undefined}) ->
get_xframe_config(#httpd{xframe_config = Config}) ->
Config.
-
-
get_accepted_hosts(Config) ->
case lists:keyfind(hosts, 1, Config) of
false -> [];
{hosts, AcceptedHosts} -> AcceptedHosts
end.
-
-
split_list(S) ->
re:split(S, "\\s*,\\s*", [trim, {return, list}]).
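
generate_xframe_header/3 and check_host/2 above reduce to a small decision tree. A sketch over two booleans (hypothetical module; the real code appends the request's absolute URI after "ALLOW-FROM "):

    -module(xframe_example).
    -export([value/2]).

    %% same_origin wins; otherwise the host whitelist decides.
    value(SameOrigin, HostAccepted) ->
        case {SameOrigin, HostAccepted} of
            {true, _} -> "SAMEORIGIN";
            {false, true} -> "ALLOW-FROM https://example.org";
            {false, false} -> "DENY"
        end.
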
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl
index 1c912ac2a..6952c16c8 100644
--- a/src/couch/src/couch.erl
+++ b/src/couch/src/couch.erl
@@ -18,7 +18,6 @@
restart/0
]).
-
deps() ->
[
sasl,
@@ -32,7 +31,6 @@ deps() ->
couch_log
].
-
start() ->
catch erlang:system_flag(scheduler_bind_type, default_bind),
case start_apps(deps()) of
@@ -42,26 +40,23 @@ start() ->
throw(Else)
end.
-
stop() ->
application:stop(couch).
-
restart() ->
init:restart().
-
start_apps([]) ->
ok;
-start_apps([App|Rest]) ->
+start_apps([App | Rest]) ->
case application:start(App) of
- ok ->
- start_apps(Rest);
- {error, {already_started, App}} ->
- start_apps(Rest);
- {error, _Reason} when App =:= public_key ->
- % ignore on R12B5
- start_apps(Rest);
- {error, _Reason} ->
- {error, {app_would_not_start, App}}
+ ok ->
+ start_apps(Rest);
+ {error, {already_started, App}} ->
+ start_apps(Rest);
+ {error, _Reason} when App =:= public_key ->
+ % ignore on R12B5
+ start_apps(Rest);
+ {error, _Reason} ->
+ {error, {app_would_not_start, App}}
end.
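
start_apps/1 above treats a dependency that is already running as success. The same tolerance as a one-function sketch (hypothetical module, not part of the patch):

    -module(app_start_example).
    -export([ensure_started/1]).

    %% already_started is success; anything else is a real failure.
    ensure_started(App) when is_atom(App) ->
        case application:start(App) of
            ok -> ok;
            {error, {already_started, App}} -> ok;
            {error, Reason} -> {error, {App, Reason}}
        end.
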
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index 706e821d3..f796de2e8 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -55,34 +55,30 @@
-compile(nowarn_deprecated_type).
-export_type([att/0]).
-
-include_lib("couch/include/couch_db.hrl").
-
-define(CURRENT_ATT_FORMAT, 0).
-define(DEFAULT_COMPRESSIBLE_TYPES,
- "text/*, application/javascript, application/json, application/xml").
-
+ "text/*, application/javascript, application/json, application/xml"
+).
-type prop_name() ::
- name |
- type |
- att_len |
- disk_len |
- md5 |
- revpos |
- data |
- encoding.
-
+ name
+ | type
+ | att_len
+ | disk_len
+ | md5
+ | revpos
+ | data
+ | encoding.
-type data_prop_type() ::
- {loc, #{}, binary(), binary()} |
- stub |
- follows |
- binary() |
- {follows, pid(), reference()} |
- fun(() -> binary()).
-
+ {loc, #{}, binary(), binary()}
+ | stub
+ | follows
+ | binary()
+ | {follows, pid(), reference()}
+ | fun(() -> binary()).
-type att() :: #{
name := binary(),
@@ -95,7 +91,6 @@
encoding := identity | gzip | undefined
}.
-
new() ->
#{
name => <<>>,
@@ -108,40 +103,38 @@ new() ->
encoding => undefined
}.
-
-spec new([{prop_name(), any()}]) -> att().
new(Props) ->
store(Props, new()).
-
--spec fetch([atom()], att()) -> [any()];
- (atom(), att()) -> any().
+-spec fetch
+ ([atom()], att()) -> [any()];
+ (atom(), att()) -> any().
fetch(Fields, Att) when is_list(Fields) ->
[fetch(Field, Att) || Field <- Fields];
fetch(Field, Att) ->
maps:get(Field, Att).
-
-spec store([{atom(), any()}], att()) -> att().
store(Props, Att0) ->
- lists:foldl(fun({Field, Value}, Att) ->
- maps:update(Field, Value, Att)
- end, Att0, Props).
-
+ lists:foldl(
+ fun({Field, Value}, Att) ->
+ maps:update(Field, Value, Att)
+ end,
+ Att0,
+ Props
+ ).
store(Field, Value, Att) ->
maps:update(Field, Value, Att).
-
-spec transform(atom(), fun(), att()) -> att().
transform(Field, Fun, Att) ->
maps:update_with(Field, Fun, Att).
-
is_stub(#{data := stub}) -> true;
is_stub(#{}) -> false.
-
%% merge_stubs takes all stub attachments and replaces them with on disk
%% attachments. It will return {missing, Name} if a stub isn't matched with
%% an existing attachment on disk. If the revpos is supplied with the stub
@@ -152,7 +145,6 @@ merge_stubs(MemAtts, DiskAtts) ->
),
merge_stubs(MemAtts, OnDisk, []).
-
-spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
merge_stubs([Att | Rest], OnDisk, Merged) ->
case fetch(data, Att) of
@@ -180,31 +172,33 @@ merge_stubs([Att | Rest], OnDisk, Merged) ->
merge_stubs([], _, Merged) ->
{ok, lists:reverse(Merged)}.
-
external_size(Att) ->
NameSize = size(fetch(name, Att)),
- TypeSize = case fetch(type, Att) of
- undefined -> 0;
- Type -> size(Type)
- end,
+ TypeSize =
+ case fetch(type, Att) of
+ undefined -> 0;
+ Type -> size(Type)
+ end,
AttSize = fetch(att_len, Att),
- Md5Size = case fetch(md5, Att) of
- undefined -> 0;
- Md5 -> size(Md5)
- end,
+ Md5Size =
+ case fetch(md5, Att) of
+ undefined -> 0;
+ Md5 -> size(Md5)
+ end,
NameSize + TypeSize + AttSize + Md5Size.
-
size_info([]) ->
{ok, []};
size_info(Atts) ->
- Info = lists:map(fun(Att) ->
- [{loc, _Db, _DocId, AttId}, AttLen] = fetch([data, att_len], Att),
- {AttId, AttLen}
- end, Atts),
+ Info = lists:map(
+ fun(Att) ->
+ [{loc, _Db, _DocId, AttId}, AttLen] = fetch([data, att_len], Att),
+ {AttId, AttLen}
+ end,
+ Atts
+ ),
{ok, lists:usort(Info)}.
-
%% When converting an attachment to disk term format, attempt to stay with the
%% old format when possible. This should help make the attachment lazy upgrade
%% as safe as possible, avoiding the need for complicated disk versioning
@@ -222,7 +216,6 @@ to_disk_term(Att) ->
fetch(encoding, Att)
}}.
-
from_disk_term(#{} = Db, DocId, {?CURRENT_ATT_FORMAT, Props}) ->
{
Name,
@@ -245,7 +238,6 @@ from_disk_term(#{} = Db, DocId, {?CURRENT_ATT_FORMAT, Props}) ->
{encoding, Encoding}
]).
-
%% from_json reads in embedded JSON attachments and creates usable attachment
%% values. The attachment may be a stub,
from_json(Name, Props) ->
@@ -261,7 +253,6 @@ from_json(Name, Props) ->
true -> inline_from_json(Att, Props)
end.
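%% Hypothetical inputs for the dispatch above (the stub/follows branches
%% are elided in this hunk): an inline attachment carries base64 data, a
%% stub carries "stub":true, and "follows":true marks multipart bodies:
%% Inline = couch_att:from_json(<<"a.txt">>,
%%     [{<<"data">>, base64:encode(<<"hi">>)}]),
%% Stub = couch_att:from_json(<<"a.txt">>,
%%     [{<<"stub">>, true}, {<<"length">>, 2}]).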
-
stub_from_json(Att, Props) ->
{DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props),
Digest = digest_from_json(Props),
@@ -269,29 +260,33 @@ stub_from_json(Att, Props) ->
%% the revpos consistency check on stubs when it's not provided in the
%% json object. See merge_stubs/3 for the stub check.
RevPos = couch_util:get_value(<<"revpos">>, Props),
- store([
- {data, stub},
- {disk_len, DiskLen},
- {att_len, EncodedLen},
- {revpos, RevPos},
- {md5, Digest},
- {encoding, Encoding}
- ], Att).
-
+ store(
+ [
+ {data, stub},
+ {disk_len, DiskLen},
+ {att_len, EncodedLen},
+ {revpos, RevPos},
+ {md5, Digest},
+ {encoding, Encoding}
+ ],
+ Att
+ ).
follow_from_json(Att, Props) ->
{DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props),
Digest = digest_from_json(Props),
RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
- store([
- {data, follows},
- {disk_len, DiskLen},
- {att_len, EncodedLen},
- {revpos, RevPos},
- {md5, Digest},
- {encoding, Encoding}
- ], Att).
-
+ store(
+ [
+ {data, follows},
+ {disk_len, DiskLen},
+ {att_len, EncodedLen},
+ {revpos, RevPos},
+ {md5, Digest},
+ {encoding, Encoding}
+ ],
+ Att
+ ).
inline_from_json(Att, Props) ->
B64Data = couch_util:get_value(<<"data">>, Props),
@@ -299,20 +294,22 @@ inline_from_json(Att, Props) ->
Data ->
Length = size(Data),
RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
- store([
- {data, Data},
- {disk_len, Length},
- {att_len, Length},
- {revpos, RevPos}
- ], Att)
+ store(
+ [
+ {data, Data},
+ {disk_len, Length},
+ {att_len, Length},
+ {revpos, RevPos}
+ ],
+ Att
+ )
catch
_:_ ->
Name = fetch(name, Att),
- ErrMsg = <<"Invalid attachment data for ", Name/binary>>,
+ ErrMsg = <<"Invalid attachment data for ", Name/binary>>,
throw({bad_request, ErrMsg})
end.
-
encoded_lengths_from_json(Props) ->
Len = couch_util:get_value(<<"length">>, Props),
case couch_util:get_value(<<"encoding">>, Props) of
@@ -325,16 +322,15 @@ encoded_lengths_from_json(Props) ->
end,
{Len, EncodedLen, Encoding}.
-
digest_from_json(Props) ->
case couch_util:get_value(<<"digest">>, Props) of
<<"md5-", EncodedMd5/binary>> -> base64:decode(EncodedMd5);
_ -> <<>>
end.
-
-to_json(#{md5 := Md5} = Att, OutputData, DataToFollow, ShowEncoding)
- when is_binary(Md5) ->
+to_json(#{md5 := Md5} = Att, OutputData, DataToFollow, ShowEncoding) when
+ is_binary(Md5)
+->
#{
name := Name,
type := Type,
@@ -348,37 +344,39 @@ to_json(#{md5 := Md5} = Att, OutputData, DataToFollow, ShowEncoding)
{<<"content_type">>, Type},
{<<"revpos">>, RevPos}
],
- DigestProp = case base64:encode(Md5) of
- <<>> -> [];
- Digest -> [{<<"digest">>, <<"md5-", Digest/binary>>}]
- end,
- DataProps = if
- not OutputData orelse Data == stub ->
- [{<<"length">>, DiskLen}, {<<"stub">>, true}];
- DataToFollow ->
- [{<<"length">>, DiskLen}, {<<"follows">>, true}];
- true ->
- AttData = case Encoding of
- gzip -> zlib:gunzip(to_binary(Att));
- identity -> to_binary(Att)
- end,
- [{<<"data">>, base64:encode(AttData)}]
- end,
- EncodingProps = if
- ShowEncoding andalso Encoding /= identity ->
- [
- {<<"encoding">>, couch_util:to_binary(Encoding)},
- {<<"encoded_length">>, AttLen}
- ];
- true ->
- []
- end,
+ DigestProp =
+ case base64:encode(Md5) of
+ <<>> -> [];
+ Digest -> [{<<"digest">>, <<"md5-", Digest/binary>>}]
+ end,
+ DataProps =
+ if
+ not OutputData orelse Data == stub ->
+ [{<<"length">>, DiskLen}, {<<"stub">>, true}];
+ DataToFollow ->
+ [{<<"length">>, DiskLen}, {<<"follows">>, true}];
+ true ->
+ AttData =
+ case Encoding of
+ gzip -> zlib:gunzip(to_binary(Att));
+ identity -> to_binary(Att)
+ end,
+ [{<<"data">>, base64:encode(AttData)}]
+ end,
+ EncodingProps =
+ if
+ ShowEncoding andalso Encoding /= identity ->
+ [
+ {<<"encoding">>, couch_util:to_binary(Encoding)},
+ {<<"encoded_length">>, AttLen}
+ ];
+ true ->
+ []
+ end,
{Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps}};
-
to_json(#{md5 := undefined} = Att, OutputData, DataToFollow, ShowEncoding) ->
to_json(Att#{md5 => <<>>}, OutputData, DataToFollow, ShowEncoding).
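%% Output sketch for the stub branch above (hypothetical values): with
%% OutputData = false the result collapses to
%% {<<"a.txt">>, {[{<<"content_type">>, <<"text/plain">>},
%%                 {<<"revpos">>, 1},
%%                 {<<"length">>, DiskLen},
%%                 {<<"stub">>, true}]}}
%% plus a <<"digest">> pair whenever the stored MD5 is non-empty.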
-
flush(Db, DocId, Att1) ->
Data0 = fetch(data, Att1),
case {Data0, Db} of
@@ -401,21 +399,23 @@ flush(Db, DocId, Att1) ->
% If we were sent a gzip'ed attachment with no
% length data, we have to set it here.
- Att3 = case DiskLen of
- undefined when AttLen /= undefined ->
- store(disk_len, AttLen, Att2);
- undefined when is_binary(Data) ->
- store(disk_len, size(Data), Att2);
- _ ->
- Att2
- end,
+ Att3 =
+ case DiskLen of
+ undefined when AttLen /= undefined ->
+ store(disk_len, AttLen, Att2);
+ undefined when is_binary(Data) ->
+ store(disk_len, size(Data), Att2);
+ _ ->
+ Att2
+ end,
% If no encoding has been set, default to
% identity
- Att4 = case Encoding of
- undefined -> store(encoding, identity, Att3);
- _ -> Att3
- end,
+ Att4 =
+ case Encoding of
+ undefined -> store(encoding, identity, Att3);
+ _ -> Att3
+ end,
case Data of
{loc, _, _, _} ->
@@ -423,24 +423,22 @@ flush(Db, DocId, Att1) ->
Att1;
_ when is_binary(Data) ->
DataMd5 = couch_hash:md5_hash(Data),
- if ReqMd5 == undefined -> ok; true ->
- couch_util:check_md5(DataMd5, ReqMd5)
+ if
+ ReqMd5 == undefined -> ok;
+ true -> couch_util:check_md5(DataMd5, ReqMd5)
end,
Att5 = store(md5, DataMd5, Att4),
Att6 = maybe_compress(Att5),
fabric2_db:write_attachment(Db, DocId, Att6)
end.
-
read_data(Att) ->
Data = fetch(data, Att),
read_data(Data, Att).
-
read_data({loc, #{}, _DocId, _AttId}, Att) ->
% Attachment already written to fdb
Att;
-
read_data({follows, Parser, Ref}, Att) ->
ParserRef = erlang:monitor(process, Parser),
Fun = fun() ->
@@ -459,13 +457,11 @@ read_data({follows, Parser, Ref}, Att) ->
after
erlang:demonitor(ParserRef, [flush])
end;
-
read_data(Data, Att) when is_binary(Data) ->
case fetch(att_len, Att) of
undefined -> store(att_len, size(Data), Att);
- Int when is_integer(Int) -> Att
+ Int when is_integer(Int) -> Att
end;
-
read_data(Fun, Att) when is_function(Fun) ->
[AttName, AttLen, InMd5] = fetch([name, att_len, md5], Att),
MaxAttSize = max_attachment_size(),
@@ -476,17 +472,20 @@ read_data(Fun, Att) when is_function(Fun) ->
WriterFun = fun
({0, Footers}, {Len, Acc}) ->
F = mochiweb_headers:from_binary(Footers),
- Md5 = case mochiweb_headers:get_value("Content-MD5", F) of
- undefined -> undefined;
- Value -> base64:decode(Value)
- end,
+ Md5 =
+ case mochiweb_headers:get_value("Content-MD5", F) of
+ undefined -> undefined;
+ Value -> base64:decode(Value)
+ end,
Props0 = [
{data, iolist_to_binary(lists:reverse(Acc))},
{att_len, Len}
],
- Props1 = if InMd5 /= md5_in_footer -> Props0; true ->
- [{md5, Md5} | Props0]
- end,
+ Props1 =
+ if
+ InMd5 /= md5_in_footer -> Props0;
+ true -> [{md5, Md5} | Props0]
+ end,
store(Props1, Att);
({ChunkLen, Chunk}, {Len, Acc}) ->
NewLen = Len + ChunkLen,
@@ -499,50 +498,44 @@ read_data(Fun, Att) when is_function(Fun) ->
read_streamed_attachment(Att, Fun, AttLen, [])
end.
-
read_streamed_attachment(Att, _F, 0, Acc) ->
Bin = iolist_to_binary(lists:reverse(Acc)),
- store([
- {data, Bin},
- {att_len, size(Bin)}
- ], Att);
-
+ store(
+ [
+ {data, Bin},
+ {att_len, size(Bin)}
+ ],
+ Att
+ );
read_streamed_attachment(_Att, _F, LenLeft, _Acc) when LenLeft < 0 ->
throw({bad_request, <<"attachment longer than expected">>});
-
read_streamed_attachment(Att, F, LenLeft, Acc) when LenLeft > 0 ->
- Bin = try
- read_next_chunk(F, LenLeft)
- catch
- {mp_parser_died, normal} ->
- throw({bad_request, <<"attachment shorter than expected">>})
- end,
+ Bin =
+ try
+ read_next_chunk(F, LenLeft)
+ catch
+ {mp_parser_died, normal} ->
+ throw({bad_request, <<"attachment shorter than expected">>})
+ end,
Size = iolist_size(Bin),
read_streamed_attachment(Att, F, LenLeft - Size, [Bin | Acc]).
-
read_next_chunk(F, _) when is_function(F, 0) ->
F();
-
read_next_chunk(F, LenLeft) when is_function(F, 1) ->
F(lists:min([LenLeft, 16#2000])).
-
foldl(Att, Fun, Acc) ->
foldl(fetch(data, Att), Att, Fun, Acc).
-
foldl({loc, Db, DocId, AttId}, _Att, Fun, Acc) ->
Bin = fabric2_db:read_attachment(Db#{tx := undefined}, DocId, AttId),
Fun(Bin, Acc);
-
foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) ->
Fun(Bin, Acc);
-
foldl(DataFun, Att, Fun, Acc) when is_function(DataFun) ->
Len = fetch(att_len, Att),
fold_streamed_data(DataFun, Len, Fun, Acc);
-
foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
ParserRef = erlang:monitor(process, Parser),
DataFun = fun() ->
@@ -562,25 +555,24 @@ foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
erlang:demonitor(ParserRef, [flush])
end.
-
range_foldl(Bin1, From, To, Fun, Acc) when is_binary(Bin1) ->
ReadLen = To - From,
- Bin2 = case Bin1 of
- _ when size(Bin1) < From -> <<>>;
- <<_:From/binary, B2/binary>> -> B2
- end,
- Bin3 = case Bin2 of
- _ when size(Bin2) < ReadLen -> Bin2;
- <<B3:ReadLen/binary, _/binary>> -> B3
- end,
+ Bin2 =
+ case Bin1 of
+ _ when size(Bin1) < From -> <<>>;
+ <<_:From/binary, B2/binary>> -> B2
+ end,
+ Bin3 =
+ case Bin2 of
+ _ when size(Bin2) < ReadLen -> Bin2;
+ <<B3:ReadLen/binary, _/binary>> -> B3
+ end,
Fun(Bin3, Acc);
-
range_foldl(Att, From, To, Fun, Acc) ->
{loc, Db, DocId, AttId} = fetch(data, Att),
Bin = fabric2_db:read_attachment(Db, DocId, AttId),
range_foldl(Bin, From, To, Fun, Acc).
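%% Worked example of the clipping above: range_foldl/5 over <<"abcdef">>
%% with From = 1 and To = 4 drops one leading byte, keeps ReadLen = 3
%% bytes, and hands <<"bcd">> to Fun, i.e. the byte range [From, To).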
-
foldl_decode(Att, Fun, Acc) ->
[Encoding, Data] = fetch([encoding, data], Att),
case {Encoding, Data} of
@@ -599,11 +591,9 @@ foldl_decode(Att, Fun, Acc) ->
foldl(Att, Fun, Acc)
end.
-
to_binary(Att) ->
to_binary(fetch(data, Att), Att).
-
to_binary(Bin, _Att) when is_binary(Bin) ->
Bin;
to_binary(Iolist, _Att) when is_list(Iolist) ->
@@ -611,27 +601,26 @@ to_binary(Iolist, _Att) when is_list(Iolist) ->
to_binary({loc, Db, DocId, AttId}, _Att) ->
NoTxDb = Db#{tx := undefined},
fabric2_db:read_attachment(NoTxDb, DocId, AttId);
-to_binary(DataFun, Att) when is_function(DataFun)->
+to_binary(DataFun, Att) when is_function(DataFun) ->
Len = fetch(att_len, Att),
iolist_to_binary(
- lists:reverse(fold_streamed_data(
- DataFun,
- Len,
- fun(Data, Acc) -> [Data | Acc] end,
- []
- ))
+ lists:reverse(
+ fold_streamed_data(
+ DataFun,
+ Len,
+ fun(Data, Acc) -> [Data | Acc] end,
+ []
+ )
+ )
).
-
fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
Acc;
-
-fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
+fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0 ->
Bin = RcvFun(),
ResultAcc = Fun(Bin, Acc),
fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
-
maybe_compress(Att) ->
[Encoding, Type] = fetch([encoding, type], Att),
IsCompressible = is_compressible(Type),
@@ -643,7 +632,6 @@ maybe_compress(Att) ->
Att
end.
-
compress(Att, Level) ->
Data = fetch(data, Att),
@@ -655,33 +643,41 @@ compress(Att, Level) ->
ok = zlib:deflateEnd(Z),
ok = zlib:close(Z),
- store([
- {att_len, size(CompData)},
- {md5, couch_hash:md5_hash(CompData)},
- {data, CompData},
- {encoding, gzip}
- ], Att).
-
+ store(
+ [
+ {att_len, size(CompData)},
+ {md5, couch_hash:md5_hash(CompData)},
+ {data, CompData},
+ {encoding, gzip}
+ ],
+ Att
+ ).
is_compressible(Type) when is_binary(Type) ->
is_compressible(binary_to_list(Type));
is_compressible(Type) ->
TypeExpList = re:split(
- config:get("attachments", "compressible_types",
- ?DEFAULT_COMPRESSIBLE_TYPES),
+ config:get(
+ "attachments",
+ "compressible_types",
+ ?DEFAULT_COMPRESSIBLE_TYPES
+ ),
"\\s*,\\s*",
[{return, list}]
),
lists:any(
fun(TypeExp) ->
- Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
- "(?:\\s*;.*?)?\\s*", $$],
+ Regexp = [
+ "^\\s*",
+ re:replace(TypeExp, "\\*", ".*"),
+ "(?:\\s*;.*?)?\\s*",
+ $$
+ ],
re:run(Type, Regexp, [caseless]) =/= nomatch
end,
[T || T <- TypeExpList, T /= []]
).
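%% A hypothetical configuration for the lookup above (the default is the
%% ?DEFAULT_COMPRESSIBLE_TYPES macro). Each entry such as "text/*" becomes
%% the regex ^\s*text/.*(?:\s*;.*?)?\s*$, so parameterized types like
%% "text/plain; charset=utf-8" still match:
%% [attachments]
%% compressible_types = text/*, application/json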
-
max_attachment_size() ->
case config:get("couchdb", "max_attachment_size", "infinity") of
"infinity" ->
@@ -690,9 +686,9 @@ max_attachment_size() ->
list_to_integer(MaxAttSize)
end.
-
-validate_attachment_size(AttName, AttSize, MaxAttSize)
- when is_integer(AttSize), AttSize > MaxAttSize ->
+validate_attachment_size(AttName, AttSize, MaxAttSize) when
+ is_integer(AttSize), AttSize > MaxAttSize
+->
throw({request_entity_too_large, {attachment, AttName}});
validate_attachment_size(_AttName, _AttSize, _MaxAttSize) ->
ok.
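%% A hypothetical setting for the checks above (assuming the elided
%% "infinity" branch returns the atom infinity, which no integer size
%% exceeds): with the entry below, validate_attachment_size/3 throws
%% request_entity_too_large for any attachment over 1 MiB:
%% [couchdb]
%% max_attachment_size = 1048576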
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
index 919d5614f..0f8af2a6c 100644
--- a/src/couch/src/couch_auth_cache.erl
+++ b/src/couch/src/couch_auth_cache.erl
@@ -12,7 +12,6 @@
-module(couch_auth_cache).
-
-export([
get_user_creds/1,
get_user_creds/2,
@@ -21,23 +20,20 @@
auth_design_doc/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_js_functions.hrl").
-
--spec get_user_creds(UserName::string() | binary()) ->
- {ok, Credentials::list(), term()} | nil.
+-spec get_user_creds(UserName :: string() | binary()) ->
+ {ok, Credentials :: list(), term()} | nil.
get_user_creds(UserName) ->
get_user_creds(nil, UserName).
--spec get_user_creds(Req::#httpd{} | nil, UserName::string() | binary()) ->
- {ok, Credentials::list(), term()} | nil.
+-spec get_user_creds(Req :: #httpd{} | nil, UserName :: string() | binary()) ->
+ {ok, Credentials :: list(), term()} | nil.
get_user_creds(Req, UserName) when is_list(UserName) ->
get_user_creds(Req, ?l2b(UserName));
-
get_user_creds(_Req, UserName) ->
get_admin(UserName).
@@ -49,35 +45,39 @@ get_admin(UserName) when is_binary(UserName) ->
get_admin(?b2l(UserName));
get_admin(UserName) when is_list(UserName) ->
case config:get("admins", UserName) of
- "-hashed-" ++ HashedPwdAndSalt ->
- % the name is an admin, now check to see if there is a user doc
- % which has a matching name, salt, and password_sha
- [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
- make_admin_doc(HashedPwd, Salt);
- "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
- [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
- make_admin_doc(HashedPwd, Salt, Iterations);
- _Else ->
- nil
+ "-hashed-" ++ HashedPwdAndSalt ->
+ % the name is an admin, now check to see if there is a user doc
+ % which has a matching name, salt, and password_sha
+ [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+ make_admin_doc(HashedPwd, Salt);
+ "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
+ [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
+ make_admin_doc(HashedPwd, Salt, Iterations);
+ _Else ->
+ nil
end.
make_admin_doc(HashedPwd, Salt) ->
- [{<<"roles">>, [<<"_admin">>]},
- {<<"salt">>, ?l2b(Salt)},
- {<<"password_scheme">>, <<"simple">>},
- {<<"password_sha">>, ?l2b(HashedPwd)}].
+ [
+ {<<"roles">>, [<<"_admin">>]},
+ {<<"salt">>, ?l2b(Salt)},
+ {<<"password_scheme">>, <<"simple">>},
+ {<<"password_sha">>, ?l2b(HashedPwd)}
+ ].
make_admin_doc(DerivedKey, Salt, Iterations) ->
- [{<<"roles">>, [<<"_admin">>]},
- {<<"salt">>, ?l2b(Salt)},
- {<<"iterations">>, list_to_integer(Iterations)},
- {<<"password_scheme">>, <<"pbkdf2">>},
- {<<"derived_key">>, ?l2b(DerivedKey)}].
+ [
+ {<<"roles">>, [<<"_admin">>]},
+ {<<"salt">>, ?l2b(Salt)},
+ {<<"iterations">>, list_to_integer(Iterations)},
+ {<<"password_scheme">>, <<"pbkdf2">>},
+ {<<"derived_key">>, ?l2b(DerivedKey)}
+ ].
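%% Hypothetical [admins] entries matched by the clauses above (placeholder
%% hashes and salts, not real credentials):
%% [admins]
%% anna = -pbkdf2-derivedkeydata,saltdata,10
%% bob = -hashed-shadata,saltdata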
auth_design_doc(DocId) ->
DocProps = [
{<<"_id">>, DocId},
- {<<"language">>,<<"javascript">>},
+ {<<"language">>, <<"javascript">>},
{<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
],
{ok, couch_doc:from_json_obj({DocProps})}.
diff --git a/src/couch/src/couch_base32.erl b/src/couch/src/couch_base32.erl
index d8d754f5e..776fe773d 100644
--- a/src/couch/src/couch_base32.erl
+++ b/src/couch/src/couch_base32.erl
@@ -16,7 +16,6 @@
-define(SET, <<"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567">>).
-
-spec encode(binary()) -> binary().
encode(Plain) when is_binary(Plain) ->
IoList = encode(Plain, 0, byte_size(Plain) * 8, []),
@@ -24,54 +23,63 @@ encode(Plain) when is_binary(Plain) ->
encode(_Plain, _ByteOffset, 0, Acc) ->
Acc;
-
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 8 ->
<<A:5, B:3>> = binary:part(Plain, ByteOffset, 1),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B bsl 2)),
- "======">> | Acc];
-
+ [<<(binary:at(?SET, A)), (binary:at(?SET, B bsl 2)), "======">> | Acc];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 16 ->
<<A:5, B:5, C:5, D:1>> = binary:part(Plain, ByteOffset, 2),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D bsl 4)),
- "====">> | Acc];
-
+ [
+ <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D bsl 4)),
+ "===="
+ >>
+ | Acc
+ ];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 24 ->
<<A:5, B:5, C:5, D:5, E:4>> = binary:part(Plain, ByteOffset, 3),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E bsl 1)),
- "===">> | Acc];
-
+ [
+ <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D)),
+ (binary:at(?SET, E bsl 1)),
+ "==="
+ >>
+ | Acc
+ ];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 32 ->
<<A:5, B:5, C:5, D:5, E:5, F:5, G:2>> = binary:part(Plain, ByteOffset, 4),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E)),
- (binary:at(?SET, F)),
- (binary:at(?SET, G bsl 3)),
- "=">> | Acc];
-
+ [
+ <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D)),
+ (binary:at(?SET, E)),
+ (binary:at(?SET, F)),
+ (binary:at(?SET, G bsl 3)),
+ "="
+ >>
+ | Acc
+ ];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining >= 40 ->
<<A:5, B:5, C:5, D:5, E:5, F:5, G:5, H:5>> =
binary:part(Plain, ByteOffset, 5),
- Output = <<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E)),
- (binary:at(?SET, F)),
- (binary:at(?SET, G)),
- (binary:at(?SET, H))>>,
- encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]).
-
+ Output = <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D)),
+ (binary:at(?SET, E)),
+ (binary:at(?SET, F)),
+ (binary:at(?SET, G)),
+ (binary:at(?SET, H))
+ >>,
+ encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]).
-spec decode(binary()) -> binary().
decode(Encoded) when is_binary(Encoded) ->
@@ -83,39 +91,60 @@ decode(Encoded, ByteOffset, Acc) when ByteOffset == byte_size(Encoded) ->
decode(Encoded, ByteOffset, Acc) ->
case binary:part(Encoded, ByteOffset, 8) of
<<A:1/binary, B:1/binary, "======">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B) bsr 2):3>> | Acc];
+ [<<(find_in_set(A)):5, (find_in_set(B) bsr 2):3>> | Acc];
<<A:1/binary, B:1/binary, C:1/binary, D:1/binary, "====">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D) bsr 4):1>> | Acc];
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D) bsr 4):1
+ >>
+ | Acc
+ ];
<<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, "===">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E) bsr 1):4>> | Acc];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary,
- E:1/binary, F:1/binary, G:1/binary, "=">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E)):5,
- (find_in_set(F)):5,
- (find_in_set(G) bsr 3):2>> | Acc];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary,
- E:1/binary, F:1/binary, G:1/binary, H:1/binary>> ->
- decode(Encoded, ByteOffset + 8,
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E)):5,
- (find_in_set(F)):5,
- (find_in_set(G)):5,
- (find_in_set(H)):5>> | Acc])
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D)):5,
+ (find_in_set(E) bsr 1):4
+ >>
+ | Acc
+ ];
+ <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary, "=">> ->
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D)):5,
+ (find_in_set(E)):5,
+ (find_in_set(F)):5,
+ (find_in_set(G) bsr 3):2
+ >>
+ | Acc
+ ];
+ <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary,
+ H:1/binary>> ->
+ decode(
+ Encoded,
+ ByteOffset + 8,
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D)):5,
+ (find_in_set(E)):5,
+ (find_in_set(F)):5,
+ (find_in_set(G)):5,
+ (find_in_set(H)):5
+ >>
+ | Acc
+ ]
+ )
end.
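%% A round-trip sketch using the padding rules above (the ?SET alphabet is
%% the standard RFC 4648 base32 alphabet):
%% <<"MZXW6===">> = couch_base32:encode(<<"foo">>),
%% <<"foo">> = couch_base32:decode(<<"MZXW6===">>).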
find_in_set(Char) ->
diff --git a/src/couch/src/couch_db_epi.erl b/src/couch/src/couch_db_epi.erl
index bfd435ac8..dc02e5c5e 100644
--- a/src/couch/src/couch_db_epi.erl
+++ b/src/couch/src/couch_db_epi.erl
@@ -32,7 +32,6 @@ providers() ->
{chttpd_handlers, couch_httpd_handlers}
].
-
services() ->
[
{feature_flags, couch_flags}
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 80feb615c..13b593e3e 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -49,6 +49,7 @@ help() ->
].
-spec help(Function :: atom()) -> ok.
+%% erlfmt-ignore
help(opened_files) ->
io:format("
opened_files()
@@ -205,9 +206,11 @@ help(Unknown) ->
[{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
opened_files() ->
- Info = [couch_file_port_info(Port)
- || Port <- erlang:ports(),
- {name, "efile"} =:= erlang:port_info(Port, name)],
+ Info = [
+ couch_file_port_info(Port)
+ || Port <- erlang:ports(),
+ {name, "efile"} =:= erlang:port_info(Port, name)
+ ],
[I || I <- Info, is_tuple(I)].
couch_file_port_info(Port) ->
@@ -223,17 +226,22 @@ couch_file_port_info(Port) ->
[{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
opened_files_by_regexp(FileRegExp) ->
{ok, RegExp} = re:compile(FileRegExp),
- lists:filter(fun({_Port, _Pid, _Fd, Path}) ->
- re:run(Path, RegExp) =/= nomatch
- end, couch_debug:opened_files()).
+ lists:filter(
+ fun({_Port, _Pid, _Fd, Path}) ->
+ re:run(Path, RegExp) =/= nomatch
+ end,
+ couch_debug:opened_files()
+ ).
-spec opened_files_contains(FileNameFragment :: iodata()) ->
[{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
opened_files_contains(FileNameFragment) ->
- lists:filter(fun({_Port, _Pid, _Fd, Path}) ->
- string:str(Path, FileNameFragment) > 0
- end, couch_debug:opened_files()).
-
+ lists:filter(
+ fun({_Port, _Pid, _Fd, Path}) ->
+ string:str(Path, FileNameFragment) > 0
+ end,
+ couch_debug:opened_files()
+ ).
process_name(Pid) when is_pid(Pid) ->
Info = process_info(Pid, [registered_name, dictionary, initial_call]),
@@ -260,7 +268,8 @@ link_tree(RootPid, Info) ->
link_tree(RootPid, Info, Fun) ->
{_, Result} = link_tree(
- RootPid, [links | Info], gb_trees:empty(), 0, [RootPid], Fun),
+ RootPid, [links | Info], gb_trees:empty(), 0, [RootPid], Fun
+ ),
Result.
link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) ->
@@ -272,21 +281,23 @@ link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) ->
Visited1 = gb_trees:insert(Pid, Props, Visited0),
{links, Children} = lists:keyfind(links, 1, Props),
{Visited2, NewTree} = link_tree(
- RootPid, Info, Visited1, Pos + 1, Children, Fun),
+ RootPid, Info, Visited1, Pos + 1, Children, Fun
+ ),
{Visited3, Result} = link_tree(
- RootPid, Info, Visited2, Pos, Rest, Fun),
- {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result};
+ RootPid, Info, Visited2, Pos, Rest, Fun
+ ),
+ {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result};
none ->
Props = info(Pid, Info),
Visited1 = gb_trees:insert(Pid, Props, Visited0),
{Visited2, Result} = link_tree(
- RootPid, Info, Visited1, Pos, Rest, Fun),
+ RootPid, Info, Visited1, Pos, Rest, Fun
+ ),
{Visited2, [{Pos, {Pid, Fun(Pid, Props), []}}] ++ Result}
end;
link_tree(_RootPid, _Info, Visited, _Pos, [], _Fun) ->
{Visited, []}.
-
info(Pid, Info) when is_pid(Pid) ->
ValidProps = [
backtrace,
@@ -340,12 +351,16 @@ info(Port, Info) when is_port(Port) ->
port_info(Port, lists:usort(Validated)).
port_info(Port, Items) ->
- lists:foldl(fun(Item, Acc) ->
- case (catch erlang:port_info(Port, Item)) of
- {Item, _Value} = Info -> [Info | Acc];
- _Else -> Acc
- end
- end, [], Items).
+ lists:foldl(
+ fun(Item, Acc) ->
+ case (catch erlang:port_info(Port, Item)) of
+ {Item, _Value} = Info -> [Info | Acc];
+ _Else -> Acc
+ end
+ end,
+ [],
+ Items
+ ).
mapfold_tree([], Acc, _Fun) ->
{[], Acc};
@@ -378,13 +393,14 @@ print_linked_processes(Name) when is_atom(Name) ->
print_linked_processes(Pid) when is_pid(Pid) ->
Info = [reductions, message_queue_len, memory],
TableSpec = [
- {50, left, name}, {12, centre, reductions},
- {19, centre, message_queue_len}, {10, centre, memory}
+ {50, left, name},
+ {12, centre, reductions},
+ {19, centre, message_queue_len},
+ {10, centre, memory}
],
Tree = linked_processes_info(Pid, Info),
print_tree(Tree, TableSpec).
-
%% Pretty print functions
%% Limitations:
@@ -438,31 +454,40 @@ random_processes(Pids, 0) ->
random_processes(Acc, Depth) ->
Caller = self(),
Ref = make_ref(),
- Pid = case oneof([spawn_link, open_port]) of
- spawn_monitor ->
- {P, _} = spawn_monitor(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end),
- P;
- spawn ->
- spawn(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end);
- spawn_link ->
- spawn_link(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end);
- open_port ->
- spawn_link(fun() ->
- Port = erlang:open_port({spawn, "sleep 10"}, []),
- true = erlang:link(Port),
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end)
- end,
+ Pid =
+ case oneof([spawn_link, open_port]) of
+ spawn_monitor ->
+ {P, _} = spawn_monitor(fun() ->
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end),
+ P;
+ spawn ->
+ spawn(fun() ->
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end);
+ spawn_link ->
+ spawn_link(fun() ->
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end);
+ open_port ->
+ spawn_link(fun() ->
+ Port = erlang:open_port({spawn, "sleep 10"}, []),
+ true = erlang:link(Port),
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end)
+ end,
receive
{Ref, Pids} -> random_processes([Pid | Pids] ++ Acc, Depth - 1)
end.
@@ -470,7 +495,6 @@ random_processes(Acc, Depth) ->
oneof(Options) ->
lists:nth(couch_rand:uniform(length(Options)), Options).
-
tree() ->
[InitialPid | _] = Processes = random_processes(5),
{InitialPid, Processes, link_tree(InitialPid)}.
@@ -486,7 +510,8 @@ link_tree_test_() ->
"link_tree tests",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_have_same_shape/1,
fun should_include_extra_info/1
@@ -496,16 +521,16 @@ link_tree_test_() ->
should_have_same_shape({InitialPid, _Processes, Tree}) ->
?_test(begin
- InfoTree = linked_processes_info(InitialPid, []),
- ?assert(is_equal(InfoTree, Tree)),
- ok
+ InfoTree = linked_processes_info(InitialPid, []),
+ ?assert(is_equal(InfoTree, Tree)),
+ ok
end).
should_include_extra_info({InitialPid, _Processes, _Tree}) ->
Info = [reductions, message_queue_len, memory],
?_test(begin
- InfoTree = linked_processes_info(InitialPid, Info),
- map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) ->
+ InfoTree = linked_processes_info(InitialPid, Info),
+ map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) ->
case Key of
Pid when is_pid(Pid) ->
?assert(lists:keymember(reductions, 1, Props)),
@@ -515,11 +540,12 @@ should_include_extra_info({InitialPid, _Processes, _Tree}) ->
ok
end,
Props
- end),
- ok
+ end),
+ ok
end).
-is_equal([], []) -> true;
+is_equal([], []) ->
+ true;
is_equal([{Pos, {Pid, _, A}} | RestA], [{Pos, {Pid, _, B}} | RestB]) ->
case is_equal(RestA, RestB) of
false -> false;
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index 4d0a13d14..42c62bd55 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -12,7 +12,7 @@
-module(couch_doc).
--export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
+-export([to_doc_info/1, to_doc_info_path/1, parse_rev/1, parse_revs/1, rev_to_str/1, revs_to_strs/1]).
-export([from_json_obj/1, from_json_obj_validate/1]).
-export([from_json_obj/2, from_json_obj_validate/2]).
-export([to_json_obj/2, has_stubs/1, merge_stubs/2]).
@@ -25,15 +25,14 @@
-export([is_deleted/1]).
-
-include_lib("couch/include/couch_db.hrl").
-spec to_path(#doc{}) -> path().
-to_path(#doc{revs={Start, RevIds}}=Doc) ->
+to_path(#doc{revs = {Start, RevIds}} = Doc) ->
[Branch] = to_branch(Doc, lists:reverse(RevIds)),
{Start - length(RevIds) + 1, Branch}.
--spec to_branch(#doc{}, [RevId::binary()]) -> [branch()].
+-spec to_branch(#doc{}, [RevId :: binary()]) -> [branch()].
to_branch(Doc, [RevId]) ->
[{RevId, Doc, []}];
to_branch(Doc, [RevId | Rest]) ->
@@ -42,8 +41,8 @@ to_branch(Doc, [RevId | Rest]) ->
% helpers used by to_json_obj
to_json_rev(0, []) ->
[];
-to_json_rev(Start, [FirstRevId|_]) ->
- [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",revid_to_str(FirstRevId)])}].
+to_json_rev(Start, [FirstRevId | _]) ->
+ [{<<"_rev">>, ?l2b([integer_to_list(Start), "-", revid_to_str(FirstRevId)])}].
to_json_body(true, {Body}) ->
Body ++ [{<<"_deleted">>, true}];
@@ -51,53 +50,69 @@ to_json_body(false, {Body}) ->
Body.
to_json_revisions(Options, Start, RevIds0) ->
- RevIds = case proplists:get_value(revs, Options) of
+ RevIds =
+ case proplists:get_value(revs, Options) of
+ true ->
+ RevIds0;
+ Num when is_integer(Num), Num > 0 ->
+ lists:sublist(RevIds0, Num);
+ _ ->
+ []
+ end,
+ if
+ RevIds == [] ->
+ [];
true ->
- RevIds0;
- Num when is_integer(Num), Num > 0 ->
- lists:sublist(RevIds0, Num);
- _ ->
- []
- end,
- if RevIds == [] -> []; true ->
- [{<<"_revisions">>, {[{<<"start">>, Start},
- {<<"ids">>, [revid_to_str(R) ||R <- RevIds]}]}}]
+ [
+ {<<"_revisions">>,
+ {[
+ {<<"start">>, Start},
+ {<<"ids">>, [revid_to_str(R) || R <- RevIds]}
+ ]}}
+ ]
end.
-
revid_to_str(RevId) when size(RevId) =:= 16 ->
?l2b(couch_util:to_hex(RevId));
revid_to_str(RevId) ->
RevId.
rev_to_str({Pos, RevId}) ->
- ?l2b([integer_to_list(Pos),"-",revid_to_str(RevId)]).
-
+ ?l2b([integer_to_list(Pos), "-", revid_to_str(RevId)]).
revs_to_strs([]) ->
[];
-revs_to_strs([{Pos, RevId}| Rest]) ->
+revs_to_strs([{Pos, RevId} | Rest]) ->
[rev_to_str({Pos, RevId}) | revs_to_strs(Rest)].
to_json_meta(Meta) ->
lists:flatmap(
- fun({revs_info, Start, RevsInfo}) ->
- {JsonRevsInfo, _Pos} = lists:mapfoldl(
- fun({RevId, Status}, PosAcc) ->
- JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})},
- {<<"status">>, ?l2b(atom_to_list(Status))}]},
- {JsonObj, PosAcc - 1}
- end, Start, RevsInfo),
- [{<<"_revs_info">>, JsonRevsInfo}];
- ({local_seq, Seq}) ->
- [{<<"_local_seq">>, Seq}];
- ({conflicts, Conflicts}) ->
- [{<<"_conflicts">>, revs_to_strs(Conflicts)}];
- ({deleted_conflicts, DConflicts}) ->
- [{<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}];
- (_) ->
- []
- end, Meta).
+ fun
+ ({revs_info, Start, RevsInfo}) ->
+ {JsonRevsInfo, _Pos} = lists:mapfoldl(
+ fun({RevId, Status}, PosAcc) ->
+ JsonObj =
+ {[
+ {<<"rev">>, rev_to_str({PosAcc, RevId})},
+ {<<"status">>, ?l2b(atom_to_list(Status))}
+ ]},
+ {JsonObj, PosAcc - 1}
+ end,
+ Start,
+ RevsInfo
+ ),
+ [{<<"_revs_info">>, JsonRevsInfo}];
+ ({local_seq, Seq}) ->
+ [{<<"_local_seq">>, Seq}];
+ ({conflicts, Conflicts}) ->
+ [{<<"_conflicts">>, revs_to_strs(Conflicts)}];
+ ({deleted_conflicts, DConflicts}) ->
+ [{<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}];
+ (_) ->
+ []
+ end,
+ Meta
+ ).
to_json_attachments(Attachments, Options) ->
to_json_attachments(
@@ -116,14 +131,23 @@ to_json_attachments(Atts, OutputData, Follows, ShowEnc) ->
to_json_obj(Doc, Options) ->
doc_to_json_obj(Doc, Options).
-doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
- meta=Meta}=Doc,Options)->
- {[{<<"_id">>, Id}]
- ++ to_json_rev(Start, RevIds)
- ++ to_json_body(Del, Body)
- ++ to_json_revisions(Options, Start, RevIds)
- ++ to_json_meta(Meta)
- ++ to_json_attachments(Doc#doc.atts, Options)
+doc_to_json_obj(
+ #doc{
+ id = Id,
+ deleted = Del,
+ body = Body,
+ revs = {Start, RevIds},
+ meta = Meta
+ } = Doc,
+ Options
+) ->
+ {
+ [{<<"_id">>, Id}] ++
+ to_json_rev(Start, RevIds) ++
+ to_json_body(Del, Body) ++
+ to_json_revisions(Options, Start, RevIds) ++
+ to_json_meta(Meta) ++
+ to_json_attachments(Doc#doc.atts, Options)
}.
from_json_obj_validate(EJson) ->
@@ -134,48 +158,54 @@ from_json_obj_validate(EJson, DbName) ->
Doc = from_json_obj(EJson, DbName),
case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of
true ->
- validate_attachment_sizes(Doc#doc.atts),
- Doc;
+ validate_attachment_sizes(Doc#doc.atts),
+ Doc;
false ->
throw({request_entity_too_large, Doc#doc.id})
end.
-
validate_attachment_sizes([]) ->
ok;
validate_attachment_sizes(Atts) ->
MaxAttSize = couch_att:max_attachment_size(),
- lists:foreach(fun(Att) ->
- AttName = couch_att:fetch(name, Att),
- AttSize = couch_att:fetch(att_len, Att),
- couch_att:validate_attachment_size(AttName, AttSize, MaxAttSize)
- end, Atts).
-
+ lists:foreach(
+ fun(Att) ->
+ AttName = couch_att:fetch(name, Att),
+ AttSize = couch_att:fetch(att_len, Att),
+ couch_att:validate_attachment_size(AttName, AttSize, MaxAttSize)
+ end,
+ Atts
+ ).
from_json_obj({Props}) ->
from_json_obj({Props}, undefined).
from_json_obj({Props}, DbName) ->
- transfer_fields(Props, #doc{body=[]}, DbName);
+ transfer_fields(Props, #doc{body = []}, DbName);
from_json_obj(_Other, _) ->
throw({bad_request, "Document must be a JSON object"}).
parse_revid(RevId) when size(RevId) =:= 32 ->
RevInt = erlang:list_to_integer(?b2l(RevId), 16),
- <<RevInt:128>>;
+ <<RevInt:128>>;
parse_revid(RevId) when length(RevId) =:= 32 ->
RevInt = erlang:list_to_integer(RevId, 16),
- <<RevInt:128>>;
+ <<RevInt:128>>;
parse_revid(RevId) when is_binary(RevId) ->
RevId;
parse_revid(RevId) when is_list(RevId) ->
?l2b(RevId).
-
parse_rev(Rev) when is_binary(Rev) ->
parse_rev(?b2l(Rev));
parse_rev(Rev) when is_list(Rev) ->
- SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
+ SplitRev = lists:splitwith(
+ fun
+ ($-) -> false;
+ (_) -> true
+ end,
+ Rev
+ ),
case SplitRev of
{Pos, [$- | RevId]} ->
try
@@ -184,7 +214,8 @@ parse_rev(Rev) when is_list(Rev) ->
catch
error:badarg -> throw({bad_request, <<"Invalid rev format">>})
end;
- _Else -> throw({bad_request, <<"Invalid rev format">>})
+ _Else ->
+ throw({bad_request, <<"Invalid rev format">>})
end;
parse_rev(_BadRev) ->
throw({bad_request, <<"Invalid rev format">>}).
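%% Parse sketch: short rev ids stay binaries, while 32-character hex ids
%% are packed into 16 bytes by parse_revid/1:
%% {1, <<"deadbeef">>} = couch_doc:parse_rev(<<"1-deadbeef">>),
%% {2, <<_:128>>} = couch_doc:parse_rev(
%%     <<"2-00000000000000000000000000000000">>).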
@@ -196,53 +227,52 @@ parse_revs([Rev | Rest]) ->
parse_revs(_) ->
throw({bad_request, "Invalid list of revisions"}).
-
-transfer_fields([], #doc{body=Fields}=Doc, _) ->
+transfer_fields([], #doc{body = Fields} = Doc, _) ->
% convert fields back to json object
- Doc#doc{body={lists:reverse(Fields)}};
-
+ Doc#doc{body = {lists:reverse(Fields)}};
transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) ->
fabric2_db:validate_docid(Id),
- transfer_fields(Rest, Doc#doc{id=Id}, DbName);
-
-transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{id = Id}, DbName);
+transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs = {0, []}} = Doc, DbName) ->
{Pos, RevId} = parse_rev(Rev),
- transfer_fields(Rest,
- Doc#doc{revs={Pos, [RevId]}}, DbName);
-
+ transfer_fields(
+ Rest,
+ Doc#doc{revs = {Pos, [RevId]}},
+ DbName
+ );
transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc, DbName) ->
% we already got the rev from the _revisions
transfer_fields(Rest, Doc, DbName);
-
transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc, DbName) ->
Atts = [couch_att:from_json(Name, Props) || {Name, {Props}} <- JsonBins],
- transfer_fields(Rest, Doc#doc{atts=Atts}, DbName);
-
+ transfer_fields(Rest, Doc#doc{atts = Atts}, DbName);
transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc, DbName) ->
RevIds = couch_util:get_value(<<"ids">>, Props),
Start = couch_util:get_value(<<"start">>, Props),
- if not is_integer(Start) ->
- throw({doc_validation, "_revisions.start isn't an integer."});
- not is_list(RevIds) ->
- throw({doc_validation, "_revisions.ids isn't a array."});
- true ->
- ok
+ if
+ not is_integer(Start) ->
+ throw({doc_validation, "_revisions.start isn't an integer."});
+ not is_list(RevIds) ->
+            throw({doc_validation, "_revisions.ids isn't an array."});
+ true ->
+ ok
end,
- RevIds2 = lists:map(fun(RevId) ->
- try
- parse_revid(RevId)
- catch
- error:function_clause ->
- throw({doc_validation, "RevId isn't a string"});
- error:badarg ->
- throw({doc_validation, "RevId isn't a valid hexadecimal"})
- end
- end, RevIds),
- transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}}, DbName);
-
+ RevIds2 = lists:map(
+ fun(RevId) ->
+ try
+ parse_revid(RevId)
+ catch
+ error:function_clause ->
+ throw({doc_validation, "RevId isn't a string"});
+ error:badarg ->
+ throw({doc_validation, "RevId isn't a valid hexadecimal"})
+ end
+ end,
+ RevIds
+ ),
+ transfer_fields(Rest, Doc#doc{revs = {Start, RevIds2}}, DbName);
transfer_fields([{<<"_deleted">>, B} | Rest], Doc, DbName) when is_boolean(B) ->
- transfer_fields(Rest, Doc#doc{deleted=B}, DbName);
-
+ transfer_fields(Rest, Doc#doc{deleted = B}, DbName);
% ignored fields
transfer_fields([{<<"_revs_info">>, _} | Rest], Doc, DbName) ->
transfer_fields(Rest, Doc, DbName);
@@ -252,36 +282,49 @@ transfer_fields([{<<"_conflicts">>, _} | Rest], Doc, DbName) ->
transfer_fields(Rest, Doc, DbName);
transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc, DbName) ->
transfer_fields(Rest, Doc, DbName);
-
% special field for per doc access control, for future compatibility
-transfer_fields([{<<"_access">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-
+transfer_fields(
+ [{<<"_access">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
% special fields for replication documents
-transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_state_reason">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_stats">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-
+transfer_fields(
+ [{<<"_replication_state">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_state_time">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_state_reason">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_id">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_stats">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
% unknown special field
-transfer_fields([{<<"_",Name/binary>>, _} | _], _, _) ->
- throw({doc_validation,
- ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-
-transfer_fields([Field | Rest], #doc{body=Fields}=Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName).
+transfer_fields([{<<"_", Name/binary>>, _} | _], _, _) ->
+ throw({doc_validation, ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
+transfer_fields([Field | Rest], #doc{body = Fields} = Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName).
to_doc_info(FullDocInfo) ->
{DocInfo, _Path} = to_doc_info_path(FullDocInfo),
@@ -293,10 +336,11 @@ max_seq(Tree, UpdateSeq) ->
{_Deleted, _DiskPos, OldTreeSeq} ->
% Older versions didn't track data sizes.
erlang:max(MaxOldSeq, OldTreeSeq);
- {_Deleted, _DiskPos, OldTreeSeq, _Size} -> % necessary clause?
+ % necessary clause?
+ {_Deleted, _DiskPos, OldTreeSeq, _Size} ->
% Older versions didn't store #leaf records.
erlang:max(MaxOldSeq, OldTreeSeq);
- #leaf{seq=OldTreeSeq} ->
+ #leaf{seq = OldTreeSeq} ->
erlang:max(MaxOldSeq, OldTreeSeq);
_ ->
MaxOldSeq
@@ -304,20 +348,25 @@ max_seq(Tree, UpdateSeq) ->
end,
couch_key_tree:fold(FoldFun, UpdateSeq, Tree).
-to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree,update_seq=FDISeq}) ->
+to_doc_info_path(#full_doc_info{id = Id, rev_tree = Tree, update_seq = FDISeq}) ->
RevInfosAndPath = [
- {rev_info(Node), Path} || {_Leaf, Path} = Node <-
+ {rev_info(Node), Path}
+ || {_Leaf, Path} = Node <-
couch_key_tree:get_all_leafs(Tree)
],
SortedRevInfosAndPath = lists:sort(
- fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
- {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
+ fun(
+ {#rev_info{deleted = DeletedA, rev = RevA}, _PathA},
+ {#rev_info{deleted = DeletedB, rev = RevB}, _PathB}
+ ) ->
% sort descending by {not deleted, rev}
{not DeletedA, RevA} > {not DeletedB, RevB}
- end, RevInfosAndPath),
- [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath,
+ end,
+ RevInfosAndPath
+ ),
+ [{_RevInfo, WinPath} | _] = SortedRevInfosAndPath,
RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
- {#doc_info{id=Id, high_seq=max_seq(Tree, FDISeq), revs=RevInfos}, WinPath}.
+ {#doc_info{id = Id, high_seq = max_seq(Tree, FDISeq), revs = RevInfos}, WinPath}.
rev_info({#leaf{} = Leaf, {Pos, [RevId | _]}}) ->
#rev_info{
@@ -345,53 +394,56 @@ rev_info({#{} = RevInfo, {Pos, [RevId | _]}}) ->
rev = {Pos, RevId}
}.
-is_deleted(#full_doc_info{rev_tree=Tree}) ->
+is_deleted(#full_doc_info{rev_tree = Tree}) ->
is_deleted(Tree);
is_deleted(Tree) ->
Leafs = couch_key_tree:get_all_leafs(Tree),
try
- lists:foldl(fun
- ({#leaf{deleted=false},_}, _) ->
- throw(not_deleted);
- ({#doc{deleted=false},_}, _) ->
- throw(not_deleted);
- (_, Acc) ->
- Acc
- end, nil, Leafs),
+ lists:foldl(
+ fun
+ ({#leaf{deleted = false}, _}, _) ->
+ throw(not_deleted);
+ ({#doc{deleted = false}, _}, _) ->
+ throw(not_deleted);
+ (_, Acc) ->
+ Acc
+ end,
+ nil,
+ Leafs
+ ),
true
- catch throw:not_deleted ->
- false
+ catch
+ throw:not_deleted ->
+ false
end.
-
get_validate_doc_fun({Props}) ->
get_validate_doc_fun(couch_doc:from_json_obj({Props}));
-get_validate_doc_fun(#doc{body={Props}}=DDoc) ->
+get_validate_doc_fun(#doc{body = {Props}} = DDoc) ->
case couch_util:get_value(<<"validate_doc_update">>, Props) of
- undefined ->
- nil;
- _Else ->
- fun(EditDoc, DiskDoc, Ctx, SecObj) ->
- couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
- end
+ undefined ->
+ nil;
+ _Else ->
+ fun(EditDoc, DiskDoc, Ctx, SecObj) ->
+ couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
+ end
end.
-
-has_stubs(#doc{atts=Atts}) ->
+has_stubs(#doc{atts = Atts}) ->
lists:any(fun couch_att:is_stub/1, Atts);
has_stubs(Atts) ->
lists:any(fun couch_att:is_stub/1, Atts).
merge_stubs(#doc{id = Id}, nil) ->
throw({missing_stub, <<"Previous revision missing for document ", Id/binary>>});
-merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) ->
+merge_stubs(#doc{id = Id, atts = MemBins} = StubsDoc, #doc{atts = DiskBins}) ->
case couch_att:merge_stubs(MemBins, DiskBins) of
{ok, MergedBins} ->
StubsDoc#doc{atts = MergedBins};
{missing, Name} ->
- throw({missing_stub,
- <<"Invalid attachment stub in ", Id/binary, " for ", Name/binary>>
- })
+ throw(
+ {missing_stub, <<"Invalid attachment stub in ", Id/binary, " for ", Name/binary>>}
+ )
end.
len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
@@ -399,28 +451,38 @@ len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts),
couch_httpd_multipart:length_multipart_stream(Boundary, JsonBytes, AttsDecoded).
-
-doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun,
- SendEncodedAtts) ->
- AttsToInclude = lists:filter(fun(Att)-> couch_att:fetch(data, Att) /= stub end, Atts),
+doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Atts,
+ WriteFun,
+ SendEncodedAtts
+) ->
+ AttsToInclude = lists:filter(fun(Att) -> couch_att:fetch(data, Att) /= stub end, Atts),
AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts),
- AttFun = case SendEncodedAtts of
- false -> fun couch_att:foldl_decode/3;
- true -> fun couch_att:foldl/3
- end,
+ AttFun =
+ case SendEncodedAtts of
+ false -> fun couch_att:foldl_decode/3;
+ true -> fun couch_att:foldl/3
+ end,
couch_httpd_multipart:encode_multipart_stream(
- Boundary, JsonBytes, AttsDecoded, WriteFun, AttFun).
+ Boundary, JsonBytes, AttsDecoded, WriteFun, AttFun
+ ).
decode_attributes(Atts, SendEncodedAtts) ->
- lists:map(fun(Att) ->
- [Name, AttLen, DiskLen, Type, Encoding] =
- couch_att:fetch([name, att_len, disk_len, type, encoding], Att),
- Len = case SendEncodedAtts of
- true -> AttLen;
- false -> DiskLen
- end,
- {Att, Name, Len, Type, Encoding}
- end, Atts).
+ lists:map(
+ fun(Att) ->
+ [Name, AttLen, DiskLen, Type, Encoding] =
+ couch_att:fetch([name, att_len, disk_len, type, encoding], Att),
+ Len =
+ case SendEncodedAtts of
+ true -> AttLen;
+ false -> DiskLen
+ end,
+ {Att, Name, Len, Type, Encoding}
+ end,
+ Atts
+ ).
doc_from_multi_part_stream(ContentType, DataFun) ->
doc_from_multi_part_stream(ContentType, DataFun, make_ref()).
@@ -430,25 +492,32 @@ doc_from_multi_part_stream(ContentType, DataFun, Ref) ->
doc_from_multi_part_stream(ContentType, DataFun, Ref, ValidateDocLimits) ->
case couch_httpd_multipart:decode_multipart_stream(ContentType, DataFun, Ref) of
- {{started_open_doc_revs, NewRef}, Parser, _ParserRef} ->
- restart_open_doc_revs(Parser, Ref, NewRef);
- {{doc_bytes, Ref, DocBytes}, Parser, ParserRef} ->
- Doc = case ValidateDocLimits of
- true ->
- from_json_obj_validate(?JSON_DECODE(DocBytes));
- false ->
- from_json_obj(?JSON_DECODE(DocBytes))
- end,
- erlang:put(mochiweb_request_recv, true),
- % we'll send the Parser process ID to the remote nodes so they can
- % retrieve their own copies of the attachment data
- WithParser = fun(follows) -> {follows, Parser, Ref}; (D) -> D end,
- Atts = [couch_att:transform(data, WithParser, A) || A <- Doc#doc.atts],
- WaitFun = fun() ->
- receive {'DOWN', ParserRef, _, _, _} -> ok end
- end,
- {ok, Doc#doc{atts=Atts}, WaitFun, Parser};
- ok -> ok
+ {{started_open_doc_revs, NewRef}, Parser, _ParserRef} ->
+ restart_open_doc_revs(Parser, Ref, NewRef);
+ {{doc_bytes, Ref, DocBytes}, Parser, ParserRef} ->
+ Doc =
+ case ValidateDocLimits of
+ true ->
+ from_json_obj_validate(?JSON_DECODE(DocBytes));
+ false ->
+ from_json_obj(?JSON_DECODE(DocBytes))
+ end,
+ erlang:put(mochiweb_request_recv, true),
+ % we'll send the Parser process ID to the remote nodes so they can
+ % retrieve their own copies of the attachment data
+ WithParser = fun
+ (follows) -> {follows, Parser, Ref};
+ (D) -> D
+ end,
+ Atts = [couch_att:transform(data, WithParser, A) || A <- Doc#doc.atts],
+ WaitFun = fun() ->
+ receive
+ {'DOWN', ParserRef, _, _, _} -> ok
+ end
+ end,
+ {ok, Doc#doc{atts = Atts}, WaitFun, Parser};
+ ok ->
+ ok
end.
restart_open_doc_revs(Parser, Ref, NewRef) ->
@@ -457,7 +526,6 @@ restart_open_doc_revs(Parser, Ref, NewRef) ->
flush_parser_messages(Ref),
erlang:error({restart_open_doc_revs, NewRef}).
-
flush_parser_messages(Ref) ->
receive
{headers, Ref, _} ->
diff --git a/src/couch/src/couch_drv.erl b/src/couch/src/couch_drv.erl
index 002facd48..20bb0e80d 100644
--- a/src/couch/src/couch_drv.erl
+++ b/src/couch/src/couch_drv.erl
@@ -13,8 +13,14 @@
-module(couch_drv).
-behaviour(gen_server).
-vsn(1).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-export([start_link/0]).
@@ -27,15 +33,15 @@ start_link() ->
init([]) ->
LibDir = util_driver_dir(),
case erl_ddll:load(LibDir, "couch_icu_driver") of
- ok ->
- {ok, nil};
- {error, already_loaded} ->
- ?LOG_INFO(#{what => reload_couch_icu_driver}),
- couch_log:info("~p reloading couch_icu_driver", [?MODULE]),
- ok = erl_ddll:reload(LibDir, "couch_icu_driver"),
- {ok, nil};
- {error, Error} ->
- {stop, erl_ddll:format_error(Error)}
+ ok ->
+ {ok, nil};
+ {error, already_loaded} ->
+ ?LOG_INFO(#{what => reload_couch_icu_driver}),
+ couch_log:info("~p reloading couch_icu_driver", [?MODULE]),
+ ok = erl_ddll:reload(LibDir, "couch_icu_driver"),
+ {ok, nil};
+ {error, Error} ->
+ {stop, erl_ddll:format_error(Error)}
end.
handle_call(_Request, _From, State) ->
@@ -51,15 +57,13 @@ terminate(_Reason, _State) ->
ok.
code_change(_OldVsn, State, _Extra) ->
-
{ok, State}.
-
% private API
util_driver_dir() ->
case config:get("couchdb", "util_driver_dir", undefined) of
- undefined ->
- couch_util:priv_dir();
- LibDir0 ->
- LibDir0
+ undefined ->
+ couch_util:priv_dir();
+ LibDir0 ->
+ LibDir0
end.
diff --git a/src/couch/src/couch_ejson_compare.erl b/src/couch/src/couch_ejson_compare.erl
index ca36c8656..e5605c324 100644
--- a/src/couch/src/couch_ejson_compare.erl
+++ b/src/couch/src/couch_ejson_compare.erl
@@ -16,7 +16,6 @@
-on_load(init/0).
-
init() ->
NumScheds = erlang:system_info(schedulers),
Dir = code:priv_dir(couch),
@@ -25,53 +24,45 @@ init() ->
% partitioned row comparison
less({p, PA, A}, {p, PB, B}) ->
less([PA, A], [PB, B]);
-
less(A, B) ->
try
less_nif(A, B)
catch
- error:badarg ->
- % Maybe the EJSON structure is too deep, fallback to Erlang land.
- less_erl(A, B)
+ error:badarg ->
+            % Maybe the EJSON structure is too deep, fall back to Erlang land.
+ less_erl(A, B)
end.
less_json_ids({JsonA, IdA}, {JsonB, IdB}) ->
case less(JsonA, JsonB) of
- 0 ->
- IdA < IdB;
- Result ->
- Result < 0
+ 0 ->
+ IdA < IdB;
+ Result ->
+ Result < 0
end.
-less_json(A,B) ->
+less_json(A, B) ->
less(A, B) < 0.
-
less_nif(A, B) ->
less_erl(A, B).
-
-less_erl(A,A) -> 0;
-
-less_erl(A,B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B);
-less_erl(A,_) when is_atom(A) -> -1;
-less_erl(_,B) when is_atom(B) -> 1;
-
-less_erl(A,B) when is_number(A), is_number(B) -> A - B;
-less_erl(A,_) when is_number(A) -> -1;
-less_erl(_,B) when is_number(B) -> 1;
-
-less_erl(A,B) when is_binary(A), is_binary(B) -> couch_util:collate(A,B);
-less_erl(A,_) when is_binary(A) -> -1;
-less_erl(_,B) when is_binary(B) -> 1;
-
-less_erl(A,B) when is_list(A), is_list(B) -> less_list(A,B);
-less_erl(A,_) when is_list(A) -> -1;
-less_erl(_,B) when is_list(B) -> 1;
-
-less_erl({A},{B}) when is_list(A), is_list(B) -> less_props(A,B);
-less_erl({A},_) when is_list(A) -> -1;
-less_erl(_,{B}) when is_list(B) -> 1.
+less_erl(A, A) -> 0;
+less_erl(A, B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B);
+less_erl(A, _) when is_atom(A) -> -1;
+less_erl(_, B) when is_atom(B) -> 1;
+less_erl(A, B) when is_number(A), is_number(B) -> A - B;
+less_erl(A, _) when is_number(A) -> -1;
+less_erl(_, B) when is_number(B) -> 1;
+less_erl(A, B) when is_binary(A), is_binary(B) -> couch_util:collate(A, B);
+less_erl(A, _) when is_binary(A) -> -1;
+less_erl(_, B) when is_binary(B) -> 1;
+less_erl(A, B) when is_list(A), is_list(B) -> less_list(A, B);
+less_erl(A, _) when is_list(A) -> -1;
+less_erl(_, B) when is_list(B) -> 1;
+less_erl({A}, {B}) when is_list(A), is_list(B) -> less_props(A, B);
+less_erl({A}, _) when is_list(A) -> -1;
+less_erl(_, {B}) when is_list(B) -> 1.
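%% Net ordering implied by less_erl/2 together with atom_sort/1 below:
%% null < false < true < numbers < binaries (via couch_util:collate/2)
%% < lists < {Props} objects.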
atom_sort(null) -> 1;
atom_sort(false) -> 2;
@@ -79,33 +70,33 @@ atom_sort(true) -> 3.
less_props([], []) ->
0;
-less_props([], [_|_]) ->
+less_props([], [_ | _]) ->
-1;
less_props(_, []) ->
1;
-less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
+less_props([{AKey, AValue} | RestA], [{BKey, BValue} | RestB]) ->
case couch_util:collate(AKey, BKey) of
- 0 ->
- case less_erl(AValue, BValue) of
0 ->
- less_props(RestA, RestB);
+ case less_erl(AValue, BValue) of
+ 0 ->
+ less_props(RestA, RestB);
+ Result ->
+ Result
+ end;
Result ->
Result
- end;
- Result ->
- Result
end.
less_list([], []) ->
0;
-less_list([], [_|_]) ->
+less_list([], [_ | _]) ->
-1;
less_list(_, []) ->
1;
-less_list([A|RestA], [B|RestB]) ->
- case less_erl(A,B) of
- 0 ->
- less_list(RestA, RestB);
- Result ->
- Result
+less_list([A | RestA], [B | RestB]) ->
+ case less_erl(A, B) of
+ 0 ->
+ less_list(RestA, RestB);
+ Result ->
+ Result
end.
diff --git a/src/couch/src/couch_ejson_size.erl b/src/couch/src/couch_ejson_size.erl
index f5505680f..54a7094ff 100644
--- a/src/couch/src/couch_ejson_size.erl
+++ b/src/couch/src/couch_ejson_size.erl
@@ -14,85 +14,78 @@
-export([encoded_size/1]).
-
%% Compound objects
encoded_size({[]}) ->
- 2; % opening { and closing }
-
+ % opening { and closing }
+ 2;
encoded_size({KVs}) ->
    % Would add 2 for the opening { and closing }, but the LC below
    % accumulates an extra trailing , so the net constant is 2 - 1
- 1 + lists:sum([encoded_size(K) + encoded_size(V) + 2 || {K,V} <- KVs]);
-
+ 1 + lists:sum([encoded_size(K) + encoded_size(V) + 2 || {K, V} <- KVs]);
encoded_size([]) ->
- 2; % opening [ and closing ]
-
+ % opening [ and closing ]
+ 2;
encoded_size(List) when is_list(List) ->
    % 2 is for [ and ], but the LC accumulates an extra trailing , so the
    % net constant is 2 - 1
1 + lists:sum([encoded_size(V) + 1 || V <- List]);
-
%% Floats.
encoded_size(0.0) ->
3;
-
encoded_size(1.0) ->
3;
-
encoded_size(Float) when is_float(Float), Float < 0.0 ->
encoded_size(-Float) + 1;
-
encoded_size(Float) when is_float(Float), Float < 1.0 ->
if
- Float =< 1.0e-300 -> 3; % close enough to 0.0
- Float =< 1.0e-100 -> 6; % Xe-YYY
- Float =< 1.0e-10 -> 5; % Xe-YY
- Float =< 0.01 -> 4; % Xe-Y, 0.0X
- true -> 3 % 0.X
+ % close enough to 0.0
+ Float =< 1.0e-300 -> 3;
+ % Xe-YYY
+ Float =< 1.0e-100 -> 6;
+ % Xe-YY
+ Float =< 1.0e-10 -> 5;
+ % Xe-Y, 0.0X
+ Float =< 0.01 -> 4;
+ % 0.X
+ true -> 3
end;
-
encoded_size(Float) when is_float(Float) ->
if
- Float >= 1.0e100 -> 5; % XeYYY
- Float >= 1.0e10 -> 4; % XeYY
- true -> 3 % XeY, X.Y
+ % XeYYY
+ Float >= 1.0e100 -> 5;
+ % XeYY
+ Float >= 1.0e10 -> 4;
+ % XeY, X.Y
+ true -> 3
end;
-
%% Integers
encoded_size(0) ->
1;
-
encoded_size(Integer) when is_integer(Integer), Integer < 0 ->
encoded_size(-Integer) + 1;
-
encoded_size(Integer) when is_integer(Integer) ->
if
- Integer < 10 -> 1;
- Integer < 100 -> 2;
- Integer < 1000 -> 3;
+ Integer < 10 -> 1;
+ Integer < 100 -> 2;
+ Integer < 1000 -> 3;
Integer < 10000 -> 4;
- true -> trunc(math:log10(Integer)) + 1
+ true -> trunc(math:log10(Integer)) + 1
end;
-
%% Strings
encoded_size(Binary) when is_binary(Binary) ->
2 + byte_size(Binary);
-
%% Special terminal symbols as atoms
encoded_size(null) ->
4;
-
encoded_size(true) ->
4;
-
encoded_size(false) ->
5;
-
%% Other atoms
encoded_size(Atom) when is_atom(Atom) ->
diff --git a/src/couch/src/couch_flags.erl b/src/couch/src/couch_flags.erl
index 5bd133e29..4d20c7a90 100644
--- a/src/couch/src/couch_flags.erl
+++ b/src/couch/src/couch_flags.erl
@@ -62,11 +62,11 @@
-include_lib("couch/include/couch_db.hrl").
--type subject()
- :: map()
- | #httpd{}
- | string()
- | binary().
+-type subject() ::
+ map()
+ | #httpd{}
+ | string()
+ | binary().
-define(SERVICE_ID, feature_flags).
@@ -75,8 +75,10 @@
enabled(Subject) ->
Key = maybe_handle(subject_key, [Subject], fun subject_key/1),
Handle = couch_epi:get_handle({flags, config}),
- lists:usort(enabled(Handle, {<<"/", Key/binary>>})
- ++ enabled(Handle, {Key})).
+ lists:usort(
+ enabled(Handle, {<<"/", Key/binary>>}) ++
+ enabled(Handle, {Key})
+ ).
-spec is_enabled(FlagId :: atom(), subject()) -> boolean().
@@ -102,9 +104,9 @@ enabled(Handle, Key) ->
subject_key(#{} = Db) ->
subject_key(fabric2_db:name(Db));
-subject_key(#httpd{path_parts=[Name | _Rest]}) ->
+subject_key(#httpd{path_parts = [Name | _Rest]}) ->
subject_key(Name);
-subject_key(#httpd{path_parts=[]}) ->
+subject_key(#httpd{path_parts = []}) ->
<<>>;
subject_key(Name) when is_list(Name) ->
subject_key(list_to_binary(Name));
@@ -112,9 +114,10 @@ subject_key(Name) when is_binary(Name) ->
Name.
-spec maybe_handle(
- Function :: atom(),
- Args :: [term()],
- Default :: fun((Args :: [term()]) -> term())) ->
+ Function :: atom(),
+ Args :: [term()],
+ Default :: fun((Args :: [term()]) -> term())
+) ->
term().
maybe_handle(Func, Args, Default) ->
diff --git a/src/couch/src/couch_flags_config.erl b/src/couch/src/couch_flags_config.erl
index 883fe38e8..754a78962 100644
--- a/src/couch/src/couch_flags_config.erl
+++ b/src/couch/src/couch_flags_config.erl
@@ -30,23 +30,26 @@
-define(DATA_INTERVAL, 1000).
-define(MAX_FLAG_NAME_LENGTH, 256).
--type pattern()
- :: binary(). %% non empty binary which optionally can end with *
+-type pattern() ::
+ %% non-empty binary which may optionally end with a trailing *
+ binary().
-type flag_id() :: atom().
-type flags() :: list(flag_id()).
--type parse_pattern()
- :: {
- binary(), %% pattern without trainig * if it is present
- pattern(),
- IsWildCard :: boolean(), %% true if the pattern has training *
- PatternSize :: pos_integer()
- }.
+-type parse_pattern() ::
+ {
+ %% pattern without the trailing * if it is present
+ binary(),
+ pattern(),
+ %% true if the pattern has a trailing *
+ IsWildCard :: boolean(),
+ PatternSize :: pos_integer()
+ }.
--type rule()
- :: {
+-type rule() ::
+ {
parse_pattern(),
EnabledFlags :: flags(),
DisabledFlags :: flags()
@@ -77,7 +80,7 @@ data() ->
data(Config) ->
ByPattern = collect_rules(Config),
- lists:reverse([{{P}, {P, size(P), E -- D}} || {P, {_, E, D}} <- ByPattern]).
+ lists:reverse([{{P}, {P, size(P), E -- D}} || {P, {_, E, D}} <- ByPattern]).
-spec parse_rules([{Key :: string(), Value :: string()}]) -> [rule()].
@@ -98,7 +101,7 @@ parse_rule(Key, Value) when Value =:= "true" orelse Value =:= "false" ->
details => "key must be in the form of `[flags]||pattern`"
}),
false
- end;
+ end;
parse_rule(Key, Value) ->
?LOG_ERROR(#{
what => invalid_flag_setting,
@@ -113,13 +116,16 @@ parse_rule(Key, Value) ->
parse_flags([FlagsBin, PatternBin], Value) ->
case {parse_flags_term(FlagsBin), Value} of
{{error, Errors}, _} ->
- lists:foreach(fun(Error) ->
- ?LOG_ERROR(#{
- what => invalid_flag_setting,
- flags => FlagsBin,
- error => Error
- })
- end, Errors),
+ lists:foreach(
+ fun(Error) ->
+ ?LOG_ERROR(#{
+ what => invalid_flag_setting,
+ flags => FlagsBin,
+ error => Error
+ })
+ end,
+ Errors
+ ),
false;
{Flags, true} ->
{true, {parse_pattern(PatternBin), Flags, []}};
@@ -131,19 +137,21 @@ parse_flags([FlagsBin, PatternBin], Value) ->
[flag_id()] | {error, Failures :: [term()]}.
parse_flags_term(FlagsBin) ->
- {Flags, Errors} = lists:splitwith(fun erlang:is_atom/1,
- [parse_flag(F) || F <- split_by_comma(FlagsBin)]),
+ {Flags, Errors} = lists:splitwith(
+ fun erlang:is_atom/1,
+ [parse_flag(F) || F <- split_by_comma(FlagsBin)]
+ ),
case Errors of
- [] ->
- lists:usort(Flags);
- _ ->
- {error, Errors}
+ [] ->
+ lists:usort(Flags);
+ _ ->
+ {error, Errors}
end.
split_by_comma(Binary) ->
case binary:split(Binary, <<",">>, [global]) of
- [<<>>] -> [];
- Tokens -> Tokens
+ [<<>>] -> [];
+ Tokens -> Tokens
end.
parse_flag(FlagName) when size(FlagName) > ?MAX_FLAG_NAME_LENGTH ->
@@ -151,7 +159,7 @@ parse_flag(FlagName) when size(FlagName) > ?MAX_FLAG_NAME_LENGTH ->
parse_flag(FlagName) ->
FlagNameS = string:strip(binary_to_list(FlagName)),
try
- list_to_existing_atom(FlagNameS)
+ list_to_existing_atom(FlagNameS)
catch
_:_ -> {invalid_flag, FlagName}
end.
@@ -174,8 +182,10 @@ parse_pattern(PatternBin) ->
collect_rules(ConfigData) ->
ByKey = by_key(parse_rules(ConfigData)),
Keys = lists:sort(fun sort_by_length/2, gb_trees:keys(ByKey)),
- FuzzyKeys = lists:sort(fun sort_by_length/2,
- [K || {K, {{_, _, true, _}, _, _}} <- gb_trees:to_list(ByKey)]),
+ FuzzyKeys = lists:sort(
+ fun sort_by_length/2,
+ [K || {K, {{_, _, true, _}, _, _}} <- gb_trees:to_list(ByKey)]
+ ),
Rules = collect_rules(lists:reverse(Keys), FuzzyKeys, ByKey),
gb_trees:to_list(Rules).
@@ -187,17 +197,22 @@ sort_by_length(A, B) ->
-spec by_key(Items :: [rule()]) -> Dictionary :: gb_trees:tree().
by_key(Items) ->
- lists:foldl(fun({{_, K, _, _}, _, _} = Item, Acc) ->
- update_element(Acc, K, Item, fun(Value) ->
- update_flags(Value, Item)
- end)
- end, gb_trees:empty(), Items).
+ lists:foldl(
+ fun({{_, K, _, _}, _, _} = Item, Acc) ->
+ update_element(Acc, K, Item, fun(Value) ->
+ update_flags(Value, Item)
+ end)
+ end,
+ gb_trees:empty(),
+ Items
+ ).
-spec update_element(
- Tree :: gb_trees:tree(),
- Key :: pattern(),
- Default :: rule(),
- Fun :: fun((Item :: rule()) -> rule())) ->
+ Tree :: gb_trees:tree(),
+ Key :: pattern(),
+ Default :: rule(),
+ Fun :: fun((Item :: rule()) -> rule())
+) ->
gb_trees:tree().
update_element(Tree, Key, Default, Fun) ->
@@ -209,9 +224,10 @@ update_element(Tree, Key, Default, Fun) ->
end.
-spec collect_rules(
- Keys :: [pattern()],
- FuzzyKeys :: [pattern()],
- ByKey :: gb_trees:tree()) ->
+ Keys :: [pattern()],
+ FuzzyKeys :: [pattern()],
+ ByKey :: gb_trees:tree()
+) ->
gb_trees:tree().
collect_rules([], _, Acc) ->
@@ -220,9 +236,10 @@ collect_rules([Current | Rest], Items, Acc) ->
collect_rules(Rest, Items -- [Current], inherit_flags(Current, Items, Acc)).
-spec inherit_flags(
- Current :: pattern(),
- FuzzyKeys :: [pattern()],
- ByKey :: gb_trees:tree()) ->
+ Current :: pattern(),
+ FuzzyKeys :: [pattern()],
+ ByKey :: gb_trees:tree()
+) ->
gb_trees:tree().
inherit_flags(_Current, [], Acc) ->
@@ -236,9 +253,10 @@ inherit_flags(Current, [Item | Items], Acc) ->
end.
-spec match_prefix(
- AKey :: pattern(),
- BKey :: pattern(),
- ByKey :: gb_trees:tree()) ->
+ AKey :: pattern(),
+ BKey :: pattern(),
+ ByKey :: gb_trees:tree()
+) ->
boolean().
match_prefix(AKey, BKey, Acc) ->
@@ -259,9 +277,10 @@ match_prefix({{Key0, _, _, _}, _, _}, {{Key1, _, true, S1}, _, _}) ->
end.
-spec update_flags(
- AKey :: pattern(),
- BKey :: pattern(),
- ByKey :: gb_trees:tree()) ->
+ AKey :: pattern(),
+ BKey :: pattern(),
+ ByKey :: gb_trees:tree()
+) ->
gb_trees:tree().
update_flags(AKey, BKey, Acc) ->
@@ -285,6 +304,7 @@ update_flags({Pattern, E0, D0}, {_, E1, D1}) ->
get_config_section(Section) ->
try
config:get(Section)
- catch error:badarg ->
+ catch
+ error:badarg ->
[]
end.
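For orientation, a sketch of the rule strings these parsers consume, per the `[flags]||pattern` error message above (internal calls; it assumes the flag atoms already exist so list_to_existing_atom/1 succeeds, and the field order follows the parse_pattern() type above):

    %% "flag_a,flag_b" -> sorted flag atoms; "/db1*" -> a wildcard pattern
    %% whose first element is the pattern with the trailing * stripped.
    [flag_a, flag_b] = parse_flags_term(<<"flag_a,flag_b">>),
    {<<"/db1">>, _Pattern, true, _Size} = parse_pattern(<<"/db1*">>).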
diff --git a/src/couch/src/couch_hotp.erl b/src/couch/src/couch_hotp.erl
index 4ba81c9bf..cdb8291f3 100644
--- a/src/couch/src/couch_hotp.erl
+++ b/src/couch/src/couch_hotp.erl
@@ -14,15 +14,16 @@
-export([generate/4]).
-generate(Alg, Key, Counter, OutputLen)
- when is_atom(Alg), is_binary(Key), is_integer(Counter), is_integer(OutputLen) ->
+generate(Alg, Key, Counter, OutputLen) when
+ is_atom(Alg), is_binary(Key), is_integer(Counter), is_integer(OutputLen)
+->
Hmac = couch_util:hmac(Alg, Key, <<Counter:64>>),
Offset = binary:last(Hmac) band 16#f,
Code =
((binary:at(Hmac, Offset) band 16#7f) bsl 24) +
- ((binary:at(Hmac, Offset + 1) band 16#ff) bsl 16) +
- ((binary:at(Hmac, Offset + 2) band 16#ff) bsl 8) +
- ((binary:at(Hmac, Offset + 3) band 16#ff)),
+ ((binary:at(Hmac, Offset + 1) band 16#ff) bsl 16) +
+ ((binary:at(Hmac, Offset + 2) band 16#ff) bsl 8) +
+ (binary:at(Hmac, Offset + 3) band 16#ff),
case OutputLen of
6 -> Code rem 1000000;
7 -> Code rem 10000000;
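generate/4 above is RFC 4226's HMAC-and-dynamic-truncation step; a usage sketch (the key and counter are the RFC's sample values, not anything from this patch):

    Key = <<"12345678901234567890">>,            % RFC 4226 sample secret
    Code = couch_hotp:generate(sha, Key, 0, 6),  % 6-digit code for counter 0
    %% The RFC's test vectors give 755224 for this key/counter pair.
    io:format("~6..0B~n", [Code]).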
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 1698a9814..c2ee42e4f 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -16,21 +16,27 @@
-include_lib("couch/include/couch_db.hrl").
--export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
--export([path/1,absolute_uri/2,body_length/1]).
--export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
+-export([header_value/2, header_value/3, qs_value/2, qs_value/3, qs/1, qs_json_value/3]).
+-export([path/1, absolute_uri/2, body_length/1]).
+-export([verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4, error_info/1]).
-export([make_fun_spec_strs/1]).
-export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
--export([parse_form/1,json_body/1,json_body_obj/1,body/1]).
+-export([parse_form/1, json_body/1, json_body_obj/1, body/1]).
-export([doc_etag/1, doc_etag/3, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]).
--export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]).
--export([start_chunked_response/3,send_chunk/2,log_request/2]).
+-export([primary_header_value/2, partition/1, serve_file/3, serve_file/4, server_header/0]).
+-export([start_chunked_response/3, send_chunk/2, log_request/2]).
-export([start_response_length/4, start_response/3, send/2]).
-export([start_json_response/2, start_json_response/3, end_json_response/1]).
--export([send_response/4,send_response_no_cors/4,send_method_not_allowed/2,
- send_error/2,send_error/4, send_redirect/2,send_chunked_error/2]).
--export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
--export([accepted_encodings/1,validate_referer/1,validate_ctype/2]).
+-export([
+ send_response/4,
+ send_response_no_cors/4,
+ send_method_not_allowed/2,
+ send_error/2, send_error/4,
+ send_redirect/2,
+ send_chunked_error/2
+]).
+-export([send_json/2, send_json/3, send_json/4, last_chunk/1, parse_multipart_request/3]).
+-export([accepted_encodings/1, validate_referer/1, validate_ctype/2]).
-export([http_1_0_keep_alive/2]).
-export([validate_host/1]).
-export([validate_bind_address/1]).
@@ -42,31 +48,30 @@
-define(MAX_DRAIN_TIME_MSEC, 1000).
-define(DEFAULT_MAX_HTTP_REQUEST_SIZE, 4294967296).
-
% SpecStr is a string like "{my_module, my_fun}"
% or "{my_module, my_fun, <<"my_arg">>}"
make_arity_1_fun(SpecStr) ->
case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg) -> Mod:Fun(Arg) end
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg) -> Mod:Fun(Arg) end
end.
make_arity_2_fun(SpecStr) ->
case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
end.
make_arity_3_fun(SpecStr) ->
case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
end.
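As the comments around these helpers note, they compile a configuration string into a closure; a quick sketch (erlang:is_atom/1 stands in for a real handler fun):

    %% "{Mod, Fun}" becomes fun(A) -> Mod:Fun(A) end, and
    %% "{Mod, Fun, Extra}" becomes fun(A) -> Mod:Fun(A, Extra) end.
    F = couch_httpd:make_arity_1_fun("{erlang, is_atom}"),
    true = F(hello).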
% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
@@ -105,34 +110,33 @@ validate_referer(Req) ->
Host = host_for_request(Req),
Referer = header_value(Req, "Referer", fail),
case Referer of
- fail ->
- throw({bad_request, <<"Referer header required.">>});
- Referer ->
- {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
- if
- RefererHost =:= Host -> ok;
- true -> throw({bad_request, <<"Referer header must match host.">>})
- end
+ fail ->
+ throw({bad_request, <<"Referer header required.">>});
+ Referer ->
+ {_, RefererHost, _, _, _} = mochiweb_util:urlsplit(Referer),
+ if
+ RefererHost =:= Host -> ok;
+ true -> throw({bad_request, <<"Referer header must match host.">>})
+ end
end.
validate_ctype(Req, Ctype) ->
case header_value(Req, "Content-Type") of
- undefined ->
- throw({bad_ctype, "Content-Type must be "++Ctype});
- ReqCtype ->
- case string:tokens(ReqCtype, ";") of
- [Ctype] -> ok;
- [Ctype | _Rest] -> ok;
- _Else ->
- throw({bad_ctype, "Content-Type must be "++Ctype})
- end
+ undefined ->
+ throw({bad_ctype, "Content-Type must be " ++ Ctype});
+ ReqCtype ->
+ case string:tokens(ReqCtype, ";") of
+ [Ctype] -> ok;
+ [Ctype | _Rest] -> ok;
+ _Else -> throw({bad_ctype, "Content-Type must be " ++ Ctype})
+ end
end.
-
check_max_request_length(Req) ->
Len = list_to_integer(header_value(Req, "Content-Length", "0")),
MaxLen = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", ?DEFAULT_MAX_HTTP_REQUEST_SIZE),
+ "max_http_request_size", ?DEFAULT_MAX_HTTP_REQUEST_SIZE
+ ),
case Len > MaxLen of
true ->
exit({body_too_large, Len});
@@ -140,32 +144,31 @@ check_max_request_length(Req) ->
ok
end.
-
% Utilities
partition(Path) ->
mochiweb_util:partition(Path, "/").
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
+header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_header_value(Key).
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+header_value(#httpd{mochi_req = MochiReq}, Key, Default) ->
case MochiReq:get_header_value(Key) of
- undefined -> Default;
- Value -> Value
+ undefined -> Default;
+ Value -> Value
end.
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+primary_header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_primary_header_value(Key).
-accepted_encodings(#httpd{mochi_req=MochiReq}) ->
+accepted_encodings(#httpd{mochi_req = MochiReq}) ->
case MochiReq:accepted_encodings(["gzip", "identity"]) of
- bad_accept_encoding_value ->
- throw(bad_accept_encoding_value);
- [] ->
- throw(unacceptable_encoding);
- EncList ->
- EncList
+ bad_accept_encoding_value ->
+ throw(bad_accept_encoding_value);
+ [] ->
+ throw(unacceptable_encoding);
+ EncList ->
+ EncList
end.
serve_file(Req, RelativePath, DocumentRoot) ->
@@ -175,7 +178,8 @@ serve_file(Req0, RelativePath0, DocumentRoot0, ExtraHeaders) ->
Headers0 = basic_headers(Req0, ExtraHeaders),
{ok, {Req1, Code1, Headers1, RelativePath1, DocumentRoot1}} =
chttpd_plugin:before_serve_file(
- Req0, 200, Headers0, RelativePath0, DocumentRoot0),
+ Req0, 200, Headers0, RelativePath0, DocumentRoot0
+ ),
log_request(Req1, Code1),
#httpd{mochi_req = MochiReq} = Req1,
{ok, MochiReq:serve_file(RelativePath1, DocumentRoot1, Headers1)}.
@@ -188,53 +192,61 @@ qs_value(Req, Key, Default) ->
qs_json_value(Req, Key, Default) ->
case qs_value(Req, Key, Default) of
- Default ->
- Default;
- Result ->
- ?JSON_DECODE(Result)
+ Default ->
+ Default;
+ Result ->
+ ?JSON_DECODE(Result)
end.
-qs(#httpd{mochi_req=MochiReq}) ->
+qs(#httpd{mochi_req = MochiReq}) ->
MochiReq:parse_qs().
-path(#httpd{mochi_req=MochiReq}) ->
+path(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(path).
-host_for_request(#httpd{mochi_req=MochiReq}) ->
+host_for_request(#httpd{mochi_req = MochiReq}) ->
XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"),
+ "x_forwarded_host", "X-Forwarded-Host"
+ ),
case MochiReq:get_header_value(XHost) of
undefined ->
case MochiReq:get_header_value("Host") of
undefined ->
- {ok, {Address, Port}} = case MochiReq:get(socket) of
- {ssl, SslSocket} -> ssl:sockname(SslSocket);
- Socket -> inet:sockname(Socket)
- end,
+ {ok, {Address, Port}} =
+ case MochiReq:get(socket) of
+ {ssl, SslSocket} -> ssl:sockname(SslSocket);
+ Socket -> inet:sockname(Socket)
+ end,
inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
Value1 ->
Value1
end;
- Value -> Value
+ Value ->
+ Value
end.
-absolute_uri(#httpd{mochi_req=MochiReq}=Req, [$/ | _] = Path) ->
+absolute_uri(#httpd{mochi_req = MochiReq} = Req, [$/ | _] = Path) ->
Host = host_for_request(Req),
XSsl = chttpd_util:get_chttpd_config("x_forwarded_ssl", "X-Forwarded-Ssl"),
- Scheme = case MochiReq:get_header_value(XSsl) of
- "on" -> "https";
- _ ->
- XProto = chttpd_util:get_chttpd_config(
- "x_forwarded_proto", "X-Forwarded-Proto"),
- case MochiReq:get_header_value(XProto) of
- %% Restrict to "https" and "http" schemes only
- "https" -> "https";
- _ -> case MochiReq:get(scheme) of
- https -> "https";
- http -> "http"
- end
- end
- end,
+ Scheme =
+ case MochiReq:get_header_value(XSsl) of
+ "on" ->
+ "https";
+ _ ->
+ XProto = chttpd_util:get_chttpd_config(
+ "x_forwarded_proto", "X-Forwarded-Proto"
+ ),
+ case MochiReq:get_header_value(XProto) of
+ %% Restrict to "https" and "http" schemes only
+ "https" ->
+ "https";
+ _ ->
+ case MochiReq:get(scheme) of
+ https -> "https";
+ http -> "http"
+ end
+ end
+ end,
Scheme ++ "://" ++ Host ++ Path;
absolute_uri(_Req, _Path) ->
throw({bad_request, "path must begin with a /."}).
@@ -245,60 +257,63 @@ unquote(UrlEncodedString) ->
quote(UrlDecodedString) ->
mochiweb_util:quote_plus(UrlDecodedString).
-parse_form(#httpd{mochi_req=MochiReq}) ->
+parse_form(#httpd{mochi_req = MochiReq}) ->
mochiweb_multipart:parse_form(MochiReq).
-recv(#httpd{mochi_req=MochiReq}, Len) ->
+recv(#httpd{mochi_req = MochiReq}, Len) ->
MochiReq:recv(Len).
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
+recv_chunked(#httpd{mochi_req = MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
% Fun is called once with each chunk
% Fun({Length, Binary}, State)
% called with Length == 0 on the final call.
- MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState,
+ MochiReq:stream_body(
+ MaxChunkSize,
+ ChunkFun,
+ InitState,
chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", ?DEFAULT_MAX_HTTP_REQUEST_SIZE)).
+ "max_http_request_size", ?DEFAULT_MAX_HTTP_REQUEST_SIZE
+ )
+ ).
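A sketch of the ChunkFun contract spelled out in the comment above, accumulating the body (it assumes stream_body returns the final fold state; the 64 KiB max chunk size is arbitrary):

    ChunkFun = fun({0, _Footers}, Acc) -> lists:reverse(Acc);
                  ({_Len, Bin}, Acc) -> [Bin | Acc]
               end,
    Chunks = couch_httpd:recv_chunked(Req, 65536, ChunkFun, []),
    Body = iolist_to_binary(Chunks).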
-body_length(#httpd{mochi_req=MochiReq}) ->
+body_length(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(body_length).
-body(#httpd{mochi_req=MochiReq, req_body=undefined}) ->
+body(#httpd{mochi_req = MochiReq, req_body = undefined}) ->
MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", ?DEFAULT_MAX_HTTP_REQUEST_SIZE),
+ "max_http_request_size", ?DEFAULT_MAX_HTTP_REQUEST_SIZE
+ ),
MochiReq:recv_body(MaxSize);
-body(#httpd{req_body=ReqBody}) ->
+body(#httpd{req_body = ReqBody}) ->
ReqBody.
-json_body(#httpd{req_body=undefined} = Httpd) ->
+json_body(#httpd{req_body = undefined} = Httpd) ->
case body(Httpd) of
undefined ->
throw({bad_request, "Missing request body"});
Body ->
?JSON_DECODE(maybe_decompress(Httpd, Body))
end;
-
-json_body(#httpd{req_body=ReqBody}) ->
+json_body(#httpd{req_body = ReqBody}) ->
ReqBody.
json_body_obj(Httpd) ->
case json_body(Httpd) of
{Props} -> {Props};
- _Else ->
- throw({bad_request, "Request body must be a JSON object"})
+ _Else -> throw({bad_request, "Request body must be a JSON object"})
end.
-
maybe_decompress(Httpd, Body) ->
case header_value(Httpd, "Content-Encoding", "identity") of
- "gzip" ->
- zlib:gunzip(Body);
- "identity" ->
- Body;
- Else ->
- throw({bad_ctype, [Else, " is not a supported content encoding."]})
+ "gzip" ->
+ zlib:gunzip(Body);
+ "identity" ->
+ Body;
+ Else ->
+ throw({bad_ctype, [Else, " is not a supported content encoding."]})
end.
-doc_etag(#doc{id=Id, body=Body, revs={Start, [DiskRev|_]}}) ->
+doc_etag(#doc{id = Id, body = Body, revs = {Start, [DiskRev | _]}}) ->
doc_etag(Id, Body, {Start, DiskRev}).
doc_etag(<<"_local/", _/binary>>, Body, {Start, DiskRev}) ->
@@ -308,7 +323,7 @@ doc_etag(_Id, _Body, {Start, DiskRev}) ->
rev_etag({Start, DiskRev}) ->
Rev = couch_doc:rev_to_str({Start, DiskRev}),
- <<$", Rev/binary, $">>.
+ <<$", Rev/binary, $">>.
make_etag(Term) ->
<<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
@@ -316,20 +331,20 @@ make_etag(Term) ->
etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
etag_match(Req, binary_to_list(CurrentEtag));
-
etag_match(Req, CurrentEtag) ->
EtagsToMatch = string:tokens(
- header_value(Req, "If-None-Match", ""), ", "),
+ header_value(Req, "If-None-Match", ""), ", "
+ ),
lists:member(CurrentEtag, EtagsToMatch).
etag_respond(Req, CurrentEtag, RespFun) ->
case etag_match(Req, CurrentEtag) of
- true ->
- % the client has this in their cache.
- send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
- false ->
- % Run the function.
- RespFun()
+ true ->
+ % the client has this in their cache.
+ send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
+ false ->
+ % Run the function.
+ RespFun()
end.
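etag_respond/3 is the usual conditional-GET shortcut: take the 304 path when the client's If-None-Match matches, otherwise run the response fun. A handler-side sketch (Req and Body are assumed to be in scope):

    ETag = couch_httpd:make_etag(Body),
    couch_httpd:etag_respond(Req, ETag, fun() ->
        couch_httpd:send_json(Req, 200, [{"ETag", ETag}], Body)
    end).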
etag_maybe(Req, RespFun) ->
@@ -340,15 +355,15 @@ etag_maybe(Req, RespFun) ->
send_response(Req, 304, [{"ETag", ETag}], <<>>)
end.
-verify_is_server_admin(#httpd{user_ctx=UserCtx}) ->
+verify_is_server_admin(#httpd{user_ctx = UserCtx}) ->
verify_is_server_admin(UserCtx);
-verify_is_server_admin(#user_ctx{roles=Roles}) ->
+verify_is_server_admin(#user_ctx{roles = Roles}) ->
case lists:member(<<"_admin">>, Roles) of
- true -> ok;
- false -> throw({unauthorized, <<"You are not a server admin.">>})
+ true -> ok;
+ false -> throw({unauthorized, <<"You are not a server admin.">>})
end.
-log_request(#httpd{mochi_req=MochiReq,peer=Peer}=Req, Code) ->
+log_request(#httpd{mochi_req = MochiReq, peer = Peer} = Req, Code) ->
case erlang:get(dont_log_request) of
true ->
ok;
@@ -375,16 +390,16 @@ log_response(Code, Body) ->
couch_log:error("httpd ~p error response:~n ~s", [Code, Body])
end.
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers0, Length) ->
+start_response_length(#httpd{mochi_req = MochiReq} = Req, Code, Headers0, Length) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, Length, start_response_length),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
-start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
+start_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, undefined, start_response),
case MochiReq:get(method) of
@@ -402,9 +417,9 @@ send(Resp, Data) ->
no_resp_conn_header([]) ->
true;
-no_resp_conn_header([{Hdr, V}|Rest]) when is_binary(Hdr)->
- no_resp_conn_header([{?b2l(Hdr), V}|Rest]);
-no_resp_conn_header([{Hdr, _}|Rest]) when is_list(Hdr)->
+no_resp_conn_header([{Hdr, V} | Rest]) when is_binary(Hdr) ->
+ no_resp_conn_header([{?b2l(Hdr), V} | Rest]);
+no_resp_conn_header([{Hdr, _} | Rest]) when is_list(Hdr) ->
case string:to_lower(Hdr) of
"connection" -> false;
_ -> no_resp_conn_header(Rest)
@@ -421,12 +436,12 @@ http_1_0_keep_alive(Req, Headers) ->
false -> Headers
end.
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
+start_chunked_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
Headers1 = add_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, chunked, respond),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
@@ -435,8 +450,9 @@ send_chunk({remote, Pid, Ref} = Resp, Data) ->
{ok, Resp};
send_chunk(Resp, Data) ->
case iolist_size(Data) of
- 0 -> ok; % do nothing
- _ -> Resp:write_chunk(Data)
+ % do nothing
+ 0 -> ok;
+ _ -> Resp:write_chunk(Data)
end,
{ok, Resp}.
@@ -451,17 +467,23 @@ send_response(Req, Code, Headers0, Body) ->
Headers1 = chttpd_cors:headers(Req, Headers0),
send_response_no_cors(Req, Code, Headers1, Body).
-send_response_no_cors(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
+send_response_no_cors(#httpd{mochi_req = MochiReq} = Req, Code, Headers, Body) ->
Headers1 = http_1_0_keep_alive(MochiReq, Headers),
Headers2 = basic_headers_no_cors(Req, Headers1),
Headers3 = chttpd_xframe_options:header(Req, Headers2),
- Headers4 = chttpd_prefer_header:maybe_return_minimal(Req, Headers3),
+ Headers4 = chttpd_prefer_header:maybe_return_minimal(Req, Headers3),
Resp = handle_response(Req, Code, Headers4, Body, respond),
log_response(Code, Body),
{ok, Resp}.
send_method_not_allowed(Req, Methods) ->
- send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")).
+ send_error(
+ Req,
+ 405,
+ [{"Allow", Methods}],
+ <<"method_not_allowed">>,
+ ?l2b("Only " ++ Methods ++ " allowed")
+ ).
send_json(Req, Value) ->
send_json(Req, 200, Value).
@@ -504,13 +526,18 @@ initialize_jsonp(Req) ->
_ -> ok
end,
case get(jsonp) of
- no_jsonp -> [];
- [] -> [];
+ no_jsonp ->
+ [];
+ [] ->
+ [];
CallBack ->
try
% make sure jsonp is configured on (default off)
- case chttpd_util:get_chttpd_config_boolean(
- "allow_jsonp", false) of
+ case
+ chttpd_util:get_chttpd_config_boolean(
+ "allow_jsonp", false
+ )
+ of
true ->
validate_callback(CallBack);
false ->
@@ -550,12 +577,10 @@ validate_callback([Char | Rest]) ->
_ when Char == $_ -> ok;
_ when Char == $[ -> ok;
_ when Char == $] -> ok;
- _ ->
- throw({bad_request, invalid_callback})
+ _ -> throw({bad_request, invalid_callback})
end,
validate_callback(Rest).
-
error_info({Error, Reason}) when is_list(Reason) ->
error_info({Error, ?l2b(Reason)});
error_info(bad_request) ->
@@ -584,8 +609,10 @@ error_info({forbidden, Msg}) ->
error_info({unauthorized, Msg}) ->
{401, <<"unauthorized">>, Msg};
error_info(file_exists) ->
- {412, <<"file_exists">>, <<"The database could not be "
- "created, the file already exists.">>};
+ {412, <<"file_exists">>, <<
+ "The database could not be "
+ "created, the file already exists."
+ >>};
error_info(request_entity_too_large) ->
{413, <<"too_large">>, <<"the request entity is too large">>};
error_info({request_entity_too_large, {attachment, AttName}}) ->
@@ -599,9 +626,10 @@ error_info({bad_ctype, Reason}) ->
error_info(requested_range_not_satisfiable) ->
{416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
error_info({error, {illegal_database_name, Name}}) ->
- Message = <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
- "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
- "are allowed. Must begin with a letter.">>,
+ Message =
+ <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
+ "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
+ "are allowed. Must begin with a letter.">>,
{400, <<"illegal_database_name">>, Message};
error_info({missing_stub, Reason}) ->
{412, <<"missing_stub">>, Reason};
@@ -612,64 +640,102 @@ error_info({Error, Reason}) ->
error_info(Error) ->
{500, <<"unknown_error">>, couch_util:to_binary(Error)}.
-error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) ->
- if Code == 401 ->
- % this is where the basic auth popup is triggered
- case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
- undefined ->
- case chttpd_util:get_chttpd_config("WWW-Authenticate") of
- undefined ->
- % If the client is a browser and the basic auth popup isn't turned on
- % redirect to the session page.
- case ErrorStr of
- <<"unauthorized">> ->
- case chttpd_util:get_chttpd_auth_config(
- "authentication_redirect", "/_utils/session.html") of
- undefined -> {Code, []};
- AuthRedirect ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true ->
- % send the browser popup header no matter what if we are require_valid_user
- {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
- false ->
- case MochiReq:accepts_content_type("application/json") of
- true ->
- {Code, []};
- false ->
- case MochiReq:accepts_content_type("text/html") of
- true ->
- % Redirect to the path the user requested, not
- % the one that is used internally.
- UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
- undefined ->
- MochiReq:get(path);
- VHostPath ->
- VHostPath
- end,
- RedirectLocation = lists:flatten([
- AuthRedirect,
- "?return=", couch_util:url_encode(UrlReturnRaw),
- "&reason=", couch_util:url_encode(ReasonStr)
- ]),
- {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
- false ->
+error_headers(#httpd{mochi_req = MochiReq} = Req, Code, ErrorStr, ReasonStr) ->
+ if
+ Code == 401 ->
+ % this is where the basic auth popup is triggered
+ case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
+ undefined ->
+ case chttpd_util:get_chttpd_config("WWW-Authenticate") of
+ undefined ->
+ % If the client is a browser and the basic auth popup isn't turned on
+ % redirect to the session page.
+ case ErrorStr of
+ <<"unauthorized">> ->
+ case
+ chttpd_util:get_chttpd_auth_config(
+ "authentication_redirect", "/_utils/session.html"
+ )
+ of
+ undefined ->
+ {Code, []};
+ AuthRedirect ->
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true ->
+ % always send the browser popup header when require_valid_user is set
+ {Code, [
+ {"WWW-Authenticate",
+ "Basic realm=\"server\""}
+ ]};
+ false ->
+ case
+ MochiReq:accepts_content_type(
+ "application/json"
+ )
+ of
+ true ->
+ {Code, []};
+ false ->
+ case
+ MochiReq:accepts_content_type(
+ "text/html"
+ )
+ of
+ true ->
+ % Redirect to the path the user requested, not
+ % the one that is used internally.
+ UrlReturnRaw =
+ case
+ MochiReq:get_header_value(
+ "x-couchdb-vhost-path"
+ )
+ of
+ undefined ->
+ MochiReq:get(path);
+ VHostPath ->
+ VHostPath
+ end,
+ RedirectLocation = lists:flatten(
+ [
+ AuthRedirect,
+ "?return=",
+ couch_util:url_encode(
+ UrlReturnRaw
+ ),
+ "&reason=",
+ couch_util:url_encode(
+ ReasonStr
+ )
+ ]
+ ),
+ {302, [
+ {"Location",
+ absolute_uri(
+ Req,
+ RedirectLocation
+ )}
+ ]};
+ false ->
+ {Code, []}
+ end
+ end
+ end
+ end;
+ _Else ->
{Code, []}
- end
- end
- end
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
end;
- _Else ->
- {Code, []}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
- end;
- true ->
- {Code, []}
+ true ->
+ {Code, []}
end.
send_error(Req, Error) ->
@@ -681,25 +747,33 @@ send_error(Req, Code, ErrorStr, ReasonStr) ->
send_error(Req, Code, [], ErrorStr, ReasonStr).
send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
- send_json(Req, Code, Headers,
- {[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]}).
+ send_json(
+ Req,
+ Code,
+ Headers,
+ {[
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ ]}
+ ).
% give the option for list functions to output html or other raw errors
send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
send_chunk(Resp, Reason),
last_chunk(Resp);
-
send_chunked_error(Resp, Error) ->
{Code, ErrorStr, ReasonStr} = error_info(Error),
- JsonError = {[{<<"code">>, Code},
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]},
- send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
+ JsonError =
+ {[
+ {<<"code">>, Code},
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ ]},
+ send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(JsonError), $\n])),
last_chunk(Resp).
send_redirect(Req, Path) ->
- send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
+ send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
negotiate_content_type(_Req) ->
case get(jsonp) of
@@ -709,27 +783,33 @@ negotiate_content_type(_Req) ->
end.
server_header() ->
- [{"Server", "CouchDB/" ++ couch_server:get_version() ++
- " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}].
-
+ [
+ {"Server",
+ "CouchDB/" ++ couch_server:get_version() ++
+ " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}
+ ].
-record(mp, {boundary, buffer, data_fun, callback}).
-
parse_multipart_request(ContentType, DataFun, Callback) ->
Boundary0 = iolist_to_binary(get_boundary(ContentType)),
Boundary = <<"\r\n--", Boundary0/binary>>,
- Mp = #mp{boundary= Boundary,
- buffer= <<>>,
- data_fun=DataFun,
- callback=Callback},
- {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
- fun nil_callback/1),
- #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
- parse_part_header(Mp2),
+ Mp = #mp{
+ boundary = Boundary,
+ buffer = <<>>,
+ data_fun = DataFun,
+ callback = Callback
+ },
+ {Mp2, _NilCallback} = read_until(
+ Mp,
+ <<"--", Boundary0/binary>>,
+ fun nil_callback/1
+ ),
+ #mp{buffer = Buffer, data_fun = DataFun2, callback = Callback2} =
+ parse_part_header(Mp2),
{Buffer, DataFun2, Callback2}.
-nil_callback(_Data)->
+nil_callback(_Data) ->
fun nil_callback/1.
get_boundary({"multipart/" ++ _, Opts}) ->
@@ -738,83 +818,102 @@ get_boundary({"multipart/" ++ _, Opts}) ->
S
end;
get_boundary(ContentType) ->
- {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
+ {"multipart/" ++ _, Opts} = mochiweb_util:parse_header(ContentType),
get_boundary({"multipart/", Opts}).
-
-
split_header(<<>>) ->
[];
split_header(Line) ->
- {Name, Rest} = lists:splitwith(fun (C) -> C =/= $: end,
- binary_to_list(Line)),
- [$: | Value] = case Rest of
- [] ->
- throw({bad_request, <<"bad part header">>});
- Res ->
- Res
- end,
- [{string:to_lower(string:strip(Name)),
- mochiweb_util:parse_header(Value)}].
+ {Name, Rest} = lists:splitwith(
+ fun(C) -> C =/= $: end,
+ binary_to_list(Line)
+ ),
+ [$: | Value] =
+ case Rest of
+ [] ->
+ throw({bad_request, <<"bad part header">>});
+ Res ->
+ Res
+ end,
+ [{string:to_lower(string:strip(Name)), mochiweb_util:parse_header(Value)}].
-read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
+read_until(#mp{data_fun = DataFun, buffer = Buffer} = Mp, Pattern, Callback) ->
case couch_util:find_in_binary(Pattern, Buffer) of
- not_found ->
- Callback2 = Callback(Buffer),
- {Buffer2, DataFun2} = DataFun(),
- Buffer3 = iolist_to_binary(Buffer2),
- read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
- {partial, 0} ->
- {NewData, DataFun2} = DataFun(),
- read_until(Mp#mp{data_fun=DataFun2,
- buffer= iolist_to_binary([Buffer,NewData])},
- Pattern, Callback);
- {partial, Skip} ->
- <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
- Callback2 = Callback(DataChunk),
- {NewData, DataFun2} = DataFun(),
- read_until(Mp#mp{data_fun=DataFun2,
- buffer= iolist_to_binary([Rest | NewData])},
- Pattern, Callback2);
- {exact, 0} ->
- PatternLen = size(Pattern),
- <<_:PatternLen/binary, Rest/binary>> = Buffer,
- {Mp#mp{buffer= Rest}, Callback};
- {exact, Skip} ->
- PatternLen = size(Pattern),
- <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
- Callback2 = Callback(DataChunk),
- {Mp#mp{buffer= Rest}, Callback2}
+ not_found ->
+ Callback2 = Callback(Buffer),
+ {Buffer2, DataFun2} = DataFun(),
+ Buffer3 = iolist_to_binary(Buffer2),
+ read_until(Mp#mp{data_fun = DataFun2, buffer = Buffer3}, Pattern, Callback2);
+ {partial, 0} ->
+ {NewData, DataFun2} = DataFun(),
+ read_until(
+ Mp#mp{
+ data_fun = DataFun2,
+ buffer = iolist_to_binary([Buffer, NewData])
+ },
+ Pattern,
+ Callback
+ );
+ {partial, Skip} ->
+ <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
+ Callback2 = Callback(DataChunk),
+ {NewData, DataFun2} = DataFun(),
+ read_until(
+ Mp#mp{
+ data_fun = DataFun2,
+ buffer = iolist_to_binary([Rest | NewData])
+ },
+ Pattern,
+ Callback2
+ );
+ {exact, 0} ->
+ PatternLen = size(Pattern),
+ <<_:PatternLen/binary, Rest/binary>> = Buffer,
+ {Mp#mp{buffer = Rest}, Callback};
+ {exact, Skip} ->
+ PatternLen = size(Pattern),
+ <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
+ Callback2 = Callback(DataChunk),
+ {Mp#mp{buffer = Rest}, Callback2}
end.
-
-parse_part_header(#mp{callback=UserCallBack}=Mp) ->
- {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
- fun(Next) -> acc_callback(Next, []) end),
+parse_part_header(#mp{callback = UserCallBack} = Mp) ->
+ {Mp2, AccCallback} = read_until(
+ Mp,
+ <<"\r\n\r\n">>,
+ fun(Next) -> acc_callback(Next, []) end
+ ),
HeaderData = AccCallback(get_data),
Headers =
- lists:foldl(fun(Line, Acc) ->
- split_header(Line) ++ Acc
- end, [], re:split(HeaderData,<<"\r\n">>, [])),
+ lists:foldl(
+ fun(Line, Acc) ->
+ split_header(Line) ++ Acc
+ end,
+ [],
+ re:split(HeaderData, <<"\r\n">>, [])
+ ),
NextCallback = UserCallBack({headers, Headers}),
- parse_part_body(Mp2#mp{callback=NextCallback}).
-
-parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
- {Mp2, WrappedCallback} = read_until(Mp, Prefix,
- fun(Data) -> body_callback_wrapper(Data, Callback) end),
+ parse_part_body(Mp2#mp{callback = NextCallback}).
+
+parse_part_body(#mp{boundary = Prefix, callback = Callback} = Mp) ->
+ {Mp2, WrappedCallback} = read_until(
+ Mp,
+ Prefix,
+ fun(Data) -> body_callback_wrapper(Data, Callback) end
+ ),
Callback2 = WrappedCallback(get_callback),
Callback3 = Callback2(body_end),
- case check_for_last(Mp2#mp{callback=Callback3}) of
- {last, #mp{callback=Callback3}=Mp3} ->
- Mp3#mp{callback=Callback3(eof)};
- {more, Mp3} ->
- parse_part_header(Mp3)
+ case check_for_last(Mp2#mp{callback = Callback3}) of
+ {last, #mp{callback = Callback3} = Mp3} ->
+ Mp3#mp{callback = Callback3(eof)};
+ {more, Mp3} ->
+ parse_part_header(Mp3)
end.
-acc_callback(get_data, Acc)->
+acc_callback(get_data, Acc) ->
iolist_to_binary(lists:reverse(Acc));
-acc_callback(Data, Acc)->
+acc_callback(Data, Acc) ->
fun(Next) -> acc_callback(Next, [Data | Acc]) end.
body_callback_wrapper(get_callback, Callback) ->
@@ -823,18 +922,23 @@ body_callback_wrapper(Data, Callback) ->
Callback2 = Callback({body, Data}),
fun(Next) -> body_callback_wrapper(Next, Callback2) end.
-
-check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
+check_for_last(#mp{buffer = Buffer, data_fun = DataFun} = Mp) ->
case Buffer of
- <<"--",_/binary>> -> {last, Mp};
- <<_, _, _/binary>> -> {more, Mp};
- _ -> % not long enough
- {Data, DataFun2} = DataFun(),
- check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
- data_fun = DataFun2})
+ <<"--", _/binary>> ->
+ {last, Mp};
+ <<_, _, _/binary>> ->
+ {more, Mp};
+ % not long enough
+ _ ->
+ {Data, DataFun2} = DataFun(),
+ check_for_last(Mp#mp{
+ buffer = <<Buffer/binary, Data/binary>>,
+ data_fun = DataFun2
+ })
end.
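Taken together, parse_part_header/1 and parse_part_body/1 drive a continuation-style callback: every event returns the fun to call for the next event. A consumer sketch (the printing is illustrative; DataFun/0 must return {MoreData, NextDataFun}):

    Callback = fun Loop(Event) ->
        case Event of
            {headers, Hs} -> io:format("part headers: ~p~n", [Hs]), Loop;
            {body, Data}  -> io:format("~p body bytes~n", [iolist_size(Data)]), Loop;
            body_end      -> Loop;
            eof           -> done
        end
    end,
    couch_httpd:parse_multipart_request(ContentType, DataFun, Callback).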
-validate_bind_address(any) -> ok;
+validate_bind_address(any) ->
+ ok;
validate_bind_address(Address) ->
case inet_parse:address(Address) of
{ok, _} -> ok;
@@ -852,9 +956,9 @@ basic_headers(Req, Headers0) ->
chttpd_cors:headers(Req, Headers2).
basic_headers_no_cors(Req, Headers) ->
- Headers
- ++ server_header()
- ++ couch_httpd_auth:cookie_auth_header(Req, Headers).
+ Headers ++
+ server_header() ++
+ couch_httpd_auth:cookie_auth_header(Req, Headers).
handle_response(Req0, Code0, Headers0, Args0, Type) ->
{ok, {Req1, Code1, Headers1, Args1}} = before_response(Req0, Code0, Headers0, Args0),
@@ -896,7 +1000,6 @@ http_respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
http_respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
MochiReq:Type({Code, Headers, Args}).
-
%%%%%%%% module tests below %%%%%%%%
-ifdef(TEST).
@@ -913,27 +1016,40 @@ maybe_add_default_headers_test_() ->
MustRevalidate = {"Cache-Control", "must-revalidate"},
ApplicationJavascript = {"Content-Type", "application/javascript"},
Cases = [
- {[],
- [MustRevalidate, ApplicationJavascript],
- "Should add Content-Type and Cache-Control to empty heaeders"},
-
- {[NoCache],
- [NoCache, ApplicationJavascript],
- "Should add Content-Type only if Cache-Control is present"},
-
- {[ApplicationJson],
- [MustRevalidate, ApplicationJson],
- "Should add Cache-Control if Content-Type is present"},
-
- {[NoCache, ApplicationJson],
- [NoCache, ApplicationJson],
- "Should not add headers if Cache-Control and Content-Type are there"}
+ {
+ [],
+ [MustRevalidate, ApplicationJavascript],
+ "Should add Content-Type and Cache-Control to empty heaeders"
+ },
+
+ {
+ [NoCache],
+ [NoCache, ApplicationJavascript],
+ "Should add Content-Type only if Cache-Control is present"
+ },
+
+ {
+ [ApplicationJson],
+ [MustRevalidate, ApplicationJson],
+ "Should add Cache-Control if Content-Type is present"
+ },
+
+ {
+ [NoCache, ApplicationJson],
+ [NoCache, ApplicationJson],
+ "Should not add headers if Cache-Control and Content-Type are there"
+ }
],
- Tests = lists:map(fun({InitialHeaders, ProperResult, Desc}) ->
- {Desc,
- ?_assertEqual(ProperResult,
- maybe_add_default_headers(DummyRequest, InitialHeaders))}
- end, Cases),
+ Tests = lists:map(
+ fun({InitialHeaders, ProperResult, Desc}) ->
+ {Desc,
+ ?_assertEqual(
+ ProperResult,
+ maybe_add_default_headers(DummyRequest, InitialHeaders)
+ )}
+ end,
+ Cases
+ ),
{"Tests adding default headers", Tests}.
log_request_test_() ->
@@ -953,27 +1069,24 @@ log_request_test_() ->
[
fun() -> should_accept_code_and_message(true) end,
fun() -> should_accept_code_and_message(false) end
- ]
- }.
+ ]}.
should_accept_code_and_message(DontLogFlag) ->
erlang:put(dont_log_response, DontLogFlag),
- {"with dont_log_response = " ++ atom_to_list(DontLogFlag),
- [
- {"Should accept code 200 and string message",
- ?_assertEqual(ok, log_response(200, "OK"))},
- {"Should accept code 200 and JSON message",
+ {"with dont_log_response = " ++ atom_to_list(DontLogFlag), [
+ {"Should accept code 200 and string message", ?_assertEqual(ok, log_response(200, "OK"))},
+ {"Should accept code 200 and JSON message",
?_assertEqual(ok, log_response(200, {json, {[{ok, true}]}}))},
- {"Should accept code >= 400 and string error",
+ {"Should accept code >= 400 and string error",
?_assertEqual(ok, log_response(405, method_not_allowed))},
- {"Should accept code >= 400 and JSON error",
- ?_assertEqual(ok,
- log_response(405, {json, {[{error, method_not_allowed}]}}))},
- {"Should accept code >= 500 and string error",
- ?_assertEqual(ok, log_response(500, undef))},
- {"Should accept code >= 500 and JSON error",
+ {"Should accept code >= 400 and JSON error",
+ ?_assertEqual(
+ ok,
+ log_response(405, {json, {[{error, method_not_allowed}]}})
+ )},
+ {"Should accept code >= 500 and string error", ?_assertEqual(ok, log_response(500, undef))},
+ {"Should accept code >= 500 and JSON error",
?_assertEqual(ok, log_response(500, {json, {[{error, undef}]}}))}
- ]
- }.
+ ]}.
-endif.
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 9e733aec3..f0ca2d56c 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -19,74 +19,107 @@
-export([party_mode_handler/1]).
--export([default_authentication_handler/1, default_authentication_handler/2,
- special_test_authentication_handler/1]).
--export([cookie_authentication_handler/1, cookie_authentication_handler/2]).
+-export([
+ default_authentication_handler/1,
+ default_authentication_handler/2,
+ special_test_authentication_handler/1
+]).
+-export([
+ cookie_authentication_handler/1,
+ cookie_authentication_handler/2
+]).
-export([null_authentication_handler/1]).
--export([proxy_authentication_handler/1, proxy_authentification_handler/1]).
+-export([
+ proxy_authentication_handler/1,
+ proxy_authentification_handler/1
+]).
-export([cookie_auth_header/2]).
--export([handle_session_req/1, handle_session_req/2]).
-
--export([authenticate/2, verify_totp/2]).
--export([ensure_cookie_auth_secret/0, make_cookie_time/0]).
--export([cookie_auth_cookie/4, cookie_scheme/1]).
+-export([
+ handle_session_req/1,
+ handle_session_req/2
+]).
+
+-export([
+ authenticate/2,
+ verify_totp/2
+]).
+-export([
+ ensure_cookie_auth_secret/0,
+ make_cookie_time/0
+]).
+-export([
+ cookie_auth_cookie/4,
+ cookie_scheme/1
+]).
-export([maybe_value/3]).
-export([jwt_authentication_handler/1]).
--import(couch_httpd, [header_value/2, send_json/2, send_json/4, send_method_not_allowed/2, maybe_decompress/2]).
+-import(couch_httpd, [
+ header_value/2,
+ send_json/2, send_json/4,
+ send_method_not_allowed/2,
+ maybe_decompress/2
+]).
--compile({no_auto_import,[integer_to_binary/1, integer_to_binary/2]}).
+-compile({no_auto_import, [integer_to_binary/1, integer_to_binary/2]}).
party_mode_handler(Req) ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true ->
- throw({unauthorized, <<"Authentication required.">>});
- false ->
- Req#httpd{user_ctx=#user_ctx{}}
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true ->
+ throw({unauthorized, <<"Authentication required.">>});
+ false ->
+ Req#httpd{user_ctx = #user_ctx{}}
end.
special_test_authentication_handler(Req) ->
case header_value(Req, "WWW-Authenticate") of
- "X-Couch-Test-Auth " ++ NamePass ->
- % NamePass is a colon separated string: "joe schmoe:a password".
- [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
- case {Name, Pass} of
- {"Jan Lehnardt", "apple"} -> ok;
- {"Christopher Lenz", "dog food"} -> ok;
- {"Noah Slater", "biggiesmalls endian"} -> ok;
- {"Chris Anderson", "mp3"} -> ok;
- {"Damien Katz", "pecan pie"} -> ok;
- {_, _} ->
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end,
- Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
- _ ->
- % No X-Couch-Test-Auth credentials sent, give admin access so the
- % previous authentication can be restored after the test
- Req#httpd{user_ctx=?ADMIN_USER}
+ "X-Couch-Test-Auth " ++ NamePass ->
+ % NamePass is a colon separated string: "joe schmoe:a password".
+ [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
+ case {Name, Pass} of
+ {"Jan Lehnardt", "apple"} -> ok;
+ {"Christopher Lenz", "dog food"} -> ok;
+ {"Noah Slater", "biggiesmalls endian"} -> ok;
+ {"Chris Anderson", "mp3"} -> ok;
+ {"Damien Katz", "pecan pie"} -> ok;
+ {_, _} -> throw({unauthorized, <<"Name or password is incorrect.">>})
+ end,
+ Req#httpd{user_ctx = #user_ctx{name = ?l2b(Name)}};
+ _ ->
+ % No X-Couch-Test-Auth credentials sent, give admin access so the
+ % previous authentication can be restored after the test
+ Req#httpd{user_ctx = ?ADMIN_USER}
end.
basic_name_pw(Req) ->
AuthorizationHeader = header_value(Req, "Authorization"),
case AuthorizationHeader of
- "Basic " ++ Base64Value ->
- try re:split(base64:decode(Base64Value), ":",
- [{return, list}, {parts, 2}]) of
- ["_", "_"] ->
- % special name and pass to be logged out
- nil;
- [User, Pass] ->
- {User, Pass};
+ "Basic " ++ Base64Value ->
+ try
+ re:split(
+ base64:decode(Base64Value),
+ ":",
+ [{return, list}, {parts, 2}]
+ )
+ of
+ ["_", "_"] ->
+ % special name and pass to be logged out
+ nil;
+ [User, Pass] ->
+ {User, Pass};
+ _ ->
+ nil
+ catch
+ error:function_clause ->
+ throw({bad_request, "Authorization header has invalid base64 value"})
+ end;
_ ->
nil
- catch
- error:function_clause ->
- throw({bad_request, "Authorization header has invalid base64 value"})
- end;
- _ ->
- nil
end.
default_authentication_handler(Req) ->
@@ -94,42 +127,47 @@ default_authentication_handler(Req) ->
default_authentication_handler(Req, AuthModule) ->
case basic_name_pw(Req) of
- {User, Pass} ->
- case AuthModule:get_user_creds(Req, User) of
- nil ->
- throw({unauthorized, <<"Name or password is incorrect.">>});
- {ok, UserProps, _AuthCtx} ->
- reject_if_totp(UserProps),
- UserName = ?l2b(User),
- Password = ?l2b(Pass),
- case authenticate(Password, UserProps) of
- true ->
- Req#httpd{user_ctx=#user_ctx{
- name=UserName,
- roles=couch_util:get_value(<<"roles">>, UserProps, [])
- }};
- false ->
- authentication_warning(Req, UserName),
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end
- end;
- nil ->
- case couch_server:has_admins() of
- true ->
- Req;
- false ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true -> Req;
- % If no admins, and no user required, then everyone is admin!
- % Yay, admin party!
- false -> Req#httpd{user_ctx=?ADMIN_USER}
+ {User, Pass} ->
+ case AuthModule:get_user_creds(Req, User) of
+ nil ->
+ throw({unauthorized, <<"Name or password is incorrect.">>});
+ {ok, UserProps, _AuthCtx} ->
+ reject_if_totp(UserProps),
+ UserName = ?l2b(User),
+ Password = ?l2b(Pass),
+ case authenticate(Password, UserProps) of
+ true ->
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = UserName,
+ roles = couch_util:get_value(<<"roles">>, UserProps, [])
+ }
+ };
+ false ->
+ authentication_warning(Req, UserName),
+ throw({unauthorized, <<"Name or password is incorrect.">>})
+ end
+ end;
+ nil ->
+ case couch_server:has_admins() of
+ true ->
+ Req;
+ false ->
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true -> Req;
+ % If no admins, and no user required, then everyone is admin!
+ % Yay, admin party!
+ false -> Req#httpd{user_ctx = ?ADMIN_USER}
+ end
end
- end
end.
null_authentication_handler(Req) ->
- Req#httpd{user_ctx=?ADMIN_USER}.
+ Req#httpd{user_ctx = ?ADMIN_USER}.
%% @doc proxy auth handler.
%
@@ -156,39 +194,53 @@ proxy_authentication_handler(Req) ->
%% @deprecated
proxy_authentification_handler(Req) ->
proxy_authentication_handler(Req).
-
+
proxy_auth_user(Req) ->
XHeaderUserName = chttpd_util:get_chttpd_auth_config(
- "x_auth_username", "X-Auth-CouchDB-UserName"),
+ "x_auth_username", "X-Auth-CouchDB-UserName"
+ ),
XHeaderRoles = chttpd_util:get_chttpd_auth_config(
- "x_auth_roles", "X-Auth-CouchDB-Roles"),
+ "x_auth_roles", "X-Auth-CouchDB-Roles"
+ ),
XHeaderToken = chttpd_util:get_chttpd_auth_config(
- "x_auth_token", "X-Auth-CouchDB-Token"),
+ "x_auth_token", "X-Auth-CouchDB-Token"
+ ),
case header_value(Req, XHeaderUserName) of
- undefined -> nil;
+ undefined ->
+ nil;
UserName ->
- Roles = case header_value(Req, XHeaderRoles) of
- undefined -> [];
- Else ->
- [?l2b(R) || R <- string:tokens(Else, ",")]
- end,
- case chttpd_util:get_chttpd_auth_config_boolean(
- "proxy_use_secret", false) of
+ Roles =
+ case header_value(Req, XHeaderRoles) of
+ undefined -> [];
+ Else -> [?l2b(R) || R <- string:tokens(Else, ",")]
+ end,
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "proxy_use_secret", false
+ )
+ of
true ->
case chttpd_util:get_chttpd_auth_config("secret") of
undefined ->
- Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
+ Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}};
Secret ->
- ExpectedToken = couch_util:to_hex(couch_util:hmac(sha, Secret, UserName)),
+ ExpectedToken = couch_util:to_hex(
+ couch_util:hmac(sha, Secret, UserName)
+ ),
case header_value(Req, XHeaderToken) of
Token when Token == ExpectedToken ->
- Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
- roles=Roles}};
- _ -> nil
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = ?l2b(UserName),
+ roles = Roles
+ }
+ };
+ _ ->
+ nil
end
end;
false ->
- Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
+ Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}}
end
end.
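When proxy_use_secret is enabled, the proxy in front of CouchDB must send the token that proxy_auth_user/1 recomputes above; a sketch of minting it on the proxy side (Secret is the configured auth secret, UserName the header value):

    %% X-Auth-CouchDB-Token is the lowercase hex of HMAC-SHA1(Secret, UserName).
    Token = couch_util:to_hex(couch_util:hmac(sha, Secret, UserName)).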
@@ -199,22 +251,35 @@ jwt_authentication_handler(Req) ->
case jwtf:decode(?l2b(Jwt), [alg | RequiredClaims], fun jwtf_keystore:get/2) of
{ok, {Claims}} ->
case lists:keyfind(<<"sub">>, 1, Claims) of
- false -> throw({unauthorized, <<"Token missing sub claim.">>});
- {_, User} -> Req#httpd{user_ctx=#user_ctx{
- name = User,
- roles = couch_util:get_value(?l2b(config:get("jwt_auth", "roles_claim_name", "_couchdb.roles")), Claims, [])
- }}
+ false ->
+ throw({unauthorized, <<"Token missing sub claim.">>});
+ {_, User} ->
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = User,
+ roles = couch_util:get_value(
+ ?l2b(
+ config:get(
+ "jwt_auth", "roles_claim_name", "_couchdb.roles"
+ )
+ ),
+ Claims,
+ []
+ )
+ }
+ }
end;
{error, Reason} ->
throw(Reason)
end;
- _ -> Req
+ _ ->
+ Req
end.
get_configured_claims() ->
Claims = config:get("jwt_auth", "required_claims", ""),
Re = "((?<key1>[a-z]+)|{(?<key2>[a-z]+)\s*,\s*\"(?<val>[^\"]+)\"})",
- case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of
+ case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of
nomatch when Claims /= "" ->
?LOG_ERROR(#{
what => invalid_config_setting,
@@ -238,66 +303,82 @@ to_claim([<<>>, Key, Value]) ->
cookie_authentication_handler(Req) ->
cookie_authentication_handler(Req, couch_auth_cache).
-cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req, AuthModule) ->
+cookie_authentication_handler(#httpd{mochi_req = MochiReq} = Req, AuthModule) ->
case MochiReq:get_cookie_value("AuthSession") of
- undefined -> Req;
- [] -> Req;
- Cookie ->
- [User, TimeStr, HashStr] = try
- AuthSession = couch_util:decodeBase64Url(Cookie),
- [_A, _B, _Cs] = re:split(?b2l(AuthSession), ":",
- [{return, list}, {parts, 3}])
- catch
- _:_Error ->
- Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
- throw({bad_request, Reason})
- end,
- % Verify expiry and hash
- CurrentTime = make_cookie_time(),
- case chttpd_util:get_chttpd_auth_config("secret") of
undefined ->
- ?LOG_DEBUG(#{what => cookie_auth_secret_undefined}),
- couch_log:debug("cookie auth secret is not set",[]),
Req;
- SecretStr ->
- Secret = ?l2b(SecretStr),
- case AuthModule:get_user_creds(Req, User) of
- nil -> Req;
- {ok, UserProps, _AuthCtx} ->
- UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
- FullSecret = <<Secret/binary, UserSalt/binary>>,
- ExpectedHash = couch_util:hmac(sha, FullSecret, User ++ ":" ++ TimeStr),
- Hash = ?l2b(HashStr),
- Timeout = chttpd_util:get_chttpd_auth_config_integer(
- "timeout", 600),
- couch_log:debug("timeout ~p", [Timeout]),
- case (catch erlang:list_to_integer(TimeStr, 16)) of
- TimeStamp when CurrentTime < TimeStamp + Timeout ->
- case couch_passwords:verify(ExpectedHash, Hash) of
- true ->
- TimeLeft = TimeStamp + Timeout - CurrentTime,
- ?LOG_DEBUG(#{
- what => successful_cookie_auth,
- username => User
- }),
- couch_log:debug("Successful cookie auth as: ~p",
- [User]),
- Req#httpd{user_ctx=#user_ctx{
- name=?l2b(User),
- roles=couch_util:get_value(<<"roles">>, UserProps, [])
- }, auth={FullSecret, TimeLeft < Timeout*0.9}};
- _Else ->
- Req
- end;
- _Else ->
- Req
- end
+ [] ->
+ Req;
+ Cookie ->
+ [User, TimeStr, HashStr] =
+ try
+ AuthSession = couch_util:decodeBase64Url(Cookie),
+ [_A, _B, _Cs] = re:split(
+ ?b2l(AuthSession),
+ ":",
+ [{return, list}, {parts, 3}]
+ )
+ catch
+ _:_Error ->
+ Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
+ throw({bad_request, Reason})
+ end,
+ % Verify expiry and hash
+ CurrentTime = make_cookie_time(),
+ case chttpd_util:get_chttpd_auth_config("secret") of
+ undefined ->
+ ?LOG_DEBUG(#{what => cookie_auth_secret_undefined}),
+ couch_log:debug("cookie auth secret is not set", []),
+ Req;
+ SecretStr ->
+ Secret = ?l2b(SecretStr),
+ case AuthModule:get_user_creds(Req, User) of
+ nil ->
+ Req;
+ {ok, UserProps, _AuthCtx} ->
+ UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
+ FullSecret = <<Secret/binary, UserSalt/binary>>,
+ ExpectedHash = couch_util:hmac(sha, FullSecret, User ++ ":" ++ TimeStr),
+ Hash = ?l2b(HashStr),
+ Timeout = chttpd_util:get_chttpd_auth_config_integer(
+ "timeout", 600
+ ),
+ couch_log:debug("timeout ~p", [Timeout]),
+ case (catch erlang:list_to_integer(TimeStr, 16)) of
+ TimeStamp when CurrentTime < TimeStamp + Timeout ->
+ case couch_passwords:verify(ExpectedHash, Hash) of
+ true ->
+ TimeLeft = TimeStamp + Timeout - CurrentTime,
+ ?LOG_DEBUG(#{
+ what => successful_cookie_auth,
+ username => User
+ }),
+ couch_log:debug(
+ "Successful cookie auth as: ~p",
+ [User]
+ ),
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = ?l2b(User),
+ roles = couch_util:get_value(
+ <<"roles">>, UserProps, []
+ )
+ },
+ auth = {FullSecret, TimeLeft < Timeout * 0.9}
+ };
+ _Else ->
+ Req
+ end;
+ _Else ->
+ Req
+ end
+ end
end
- end
end.
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) ->
+cookie_auth_header(#httpd{user_ctx = #user_ctx{name = null}}, _Headers) ->
+ [];
+cookie_auth_header(#httpd{user_ctx = #user_ctx{name = User}, auth = {Secret, true}} = Req, Headers) ->
% Note: we only set the AuthSession cookie if:
% * a valid AuthSession cookie has been received
% * we are outside a 10% timeout window
@@ -308,20 +389,24 @@ cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Re
CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
AuthSession = couch_util:get_value("AuthSession", Cookies),
- if AuthSession == undefined ->
- TimeStamp = make_cookie_time(),
- [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
- true ->
- []
+ if
+ AuthSession == undefined ->
+ TimeStamp = make_cookie_time(),
+ [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
+ true ->
+ []
end;
-cookie_auth_header(_Req, _Headers) -> [].
+cookie_auth_header(_Req, _Headers) ->
+ [].
cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
Hash = couch_util:hmac(sha, Secret, SessionData),
- mochiweb_cookies:cookie("AuthSession",
+ mochiweb_cookies:cookie(
+ "AuthSession",
couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
- [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain() ++ same_site()).
+ [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain() ++ same_site()
+ ).
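For reference, the cookie minted above (and picked apart by cookie_authentication_handler/2) has this shape; a sketch with illustrative values:

    %% AuthSession = base64url("Name:HexTimestamp:Hash"), where
    %% Hash = HMAC-SHA1(Secret ++ UserSalt, "Name:HexTimestamp").
    SessionData = "jan" ++ ":" ++ erlang:integer_to_list(1234567, 16),
    Hash = couch_util:hmac(sha, FullSecret, SessionData),
    Cookie = couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)).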
ensure_cookie_auth_secret() ->
case chttpd_util:get_chttpd_auth_config("secret") of
@@ -329,7 +414,8 @@ ensure_cookie_auth_secret() ->
NewSecret = ?b2l(couch_uuids:random()),
config:set("couch_httpd_auth", "secret", NewSecret),
NewSecret;
- Secret -> Secret
+ Secret ->
+ Secret
end.
% session handlers
@@ -337,28 +423,33 @@ ensure_cookie_auth_secret() ->
handle_session_req(Req) ->
handle_session_req(Req, couch_auth_cache).
-handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
+handle_session_req(#httpd{method = 'POST', mochi_req = MochiReq} = Req, AuthModule) ->
ReqBody = MochiReq:recv_body(),
- Form = case MochiReq:get_primary_header_value("content-type") of
- % content type should be json
- "application/x-www-form-urlencoded" ++ _ ->
- mochiweb_util:parse_qs(ReqBody);
- "application/json" ++ _ ->
- {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)),
- lists:map(fun({Key, Value}) ->
- {?b2l(Key), ?b2l(Value)}
- end, Pairs);
- _ ->
- []
- end,
+ Form =
+ case MochiReq:get_primary_header_value("content-type") of
+ % content type should be json
+ "application/x-www-form-urlencoded" ++ _ ->
+ mochiweb_util:parse_qs(ReqBody);
+ "application/json" ++ _ ->
+ {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)),
+ lists:map(
+ fun({Key, Value}) ->
+ {?b2l(Key), ?b2l(Value)}
+ end,
+ Pairs
+ );
+ _ ->
+ []
+ end,
UserName = ?l2b(extract_username(Form)),
Password = ?l2b(couch_util:get_value("password", Form, "")),
?LOG_DEBUG(#{what => login_attempt, user => UserName}),
- couch_log:debug("Attempt Login: ~s",[UserName]),
- {ok, UserProps, _AuthCtx} = case AuthModule:get_user_creds(Req, UserName) of
- nil -> {ok, [], nil};
- Result -> Result
- end,
+ couch_log:debug("Attempt Login: ~s", [UserName]),
+ {ok, UserProps, _AuthCtx} =
+ case AuthModule:get_user_creds(Req, UserName) of
+ nil -> {ok, [], nil};
+ Result -> Result
+ end,
case authenticate(Password, UserProps) of
true ->
verify_totp(UserProps, Form),
@@ -366,68 +457,102 @@ handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
Secret = ?l2b(ensure_cookie_auth_secret()),
UserSalt = couch_util:get_value(<<"salt">>, UserProps),
CurrentTime = make_cookie_time(),
- Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
+ Cookie = cookie_auth_cookie(
+ Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime
+ ),
% TODO document the "next" feature in Futon
- {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
- nil ->
- {200, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
+ {Code, Headers} =
+ case couch_httpd:qs_value(Req, "next", nil) of
+ nil ->
+ {200, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
+ send_json(
+ Req#httpd{req_body = ReqBody},
+ Code,
+ Headers,
{[
{ok, true},
{name, UserName},
{roles, couch_util:get_value(<<"roles">>, UserProps, [])}
- ]});
+ ]}
+ );
false ->
authentication_warning(Req, UserName),
% clear the session
- Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
- {Code, Headers} = case couch_httpd:qs_value(Req, "fail", nil) of
- nil ->
- {401, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(Req, Code, Headers, {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
+ Cookie = mochiweb_cookies:cookie(
+ "AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)
+ ),
+ {Code, Headers} =
+ case couch_httpd:qs_value(Req, "fail", nil) of
+ nil ->
+ {401, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
+ send_json(
+ Req,
+ Code,
+ Headers,
+ {[{error, <<"unauthorized">>}, {reason, <<"Name or password is incorrect.">>}]}
+ )
end;
% get user info
% GET /_session
-handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req, _AuthModule) ->
+handle_session_req(#httpd{method = 'GET', user_ctx = UserCtx} = Req, _AuthModule) ->
Name = UserCtx#user_ctx.name,
ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
case {Name, ForceLogin} of
{null, "true"} ->
throw({unauthorized, <<"Please login.">>});
{Name, _} ->
- send_json(Req, {[
- % remove this ok
- {ok, true},
- {<<"userCtx">>, {[
- {name, Name},
- {roles, UserCtx#user_ctx.roles}
- ]}},
- {info, {[
- {authentication_handlers, [
- N || {N, _Fun} <- Req#httpd.authentication_handlers]}
- ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
- Handler
- end) ++ maybe_value(authentication_db, config:get("chttpd_auth", "authentication_db"), fun(Val) ->
- ?l2b(Val)
- end)}}
- ]})
+ send_json(
+ Req,
+ {[
+ % remove this ok
+ {ok, true},
+ {<<"userCtx">>,
+ {[
+ {name, Name},
+ {roles, UserCtx#user_ctx.roles}
+ ]}},
+ {info, {
+ [
+ {authentication_handlers, [
+ N
+ || {N, _Fun} <- Req#httpd.authentication_handlers
+ ]}
+ ] ++
+ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
+ Handler
+ end) ++
+ maybe_value(
+ authentication_db,
+ config:get("chttpd_auth", "authentication_db"),
+ fun(Val) ->
+ ?l2b(Val)
+ end
+ )
+ }}
+ ]}
+ )
end;
% logout by deleting the session
-handle_session_req(#httpd{method='DELETE'}=Req, _AuthModule) ->
- Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++
- cookie_domain() ++ cookie_scheme(Req)),
- {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
- nil ->
- {200, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
+handle_session_req(#httpd{method = 'DELETE'} = Req, _AuthModule) ->
+ Cookie = mochiweb_cookies:cookie(
+ "AuthSession",
+ "",
+ [{path, "/"}] ++
+ cookie_domain() ++ cookie_scheme(Req)
+ ),
+ {Code, Headers} =
+ case couch_httpd:qs_value(Req, "next", nil) of
+ nil ->
+ {200, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
send_json(Req, Code, Headers, {[{ok, true}]});
handle_session_req(Req, _AuthModule) ->
send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
@@ -446,22 +571,25 @@ extract_username(Form) ->
end.
maybe_value(_Key, undefined, _Fun) -> [];
-maybe_value(Key, Else, Fun) ->
- [{Key, Fun(Else)}].
+maybe_value(Key, Else, Fun) -> [{Key, Fun(Else)}].
authenticate(Pass, UserProps) ->
UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
{PasswordHash, ExpectedHash} =
case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of
- <<"simple">> ->
- {couch_passwords:simple(Pass, UserSalt),
- couch_util:get_value(<<"password_sha">>, UserProps, nil)};
- <<"pbkdf2">> ->
- Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
- verify_iterations(Iterations),
- {couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
- couch_util:get_value(<<"derived_key">>, UserProps, nil)}
- end,
+ <<"simple">> ->
+ {
+ couch_passwords:simple(Pass, UserSalt),
+ couch_util:get_value(<<"password_sha">>, UserProps, nil)
+ };
+ <<"pbkdf2">> ->
+ Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
+ verify_iterations(Iterations),
+ {
+ couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
+ couch_util:get_value(<<"derived_key">>, UserProps, nil)
+ }
+ end,
couch_passwords:verify(PasswordHash, ExpectedHash).
verify_iterations(Iterations) when is_integer(Iterations) ->
@@ -484,21 +612,25 @@ make_cookie_time() ->
{NowMS, NowS, _} = os:timestamp(),
NowMS * 1000000 + NowS.
-cookie_scheme(#httpd{mochi_req=MochiReq}) ->
+cookie_scheme(#httpd{mochi_req = MochiReq}) ->
[{http_only, true}] ++
- case MochiReq:get(scheme) of
- http -> [];
- https -> [{secure, true}]
- end.
+ case MochiReq:get(scheme) of
+ http -> [];
+ https -> [{secure, true}]
+ end.
max_age() ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "allow_persistent_cookies", true) of
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "allow_persistent_cookies", true
+ )
+ of
false ->
[];
true ->
Timeout = chttpd_util:get_chttpd_auth_config_integer(
- "timeout", 600),
+ "timeout", 600
+ ),
[{max_age, Timeout}]
end.
@@ -509,14 +641,17 @@ cookie_domain() ->
_ -> [{domain, Domain}]
end.
-
same_site() ->
SameSite = chttpd_util:get_chttpd_auth_config("same_site", ""),
case string:to_lower(SameSite) of
- "" -> [];
- "none" -> [{same_site, none}];
- "lax" -> [{same_site, lax}];
- "strict" -> [{same_site, strict}];
+ "" ->
+ [];
+ "none" ->
+ [{same_site, none}];
+ "lax" ->
+ [{same_site, lax}];
+ "strict" ->
+ [{same_site, strict}];
_ ->
?LOG_ERROR(#{
what => invalid_config_setting,
@@ -525,11 +660,10 @@ same_site() ->
value => SameSite,
details => "value must be one of `none`, `lax`, `strict`"
}),
- couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ",[SameSite]),
+ couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ", [SameSite]),
[]
end.
-
reject_if_totp(User) ->
case get_totp_config(User) of
undefined ->
@@ -545,7 +679,8 @@ verify_totp(User, Form) ->
{Props} ->
Key = couch_base32:decode(couch_util:get_value(<<"key">>, Props)),
Alg = couch_util:to_existing_atom(
- couch_util:get_value(<<"algorithm">>, Props, <<"sha">>)),
+ couch_util:get_value(<<"algorithm">>, Props, <<"sha">>)
+ ),
Len = couch_util:get_value(<<"length">>, Props, 6),
Token = ?l2b(couch_util:get_value("token", Form, "")),
verify_token(Alg, Key, Len, Token)
@@ -556,12 +691,17 @@ get_totp_config(User) ->
verify_token(Alg, Key, Len, Token) ->
Now = make_cookie_time(),
- Tokens = [generate_token(Alg, Key, Len, Now - 30),
- generate_token(Alg, Key, Len, Now),
- generate_token(Alg, Key, Len, Now + 30)],
+ Tokens = [
+ generate_token(Alg, Key, Len, Now - 30),
+ generate_token(Alg, Key, Len, Now),
+ generate_token(Alg, Key, Len, Now + 30)
+ ],
%% evaluate all tokens in constant time
- Match = lists:foldl(fun(T, Acc) -> couch_util:verify(T, Token) or Acc end,
- false, Tokens),
+ Match = lists:foldl(
+ fun(T, Acc) -> couch_util:verify(T, Token) or Acc end,
+ false,
+ Tokens
+ ),
case Match of
true ->
ok;
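
The fold above deliberately uses the non-short-circuiting `or` operator so that every candidate token is compared even after a match is found, keeping verification time independent of the input. A minimal sketch of the same idea, assuming couch_util:verify/2 behaves like a constant-time compare of equal-size binaries:

    %% Sketch only; const_time_eq/2 stands in for couch_util:verify/2,
    %% which is assumed to compare binaries in constant time.
    const_time_eq(A, B) when byte_size(A) =:= byte_size(B) ->
        Pairs = lists:zip(binary_to_list(A), binary_to_list(B)),
        0 =:= lists:foldl(fun({X, Y}, Acc) -> Acc bor (X bxor Y) end, 0, Pairs);
    const_time_eq(_, _) ->
        false.

    any_token_matches(Tokens, Token) ->
        %% `or` (unlike `orelse`) evaluates both operands, so every
        %% candidate is compared regardless of earlier matches.
        lists:foldl(fun(T, Acc) -> const_time_eq(T, Token) or Acc end, false, Tokens).
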
@@ -573,12 +713,13 @@ generate_token(Alg, Key, Len, Timestamp) ->
integer_to_binary(couch_totp:generate(Alg, Key, Timestamp, 30, Len), Len).
integer_to_binary(Int, Len) when is_integer(Int), is_integer(Len) ->
- Unpadded = case erlang:function_exported(erlang, integer_to_binary, 1) of
- true ->
- erlang:integer_to_binary(Int);
- false ->
- ?l2b(integer_to_list(Int))
- end,
+ Unpadded =
+ case erlang:function_exported(erlang, integer_to_binary, 1) of
+ true ->
+ erlang:integer_to_binary(Int);
+ false ->
+ ?l2b(integer_to_list(Int))
+ end,
Padding = binary:copy(<<"0">>, Len),
Padded = <<Padding/binary, Unpadded/binary>>,
binary:part(Padded, byte_size(Padded), -Len).
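
Note that this local integer_to_binary/2 takes a pad length as its second argument, shadowing the erlang:integer_to_binary/2 BIF (whose second argument is a base); the padding preserves leading zeros in short TOTP values. A worked example of the clause above, stepping through the bindings:

    %% integer_to_binary(42, 6), taking the branch where the BIF exists:
    %%   Unpadded = <<"42">>
    %%   Padding  = <<"000000">>              %% binary:copy(<<"0">>, 6)
    %%   Padded   = <<"00000042">>
    %%   binary:part(Padded, byte_size(Padded), -6) -> <<"000042">>
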
@@ -590,5 +731,7 @@ authentication_warning(#httpd{mochi_req = Req}, User) ->
user => User,
peer => Peer
}),
- couch_log:warning("~p: Authentication failed for user ~s from ~s",
- [?MODULE, User, Peer]).
+ couch_log:warning(
+ "~p: Authentication failed for user ~s from ~s",
+ [?MODULE, User, Peer]
+ ).
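
Read together, cookie_auth_cookie/4 and the verification path above define the AuthSession payload as base64url("User:TimeStampHex:HMAC"), with the HMAC keyed by the server secret concatenated with the user's salt. A minimal sketch of the encoding step, assuming crypto:mac/4 (OTP 22+) in place of couch_util:hmac/3 and plain base64:encode/1 in place of the url-safe couch_util:encodeBase64Url/1:

    %% Illustrative only; the names mirror the shipped code rather than
    %% replacing it, and base64:encode/1 is not url-safe as written.
    make_session_value(User, FullSecret, TimeStamp) ->
        SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
        Hash = crypto:mac(hmac, sha, FullSecret, SessionData),
        base64:encode(SessionData ++ ":" ++ binary_to_list(Hash)).
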
diff --git a/src/couch/src/couch_httpd_external.erl b/src/couch/src/couch_httpd_external.erl
index d4842fb65..4a6f52dcc 100644
--- a/src/couch/src/couch_httpd_external.erl
+++ b/src/couch/src/couch_httpd_external.erl
@@ -17,40 +17,52 @@
-export([send_external_response/2, json_req_obj/2, json_req_obj/3]).
-export([default_or_content_type/2, parse_external_response/1]).
--import(couch_httpd,[send_error/4]).
+-import(couch_httpd, [send_error/4]).
-include_lib("couch/include/couch_db.hrl").
json_req_obj(Req, Db) -> json_req_obj(Req, Db, null).
-json_req_obj(#httpd{mochi_req=Req,
- method=Method,
- requested_path_parts=RequestedPath,
- path_parts=Path,
- req_body=ReqBody,
- peer=Peer
- }, Db, DocId) ->
- Body = case ReqBody of
- undefined ->
- MaxSize = config:get_integer("httpd", "max_http_request_size",
- 4294967296),
- Req:recv_body(MaxSize);
- Else -> Else
- end,
- ParsedForm = case Req:get_primary_header_value("content-type") of
- "application/x-www-form-urlencoded" ++ _ ->
- case Body of
- undefined -> [];
- _ -> mochiweb_util:parse_qs(Body)
- end;
- _ ->
- []
- end,
+json_req_obj(
+ #httpd{
+ mochi_req = Req,
+ method = Method,
+ requested_path_parts = RequestedPath,
+ path_parts = Path,
+ req_body = ReqBody,
+ peer = Peer
+ },
+ Db,
+ DocId
+) ->
+ Body =
+ case ReqBody of
+ undefined ->
+ MaxSize = config:get_integer(
+ "httpd",
+ "max_http_request_size",
+ 4294967296
+ ),
+ Req:recv_body(MaxSize);
+ Else ->
+ Else
+ end,
+ ParsedForm =
+ case Req:get_primary_header_value("content-type") of
+ "application/x-www-form-urlencoded" ++ _ ->
+ case Body of
+ undefined -> [];
+ _ -> mochiweb_util:parse_qs(Body)
+ end;
+ _ ->
+ []
+ end,
Headers = Req:get(headers),
Hlist = mochiweb_headers:to_list(Headers),
{ok, Info} = couch_db:get_db_info(Db),
-
-% add headers...
- {[{<<"info">>, {Info}},
+
+ % add headers...
+ {[
+ {<<"info">>, {Info}},
{<<"id">>, DocId},
{<<"uuid">>, couch_uuids:new()},
{<<"method">>, Method},
@@ -64,7 +76,8 @@ json_req_obj(#httpd{mochi_req=Req,
{<<"form">>, to_json_terms(ParsedForm)},
{<<"cookie">>, to_json_terms(Req:parse_cookie())},
{<<"userCtx">>, couch_util:json_user_ctx(Db)},
- {<<"secObj">>, couch_db:get_security(Db)}]}.
+ {<<"secObj">>, couch_db:get_security(Db)}
+ ]}.
to_json_terms(Data) ->
to_json_terms(Data, []).
@@ -81,13 +94,13 @@ json_query_keys({Json}) ->
json_query_keys([], Acc) ->
{lists:reverse(Acc)};
json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([Term | Rest], Acc) ->
- json_query_keys(Rest, [Term|Acc]).
+ json_query_keys(Rest, [Term | Acc]).
send_external_response(Req, Response) ->
#extern_resp_args{
@@ -99,48 +112,59 @@ send_external_response(Req, Response) ->
} = parse_external_response(Response),
Headers1 = default_or_content_type(CType, Headers),
case Json of
- nil ->
- couch_httpd:send_response(Req, Code, Headers1, Data);
- Json ->
- couch_httpd:send_json(Req, Code, Headers1, Json)
+ nil ->
+ couch_httpd:send_response(Req, Code, Headers1, Data);
+ Json ->
+ couch_httpd:send_json(Req, Code, Headers1, Json)
end.
parse_external_response({Response}) ->
- lists:foldl(fun({Key,Value}, Args) ->
- case {Key, Value} of
- {"", _} ->
- Args;
- {<<"code">>, Value} ->
- Args#extern_resp_args{code=Value};
- {<<"stop">>, true} ->
- Args#extern_resp_args{stop=true};
- {<<"json">>, Value} ->
- Args#extern_resp_args{
- json=Value,
- ctype="application/json"};
- {<<"body">>, Value} ->
- Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
- {<<"base64">>, Value} ->
- Args#extern_resp_args{
- data=base64:decode(Value),
- ctype="application/binary"
- };
- {<<"headers">>, {Headers}} ->
- NewHeaders = lists:map(fun({Header, HVal}) ->
- {binary_to_list(Header), binary_to_list(HVal)}
- end, Headers),
- Args#extern_resp_args{headers=NewHeaders};
- _ -> % unknown key
- Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
- throw({external_response_error, Msg})
+ lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {Key, Value} of
+ {"", _} ->
+ Args;
+ {<<"code">>, Value} ->
+ Args#extern_resp_args{code = Value};
+ {<<"stop">>, true} ->
+ Args#extern_resp_args{stop = true};
+ {<<"json">>, Value} ->
+ Args#extern_resp_args{
+ json = Value,
+ ctype = "application/json"
+ };
+ {<<"body">>, Value} ->
+ Args#extern_resp_args{data = Value, ctype = "text/html; charset=utf-8"};
+ {<<"base64">>, Value} ->
+ Args#extern_resp_args{
+ data = base64:decode(Value),
+ ctype = "application/binary"
+ };
+ {<<"headers">>, {Headers}} ->
+ NewHeaders = lists:map(
+ fun({Header, HVal}) ->
+ {binary_to_list(Header), binary_to_list(HVal)}
+ end,
+ Headers
+ ),
+ Args#extern_resp_args{headers = NewHeaders};
+ % unknown key
+ _ ->
+ Msg = lists:flatten(
+ io_lib:format("Invalid data from external server: ~p", [{Key, Value}])
+ ),
+ throw({external_response_error, Msg})
end
- end, #extern_resp_args{}, Response).
+ end,
+ #extern_resp_args{},
+ Response
+ ).
default_or_content_type(DefaultContentType, Headers) ->
IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
case lists:any(IsContentType, Headers) of
- false ->
- [{"Content-Type", DefaultContentType} | Headers];
- true ->
- Headers
+ false ->
+ [{"Content-Type", DefaultContentType} | Headers];
+ true ->
+ Headers
end.
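
For context, parse_external_response/1 folds a decoded EJSON object into an #extern_resp_args{} record (defined in couch_db.hrl) and rejects any unknown key with external_response_error. An illustrative response document in the shape the fold expects; only the keys matched above ("code", "stop", "json", "body", "base64", "headers") are legal:

    %% Hypothetical external-server reply, in EJSON {Props} form.
    Example = {[
        {<<"code">>, 200},
        {<<"headers">>, {[{<<"X-Demo">>, <<"1">>}]}},
        {<<"json">>, {[{<<"ok">>, true}]}}
    ]}.
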
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
index 7e6e7d6c9..926eff82b 100644
--- a/src/couch/src/couch_httpd_multipart.erl
+++ b/src/couch/src/couch_httpd_multipart.erl
@@ -31,57 +31,63 @@ decode_multipart_stream(ContentType, DataFun, Ref) ->
ParentRef = erlang:monitor(process, Parent),
put(mp_parent_ref, ParentRef),
num_mp_writers(NumMpWriters),
- {<<"--",_/binary>>, _, _} = couch_httpd:parse_multipart_request(
- ContentType, DataFun,
- fun(Next) -> mp_parse_doc(Next, []) end),
+ {<<"--", _/binary>>, _, _} = couch_httpd:parse_multipart_request(
+ ContentType,
+ DataFun,
+ fun(Next) -> mp_parse_doc(Next, []) end
+ ),
unlink(Parent)
- end),
+ end),
Parser ! {get_doc_bytes, Ref, self()},
receive
- {started_open_doc_revs, NewRef} ->
- %% FIXME: How to remove the knowledge about this message?
- {{started_open_doc_revs, NewRef}, Parser, ParserRef};
- {doc_bytes, Ref, DocBytes} ->
- {{doc_bytes, Ref, DocBytes}, Parser, ParserRef};
- {'DOWN', ParserRef, _, _, normal} ->
- ok;
- {'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} ->
- ?LOG_ERROR(#{
- what => multipart_streamer_failure,
- ref => ParserRef,
- details => Msg
- }),
- couch_log:error("Multipart streamer ~p died with reason ~p",
- [ParserRef, Msg]),
- throw({Error, Msg});
- {'DOWN', ParserRef, _, _, Reason} ->
- ?LOG_ERROR(#{
- what => multipart_streamer_failure,
- ref => ParserRef,
- details => Reason
- }),
- couch_log:error("Multipart streamer ~p died with reason ~p",
- [ParserRef, Reason]),
- throw({error, Reason})
+ {started_open_doc_revs, NewRef} ->
+ %% FIXME: How to remove the knowledge about this message?
+ {{started_open_doc_revs, NewRef}, Parser, ParserRef};
+ {doc_bytes, Ref, DocBytes} ->
+ {{doc_bytes, Ref, DocBytes}, Parser, ParserRef};
+ {'DOWN', ParserRef, _, _, normal} ->
+ ok;
+ {'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} ->
+ ?LOG_ERROR(#{
+ what => multipart_streamer_failure,
+ ref => ParserRef,
+ details => Msg
+ }),
+ couch_log:error(
+ "Multipart streamer ~p died with reason ~p",
+ [ParserRef, Msg]
+ ),
+ throw({Error, Msg});
+ {'DOWN', ParserRef, _, _, Reason} ->
+ ?LOG_ERROR(#{
+ what => multipart_streamer_failure,
+ ref => ParserRef,
+ details => Reason
+ }),
+ couch_log:error(
+ "Multipart streamer ~p died with reason ~p",
+ [ParserRef, Reason]
+ ),
+ throw({error, Reason})
end.
-
mp_parse_doc({headers, H}, []) ->
case couch_util:get_value("content-type", H) of
- {"application/json", _} ->
- fun (Next) ->
- mp_parse_doc(Next, [])
- end;
- _ ->
- throw({bad_ctype, <<"Content-Type must be application/json">>})
+ {"application/json", _} ->
+ fun(Next) ->
+ mp_parse_doc(Next, [])
+ end;
+ _ ->
+ throw({bad_ctype, <<"Content-Type must be application/json">>})
end;
mp_parse_doc({body, Bytes}, AccBytes) ->
- fun (Next) ->
+ fun(Next) ->
mp_parse_doc(Next, [Bytes | AccBytes])
end;
mp_parse_doc(body_end, AccBytes) ->
- receive {get_doc_bytes, Ref, From} ->
- From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
+ receive
+ {get_doc_bytes, Ref, From} ->
+ From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
end,
fun(Next) ->
mp_parse_atts(Next, {Ref, [], 0, orddict:new(), []})
@@ -92,7 +98,7 @@ mp_parse_atts({headers, _}, Acc) ->
mp_parse_atts(body_end, Acc) ->
fun(Next) -> mp_parse_atts(Next, Acc) end;
mp_parse_atts({body, Bytes}, {Ref, Chunks, Offset, Counters, Waiting}) ->
- case maybe_send_data({Ref, Chunks++[Bytes], Offset, Counters, Waiting}) of
+ case maybe_send_data({Ref, Chunks ++ [Bytes], Offset, Counters, Waiting}) of
abort_parsing ->
fun(Next) -> mp_abort_parse_atts(Next, nil) end;
NewAcc ->
@@ -102,34 +108,34 @@ mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) ->
N = num_mp_writers(),
M = length(Counters),
case (M == N) andalso Chunks == [] of
- true ->
- ok;
- false ->
- ParentRef = get(mp_parent_ref),
- receive
- abort_parsing ->
+ true ->
ok;
- {get_bytes, Ref, From} ->
- C2 = update_writer(From, Counters),
- case maybe_send_data({Ref, Chunks, Offset, C2, [From|Waiting]}) of
- abort_parsing ->
- ok;
- NewAcc ->
- mp_parse_atts(eof, NewAcc)
- end;
- {'DOWN', ParentRef, _, _, _} ->
- exit(mp_reader_coordinator_died);
- {'DOWN', WriterRef, _, WriterPid, _} ->
- case remove_writer(WriterPid, WriterRef, Counters) of
+ false ->
+ ParentRef = get(mp_parent_ref),
+ receive
abort_parsing ->
ok;
- C2 ->
- NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
- mp_parse_atts(eof, NewAcc)
+ {get_bytes, Ref, From} ->
+ C2 = update_writer(From, Counters),
+ case maybe_send_data({Ref, Chunks, Offset, C2, [From | Waiting]}) of
+ abort_parsing ->
+ ok;
+ NewAcc ->
+ mp_parse_atts(eof, NewAcc)
+ end;
+ {'DOWN', ParentRef, _, _, _} ->
+ exit(mp_reader_coordinator_died);
+ {'DOWN', WriterRef, _, WriterPid, _} ->
+ case remove_writer(WriterPid, WriterRef, Counters) of
+ abort_parsing ->
+ ok;
+ C2 ->
+ NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
+ mp_parse_atts(eof, NewAcc)
+ end
+ after 300000 ->
+ ok
end
- after 300000 ->
- ok
- end
end.
mp_abort_parse_atts(eof, _) ->
@@ -138,82 +144,89 @@ mp_abort_parse_atts(_, _) ->
fun(Next) -> mp_abort_parse_atts(Next, nil) end.
maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
- receive {get_bytes, Ref, From} ->
- NewCounters = update_writer(From, Counters),
- maybe_send_data({Ref, Chunks, Offset, NewCounters, [From|Waiting]})
+ receive
+ {get_bytes, Ref, From} ->
+ NewCounters = update_writer(From, Counters),
+ maybe_send_data({Ref, Chunks, Offset, NewCounters, [From | Waiting]})
after 0 ->
% reply to as many writers as possible
- NewWaiting = lists:filter(fun(Writer) ->
- {_, WhichChunk} = orddict:fetch(Writer, Counters),
- ListIndex = WhichChunk - Offset,
- if ListIndex =< length(Chunks) ->
- Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)},
- false;
- true ->
- true
- end
- end, Waiting),
+ NewWaiting = lists:filter(
+ fun(Writer) ->
+ {_, WhichChunk} = orddict:fetch(Writer, Counters),
+ ListIndex = WhichChunk - Offset,
+ if
+ ListIndex =< length(Chunks) ->
+ Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)},
+ false;
+ true ->
+ true
+ end
+ end,
+ Waiting
+ ),
% check if we can drop a chunk from the head of the list
- SmallestIndex = case Counters of
- [] ->
- 0;
- _ ->
- lists:min([C || {_WPid, {_WRef, C}} <- Counters])
- end,
+ SmallestIndex =
+ case Counters of
+ [] ->
+ 0;
+ _ ->
+ lists:min([C || {_WPid, {_WRef, C}} <- Counters])
+ end,
Size = length(Counters),
N = num_mp_writers(),
- if Size == N andalso SmallestIndex == (Offset+1) ->
- NewChunks = tl(Chunks),
- NewOffset = Offset+1;
- true ->
- NewChunks = Chunks,
- NewOffset = Offset
+ if
+ Size == N andalso SmallestIndex == (Offset + 1) ->
+ NewChunks = tl(Chunks),
+ NewOffset = Offset + 1;
+ true ->
+ NewChunks = Chunks,
+ NewOffset = Offset
end,
% we should wait for a writer if no one has written the last chunk
LargestIndex = lists:max([0] ++ [C || {_WPid, {_WRef, C}} <- Counters]),
- if LargestIndex >= (Offset + length(Chunks)) ->
- % someone has written all possible chunks, keep moving
- {Ref, NewChunks, NewOffset, Counters, NewWaiting};
- true ->
- ParentRef = get(mp_parent_ref),
- receive
- abort_parsing ->
- abort_parsing;
- {'DOWN', ParentRef, _, _, _} ->
- exit(mp_reader_coordinator_died);
- {'DOWN', WriterRef, _, WriterPid, _} ->
- case remove_writer(WriterPid, WriterRef, Counters) of
+ if
+ LargestIndex >= (Offset + length(Chunks)) ->
+ % someone has written all possible chunks, keep moving
+ {Ref, NewChunks, NewOffset, Counters, NewWaiting};
+ true ->
+ ParentRef = get(mp_parent_ref),
+ receive
abort_parsing ->
abort_parsing;
- C2 ->
- RestWaiting = NewWaiting -- [WriterPid],
- NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting},
- maybe_send_data(NewAcc)
- end;
- {get_bytes, Ref, X} ->
- C2 = update_writer(X, Counters),
- maybe_send_data({Ref, NewChunks, NewOffset, C2, [X|NewWaiting]})
- after 300000 ->
- abort_parsing
- end
+ {'DOWN', ParentRef, _, _, _} ->
+ exit(mp_reader_coordinator_died);
+ {'DOWN', WriterRef, _, WriterPid, _} ->
+ case remove_writer(WriterPid, WriterRef, Counters) of
+ abort_parsing ->
+ abort_parsing;
+ C2 ->
+ RestWaiting = NewWaiting -- [WriterPid],
+ NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting},
+ maybe_send_data(NewAcc)
+ end;
+ {get_bytes, Ref, X} ->
+ C2 = update_writer(X, Counters),
+ maybe_send_data({Ref, NewChunks, NewOffset, C2, [X | NewWaiting]})
+ after 300000 ->
+ abort_parsing
+ end
end
end.
-
update_writer(WriterPid, Counters) ->
UpdateFun = fun({WriterRef, Count}) -> {WriterRef, Count + 1} end,
- InitialValue = case orddict:find(WriterPid, Counters) of
- {ok, IV} ->
- IV;
- error ->
- WriterRef = erlang:monitor(process, WriterPid),
- {WriterRef, 1}
- end,
+ InitialValue =
+ case orddict:find(WriterPid, Counters) of
+ {ok, IV} ->
+ IV;
+ error ->
+ WriterRef = erlang:monitor(process, WriterPid),
+ {WriterRef, 1}
+ end,
orddict:update(WriterPid, UpdateFun, InitialValue, Counters).
-
remove_writer(WriterPid, WriterRef, Counters) ->
case orddict:find(WriterPid, Counters) of
{ok, {WriterRef, _}} ->
@@ -232,11 +245,9 @@ remove_writer(WriterPid, WriterRef, Counters) ->
abort_parsing
end.
-
num_mp_writers(N) ->
erlang:put(mp_att_writers, N).
-
num_mp_writers() ->
case erlang:get(mp_att_writers) of
undefined -> 1;
@@ -246,15 +257,21 @@ num_mp_writers() ->
encode_multipart_stream(_Boundary, JsonBytes, [], WriteFun, _AttFun) ->
WriteFun(JsonBytes);
encode_multipart_stream(Boundary, JsonBytes, Atts, WriteFun, AttFun) ->
- WriteFun([<<"--", Boundary/binary,
- "\r\nContent-Type: application/json\r\n\r\n">>,
- JsonBytes, <<"\r\n--", Boundary/binary>>]),
+ WriteFun([
+ <<"--", Boundary/binary, "\r\nContent-Type: application/json\r\n\r\n">>,
+ JsonBytes,
+ <<"\r\n--", Boundary/binary>>
+ ]),
atts_to_mp(Atts, Boundary, WriteFun, AttFun).
atts_to_mp([], _Boundary, WriteFun, _AttFun) ->
WriteFun(<<"--">>);
-atts_to_mp([{Att, Name, Len, Type, Encoding} | RestAtts], Boundary, WriteFun,
- AttFun) ->
+atts_to_mp(
+ [{Att, Name, Len, Type, Encoding} | RestAtts],
+ Boundary,
+ WriteFun,
+ AttFun
+) ->
LengthBin = list_to_binary(integer_to_list(Len)),
% write headers
WriteFun(<<"\r\nContent-Disposition: attachment; filename=\"", Name/binary, "\"">>),
@@ -275,40 +292,52 @@ atts_to_mp([{Att, Name, Len, Type, Encoding} | RestAtts], Boundary, WriteFun,
atts_to_mp(RestAtts, Boundary, WriteFun, AttFun).
length_multipart_stream(Boundary, JsonBytes, Atts) ->
- AttsSize = lists:foldl(fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) ->
- AccAttsSize +
- 4 + % "\r\n\r\n"
- length(integer_to_list(Len)) +
- Len +
- 4 + % "\r\n--"
- size(Boundary) +
- % attachment headers
- % (the length of the Content-Length has already been set)
- size(Name) +
- size(Type) +
- length("\r\nContent-Disposition: attachment; filename=\"\"") +
- length("\r\nContent-Type: ") +
- length("\r\nContent-Length: ") +
- case Encoding of
- identity ->
- 0;
- _ ->
- length(atom_to_list(Encoding)) +
- length("\r\nContent-Encoding: ")
- end
- end, 0, Atts),
- if AttsSize == 0 ->
- {<<"application/json">>, iolist_size(JsonBytes)};
- true ->
- {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
- 2 + % "--"
- size(Boundary) +
- 36 + % "\r\ncontent-type: application/json\r\n\r\n"
- iolist_size(JsonBytes) +
- 4 + % "\r\n--"
- size(Boundary) +
- + AttsSize +
- 2 % "--"
+ AttsSize = lists:foldl(
+ fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) ->
+ AccAttsSize +
+ % "\r\n\r\n"
+ 4 +
+ length(integer_to_list(Len)) +
+ Len +
+ % "\r\n--"
+ 4 +
+ size(Boundary) +
+ % attachment headers
+ % (the length of the Content-Length has already been set)
+ size(Name) +
+ size(Type) +
+ length("\r\nContent-Disposition: attachment; filename=\"\"") +
+ length("\r\nContent-Type: ") +
+ length("\r\nContent-Length: ") +
+ case Encoding of
+ identity ->
+ 0;
+ _ ->
+ length(atom_to_list(Encoding)) +
+ length("\r\nContent-Encoding: ")
+ end
+ end,
+ 0,
+ Atts
+ ),
+ if
+ AttsSize == 0 ->
+ {<<"application/json">>, iolist_size(JsonBytes)};
+ true ->
+ {
+ <<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
+ % "--"
+ 2 +
+ size(Boundary) +
+ % "\r\ncontent-type: application/json\r\n\r\n"
+ 36 +
+ iolist_size(JsonBytes) +
+ % "\r\n--"
+ 4 +
+ size(Boundary) +
+ +AttsSize +
+ % "--"
+ 2
}
end.
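
The literal byte counts in length_multipart_stream/3 mirror the framing written by encode_multipart_stream/5: 2 for "--", 4 for "\r\n--" or "\r\n\r\n", and 36 for "\r\ncontent-type: application/json\r\n\r\n". A sketch of the wire layout being measured, where BOUNDARY stands for the supplied boundary and per-attachment header bytes are counted inside the fold:

    --BOUNDARY\r\nContent-Type: application/json\r\n\r\n   <- 2 + size(Boundary) + 36
    <json body>                                            <- iolist_size(JsonBytes)
    \r\n--BOUNDARY                                         <- 4 + size(Boundary)
    <per attachment: headers, \r\n\r\n, data, \r\n--BOUNDARY>
    --                                                     <- trailing 2
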
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
index 4db3f6234..024b905d0 100644
--- a/src/couch/src/couch_httpd_vhost.erl
+++ b/src/couch/src/couch_httpd_vhost.erl
@@ -34,9 +34,10 @@
-define(RELISTEN_DELAY, 5000).
-record(vhosts_state, {
- vhosts,
- vhost_globals,
- vhosts_fun}).
+ vhosts,
+ vhost_globals,
+ vhosts_fun
+}).
%% doc the vhost manager.
%% This gen_server keeps state of vhosts added to the ini and tries to
@@ -110,34 +111,44 @@ dispatch_host_int(MochiReq) ->
#vhosts_state{
vhost_globals = VHostGlobals,
vhosts = VHosts,
- vhosts_fun=Fun} = get_state(),
+ vhosts_fun = Fun
+ } = get_state(),
{"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
- VPathParts = string:tokens(VPath, "/"),
+ VPathParts = string:tokens(VPath, "/"),
VHost = host(MochiReq),
{VHostParts, VhostPort} = split_host_port(VHost),
- FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
- VhostPort, VPathParts) of
- no_vhost_matched -> MochiReq;
- {VhostTarget, NewPath} ->
- case vhost_global(VHostGlobals, MochiReq) of
- true ->
- MochiReq;
- _Else ->
- NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
- Fragment}),
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- NewPath1,
- MochiReq:get(version),
- MochiReq:get(headers)),
- Fun(MochiReq1, VhostTarget)
- end
- end,
+ FinalMochiReq =
+ case
+ try_bind_vhost(
+ VHosts,
+ lists:reverse(VHostParts),
+ VhostPort,
+ VPathParts
+ )
+ of
+ no_vhost_matched ->
+ MochiReq;
+ {VhostTarget, NewPath} ->
+ case vhost_global(VHostGlobals, MochiReq) of
+ true ->
+ MochiReq;
+ _Else ->
+ NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query, Fragment}),
+ MochiReq1 = mochiweb_request:new(
+ MochiReq:get(socket),
+ MochiReq:get(method),
+ NewPath1,
+ MochiReq:get(version),
+ MochiReq:get(headers)
+ ),
+ Fun(MochiReq1, VhostTarget)
+ end
+ end,
FinalMochiReq.
-append_path("/"=_Target, "/"=_Path) ->
+append_path("/" = _Target, "/" = _Path) ->
"/";
append_path(Target, Path) ->
Target ++ Path.
@@ -154,15 +165,20 @@ redirect_to_vhost(MochiReq, VhostTarget) ->
}),
couch_log:debug("Vhost Target: '~p'~n", [Target]),
- Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
- MochiReq:get(headers)),
+ Headers = mochiweb_headers:enter(
+ "x-couchdb-vhost-path",
+ Path,
+ MochiReq:get(headers)
+ ),
% build a new mochiweb request
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- Target,
- MochiReq:get(version),
- Headers),
+ MochiReq1 = mochiweb_request:new(
+ MochiReq:get(socket),
+ MochiReq:get(method),
+ Target,
+ MochiReq:get(version),
+ Headers
+ ),
% cleanup; it forces mochiweb to reparse the raw uri.
MochiReq1:cleanup(),
MochiReq1.
@@ -170,23 +186,25 @@ redirect_to_vhost(MochiReq, VhostTarget) ->
%% if so, then it will not be rewritten, but will run as a normal couchdb request.
%% normally you'd use this for _uuids, _utils and a few of the others you want to
%% keep available on vhosts. You can also use it to make databases 'global'.
-vhost_global( VhostGlobals, MochiReq) ->
+vhost_global(VhostGlobals, MochiReq) ->
RawUri = MochiReq:get(raw_path),
{"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
- Front = case couch_httpd:partition(Path) of
- {"", "", ""} ->
- "/"; % Special case the root url handler
- {FirstPart, _, _} ->
- FirstPart
- end,
- [true] == [true||V <- VhostGlobals, V == Front].
+ Front =
+ case couch_httpd:partition(Path) of
+ {"", "", ""} ->
+ % Special case the root url handler
+ "/";
+ {FirstPart, _, _} ->
+ FirstPart
+ end,
+ [true] == [true || V <- VhostGlobals, V == Front].
%% bind host
%% first it tries to bind the port, then the hostname.
try_bind_vhost([], _HostParts, _Port, _PathParts) ->
no_vhost_matched;
-try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
+try_bind_vhost([VhostSpec | Rest], HostParts, Port, PathParts) ->
{{VHostParts, VPort, VPath}, Path} = VhostSpec,
case bind_port(VPort, Port) of
ok ->
@@ -197,12 +215,18 @@ try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
Path1 = make_target(Path, Bindings, Remainings, []),
{make_path(Path1), make_path(PathParts1)};
fail ->
- try_bind_vhost(Rest, HostParts, Port,
- PathParts)
+ try_bind_vhost(
+ Rest,
+ HostParts,
+ Port,
+ PathParts
+ )
end;
- fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
+ fail ->
+ try_bind_vhost(Rest, HostParts, Port, PathParts)
end;
- fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
+ fail ->
+ try_bind_vhost(Rest, HostParts, Port, PathParts)
end.
%% doc: build a new path from bindings. bindings are query args
@@ -215,72 +239,82 @@ make_target([], _Bindings, _Remaining, Acc) ->
make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+make_target([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_target([{bind, P}|Rest], Bindings, Remaining, Acc) ->
- P2 = case couch_util:get_value({bind, P}, Bindings) of
- undefined -> "undefined";
- P1 -> P1
- end,
- make_target(Rest, Bindings, Remaining, [P2|Acc]);
-make_target([P|Rest], Bindings, Remaining, Acc) ->
- make_target(Rest, Bindings, Remaining, [P|Acc]).
+make_target([{bind, P} | Rest], Bindings, Remaining, Acc) ->
+ P2 =
+ case couch_util:get_value({bind, P}, Bindings) of
+ undefined -> "undefined";
+ P1 -> P1
+ end,
+ make_target(Rest, Bindings, Remaining, [P2 | Acc]);
+make_target([P | Rest], Bindings, Remaining, Acc) ->
+ make_target(Rest, Bindings, Remaining, [P | Acc]).
%% bind port
bind_port(Port, Port) -> ok;
bind_port('*', _) -> ok;
-bind_port(_,_) -> fail.
+bind_port(_, _) -> fail.
%% bind vhost
-bind_vhost([],[], Bindings) -> {ok, Bindings, []};
-bind_vhost([?MATCH_ALL], [], _Bindings) -> fail;
-bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest};
-bind_vhost([], _HostParts, _Bindings) -> fail;
-bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
- bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
-bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
+bind_vhost([], [], Bindings) ->
+ {ok, Bindings, []};
+bind_vhost([?MATCH_ALL], [], _Bindings) ->
+ fail;
+bind_vhost([?MATCH_ALL], Rest, Bindings) ->
+ {ok, Bindings, Rest};
+bind_vhost([], _HostParts, _Bindings) ->
+ fail;
+bind_vhost([{bind, Token} | Rest], [Match | RestHost], Bindings) ->
+ bind_vhost(Rest, RestHost, [{{bind, Token}, Match} | Bindings]);
+bind_vhost([Cname | Rest], [Cname | RestHost], Bindings) ->
bind_vhost(Rest, RestHost, Bindings);
-bind_vhost(_, _, _) -> fail.
+bind_vhost(_, _, _) ->
+ fail.
%% bind path
bind_path([], PathParts) ->
{ok, PathParts};
bind_path(_VPathParts, []) ->
fail;
-bind_path([Path|VRest],[Path|Rest]) ->
- bind_path(VRest, Rest);
+bind_path([Path | VRest], [Path | Rest]) ->
+ bind_path(VRest, Rest);
bind_path(_, _) ->
fail.
% utilities
-
%% create vhost list from ini
host(MochiReq) ->
XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"),
+ "x_forwarded_host", "X-Forwarded-Host"
+ ),
case MochiReq:get_header_value(XHost) of
undefined ->
case MochiReq:get_header_value("Host") of
undefined -> [];
Value1 -> Value1
end;
- Value -> Value
+ Value ->
+ Value
end.
make_vhosts() ->
- Vhosts = lists:foldl(fun
- ({_, ""}, Acc) ->
- Acc;
- ({Vhost, Path}, Acc) ->
- [{parse_vhost(Vhost), split_path(Path)}|Acc]
- end, [], config:get("vhosts")),
+ Vhosts = lists:foldl(
+ fun
+ ({_, ""}, Acc) ->
+ Acc;
+ ({Vhost, Path}, Acc) ->
+ [{parse_vhost(Vhost), split_path(Path)} | Acc]
+ end,
+ [],
+ config:get("vhosts")
+ ),
lists:reverse(lists:usort(Vhosts)).
-
parse_vhost(Vhost) ->
case urlsplit_netloc(Vhost, []) of
{[], Path} ->
@@ -295,15 +329,21 @@ parse_vhost(Vhost) ->
{H1, P, string:tokens(Path, "/")}
end.
-
split_host_port(HostAsString) ->
case string:rchr(HostAsString, $:) of
0 ->
{split_host(HostAsString), '*'};
N ->
- HostPart = string:substr(HostAsString, 1, N-1),
- case (catch erlang:list_to_integer(string:substr(HostAsString,
- N+1, length(HostAsString)))) of
+ HostPart = string:substr(HostAsString, 1, N - 1),
+ case
+ (catch erlang:list_to_integer(
+ string:substr(
+ HostAsString,
+ N + 1,
+ length(HostAsString)
+ )
+ ))
+ of
{'EXIT', _} ->
{split_host(HostAsString), '*'};
Port ->
@@ -317,36 +357,34 @@ split_host(HostAsString) ->
split_path(Path) ->
make_spec(string:tokens(Path, "/"), []).
-
make_spec([], Acc) ->
lists:reverse(Acc);
-make_spec([""|R], Acc) ->
+make_spec(["" | R], Acc) ->
make_spec(R, Acc);
-make_spec(["*"|R], Acc) ->
- make_spec(R, [?MATCH_ALL|Acc]);
-make_spec([P|R], Acc) ->
+make_spec(["*" | R], Acc) ->
+ make_spec(R, [?MATCH_ALL | Acc]);
+make_spec([P | R], Acc) ->
P1 = parse_var(P),
- make_spec(R, [P1|Acc]).
-
+ make_spec(R, [P1 | Acc]).
parse_var(P) ->
case P of
":" ++ Var ->
{bind, Var};
- _ -> P
+ _ ->
+ P
end.
-
% mochiweb doesn't export it.
urlsplit_netloc("", Acc) ->
{lists:reverse(Acc), ""};
-urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+urlsplit_netloc(Rest = [C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
{lists:reverse(Acc), Rest};
urlsplit_netloc([C | Rest], Acc) ->
urlsplit_netloc(Rest, [C | Acc]).
make_path(Parts) ->
- "/" ++ string:join(Parts,[?SEPARATOR]).
+ "/" ++ string:join(Parts, [?SEPARATOR]).
init(_) ->
ok = config:listen_for_changes(?MODULE, nil),
@@ -354,17 +392,19 @@ init(_) ->
%% load configuration
{VHostGlobals, VHosts, Fun} = load_conf(),
State = #vhosts_state{
- vhost_globals=VHostGlobals,
- vhosts=VHosts,
- vhosts_fun=Fun},
+ vhost_globals = VHostGlobals,
+ vhosts = VHosts,
+ vhosts_fun = Fun
+ },
{ok, State}.
handle_call(reload, _From, _State) ->
{VHostGlobals, VHosts, Fun} = load_conf(),
{reply, ok, #vhosts_state{
- vhost_globals=VHostGlobals,
- vhosts=VHosts,
- vhosts_fun=Fun}};
+ vhost_globals = VHostGlobals,
+ vhosts = VHosts,
+ vhosts_fun = Fun
+ }};
handle_call(get_state, _From, State) ->
{reply, State, State};
handle_call(_Msg, _From, State) ->
@@ -385,7 +425,6 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
handle_config_change("vhosts", _, _, _, _) ->
{ok, ?MODULE:reload()};
handle_config_change(_, _, _, _, _) ->
@@ -398,8 +437,11 @@ handle_config_terminate(_Server, _Reason, _State) ->
load_conf() ->
%% get vhost globals
- VHostGlobals = re:split("_utils, _uuids, _session, _users", "\\s*,\\s*",
- [{return, list}]),
+ VHostGlobals = re:split(
+ "_utils, _uuids, _session, _users",
+ "\\s*,\\s*",
+ [{return, list}]
+ ),
%% build vhosts matching rules
VHosts = make_vhosts(),
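
For reference, the rules loaded here come from the [vhosts] ini section (see make_vhosts/0 above); in a rule, "*" segments become the ?MATCH_ALL wildcard and ":name" segments become {bind, Name} tokens, which make_target/4 substitutes back into the rewritten path. A minimal sketch of the tokenization, using the module's own (unexported) helpers for illustration:

    %% Hypothetical ini rule:  :dbname.example.com = /:dbname
    %% Path specs are tokenized by split_path/1 via make_spec/2 and
    %% parse_var/1 as shown above:
    [{bind, "dbname"}] = make_spec(string:tokens("/:dbname", "/"), []),
    [?MATCH_ALL, "db"] = make_spec(string:tokens("/*/db", "/"), []).
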
diff --git a/src/couch/src/couch_io_logger.erl b/src/couch/src/couch_io_logger.erl
index 188e031cb..f859874b6 100644
--- a/src/couch/src/couch_io_logger.erl
+++ b/src/couch/src/couch_io_logger.erl
@@ -20,7 +20,6 @@
stop_error/1
]).
-
start(undefined) ->
ok;
start(Dir) ->
@@ -42,7 +41,6 @@ start(Dir) ->
ok
end.
-
stop_noerror() ->
case get(logger_path) of
undefined ->
@@ -51,7 +49,6 @@ stop_noerror() ->
close_logs()
end.
-
stop_error(Err) ->
case get(logger_path) of
undefined ->
@@ -61,21 +58,17 @@ stop_error(Err) ->
close_logs()
end.
-
log_output(Data) ->
log(get(logger_out_fd), Data).
-
log_input(Data) ->
log(get(logger_in_fd), Data).
-
unix_time() ->
{Mega, Sec, USec} = os:timestamp(),
UnixTs = (Mega * 1000000 + Sec) * 1000000 + USec,
integer_to_list(UnixTs).
-
log_name() ->
Ts = unix_time(),
Pid0 = erlang:pid_to_list(self()),
@@ -83,12 +76,10 @@ log_name() ->
Pid2 = string:strip(Pid1, right, $>),
lists:flatten(io_lib:format("~s_~s", [Ts, Pid2])).
-
close_logs() ->
file:close(get(logger_out_fd)),
file:close(get(logger_in_fd)).
-
save_error_logs(Path, Err) ->
Otp = erlang:system_info(otp_release),
Msg = io_lib:format("Error: ~p~nNode: ~p~nOTP: ~p~n", [Err, node(), Otp]),
@@ -97,10 +88,9 @@ save_error_logs(Path, Err) ->
OFd = get(logger_in_fd),
file:position(IFd, 0),
file:position(OFd, 0),
- file:copy(IFd, Path ++ ".out.log"),
+ file:copy(IFd, Path ++ ".out.log"),
file:copy(OFd, Path ++ ".in.log").
-
log(undefined, _Data) ->
ok;
log(Fd, Data) ->
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
index 94150418e..90f350840 100644
--- a/src/couch/src/couch_key_tree.erl
+++ b/src/couch/src/couch_key_tree.erl
@@ -48,41 +48,43 @@
-module(couch_key_tree).
-export([
-count_leafs/1,
-find_missing/2,
-fold/3,
-get/2,
-get_all_leafs/1,
-get_all_leafs_full/1,
-get_full_key_paths/2,
-get_key_leafs/2,
-map/2,
-map_leafs/2,
-mapfold/3,
-multi_merge/2,
-merge/2,
-remove_leafs/2,
-stem/2
+ count_leafs/1,
+ find_missing/2,
+ fold/3,
+ get/2,
+ get_all_leafs/1,
+ get_all_leafs_full/1,
+ get_full_key_paths/2,
+ get_key_leafs/2,
+ map/2,
+ map_leafs/2,
+ mapfold/3,
+ multi_merge/2,
+ merge/2,
+ remove_leafs/2,
+ stem/2
]).
-include_lib("couch/include/couch_db.hrl").
--type treenode() :: {Key::term(), Value::term(), [Node::treenode()]}.
--type tree() :: {Depth::pos_integer(), [treenode()]}.
+-type treenode() :: {Key :: term(), Value :: term(), [Node :: treenode()]}.
+-type tree() :: {Depth :: pos_integer(), [treenode()]}.
-type revtree() :: [tree()].
-
%% @doc Merge multiple paths into the given tree.
-spec multi_merge(revtree(), tree()) -> revtree().
multi_merge(RevTree, Trees) ->
- lists:foldl(fun(Tree, RevTreeAcc) ->
- {NewRevTree, _} = merge(RevTreeAcc, Tree),
- NewRevTree
- end, RevTree, lists:sort(Trees)).
-
+ lists:foldl(
+ fun(Tree, RevTreeAcc) ->
+ {NewRevTree, _} = merge(RevTreeAcc, Tree),
+ NewRevTree
+ end,
+ RevTree,
+ lists:sort(Trees)
+ ).
%% @doc Merge a path into a tree.
-spec merge(revtree(), tree() | path()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
+ {revtree(), new_leaf | new_branch | internal_node}.
merge(RevTree, Tree) ->
{Merged, Result} = merge_tree(RevTree, Tree, []),
{lists:sort(Merged), Result}.
@@ -92,12 +94,12 @@ merge(RevTree, Tree) ->
%% If it can't find a branch that the new tree merges into, add it as a
%% new branch in the RevTree.
-spec merge_tree(revtree(), tree() | path(), revtree()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
+ {revtree(), new_leaf | new_branch | internal_node}.
merge_tree([], Tree, []) ->
{[Tree], new_leaf};
merge_tree([], Tree, MergeAcc) ->
- {[Tree|MergeAcc], new_branch};
-merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes}=Tree, MergeAcc) ->
+ {[Tree | MergeAcc], new_branch};
+merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes} = Tree, MergeAcc) ->
% For the intrepid observer following along at home, notice what we're
% doing here with (Depth - IDepth). This tells us which of the two
% branches (Nodes or INodes) we need to seek into. If Depth > IDepth
@@ -125,7 +127,7 @@ merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes}=Tree, MergeAcc) ->
%% ends up running out of nodes we know that these two branches can
%% not be merged.
-spec merge_at([node()], integer(), [node()]) ->
- {revtree(), new_leaf | new_branch | internal_node} | fail.
+ {revtree(), new_leaf | new_branch | internal_node} | fail.
merge_at(_Nodes, _Pos, []) ->
fail;
merge_at([], _Pos, _INodes) ->
@@ -172,7 +174,7 @@ merge_at([Tree | Sibs], 0, INodes) ->
end.
-spec merge_extend(revtree(), revtree()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
+ {revtree(), new_leaf | new_branch | internal_node}.
merge_extend([], B) when B =/= [] ->
% Most likely the insert branch simply extends this one, so the new
% branch is exactly B. It's also possible that B is a branch because
@@ -189,7 +191,7 @@ merge_extend([{K, V1, SubA} | NextA], [{K, V2, SubB}]) ->
% level in the two branches.
{Merged, Result} = merge_extend(SubA, SubB),
{[{K, value_pref(V1, V2), Merged} | NextA], Result};
-merge_extend([{K1, _, _}=NodeA | Rest], [{K2, _, _}=NodeB]) when K1 > K2 ->
+merge_extend([{K1, _, _} = NodeA | Rest], [{K2, _, _} = NodeB]) when K1 > K2 ->
% Keys are ordered so we know this is where the insert branch needs
% to be inserted into the tree. We also know that this creates a new
% branch so we have a new leaf to report.
@@ -200,10 +202,11 @@ merge_extend([Tree | RestA], NextB) ->
% key in NextB might be larger than the largest key in RestA which
% means we've created a new branch.
{Merged, Result0} = merge_extend(RestA, NextB),
- Result = case length(Merged) == length(RestA) of
- true -> Result0;
- false -> new_branch
- end,
+ Result =
+ case length(Merged) == length(RestA) of
+ true -> Result0;
+ false -> new_branch
+ end,
{[Tree | Merged], Result}.
find_missing(_Tree, []) ->
@@ -228,17 +231,17 @@ find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SeachKeys) ->
SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
-
filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
{FilteredAcc, RemovedKeysAcc};
-filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
+filter_leafs([{Pos, [{LeafKey, _} | _]} = Path | Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
- if FilteredKeys == Keys ->
- % this leaf is not a key we are looking to remove
- filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
- true ->
- % this did match a key, remove both the node and the input key
- filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
+ if
+ FilteredKeys == Keys ->
+ % this leaf is not a key we are looking to remove
+ filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
+ true ->
+ % this did match a key, remove both the node and the input key
+ filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
end.
% Removes any branches from the tree whose leaf node(s) are in the Keys
@@ -255,15 +258,18 @@ remove_leafs(Trees, Keys) ->
% convert paths back to trees
NewTree = lists:foldl(
- fun({StartPos, Path},TreeAcc) ->
+ fun({StartPos, Path}, TreeAcc) ->
[SingleTree] = lists:foldl(
- fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+ fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path
+ ),
{NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
NewTrees
- end, [], SortedPaths),
+ end,
+ [],
+ SortedPaths
+ ),
{NewTree, RemovedKeys}.
-
% get the leafs in the tree matching the keys. The matching key nodes can be
% leafs or an inner nodes. If an inner node, then the leafs for that node
% are returned.
@@ -274,7 +280,7 @@ get_key_leafs(_, [], Acc) ->
{Acc, []};
get_key_leafs([], Keys, Acc) ->
{Acc, Keys};
-get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
+get_key_leafs([{Pos, Tree} | Rest], Keys, Acc) ->
{Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
@@ -282,7 +288,7 @@ get_key_leafs_simple(_Pos, _Tree, [], _PathAcc) ->
{[], []};
get_key_leafs_simple(_Pos, [], Keys, _PathAcc) ->
{[], Keys};
-get_key_leafs_simple(Pos, [{Key, _, SubTree}=Tree | RestTree], Keys, PathAcc) ->
+get_key_leafs_simple(Pos, [{Key, _, SubTree} = Tree | RestTree], Keys, PathAcc) ->
case lists:delete({Pos, Key}, Keys) of
Keys ->
% Same list, key not found
@@ -300,7 +306,6 @@ get_key_leafs_simple(Pos, [{Key, _, SubTree}=Tree | RestTree], Keys, PathAcc) ->
{ChildLeafs ++ SiblingLeafs, Keys4}
end.
-
get_key_leafs_simple2(_Pos, [], Keys, _PathAcc) ->
% No more tree to deal with so no more keys to return.
{[], Keys};
@@ -320,10 +325,12 @@ get_key_leafs_simple2(Pos, [{Key, _Value, SubTree} | RestTree], Keys, PathAcc) -
{SiblingLeafs, Keys4} = get_key_leafs_simple2(Pos, RestTree, Keys3, PathAcc),
{ChildLeafs ++ SiblingLeafs, Keys4}.
-
get(Tree, KeysToGet) ->
{KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
- FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
+ FixedResults = [
+ {Value, {Pos, [Key0 || {Key0, _} <- Path]}}
+ || {Pos, [{_Key, Value} | _] = Path} <- KeyPaths
+ ],
{FixedResults, KeysNotFound}.
get_full_key_paths(Tree, Keys) ->
@@ -333,11 +340,10 @@ get_full_key_paths(_, [], Acc) ->
{Acc, []};
get_full_key_paths([], Keys, Acc) ->
{Acc, Keys};
-get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
+get_full_key_paths([{Pos, Tree} | Rest], Keys, Acc) ->
{Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
-
get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
{[], []};
get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
@@ -345,13 +351,18 @@ get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
CurrentNodeResult =
- case length(KeysToGet2) =:= length(KeysToGet) of
- true -> % not in the key list.
- [];
- false -> % this node is the key list. return it
- [{Pos, [{KeyId, Value} | KeyPathAcc]}]
- end,
- {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
+ case length(KeysToGet2) =:= length(KeysToGet) of
+ % not in the key list.
+ true ->
+ [];
+ % this node is in the key list. return it
+ false ->
+ [{Pos, [{KeyId, Value} | KeyPathAcc]}]
+ end,
+ {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [
+ {KeyId, Value}
+ | KeyPathAcc
+ ]),
{KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
{CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
@@ -368,14 +379,15 @@ get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
[{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
- get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
+ get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++
+ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
get_all_leafs(Trees) ->
get_all_leafs(Trees, []).
get_all_leafs([], Acc) ->
Acc;
-get_all_leafs([{Pos, Tree}|Rest], Acc) ->
+get_all_leafs([{Pos, Tree} | Rest], Acc) ->
get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
@@ -383,12 +395,12 @@ get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
[{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)];
get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
- get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
-
+ get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++
+ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
count_leafs([]) ->
0;
-count_leafs([{_Pos,Tree}|Rest]) ->
+count_leafs([{_Pos, Tree} | Rest]) ->
count_leafs_simple([Tree]) + count_leafs(Rest).
count_leafs_simple([]) ->
@@ -398,42 +410,49 @@ count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
-
fold(_Fun, Acc, []) ->
Acc;
-fold(Fun, Acc0, [{Pos, Tree}|Rest]) ->
+fold(Fun, Acc0, [{Pos, Tree} | Rest]) ->
Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]),
fold(Fun, Acc1, Rest).
fold_simple(_Fun, Acc, _Pos, []) ->
Acc;
fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) ->
- Type = if SubTree == [] -> leaf; true -> branch end,
+ Type =
+ if
+ SubTree == [] -> leaf;
+ true -> branch
+ end,
Acc1 = Fun({Pos, Key}, Value, Type, Acc0),
- Acc2 = fold_simple(Fun, Acc1, Pos+1, SubTree),
+ Acc2 = fold_simple(Fun, Acc1, Pos + 1, SubTree),
fold_simple(Fun, Acc2, Pos, RestTree).
-
map(_Fun, []) ->
[];
-map(Fun, [{Pos, Tree}|Rest]) ->
+map(Fun, [{Pos, Tree} | Rest]) ->
case erlang:fun_info(Fun, arity) of
- {arity, 2} ->
- [NewTree] = map_simple(fun(A,B,_C) -> Fun(A,B) end, Pos, [Tree]),
- [{Pos, NewTree} | map(Fun, Rest)];
- {arity, 3} ->
- [NewTree] = map_simple(Fun, Pos, [Tree]),
- [{Pos, NewTree} | map(Fun, Rest)]
+ {arity, 2} ->
+ [NewTree] = map_simple(fun(A, B, _C) -> Fun(A, B) end, Pos, [Tree]),
+ [{Pos, NewTree} | map(Fun, Rest)];
+ {arity, 3} ->
+ [NewTree] = map_simple(Fun, Pos, [Tree]),
+ [{Pos, NewTree} | map(Fun, Rest)]
end.
map_simple(_Fun, _Pos, []) ->
[];
map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
- Value2 = Fun({Pos, Key}, Value,
- if SubTree == [] -> leaf; true -> branch end),
+ Value2 = Fun(
+ {Pos, Key},
+ Value,
+ if
+ SubTree == [] -> leaf;
+ true -> branch
+ end
+ ),
[{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
-
mapfold(_Fun, Acc, []) ->
{[], Acc};
mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
@@ -444,16 +463,22 @@ mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
mapfold_simple(_Fun, Acc, _Pos, []) ->
{[], Acc};
mapfold_simple(Fun, Acc, Pos, [{Key, Value, SubTree} | RestTree]) ->
- {Value2, Acc2} = Fun({Pos, Key}, Value,
- if SubTree == [] -> leaf; true -> branch end, Acc),
+ {Value2, Acc2} = Fun(
+ {Pos, Key},
+ Value,
+ if
+ SubTree == [] -> leaf;
+ true -> branch
+ end,
+ Acc
+ ),
{SubTree2, Acc3} = mapfold_simple(Fun, Acc2, Pos + 1, SubTree),
{RestTree2, Acc4} = mapfold_simple(Fun, Acc3, Pos, RestTree),
{[{Key, Value2, SubTree2} | RestTree2], Acc4}.
-
map_leafs(_Fun, []) ->
[];
-map_leafs(Fun, [{Pos, Tree}|Rest]) ->
+map_leafs(Fun, [{Pos, Tree} | Rest]) ->
[NewTree] = map_leafs_simple(Fun, Pos, [Tree]),
[{Pos, NewTree} | map_leafs(Fun, Rest)].
@@ -465,19 +490,22 @@ map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) ->
map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
[{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)].
-
stem(Trees, Limit) ->
try
- {_, Branches} = lists:foldl(fun(Tree, {Seen, TreeAcc}) ->
- {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen),
- {NewSeen, NewBranches ++ TreeAcc}
- end, {sets:new(), []}, Trees),
+ {_, Branches} = lists:foldl(
+ fun(Tree, {Seen, TreeAcc}) ->
+ {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen),
+ {NewSeen, NewBranches ++ TreeAcc}
+ end,
+ {sets:new(), []},
+ Trees
+ ),
lists:sort(Branches)
- catch throw:dupe_keys ->
- repair_tree(Trees, Limit)
+ catch
+ throw:dupe_keys ->
+ repair_tree(Trees, Limit)
end.
-
stem_tree({Depth, Child}, Limit, Seen) ->
case stem_tree(Depth, Child, Limit, Seen) of
{NewSeen, _, NewChild, NewBranches} ->
@@ -486,41 +514,45 @@ stem_tree({Depth, Child}, Limit, Seen) ->
{NewSeen, NewBranches}
end.
-
stem_tree(_Depth, {Key, _Val, []} = Leaf, Limit, Seen) ->
{check_key(Key, Seen), Limit - 1, Leaf, []};
-
stem_tree(Depth, {Key, Val, Children}, Limit, Seen0) ->
Seen1 = check_key(Key, Seen0),
- FinalAcc = lists:foldl(fun(Child, Acc) ->
- {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc,
- case stem_tree(Depth + 1, Child, Limit, SeenAcc) of
- {NewSeenAcc, LimitPos, NewChild, NewBranches} ->
- NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
- NewChildAcc = [NewChild | ChildAcc],
- NewBranchAcc = NewBranches ++ BranchAcc,
- {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc};
- {NewSeenAcc, LimitPos, NewBranches} ->
- NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
- NewBranchAcc = NewBranches ++ BranchAcc,
- {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc}
- end
- end, {Seen1, -1, [], []}, Children),
+ FinalAcc = lists:foldl(
+ fun(Child, Acc) ->
+ {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc,
+ case stem_tree(Depth + 1, Child, Limit, SeenAcc) of
+ {NewSeenAcc, LimitPos, NewChild, NewBranches} ->
+ NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+ NewChildAcc = [NewChild | ChildAcc],
+ NewBranchAcc = NewBranches ++ BranchAcc,
+ {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc};
+ {NewSeenAcc, LimitPos, NewBranches} ->
+ NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+ NewBranchAcc = NewBranches ++ BranchAcc,
+ {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc}
+ end
+ end,
+ {Seen1, -1, [], []},
+ Children
+ ),
{FinalSeen, FinalLimitPos, FinalChildren, FinalBranches} = FinalAcc,
case FinalLimitPos of
N when N > 0, length(FinalChildren) > 0 ->
FinalNode = {Key, Val, lists:reverse(FinalChildren)},
{FinalSeen, FinalLimitPos - 1, FinalNode, FinalBranches};
0 when length(FinalChildren) > 0 ->
- NewBranches = lists:map(fun(Child) ->
- {Depth + 1, Child}
- end, lists:reverse(FinalChildren)),
+ NewBranches = lists:map(
+ fun(Child) ->
+ {Depth + 1, Child}
+ end,
+ lists:reverse(FinalChildren)
+ ),
{FinalSeen, -1, NewBranches ++ FinalBranches};
N when N < 0, length(FinalChildren) == 0 ->
{FinalSeen, FinalLimitPos - 1, FinalBranches}
end.
-
check_key(Key, Seen) ->
case sets:is_element(Key, Seen) of
true ->
@@ -529,29 +561,40 @@ check_key(Key, Seen) ->
sets:add_element(Key, Seen)
end.
-
repair_tree(Trees, Limit) ->
% flatten each branch in a tree into a tree path, sort by starting rev #
- Paths = lists:sort(lists:map(fun({Pos, Path}) ->
- StemmedPath = lists:sublist(Path, Limit),
- {Pos + 1 - length(StemmedPath), StemmedPath}
- end, get_all_leafs_full(Trees))),
+ Paths = lists:sort(
+ lists:map(
+ fun({Pos, Path}) ->
+ StemmedPath = lists:sublist(Path, Limit),
+ {Pos + 1 - length(StemmedPath), StemmedPath}
+ end,
+ get_all_leafs_full(Trees)
+ )
+ ),
% convert paths back to trees
lists:foldl(
- fun({StartPos, Path},TreeAcc) ->
+ fun({StartPos, Path}, TreeAcc) ->
[SingleTree] = lists:foldl(
- fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+ fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path
+ ),
{NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
NewTrees
- end, [], Paths).
-
-
-value_pref(Tuple, _) when is_tuple(Tuple),
- (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
+ end,
+ [],
+ Paths
+ ).
+
+value_pref(Tuple, _) when
+ is_tuple(Tuple),
+ (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4)
+->
Tuple;
-value_pref(_, Tuple) when is_tuple(Tuple),
- (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
+value_pref(_, Tuple) when
+ is_tuple(Tuple),
+ (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4)
+->
Tuple;
value_pref(?REV_MISSING, Other) ->
Other;
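
For orientation, repair_tree above stems each flattened branch by keeping only the leaf-most Limit revisions and recomputing the start position. A minimal, self-contained sketch of that per-branch step (module and function names here are illustrative, not part of this patch):

    -module(stem_sketch).
    -export([stem_path/2]).

    %% A branch is {LeafPos, Path} with Path ordered from the leaf back
    %% towards the root. Keeping the first Limit entries drops the oldest
    %% ancestors; the start position is recomputed so the leaf keeps its
    %% absolute revision number, mirroring repair_tree above.
    stem_path({LeafPos, Path}, Limit) ->
        Stemmed = lists:sublist(Path, Limit),
        {LeafPos + 1 - length(Stemmed), Stemmed}.

For example, stem_path({5, [e, d, c, b, a]}, 3) returns {3, [e, d, c]}: the leaf at position 5 keeps its number and the two oldest ancestors are dropped.
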
diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl
index 0a228d4c5..5a59ef170 100644
--- a/src/couch/src/couch_native_process.erl
+++ b/src/couch/src/couch_native_process.erl
@@ -41,8 +41,16 @@
-behaviour(gen_server).
-vsn(1).
--export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
- handle_info/2,format_status/2]).
+-export([
+ start_link/0,
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ code_change/3,
+ handle_info/2,
+ format_status/2
+]).
-export([set_timeout/2, prompt/2]).
-define(STATE, native_proc_state).
@@ -74,15 +82,15 @@ prompt(Pid, Data) when is_list(Data) ->
init([]) ->
V = config:get("query_server_config", "os_process_idle_limit", "300"),
Idle = list_to_integer(V) * 1000,
- {ok, #evstate{ddocs=dict:new(), idle=Idle}, Idle}.
+ {ok, #evstate{ddocs = dict:new(), idle = Idle}, Idle}.
handle_call({set_timeout, TimeOut}, _From, State) ->
- {reply, ok, State#evstate{timeout=TimeOut}, State#evstate.idle};
-
+ {reply, ok, State#evstate{timeout = TimeOut}, State#evstate.idle};
handle_call({prompt, Data}, _From, State) ->
- couch_log:debug("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
- {NewState, Resp} = try run(State, to_binary(Data)) of
- {S, R} -> {S, R}
+ couch_log:debug("Prompt native qs: ~s", [?JSON_ENCODE(Data)]),
+ {NewState, Resp} =
+ try run(State, to_binary(Data)) of
+ {S, R} -> {S, R}
catch
throw:{error, Why} ->
{State, [<<"error">>, Why, Why]}
@@ -118,9 +126,9 @@ handle_info(timeout, State) ->
gen_server:cast(couch_proc_manager, {os_proc_idle, self()}),
erlang:garbage_collect(),
{noreply, State, State#evstate.idle};
-handle_info({'EXIT',_,normal}, State) ->
+handle_info({'EXIT', _, normal}, State) ->
{noreply, State, State#evstate.idle};
-handle_info({'EXIT',_,Reason}, State) ->
+handle_info({'EXIT', _, Reason}, State) ->
{stop, Reason, State}.
terminate(_Reason, _State) -> ok.
code_change(_OldVersion, State, _Extra) -> {ok, State}.
@@ -136,11 +144,9 @@ format_status(_Opt, [_PDict, State]) ->
funs = {length, length(Functions)},
query_config = {length, length(QueryConfig)}
},
- [{data, [{"State",
- ?record_to_keyval(evstate, Scrubbed)
- }]}].
+ [{data, [{"State", ?record_to_keyval(evstate, Scrubbed)}]}].
-run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
+run(#evstate{list_pid = Pid} = State, [<<"list_row">>, Row]) when is_pid(Pid) ->
Pid ! {self(), list_row, Row},
receive
{Pid, chunks, Data} ->
@@ -152,124 +158,137 @@ run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
throw({timeout, list_cleanup})
end,
process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid=nil}, [<<"end">>, Data]}
+ {State#evstate{list_pid = nil}, [<<"end">>, Data]}
after State#evstate.timeout ->
throw({timeout, list_row})
end;
-run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
+run(#evstate{list_pid = Pid} = State, [<<"list_end">>]) when is_pid(Pid) ->
Pid ! {self(), list_end},
Resp =
- receive
- {Pid, list_end, Data} ->
- receive
- {'EXIT', Pid, normal} -> ok
- after State#evstate.timeout ->
- throw({timeout, list_cleanup})
- end,
- [<<"end">>, Data]
- after State#evstate.timeout ->
- throw({timeout, list_end})
- end,
+ receive
+ {Pid, list_end, Data} ->
+ receive
+ {'EXIT', Pid, normal} -> ok
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup})
+ end,
+ [<<"end">>, Data]
+ after State#evstate.timeout ->
+ throw({timeout, list_end})
+ end,
process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid=nil}, Resp};
-run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
+ {State#evstate{list_pid = nil}, Resp};
+run(#evstate{list_pid = Pid} = State, _Command) when is_pid(Pid) ->
{State, [<<"error">>, list_error, list_error]};
-run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
- {#evstate{ddocs=DDocs}, true};
-run(#evstate{ddocs=DDocs, idle=Idle}, [<<"reset">>, QueryConfig]) ->
+run(#evstate{ddocs = DDocs}, [<<"reset">>]) ->
+ {#evstate{ddocs = DDocs}, true};
+run(#evstate{ddocs = DDocs, idle = Idle}, [<<"reset">>, QueryConfig]) ->
NewState = #evstate{
ddocs = DDocs,
query_config = QueryConfig,
idle = Idle
},
{NewState, true};
-run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
+run(#evstate{funs = Funs} = State, [<<"add_fun">>, BinFunc]) ->
FunInfo = makefun(State, BinFunc),
- {State#evstate{funs=Funs ++ [FunInfo]}, true};
-run(State, [<<"map_doc">> , Doc]) ->
- Resp = lists:map(fun({Sig, Fun}) ->
- erlang:put(Sig, []),
- Fun(Doc),
- lists:reverse(erlang:get(Sig))
- end, State#evstate.funs),
+ {State#evstate{funs = Funs ++ [FunInfo]}, true};
+run(State, [<<"map_doc">>, Doc]) ->
+ Resp = lists:map(
+ fun({Sig, Fun}) ->
+ erlang:put(Sig, []),
+ Fun(Doc),
+ lists:reverse(erlang:get(Sig))
+ end,
+ State#evstate.funs
+ ),
{State, Resp};
run(State, [<<"reduce">>, Funs, KVs]) ->
{Keys, Vals} =
- lists:foldl(fun([K, V], {KAcc, VAcc}) ->
- {[K | KAcc], [V | VAcc]}
- end, {[], []}, KVs),
+ lists:foldl(
+ fun([K, V], {KAcc, VAcc}) ->
+ {[K | KAcc], [V | VAcc]}
+ end,
+ {[], []},
+ KVs
+ ),
Keys2 = lists:reverse(Keys),
Vals2 = lists:reverse(Vals),
{State, catch reduce(State, Funs, Keys2, Vals2, false)};
run(State, [<<"rereduce">>, Funs, Vals]) ->
{State, catch reduce(State, Funs, null, Vals, true)};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
+run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
- {State#evstate{ddocs=DDocs2}, true};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
+ {State#evstate{ddocs = DDocs2}, true};
+run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, DDocId | Rest]) ->
DDoc = load_ddoc(DDocs, DDocId),
ddoc(State, DDoc, Rest);
run(_, Unknown) ->
couch_log:error("Native Process: Unknown command: ~p~n", [Unknown]),
throw({error, unknown_command}).
-
+
ddoc(State, {DDoc}, [FunPath, Args]) ->
% load fun from the FunPath
- BFun = lists:foldl(fun
- (Key, {Props}) when is_list(Props) ->
- couch_util:get_value(Key, Props, nil);
- (_Key, Fun) when is_binary(Fun) ->
- Fun;
- (_Key, nil) ->
- throw({error, not_found});
- (_Key, _Fun) ->
- throw({error, malformed_ddoc})
- end, {DDoc}, FunPath),
+ BFun = lists:foldl(
+ fun
+ (Key, {Props}) when is_list(Props) ->
+ couch_util:get_value(Key, Props, nil);
+ (_Key, Fun) when is_binary(Fun) ->
+ Fun;
+ (_Key, nil) ->
+ throw({error, not_found});
+ (_Key, _Fun) ->
+ throw({error, malformed_ddoc})
+ end,
+ {DDoc},
+ FunPath
+ ),
ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
{State, (catch apply(Fun, Args))};
ddoc(State, {_, Fun}, [<<"rewrites">>], Args) ->
{State, (catch apply(Fun, Args))};
-ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
+ddoc(State, {_, Fun}, [<<"filters">> | _], [Docs, Req]) ->
FilterFunWrapper = fun(Doc) ->
case catch Fun(Doc, Req) of
- true -> true;
- false -> false;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
+ true -> true;
+ false -> false;
+ {'EXIT', Error} -> couch_log:error("~p", [Error])
end
end,
Resp = lists:map(FilterFunWrapper, Docs),
{State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) ->
+ddoc(State, {_, Fun}, [<<"views">> | _], [Docs]) ->
MapFunWrapper = fun(Doc) ->
case catch Fun(Doc) of
- undefined -> true;
- ok -> false;
- false -> false;
- [_|_] -> true;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
+ undefined -> true;
+ ok -> false;
+ false -> false;
+ [_ | _] -> true;
+ {'EXIT', Error} -> couch_log:error("~p", [Error])
end
end,
Resp = lists:map(MapFunWrapper, Docs),
{State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
- Resp = case (catch apply(Fun, Args)) of
- FunResp when is_list(FunResp) ->
- FunResp;
- {FunResp} ->
- [<<"resp">>, {FunResp}];
- FunResp ->
- FunResp
- end,
+ddoc(State, {_, Fun}, [<<"shows">> | _], Args) ->
+ Resp =
+ case (catch apply(Fun, Args)) of
+ FunResp when is_list(FunResp) ->
+ FunResp;
+ {FunResp} ->
+ [<<"resp">>, {FunResp}];
+ FunResp ->
+ FunResp
+ end,
{State, Resp};
-ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
- Resp = case (catch apply(Fun, Args)) of
- [JsonDoc, JsonResp] ->
- [<<"up">>, JsonDoc, JsonResp]
- end,
+ddoc(State, {_, Fun}, [<<"updates">> | _], Args) ->
+ Resp =
+ case (catch apply(Fun, Args)) of
+ [JsonDoc, JsonResp] ->
+ [<<"up">>, JsonDoc, JsonResp]
+ end,
{State, Resp};
-ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
+ddoc(State, {Sig, Fun}, [<<"lists">> | _], Args) ->
Self = self(),
SpawnFun = fun() ->
LastChunk = (catch apply(Fun, Args)),
@@ -285,22 +304,22 @@ ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
ok
end,
LastChunks =
- case erlang:get(Sig) of
- undefined -> [LastChunk];
- OtherChunks -> [LastChunk | OtherChunks]
- end,
+ case erlang:get(Sig) of
+ undefined -> [LastChunk];
+ OtherChunks -> [LastChunk | OtherChunks]
+ end,
Self ! {self(), list_end, lists:reverse(LastChunks)}
end,
erlang:put(do_trap, process_flag(trap_exit, true)),
Pid = spawn_link(SpawnFun),
Resp =
- receive
- {Pid, start, Chunks, JsonResp} ->
- [<<"start">>, Chunks, JsonResp]
- after State#evstate.timeout ->
- throw({timeout, list_start})
- end,
- {State#evstate{list_pid=Pid}, Resp}.
+ receive
+ {Pid, start, Chunks, JsonResp} ->
+ [<<"start">>, Chunks, JsonResp]
+ after State#evstate.timeout ->
+ throw({timeout, list_start})
+ end,
+ {State#evstate{list_pid = Pid}, Resp}.
store_ddoc(DDocs, DDocId, DDoc) ->
dict:store(DDocId, DDoc, DDocs).
@@ -308,7 +327,11 @@ load_ddoc(DDocs, DDocId) ->
try dict:fetch(DDocId, DDocs) of
{DDoc} -> {DDoc}
catch
- _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
+ _:_Else ->
+ throw(
+ {error,
+ ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s", [DDocId]))}
+ )
end.
bindings(State, Sig) ->
@@ -331,10 +354,10 @@ bindings(State, Sig, DDoc) ->
Send = fun(Chunk) ->
Curr =
- case erlang:get(Sig) of
- undefined -> [];
- Else -> Else
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ Else -> Else
+ end,
erlang:put(Sig, [Chunk | Curr])
end,
@@ -344,10 +367,10 @@ bindings(State, Sig, DDoc) ->
ok;
_ ->
Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
Self ! {self(), chunks, lists:reverse(Chunks)}
end,
erlang:put(Sig, []),
@@ -358,7 +381,7 @@ bindings(State, Sig, DDoc) ->
throw({timeout, list_pid_getrow})
end
end,
-
+
FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
Bindings = [
@@ -372,7 +395,8 @@ bindings(State, Sig, DDoc) ->
case DDoc of
{_Props} ->
Bindings ++ [{'DDoc', DDoc}];
- _Else -> Bindings
+ _Else ->
+ Bindings
end.
% thanks to erlview, via:
@@ -388,30 +412,41 @@ makefun(State, Source, {DDoc}) ->
makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
FunStr = binary_to_list(Source),
{ok, Tokens, _} = erl_scan:string(FunStr),
- Form = case (catch erl_parse:parse_exprs(Tokens)) of
- {ok, [ParsedForm]} ->
- ParsedForm;
- {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
- couch_log:error("Syntax error on line: ~p~n~s~p~n",
- [LineNum, Mesg, Params]),
- throw(Error)
- end,
- Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
- erl_eval:add_binding(Name, Fun, Acc)
- end, erl_eval:new_bindings(), BindFuns),
+ Form =
+ case (catch erl_parse:parse_exprs(Tokens)) of
+ {ok, [ParsedForm]} ->
+ ParsedForm;
+ {error, {LineNum, _Mod, [Mesg, Params]}} = Error ->
+ couch_log:error(
+ "Syntax error on line: ~p~n~s~p~n",
+ [LineNum, Mesg, Params]
+ ),
+ throw(Error)
+ end,
+ Bindings = lists:foldl(
+ fun({Name, Fun}, Acc) ->
+ erl_eval:add_binding(Name, Fun, Acc)
+ end,
+ erl_eval:new_bindings(),
+ BindFuns
+ ),
{value, Fun, _} = erl_eval:expr(Form, Bindings),
Fun.
reduce(State, BinFuns, Keys, Vals, ReReduce) ->
- Funs = case is_list(BinFuns) of
- true ->
- lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
- _ ->
- [makefun(State, BinFuns)]
- end,
- Reds = lists:map(fun({_Sig, Fun}) ->
- Fun(Keys, Vals, ReReduce)
- end, Funs),
+ Funs =
+ case is_list(BinFuns) of
+ true ->
+ lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
+ _ ->
+ [makefun(State, BinFuns)]
+ end,
+ Reds = lists:map(
+ fun({_Sig, Fun}) ->
+ Fun(Keys, Vals, ReReduce)
+ end,
+ Funs
+ ),
[true, Reds].
foldrows(GetRow, ProcRow, Acc) ->
@@ -431,15 +466,15 @@ start_list_resp(Self, Sig) ->
case erlang:get(list_started) of
undefined ->
Headers =
- case erlang:get(list_headers) of
- undefined -> {[{<<"headers">>, {[]}}]};
- CurrHdrs -> CurrHdrs
- end,
+ case erlang:get(list_headers) of
+ undefined -> {[{<<"headers">>, {[]}}]};
+ CurrHdrs -> CurrHdrs
+ end,
Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
Self ! {self(), start, lists:reverse(Chunks), Headers},
erlang:put(list_started, true),
erlang:put(Sig, []),
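
The makefun/3 hunk above compiles a design-document source string into a callable fun through the erl_scan -> erl_parse -> erl_eval pipeline. Stripped of the CouchDB-specific bindings, the same pipeline looks like this (module name is illustrative):

    -module(eval_sketch).
    -export([compile_fun/1]).

    %% Compile an Erlang fun given as a string; the trailing dot is
    %% required by erl_parse:parse_exprs/1.
    %% Example: F = eval_sketch:compile_fun("fun(X) -> X + 1 end."), 2 = F(1).
    compile_fun(Source) ->
        {ok, Tokens, _EndLoc} = erl_scan:string(Source),
        {ok, [Form]} = erl_parse:parse_exprs(Tokens),
        {value, Fun, _Bindings} = erl_eval:expr(Form, erl_eval:new_bindings()),
        Fun.

The real code additionally threads BindFuns into the bindings with erl_eval:add_binding/3, which is how design functions reach helpers like the Send and FoldRows funs built in bindings/3 above.
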
diff --git a/src/couch/src/couch_os_process.erl b/src/couch/src/couch_os_process.erl
index e17782ea3..fcbd5f678 100644
--- a/src/couch/src/couch_os_process.erl
+++ b/src/couch/src/couch_os_process.erl
@@ -23,14 +23,14 @@
-define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
--record(os_proc,
- {command,
- port,
- writer,
- reader,
- timeout=5000,
- idle
- }).
+-record(os_proc, {
+ command,
+ port,
+ writer,
+ reader,
+ timeout = 5000,
+ idle
+}).
start_link(Command) ->
start_link(Command, []).
@@ -55,7 +55,7 @@ prompt(Pid, Data) ->
{ok, Result} ->
Result;
Error ->
- couch_log:error("OS Process Error ~p :: ~p",[Pid,Error]),
+ couch_log:error("OS Process Error ~p :: ~p", [Pid, Error]),
throw(Error)
end.
@@ -72,21 +72,21 @@ readline(#os_proc{} = OsProc) ->
Res.
readline(#os_proc{port = Port} = OsProc, Acc) ->
receive
- {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
- readline(OsProc, <<Acc/binary,Data/binary>>);
- {Port, {data, {noeol, Data}}} when is_binary(Data) ->
- readline(OsProc, Data);
- {Port, {data, {noeol, Data}}} ->
- readline(OsProc, [Data|Acc]);
- {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
- [<<Acc/binary,Data/binary>>];
- {Port, {data, {eol, Data}}} when is_binary(Data) ->
- [Data];
- {Port, {data, {eol, Data}}} ->
- lists:reverse(Acc, Data);
- {Port, Err} ->
- catch port_close(Port),
- throw({os_process_error, Err})
+ {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
+ readline(OsProc, <<Acc/binary, Data/binary>>);
+ {Port, {data, {noeol, Data}}} when is_binary(Data) ->
+ readline(OsProc, Data);
+ {Port, {data, {noeol, Data}}} ->
+ readline(OsProc, [Data | Acc]);
+ {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
+ [<<Acc/binary, Data/binary>>];
+ {Port, {data, {eol, Data}}} when is_binary(Data) ->
+ [Data];
+ {Port, {data, {eol, Data}}} ->
+ lists:reverse(Acc, Data);
+ {Port, Err} ->
+ catch port_close(Port),
+ throw({os_process_error, Err})
after OsProc#os_proc.timeout ->
catch port_close(Port),
throw({os_process_error, "OS process timed out."})
@@ -95,8 +95,10 @@ readline(#os_proc{port = Port} = OsProc, Acc) ->
% Standard JSON functions
writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
JsonData = ?JSON_ENCODE(Data),
- couch_log:debug("OS Process ~p Input :: ~s",
- [OsProc#os_proc.port, JsonData]),
+ couch_log:debug(
+ "OS Process ~p Input :: ~s",
+ [OsProc#os_proc.port, JsonData]
+ ),
true = writeline(OsProc, JsonData).
readjson(OsProc) when is_record(OsProc, os_proc) ->
@@ -109,24 +111,28 @@ readjson(OsProc) when is_record(OsProc, os_proc) ->
% command, otherwise return the raw JSON line to the caller.
pick_command(Line)
catch
- throw:abort ->
- {json, Line};
- throw:{cmd, _Cmd} ->
- case ?JSON_DECODE(Line) of
- [<<"log">>, Msg] when is_binary(Msg) ->
- % we got a message to log. Log it and continue
- couch_log:info("OS Process ~p Log :: ~s",
- [OsProc#os_proc.port, Msg]),
- readjson(OsProc);
- [<<"error">>, Id, Reason] ->
- throw({error, {couch_util:to_existing_atom(Id),Reason}});
- [<<"fatal">>, Id, Reason] ->
- couch_log:info("OS Process ~p Fatal Error :: ~s ~p",
- [OsProc#os_proc.port, Id, Reason]),
- throw({couch_util:to_existing_atom(Id),Reason});
- _Result ->
- {json, Line}
- end
+ throw:abort ->
+ {json, Line};
+ throw:{cmd, _Cmd} ->
+ case ?JSON_DECODE(Line) of
+ [<<"log">>, Msg] when is_binary(Msg) ->
+ % we got a message to log. Log it and continue
+ couch_log:info(
+ "OS Process ~p Log :: ~s",
+ [OsProc#os_proc.port, Msg]
+ ),
+ readjson(OsProc);
+ [<<"error">>, Id, Reason] ->
+ throw({error, {couch_util:to_existing_atom(Id), Reason}});
+ [<<"fatal">>, Id, Reason] ->
+ couch_log:info(
+ "OS Process ~p Fatal Error :: ~s ~p",
+ [OsProc#os_proc.port, Id, Reason]
+ ),
+ throw({couch_util:to_existing_atom(Id), Reason});
+ _Result ->
+ {json, Line}
+ end
end.
pick_command(Line) ->
@@ -146,7 +152,6 @@ pick_command1(<<"fatal">> = Cmd) ->
pick_command1(_) ->
throw(abort).
-
% gen_server API
init([Command, Options, PortOptions]) ->
couch_io_logger:start(os:getenv("COUCHDB_IO_LOG_DIR")),
@@ -155,34 +160,38 @@ init([Command, Options, PortOptions]) ->
V = config:get("query_server_config", "os_process_idle_limit", "300"),
IdleLimit = list_to_integer(V) * 1000,
BaseProc = #os_proc{
- command=Command,
- port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
- writer=fun ?MODULE:writejson/2,
- reader=fun ?MODULE:readjson/1,
- idle=IdleLimit
+ command = Command,
+ port = open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
+ writer = fun ?MODULE:writejson/2,
+ reader = fun ?MODULE:readjson/1,
+ idle = IdleLimit
},
KillCmd = iolist_to_binary(readline(BaseProc)),
Pid = self(),
couch_log:debug("OS Process Start :: ~p", [BaseProc#os_proc.port]),
spawn(fun() ->
- % this ensure the real os process is killed when this process dies.
- erlang:monitor(process, Pid),
- killer(?b2l(KillCmd))
- end),
+ % this ensures the real os process is killed when this process dies.
+ erlang:monitor(process, Pid),
+ killer(?b2l(KillCmd))
+ end),
OsProc =
- lists:foldl(fun(Opt, Proc) ->
- case Opt of
- {writer, Writer} when is_function(Writer) ->
- Proc#os_proc{writer=Writer};
- {reader, Reader} when is_function(Reader) ->
- Proc#os_proc{reader=Reader};
- {timeout, TimeOut} when is_integer(TimeOut) ->
- Proc#os_proc{timeout=TimeOut}
- end
- end, BaseProc, Options),
+ lists:foldl(
+ fun(Opt, Proc) ->
+ case Opt of
+ {writer, Writer} when is_function(Writer) ->
+ Proc#os_proc{writer = Writer};
+ {reader, Reader} when is_function(Reader) ->
+ Proc#os_proc{reader = Reader};
+ {timeout, TimeOut} when is_integer(TimeOut) ->
+ Proc#os_proc{timeout = TimeOut}
+ end
+ end,
+ BaseProc,
+ Options
+ ),
{ok, OsProc, IdleLimit}.
-terminate(Reason, #os_proc{port=Port}) ->
+terminate(Reason, #os_proc{port = Port}) ->
catch port_close(Port),
case Reason of
normal ->
@@ -192,10 +201,10 @@ terminate(Reason, #os_proc{port=Port}) ->
end,
ok.
-handle_call({set_timeout, TimeOut}, _From, #os_proc{idle=Idle}=OsProc) ->
- {reply, ok, OsProc#os_proc{timeout=TimeOut}, Idle};
-handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
- #os_proc{writer=Writer, reader=Reader} = OsProc,
+handle_call({set_timeout, TimeOut}, _From, #os_proc{idle = Idle} = OsProc) ->
+ {reply, ok, OsProc#os_proc{timeout = TimeOut}, Idle};
+handle_call({prompt, Data}, _From, #os_proc{idle = Idle} = OsProc) ->
+ #os_proc{writer = Writer, reader = Reader} = OsProc,
try
Writer(OsProc, Data),
{reply, {ok, Reader(OsProc)}, OsProc, Idle}
@@ -210,7 +219,7 @@ handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
garbage_collect()
end.
-handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
+handle_cast({send, Data}, #os_proc{writer = Writer, idle = Idle} = OsProc) ->
try
Writer(OsProc, Data),
{noreply, OsProc, Idle}
@@ -219,31 +228,31 @@ handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
couch_log:error("Failed sending data: ~p -> ~p", [Data, OsError]),
{stop, normal, OsProc}
end;
-handle_cast(garbage_collect, #os_proc{idle=Idle}=OsProc) ->
+handle_cast(garbage_collect, #os_proc{idle = Idle} = OsProc) ->
erlang:garbage_collect(),
{noreply, OsProc, Idle};
handle_cast(stop, OsProc) ->
{stop, normal, OsProc};
-handle_cast(Msg, #os_proc{idle=Idle}=OsProc) ->
+handle_cast(Msg, #os_proc{idle = Idle} = OsProc) ->
couch_log:debug("OS Proc: Unknown cast: ~p", [Msg]),
{noreply, OsProc, Idle}.
-handle_info(timeout, #os_proc{idle=Idle}=OsProc) ->
+handle_info(timeout, #os_proc{idle = Idle} = OsProc) ->
gen_server:cast(couch_proc_manager, {os_proc_idle, self()}),
erlang:garbage_collect(),
{noreply, OsProc, Idle};
-handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
+handle_info({Port, {exit_status, 0}}, #os_proc{port = Port} = OsProc) ->
couch_log:info("OS Process terminated normally", []),
{stop, normal, OsProc};
-handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
+handle_info({Port, {exit_status, Status}}, #os_proc{port = Port} = OsProc) ->
couch_log:error("OS Process died with status: ~p", [Status]),
{stop, {exit_status, Status}, OsProc};
-handle_info(Msg, #os_proc{idle=Idle}=OsProc) ->
+handle_info(Msg, #os_proc{idle = Idle} = OsProc) ->
couch_log:debug("OS Proc: Unknown info: ~p", [Msg]),
{noreply, OsProc, Idle}.
-code_change(_, {os_proc, Cmd, Port, W, R, Timeout} , _) ->
- V = config:get("query_server_config","os_process_idle_limit","300"),
+code_change(_, {os_proc, Cmd, Port, W, R, Timeout}, _) ->
+ V = config:get("query_server_config", "os_process_idle_limit", "300"),
State = #os_proc{
command = Cmd,
port = Port,
@@ -257,9 +266,9 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
killer(KillCmd) ->
- receive _ ->
- os:cmd(KillCmd)
+ receive
+ _ ->
+ os:cmd(KillCmd)
after 1000 ->
?MODULE:killer(KillCmd)
end.
-
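
readline/2 above assembles whole lines from a port opened with the {line, 4096} option: noeol chunks are buffered until an eol chunk completes the line. A reduced sketch of that receive loop, with the record plumbing and error clauses left out (module name and timeout handling are illustrative):

    -module(port_line_sketch).
    -export([read_line/2]).

    %% Ports opened with the {line, N} option deliver
    %% {Port, {data, {noeol, Chunk}}} for partial lines and
    %% {Port, {data, {eol, Chunk}}} for the final piece of a line.
    read_line(Port, Timeout) ->
        read_line(Port, Timeout, <<>>).

    read_line(Port, Timeout, Acc) ->
        receive
            {Port, {data, {noeol, Chunk}}} ->
                read_line(Port, Timeout, <<Acc/binary, Chunk/binary>>);
            {Port, {data, {eol, Chunk}}} ->
                <<Acc/binary, Chunk/binary>>
        after Timeout ->
            catch port_close(Port),
            throw({os_process_error, "OS process timed out."})
        end.
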
diff --git a/src/couch/src/couch_partition.erl b/src/couch/src/couch_partition.erl
index cb78323c3..b5b675c5c 100644
--- a/src/couch/src/couch_partition.erl
+++ b/src/couch/src/couch_partition.erl
@@ -12,7 +12,6 @@
-module(couch_partition).
-
-export([
extract/1,
from_docid/1,
@@ -29,10 +28,8 @@
hash/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-
extract(Value) when is_binary(Value) ->
case binary:split(Value, <<":">>) of
[Partition, Rest] ->
@@ -40,11 +37,9 @@ extract(Value) when is_binary(Value) ->
_ ->
undefined
end;
-
extract(_) ->
undefined.
-
from_docid(DocId) ->
case extract(DocId) of
undefined ->
@@ -53,7 +48,6 @@ from_docid(DocId) ->
Partition
end.
-
is_member(DocId, Partition) ->
case extract(DocId) of
{Partition, _} ->
@@ -62,53 +56,52 @@ is_member(DocId, Partition) ->
false
end.
-
start_key(Partition) ->
<<Partition/binary, ":">>.
-
end_key(Partition) ->
<<Partition/binary, ";">>.
-
shard_key(Partition) ->
<<Partition/binary, ":foo">>.
-
validate_dbname(DbName, Options) when is_list(DbName) ->
validate_dbname(?l2b(DbName), Options);
validate_dbname(DbName, Options) when is_binary(DbName) ->
Props = couch_util:get_value(props, Options, []),
IsPartitioned = couch_util:get_value(partitioned, Props, false),
- if not IsPartitioned -> ok; true ->
-
- DbsDbName = config:get("mem3", "shards_db", "_dbs"),
- NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
- UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
- Suffix = couch_db:dbname_suffix(DbName),
+ if
+ not IsPartitioned ->
+ ok;
+ true ->
+ DbsDbName = config:get("mem3", "shards_db", "_dbs"),
+ NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
+ UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
+ Suffix = couch_db:dbname_suffix(DbName),
- SysDbNames = [
+ SysDbNames = [
iolist_to_binary(DbsDbName),
iolist_to_binary(NodesDbName)
| ?SYSTEM_DATABASES
],
- Suffices = [
+ Suffices = [
<<"_replicator">>,
<<"_users">>,
iolist_to_binary(UsersDbSuffix)
],
- IsSysDb = lists:member(DbName, SysDbNames)
- orelse lists:member(Suffix, Suffices),
+ IsSysDb =
+ lists:member(DbName, SysDbNames) orelse
+ lists:member(Suffix, Suffices),
- if not IsSysDb -> ok; true ->
- throw({bad_request, <<"Cannot partition a system database">>})
- end
+ if
+ not IsSysDb -> ok;
+ true -> throw({bad_request, <<"Cannot partition a system database">>})
+ end
end.
-
validate_docid(<<"_design/", _/binary>>) ->
ok;
validate_docid(<<"_local/", _/binary>>) ->
@@ -125,7 +118,6 @@ validate_docid(DocId) when is_binary(DocId) ->
fabric2_db:validate_docid(PartitionedDocId)
end.
-
validate_partition(<<>>) ->
throw({illegal_partition, <<"Partition must not be empty">>});
validate_partition(Partition) when is_binary(Partition) ->
@@ -153,7 +145,6 @@ validate_partition(Partition) when is_binary(Partition) ->
validate_partition(_) ->
throw({illegal_partition, <<"Partition must be a string">>}).
-
% Document ids that start with an underscore
% (i.e., _local and _design) do not contain a
% partition and thus do not use the partition
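
The start_key/1 and end_key/1 pair above works because $; is the next codepoint after $:, so every key of the form <<Partition/binary, ":", DocId/binary>> sorts at or after start_key and strictly before end_key. A quick shell check (values are illustrative):

    1> P = <<"sensors">>.
    <<"sensors">>
    2> Key = <<P/binary, ":reading-001">>.
    <<"sensors:reading-001">>
    3> Key >= <<P/binary, ":">> andalso Key < <<P/binary, ";">>.
    true
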
diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl
index 55ffb359f..828d2f68b 100644
--- a/src/couch/src/couch_passwords.erl
+++ b/src/couch/src/couch_passwords.erl
@@ -40,98 +40,144 @@ hash_admin_password(ClearPassword) when is_binary(ClearPassword) ->
Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"),
hash_admin_password(Scheme, ClearPassword).
-hash_admin_password("simple", ClearPassword) -> % deprecated
+% deprecated
+hash_admin_password("simple", ClearPassword) ->
Salt = couch_uuids:random(),
Hash = crypto:hash(sha, <<ClearPassword/binary, Salt/binary>>),
?l2b("-hashed-" ++ couch_util:to_hex(Hash) ++ "," ++ ?b2l(Salt));
hash_admin_password("pbkdf2", ClearPassword) ->
Iterations = chttpd_util:get_chttpd_auth_config("iterations", "10"),
Salt = couch_uuids:random(),
- DerivedKey = couch_passwords:pbkdf2(couch_util:to_binary(ClearPassword),
- Salt, list_to_integer(Iterations)),
- ?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ ","
- ++ ?b2l(Salt) ++ ","
- ++ Iterations).
+ DerivedKey = couch_passwords:pbkdf2(
+ couch_util:to_binary(ClearPassword),
+ Salt,
+ list_to_integer(Iterations)
+ ),
+ ?l2b(
+ "-pbkdf2-" ++ ?b2l(DerivedKey) ++ "," ++
+ ?b2l(Salt) ++ "," ++
+ Iterations
+ ).
-spec get_unhashed_admins() -> list().
get_unhashed_admins() ->
lists:filter(
- fun({_User, "-hashed-" ++ _}) ->
- false; % already hashed
- ({_User, "-pbkdf2-" ++ _}) ->
- false; % already hashed
- ({_User, _ClearPassword}) ->
- true
+ fun
+ ({_User, "-hashed-" ++ _}) ->
+ % already hashed
+ false;
+ ({_User, "-pbkdf2-" ++ _}) ->
+ % already hashed
+ false;
+ ({_User, _ClearPassword}) ->
+ true
end,
- config:get("admins")).
+ config:get("admins")
+ ).
%% Current scheme, much stronger.
-spec pbkdf2(binary(), binary(), integer()) -> binary().
-pbkdf2(Password, Salt, Iterations) when is_binary(Password),
- is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0 ->
+pbkdf2(Password, Salt, Iterations) when
+ is_binary(Password),
+ is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0
+->
{ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
Result;
-pbkdf2(Password, Salt, Iterations) when is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0 ->
+pbkdf2(Password, Salt, Iterations) when
+ is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0
+->
Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
throw({forbidden, Msg});
-pbkdf2(Password, Salt, Iterations) when is_binary(Password),
- is_integer(Iterations),
- Iterations > 0 ->
+pbkdf2(Password, Salt, Iterations) when
+ is_binary(Password),
+ is_integer(Iterations),
+ Iterations > 0
+->
Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
throw({forbidden, Msg}).
--spec pbkdf2(binary(), binary(), integer(), integer())
- -> {ok, binary()} | {error, derived_key_too_long}.
-pbkdf2(_Password, _Salt, _Iterations, DerivedLength)
- when DerivedLength > ?MAX_DERIVED_KEY_LENGTH ->
+-spec pbkdf2(binary(), binary(), integer(), integer()) ->
+ {ok, binary()} | {error, derived_key_too_long}.
+pbkdf2(_Password, _Salt, _Iterations, DerivedLength) when
+ DerivedLength > ?MAX_DERIVED_KEY_LENGTH
+->
{error, derived_key_too_long};
-pbkdf2(Password, Salt, Iterations, DerivedLength) when is_binary(Password),
- is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0,
- is_integer(DerivedLength) ->
+pbkdf2(Password, Salt, Iterations, DerivedLength) when
+ is_binary(Password),
+ is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0,
+ is_integer(DerivedLength)
+->
L = ceiling(DerivedLength / ?SHA1_OUTPUT_LENGTH),
- <<Bin:DerivedLength/binary,_/binary>> =
+ <<Bin:DerivedLength/binary, _/binary>> =
iolist_to_binary(pbkdf2(Password, Salt, Iterations, L, 1, [])),
{ok, ?l2b(couch_util:to_hex(Bin))}.
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist())
- -> iolist().
-pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc)
- when BlockIndex > BlockCount ->
+-spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist()) ->
+ iolist().
+pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc) when
+ BlockIndex > BlockCount
+->
lists:reverse(Acc);
pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex, Acc) ->
Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>),
- pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block|Acc]).
+ pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block | Acc]).
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(),
- binary(), binary()) -> binary().
-pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc)
- when Iteration > Iterations ->
+-spec pbkdf2(
+ binary(),
+ binary(),
+ integer(),
+ integer(),
+ integer(),
+ binary(),
+ binary()
+) -> binary().
+pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc) when
+ Iteration > Iterations
+->
Acc;
pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) ->
- InitialBlock = couch_util:hmac(sha, Password,
- <<Salt/binary,BlockIndex:32/integer>>),
- pbkdf2(Password, Salt, Iterations, BlockIndex, 2,
- InitialBlock, InitialBlock);
+ InitialBlock = couch_util:hmac(
+ sha,
+ Password,
+ <<Salt/binary, BlockIndex:32/integer>>
+ ),
+ pbkdf2(
+ Password,
+ Salt,
+ Iterations,
+ BlockIndex,
+ 2,
+ InitialBlock,
+ InitialBlock
+ );
pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
Next = couch_util:hmac(sha, Password, Prev),
- pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1,
- Next, crypto:exor(Next, Acc)).
+ pbkdf2(
+ Password,
+ Salt,
+ Iterations,
+ BlockIndex,
+ Iteration + 1,
+ Next,
+ crypto:exor(Next, Acc)
+ ).
%% verify two lists for equality without short-circuits to avoid timing attacks.
-spec verify(string(), string(), integer()) -> boolean().
-verify([X|RestX], [Y|RestY], Result) ->
+verify([X | RestX], [Y | RestY], Result) ->
verify(RestX, RestY, (X bxor Y) bor Result);
verify([], [], Result) ->
Result == 0.
--spec verify(binary(), binary()) -> boolean();
- (list(), list()) -> boolean().
+-spec verify
+ (binary(), binary()) -> boolean();
+ (list(), list()) -> boolean().
verify(<<X/binary>>, <<Y/binary>>) ->
verify(?b2l(X), ?b2l(Y));
verify(X, Y) when is_list(X) and is_list(Y) ->
@@ -141,7 +187,8 @@ verify(X, Y) when is_list(X) and is_list(Y) ->
false ->
false
end;
-verify(_X, _Y) -> false.
+verify(_X, _Y) ->
+ false.
-spec ceiling(number()) -> integer().
ceiling(X) ->
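
The pbkdf2 clauses above hand-roll PBKDF2-HMAC-SHA1 and hex-encode the 20-byte result. On OTP 24.2 or newer, crypto:pbkdf2_hmac/5 performs the same derivation natively; a hedged equivalence sketch (the hex helper and the OTP version requirement are assumptions, not part of this patch):

    %% Derive the same 20-byte key with the built-in primitive and
    %% lowercase-hex encode it the way couch_util:to_hex/1 would.
    Password = <<"secret">>,
    Salt = <<"0123456789abcdef">>,
    Iterations = 10,
    Raw = crypto:pbkdf2_hmac(sha, Password, Salt, Iterations, 20),
    Hex = iolist_to_binary([io_lib:format("~2.16.0b", [B]) || <<B>> <= Raw]).

The manual implementation remains useful on older OTP releases, and verify/2 above matters either way: comparing derived keys byte by byte without short-circuiting avoids leaking timing information.
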
diff --git a/src/couch/src/couch_primary_sup.erl b/src/couch/src/couch_primary_sup.erl
index b6d370fbb..a5dd855ef 100644
--- a/src/couch/src/couch_primary_sup.erl
+++ b/src/couch/src/couch_primary_sup.erl
@@ -15,22 +15,15 @@
-export([init/1, start_link/0]).
start_link() ->
- supervisor:start_link({local,couch_primary_services}, ?MODULE, []).
+ supervisor:start_link({local, couch_primary_services}, ?MODULE, []).
init([]) ->
Children = [
- {collation_driver,
- {couch_drv, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_drv]},
- {couch_server,
- {couch_server, sup_start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_server]}
+ {collation_driver, {couch_drv, start_link, []}, permanent, infinity, supervisor, [
+ couch_drv
+ ]},
+ {couch_server, {couch_server, sup_start_link, []}, permanent, brutal_kill, worker, [
+ couch_server
+ ]}
],
{ok, {{one_for_one, 10, 3600}, Children}}.
-
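
erlfmt only reflows the two child specs above; they stay in the legacy six-tuple {Id, StartMFA, Restart, Shutdown, Type, Modules} form. For comparison only (not part of this patch), the same children as the map-based specs OTP supervisors also accept:

    init([]) ->
        Children = [
            #{
                id => collation_driver,
                start => {couch_drv, start_link, []},
                restart => permanent,
                shutdown => infinity,
                type => supervisor,
                modules => [couch_drv]
            },
            #{
                id => couch_server,
                start => {couch_server, sup_start_link, []},
                restart => permanent,
                shutdown => brutal_kill,
                type => worker,
                modules => [couch_server]
            }
        ],
        {ok, {#{strategy => one_for_one, intensity => 10, period => 3600}, Children}}.
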
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index 2dac0ce42..bef495be5 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -62,7 +62,7 @@
-record(client, {
timestamp :: os:timestamp() | '_',
- from :: undefined | {pid(), reference()} | '_',
+ from :: undefined | {pid(), reference()} | '_',
lang :: binary() | '_',
ddoc :: #doc{} | '_',
ddoc_key :: undefined | {DDocId :: docid(), Rev :: revision()} | '_'
@@ -79,27 +79,21 @@
t0 = os:timestamp()
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
get_proc_count() ->
gen_server:call(?MODULE, get_proc_count).
-
get_stale_proc_count() ->
gen_server:call(?MODULE, get_stale_proc_count).
-
reload() ->
gen_server:call(?MODULE, set_threshold_ts).
-
terminate_stale_procs() ->
gen_server:call(?MODULE, terminate_stale_procs).
-
init([]) ->
process_flag(trap_exit, true),
ok = config:listen_for_changes(?MODULE, undefined),
@@ -121,50 +115,48 @@ init([]) ->
soft_limit = get_soft_limit()
}}.
-
terminate(_Reason, _State) ->
- ets:foldl(fun(#proc_int{pid=P}, _) ->
- couch_util:shutdown_sync(P)
- end, 0, ?PROCS),
+ ets:foldl(
+ fun(#proc_int{pid = P}, _) ->
+ couch_util:shutdown_sync(P)
+ end,
+ 0,
+ ?PROCS
+ ),
ok.
-
handle_call(get_proc_count, _From, State) ->
NumProcs = ets:info(?PROCS, size),
NumOpening = ets:info(?OPENING, size),
{reply, NumProcs + NumOpening, State};
-
handle_call(get_stale_proc_count, _From, State) ->
#state{threshold_ts = T0} = State,
- MatchSpec = [{#proc_int{t0='$1', _='_'}, [{'<', '$1', {T0}}], [true]}],
+ MatchSpec = [{#proc_int{t0 = '$1', _ = '_'}, [{'<', '$1', {T0}}], [true]}],
{reply, ets:select_count(?PROCS, MatchSpec), State};
-
-handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, From, State) ->
+handle_call({get_proc, #doc{body = {Props}} = DDoc, DDocKey}, From, State) ->
LangStr = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
Lang = couch_util:to_binary(LangStr),
- Client = #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey},
+ Client = #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey},
add_waiting_client(Client),
{noreply, flush_waiters(State, Lang)};
-
handle_call({get_proc, LangStr}, From, State) ->
Lang = couch_util:to_binary(LangStr),
- Client = #client{from=From, lang=Lang},
+ Client = #client{from = From, lang = Lang},
add_waiting_client(Client),
{noreply, flush_waiters(State, Lang)};
-
-handle_call({ret_proc, #proc{client=Ref} = Proc}, _From, State) ->
+handle_call({ret_proc, #proc{client = Ref} = Proc}, _From, State) ->
erlang:demonitor(Ref, [flush]),
- NewState = case ets:lookup(?PROCS, Proc#proc.pid) of
- [#proc_int{}=ProcInt] ->
- return_proc(State, ProcInt);
- [] ->
- % Proc must've died and we already
- % cleared it out of the table in
- % the handle_info clause.
- State
- end,
+ NewState =
+ case ets:lookup(?PROCS, Proc#proc.pid) of
+ [#proc_int{} = ProcInt] ->
+ return_proc(State, ProcInt);
+ [] ->
+ % Proc must've died and we already
+ % cleared it out of the table in
+ % the handle_info clause.
+ State
+ end,
{reply, true, NewState};
-
handle_call(set_threshold_ts, _From, State) ->
FoldFun = fun
(#proc_int{client = undefined} = Proc, StateAcc) ->
@@ -174,7 +166,6 @@ handle_call(set_threshold_ts, _From, State) ->
end,
NewState = ets:foldl(FoldFun, State, ?PROCS),
{reply, ok, NewState#state{threshold_ts = os:timestamp()}};
-
handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
FoldFun = fun
(#proc_int{client = undefined, t0 = Ts2} = Proc, StateAcc) ->
@@ -189,26 +180,24 @@ handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
end,
NewState = ets:foldl(FoldFun, State, ?PROCS),
{reply, ok, NewState};
-
handle_call(_Call, _From, State) ->
{reply, ignored, State}.
-
-handle_cast({os_proc_idle, Pid}, #state{counts=Counts}=State) ->
- NewState = case ets:lookup(?PROCS, Pid) of
- [#proc_int{client=undefined, lang=Lang}=Proc] ->
- case dict:find(Lang, Counts) of
- {ok, Count} when Count >= State#state.soft_limit ->
- couch_log:info("Closing idle OS Process: ~p", [Pid]),
- remove_proc(State, Proc);
- {ok, _} ->
- State
- end;
- _ ->
- State
- end,
+handle_cast({os_proc_idle, Pid}, #state{counts = Counts} = State) ->
+ NewState =
+ case ets:lookup(?PROCS, Pid) of
+ [#proc_int{client = undefined, lang = Lang} = Proc] ->
+ case dict:find(Lang, Counts) of
+ {ok, Count} when Count >= State#state.soft_limit ->
+ couch_log:info("Closing idle OS Process: ~p", [Pid]),
+ remove_proc(State, Proc);
+ {ok, _} ->
+ State
+ end;
+ _ ->
+ State
+ end,
{noreply, NewState};
-
handle_cast(reload_config, State) ->
NewState = State#state{
config = get_proc_config(),
@@ -217,29 +206,24 @@ handle_cast(reload_config, State) ->
},
maybe_configure_erlang_native_servers(),
{noreply, flush_waiters(NewState)};
-
handle_cast(_Msg, State) ->
{noreply, State}.
-
handle_info(shutdown, State) ->
{stop, shutdown, State};
-
-handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid,_} = From}}, State) ->
+handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid, _} = From}}, State) ->
ets:delete(?OPENING, Pid),
link(Proc0#proc_int.pid),
Proc = assign_proc(ClientPid, Proc0),
gen_server:reply(From, {ok, Proc, State#state.config}),
{noreply, State};
-
handle_info({'EXIT', Pid, spawn_error}, State) ->
- [{Pid, #client{lang=Lang}}] = ets:lookup(?OPENING, Pid),
+ [{Pid, #client{lang = Lang}}] = ets:lookup(?OPENING, Pid),
ets:delete(?OPENING, Pid),
NewState = State#state{
counts = dict:update_counter(Lang, -1, State#state.counts)
},
{noreply, flush_waiters(NewState, Lang)};
-
handle_info({'EXIT', Pid, Reason}, State) ->
couch_log:info("~p ~p died ~p", [?MODULE, Pid, Reason]),
case ets:lookup(?PROCS, Pid) of
@@ -249,39 +233,30 @@ handle_info({'EXIT', Pid, Reason}, State) ->
[] ->
{noreply, State}
end;
-
handle_info({'DOWN', Ref, _, _, _Reason}, State0) ->
- case ets:match_object(?PROCS, #proc_int{client=Ref, _='_'}) of
+ case ets:match_object(?PROCS, #proc_int{client = Ref, _ = '_'}) of
[#proc_int{} = Proc] ->
{noreply, return_proc(State0, Proc)};
[] ->
{noreply, State0}
end;
-
-
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State};
-
handle_info(_Msg, State) ->
{noreply, State}.
-
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
-
format_status(_Opt, [_PDict, State]) ->
#state{
- counts=Counts
+ counts = Counts
} = State,
Scrubbed = State#state{
- counts={dict_size, dict:size(Counts)}
+ counts = {dict_size, dict:size(Counts)}
},
- [{data, [{"State",
- ?record_to_keyval(state, Scrubbed)
- }]}].
-
+ [{data, [{"State", ?record_to_keyval(state, Scrubbed)}]}].
handle_config_terminate(_, stop, _) ->
ok;
@@ -298,7 +273,6 @@ handle_config_change("query_server_config", _, _, _, _) ->
handle_config_change(_, _, _, _, _) ->
{ok, undefined}.
-
find_proc(#client{lang = Lang, ddoc_key = undefined}) ->
Pred = fun(_) ->
true
@@ -310,7 +284,7 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
end,
case find_proc(Lang, Pred) of
not_found ->
- case find_proc(Client#client{ddoc_key=undefined}) of
+ case find_proc(Client#client{ddoc_key = undefined}) of
{ok, Proc} ->
teach_ddoc(DDoc, DDocKey, Proc);
Else ->
@@ -321,15 +295,16 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
end.
find_proc(Lang, Fun) ->
- try iter_procs(Lang, Fun)
- catch error:Reason:StackTrace ->
- couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
- {error, Reason}
+ try
+ iter_procs(Lang, Fun)
+ catch
+ error:Reason:StackTrace ->
+ couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
+ {error, Reason}
end.
-
iter_procs(Lang, Fun) when is_binary(Lang) ->
- Pattern = #proc_int{lang=Lang, client=undefined, _='_'},
+ Pattern = #proc_int{lang = Lang, client = undefined, _ = '_'},
MSpec = [{Pattern, [], ['$_']}],
case ets:select_reverse(?PROCS, MSpec, 25) of
'$end_of_table' ->
@@ -338,7 +313,6 @@ iter_procs(Lang, Fun) when is_binary(Lang) ->
iter_procs_int(Continuation, Fun)
end.
-
iter_procs_int({[], Continuation0}, Fun) ->
case ets:select_reverse(Continuation0) of
'$end_of_table' ->
@@ -354,7 +328,6 @@ iter_procs_int({[Proc | Rest], Continuation}, Fun) ->
iter_procs_int({Rest, Continuation}, Fun)
end.
-
spawn_proc(State, Client) ->
Pid = spawn_link(?MODULE, new_proc, [Client]),
ets:insert(?OPENING, {Pid, Client}),
@@ -364,36 +337,38 @@ spawn_proc(State, Client) ->
counts = dict:update_counter(Lang, 1, Counts)
}.
-
-new_proc(#client{ddoc=undefined, ddoc_key=undefined}=Client) ->
- #client{from=From, lang=Lang} = Client,
- Resp = try
- case new_proc_int(From, Lang) of
- {ok, Proc} ->
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
+new_proc(#client{ddoc = undefined, ddoc_key = undefined} = Client) ->
+ #client{from = From, lang = Lang} = Client,
+ Resp =
+ try
+ case new_proc_int(From, Lang) of
+ {ok, Proc} ->
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch
+ _:_ ->
spawn_error
- end
- catch _:_ ->
- spawn_error
- end,
+ end,
exit(Resp);
-
new_proc(Client) ->
- #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey} = Client,
- Resp = try
- case new_proc_int(From, Lang) of
- {ok, NewProc} ->
- {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
- spawn_error
- end
- catch _:_ ->
- spawn_error
- end,
+ #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client,
+ Resp =
+ try
+ case new_proc_int(From, Lang) of
+ {ok, NewProc} ->
+ {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch
+ _:_ ->
+ spawn_error
+ end,
exit(Resp).
split_string_if_longer(String, Pos) ->
@@ -413,14 +388,17 @@ split_by_char(String, Char) ->
get_servers_from_env(Spec) ->
SpecLen = length(Spec),
% loop over os:getenv(), match SPEC_
- lists:filtermap(fun(EnvStr) ->
- case split_string_if_longer(EnvStr, SpecLen) of
- {Spec, Rest} ->
- {true, split_by_char(Rest, $=)};
- _ ->
- false
- end
- end, os:getenv()).
+ lists:filtermap(
+ fun(EnvStr) ->
+ case split_string_if_longer(EnvStr, SpecLen) of
+ {Spec, Rest} ->
+ {true, split_by_char(Rest, $=)};
+ _ ->
+ false
+ end
+ end,
+ os:getenv()
+ ).
get_query_server(LangStr) ->
case ets:lookup(?SERVERS, string:to_upper(LangStr)) of
@@ -439,39 +417,39 @@ native_query_server_enabled() ->
maybe_configure_erlang_native_servers() ->
case native_query_server_enabled() of
true ->
- ets:insert(?SERVERS, [
- {"ERLANG", {couch_native_process, start_link, []}}]);
+ ets:insert(?SERVERS, [
+ {"ERLANG", {couch_native_process, start_link, []}}
+ ]);
_Else ->
- ok
+ ok
end.
new_proc_int(From, Lang) when is_binary(Lang) ->
LangStr = binary_to_list(Lang),
case get_query_server(LangStr) of
- undefined ->
- gen_server:reply(From, {unknown_query_language, Lang});
- {M, F, A} ->
- {ok, Pid} = apply(M, F, A),
- make_proc(Pid, Lang, M);
- Command ->
- {ok, Pid} = couch_os_process:start_link(Command),
- make_proc(Pid, Lang, couch_os_process)
+ undefined ->
+ gen_server:reply(From, {unknown_query_language, Lang});
+ {M, F, A} ->
+ {ok, Pid} = apply(M, F, A),
+ make_proc(Pid, Lang, M);
+ Command ->
+ {ok, Pid} = couch_os_process:start_link(Command),
+ make_proc(Pid, Lang, couch_os_process)
end.
-
-teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc_int{ddoc_keys=Keys}=Proc) ->
+teach_ddoc(DDoc, {DDocId, _Rev} = DDocKey, #proc_int{ddoc_keys = Keys} = Proc) ->
% send ddoc over the wire
% we only share the rev with the client we know to update code
% but it only keeps the latest copy, per each ddoc, around.
true = couch_query_servers:proc_prompt(
export_proc(Proc),
- [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
+ [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]
+ ),
% we should remove any other ddocs keys for this docid
% because the query server overwrites without the rev
- Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
+ Keys2 = [{D, R} || {D, R} <- Keys, D /= DDocId],
% add ddoc to the proc
- {ok, Proc#proc_int{ddoc_keys=[DDocKey|Keys2]}}.
-
+ {ok, Proc#proc_int{ddoc_keys = [DDocKey | Keys2]}}.
make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
Proc = #proc_int{
@@ -484,42 +462,42 @@ make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
unlink(Pid),
{ok, Proc}.
-
-assign_proc(Pid, #proc_int{client=undefined}=Proc0) when is_pid(Pid) ->
+assign_proc(Pid, #proc_int{client = undefined} = Proc0) when is_pid(Pid) ->
Proc = Proc0#proc_int{client = erlang:monitor(process, Pid)},
ets:insert(?PROCS, Proc),
export_proc(Proc);
-assign_proc(#client{}=Client, #proc_int{client=undefined}=Proc) ->
+assign_proc(#client{} = Client, #proc_int{client = undefined} = Proc) ->
{Pid, _} = Client#client.from,
assign_proc(Pid, Proc).
-
return_proc(#state{} = State, #proc_int{} = ProcInt) ->
#proc_int{pid = Pid, lang = Lang} = ProcInt,
- NewState = case is_process_alive(Pid) of true ->
- case ProcInt#proc_int.t0 < State#state.threshold_ts of
+ NewState =
+ case is_process_alive(Pid) of
true ->
- remove_proc(State, ProcInt);
+ case ProcInt#proc_int.t0 < State#state.threshold_ts of
+ true ->
+ remove_proc(State, ProcInt);
+ false ->
+ gen_server:cast(Pid, garbage_collect),
+ true = ets:update_element(?PROCS, Pid, [
+ {#proc_int.client, undefined}
+ ]),
+ State
+ end;
false ->
- gen_server:cast(Pid, garbage_collect),
- true = ets:update_element(?PROCS, Pid, [
- {#proc_int.client, undefined}
- ]),
- State
- end;
- false ->
- remove_proc(State, ProcInt)
- end,
+ remove_proc(State, ProcInt)
+ end,
flush_waiters(NewState, Lang).
-
-remove_proc(State, #proc_int{}=Proc) ->
+remove_proc(State, #proc_int{} = Proc) ->
ets:delete(?PROCS, Proc#proc_int.pid),
- case is_process_alive(Proc#proc_int.pid) of true ->
- unlink(Proc#proc_int.pid),
- gen_server:cast(Proc#proc_int.pid, stop);
- false ->
- ok
+ case is_process_alive(Proc#proc_int.pid) of
+ true ->
+ unlink(Proc#proc_int.pid),
+ gen_server:cast(Proc#proc_int.pid, stop);
+ false ->
+ ok
end,
Counts = State#state.counts,
Lang = Proc#proc_int.lang,
@@ -527,7 +505,6 @@ remove_proc(State, #proc_int{}=Proc) ->
counts = dict:update_counter(Lang, -1, Counts)
}.
-
-spec export_proc(#proc_int{}) -> #proc{}.
export_proc(#proc_int{} = ProcInt) ->
ProcIntList = tuple_to_list(ProcInt),
@@ -535,17 +512,19 @@ export_proc(#proc_int{} = ProcInt) ->
[_ | Data] = lists:sublist(ProcIntList, ProcLen),
list_to_tuple([proc | Data]).
-
flush_waiters(State) ->
- dict:fold(fun(Lang, Count, StateAcc) ->
- case Count < State#state.hard_limit of
- true ->
- flush_waiters(StateAcc, Lang);
- false ->
- StateAcc
- end
- end, State, State#state.counts).
-
+ dict:fold(
+ fun(Lang, Count, StateAcc) ->
+ case Count < State#state.hard_limit of
+ true ->
+ flush_waiters(StateAcc, Lang);
+ false ->
+ StateAcc
+ end
+ end,
+ State,
+ State#state.counts
+ ).
flush_waiters(State, Lang) ->
CanSpawn = can_spawn(State, Lang),
@@ -572,31 +551,27 @@ flush_waiters(State, Lang) ->
State
end.
-
add_waiting_client(Client) ->
- ets:insert(?WAITERS, Client#client{timestamp=os:timestamp()}).
+ ets:insert(?WAITERS, Client#client{timestamp = os:timestamp()}).
-spec get_waiting_client(Lang :: binary()) -> undefined | #client{}.
get_waiting_client(Lang) ->
- case ets:match_object(?WAITERS, #client{lang=Lang, _='_'}, 1) of
+ case ets:match_object(?WAITERS, #client{lang = Lang, _ = '_'}, 1) of
'$end_of_table' ->
undefined;
- {[#client{}=Client], _} ->
+ {[#client{} = Client], _} ->
Client
end.
-
remove_waiting_client(#client{timestamp = Timestamp}) ->
ets:delete(?WAITERS, Timestamp).
-
can_spawn(#state{hard_limit = HardLimit, counts = Counts}, Lang) ->
case dict:find(Lang, Counts) of
{ok, Count} -> Count < HardLimit;
error -> true
end.
-
get_proc_config() ->
Limit = config:get_boolean("query_server_config", "reduce_limit", true),
Timeout = config:get_integer("couchdb", "os_process_timeout", 5000),
@@ -605,10 +580,8 @@ get_proc_config() ->
{<<"timeout">>, Timeout}
]}.
-
get_hard_limit() ->
config:get_integer("query_server_config", "os_process_limit", 100).
-
get_soft_limit() ->
config:get_integer("query_server_config", "os_process_soft_limit", 100).
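
couch_proc_manager above builds its ets queries from record patterns by hand, e.g. #proc_int{t0 = '$1', _ = '_'} plus a guard for the stale-process count. With the ms_transform parse transform, the same match spec can be written as a fun; a sketch assuming a comparable record definition (the real #proc_int{} has more fields):

    -module(ms_sketch).
    -include_lib("stdlib/include/ms_transform.hrl").
    -export([stale_count/2]).

    -record(proc_int, {pid, lang, client, t0}).

    %% Count entries whose t0 is older than Threshold -- the query
    %% behind get_stale_proc_count, written with ets:fun2ms/1.
    stale_count(Table, Threshold) ->
        MS = ets:fun2ms(fun(#proc_int{t0 = T0}) when T0 < Threshold -> true end),
        ets:select_count(Table, MS).
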
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index a011080c2..cae679178 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -14,7 +14,7 @@
-export([try_compile/4]).
-export([start_doc_map/3, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]).
--export([reduce/3, rereduce/3,validate_doc_update/5]).
+-export([reduce/3, rereduce/3, validate_doc_update/5]).
-export([filter_docs/5]).
-export([filter_view/3]).
-export([finalize/2]).
@@ -26,14 +26,17 @@
-include_lib("couch/include/couch_db.hrl").
--define(SUMERROR, <<"The _sum function requires that map values be numbers, "
+-define(SUMERROR, <<
+ "The _sum function requires that map values be numbers, "
"arrays of numbers, or objects. Objects cannot be mixed with other "
"data structures. Objects can be arbitrarily nested, provided that the values "
- "for all fields are themselves numbers, arrays of numbers, or objects.">>).
-
--define(STATERROR, <<"The _stats function requires that map values be numbers "
- "or arrays of numbers, not '~p'">>).
+ "for all fields are themselves numbers, arrays of numbers, or objects."
+>>).
+-define(STATERROR, <<
+ "The _stats function requires that map values be numbers "
+ "or arrays of numbers, not '~p'"
+>>).
try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
try
@@ -53,20 +56,21 @@ try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
start_doc_map(Lang, Functions, Lib) ->
Proc = get_os_process(Lang),
case Lib of
- {[]} -> ok;
- Lib ->
- true = proc_prompt(Proc, [<<"add_lib">>, Lib])
+ {[]} -> ok;
+ Lib -> true = proc_prompt(Proc, [<<"add_lib">>, Lib])
end,
- lists:foreach(fun(FunctionSource) ->
- true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
- end, Functions),
+ lists:foreach(
+ fun(FunctionSource) ->
+ true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
+ end,
+ Functions
+ ),
{ok, Proc}.
map_doc_raw(Proc, Doc) ->
Json = couch_doc:to_json_obj(Doc, []),
{ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}.
-
stop_doc_map(nil) ->
ok;
stop_doc_map(Proc) ->
@@ -76,20 +80,24 @@ group_reductions_results([]) ->
[];
group_reductions_results(List) ->
{Heads, Tails} = lists:foldl(
- fun([H|T], {HAcc,TAcc}) ->
- {[H|HAcc], [T|TAcc]}
- end, {[], []}, List),
+ fun([H | T], {HAcc, TAcc}) ->
+ {[H | HAcc], [T | TAcc]}
+ end,
+ {[], []},
+ List
+ ),
case Tails of
- [[]|_] -> % no tails left
- [Heads];
- _ ->
- [Heads | group_reductions_results(Tails)]
+ % no tails left
+ [[] | _] ->
+ [Heads];
+ _ ->
+ [Heads | group_reductions_results(Tails)]
end.
-finalize(<<"_approx_count_distinct",_/binary>>, Reduction) ->
+finalize(<<"_approx_count_distinct", _/binary>>, Reduction) ->
true = hyper:is_hyper(Reduction),
{ok, round(hyper:card(Reduction))};
-finalize(<<"_stats",_/binary>>, Unpacked) ->
+finalize(<<"_stats", _/binary>>, Unpacked) ->
{ok, pack_stats(Unpacked)};
finalize(_RedSrc, Reduction) ->
{ok, Reduction}.
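
finalize/2 above packs the accumulator that the builtin _stats reducer threads through reduce and rereduce: a {Sum, Count, Min, Max, SumOfSquares} tuple per key (see builtin_stats/stat_values below). A standalone fold over plain numbers showing the same accumulator shape (module name is illustrative):

    -module(stats_sketch).
    -export([stats/1]).

    %% Fold numbers into {Sum, Count, Min, Max, SumOfSquares}, the
    %% shape couch_query_servers builds in builtin_stats/stat_values.
    stats([First | Rest]) ->
        Init = {First, 1, First, First, First * First},
        lists:foldl(
            fun(V, {Sum, Cnt, Min, Max, Sqr}) ->
                {Sum + V, Cnt + 1, erlang:min(Min, V), erlang:max(Max, V), Sqr + V * V}
            end,
            Init,
            Rest
        ).
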
@@ -100,12 +108,15 @@ rereduce(Lang, RedSrcs, ReducedValues) ->
Grouped = group_reductions_results(ReducedValues),
Results = lists:zipwith(
fun
- (<<"_", _/binary>> = FunSrc, Values) ->
- {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
- Result;
- (FunSrc, Values) ->
- os_rereduce(Lang, [FunSrc], Values)
- end, RedSrcs, Grouped),
+ (<<"_", _/binary>> = FunSrc, Values) ->
+ {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
+ Result;
+ (FunSrc, Values) ->
+ os_rereduce(Lang, [FunSrc], Values)
+ end,
+ RedSrcs,
+ Grouped
+ ),
{ok, Results}.
reduce(_Lang, [], _KVs) ->
@@ -113,34 +124,37 @@ reduce(_Lang, [], _KVs) ->
reduce(_Lang, [<<"_", _/binary>>] = RedSrcs, KVs) ->
builtin_reduce(reduce, RedSrcs, KVs, []);
reduce(Lang, RedSrcs, KVs) ->
- {OsRedSrcs, BuiltinReds} = lists:partition(fun
- (<<"_", _/binary>>) -> false;
- (_OsFun) -> true
- end, RedSrcs),
+ {OsRedSrcs, BuiltinReds} = lists:partition(
+ fun
+ (<<"_", _/binary>>) -> false;
+ (_OsFun) -> true
+ end,
+ RedSrcs
+ ),
{ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
{ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
-
recombine_reduce_results([], [], [], Acc) ->
{ok, lists:reverse(Acc)};
-recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
-recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
+recombine_reduce_results([<<"_", _/binary>> | RedSrcs], OsResults, [BRes | BuiltinResults], Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes | Acc]);
+recombine_reduce_results([_OsFun | RedSrcs], [OsR | OsResults], BuiltinResults, Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR | Acc]).
os_reduce(_Lang, [], _KVs) ->
{ok, []};
os_reduce(Lang, OsRedSrcs, KVs) ->
Proc = get_os_process(Lang),
- OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
- [true, Reductions] -> Reductions
- catch
- throw:{reduce_overflow_error, Msg} ->
- [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
- after
- ok = ret_os_process(Proc)
- end,
+ OsResults =
+ try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
+ [true, Reductions] -> Reductions
+ catch
+ throw:{reduce_overflow_error, Msg} ->
+ [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
+ after
+ ok = ret_os_process(Proc)
+ end,
{ok, OsResults}.
os_rereduce(Lang, OsRedSrcs, KVs) ->
@@ -159,7 +173,6 @@ os_rereduce(Lang, OsRedSrcs, KVs) ->
Error
end.
-
get_overflow_error([]) ->
undefined;
get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
@@ -167,29 +180,28 @@ get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
get_overflow_error([_ | Rest]) ->
get_overflow_error(Rest).
-
builtin_reduce(_Re, [], _KVs, Acc) ->
{ok, lists:reverse(Acc)};
-builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
+builtin_reduce(Re, [<<"_sum", _/binary>> | BuiltinReds], KVs, Acc) ->
Sum = builtin_sum_rows(KVs, 0),
- Red = case is_number(Sum) of
- true -> Sum;
- false -> check_sum_overflow(?term_size(KVs), ?term_size(Sum), Sum)
- end,
- builtin_reduce(Re, BuiltinReds, KVs, [Red|Acc]);
-builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ Red =
+ case is_number(Sum) of
+ true -> Sum;
+ false -> check_sum_overflow(?term_size(KVs), ?term_size(Sum), Sum)
+ end,
+ builtin_reduce(Re, BuiltinReds, KVs, [Red | Acc]);
+builtin_reduce(reduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
Count = length(KVs),
- builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(reduce, BuiltinReds, KVs, [Count | Acc]);
+builtin_reduce(rereduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
Count = builtin_sum_rows(KVs, 0),
- builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(rereduce, BuiltinReds, KVs, [Count | Acc]);
+builtin_reduce(Re, [<<"_stats", _/binary>> | BuiltinReds], KVs, Acc) ->
Stats = builtin_stats(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]);
-builtin_reduce(Re, [<<"_approx_count_distinct",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(Re, BuiltinReds, KVs, [Stats | Acc]);
+builtin_reduce(Re, [<<"_approx_count_distinct", _/binary>> | BuiltinReds], KVs, Acc) ->
Distinct = approx_count_distinct(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Distinct|Acc]).
-
+ builtin_reduce(Re, BuiltinReds, KVs, [Distinct | Acc]).
builtin_sum_rows([], Acc) ->
Acc;
@@ -201,11 +213,13 @@ builtin_sum_rows([[_Key, Value] | RestKVs], Acc) ->
throw:{builtin_reduce_error, Obj} ->
Obj;
throw:{invalid_value, Reason, Cause} ->
- {[{<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, Reason}, {<<"caused_by">>, Cause}]}
+ {[
+ {<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, Reason},
+ {<<"caused_by">>, Cause}
+ ]}
end.
-
sum_values(Value, Acc) when is_number(Value), is_number(Acc) ->
Acc + Value;
sum_values(Value, Acc) when is_list(Value), is_list(Acc) ->
@@ -243,12 +257,12 @@ sum_objects(Rest, []) ->
sum_arrays([], []) ->
[];
-sum_arrays([_|_]=Xs, []) ->
+sum_arrays([_ | _] = Xs, []) ->
Xs;
-sum_arrays([], [_|_]=Ys) ->
+sum_arrays([], [_ | _] = Ys) ->
Ys;
-sum_arrays([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
- [X+Y | sum_arrays(Xs,Ys)];
+sum_arrays([X | Xs], [Y | Ys]) when is_number(X), is_number(Y) ->
+ [X + Y | sum_arrays(Xs, Ys)];
sum_arrays(Else, _) ->
throw_sum_error(Else).
@@ -269,37 +283,42 @@ check_sum_overflow(InSize, OutSize, Sum) ->
end.
log_sum_overflow(InSize, OutSize) ->
- Fmt = "Reduce output must shrink more rapidly: "
- "input size: ~b "
- "output size: ~b",
+ Fmt =
+ "Reduce output must shrink more rapidly: "
+ "input size: ~b "
+ "output size: ~b",
Msg = iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])),
couch_log:error(Msg, []),
Msg.
builtin_stats(_, []) ->
{0, 0, 0, 0, 0};
-builtin_stats(_, [[_,First]|Rest]) ->
- lists:foldl(fun([_Key, Value], Acc) ->
- stat_values(Value, Acc)
- end, build_initial_accumulator(First), Rest).
+builtin_stats(_, [[_, First] | Rest]) ->
+ lists:foldl(
+ fun([_Key, Value], Acc) ->
+ stat_values(Value, Acc)
+ end,
+ build_initial_accumulator(First),
+ Rest
+ ).
stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
lists:zipwith(fun stat_values/2, Value, Acc);
stat_values({PreRed}, Acc) when is_list(PreRed) ->
stat_values(unpack_stats({PreRed}), Acc);
stat_values(Value, Acc) when is_number(Value) ->
- stat_values({Value, 1, Value, Value, Value*Value}, Acc);
+ stat_values({Value, 1, Value, Value, Value * Value}, Acc);
stat_values(Value, Acc) when is_number(Acc) ->
- stat_values(Value, {Acc, 1, Acc, Acc, Acc*Acc});
+ stat_values(Value, {Acc, 1, Acc, Acc, Acc * Acc});
stat_values(Value, Acc) when is_tuple(Value), is_tuple(Acc) ->
{Sum0, Cnt0, Min0, Max0, Sqr0} = Value,
{Sum1, Cnt1, Min1, Max1, Sqr1} = Acc,
{
- Sum0 + Sum1,
- Cnt0 + Cnt1,
- erlang:min(Min0, Min1),
- erlang:max(Max0, Max1),
- Sqr0 + Sqr1
+ Sum0 + Sum1,
+ Cnt0 + Cnt1,
+ erlang:min(Min0, Min1),
+ erlang:max(Max0, Max1),
+ Sqr0 + Sqr1
};
stat_values(Else, _Acc) ->
throw_stat_error(Else).
@@ -307,7 +326,7 @@ stat_values(Else, _Acc) ->
build_initial_accumulator(L) when is_list(L) ->
[build_initial_accumulator(X) || X <- L];
build_initial_accumulator(X) when is_number(X) ->
- {X, 1, X, X, X*X};
+ {X, 1, X, X, X * X};
build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) ->
AlreadyUnpacked;
build_initial_accumulator({Props}) ->
@@ -318,16 +337,21 @@ build_initial_accumulator(Else) ->
unpack_stats({PreRed}) when is_list(PreRed) ->
{
- get_number(<<"sum">>, PreRed),
- get_number(<<"count">>, PreRed),
- get_number(<<"min">>, PreRed),
- get_number(<<"max">>, PreRed),
- get_number(<<"sumsqr">>, PreRed)
+ get_number(<<"sum">>, PreRed),
+ get_number(<<"count">>, PreRed),
+ get_number(<<"min">>, PreRed),
+ get_number(<<"max">>, PreRed),
+ get_number(<<"sumsqr">>, PreRed)
}.
-
pack_stats({Sum, Cnt, Min, Max, Sqr}) ->
- {[{<<"sum">>,Sum}, {<<"count">>,Cnt}, {<<"min">>,Min}, {<<"max">>,Max}, {<<"sumsqr">>,Sqr}]};
+ {[
+ {<<"sum">>, Sum},
+ {<<"count">>, Cnt},
+ {<<"min">>, Min},
+ {<<"max">>, Max},
+ {<<"sumsqr">>, Sqr}
+ ]};
pack_stats({Packed}) ->
% Legacy code path before we had the finalize operation
{Packed};
@@ -336,35 +360,43 @@ pack_stats(Stats) when is_list(Stats) ->
get_number(Key, Props) ->
case couch_util:get_value(Key, Props) of
- X when is_number(X) ->
- X;
- undefined when is_binary(Key) ->
- get_number(binary_to_atom(Key, latin1), Props);
- undefined ->
- Msg = io_lib:format("user _stats input missing required field ~s (~p)",
- [Key, Props]),
- throw({invalid_value, iolist_to_binary(Msg)});
- Else ->
- Msg = io_lib:format("non-numeric _stats input received for ~s: ~w",
- [Key, Else]),
- throw({invalid_value, iolist_to_binary(Msg)})
+ X when is_number(X) ->
+ X;
+ undefined when is_binary(Key) ->
+ get_number(binary_to_atom(Key, latin1), Props);
+ undefined ->
+ Msg = io_lib:format(
+ "user _stats input missing required field ~s (~p)",
+ [Key, Props]
+ ),
+ throw({invalid_value, iolist_to_binary(Msg)});
+ Else ->
+ Msg = io_lib:format(
+ "non-numeric _stats input received for ~s: ~w",
+ [Key, Else]
+ ),
+ throw({invalid_value, iolist_to_binary(Msg)})
end.
% TODO allow customization of precision in the ddoc.
approx_count_distinct(reduce, KVs) ->
- lists:foldl(fun([[Key, _Id], _Value], Filter) ->
- hyper:insert(term_to_binary(Key), Filter)
- end, hyper:new(11), KVs);
+ lists:foldl(
+ fun([[Key, _Id], _Value], Filter) ->
+ hyper:insert(term_to_binary(Key), Filter)
+ end,
+ hyper:new(11),
+ KVs
+ );
approx_count_distinct(rereduce, Reds) ->
hyper:union([Filter || [_, Filter] <- Reds]).
% use the function stored in ddoc.validate_doc_update to test an update.
-spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when
- DDoc :: ddoc(),
+ DDoc :: ddoc(),
EditDoc :: doc(),
DiskDoc :: doc() | nil,
- Ctx :: user_ctx(),
- SecObj :: sec_obj().
+ Ctx :: user_ctx(),
+ SecObj :: sec_obj().
validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
@@ -374,8 +406,9 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
[<<"validate_doc_update">>],
[JsonEditDoc, JsonDiskDoc, Ctx, SecObj]
),
- if Resp == 1 -> ok; true ->
- couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
+ if
+ Resp == 1 -> ok;
+ true -> couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
end,
case Resp of
RespCode when RespCode =:= 1; RespCode =:= ok; RespCode =:= true ->
@@ -390,7 +423,6 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
throw({unknown_error, Message})
end.
-
json_doc_options() ->
json_doc_options([]).
@@ -413,18 +445,19 @@ filter_view(DDoc, VName, Docs) ->
{ok, Passes}.
filter_docs(Req, Db, DDoc, FName, Docs) ->
- JsonReq = case Req of
- {json_req, JsonObj} ->
- JsonObj;
- #httpd{} = HttpReq ->
- couch_httpd_external:json_req_obj(HttpReq, Db)
- end,
+ JsonReq =
+ case Req of
+ {json_req, JsonObj} ->
+ JsonObj;
+ #httpd{} = HttpReq ->
+ couch_httpd_external:json_req_obj(HttpReq, Db)
+ end,
Options = json_doc_options(),
JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
try
{ok, filter_docs_int(DDoc, FName, JsonReq, JsonDocs)}
catch
- throw:{os_process_error,{exit_status,1}} ->
+ throw:{os_process_error, {exit_status, 1}} ->
%% batch used too much memory, retry sequentially.
Fun = fun(JsonDoc) ->
filter_docs_int(DDoc, FName, JsonReq, [JsonDoc])
@@ -433,8 +466,11 @@ filter_docs(Req, Db, DDoc, FName, Docs) ->
end.
filter_docs_int(DDoc, FName, JsonReq, JsonDocs) ->
- [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
- [JsonDocs, JsonReq]),
+ [true, Passes] = ddoc_prompt(
+ DDoc,
+ [<<"filters">>, FName],
+ [JsonDocs, JsonReq]
+ ),
Passes.
ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
@@ -445,22 +481,23 @@ ddoc_prompt(DDoc, FunPath, Args) ->
proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
end).
-with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
+with_ddoc_proc(#doc{id = DDocId, revs = {Start, [DiskRev | _]}} = DDoc, Fun) ->
Rev = couch_doc:rev_to_str({Start, DiskRev}),
DDocKey = {DDocId, Rev},
Proc = get_ddoc_process(DDoc, DDocKey),
- try Fun({Proc, DDocId})
+ try
+ Fun({Proc, DDocId})
after
ok = ret_os_process(Proc)
end.
proc_prompt(Proc, Args) ->
- case proc_prompt_raw(Proc, Args) of
- {json, Json} ->
- raw_to_ejson({json, Json});
- EJson ->
- EJson
- end.
+ case proc_prompt_raw(Proc, Args) of
+ {json, Json} ->
+ raw_to_ejson({json, Json});
+ EJson ->
+ EJson
+ end.
proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
apply(Mod, Func, [Proc#proc.pid, Args]).
@@ -468,13 +505,16 @@ proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
raw_to_ejson({json, Json}) ->
try
?JSON_DECODE(Json)
- catch throw:{invalid_json, {_, invalid_string}} ->
- Forced = try
- force_utf8(Json)
- catch _:_ ->
- Json
- end,
- ?JSON_DECODE(Forced)
+ catch
+ throw:{invalid_json, {_, invalid_string}} ->
+ Forced =
+ try
+ force_utf8(Json)
+ catch
+ _:_ ->
+ Json
+ end,
+ ?JSON_DECODE(Forced)
end;
raw_to_ejson(EJson) ->
EJson.
@@ -483,14 +523,15 @@ force_utf8(Bin) ->
case binary:match(Bin, <<"\\u">>) of
{Start, 2} ->
<<Prefix:Start/binary, Rest1/binary>> = Bin,
- {Insert, Rest3} = case check_uescape(Rest1) of
- {ok, Skip} ->
- <<Skipped:Skip/binary, Rest2/binary>> = Rest1,
- {Skipped, Rest2};
- {error, Skip} ->
- <<_:Skip/binary, Rest2/binary>> = Rest1,
- {<<16#EF, 16#BF, 16#BD>>, Rest2}
- end,
+ {Insert, Rest3} =
+ case check_uescape(Rest1) of
+ {ok, Skip} ->
+ <<Skipped:Skip/binary, Rest2/binary>> = Rest1,
+ {Skipped, Rest2};
+ {error, Skip} ->
+ <<_:Skip/binary, Rest2/binary>> = Rest1,
+ {<<16#EF, 16#BF, 16#BD>>, Rest2}
+ end,
RestForced = force_utf8(Rest3),
<<Prefix/binary, Insert/binary, RestForced/binary>>;
nomatch ->
@@ -510,8 +551,9 @@ check_uescape(Data) ->
try
[_] = xmerl_ucs:from_utf16be(UTF16),
{ok, 12}
- catch _:_ ->
- {error, 6}
+ catch
+ _:_ ->
+ {error, 6}
end;
{_, _} ->
% Found a uescape that's not a low half
@@ -550,33 +592,33 @@ get_os_process_timeout() ->
get_ddoc_process(#doc{} = DDoc, DDocKey) ->
% remove this case statement
case gen_server:call(couch_proc_manager, {get_proc, DDoc, DDocKey}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- % process knows the ddoc
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_ddoc_process(DDoc, DDocKey)
- end;
- Error ->
- throw(Error)
+ {ok, Proc, {QueryConfig}} ->
+ % process knows the ddoc
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_ddoc_process(DDoc, DDocKey)
+ end;
+ Error ->
+ throw(Error)
end.
get_os_process(Lang) ->
case gen_server:call(couch_proc_manager, {get_proc, Lang}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_os_process(Lang)
- end;
- Error ->
- throw(Error)
+ {ok, Proc, {QueryConfig}} ->
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_os_process(Lang)
+ end;
+ Error ->
+ throw(Error)
end.
ret_os_process(Proc) ->
@@ -590,7 +632,6 @@ throw_sum_error(Else) ->
throw_stat_error(Else) ->
throw({invalid_value, iolist_to_binary(io_lib:format(?STATERROR, [Else]))}).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -602,19 +643,38 @@ builtin_sum_rows_negative_test() ->
% it's only one document.
?assertEqual(A, builtin_sum_rows([["K", A]], [])),
{Result} = builtin_sum_rows([["K", A]], [1, 2, 3]),
- ?assertEqual({<<"error">>, <<"builtin_reduce_error">>},
- lists:keyfind(<<"error">>, 1, Result)).
+ ?assertEqual(
+ {<<"error">>, <<"builtin_reduce_error">>},
+ lists:keyfind(<<"error">>, 1, Result)
+ ).
sum_values_test() ->
?assertEqual(3, sum_values(1, 2)),
- ?assertEqual([2,4,6], sum_values(1, [1,4,6])),
- ?assertEqual([3,5,7], sum_values([3,2,4], [0,3,3])),
- X = {[{<<"a">>,1}, {<<"b">>,[1,2]}, {<<"c">>, {[{<<"d">>,3}]}},
- {<<"g">>,1}]},
- Y = {[{<<"a">>,2}, {<<"b">>,3}, {<<"c">>, {[{<<"e">>, 5}]}},
- {<<"f">>,1}, {<<"g">>,1}]},
- Z = {[{<<"a">>,3}, {<<"b">>,[4,2]}, {<<"c">>, {[{<<"d">>,3},{<<"e">>,5}]}},
- {<<"f">>,1}, {<<"g">>,2}]},
+ ?assertEqual([2, 4, 6], sum_values(1, [1, 4, 6])),
+ ?assertEqual([3, 5, 7], sum_values([3, 2, 4], [0, 3, 3])),
+ X =
+ {[
+ {<<"a">>, 1},
+ {<<"b">>, [1, 2]},
+ {<<"c">>, {[{<<"d">>, 3}]}},
+ {<<"g">>, 1}
+ ]},
+ Y =
+ {[
+ {<<"a">>, 2},
+ {<<"b">>, 3},
+ {<<"c">>, {[{<<"e">>, 5}]}},
+ {<<"f">>, 1},
+ {<<"g">>, 1}
+ ]},
+ Z =
+ {[
+ {<<"a">>, 3},
+ {<<"b">>, [4, 2]},
+ {<<"c">>, {[{<<"d">>, 3}, {<<"e">>, 5}]}},
+ {<<"f">>, 1},
+ {<<"g">>, 2}
+ ]},
?assertEqual(Z, sum_values(X, Y)),
?assertEqual(Z, sum_values(Y, X)).
@@ -623,8 +683,12 @@ sum_values_negative_test() ->
A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}],
B = ["error 1", "error 2"],
C = [<<"error 3">>, <<"error 4">>],
- KV = {[{<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, ?SUMERROR}, {<<"caused_by">>, <<"some cause">>}]},
+ KV =
+ {[
+ {<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, ?SUMERROR},
+ {<<"caused_by">>, <<"some cause">>}
+ ]},
?assertThrow({invalid_value, _, _}, sum_values(A, [1, 2, 3])),
?assertThrow({invalid_value, _, _}, sum_values(A, 0)),
?assertThrow({invalid_value, _, _}, sum_values(B, [1, 2])),
@@ -634,48 +698,103 @@ sum_values_negative_test() ->
stat_values_test() ->
?assertEqual({1, 2, 0, 1, 1}, stat_values(1, 0)),
?assertEqual({11, 2, 1, 10, 101}, stat_values(1, 10)),
- ?assertEqual([{9, 2, 2, 7, 53},
- {14, 2, 3, 11, 130},
- {18, 2, 5, 13, 194}
- ], stat_values([2,3,5], [7,11,13])).
+ ?assertEqual(
+ [
+ {9, 2, 2, 7, 53},
+ {14, 2, 3, 11, 130},
+ {18, 2, 5, 13, 194}
+ ],
+ stat_values([2, 3, 5], [7, 11, 13])
+ ).
reduce_stats_test() ->
- ?assertEqual([
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], test_reduce(<<"_stats">>, [[[null, key], 2]])),
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ test_reduce(<<"_stats">>, [[[null, key], 2]])
+ ),
- ?assertEqual([[
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ]], test_reduce(<<"_stats">>, [[[null, key],[1,2]]])),
+ ?assertEqual(
+ [
+ [
+ {[
+ {<<"sum">>, 1},
+ {<<"count">>, 1},
+ {<<"min">>, 1},
+ {<<"max">>, 1},
+ {<<"sumsqr">>, 1}
+ ]},
+ {[
+ {<<"sum">>, 2},
+ {<<"count">>, 1},
+ {<<"min">>, 2},
+ {<<"max">>, 2},
+ {<<"sumsqr">>, 4}
+ ]}
+ ]
+ ],
+ test_reduce(<<"_stats">>, [[[null, key], [1, 2]]])
+ ),
+
+ ?assertEqual(
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]},
+ element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))
+ ),
?assertEqual(
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- , element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {2, 1, 2, 2, 4}
- ]))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ]))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {2, 1, 2, 2, 4}
- ]))),
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {2, 1, 2, 2, 4}
+ ])
+ )
+ ),
+
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {[
+ {<<"sum">>, 2},
+ {<<"count">>, 1},
+ {<<"min">>, 2},
+ {<<"max">>, 2},
+ {<<"sumsqr">>, 4}
+ ]}
+ ])
+ )
+ ),
+
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {[
+ {<<"sum">>, 1},
+ {<<"count">>, 1},
+ {<<"min">>, 1},
+ {<<"max">>, 1},
+ {<<"sumsqr">>, 1}
+ ]},
+ {2, 1, 2, 2, 4}
+ ])
+ )
+ ),
ok.
test_reduce(Reducer, KVs) ->
@@ -695,9 +814,12 @@ force_utf8_test() ->
% Truncated, but we don't break replacements
<<"\\u0FA">>
],
- lists:foreach(fun(Case) ->
- ?assertEqual(Case, force_utf8(Case))
- end, Ok),
+ lists:foreach(
+ fun(Case) ->
+ ?assertEqual(Case, force_utf8(Case))
+ end,
+ Ok
+ ),
NotOk = [
<<"\\uDCA5">>,
@@ -710,15 +832,18 @@ force_utf8_test() ->
<<"\\uD83D\\u00A0">>
],
ToJSON = fun(Bin) -> <<34, Bin/binary, 34>> end,
- lists:foreach(fun(Case) ->
- try
- ?assertNotEqual(Case, force_utf8(Case)),
- ?assertThrow(_, ?JSON_DECODE(ToJSON(Case))),
- ?assertMatch(<<_/binary>>, ?JSON_DECODE(ToJSON(force_utf8(Case))))
- catch
- T:R ->
- io:format(standard_error, "~p~n~p~n", [T, R])
- end
- end, NotOk).
+ lists:foreach(
+ fun(Case) ->
+ try
+ ?assertNotEqual(Case, force_utf8(Case)),
+ ?assertThrow(_, ?JSON_DECODE(ToJSON(Case))),
+ ?assertMatch(<<_/binary>>, ?JSON_DECODE(ToJSON(force_utf8(Case))))
+ catch
+ T:R ->
+ io:format(standard_error, "~p~n~p~n", [T, R])
+ end
+ end,
+ NotOk
+ ).
-endif.
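The dominant change in couch_query_servers.erl is erlfmt splitting calls that
take a fun argument: each argument moves to its own line and the fun body
indents a level, as in builtin_stats/2 and approx_count_distinct/2 above. A
minimal sketch of the convention, using a hypothetical module that is not
part of this commit:

-module(fmt_fold_example).
-export([sum_sizes/1]).

%% Hypothetical sketch of erlfmt output: the fun, the accumulator, and
%% the list each get their own argument line.
sum_sizes(Binaries) ->
    lists:foldl(
        fun(Bin, Acc) -> byte_size(Bin) + Acc end,
        0,
        Binaries
    ).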
diff --git a/src/couch/src/couch_rand.erl b/src/couch/src/couch_rand.erl
index 67454b8ad..bc30956a4 100644
--- a/src/couch/src/couch_rand.erl
+++ b/src/couch/src/couch_rand.erl
@@ -12,16 +12,13 @@
-module(couch_rand).
-
-export([
uniform/0,
uniform/1
]).
-
uniform() ->
rand:uniform().
-
uniform(N) ->
rand:uniform(N).
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index 293e1b52a..813334883 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -15,16 +15,12 @@
-export([init/1, start_link/0]).
start_link() ->
- supervisor:start_link({local,couch_secondary_services}, ?MODULE, []).
+ supervisor:start_link({local, couch_secondary_services}, ?MODULE, []).
init([]) ->
SecondarySupervisors = [
- {couch_plugin_event,
- {gen_event, start_link, [{local, couch_plugin}]},
- permanent,
- brutal_kill,
- worker,
- dynamic}
+ {couch_plugin_event, {gen_event, start_link, [{local, couch_plugin}]}, permanent,
+ brutal_kill, worker, dynamic}
],
Daemons = [
{query_servers, {couch_proc_manager, start_link, []}},
@@ -32,26 +28,25 @@ init([]) ->
{uuids, {couch_uuids, start, []}}
],
- MaybeHttps = case https_enabled() of
- true -> [{httpsd, {chttpd, start_link, [https]}}];
- false -> []
- end,
-
- Children = SecondarySupervisors ++ [
- begin
- {Module, Fun, Args} = Spec,
-
- {Name,
- {Module, Fun, Args},
- permanent,
- brutal_kill,
- worker,
- [Module]}
- end
- || {Name, Spec}
- <- Daemons ++ MaybeHttps, Spec /= ""],
- {ok, {{one_for_one, 50, 3600},
- couch_epi:register_service(couch_db_epi, Children)}}.
+ MaybeHttps =
+ case https_enabled() of
+ true -> [{httpsd, {chttpd, start_link, [https]}}];
+ false -> []
+ end,
+
+ Children =
+ SecondarySupervisors ++
+ [
+ begin
+ {Module, Fun, Args} = Spec,
+
+ {Name, {Module, Fun, Args}, permanent, brutal_kill, worker, [Module]}
+ end
+ || {Name, Spec} <-
+ Daemons ++ MaybeHttps,
+ Spec /= ""
+ ],
+ {ok, {{one_for_one, 50, 3600}, couch_epi:register_service(couch_db_epi, Children)}}.
https_enabled() ->
% 1. [ssl] enable = true | false
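The Children binding above also shows erlfmt's list comprehension layout:
once the comprehension no longer fits on one line, the template gets its own
line and "||" leads the generator line. A minimal sketch, using a
hypothetical module that is not part of this commit:

-module(fmt_lc_example).
-export([child_specs/1]).

%% Hypothetical sketch of erlfmt output for a long list comprehension.
child_specs(Daemons) ->
    [
        {Name, {M, F, A}, permanent, brutal_kill, worker, [M]}
     || {Name, {M, F, A}} <- Daemons
    ].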
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index 8fd074a78..1a2d33f79 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -15,10 +15,10 @@
-behaviour(config_listener).
-vsn(3).
--export([get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
--export([init/1, handle_call/3,sup_start_link/0]).
--export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
--export([is_admin/2,has_admins/0]).
+-export([get_version/0, get_version/1, get_git_sha/0, get_uuid/0]).
+-export([init/1, handle_call/3, sup_start_link/0]).
+-export([handle_cast/2, code_change/3, handle_info/2, terminate/2]).
+-export([is_admin/2, has_admins/0]).
% config_listener api
-export([handle_config_change/5, handle_config_terminate/3]).
@@ -28,11 +28,12 @@
-define(RELISTEN_DELAY, 5000).
get_version() ->
- ?COUCHDB_VERSION. %% Defined in rebar.config.script
+ %% Defined in rebar.config.script
+ ?COUCHDB_VERSION.
get_version(short) ->
- %% strip git hash from version string
- [Version|_Rest] = string:tokens(get_version(), "+"),
- Version.
+ %% strip git hash from version string
+ [Version | _Rest] = string:tokens(get_version(), "+"),
+ Version.
get_git_sha() -> ?COUCHDB_GIT_SHA.
@@ -42,7 +43,8 @@ get_uuid() ->
UUID = couch_uuids:random(),
config:set("couchdb", "uuid", ?b2l(UUID)),
UUID;
- UUID -> ?l2b(UUID)
+ UUID ->
+ ?l2b(UUID)
end.
sup_start_link() ->
@@ -50,11 +52,11 @@ sup_start_link() ->
is_admin(User, ClearPwd) ->
case config:get("admins", User) of
- "-hashed-" ++ HashedPwdAndSalt ->
- [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
- couch_util:to_hex(crypto:hash(sha, ClearPwd ++ Salt)) == HashedPwd;
- _Else ->
- false
+ "-hashed-" ++ HashedPwdAndSalt ->
+ [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+ couch_util:to_hex(crypto:hash(sha, ClearPwd ++ Salt)) == HashedPwd;
+ _Else ->
+ false
end.
has_admins() ->
@@ -68,7 +70,9 @@ hash_admin_passwords(Persist) ->
fun({User, ClearPassword}) ->
HashedPassword = couch_passwords:hash_admin_password(ClearPassword),
config:set("admins", User, ?b2l(HashedPassword), Persist)
- end, couch_passwords:get_unhashed_admins()).
+ end,
+ couch_passwords:get_unhashed_admins()
+ ).
init([]) ->
% Mark being able to receive documents with an _access property as a supported feature
@@ -76,7 +80,8 @@ init([]) ->
% Mark if fips is enabled
case
erlang:function_exported(crypto, info_fips, 0) andalso
- crypto:info_fips() == enabled of
+ crypto:info_fips() == enabled
+ of
true ->
config:enable_feature('fips');
false ->
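The init/1 hunk above shows how erlfmt treats a case whose condition spans
lines: the condition indents under "case" and "of" drops to its own line. A
minimal sketch, using a hypothetical module that is not part of this commit:

-module(fmt_case_example).
-export([fips_enabled/0]).

%% Hypothetical sketch of erlfmt output for a multi-line case condition.
fips_enabled() ->
    case
        erlang:function_exported(crypto, info_fips, 0) andalso
            crypto:info_fips() == enabled
    of
        true -> true;
        false -> false
    end.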
diff --git a/src/couch/src/couch_sup.erl b/src/couch/src/couch_sup.erl
index 7a1afae8b..9b2d31498 100644
--- a/src/couch/src/couch_sup.erl
+++ b/src/couch/src/couch_sup.erl
@@ -15,7 +15,6 @@
-vsn(1).
-behaviour(config_listener).
-
-export([
start_link/0,
init/1,
@@ -23,11 +22,9 @@
handle_config_terminate/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("kernel/include/logger.hrl").
-
start_link() ->
assert_admins(),
maybe_launch_admin_annoyance_reporter(),
@@ -45,43 +42,45 @@ start_link() ->
Else
end.
-
init(_Args) ->
couch_log:info("Starting ~s", [?MODULE]),
- {ok, {{one_for_one,10, 60}, [
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, nil]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- },
- {
- couch_primary_services,
- {couch_primary_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_primary_sup]
- },
- {
- couch_secondary_services,
- {couch_secondary_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_secondary_sup]
- }
- ]}}.
-
+ {ok,
+ {{one_for_one, 10, 60}, [
+ {
+ config_listener_mon,
+ {config_listener_mon, start_link, [?MODULE, nil]},
+ permanent,
+ 5000,
+ worker,
+ [config_listener_mon]
+ },
+ {
+ couch_primary_services,
+ {couch_primary_sup, start_link, []},
+ permanent,
+ infinity,
+ supervisor,
+ [couch_primary_sup]
+ },
+ {
+ couch_secondary_services,
+ {couch_secondary_sup, start_link, []},
+ permanent,
+ infinity,
+ supervisor,
+ [couch_secondary_sup]
+ }
+ ]}}.
handle_config_change("daemons", _, _, _, _) ->
exit(whereis(?MODULE), shutdown),
remove_handler;
handle_config_change("couchdb", "util_driver_dir", _, _, _) ->
- [Pid] = [P || {collation_driver, P, _, _}
- <- supervisor:which_children(couch_primary_services)],
+ [Pid] = [
+ P
+ || {collation_driver, P, _, _} <-
+ supervisor:which_children(couch_primary_services)
+ ],
Pid ! reload_driver,
{ok, nil};
handle_config_change(_, _, _, _, _) ->
@@ -96,38 +95,46 @@ assert_admins() ->
{[], false} ->
?LOG_INFO(#{
what => admin_account_missing,
- details => "No admin account found, aborting startup. Please configure "
+ details =>
+ "No admin account found, aborting startup. Please configure "
"an admin account in your local.ini file."
}),
- couch_log:info("~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n"
- ++ " No Admin Account Found, aborting startup. ~n"
- ++ " Please configure an admin account in your local.ini file. ~n"
- ++ "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n", []),
+ couch_log:info(
+ "~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n" ++
+ " No Admin Account Found, aborting startup. ~n" ++
+ " Please configure an admin account in your local.ini file. ~n" ++
+ "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n",
+ []
+ ),
% Wait a second so the log message can make it to the log
timer:sleep(500),
erlang:halt(1);
- _ -> ok
+ _ ->
+ ok
end.
send_no_admin_account_error_message() ->
?LOG_ERROR(#{
what => admin_account_missing,
- details => "No admin account configured. Please configure an admin "
+ details =>
+ "No admin account configured. Please configure an admin "
"account in your local.ini file and restart CouchDB."
}),
- couch_log:error("No Admin Account configured."
- ++ " Please configure an Admin Account in your local.ini file and restart CouchDB.~n", []),
+ couch_log:error(
+ "No Admin Account configured." ++
+ " Please configure an Admin Account in your local.ini file and restart CouchDB.~n",
+ []
+ ),
FiveMinutes = 5 * 1000 * 60,
timer:sleep(FiveMinutes),
send_no_admin_account_error_message().
-
+
maybe_launch_admin_annoyance_reporter() ->
case os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE") of
false -> ok;
_ -> spawn_link(fun send_no_admin_account_error_message/0)
end.
-
notify_starting() ->
?LOG_INFO(#{
what => starting_couchdb,
@@ -137,7 +144,6 @@ notify_starting() ->
couch_server:get_version()
]).
-
notify_started() ->
?LOG_INFO(#{
what => starting_couchdb_complete,
@@ -145,7 +151,6 @@ notify_started() ->
}),
couch_log:info("Apache CouchDB has started. Time to relax.~n", []).
-
notify_error(Error) ->
?LOG_ERROR(#{
what => error_on_startup,
@@ -153,16 +158,17 @@ notify_error(Error) ->
}),
couch_log:error("Error starting Apache CouchDB:~n~n ~p~n~n", [Error]).
-
notify_uris() ->
- lists:foreach(fun(Uri) ->
- ?LOG_INFO(#{
- what => couchdb_listener_started,
- uri => Uri
- }),
- couch_log:info("Apache CouchDB has started on ~s", [Uri])
- end, get_uris()).
-
+ lists:foreach(
+ fun(Uri) ->
+ ?LOG_INFO(#{
+ what => couchdb_listener_started,
+ uri => Uri
+ }),
+ couch_log:info("Apache CouchDB has started on ~s", [Uri])
+ end,
+ get_uris()
+ ).
write_pidfile() ->
case init:get_argument(pidfile) of
@@ -172,7 +178,6 @@ write_pidfile() ->
ok
end.
-
write_uris() ->
case config:get("couchdb", "uri_file", undefined) of
undefined ->
@@ -182,16 +187,17 @@ write_uris() ->
write_file(UriFile, Lines)
end.
-
get_uris() ->
Ip = config:get("chttpd", "bind_address"),
- lists:flatmap(fun(Uri) ->
- case get_uri(Uri, Ip) of
- undefined -> [];
- Else -> [Else]
- end
- end, [couch_httpd, https]).
-
+ lists:flatmap(
+ fun(Uri) ->
+ case get_uri(Uri, Ip) of
+ undefined -> [];
+ Else -> [Else]
+ end
+ end,
+ [couch_httpd, https]
+ ).
get_uri(Name, Ip) ->
case get_port(Name) of
@@ -201,11 +207,9 @@ get_uri(Name, Ip) ->
io_lib:format("~s://~s:~w/", [get_scheme(Name), Ip, Port])
end.
-
get_scheme(couch_httpd) -> "http";
get_scheme(https) -> "https".
-
get_port(Name) ->
try
mochiweb_socket_server:get(Name, port)
@@ -214,7 +218,6 @@ get_port(Name) ->
undefined
end.
-
write_file(FileName, Contents) ->
case file:write_file(FileName, Contents) of
ok ->
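The assert_admins/0 hunk above illustrates erlfmt's handling of concatenated
string literals in call arguments: "++" stays at the end of each line instead
of starting the next. A minimal sketch, using a hypothetical module that is
not part of this commit:

-module(fmt_concat_example).
-export([banner/0]).

%% Hypothetical sketch of erlfmt output: trailing "++", continuation
%% lines indented one level.
banner() ->
    "No Admin Account configured." ++
        " Please configure one in your local.ini file.".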
diff --git a/src/couch/src/couch_totp.erl b/src/couch/src/couch_totp.erl
index 56e70d81a..3eff9a583 100644
--- a/src/couch/src/couch_totp.erl
+++ b/src/couch/src/couch_totp.erl
@@ -14,10 +14,11 @@
-export([generate/5]).
-generate(Alg, Key, CounterSecs, StepSecs, OutputLen)
- when is_atom(Alg),
- is_binary(Key),
- is_integer(CounterSecs),
- is_integer(StepSecs),
- is_integer(OutputLen) ->
+generate(Alg, Key, CounterSecs, StepSecs, OutputLen) when
+ is_atom(Alg),
+ is_binary(Key),
+ is_integer(CounterSecs),
+ is_integer(StepSecs),
+ is_integer(OutputLen)
+->
couch_hotp:generate(Alg, Key, CounterSecs div StepSecs, OutputLen).
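This whole-file change is erlfmt's guard layout: "when" stays on the head
line, each guard gets its own line, and "->" lands on a line of its own. A
minimal sketch, using a hypothetical module that is not part of this commit:

-module(fmt_guard_example).
-export([clamp/3]).

%% Hypothetical sketch of erlfmt output: one guard per line, "->" alone
%% on the closing line.
clamp(X, Lo, Hi) when
    is_number(X),
    is_number(Lo),
    is_number(Hi),
    Lo =< Hi
->
    min(Hi, max(Lo, X)).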
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 37bf2fc6c..dc1317534 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -15,14 +15,14 @@
-export([priv_dir/0, normpath/1, fold_files/5]).
-export([should_flush/0, should_flush/1, to_existing_atom/1]).
-export([rand32/0, implode/2, collate/2, collate/3, get_sort_key/1]).
--export([abs_pathname/1,abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
+-export([abs_pathname/1, abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
-export([encodeBase64Url/1, decodeBase64Url/1]).
-export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
-export([get_nested_json_value/2, json_user_ctx/1]).
-export([proplist_apply_field/2, json_apply_field/2]).
-export([to_binary/1, to_integer/1, to_list/1, url_encode/1]).
-export([json_encode/1, json_decode/1, json_decode/2]).
--export([verify/2,simple_call/2,shutdown_sync/1]).
+-export([verify/2, simple_call/2, shutdown_sync/1]).
-export([get_value/2, get_value/3]).
-export([reorder_results/2]).
-export([url_strip_password/1]).
@@ -61,7 +61,6 @@
<<"^tracing$">>
]).
-
priv_dir() ->
case code:priv_dir(couch) of
{error, bad_name} ->
@@ -69,7 +68,8 @@ priv_dir() ->
% renaming src/couch to src/couchdb. Not really worth the hassle.
% -Damien
code:priv_dir(couchdb);
- Dir -> Dir
+ Dir ->
+ Dir
end.
% Normalize a pathname by removing .. and . components.
@@ -85,7 +85,6 @@ normparts(["." | RestParts], Acc) ->
normparts([Part | RestParts], Acc) ->
normparts(RestParts, [Part | Acc]).
-
% This implementation is similar to the builtin filelib:fold_files/5
% except that this version will run the user supplied function
% on directories that match the regular expression as well.
@@ -126,13 +125,21 @@ fold_files2([File | Rest], Dir, RegExp, Recursive, Fun, Acc0) ->
% works like list_to_existing_atom, except can be list or binary and it
% gives you the original value instead of an error if no existing atom.
to_existing_atom(V) when is_list(V) ->
- try list_to_existing_atom(V) catch _:_ -> V end;
+ try
+ list_to_existing_atom(V)
+ catch
+ _:_ -> V
+ end;
to_existing_atom(V) when is_binary(V) ->
- try list_to_existing_atom(?b2l(V)) catch _:_ -> V end;
+ try
+ list_to_existing_atom(?b2l(V))
+ catch
+ _:_ -> V
+ end;
to_existing_atom(V) when is_atom(V) ->
V.
-shutdown_sync(Pid) when not is_pid(Pid)->
+shutdown_sync(Pid) when not is_pid(Pid) ->
ok;
shutdown_sync(Pid) ->
MRef = erlang:monitor(process, Pid),
@@ -140,23 +147,22 @@ shutdown_sync(Pid) ->
catch unlink(Pid),
catch exit(Pid, shutdown),
receive
- {'DOWN', MRef, _, _, _} ->
- ok
+ {'DOWN', MRef, _, _, _} ->
+ ok
end
after
erlang:demonitor(MRef, [flush])
end.
-
simple_call(Pid, Message) ->
MRef = erlang:monitor(process, Pid),
try
Pid ! {self(), Message},
receive
- {Pid, Result} ->
- Result;
- {'DOWN', MRef, _, _, Reason} ->
- exit(Reason)
+ {Pid, Result} ->
+ Result;
+ {'DOWN', MRef, _, _, Reason} ->
+ exit(Reason)
end
after
erlang:demonitor(MRef, [flush])
@@ -172,28 +178,40 @@ validate_utf8_fast(B, O) ->
<<_:O/binary>> ->
true;
<<_:O/binary, C1, _/binary>> when
- C1 < 128 ->
+ C1 < 128
+ ->
validate_utf8_fast(B, 1 + O);
<<_:O/binary, C1, C2, _/binary>> when
- C1 >= 194, C1 =< 223,
- C2 >= 128, C2 =< 191 ->
+ C1 >= 194,
+ C1 =< 223,
+ C2 >= 128,
+ C2 =< 191
+ ->
validate_utf8_fast(B, 2 + O);
<<_:O/binary, C1, C2, C3, _/binary>> when
- C1 >= 224, C1 =< 239,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191 ->
+ C1 >= 224,
+ C1 =< 239,
+ C2 >= 128,
+ C2 =< 191,
+ C3 >= 128,
+ C3 =< 191
+ ->
validate_utf8_fast(B, 3 + O);
<<_:O/binary, C1, C2, C3, C4, _/binary>> when
- C1 >= 240, C1 =< 244,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191,
- C4 >= 128, C4 =< 191 ->
+ C1 >= 240,
+ C1 =< 244,
+ C2 >= 128,
+ C2 =< 191,
+ C3 >= 128,
+ C3 =< 191,
+ C4 >= 128,
+ C4 =< 191
+ ->
validate_utf8_fast(B, 4 + O);
_ ->
false
end.
-
to_hex(<<Hi:4, Lo:4, Rest/binary>>) ->
[nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex(Rest)];
to_hex(<<>>) ->
@@ -218,7 +236,6 @@ nibble_to_hex(13) -> $d;
nibble_to_hex(14) -> $e;
nibble_to_hex(15) -> $f.
-
parse_term(Bin) when is_binary(Bin) ->
parse_term(binary_to_list(Bin));
parse_term(List) ->
@@ -230,16 +247,16 @@ get_value(Key, List) ->
get_value(Key, List, Default) ->
case lists:keysearch(Key, 1, List) of
- {value, {Key,Value}} ->
- Value;
- false ->
- Default
+ {value, {Key, Value}} ->
+ Value;
+ false ->
+ Default
end.
-get_nested_json_value({Props}, [Key|Keys]) ->
+get_nested_json_value({Props}, [Key | Keys]) ->
case couch_util:get_value(Key, Props, nil) of
- nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
- Value -> get_nested_json_value(Value, Keys)
+ nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
+ Value -> get_nested_json_value(Value, Keys)
end;
get_nested_json_value(Value, []) ->
Value;
@@ -257,15 +274,16 @@ json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
json_apply_field({Key, NewValue}, [], Acc) ->
- {[{Key, NewValue}|Acc]}.
+ {[{Key, NewValue} | Acc]}.
json_user_ctx(Db) ->
#{name := DbName} = Db,
Ctx = fabric2_db:get_user_ctx(Db),
- {[{<<"db">>, DbName},
- {<<"name">>,Ctx#user_ctx.name},
- {<<"roles">>,Ctx#user_ctx.roles}]}.
-
+ {[
+ {<<"db">>, DbName},
+ {<<"name">>, Ctx#user_ctx.name},
+ {<<"roles">>, Ctx#user_ctx.roles}
+ ]}.
% returns a random integer
rand32() ->
@@ -277,7 +295,7 @@ rand32() ->
abs_pathname(" " ++ Filename) ->
% strip leading whitespace
abs_pathname(Filename);
-abs_pathname([$/ |_]=Filename) ->
+abs_pathname([$/ | _] = Filename) ->
Filename;
abs_pathname(Filename) ->
{ok, Cwd} = file:get_cwd(),
@@ -288,24 +306,25 @@ abs_pathname(Filename, Dir) ->
Name = filename:absname(Filename, Dir ++ "/"),
OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
% If the filename is a dir (last char is a slash), put back the end slash
- case string:right(Filename,1) of
- "/" ->
- OutFilename ++ "/";
- "\\" ->
- OutFilename ++ "/";
- _Else->
- OutFilename
+ case string:right(Filename, 1) of
+ "/" ->
+ OutFilename ++ "/";
+ "\\" ->
+ OutFilename ++ "/";
+ _Else ->
+ OutFilename
end.
% if this is an executable with arguments, separate out the arguments
% "./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
separate_cmd_args("", CmdAcc) ->
{lists:reverse(CmdAcc), ""};
-separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value
+% handle skipped value
+separate_cmd_args("\\ " ++ Rest, CmdAcc) ->
separate_cmd_args(Rest, " \\" ++ CmdAcc);
separate_cmd_args(" " ++ Rest, CmdAcc) ->
{lists:reverse(CmdAcc), " " ++ Rest};
-separate_cmd_args([Char|Rest], CmdAcc) ->
+separate_cmd_args([Char | Rest], CmdAcc) ->
separate_cmd_args(Rest, [Char | CmdAcc]).
% Is a character whitespace (from https://en.wikipedia.org/wiki/Whitespace_character#Unicode)?
@@ -342,7 +361,6 @@ is_whitespace(8288) -> true;
is_whitespace(65279) -> true;
is_whitespace(_Else) -> false.
-
% removes leading and trailing whitespace from a string
trim(String) when is_binary(String) ->
% mirror string:trim() behaviour of returning a binary when a binary is passed in
@@ -351,7 +369,6 @@ trim(String) ->
String2 = lists:dropwhile(fun is_whitespace/1, String),
lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
-
drop_dot_couch_ext(DbName) when is_binary(DbName) ->
PrefixLen = size(DbName) - 6,
case DbName of
@@ -360,42 +377,38 @@ drop_dot_couch_ext(DbName) when is_binary(DbName) ->
Else ->
Else
end;
-
drop_dot_couch_ext(DbName) when is_list(DbName) ->
binary_to_list(drop_dot_couch_ext(iolist_to_binary(DbName))).
-
% takes a hierarchical list of dirs and removes the dots ".", double dots
% ".." and the corresponding parent dirs.
fix_path_list([], Acc) ->
lists:reverse(Acc);
-fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
+fix_path_list([".." | Rest], [_PrevAcc | RestAcc]) ->
fix_path_list(Rest, RestAcc);
-fix_path_list(["."|Rest], Acc) ->
+fix_path_list(["." | Rest], Acc) ->
fix_path_list(Rest, Acc);
fix_path_list([Dir | Rest], Acc) ->
fix_path_list(Rest, [Dir | Acc]).
-
implode(List, Sep) ->
implode(List, Sep, []).
implode([], _Sep, Acc) ->
lists:flatten(lists:reverse(Acc));
implode([H], Sep, Acc) ->
- implode([], Sep, [H|Acc]);
-implode([H|T], Sep, Acc) ->
- implode(T, Sep, [Sep,H|Acc]).
-
+ implode([], Sep, [H | Acc]);
+implode([H | T], Sep, Acc) ->
+ implode(T, Sep, [Sep, H | Acc]).
drv_port() ->
case get(couch_drv_port) of
- undefined ->
- Port = open_port({spawn, "couch_icu_driver"}, []),
- put(couch_drv_port, Port),
- Port;
- Port ->
- Port
+ undefined ->
+ Port = open_port({spawn, "couch_icu_driver"}, []),
+ put(couch_drv_port, Port),
+ Port;
+ Port ->
+ Port
end.
collate(A, B) ->
@@ -403,10 +416,12 @@ collate(A, B) ->
collate(A, B, Options) when is_binary(A), is_binary(B) ->
Operation =
- case lists:member(nocase, Options) of
- true -> 1; % Case insensitive
- false -> 0 % Case sensitive
- end,
+ case lists:member(nocase, Options) of
+ % Case insensitive
+ true -> 1;
+ % Case sensitive
+ false -> 0
+ end,
SizeA = byte_size(A),
SizeB = byte_size(B),
Bin = <<SizeA:32/native, A/binary, SizeB:32/native, B/binary>>,
@@ -416,7 +431,8 @@ collate(A, B, Options) when is_binary(A), is_binary(B) ->
Result - 1.
get_sort_key(Str) when is_binary(Str) ->
- Operation = 2, % get_sort_key
+ % get_sort_key
+ Operation = 2,
Size = byte_size(Str),
Bin = <<Size:32/native, Str/binary>>,
case erlang:port_control(drv_port(), Operation, Bin) of
@@ -429,15 +445,24 @@ should_flush() ->
should_flush(MemThreshHold) ->
{memory, ProcMem} = process_info(self(), memory),
- BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
- 0, element(2,process_info(self(), binary))),
- if ProcMem+BinMem > 2*MemThreshHold ->
- garbage_collect(),
- {memory, ProcMem2} = process_info(self(), memory),
- BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
- 0, element(2,process_info(self(), binary))),
- ProcMem2+BinMem2 > MemThreshHold;
- true -> false end.
+ BinMem = lists:foldl(
+ fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end,
+ 0,
+ element(2, process_info(self(), binary))
+ ),
+ if
+ ProcMem + BinMem > 2 * MemThreshHold ->
+ garbage_collect(),
+ {memory, ProcMem2} = process_info(self(), memory),
+ BinMem2 = lists:foldl(
+ fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end,
+ 0,
+ element(2, process_info(self(), binary))
+ ),
+ ProcMem2 + BinMem2 > MemThreshHold;
+ true ->
+ false
+ end.
encodeBase64Url(Url) ->
b64url:encode(Url).
@@ -447,10 +472,10 @@ decodeBase64Url(Url64) ->
dict_find(Key, Dict, DefaultValue) ->
case dict:find(Key, Dict) of
- {ok, Value} ->
- Value;
- error ->
- DefaultValue
+ {ok, Value} ->
+ Value;
+ error ->
+ DefaultValue
end.
to_binary(V) when is_binary(V) ->
@@ -485,23 +510,23 @@ to_list(V) ->
url_encode(Bin) when is_binary(Bin) ->
url_encode(binary_to_list(Bin));
-url_encode([H|T]) ->
+url_encode([H | T]) ->
if
- H >= $a, $z >= H ->
- [H|url_encode(T)];
- H >= $A, $Z >= H ->
- [H|url_encode(T)];
- H >= $0, $9 >= H ->
- [H|url_encode(T)];
- H == $_; H == $.; H == $-; H == $: ->
- [H|url_encode(T)];
- true ->
- case lists:flatten(io_lib:format("~.16.0B", [H])) of
- [X, Y] ->
- [$%, X, Y | url_encode(T)];
- [X] ->
- [$%, $0, X | url_encode(T)]
- end
+ H >= $a, $z >= H ->
+ [H | url_encode(T)];
+ H >= $A, $Z >= H ->
+ [H | url_encode(T)];
+ H >= $0, $9 >= H ->
+ [H | url_encode(T)];
+ H == $_; H == $.; H == $-; H == $: ->
+ [H | url_encode(T)];
+ true ->
+ case lists:flatten(io_lib:format("~.16.0B", [H])) of
+ [X, Y] ->
+ [$%, X, Y | url_encode(T)];
+ [X] ->
+ [$%, $0, X | url_encode(T)]
+ end
end;
url_encode([]) ->
[].
@@ -520,7 +545,7 @@ json_decode(V, Opts) ->
throw({invalid_json, Error})
end.
-verify([X|RestX], [Y|RestY], Result) ->
+verify([X | RestX], [Y | RestY], Result) ->
verify(RestX, RestY, (X bxor Y) bor Result);
verify([], [], Result) ->
Result == 0.
@@ -534,7 +559,8 @@ verify(X, Y) when is_list(X) and is_list(Y) ->
false ->
false
end;
-verify(_X, _Y) -> false.
+verify(_X, _Y) ->
+ false.
% linear search is faster for small lists; length() takes 0.5 ms for a 100k list
reorder_results(Keys, SortedResults) when length(Keys) < 100 ->
@@ -544,10 +570,12 @@ reorder_results(Keys, SortedResults) ->
[dict:fetch(Key, KeyDict) || Key <- Keys].
url_strip_password(Url) ->
- re:replace(Url,
+ re:replace(
+ Url,
"(http|https|socks5)://([^:]+):[^@]+@(.*)$",
"\\1://\\2:*****@\\3",
- [{return, list}]).
+ [{return, list}]
+ ).
encode_doc_id(#doc{id = Id}) ->
encode_doc_id(Id);
@@ -566,20 +594,26 @@ normalize_ddoc_id(DDocId) when is_binary(DDocId) ->
<<"_design/", DDocId/binary>>.
rfc1123_date() ->
- {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
- DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+ {{YYYY, MM, DD}, {Hour, Min, Sec}} = calendar:universal_time(),
+ DayNumber = calendar:day_of_the_week({YYYY, MM, DD}),
lists:flatten(
- io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
- [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+ io_lib:format(
+ "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+ [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec]
+ )
+ ).
rfc1123_date(undefined) ->
undefined;
rfc1123_date(UniversalTime) ->
- {{YYYY,MM,DD},{Hour,Min,Sec}} = UniversalTime,
- DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+ {{YYYY, MM, DD}, {Hour, Min, Sec}} = UniversalTime,
+ DayNumber = calendar:day_of_the_week({YYYY, MM, DD}),
lists:flatten(
- io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
- [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+ io_lib:format(
+ "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+ [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec]
+ )
+ ).
%% day
@@ -616,30 +650,32 @@ boolean_to_integer(true) ->
boolean_to_integer(false) ->
0.
-
validate_positive_int(N) when is_list(N) ->
try
I = list_to_integer(N),
validate_positive_int(I)
- catch error:badarg ->
- false
+ catch
+ error:badarg ->
+ false
end;
validate_positive_int(N) when is_integer(N), N > 0 -> true;
-validate_positive_int(_) -> false.
-
+validate_positive_int(_) ->
+ false.
find_in_binary(_B, <<>>) ->
not_found;
-
find_in_binary(B, Data) ->
case binary:match(Data, [B], []) of
- nomatch ->
- MatchLength = erlang:min(byte_size(B), byte_size(Data)),
- match_prefix_at_end(binary:part(B, {0, MatchLength}),
- binary:part(Data, {byte_size(Data), -MatchLength}),
- MatchLength, byte_size(Data) - MatchLength);
- {Pos, _Len} ->
- {exact, Pos}
+ nomatch ->
+ MatchLength = erlang:min(byte_size(B), byte_size(Data)),
+ match_prefix_at_end(
+ binary:part(B, {0, MatchLength}),
+ binary:part(Data, {byte_size(Data), -MatchLength}),
+ MatchLength,
+ byte_size(Data) - MatchLength
+ );
+ {Pos, _Len} ->
+ {exact, Pos}
end.
match_prefix_at_end(Prefix, Data, PrefixLength, N) ->
@@ -648,10 +684,14 @@ match_prefix_at_end(Prefix, Data, PrefixLength, N) ->
match_rest_of_prefix([], _Prefix, _Data, _PrefixLength, _N) ->
not_found;
-
match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) ->
- case binary:match(binary:part(Data, {PrefixLength, Pos - PrefixLength}),
- [binary:part(Prefix, {0, PrefixLength - Pos})], []) of
+ case
+ binary:match(
+ binary:part(Data, {PrefixLength, Pos - PrefixLength}),
+ [binary:part(Prefix, {0, PrefixLength - Pos})],
+ []
+ )
+ of
nomatch ->
match_rest_of_prefix(Rest, Prefix, Data, PrefixLength, N);
{_Pos, _Len1} ->
@@ -660,44 +700,42 @@ match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) ->
callback_exists(Module, Function, Arity) ->
case ensure_loaded(Module) of
- true ->
- InfoList = Module:module_info(exports),
- lists:member({Function, Arity}, InfoList);
- false ->
- false
+ true ->
+ InfoList = Module:module_info(exports),
+ lists:member({Function, Arity}, InfoList);
+ false ->
+ false
end.
validate_callback_exists(Module, Function, Arity) ->
case callback_exists(Module, Function, Arity) of
- true ->
- ok;
- false ->
- CallbackStr = lists:flatten(
- io_lib:format("~w:~w/~w", [Module, Function, Arity])),
- throw({error,
- {undefined_callback, CallbackStr, {Module, Function, Arity}}})
+ true ->
+ ok;
+ false ->
+ CallbackStr = lists:flatten(
+ io_lib:format("~w:~w/~w", [Module, Function, Arity])
+ ),
+ throw({error, {undefined_callback, CallbackStr, {Module, Function, Arity}}})
end.
-
check_md5(_NewSig, <<>>) -> ok;
check_md5(Sig, Sig) -> ok;
check_md5(_, _) -> throw(md5_mismatch).
-
set_mqd_off_heap(Module) ->
case config:get_boolean("off_heap_mqd", atom_to_list(Module), true) of
true ->
try
erlang:process_flag(message_queue_data, off_heap),
ok
- catch error:badarg ->
+ catch
+ error:badarg ->
ok
end;
false ->
ok
end.
-
set_process_priority(Module, Level) ->
case config:get_boolean("process_priority", atom_to_list(Module), false) of
true ->
@@ -707,18 +745,17 @@ set_process_priority(Module, Level) ->
ok
end.
-
ensure_loaded(Module) when is_atom(Module) ->
case code:ensure_loaded(Module) of
- {module, Module} ->
- true;
- {error, embedded} ->
- true;
- {error, _} ->
- false
+ {module, Module} ->
+ true;
+ {error, embedded} ->
+ true;
+ {error, _} ->
+ false
end;
-ensure_loaded(_Module) -> false.
-
+ensure_loaded(_Module) ->
+ false.
%% This is especially useful in gen_servers when you need to call
%% a function that does a receive as it would hijack incoming messages.
@@ -736,11 +773,9 @@ with_proc(M, F, A, Timeout) ->
{error, timeout}
end.
-
process_dict_get(Pid, Key) ->
process_dict_get(Pid, Key, undefined).
-
process_dict_get(Pid, Key, DefaultValue) ->
case process_info(Pid, dictionary) of
{dictionary, Dict} ->
@@ -754,24 +789,24 @@ process_dict_get(Pid, Key, DefaultValue) ->
DefaultValue
end.
-
unique_monotonic_integer() ->
erlang:unique_integer([monotonic, positive]).
-
check_config_blacklist(Section) ->
- lists:foreach(fun(RegExp) ->
- case re:run(Section, RegExp) of
- nomatch ->
- ok;
- _ ->
- Msg = <<"Config section blacklisted for modification over HTTP API.">>,
- throw({forbidden, Msg})
- end
- end, ?BLACKLIST_CONFIG_SECTIONS),
+ lists:foreach(
+ fun(RegExp) ->
+ case re:run(Section, RegExp) of
+ nomatch ->
+ ok;
+ _ ->
+ Msg = <<"Config section blacklisted for modification over HTTP API.">>,
+ throw({forbidden, Msg})
+ end
+ end,
+ ?BLACKLIST_CONFIG_SECTIONS
+ ),
ok.
-
-ifdef(OTP_RELEASE).
-if(?OTP_RELEASE >= 22).
@@ -786,7 +821,8 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -if(?OTP_RELEASE >= 22)
+% -if(?OTP_RELEASE >= 22)
+-endif.
-else.
@@ -794,4 +830,5 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -ifdef(OTP_RELEASE)
+% -ifdef(OTP_RELEASE)
+-endif.
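Earlier in this file, to_existing_atom/1 shows erlfmt expanding single-line
try ... catch expressions so the protected body and each catch clause get
their own lines. A minimal sketch, using a hypothetical module that is not
part of this commit:

-module(fmt_try_example).
-export([to_atom/1]).

%% Hypothetical sketch of erlfmt output for an expanded one-line
%% try ... catch expression.
to_atom(V) when is_list(V) ->
    try
        list_to_existing_atom(V)
    catch
        _:_ -> V
    end.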
diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl
index 3fffd04b3..be6089dff 100644
--- a/src/couch/src/couch_uuids.erl
+++ b/src/couch/src/couch_uuids.erl
@@ -127,23 +127,22 @@ utc_random(ClockSeq) ->
utc_suffix(Suffix, ClockSeq, Now) ->
OsMicros = micros_since_epoch(Now),
- NewClockSeq = if
- OsMicros =< ClockSeq ->
- % Timestamp is lagging, use ClockSeq as Timestamp
- ClockSeq + 1;
- OsMicros > ClockSeq ->
- % Timestamp advanced, use it, and reset ClockSeq with it
- OsMicros
- end,
+ NewClockSeq =
+ if
+ OsMicros =< ClockSeq ->
+ % Timestamp is lagging, use ClockSeq as Timestamp
+ ClockSeq + 1;
+ OsMicros > ClockSeq ->
+ % Timestamp advanced, use it, and reset ClockSeq with it
+ OsMicros
+ end,
Prefix = io_lib:format("~14.16.0b", [NewClockSeq]),
{list_to_binary(Prefix ++ Suffix), NewClockSeq}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
utc_id_time_does_not_advance_test() ->
% Timestamp didn't advance, but the local clock sequence should, and new UUIDs
% should be generated
@@ -156,7 +155,6 @@ utc_id_time_does_not_advance_test() ->
?assertNotEqual(UtcId0, UtcId1),
?assertEqual(ClockSeq1 + 1, ClockSeq2).
-
utc_id_time_advanced_test() ->
% Timestamp advanced, a new UUID generated and also the last clock sequence
% is updated to that timestamp.
@@ -187,5 +185,4 @@ utc_random_test_time_advance_test() ->
?assertEqual(32, byte_size(UtcRandom)),
?assert(NextClockSeq > micros_since_epoch({1000, 0, 0})).
-
-endif.
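The utc_suffix/3 hunk above shows erlfmt's rule for block expressions bound
to a variable: the "if" starts on the line after "Var =" and indents one
level. A minimal sketch, using a hypothetical module that is not part of
this commit:

-module(fmt_if_example).
-export([bump/2]).

%% Hypothetical sketch of erlfmt output: the bound if block drops below
%% the "=" and indents.
bump(Clock, Now) ->
    NewClock =
        if
            Now =< Clock -> Clock + 1;
            Now > Clock -> Now
        end,
    NewClock.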
diff --git a/src/couch/src/couch_work_queue.erl b/src/couch/src/couch_work_queue.erl
index 01271bb35..e7855c74a 100644
--- a/src/couch/src/couch_work_queue.erl
+++ b/src/couch/src/couch_work_queue.erl
@@ -35,21 +35,17 @@
multi_workers = false
}).
-
new(Options) ->
gen_server:start_link(couch_work_queue, Options, []).
-
queue(Wq, Item) when is_binary(Item) ->
gen_server:call(Wq, {queue, Item, byte_size(Item)}, infinity);
queue(Wq, Item) ->
gen_server:call(Wq, {queue, Item, ?term_size(Item)}, infinity).
-
dequeue(Wq) ->
dequeue(Wq, all).
-
dequeue(Wq, MaxItems) ->
try
gen_server:call(Wq, {dequeue, MaxItems}, infinity)
@@ -57,7 +53,6 @@ dequeue(Wq, MaxItems) ->
_:_ -> closed
end.
-
item_count(Wq) ->
try
gen_server:call(Wq, item_count, infinity)
@@ -65,7 +60,6 @@ item_count(Wq) ->
_:_ -> closed
end.
-
size(Wq) ->
try
gen_server:call(Wq, size, infinity)
@@ -73,11 +67,9 @@ size(Wq) ->
_:_ -> closed
end.
-
close(Wq) ->
gen_server:cast(Wq, close).
-
init(Options) ->
Q = #q{
max_size = couch_util:get_value(max_size, Options, nil),
@@ -86,50 +78,47 @@ init(Options) ->
},
{ok, Q, hibernate}.
-
-terminate(_Reason, #q{work_waiters=Workers}) ->
+terminate(_Reason, #q{work_waiters = Workers}) ->
lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
-
handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) ->
- Q = Q0#q{size = Q0#q.size + Size,
- items = Q0#q.items + 1,
- queue = queue:in({Item, Size}, Q0#q.queue)},
- case (Q#q.size >= Q#q.max_size) orelse
- (Q#q.items >= Q#q.max_items) of
- true ->
- {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate};
- false ->
- {reply, ok, Q, hibernate}
+ Q = Q0#q{
+ size = Q0#q.size + Size,
+ items = Q0#q.items + 1,
+ queue = queue:in({Item, Size}, Q0#q.queue)
+ },
+ case
+ (Q#q.size >= Q#q.max_size) orelse
+ (Q#q.items >= Q#q.max_items)
+ of
+ true ->
+ {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate};
+ false ->
+ {reply, ok, Q, hibernate}
end;
-
handle_call({queue, Item, _}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) ->
gen_server:reply(W, {ok, [Item]}),
{reply, ok, Q#q{work_waiters = Rest}, hibernate};
-
handle_call({dequeue, Max}, From, Q) ->
#q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q,
case {Workers, Multi} of
- {[_ | _], false} ->
- exit("Only one caller allowed to wait for this work at a time");
- {[_ | _], true} ->
- {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
- _ ->
- case Count of
- 0 ->
- {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
- C when C > 0 ->
- deliver_queue_items(Max, Q)
- end
+ {[_ | _], false} ->
+ exit("Only one caller allowed to wait for this work at a time");
+ {[_ | _], true} ->
+ {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}};
+ _ ->
+ case Count of
+ 0 ->
+ {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}};
+ C when C > 0 ->
+ deliver_queue_items(Max, Q)
+ end
end;
-
handle_call(item_count, _From, Q) ->
{reply, Q#q.items, Q};
-
handle_call(size, _From, Q) ->
{reply, Q#q.size, Q}.
-
deliver_queue_items(Max, Q) ->
#q{
queue = Queue,
@@ -139,48 +128,48 @@ deliver_queue_items(Max, Q) ->
blocked = Blocked
} = Q,
case (Max =:= all) orelse (Max >= Count) of
- false ->
- {Items, Size2, Queue2, Blocked2} = dequeue_items(
- Max, Size, Queue, Blocked, []),
- Q2 = Q#q{
- items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2
- },
- {reply, {ok, Items}, Q2};
- true ->
- lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
- Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
- Items = [Item || {Item, _} <- queue:to_list(Queue)],
- case Close of
false ->
+ {Items, Size2, Queue2, Blocked2} = dequeue_items(
+ Max, Size, Queue, Blocked, []
+ ),
+ Q2 = Q#q{
+ items = Count - Max,
+ size = Size2,
+ blocked = Blocked2,
+ queue = Queue2
+ },
{reply, {ok, Items}, Q2};
true ->
- {stop, normal, {ok, Items}, Q2}
- end
+ lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
+ Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
+ Items = [Item || {Item, _} <- queue:to_list(Queue)],
+ case Close of
+ false ->
+ {reply, {ok, Items}, Q2};
+ true ->
+ {stop, normal, {ok, Items}, Q2}
+ end
end.
-
dequeue_items(0, Size, Queue, Blocked, DequeuedAcc) ->
{lists:reverse(DequeuedAcc), Size, Queue, Blocked};
-
dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) ->
{{value, {Item, ItemSize}}, Queue2} = queue:out(Queue),
case Blocked of
- [] ->
- Blocked2 = Blocked;
- [From | Blocked2] ->
- gen_server:reply(From, ok)
+ [] ->
+ Blocked2 = Blocked;
+ [From | Blocked2] ->
+ gen_server:reply(From, ok)
end,
dequeue_items(
- NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]).
-
+ NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]
+ ).
handle_cast(close, #q{items = 0} = Q) ->
{stop, normal, Q};
-
handle_cast(close, Q) ->
{noreply, Q#q{close_on_dequeue = true}}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
@@ -198,6 +187,4 @@ format_status(_Opt, [_PDict, Queue]) ->
blocked = {length, length(Blocked)},
work_waiters = {length, length(Waiters)}
},
- [{data, [{"State",
- ?record_to_keyval(q, Scrubbed)
- }]}].
+ [{data, [{"State", ?record_to_keyval(q, Scrubbed)}]}].
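The handle_call({queue, ...}) clause above shows erlfmt splitting record
updates that exceed the line width, one field per line. A minimal sketch,
using a hypothetical module that is not part of this commit:

-module(fmt_record_example).
-export([enqueue/2]).

-record(q, {size = 0, items = 0, queue = queue:new()}).

%% Hypothetical sketch of erlfmt output: each updated record field on
%% its own line.
enqueue(Item, #q{} = Q) when is_binary(Item) ->
    Q#q{
        size = Q#q.size + byte_size(Item),
        items = Q#q.items + 1,
        queue = queue:in(Item, Q#q.queue)
    }.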
diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl
index 48f49bda6..d7364012f 100644
--- a/src/couch/src/test_request.erl
+++ b/src/couch/src/test_request.erl
@@ -74,7 +74,6 @@ options(Url, Headers) ->
options(Url, Headers, Opts) ->
request(options, Url, Headers, [], Opts).
-
request(Method, Url, Headers) ->
request(Method, Url, Headers, []).
diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index c95c444f5..d9f4bc098 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -36,8 +36,7 @@
-record(test_context, {mocked = [], started = [], module}).
--define(DEFAULT_APPS,
- [inets, ibrowse, ssl, config, couch_epi, couch]).
+-define(DEFAULT_APPS, [inets, ibrowse, ssl, config, couch_epi, couch]).
srcdir() ->
code:priv_dir(couch) ++ "/../../".
@@ -52,9 +51,12 @@ init_code_path() ->
"ibrowse",
"mochiweb"
],
- lists:foreach(fun(Name) ->
- code:add_patha(filename:join([builddir(), "src", Name]))
- end, Paths).
+ lists:foreach(
+ fun(Name) ->
+ code:add_patha(filename:join([builddir(), "src", Name]))
+ end,
+ Paths
+ ).
source_file(Name) ->
filename:join([srcdir(), Name]).
@@ -89,21 +91,21 @@ start_applications(Apps) ->
start_applications([], Acc) ->
lists:reverse(Acc);
-start_applications([App|Apps], Acc) when App == kernel; App == stdlib ->
+start_applications([App | Apps], Acc) when App == kernel; App == stdlib ->
start_applications(Apps, Acc);
-start_applications([App|Apps], Acc) ->
+start_applications([App | Apps], Acc) ->
case application:start(App) of
- {error, {already_started, crypto}} ->
- start_applications(Apps, [crypto | Acc]);
- {error, {already_started, App}} ->
- io:format(standard_error, "Application ~s was left running!~n", [App]),
- application:stop(App),
- start_applications([App|Apps], Acc);
- {error, Reason} ->
- io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]),
- throw({error, {cannot_start, App, Reason}});
- ok ->
- start_applications(Apps, [App|Acc])
+ {error, {already_started, crypto}} ->
+ start_applications(Apps, [crypto | Acc]);
+ {error, {already_started, App}} ->
+ io:format(standard_error, "Application ~s was left running!~n", [App]),
+ application:stop(App),
+ start_applications([App | Apps], Acc);
+ {error, Reason} ->
+ io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]),
+ throw({error, {cannot_start, App, Reason}});
+ ok ->
+ start_applications(Apps, [App | Acc])
end.
stop_applications(Apps) ->
@@ -114,12 +116,11 @@ start_config(Chain) ->
case config:start_link(Chain) of
{ok, Pid} ->
{ok, Pid};
- {error, {already_started, OldPid}} ->
+ {error, {already_started, OldPid}} ->
ok = stop_config(OldPid),
start_config(Chain)
end.
-
stop_config(Pid) ->
Timeout = 1000,
case stop_sync(Pid, fun() -> config:stop() end, Timeout) of
@@ -145,8 +146,8 @@ stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) ->
catch unlink(Pid),
Res = (catch Fun()),
receive
- {'DOWN', MRef, _, _, _} ->
- Res
+ {'DOWN', MRef, _, _, _} ->
+ Res
after Timeout ->
timeout
end
@@ -154,7 +155,8 @@ stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) ->
after
erlang:demonitor(MRef, [flush])
end;
-stop_sync(_, _, _) -> error(badarg).
+stop_sync(_, _, _) ->
+ error(badarg).
stop_sync_throw(Name, Error) ->
stop_sync_throw(Name, shutdown, Error).
@@ -171,7 +173,8 @@ stop_sync_throw(Pid, Fun, Error, Timeout) ->
with_process_restart(Name) ->
{Pid, true} = with_process_restart(
- Name, fun() -> exit(whereis(Name), shutdown) end),
+ Name, fun() -> exit(whereis(Name), shutdown) end
+ ),
Pid.
with_process_restart(Name, Fun) ->
@@ -180,24 +183,26 @@ with_process_restart(Name, Fun) ->
with_process_restart(Name, Fun, Timeout) ->
Res = stop_sync(Name, Fun),
case wait_process(Name, Timeout) of
- timeout ->
- timeout;
- Pid ->
- {Pid, Res}
+ timeout ->
+ timeout;
+ Pid ->
+ {Pid, Res}
end.
-
wait_process(Name) ->
wait_process(Name, 5000).
wait_process(Name, Timeout) ->
- wait(fun() ->
- case whereis(Name) of
- undefined ->
- wait;
- Pid ->
- Pid
- end
- end, Timeout).
+ wait(
+ fun() ->
+ case whereis(Name) of
+ undefined ->
+ wait;
+ Pid ->
+ Pid
+ end
+ end,
+ Timeout
+ ).
wait(Fun) ->
wait(Fun, 5000, 50).
@@ -213,11 +218,11 @@ wait(_Fun, Timeout, _Delay, Started, Prev) when Prev - Started > Timeout ->
timeout;
wait(Fun, Timeout, Delay, Started, _Prev) ->
case Fun() of
- wait ->
- ok = timer:sleep(Delay),
- wait(Fun, Timeout, Delay, Started, now_us());
- Else ->
- Else
+ wait ->
+ ok = timer:sleep(Delay),
+ wait(Fun, Timeout, Delay, Started, now_us());
+ Else ->
+ Else
end.
wait_value(Fun, Value) ->
@@ -279,7 +284,7 @@ load_applications_with_stats() ->
ok.
stats_file_to_app(File) ->
- [_Desc, _Priv, App|_] = lists:reverse(filename:split(File)),
+ [_Desc, _Priv, App | _] = lists:reverse(filename:split(File)),
erlang:list_to_atom(App).
calculate_start_order(Apps) ->
@@ -309,14 +314,19 @@ load_app_deps(App, StartOrder) ->
{error, {already_loaded, App}} -> ok
end,
{ok, Apps} = application:get_key(App, applications),
- Deps = case App of
- kernel -> Apps;
- stdlib -> Apps;
- _ -> lists:usort([kernel, stdlib | Apps])
- end,
- NewStartOrder = lists:foldl(fun(Dep, Acc) ->
- load_app_deps(Dep, Acc)
- end, StartOrder, Deps),
+ Deps =
+ case App of
+ kernel -> Apps;
+ stdlib -> Apps;
+ _ -> lists:usort([kernel, stdlib | Apps])
+ end,
+ NewStartOrder = lists:foldl(
+ fun(Dep, Acc) ->
+ load_app_deps(Dep, Acc)
+ end,
+ StartOrder,
+ Deps
+ ),
[App | NewStartOrder]
end.
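
The hunks above show erlfmt's signature layout rule: when a call no longer fits on one line (typically because a fun literal spans several), every argument moves to its own line and the closing paren drops back to the call's indentation. Restating the init_code_path/0 change in isolation:

    %% Before: the fun and the trailing argument share lines.
    lists:foreach(fun(Name) ->
        code:add_patha(filename:join([builddir(), "src", Name]))
    end, Paths).

    %% After: one argument per line, dedented closing paren; same behavior.
    lists:foreach(
        fun(Name) ->
            code:add_patha(filename:join([builddir(), "src", Name]))
        end,
        Paths
    ).
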
diff --git a/src/couch_epi/src/couch_epi.erl b/src/couch_epi/src/couch_epi.erl
index 0e5c233ab..c708e5a0b 100644
--- a/src/couch_epi/src/couch_epi.erl
+++ b/src/couch_epi/src/couch_epi.erl
@@ -17,9 +17,14 @@
%% queries and introspection
-export([
- dump/1, get/2, get_value/3,
- by_key/1, by_key/2, by_source/1, by_source/2,
- keys/1, subscribers/1]).
+ dump/1,
+ get/2,
+ get_value/3,
+ by_key/1, by_key/2,
+ by_source/1, by_source/2,
+ keys/1,
+ subscribers/1
+]).
%% apply
-export([apply/5, decide/5]).
@@ -51,19 +56,18 @@
-opaque handle() :: module().
--type apply_opt()
- :: ignore_errors
- | concurrent
- | pipe.
+-type apply_opt() ::
+ ignore_errors
+ | concurrent
+ | pipe.
-type apply_opts() :: [apply_opt()].
--type data_spec()
- :: {static_module, module()}
- | {callback_module, module()}
- | {priv_file, FileName :: string()}
- | {file, FileName :: string()}.
-
+-type data_spec() ::
+ {static_module, module()}
+ | {callback_module, module()}
+ | {priv_file, FileName :: string()}
+ | {file, FileName :: string()}.
%% ------------------------------------------------------------------
%% API Function Definitions
@@ -87,93 +91,109 @@ get(Handle, Key) when Handle /= undefined ->
get_value(Handle, Subscriber, Key) when Handle /= undefined ->
couch_epi_data_gen:get(Handle, Subscriber, Key).
-
-spec by_key(Handle :: handle()) ->
[{Key :: key(), [{Source :: app(), properties()}]}].
by_key(Handle) when Handle /= undefined ->
couch_epi_data_gen:by_key(Handle).
-
-spec by_key(Handle :: handle(), Key :: key()) ->
[{Source :: app(), properties()}].
by_key(Handle, Key) when Handle /= undefined ->
couch_epi_data_gen:by_key(Handle, Key).
-
-spec by_source(Handle :: handle()) ->
[{Source :: app(), [{Key :: key(), properties()}]}].
by_source(Handle) when Handle /= undefined ->
couch_epi_data_gen:by_source(Handle).
-
-spec by_source(Handle :: handle(), Subscriber :: app()) ->
[{Key :: key(), properties()}].
by_source(Handle, Subscriber) when Handle /= undefined ->
couch_epi_data_gen:by_source(Handle, Subscriber).
-
-spec keys(Handle :: handle()) ->
[Key :: key()].
keys(Handle) when Handle /= undefined ->
couch_epi_data_gen:keys(Handle).
-
-spec subscribers(Handle :: handle()) ->
[Subscriber :: app()].
subscribers(Handle) when Handle /= undefined ->
couch_epi_data_gen:subscribers(Handle).
--spec apply(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) -> [any()].
+-spec apply(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) -> [any()].
apply(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
couch_epi_functions_gen:apply(Handle, ServiceId, Function, Args, Opts).
--spec get_handle({ServiceId :: service_id(), Key :: key()}) -> handle();
- (ServiceId :: service_id()) -> handle().
+-spec get_handle
+ ({ServiceId :: service_id(), Key :: key()}) -> handle();
+ (ServiceId :: service_id()) -> handle().
get_handle({_ServiceId, _Key} = EPIKey) ->
couch_epi_data_gen:get_handle(EPIKey);
get_handle(ServiceId) when is_atom(ServiceId) ->
couch_epi_functions_gen:get_handle(ServiceId).
--spec any(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) -> boolean().
+-spec any(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) -> boolean().
any(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
Replies = apply(Handle, ServiceId, Function, Args, Opts),
[] /= [Reply || Reply <- Replies, Reply == true].
--spec all(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) -> boolean().
+-spec all(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) -> boolean().
all(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
Replies = apply(Handle, ServiceId, Function, Args, Opts),
[] == [Reply || Reply <- Replies, Reply == false].
-spec is_configured(
- Handle :: handle(), Function :: atom(), Arity :: pos_integer()) -> boolean().
+ Handle :: handle(), Function :: atom(), Arity :: pos_integer()
+) -> boolean().
is_configured(Handle, Function, Arity) when Handle /= undefined ->
[] /= couch_epi_functions_gen:modules(Handle, Function, Arity).
-
-spec register_service(
- PluginId :: plugin_id(), Children :: [supervisor:child_spec()]) ->
- [supervisor:child_spec()].
+ PluginId :: plugin_id(), Children :: [supervisor:child_spec()]
+) ->
+ [supervisor:child_spec()].
register_service(Plugin, Children) ->
couch_epi_sup:plugin_childspecs(Plugin, Children).
--spec decide(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) ->
- no_decision | {decided, term()}.
+-spec decide(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) ->
+ no_decision | {decided, term()}.
decide(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
couch_epi_functions_gen:decide(Handle, ServiceId, Function, Args, Opts).
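
For orientation, a minimal usage sketch of the API whose specs were reflowed above (the service id and function name are hypothetical). Per the dispatch code in couch_epi_functions_gen later in this commit, the pipe option folds the argument list through each provider in turn, so every provider sees the previous provider's result:

    Handle = couch_epi:get_handle(my_service_id),
    Result = couch_epi:apply(Handle, my_service_id, my_fun, [InitialArgs], [pipe]).
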
diff --git a/src/couch_epi/src/couch_epi_codechange_monitor.erl b/src/couch_epi/src/couch_epi_codechange_monitor.erl
index 738480448..214aea14d 100644
--- a/src/couch_epi/src/couch_epi_codechange_monitor.erl
+++ b/src/couch_epi/src/couch_epi_codechange_monitor.erl
@@ -24,8 +24,14 @@
%% gen_server Function Exports
%% ------------------------------------------------------------------
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
%% ------------------------------------------------------------------
%% API Function Definitions
diff --git a/src/couch_epi/src/couch_epi_codegen.erl b/src/couch_epi/src/couch_epi_codegen.erl
index 89b82a1f9..212a4e31a 100644
--- a/src/couch_epi/src/couch_epi_codegen.erl
+++ b/src/couch_epi/src/couch_epi_codegen.erl
@@ -25,21 +25,29 @@ generate(ModuleName, Forms0) ->
scan(String) ->
Exprs = [E || E <- re:split(String, "\\.\n", [{return, list}, trim])],
- FormsTokens = lists:foldl(fun(Expr, Acc) ->
- case erl_scan:string(Expr) of
- {ok, [], _} ->
- Acc;
- {ok, Tokens, _} ->
- [{Expr, fixup_terminator(Tokens)} | Acc]
- end
- end, [], Exprs),
+ FormsTokens = lists:foldl(
+ fun(Expr, Acc) ->
+ case erl_scan:string(Expr) of
+ {ok, [], _} ->
+ Acc;
+ {ok, Tokens, _} ->
+ [{Expr, fixup_terminator(Tokens)} | Acc]
+ end
+ end,
+ [],
+ Exprs
+ ),
lists:reverse(FormsTokens).
parse(FormsTokens) ->
- ASTForms = lists:foldl(fun(Tokens, Forms) ->
- {ok, AST} = parse_form(Tokens),
- [AST | Forms]
- end, [], FormsTokens),
+ ASTForms = lists:foldl(
+ fun(Tokens, Forms) ->
+ {ok, AST} = parse_form(Tokens),
+ [AST | Forms]
+ end,
+ [],
+ FormsTokens
+ ),
lists:reverse(ASTForms).
format_term(Data) ->
@@ -49,11 +57,11 @@ parse_form(Tokens) ->
{Expr, Forms} = split_expression(Tokens),
case erl_parse:parse_form(Forms) of
{ok, AST} -> {ok, AST};
- {error,{_,_, Reason}} ->
- {error, Expr, Reason}
+ {error, {_, _, Reason}} -> {error, Expr, Reason}
end.
-split_expression({Expr, Forms}) -> {Expr, Forms};
+split_expression({Expr, Forms}) ->
+ {Expr, Forms};
split_expression(Tokens) ->
{Exprs, Forms} = lists:unzip(Tokens),
{string:join(Exprs, "\n"), lists:append(Forms)}.
@@ -63,14 +71,15 @@ function(Clauses) ->
fixup_terminator(Tokens) ->
case lists:last(Tokens) of
- {dot, _} -> Tokens;
- {';', _} -> Tokens;
+ {dot, _} ->
+ Tokens;
+ {';', _} ->
+ Tokens;
Token ->
Line = line(Token),
Tokens ++ [{dot, Line}]
end.
-
-ifdef(pre18).
line(Token) ->
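
The scan/1 and parse/1 pipelines reformatted above turn flat source text into token forms for dynamic module generation. A minimal end-to-end sketch (module name hypothetical), mirroring how generate_module/2 in this commit's tests drives it:

    Tokens = couch_epi_codegen:scan(
        "-module(epi_demo).\n"
        "-export([hi/0]).\n"
        "hi() -> ok.\n"
    ),
    ok = couch_epi_codegen:generate(epi_demo, Tokens),
    ok = epi_demo:hi().
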
diff --git a/src/couch_epi/src/couch_epi_data.erl b/src/couch_epi/src/couch_epi_data.erl
index 2bb09f6cf..ec554a40e 100644
--- a/src/couch_epi/src/couch_epi_data.erl
+++ b/src/couch_epi/src/couch_epi_data.erl
@@ -60,10 +60,13 @@ minimal_interval({_App, #couch_epi_spec{options = Options}}, Min) ->
end.
locate_sources(Specs) ->
- lists:map(fun({ProviderApp, #couch_epi_spec{value = Src}}) ->
- {ok, Locator} = locate(ProviderApp, Src),
- {ProviderApp, Locator}
- end, Specs).
+ lists:map(
+ fun({ProviderApp, #couch_epi_spec{value = Src}}) ->
+ {ok, Locator} = locate(ProviderApp, Src),
+ {ProviderApp, Locator}
+ end,
+ Specs
+ ).
locate(App, {priv_file, FileName}) ->
case priv_path(App, FileName) of
diff --git a/src/couch_epi/src/couch_epi_data_gen.erl b/src/couch_epi/src/couch_epi_data_gen.erl
index 4a283450d..65d689fbf 100644
--- a/src/couch_epi/src/couch_epi_data_gen.erl
+++ b/src/couch_epi/src/couch_epi_data_gen.erl
@@ -61,57 +61,57 @@ get_handle({Service, Key}) ->
%% ------------------------------------------------------------------
preamble() ->
- "
- -export([by_key/0, by_key/1]).
- -export([by_source/0, by_source/1]).
- -export([all/0, all/1, get/2]).
- -export([version/0, version/1]).
- -export([keys/0, subscribers/0]).
- -compile({no_auto_import,[get/0, get/1]}).
- all() ->
- lists:foldl(fun({Key, Defs}, Acc) ->
- [D || {_Subscriber, D} <- Defs ] ++ Acc
- end, [], by_key()).
-
- all(Key) ->
- lists:foldl(fun({Subscriber, Data}, Acc) ->
- [Data | Acc]
- end, [], by_key(Key)).
-
- by_key() ->
- [{Key, by_key(Key)} || Key <- keys()].
-
- by_key(Key) ->
- lists:foldl(
- fun(Source, Acc) -> append_if_defined(Source, get(Source, Key), Acc)
- end, [], subscribers()).
-
-
- by_source() ->
- [{Source, by_source(Source)} || Source <- subscribers()].
-
- by_source(Source) ->
- lists:foldl(
- fun(Key, Acc) -> append_if_defined(Key, get(Source, Key), Acc)
- end, [], keys()).
-
- version() ->
- [{Subscriber, version(Subscriber)} || Subscriber <- subscribers()].
-
- %% Helper functions
- append_if_defined(Type, undefined, Acc) -> Acc;
- append_if_defined(Type, Value, Acc) -> [{Type, Value} | Acc].
- "
- %% In addition to preamble we also generate following methods
- %% get(Source1, Key1) -> Data;
- %% get(Source, Key) -> undefined.
-
- %% version(Source1) -> "HASH";
- %% version(Source) -> {error, {unknown, Source}}.
-
- %% keys() -> [].
- %% subscribers() -> [].
- .
+ "\n"
+ " -export([by_key/0, by_key/1]).\n"
+ " -export([by_source/0, by_source/1]).\n"
+ " -export([all/0, all/1, get/2]).\n"
+ " -export([version/0, version/1]).\n"
+ " -export([keys/0, subscribers/0]).\n"
+ " -compile({no_auto_import,[get/0, get/1]}).\n"
+ " all() ->\n"
+ " lists:foldl(fun({Key, Defs}, Acc) ->\n"
+ " [D || {_Subscriber, D} <- Defs ] ++ Acc\n"
+ " end, [], by_key()).\n"
+ "\n"
+ " all(Key) ->\n"
+ " lists:foldl(fun({Subscriber, Data}, Acc) ->\n"
+ " [Data | Acc]\n"
+ " end, [], by_key(Key)).\n"
+ "\n"
+ " by_key() ->\n"
+ " [{Key, by_key(Key)} || Key <- keys()].\n"
+ "\n"
+ " by_key(Key) ->\n"
+ " lists:foldl(\n"
+ " fun(Source, Acc) -> append_if_defined(Source, get(Source, Key), Acc)\n"
+ " end, [], subscribers()).\n"
+ "\n"
+ "\n"
+ " by_source() ->\n"
+ " [{Source, by_source(Source)} || Source <- subscribers()].\n"
+ "\n"
+ " by_source(Source) ->\n"
+ " lists:foldl(\n"
+ " fun(Key, Acc) -> append_if_defined(Key, get(Source, Key), Acc)\n"
+ " end, [], keys()).\n"
+ "\n"
+ " version() ->\n"
+ " [{Subscriber, version(Subscriber)} || Subscriber <- subscribers()].\n"
+ "\n"
+ " %% Helper functions\n"
+ " append_if_defined(Type, undefined, Acc) -> Acc;\n"
+ " append_if_defined(Type, Value, Acc) -> [{Type, Value} | Acc].\n"
+ " "
+%% In addition to preamble we also generate following methods
+%% get(Source1, Key1) -> Data;
+%% get(Source, Key) -> undefined.
+
+%% version(Source1) -> "HASH";
+%% version(Source) -> {error, {unknown, Source}}.
+
+%% keys() -> [].
+%% subscribers() -> [].
+.
generate(Handle, Defs) ->
GetFunForms = couch_epi_codegen:function(getters(Defs)),
@@ -119,9 +119,10 @@ generate(Handle, Defs) ->
KeysForms = keys_method(Defs),
SubscribersForms = subscribers_method(Defs),
- Forms = couch_epi_codegen:scan(preamble())
- ++ GetFunForms ++ VersionFunForms
- ++ KeysForms ++ SubscribersForms,
+ Forms =
+ couch_epi_codegen:scan(preamble()) ++
+ GetFunForms ++ VersionFunForms ++
+ KeysForms ++ SubscribersForms,
couch_epi_codegen:generate(Handle, Forms).
@@ -135,22 +136,30 @@ subscribers_method(Defs) ->
getters(Defs) ->
DefaultClause = "get(_S, _K) -> undefined.",
- fold_defs(Defs, [couch_epi_codegen:scan(DefaultClause)],
+ fold_defs(
+ Defs,
+ [couch_epi_codegen:scan(DefaultClause)],
fun({Source, Key, Data}, Acc) ->
getter(Source, Key, Data) ++ Acc
- end).
+ end
+ ).
version_method(Defs) ->
DefaultClause = "version(S) -> {error, {unknown, S}}.",
- lists:foldl(fun({Source, Data}, Clauses) ->
- version(Source, Data) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Defs).
+ lists:foldl(
+ fun({Source, Data}, Clauses) ->
+ version(Source, Data) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Defs
+ ).
getter(Source, Key, Data) ->
D = couch_epi_codegen:format_term(Data),
Src = atom_to_list(Source),
couch_epi_codegen:scan(
- "get(" ++ Src ++ ", " ++ format_key(Key) ++ ") ->" ++ D ++ ";").
+ "get(" ++ Src ++ ", " ++ format_key(Key) ++ ") ->" ++ D ++ ";"
+ ).
version(Source, Data) ->
Src = atom_to_list(Source),
@@ -183,7 +192,6 @@ format_key(Key) ->
module_name({Service, Key}) when is_list(Service) andalso is_list(Key) ->
list_to_atom(string:join([atom_to_list(?MODULE), Service, Key], "_")).
-
get_current_definitions(Handle) ->
if_exists(Handle, by_source, 0, [], fun() ->
Handle:by_source()
@@ -205,11 +213,19 @@ defined_subscribers(Defs) ->
[Source || {Source, _} <- Defs].
fold_defs(Defs, Acc, Fun) ->
- lists:foldr(fun({Source, SourceData}, Clauses) ->
- lists:foldr(fun({Key, Data}, InAcc) ->
- Fun({Source, Key, Data}, InAcc)
- end, [], SourceData) ++ Clauses
- end, Acc, Defs).
+ lists:foldr(
+ fun({Source, SourceData}, Clauses) ->
+ lists:foldr(
+ fun({Key, Data}, InAcc) ->
+ Fun({Source, Key, Data}, InAcc)
+ end,
+ [],
+ SourceData
+ ) ++ Clauses
+ end,
+ Acc,
+ Defs
+ ).
%% ------------------------------------------------------------------
%% Tests
@@ -243,15 +259,20 @@ basic_test() ->
?assertEqual("3KZ4EG4WBF4J683W8GSDDPYR3", Module:version(app1)),
?assertEqual("4EFUU47W9XDNMV9RMZSSJQU3Y", Module:version(app2)),
- ?assertEqual({error,{unknown,bad}}, Module:version(bad)),
+ ?assertEqual({error, {unknown, bad}}, Module:version(bad)),
?assertEqual(
- [{app1,"3KZ4EG4WBF4J683W8GSDDPYR3"},
- {app2,"4EFUU47W9XDNMV9RMZSSJQU3Y"}], lists:usort(Module:version())),
+ [
+ {app1, "3KZ4EG4WBF4J683W8GSDDPYR3"},
+ {app2, "4EFUU47W9XDNMV9RMZSSJQU3Y"}
+ ],
+ lists:usort(Module:version())
+ ),
?assertEqual(
- [{app1,[some_nice_data]},{app2,"other data"}],
- lists:usort(Module:by_key(foo))),
+ [{app1, [some_nice_data]}, {app2, "other data"}],
+ lists:usort(Module:by_key(foo))
+ ),
?assertEqual([], lists:usort(Module:by_key(bad))),
@@ -260,8 +281,8 @@ basic_test() ->
{bar, [{app2, {"even more data"}}]},
{foo, [{app2, "other data"}, {app1, [some_nice_data]}]}
],
- lists:usort(Module:by_key())),
-
+ lists:usort(Module:by_key())
+ ),
?assertEqual(Defs1, lists:usort(Module:by_source(app1))),
?assertEqual(Defs2, lists:usort(Module:by_source(app2))),
@@ -273,10 +294,12 @@ basic_test() ->
{app1, [{foo, [some_nice_data]}]},
{app2, [{foo, "other data"}, {bar, {"even more data"}}]}
],
- lists:usort(Module:by_source())),
+ lists:usort(Module:by_source())
+ ),
?assertEqual(
- lists:usort([Data1, Data2, Data3]), lists:usort(Module:all())),
+ lists:usort([Data1, Data2, Data3]), lists:usort(Module:all())
+ ),
?assertEqual(lists:usort([Data1, Data2]), lists:usort(Module:all(foo))),
?assertEqual([], lists:usort(Module:all(bad))),
ok.
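
The preamble() rewrite above is purely cosmetic: Erlang joins adjacent string literals into a single string at scan time, so the literal-per-line form with explicit "\n" terminators denotes exactly the same term as the old multi-line literal. A two-line demonstration:

    S = "line one\n" "line two\n",
    true = (S =:= "line one\nline two\n").
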
diff --git a/src/couch_epi/src/couch_epi_functions.erl b/src/couch_epi/src/couch_epi_functions.erl
index ac9373928..1c5fd3403 100644
--- a/src/couch_epi/src/couch_epi_functions.erl
+++ b/src/couch_epi/src/couch_epi_functions.erl
@@ -43,7 +43,11 @@ definitions(Modules) ->
[{M, M:module_info(exports) -- Blacklist} || M <- Modules].
group(KV) ->
- Dict = lists:foldr(fun({K,V}, D) ->
- dict:append_list(K, V, D)
- end, dict:new(), KV),
+ Dict = lists:foldr(
+ fun({K, V}, D) ->
+ dict:append_list(K, V, D)
+ end,
+ dict:new(),
+ KV
+ ),
[{K, lists:reverse(V)} || {K, V} <- dict:to_list(Dict)].
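
group/1 above buckets key/value pairs into a dict and then restores per-key insertion order with lists:reverse/1. A worked example of what it computes:

    Grouped = group([{a, [1]}, {b, [2]}, {a, [3]}]),
    %% Grouped is [{a, [1, 3]}, {b, [2]}]; values per key keep insertion
    %% order, while ordering across keys follows dict:to_list/1 and is
    %% not specified.
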
diff --git a/src/couch_epi/src/couch_epi_functions_gen.erl b/src/couch_epi/src/couch_epi_functions_gen.erl
index 7408593b8..d7364c044 100644
--- a/src/couch_epi/src/couch_epi_functions_gen.erl
+++ b/src/couch_epi/src/couch_epi_functions_gen.erl
@@ -45,20 +45,30 @@ get_handle(ServiceId) ->
apply(ServiceId, Function, Args, Opts) when is_atom(ServiceId) ->
apply(get_handle(ServiceId), ServiceId, Function, Args, Opts).
--spec apply(Handle :: atom(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: couch_epi:apply_opts()) -> [any()].
+-spec apply(
+ Handle :: atom(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: couch_epi:apply_opts()
+) -> [any()].
apply(Handle, _ServiceId, Function, Args, Opts) ->
DispatchOpts = parse_opts(Opts),
Modules = providers(Handle, Function, length(Args), DispatchOpts),
dispatch(Handle, Modules, Function, Args, DispatchOpts).
--spec decide(Handle :: atom(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: couch_epi:apply_opts()) ->
- no_decision | {decided, term()}.
+-spec decide(
+ Handle :: atom(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: couch_epi:apply_opts()
+) ->
+ no_decision | {decided, term()}.
decide(Handle, _ServiceId, Function, Args, Opts) ->
- DispatchOpts = parse_opts([interruptible|Opts]),
+ DispatchOpts = parse_opts([interruptible | Opts]),
Modules = providers(Handle, Function, length(Args), DispatchOpts),
dispatch(Handle, Modules, Function, Args, DispatchOpts).
@@ -67,33 +77,33 @@ decide(Handle, _ServiceId, Function, Args, Opts) ->
%% ------------------------------------------------------------------
preamble() ->
- "
- -export([version/0, version/1]).
- -export([providers/0, providers/2]).
- -export([definitions/0, definitions/1]).
- -export([dispatch/3]).
- -export([callbacks/2]).
-
- version() ->
- [{Provider, version(Provider)} || Provider <- providers()].
-
- definitions() ->
- [{Provider, definitions(Provider)} || Provider <- providers()].
-
- callbacks(Provider, Function) ->
- [].
-
- "
- %% In addition to preamble we also generate following methods
- %% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
-
- %% version(Source1) -> "HASH";
- %% version(Source) -> {error, {unknown, Source}}.
-
- %% providers() -> [].
- %% providers(Function, Arity) -> [].
- %% definitions(Provider) -> [{Module, [{Fun, Arity}]}].
- .
+ "\n"
+ " -export([version/0, version/1]).\n"
+ " -export([providers/0, providers/2]).\n"
+ " -export([definitions/0, definitions/1]).\n"
+ " -export([dispatch/3]).\n"
+ " -export([callbacks/2]).\n"
+ "\n"
+ " version() ->\n"
+ " [{Provider, version(Provider)} || Provider <- providers()].\n"
+ "\n"
+ " definitions() ->\n"
+ " [{Provider, definitions(Provider)} || Provider <- providers()].\n"
+ "\n"
+ " callbacks(Provider, Function) ->\n"
+ " [].\n"
+ "\n"
+ " "
+%% In addition to preamble we also generate following methods
+%% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
+
+%% version(Source1) -> "HASH";
+%% version(Source) -> {error, {unknown, Source}}.
+
+%% providers() -> [].
+%% providers(Function, Arity) -> [].
+%% definitions(Provider) -> [{Module, [{Fun, Arity}]}].
+.
generate(Handle, Defs) ->
DispatchFunForms = couch_epi_codegen:function(dispatchers(Defs)),
@@ -103,10 +113,11 @@ generate(Handle, Defs) ->
ProvidersForms = couch_epi_codegen:function(providers_method(Defs)),
DefinitionsForms = couch_epi_codegen:function(definitions_method(Defs)),
- Forms = couch_epi_codegen:scan(preamble())
- ++ DispatchFunForms ++ VersionFunForms
- ++ ProvidersForms ++ AllProvidersForms
- ++ DefinitionsForms,
+ Forms =
+ couch_epi_codegen:scan(preamble()) ++
+ DispatchFunForms ++ VersionFunForms ++
+ ProvidersForms ++ AllProvidersForms ++
+ DefinitionsForms,
couch_epi_codegen:generate(Handle, Forms).
@@ -117,9 +128,13 @@ all_providers_method(Defs) ->
providers_method(Defs) ->
Providers = providers_by_function(Defs),
DefaultClause = "providers(_, _) -> [].",
- lists:foldl(fun({{Fun, Arity}, Modules}, Clauses) ->
- providers(Fun, Arity, Modules) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Providers).
+ lists:foldl(
+ fun({{Fun, Arity}, Modules}, Clauses) ->
+ providers(Fun, Arity, Modules) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Providers
+ ).
providers(Function, Arity, Modules) ->
ArityStr = integer_to_list(Arity),
@@ -127,26 +142,38 @@ providers(Function, Arity, Modules) ->
Fun = atom_to_list(Function),
%% providers(Function, Arity) -> [Module];
couch_epi_codegen:scan(
- "providers(" ++ Fun ++ "," ++ ArityStr ++ ") ->" ++ Mods ++ ";").
+ "providers(" ++ Fun ++ "," ++ ArityStr ++ ") ->" ++ Mods ++ ";"
+ ).
dispatchers(Defs) ->
DefaultClause = "dispatch(_Module, _Fun, _Args) -> ok.",
- fold_defs(Defs, [couch_epi_codegen:scan(DefaultClause)],
+ fold_defs(
+ Defs,
+ [couch_epi_codegen:scan(DefaultClause)],
fun({_Source, Module, Function, Arity}, Acc) ->
dispatcher(Module, Function, Arity) ++ Acc
- end).
+ end
+ ).
version_method(Defs) ->
DefaultClause = "version(S) -> {error, {unknown, S}}.",
- lists:foldl(fun({Source, SrcDefs}, Clauses) ->
- version(Source, SrcDefs) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Defs).
+ lists:foldl(
+ fun({Source, SrcDefs}, Clauses) ->
+ version(Source, SrcDefs) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Defs
+ ).
definitions_method(Defs) ->
DefaultClause = "definitions(S) -> {error, {unknown, S}}.",
- lists:foldl(fun({Source, SrcDefs}, Clauses) ->
- definition(Source, SrcDefs) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Defs).
+ lists:foldl(
+ fun({Source, SrcDefs}, Clauses) ->
+ definition(Source, SrcDefs) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Defs
+ ).
definition(Source, Defs) ->
Src = atom_to_list(Source),
@@ -159,27 +186,28 @@ dispatcher(Module, Function, 0) ->
%% dispatch(Module, Function, []) -> Module:Function();
couch_epi_codegen:scan(
- "dispatch(" ++ M ++ "," ++ Fun ++ ", []) ->"
- ++ M ++ ":" ++ Fun ++ "();");
+ "dispatch(" ++ M ++ "," ++ Fun ++ ", []) ->" ++
+ M ++ ":" ++ Fun ++ "();"
+ );
dispatcher(Module, Function, Arity) ->
Args = args_string(Arity),
M = atom_to_list(Module),
Fun = atom_to_list(Function),
%% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
couch_epi_codegen:scan(
- "dispatch(" ++ M ++ "," ++ Fun ++ ", [" ++ Args ++ "]) ->"
- ++ M ++ ":" ++ Fun ++ "(" ++ Args ++ ");").
+ "dispatch(" ++ M ++ "," ++ Fun ++ ", [" ++ Args ++ "]) ->" ++
+ M ++ ":" ++ Fun ++ "(" ++ Args ++ ");"
+ ).
args_string(Arity) ->
- Vars = ["A" ++ integer_to_list(Seq) || Seq <- lists:seq(1, Arity)],
+ Vars = ["A" ++ integer_to_list(Seq) || Seq <- lists:seq(1, Arity)],
string:join(Vars, ", ").
version(Source, SrcDefs) ->
Modules = [Module || {Module, _Exports} <- SrcDefs],
couch_epi_codegen:scan(
- "version(" ++ atom_to_list(Source) ++ ") ->" ++ hash(Modules) ++ ";").
-
-
+ "version(" ++ atom_to_list(Source) ++ ") ->" ++ hash(Modules) ++ ";"
+ ).
%% ------------------------------------------------------------------
%% Helper functions
@@ -204,26 +232,48 @@ defined_providers(Defs) ->
%% Defs = [{Source, [{Module, [{Fun, Arity}]}]}]
fold_defs(Defs, Acc, Fun) ->
- lists:foldl(fun({Source, SourceData}, Clauses) ->
- lists:foldl(fun({Module, Exports}, ExportsAcc) ->
- lists:foldl(fun({Function, Arity}, InAcc) ->
- Fun({Source, Module, Function, Arity}, InAcc)
- end, [], Exports) ++ ExportsAcc
- end, [], SourceData) ++ Clauses
- end, Acc, Defs).
+ lists:foldl(
+ fun({Source, SourceData}, Clauses) ->
+ lists:foldl(
+ fun({Module, Exports}, ExportsAcc) ->
+ lists:foldl(
+ fun({Function, Arity}, InAcc) ->
+ Fun({Source, Module, Function, Arity}, InAcc)
+ end,
+ [],
+ Exports
+ ) ++ ExportsAcc
+ end,
+ [],
+ SourceData
+ ) ++ Clauses
+ end,
+ Acc,
+ Defs
+ ).
providers_by_function(Defs) ->
- Providers = fold_defs(Defs, [],
+ Providers = fold_defs(
+ Defs,
+ [],
fun({_Source, Module, Function, Arity}, Acc) ->
[{{Function, Arity}, Module} | Acc]
end
),
- Dict = lists:foldl(fun({K, V}, Acc) ->
- dict:update(K, fun(Modules) ->
- append_if_missing(Modules, V)
- end, [V], Acc)
-
- end, dict:new(), Providers),
+ Dict = lists:foldl(
+ fun({K, V}, Acc) ->
+ dict:update(
+ K,
+ fun(Modules) ->
+ append_if_missing(Modules, V)
+ end,
+ [V],
+ Acc
+ )
+ end,
+ dict:new(),
+ Providers
+ ),
dict:to_list(Dict).
append_if_missing(List, Value) ->
@@ -238,36 +288,75 @@ hash(Modules) ->
dispatch(_Handle, _Modules, _Func, _Args, #opts{concurrent = true, pipe = true}) ->
throw({error, {incompatible_options, [concurrent, pipe]}});
-dispatch(Handle, Modules, Function, Args,
- #opts{pipe = true, ignore_errors = true}) ->
- lists:foldl(fun(Module, Acc) ->
- try
+dispatch(
+ Handle,
+ Modules,
+ Function,
+ Args,
+ #opts{pipe = true, ignore_errors = true}
+) ->
+ lists:foldl(
+ fun(Module, Acc) ->
+ try
+ Handle:dispatch(Module, Function, Acc)
+ catch
+ _:_ ->
+ Acc
+ end
+ end,
+ Args,
+ Modules
+ );
+dispatch(
+ Handle,
+ Modules,
+ Function,
+ Args,
+ #opts{pipe = true}
+) ->
+ lists:foldl(
+ fun(Module, Acc) ->
Handle:dispatch(Module, Function, Acc)
- catch _:_ ->
- Acc
- end
- end, Args, Modules);
-dispatch(Handle, Modules, Function, Args,
- #opts{pipe = true}) ->
- lists:foldl(fun(Module, Acc) ->
- Handle:dispatch(Module, Function, Acc)
- end, Args, Modules);
-dispatch(Handle, Modules, Function, Args,
- #opts{interruptible = true}) ->
+ end,
+ Args,
+ Modules
+ );
+dispatch(
+ Handle,
+ Modules,
+ Function,
+ Args,
+ #opts{interruptible = true}
+) ->
apply_while(Modules, Handle, Function, Args);
dispatch(Handle, Modules, Function, Args, #opts{} = Opts) ->
[do_dispatch(Handle, Module, Function, Args, Opts) || Module <- Modules].
-do_dispatch(Handle, Module, Function, Args,
- #opts{concurrent = true, ignore_errors = true}) ->
+do_dispatch(
+ Handle,
+ Module,
+ Function,
+ Args,
+ #opts{concurrent = true, ignore_errors = true}
+) ->
spawn(fun() ->
(catch Handle:dispatch(Module, Function, Args))
end);
-do_dispatch(Handle, Module, Function, Args,
- #opts{ignore_errors = true}) ->
+do_dispatch(
+ Handle,
+ Module,
+ Function,
+ Args,
+ #opts{ignore_errors = true}
+) ->
(catch Handle:dispatch(Module, Function, Args));
-do_dispatch(Handle, Module, Function, Args,
- #opts{concurrent = true}) ->
+do_dispatch(
+ Handle,
+ Module,
+ Function,
+ Args,
+ #opts{concurrent = true}
+) ->
spawn(fun() -> Handle:dispatch(Module, Function, Args) end);
do_dispatch(Handle, Module, Function, Args, #opts{}) ->
Handle:dispatch(Module, Function, Args).
@@ -285,13 +374,13 @@ apply_while([Module | Modules], Handle, Function, Args) ->
parse_opts(Opts) ->
parse_opts(Opts, #opts{}).
-parse_opts([ignore_errors|Rest], #opts{} = Acc) ->
+parse_opts([ignore_errors | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{ignore_errors = true});
-parse_opts([pipe|Rest], #opts{} = Acc) ->
+parse_opts([pipe | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{pipe = true});
-parse_opts([concurrent|Rest], #opts{} = Acc) ->
+parse_opts([concurrent | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{concurrent = true});
-parse_opts([interruptible|Rest], #opts{} = Acc) ->
+parse_opts([interruptible | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{interruptible = true});
parse_opts([], Acc) ->
Acc.
@@ -324,16 +413,17 @@ basic_test() ->
generate(Module, [{app1, Defs}, {app2, Defs}]),
Exports = lists:sort([
- {callbacks,2},
- {version,1},
- {providers,2},
- {definitions,1},
- {module_info,0},
- {version,0},
- {dispatch,3},
- {providers,0},
- {module_info,1},
- {definitions,0}]),
+ {callbacks, 2},
+ {version, 1},
+ {providers, 2},
+ {definitions, 1},
+ {module_info, 0},
+ {version, 0},
+ {dispatch, 3},
+ {providers, 0},
+ {module_info, 1},
+ {definitions, 0}
+ ]),
?assertEqual(Exports, lists:sort(Module:module_info(exports))),
?assertEqual([app1, app2], lists:sort(Module:providers())),
@@ -356,19 +446,19 @@ generate_module(Name, Body) ->
couch_epi_codegen:generate(Name, Tokens).
decide_module(decide) ->
- "
- -export([inc/1]).
-
- inc(A) ->
- {decided, A + 1}.
- ";
+ "\n"
+ " -export([inc/1]).\n"
+ "\n"
+ " inc(A) ->\n"
+ " {decided, A + 1}.\n"
+ " ";
decide_module(no_decision) ->
- "
- -export([inc/1]).
-
- inc(_A) ->
- no_decision.
- ".
+ "\n"
+ " -export([inc/1]).\n"
+ "\n"
+ " inc(_A) ->\n"
+ " no_decision.\n"
+ " ".
decide_test() ->
ok = generate_module(decide, decide_module(decide)),
@@ -380,12 +470,12 @@ decide_test() ->
DecideFirstHandle = decide_first_handle,
ok = generate(DecideFirstHandle, [DecideDef, NoDecissionDef]),
?assertMatch([decide, no_decision], DecideFirstHandle:providers(inc, 1)),
- ?assertMatch({decided,4}, decide(DecideFirstHandle, anything, inc, [3], [])),
+ ?assertMatch({decided, 4}, decide(DecideFirstHandle, anything, inc, [3], [])),
DecideSecondHandle = decide_second_handle,
ok = generate(DecideSecondHandle, [NoDecissionDef, DecideDef]),
?assertMatch([no_decision, decide], DecideSecondHandle:providers(inc, 1)),
- ?assertMatch({decided,4}, decide(DecideSecondHandle, anything, inc, [3], [])),
+ ?assertMatch({decided, 4}, decide(DecideSecondHandle, anything, inc, [3], [])),
NoDecissionHandle = no_decision_handle,
ok = generate(NoDecissionHandle, [NoDecissionDef]),
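
Context for the decide/5 reflow above: apply_while/4 walks the providers in registration order and stops at the first {decided, _} result, so ordering only matters when several providers can decide. The two assertions in decide_test exercise exactly that; condensed:

    %% Provider order differs, answer does not: the no_decision provider
    %% is simply skipped until some provider decides.
    {decided, 4} = decide(decide_first_handle, anything, inc, [3], []),
    {decided, 4} = decide(decide_second_handle, anything, inc, [3], []).
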
diff --git a/src/couch_epi/src/couch_epi_module_keeper.erl b/src/couch_epi/src/couch_epi_module_keeper.erl
index 36376fec0..97420ea7b 100644
--- a/src/couch_epi/src/couch_epi_module_keeper.erl
+++ b/src/couch_epi/src/couch_epi_module_keeper.erl
@@ -12,7 +12,6 @@
-module(couch_epi_module_keeper).
-
-behaviour(gen_server).
%% ------------------------------------------------------------------
@@ -22,17 +21,29 @@
-export([start_link/3, stop/1]).
-export([reload/1]).
-
%% ------------------------------------------------------------------
%% gen_server Function Exports
%% ------------------------------------------------------------------
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-record(state, {
- codegen, module, key, type, handle, hash, kind,
- timer = {undefined, undefined}}).
+ codegen,
+ module,
+ key,
+ type,
+ handle,
+ hash,
+ kind,
+ timer = {undefined, undefined}
+}).
%% ------------------------------------------------------------------
%% API Function Definitions
@@ -41,7 +52,8 @@
start_link(Type, Key, Codegen) ->
Handle = Codegen:get_handle(Key),
gen_server:start_link(
- {local, Handle}, ?MODULE, [Type, Codegen, Key, Handle], []).
+ {local, Handle}, ?MODULE, [Type, Codegen, Key, Handle], []
+ ).
stop(Server) ->
catch gen_server:call(Server, stop).
@@ -151,8 +163,9 @@ safe_set(Hash, Data, #state{} = State) ->
OldData = CodeGen:get_current_definitions(Handle),
ok = CodeGen:generate(Handle, Data),
{ok, OldData, State#state{hash = Hash}}
- catch Class:Reason ->
- {{Class, Reason}, State}
+ catch
+ Class:Reason ->
+ {{Class, Reason}, State}
end.
notify(Key, OldData, NewData, Defs) ->
diff --git a/src/couch_epi/src/couch_epi_plugin.erl b/src/couch_epi/src/couch_epi_plugin.erl
index 2cb1f3ebe..1ec09d8dc 100644
--- a/src/couch_epi/src/couch_epi_plugin.erl
+++ b/src/couch_epi/src/couch_epi_plugin.erl
@@ -28,25 +28,25 @@
%% Types Definitions
%% ------------------------------------------------------------------
--type kind()
- :: providers
- | data_providers
- | services
- | data_subscriptions
- .
+-type kind() ::
+ providers
+ | data_providers
+ | services
+ | data_subscriptions.
--type key()
- :: {ServiceId :: couch_epi:service_id(), Key :: couch_epi:key()}
- | couch_epi:service_id().
+-type key() ::
+ {ServiceId :: couch_epi:service_id(), Key :: couch_epi:key()}
+ | couch_epi:service_id().
-callback app() -> couch_epi:app().
-callback providers() -> [{couch_epi:service_id(), module()}].
-callback services() -> [{couch_epi:service_id(), module()}].
-callback data_subscriptions() -> [{couch_epi:service_id(), couch_epi:key()}].
--callback data_providers() -> [
- {couch_epi:key(), couch_epi:data_spec()}
+-callback data_providers() ->
+ [
+ {couch_epi:key(), couch_epi:data_spec()}
| {couch_epi:key(), couch_epi:data_spec(), [couch_epi:data_spec_opt()]}
-].
+ ].
-callback processes() -> [{couch_epi:plugin_id(), [supervisor:child_spec()]}].
-callback notify(Key :: term(), Old :: term(), New :: term()) -> ok.
@@ -58,8 +58,7 @@ definitions(Plugins) ->
lists:append([extract_definitions(Plugin) || Plugin <- Plugins]).
plugin_processes(Plugin, Plugins) ->
- lists:append([
- Specs || P0 <- Plugins, {P1, Specs} <- P0:processes(), P1 =:= Plugin]).
+ lists:append([Specs || P0 <- Plugins, {P1, Specs} <- P0:processes(), P1 =:= Plugin]).
grouped_definitions(Plugins) ->
Defs = lists:append([extract_definitions(Plugin) || Plugin <- Plugins]),
@@ -87,7 +86,6 @@ notify_plugin(Plugin, Key, OldData, NewData) ->
App = Plugin:app(),
Plugin:notify(Key, app_data(App, OldData), app_data(App, NewData)).
-
app_data(App, Data) ->
case lists:keyfind(App, 1, Data) of
{App, AppData} -> AppData;
@@ -100,12 +98,11 @@ filter_by_key(Definitions, Kind, Key) ->
by_key(#couch_epi_spec{kind = Kind, key = Key}, Kind, Key) -> true;
by_key(_, _, _) -> false.
-
extract_definitions(Plugin) ->
- specs(Plugin, providers)
- ++ specs(Plugin, data_providers)
- ++ specs(Plugin, services)
- ++ specs(Plugin, data_subscriptions).
+ specs(Plugin, providers) ++
+ specs(Plugin, data_providers) ++
+ specs(Plugin, services) ++
+ specs(Plugin, data_subscriptions).
-spec group_specs(Specs :: [#couch_epi_spec{}]) -> GroupedSpecs when
GroupedSpecs ::
@@ -113,15 +110,23 @@ extract_definitions(Plugin) ->
group_specs(Specs) ->
Grouped = group(
- [{{Kind, Key}, group([{App, Spec}])}
- || #couch_epi_spec{kind = Kind, key = Key, app = App} = Spec <- Specs]),
+ [
+ {{Kind, Key}, group([{App, Spec}])}
+ || #couch_epi_spec{kind = Kind, key = Key, app = App} = Spec <- Specs
+ ]
+ ),
[{K, lists:reverse(V)} || {K, V} <- Grouped].
-
group(KV) ->
- dict:to_list(lists:foldr(fun({K,V}, D) ->
- dict:append_list(K, V, D)
- end, dict:new(), KV)).
+ dict:to_list(
+ lists:foldr(
+ fun({K, V}, D) ->
+ dict:append_list(K, V, D)
+ end,
+ dict:new(),
+ KV
+ )
+ ).
specs(Plugin, Kind) ->
[spec(parse(Spec, Kind), Plugin, Kind) || Spec <- Plugin:Kind()].
@@ -156,7 +161,6 @@ type(services, _) -> couch_epi_functions;
type(data_providers, _) -> couch_epi_data;
type(data_subscriptions, _) -> undefined.
-
%% ------------------------------------------------------------------
%% Tests
%% ------------------------------------------------------------------
@@ -165,66 +169,66 @@ type(data_subscriptions, _) -> undefined.
-include_lib("eunit/include/eunit.hrl").
plugin_module(foo_epi) ->
- "
- -compile([export_all]).
-
- app() -> foo.
- providers() ->
- [
- {chttpd_handlers, foo_provider},
- {bar_handlers, bar_provider1},
- {bar_handlers, bar_provider2}
- ].
-
- services() ->
- [
- {foo_handlers, foo_service}
- ].
-
- data_providers() ->
- [
- {{foo_service, data1}, {file, \"abs_file\"}, [{interval, 5000}]},
- {{foo_service, data2}, {priv_file, \"priv_file\"}},
- {{foo_service, data3}, {module, foo_data}}
- ].
-
- data_subscriptions() ->
- [
- {stats, foo_definitions}
- ].
-
- processes() -> [].
-
- notify(_, _, _) -> ok.
- ";
+ "\n"
+ " -compile([export_all]).\n"
+ "\n"
+ " app() -> foo.\n"
+ " providers() ->\n"
+ " [\n"
+ " {chttpd_handlers, foo_provider},\n"
+ " {bar_handlers, bar_provider1},\n"
+ " {bar_handlers, bar_provider2}\n"
+ " ].\n"
+ "\n"
+ " services() ->\n"
+ " [\n"
+ " {foo_handlers, foo_service}\n"
+ " ].\n"
+ "\n"
+ " data_providers() ->\n"
+ " [\n"
+ " {{foo_service, data1}, {file, \"abs_file\"}, [{interval, 5000}]},\n"
+ " {{foo_service, data2}, {priv_file, \"priv_file\"}},\n"
+ " {{foo_service, data3}, {module, foo_data}}\n"
+ " ].\n"
+ "\n"
+ " data_subscriptions() ->\n"
+ " [\n"
+ " {stats, foo_definitions}\n"
+ " ].\n"
+ "\n"
+ " processes() -> [].\n"
+ "\n"
+ " notify(_, _, _) -> ok.\n"
+ " ";
plugin_module(bar_epi) ->
- "
- -compile([export_all]).
-
- app() -> bar.
- providers() ->
- [
- {chttpd_handlers, bar_provider},
- {bar_handlers, bar_provider}
- ].
-
- services() ->
- [
- {bar_handlers, bar_service}
- ].
-
- data_providers() ->
- [].
-
- data_subscriptions() ->
- [
- {foo_service, data1}
- ].
-
- processes() -> [].
-
- notify(_, _, _) -> ok.
- ".
+ "\n"
+ " -compile([export_all]).\n"
+ "\n"
+ " app() -> bar.\n"
+ " providers() ->\n"
+ " [\n"
+ " {chttpd_handlers, bar_provider},\n"
+ " {bar_handlers, bar_provider}\n"
+ " ].\n"
+ "\n"
+ " services() ->\n"
+ " [\n"
+ " {bar_handlers, bar_service}\n"
+ " ].\n"
+ "\n"
+ " data_providers() ->\n"
+ " [].\n"
+ "\n"
+ " data_subscriptions() ->\n"
+ " [\n"
+ " {foo_service, data1}\n"
+ " ].\n"
+ "\n"
+ " processes() -> [].\n"
+ "\n"
+ " notify(_, _, _) -> ok.\n"
+ " ".
generate_module(Name, Body) ->
Tokens = couch_epi_codegen:scan(Body),
@@ -234,7 +238,7 @@ generate_modules(Kind, Providers) ->
[generate_module(P, Kind(P)) || P <- Providers].
provider_modules_order_test() ->
- [ok,ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
+ [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
ok = application:set_env(couch_epi, plugins, [foo_epi, bar_epi]),
Expected = [
{foo, bar_provider1},
@@ -249,7 +253,7 @@ provider_modules_order_test() ->
ok.
providers_order_test() ->
- [ok,ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
+ [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
Expected = [
{foo, bar_provider1},
{foo, bar_provider2},
@@ -331,7 +335,8 @@ definitions_test() ->
key = chttpd_handlers,
value = foo_provider,
codegen = couch_epi_functions_gen,
- type = couch_epi_functions},
+ type = couch_epi_functions
+ },
#couch_epi_spec{
behaviour = foo_epi,
app = foo,
@@ -340,14 +345,15 @@ definitions_test() ->
key = foo_handlers,
value = foo_service,
codegen = couch_epi_functions_gen,
- type = couch_epi_functions},
+ type = couch_epi_functions
+ },
#couch_epi_spec{
behaviour = foo_epi,
app = foo,
kind = data_providers,
options = [{interval, 5000}],
key = {foo_service, data1},
- value = {file,"abs_file"},
+ value = {file, "abs_file"},
codegen = couch_epi_data_gen,
type = couch_epi_data
},
@@ -382,7 +388,7 @@ definitions_test() ->
}
]),
- [ok,ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
+ [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
Tests = lists:zip(Expected, lists:sort(definitions([foo_epi, bar_epi]))),
[?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
ok.
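
The plugin_module/1 fixtures above, now rendered as joined string literals, spell out the whole couch_epi_plugin behaviour. The same shape as a standalone module, with every name hypothetical:

    -module(my_epi_plugin).
    -behaviour(couch_epi_plugin).
    -export([
        app/0, providers/0, services/0, data_providers/0,
        data_subscriptions/0, processes/0, notify/3
    ]).

    app() -> my_app.
    providers() -> [{chttpd_handlers, my_httpd_handlers}].
    services() -> [].
    data_providers() -> [].
    data_subscriptions() -> [].
    processes() -> [].
    notify(_Key, _Old, _New) -> ok.
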
diff --git a/src/couch_epi/src/couch_epi_sup.erl b/src/couch_epi/src/couch_epi_sup.erl
index 477cbe79e..aca423a7d 100644
--- a/src/couch_epi/src/couch_epi_sup.erl
+++ b/src/couch_epi/src/couch_epi_sup.erl
@@ -61,7 +61,7 @@ plugin_childspecs(Plugin, Children) ->
%% ===================================================================
init([]) ->
- {ok, { {one_for_one, 5, 10}, keepers()} }.
+ {ok, {{one_for_one, 5, 10}, keepers()}}.
%% ------------------------------------------------------------------
%% Internal Function Definitions
@@ -79,13 +79,16 @@ plugin_childspecs(Plugin, Plugins, Children) ->
merge(ExtraChildren, Children) ++ childspecs(Definitions).
childspecs(Definitions) ->
- lists:map(fun({{Kind, Key}, Defs}) ->
- CodeGen = couch_epi_plugin:codegen(Kind),
- Handle = CodeGen:get_handle(Key),
- Modules = lists:append([modules(Spec) || {_App, Spec} <- Defs]),
- Name = service_name(Key) ++ "|" ++ atom_to_list(Kind),
- code_monitor(Name, [Handle], [Handle|Modules])
- end, Definitions).
+ lists:map(
+ fun({{Kind, Key}, Defs}) ->
+ CodeGen = couch_epi_plugin:codegen(Kind),
+ Handle = CodeGen:get_handle(Key),
+ Modules = lists:append([modules(Spec) || {_App, Spec} <- Defs]),
+ Name = service_name(Key) ++ "|" ++ atom_to_list(Kind),
+ code_monitor(Name, [Handle], [Handle | Modules])
+ end,
+ Definitions
+ ).
%% ------------------------------------------------------------------
%% Helper Function Definitions
@@ -95,21 +98,36 @@ remove_duplicates(Definitions) ->
lists:ukeysort(1, Definitions).
keeper_childspecs(Definitions) ->
- lists:map(fun({{Kind, Key}, _Specs}) ->
- Name = service_name(Key) ++ "|keeper",
- CodeGen = couch_epi_plugin:codegen(Kind),
- Handle = CodeGen:get_handle(Key),
- keeper(Name, [provider_kind(Kind), Key, CodeGen], [Handle])
- end, Definitions).
+ lists:map(
+ fun({{Kind, Key}, _Specs}) ->
+ Name = service_name(Key) ++ "|keeper",
+ CodeGen = couch_epi_plugin:codegen(Kind),
+ Handle = CodeGen:get_handle(Key),
+ keeper(Name, [provider_kind(Kind), Key, CodeGen], [Handle])
+ end,
+ Definitions
+ ).
keeper(Name, Args, Modules) ->
- {"couch_epi|" ++ Name, {couch_epi_module_keeper, start_link,
- Args}, permanent, 5000, worker, Modules}.
+ {
+ "couch_epi|" ++ Name,
+ {couch_epi_module_keeper, start_link, Args},
+ permanent,
+ 5000,
+ worker,
+ Modules
+ }.
code_monitor(Name, Args, Modules0) ->
Modules = [couch_epi_codechange_monitor | Modules0],
- {"couch_epi_codechange_monitor|" ++ Name,
- {couch_epi_codechange_monitor, start_link, Args}, permanent, 5000, worker, Modules}.
+ {
+ "couch_epi_codechange_monitor|" ++ Name,
+ {couch_epi_codechange_monitor, start_link, Args},
+ permanent,
+ 5000,
+ worker,
+ Modules
+ }.
provider_kind(services) -> providers;
provider_kind(data_subscriptions) -> data_providers;
@@ -138,5 +156,8 @@ merge([], Children) ->
merge([{Id, _, _, _, _, _} = Spec | Rest], Children) ->
merge(Rest, lists:keystore(Id, 1, Children, Spec));
merge([#{id := Id} = Spec | Rest], Children) ->
- Replace = fun(#{id := I}) when I == Id -> Spec; (E) -> E end,
+ Replace = fun
+ (#{id := I}) when I == Id -> Spec;
+ (E) -> E
+ end,
merge(Rest, lists:map(Replace, Children)).
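
The merge/2 change at the end is worth a second look: erlfmt renders a multi-clause fun with a bare fun keyword and one clause per line. The two spellings below are the same fun:

    %% One line (before):
    Replace = fun(#{id := I}) when I == Id -> Spec; (E) -> E end,

    %% Clause per line (after), no behavior change:
    Replace = fun
        (#{id := I}) when I == Id -> Spec;
        (E) -> E
    end,
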
diff --git a/src/couch_epi/src/couch_epi_util.erl b/src/couch_epi/src/couch_epi_util.erl
index ea4b10ea8..2c86a96e2 100644
--- a/src/couch_epi/src/couch_epi_util.erl
+++ b/src/couch_epi/src/couch_epi_util.erl
@@ -23,7 +23,7 @@ module_version(Module) ->
hash(Term) ->
<<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
- lists:flatten(io_lib:format("\"~.36B\"",[SigInt])).
+ lists:flatten(io_lib:format("\"~.36B\"", [SigInt])).
module_exists(Module) ->
erlang:function_exported(Module, module_info, 0).
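
hash/1 above renders a 128-bit MD5 in base 36 via the "~.36B" control sequence, with the double quotes baked into the output so the result can be spliced directly into generated source (see version/2 in couch_epi_functions_gen). A smaller worked example:

    Hash = lists:flatten(io_lib:format("\"~.36B\"", [255])),
    %% Hash is "\"73\"" since 255 = 7 * 36 + 3.
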
diff --git a/src/couch_eval/src/couch_eval.erl b/src/couch_eval/src/couch_eval.erl
index a6471b4f5..affcf714e 100644
--- a/src/couch_eval/src/couch_eval.erl
+++ b/src/couch_eval/src/couch_eval.erl
@@ -10,10 +10,8 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_eval).
-
-export([
acquire_map_context/6,
release_map_context/1,
@@ -22,10 +20,8 @@
try_compile/4
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-type db_name() :: binary().
-type doc_id() :: binary().
-type ddoc_id() :: binary().
@@ -56,7 +52,6 @@
language := language()
}.
-
-callback acquire_map_context(context_opts()) -> {ok, any()} | {error, any()}.
-callback release_map_context(context()) -> ok | {error, any()}.
-callback map_docs(context(), [doc()]) -> {ok, [result()]} | {error, any()}.
@@ -64,15 +59,14 @@
-callback release_context(context()) -> ok | {error, any()}.
-callback try_compile(context(), function_type(), function_name(), function_src()) -> ok.
-
-spec acquire_map_context(
- db_name(),
- ddoc_id(),
- language(),
- sig(),
- lib(),
- map_funs()
- ) ->
+ db_name(),
+ ddoc_id(),
+ language(),
+ sig(),
+ lib(),
+ map_funs()
+) ->
{ok, context()}
| error({invalid_eval_api_mod, Language :: binary()})
| error({unknown_eval_api_language, Language :: binary()}).
@@ -93,55 +87,45 @@ acquire_map_context(DbName, DDocId, Language, Sig, Lib, MapFuns) ->
{error, Error}
end.
-
-spec release_map_context(context()) -> ok | {error, any()}.
release_map_context(nil) ->
ok;
-
release_map_context({ApiMod, Ctx}) ->
ApiMod:release_map_context(Ctx).
-
-spec map_docs(context(), [doc()]) -> {ok, result()} | {error, any()}.
map_docs({ApiMod, Ctx}, Docs) ->
ApiMod:map_docs(Ctx, Docs).
-
--spec with_context(with_context_opts(), function()) ->
- any()
+-spec with_context(with_context_opts(), function()) ->
+ any()
| error({invalid_eval_api_mod, Language :: binary()})
| error({unknown_eval_api_language, Language :: binary()}).
with_context(#{language := Language}, Fun) ->
{ok, Ctx} = acquire_context(Language),
- try
+ try
Fun(Ctx)
after
release_context(Ctx)
end.
-
-spec try_compile(context(), function_type(), function_name(), function_src()) -> ok.
try_compile({_ApiMod, _Ctx}, reduce, <<_/binary>>, disabled) ->
% Reduce functions may be disabled. Accept that as a valid configuration.
ok;
-
-try_compile({ApiMod, Ctx}, FuncType, FuncName, FuncSrc) ->
+try_compile({ApiMod, Ctx}, FuncType, FuncName, FuncSrc) ->
ApiMod:try_compile(Ctx, FuncType, FuncName, FuncSrc).
-
acquire_context(Language) ->
ApiMod = get_api_mod(Language),
{ok, Ctx} = ApiMod:acquire_context(),
{ok, {ApiMod, Ctx}}.
-
release_context(nil) ->
ok;
-
release_context({ApiMod, Ctx}) ->
ApiMod:release_context(Ctx).
-
get_api_mod(Language) when is_binary(Language) ->
try
LangStr = binary_to_list(Language),
@@ -158,6 +142,7 @@ get_api_mod(Language) when is_binary(Language) ->
_ -> list_to_existing_atom(ModStr);
undefined -> erlang:error({unknown_eval_api_language, Language})
end
- catch error:badarg ->
- erlang:error({invalid_eval_api_mod, Language})
+ catch
+ error:badarg ->
+ erlang:error({invalid_eval_api_mod, Language})
end.
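
A usage sketch for the with_context/2 path reformatted above. The language value, function name, and source are hypothetical, the function_type atom is assumed to be map, and a backend for the language must be registered in config for get_api_mod/1 to resolve:

    ok = couch_eval:with_context(#{language => <<"javascript">>}, fun(Ctx) ->
        couch_eval:try_compile(Ctx, map, <<"my_map">>,
            <<"function(doc) { emit(doc._id, 1); }">>)
    end).
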
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache.erl b/src/couch_expiring_cache/src/couch_expiring_cache.erl
index f1ce20276..1c7eb6976 100644
--- a/src/couch_expiring_cache/src/couch_expiring_cache.erl
+++ b/src/couch_expiring_cache/src/couch_expiring_cache.erl
@@ -19,36 +19,51 @@
lookup/3
]).
-
-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
-
--spec insert(Name :: binary(), Key :: binary(), Value :: binary(),
- StaleTS :: ?TIME_UNIT(), ExpiresTS :: ?TIME_UNIT()) -> ok.
-insert(Name, Key, Value, StaleTS, ExpiresTS)
- when is_binary(Name), is_binary(Key), is_binary(Value),
- is_integer(StaleTS), is_integer(ExpiresTS) ->
+-spec insert(
+ Name :: binary(),
+ Key :: binary(),
+ Value :: binary(),
+ StaleTS :: ?TIME_UNIT(),
+ ExpiresTS :: ?TIME_UNIT()
+) -> ok.
+insert(Name, Key, Value, StaleTS, ExpiresTS) when
+ is_binary(Name),
+ is_binary(Key),
+ is_binary(Value),
+ is_integer(StaleTS),
+ is_integer(ExpiresTS)
+->
insert(undefined, Name, Key, Value, StaleTS, ExpiresTS).
-
--spec insert(Tx :: jtx() | undefined, Name :: binary(), Key :: binary(),
- Value :: binary(), StaleTS :: ?TIME_UNIT(), ExpiresTS :: ?TIME_UNIT()) -> ok.
+-spec insert(
+ Tx :: jtx() | undefined,
+ Name :: binary(),
+ Key :: binary(),
+ Value :: binary(),
+ StaleTS :: ?TIME_UNIT(),
+ ExpiresTS :: ?TIME_UNIT()
+) -> ok.
-dialyzer({no_return, insert/6}).
-insert(Tx, Name, Key, Value, StaleTS, ExpiresTS)
- when is_binary(Name), is_binary(Key), is_binary(Value),
- is_integer(StaleTS), is_integer(ExpiresTS) ->
+insert(Tx, Name, Key, Value, StaleTS, ExpiresTS) when
+ is_binary(Name),
+ is_binary(Key),
+ is_binary(Value),
+ is_integer(StaleTS),
+ is_integer(ExpiresTS)
+->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_expiring_cache_fdb:insert(
- JTx, Name, Key, Value, StaleTS, ExpiresTS)
+ JTx, Name, Key, Value, StaleTS, ExpiresTS
+ )
end).
-
-spec lookup(Name :: binary(), Key :: binary()) ->
not_found | {fresh, Val :: binary()} | {stale, Val :: binary()} | expired.
lookup(Name, Key) when is_binary(Name), is_binary(Key) ->
lookup(undefined, Name, Key).
-
-spec lookup(Tx :: jtx(), Name :: binary(), Key :: binary()) ->
not_found | {fresh, Val :: binary()} | {stale, Val :: binary()} | expired.
lookup(Tx, Name, Key) when is_binary(Name), is_binary(Key) ->
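
Putting the reflowed specs above to work; cache name, key, and value are hypothetical, timestamps are in milliseconds per the fdb module below, and a configured FoundationDB backend is assumed:

    Now = erlang:system_time(millisecond),
    ok = couch_expiring_cache:insert(<<"demo_cache">>, <<"k">>, <<"v">>,
        Now + 30000, Now + 60000),
    {fresh, <<"v">>} = couch_expiring_cache:lookup(<<"demo_cache">>, <<"k">>).
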
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl b/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
index ebc97b926..75a85af47 100644
--- a/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
+++ b/src/couch_expiring_cache/src/couch_expiring_cache_fdb.erl
@@ -20,25 +20,27 @@
clear_range_to/3
]).
-
-define(PK, 1).
-define(EXP, 2).
-
-include_lib("fabric/include/fabric2.hrl").
-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
-include_lib("kernel/include/logger.hrl").
-
% Data model
% see: https://forums.foundationdb.org/t/designing-key-value-expiration-in-fdb/156
%
% (?EXPIRING_CACHE, Name, ?PK, Key) := (Val, StaleTS, ExpiresTS)
% (?EXPIRING_CACHE, Name, ?EXP, ExpiresTS, Key) := ()
-
--spec insert(JTx :: jtx(), Name :: binary(), Key :: binary(), Value :: binary(),
- StaleTS :: millisecond(), ExpiresTS :: millisecond()) -> ok.
+-spec insert(
+ JTx :: jtx(),
+ Name :: binary(),
+ Key :: binary(),
+ Value :: binary(),
+ StaleTS :: millisecond(),
+ ExpiresTS :: millisecond()
+) -> ok.
insert(#{jtx := true} = JTx, Name, Key, Val, StaleTS, ExpiresTS) ->
#{tx := Tx, layer_prefix := LayerPrefix} = couch_jobs_fdb:get_jtx(JTx),
PK = primary_key(Name, Key, LayerPrefix),
@@ -59,7 +61,6 @@ insert(#{jtx := true} = JTx, Name, Key, Val, StaleTS, ExpiresTS) ->
XV = erlfdb_tuple:pack({}),
ok = erlfdb:set(Tx, XK, XV).
-
-spec lookup(JTx :: jtx(), Name :: binary(), Key :: binary()) ->
not_found | {fresh, Val :: binary()} | {stale, Val :: binary()} | expired.
lookup(#{jtx := true} = JTx, Name, Key) ->
@@ -77,7 +78,6 @@ lookup(#{jtx := true} = JTx, Name, Key) ->
end
end.
-
-spec clear_all(Name :: binary()) ->
ok.
clear_all(Name) ->
@@ -87,24 +87,36 @@ clear_all(Name) ->
erlfdb:clear_range_startswith(Tx, NamePrefix)
end).
-
--spec clear_range_to(Name :: binary(), EndTS :: millisecond(),
- Limit :: non_neg_integer()) ->
- OldestTS :: ?TIME_UNIT.
+-spec clear_range_to(
+ Name :: binary(),
+ EndTS :: millisecond(),
+ Limit :: non_neg_integer()
+) ->
+ OldestTS :: ?TIME_UNIT.
clear_range_to(Name, EndTS, Limit) when Limit > 0 ->
- fold_range(Name, EndTS, Limit,
+ fold_range(
+ Name,
+ EndTS,
+ Limit,
fun(Tx, PK, XK, _Key, ExpiresTS, Acc) ->
ok = erlfdb:clear(Tx, PK),
ok = erlfdb:clear(Tx, XK),
oldest_ts(ExpiresTS, Acc)
- end, 0).
-
-
--spec get_range_to(Name :: binary(), EndTS :: millisecond(),
- Limit :: non_neg_integer()) ->
- [{Key :: binary(), Val :: binary()}].
+ end,
+ 0
+ ).
+
+-spec get_range_to(
+ Name :: binary(),
+ EndTS :: millisecond(),
+ Limit :: non_neg_integer()
+) ->
+ [{Key :: binary(), Val :: binary()}].
get_range_to(Name, EndTS, Limit) when Limit > 0 ->
- fold_range(Name, EndTS, Limit,
+ fold_range(
+ Name,
+ EndTS,
+ Limit,
fun(Tx, PK, _XK, Key, _ExpiresTS, Acc) ->
case get_val(Tx, PK) of
not_found ->
@@ -114,41 +126,43 @@ get_range_to(Name, EndTS, Limit) when Limit > 0 ->
Val ->
[{Key, Val} | Acc]
end
- end, []).
-
+ end,
+ []
+ ).
%% Private
-
fold_range(Name, EndTS, Limit, Fun, Acc0) when Limit > 0 ->
fabric2_fdb:transactional(fun(Tx) ->
{LayerPrefix, ExpiresPrefix} = prefixes(Tx, Name),
- fabric2_fdb:fold_range({tx, Tx}, ExpiresPrefix, fun({XK, _XV}, Acc) ->
- {ExpiresTS, Key} = erlfdb_tuple:unpack(XK, ExpiresPrefix),
- PK = primary_key(Name, Key, LayerPrefix),
- Fun(Tx, PK, XK, Key, ExpiresTS, Acc)
- end, Acc0, [{end_key, EndTS}, {limit, Limit}])
+ fabric2_fdb:fold_range(
+ {tx, Tx},
+ ExpiresPrefix,
+ fun({XK, _XV}, Acc) ->
+ {ExpiresTS, Key} = erlfdb_tuple:unpack(XK, ExpiresPrefix),
+ PK = primary_key(Name, Key, LayerPrefix),
+ Fun(Tx, PK, XK, Key, ExpiresTS, Acc)
+ end,
+ Acc0,
+ [{end_key, EndTS}, {limit, Limit}]
+ )
end).
-
-oldest_ts(TS, 0) -> TS; % handle initial Acc = 0 case
+% handle initial Acc = 0 case
+oldest_ts(TS, 0) -> TS;
oldest_ts(TS, OldestTS) -> min(TS, OldestTS).
-
primary_key(Name, Key, Prefix) ->
erlfdb_tuple:pack({?EXPIRING_CACHE, Name, ?PK, Key}, Prefix).
-
expiry_key(ExpiresTS, Name, Key, Prefix) ->
erlfdb_tuple:pack({?EXPIRING_CACHE, Name, ?EXP, ExpiresTS, Key}, Prefix).
-
prefixes(Tx, Name) ->
Layer = fabric2_fdb:get_dir(Tx),
Expires = erlfdb_tuple:pack({?EXPIRING_CACHE, Name, ?EXP}, Layer),
{Layer, Expires}.
-
get_val(Tx, PK) ->
case erlfdb:wait(erlfdb:get(Tx, PK)) of
not_found ->
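
To restate the data-model comment above: each entry gets a primary row holding (Val, StaleTS, ExpiresTS) plus an index row ordered by ExpiresTS, which is what lets clear_range_to/3 sweep expired entries with a single range read. lookup/3 then classifies a hit by comparing the current time against the two stored timestamps; the decision table, as a hypothetical standalone helper:

    classify(Now, StaleTS, _ExpiresTS) when Now < StaleTS -> fresh;
    classify(Now, _StaleTS, ExpiresTS) when Now < ExpiresTS -> stale;
    classify(_Now, _StaleTS, _ExpiresTS) -> expired.
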
diff --git a/src/couch_expiring_cache/src/couch_expiring_cache_server.erl b/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
index 9c0c89972..ed80ebc76 100644
--- a/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
+++ b/src/couch_expiring_cache/src/couch_expiring_cache_server.erl
@@ -30,20 +30,16 @@
code_change/3
]).
-
-define(DEFAULT_BATCH_SIZE, 1000).
-define(DEFAULT_PERIOD_MSEC, 5000).
-define(DEFAULT_MAX_JITTER_MSEC, 1000).
-
-include_lib("couch_expiring_cache/include/couch_expiring_cache.hrl").
-include_lib("kernel/include/logger.hrl").
-
start_link(Name, Opts) when is_atom(Name) ->
gen_server:start_link({local, Name}, ?MODULE, Opts#{name => Name}, []).
-
init(Opts) ->
DefaultCacheName = atom_to_binary(maps:get(name, Opts), utf8),
Period = maps:get(period, Opts, ?DEFAULT_PERIOD_MSEC),
@@ -57,21 +53,18 @@ init(Opts) ->
oldest_ts => 0,
elapsed => 0,
largest_elapsed => 0,
- lag => 0}}.
-
+ lag => 0
+ }}.
terminate(_, _) ->
ok.
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info(remove_expired, St) ->
#{
cache_name := Name,
@@ -83,8 +76,10 @@ handle_info(remove_expired, St) ->
} = St,
NowTS = now_ts(),
- OldestTS = max(OldestTS0,
- couch_expiring_cache_fdb:clear_range_to(Name, NowTS, BatchSize)),
+ OldestTS = max(
+ OldestTS0,
+ couch_expiring_cache_fdb:clear_range_to(Name, NowTS, BatchSize)
+ ),
Elapsed = now_ts() - NowTS,
{noreply, St#{
@@ -92,9 +87,8 @@ handle_info(remove_expired, St) ->
oldest_ts := OldestTS,
elapsed := Elapsed,
largest_elapsed := max(Elapsed, LargestElapsed),
- lag := NowTS - OldestTS}};
-
-
+ lag := NowTS - OldestTS
+ }};
handle_info({Ref, ready}, St) when is_reference(Ref) ->
    % Prevent crashing the server and the application
?LOG_ERROR(#{
@@ -104,24 +98,18 @@ handle_info({Ref, ready}, St) when is_reference(Ref) ->
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
-
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
now_ts() ->
{Mega, Sec, Micro} = os:timestamp(),
((Mega * 1000000) + Sec) * 1000 + Micro div 1000.
-
%% Private
-
schedule_remove_expired(Timeout, MaxJitter) ->
Jitter = max(Timeout div 2, MaxJitter),
Wait = Timeout + rand:uniform(max(1, Jitter)),
diff --git a/src/couch_jobs/src/couch_jobs.erl b/src/couch_jobs/src/couch_jobs.erl
index 1229fca23..90e31f229 100644
--- a/src/couch_jobs/src/couch_jobs.erl
+++ b/src/couch_jobs/src/couch_jobs.erl
@@ -52,24 +52,23 @@
get_type_timeout/1
]).
-
-include("couch_jobs.hrl").
-
-define(MIN_ACCEPT_WAIT_MSEC, 100).
-
%% Job Creation API
-spec add(jtx(), job_type(), job_id(), job_data()) -> ok | {error, any()}.
add(Tx, Type, JobId, JobData) ->
add(Tx, Type, JobId, JobData, 0).
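% Usage sketch (illustrative; the type, id, and data values are assumptions).
% Passing `undefined` for the first argument runs the call in its own
% transaction:
%
%     ok = couch_jobs:add(undefined, <<"index">>, <<"job-1">>,
%         #{<<"db">> => <<"mydb">>})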
-
-spec add(jtx(), job_type(), job_id(), job_data(), scheduled_time()) ->
ok | {error, any()}.
-add(Tx, Type, JobId, JobData, ScheduledTime) when is_binary(JobId),
- is_map(JobData), is_integer(ScheduledTime) ->
+add(Tx, Type, JobId, JobData, ScheduledTime) when
+ is_binary(JobId),
+ is_map(JobData),
+ is_integer(ScheduledTime)
+->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
case couch_jobs_fdb:add(JTx, Type, JobId, JobData, ScheduledTime) of
{ok, _, _, _} -> ok;
@@ -77,23 +76,21 @@ add(Tx, Type, JobId, JobData, ScheduledTime) when is_binary(JobId),
end
end).
-
-spec remove(jtx(), job_type(), job_id()) -> ok | {error, any()}.
remove(Tx, Type, JobId) when is_binary(JobId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs_fdb:remove(JTx, job(Type, JobId))
end).
-
-spec get_types(jtx()) -> [job_type()] | {error, any()}.
get_types(Tx) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs_fdb:get_types(JTx)
end).
-
--spec get_job_data(jtx(), job_type(), job_id()) -> {ok, job_data()} | {error,
- any()}.
+-spec get_job_data(jtx(), job_type(), job_id()) ->
+ {ok, job_data()}
+ | {error, any()}.
get_job_data(Tx, Type, JobId) when is_binary(JobId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
case couch_jobs_fdb:get_job_state_and_data(JTx, job(Type, JobId)) of
@@ -104,9 +101,9 @@ get_job_data(Tx, Type, JobId) when is_binary(JobId) ->
end
end).
-
--spec get_job_state(jtx(), job_type(), job_id()) -> {ok, job_state()} | {error,
- any()}.
+-spec get_job_state(jtx(), job_type(), job_id()) ->
+ {ok, job_state()}
+ | {error, any()}.
get_job_state(Tx, Type, JobId) when is_binary(JobId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
case couch_jobs_fdb:get_job_state_and_data(JTx, job(Type, JobId)) of
@@ -117,9 +114,9 @@ get_job_state(Tx, Type, JobId) when is_binary(JobId) ->
end
end).
-
--spec get_active_jobs_ids(jtx(), job_type()) -> [job_id()] | {error,
- any()}.
+-spec get_active_jobs_ids(jtx(), job_type()) ->
+ [job_id()]
+ | {error, any()}.
get_active_jobs_ids(Tx, Type) ->
SinceVS = {versionstamp, 0, 0},
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
@@ -127,22 +124,23 @@ get_active_jobs_ids(Tx, Type) ->
maps:keys(Since)
end).
-
-spec fold_jobs(jtx(), job_type(), fun(), any()) -> any().
fold_jobs(Tx, Type, Fun, UserAcc) when is_function(Fun, 5) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
- maps:fold(fun(JobId, {_Seq, JobState, DataEnc}, Acc) ->
- Data = couch_jobs_fdb:decode_data(DataEnc),
- Fun(JTx, JobId, JobState, Data, Acc)
- end, UserAcc, couch_jobs_fdb:get_jobs(JTx, Type))
+ maps:fold(
+ fun(JobId, {_Seq, JobState, DataEnc}, Acc) ->
+ Data = couch_jobs_fdb:decode_data(DataEnc),
+ Fun(JTx, JobId, JobState, Data, Acc)
+ end,
+ UserAcc,
+ couch_jobs_fdb:get_jobs(JTx, Type)
+ )
end).
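% Fold sketch (illustrative; the type value is an assumption): count jobs of
% a type by state. The fun must have arity 5, as checked above:
%
%     couch_jobs:fold_jobs(undefined, <<"index">>,
%         fun(_JTx, _JobId, State, _Data, Acc) ->
%             maps:update_with(State, fun(N) -> N + 1 end, 1, Acc)
%         end, #{})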
-
-spec pending_count(jtx(), job_type()) -> integer().
pending_count(Tx, Type) ->
pending_count(Tx, Type, #{}).
-
-spec pending_count(jtx(), job_type(), #{}) -> integer().
pending_count(Tx, Type, Opts) ->
MaxSTime = maps:get(max_sched_time, Opts, ?UNDEFINED_MAX_SCHEDULED_TIME),
@@ -151,21 +149,20 @@ pending_count(Tx, Type, Opts) ->
couch_jobs_pending:pending_count(JTx, Type, MaxSTime, Limit)
end).
-
%% Job processor API
-spec accept(job_type()) -> {ok, job(), job_data()} | {error, any()}.
accept(Type) ->
accept(Type, #{}).
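% Worker sketch (illustrative; the type and the data update are assumptions):
% accept/1 blocks until a pending job of the type can be taken, after which
% the worker does its work and marks the job finished:
%
%     {ok, Job, Data} = couch_jobs:accept(<<"index">>),
%     ok = couch_jobs:finish(undefined, Job, Data#{<<"ok">> => true})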
-
-spec accept(job_type(), job_accept_opts()) -> {ok, job()} | {error, any()}.
accept(Type, #{} = Opts) ->
NoSched = maps:get(no_schedule, Opts, false),
- MaxSchedTimeDefault = case NoSched of
- true -> 0;
- false -> ?UNDEFINED_MAX_SCHEDULED_TIME
- end,
+ MaxSchedTimeDefault =
+ case NoSched of
+ true -> 0;
+ false -> ?UNDEFINED_MAX_SCHEDULED_TIME
+ end,
MaxSchedTime = maps:get(max_sched_time, Opts, MaxSchedTimeDefault),
Timeout = maps:get(timeout, Opts, infinity),
case NoSched andalso MaxSchedTime =/= 0 of
@@ -175,69 +172,63 @@ accept(Type, #{} = Opts) ->
accept_loop(Type, NoSched, MaxSchedTime, Timeout)
end.
-
-spec finish(jtx(), job()) -> ok | {error, any()}.
finish(Tx, Job) ->
finish(Tx, Job, undefined).
-
-spec finish(jtx(), job(), job_data()) -> ok | {error, any()}.
finish(Tx, #{jlock := <<_/binary>>} = Job, JobData) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs_fdb:finish(JTx, Job, JobData)
end).
-
-spec resubmit(jtx(), job()) -> {ok, job()} | {error, any()}.
resubmit(Tx, Job) ->
resubmit(Tx, Job, ?UNDEFINED_MAX_SCHEDULED_TIME).
-
-spec resubmit(jtx(), job(), scheduled_time()) -> {ok, job()} | {error, any()}.
resubmit(Tx, #{jlock := <<_/binary>>} = Job, SchedTime) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs_fdb:resubmit(JTx, Job, SchedTime)
end).
-
-spec resubmit(jtx(), job(), scheduled_time(), job_data()) -> {ok, job()} | {error, any()}.
resubmit(Tx, #{jlock := <<_/binary>>} = Job, SchedTime, Data) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs_fdb:resubmit(JTx, Job, SchedTime, Data)
end).
-
-spec is_resubmitted(job()) -> true | false.
is_resubmitted(#{job := true} = Job) ->
maps:get(resubmit, Job, false).
-
-spec update(jtx(), job()) -> {ok, job()} | {error, any()}.
update(Tx, Job) ->
update(Tx, Job, undefined).
-
-spec update(jtx(), job(), job_data()) -> {ok, job()} | {error, any()}.
update(Tx, #{jlock := <<_/binary>>} = Job, JobData) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs_fdb:update(JTx, Job, JobData)
end).
-
%% Subscription API
% Receive events as messages. Wait for them using `wait/2,3`
% functions.
%
--spec subscribe(job_type(), job_id()) -> {ok, job_subscription(), job_state(),
- job_data()} | {ok, finished, job_data()} | {error, any()}.
+-spec subscribe(job_type(), job_id()) ->
+ {ok, job_subscription(), job_state(), job_data()}
+ | {ok, finished, job_data()}
+ | {error, any()}.
subscribe(Type, JobId) ->
subscribe(undefined, Type, JobId).
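% Subscription sketch (illustrative; the type, id, and timeout values are
% assumptions), matching the return shapes in the spec above:
%
%     case couch_jobs:subscribe(<<"index">>, <<"job-1">>) of
%         {ok, Sub, _State, _Data} ->
%             Result = couch_jobs:wait(Sub, finished, 30000),
%             couch_jobs:unsubscribe(Sub),
%             Result;
%         {ok, finished, Data} ->
%             Data;
%         {error, Error} ->
%             Error
%     end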
-
--spec subscribe(jtx(), job_type(), job_id()) -> {ok, job_subscription(),
- job_state(), job_data()} | {ok, finished, job_data()} | {error, any()}.
+-spec subscribe(jtx(), job_type(), job_id()) ->
+ {ok, job_subscription(), job_state(), job_data()}
+ | {ok, finished, job_data()}
+ | {error, any()}.
subscribe(Tx, Type, JobId) ->
StateData = couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
Job = #{job => true, type => Type, id => JobId},
@@ -258,7 +249,6 @@ subscribe(Tx, Type, JobId) ->
{error, Error}
end.
-
% Unsubscribe from getting notifications based on a particular subscription.
% Each subscription should be followed by its own unsubscription call. However,
% subscriber processes are also monitored and auto-unsubscribed if they exit.
@@ -272,7 +262,6 @@ unsubscribe({Server, Ref}) when is_pid(Server), is_reference(Ref) ->
flush_notifications(Ref)
end.
-
% Wait to receive job state updates
%
-spec wait(job_subscription() | [job_subscription()], timeout()) ->
@@ -281,18 +270,15 @@ wait({_, Ref}, Timeout) ->
receive
{?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data} ->
{Type, Id, State, couch_jobs_fdb:decode_data(Data)}
- after
- Timeout -> timeout
+ after Timeout -> timeout
end;
-
wait(Subs, Timeout) when is_list(Subs) ->
{Result, ResendQ} = wait_any(Subs, Timeout, []),
lists:foreach(fun(Msg) -> self() ! Msg end, ResendQ),
Result.
-
--spec wait(job_subscription() | [job_subscription()], job_state(), timeout())
- -> {job_type(), job_id(), job_state(), job_data()} | timeout.
+-spec wait(job_subscription() | [job_subscription()], job_state(), timeout()) ->
+ {job_type(), job_id(), job_state(), job_data()} | timeout.
wait({_, Ref} = Sub, State, Timeout) when is_atom(State) ->
receive
{?COUCH_JOBS_EVENT, Ref, Type, Id, MsgState, Data0} ->
@@ -303,17 +289,16 @@ wait({_, Ref} = Sub, State, Timeout) when is_atom(State) ->
false ->
wait(Sub, State, Timeout)
end
- after
- Timeout -> timeout
+ after Timeout -> timeout
end;
-
-wait(Subs, State, Timeout) when is_list(Subs),
- is_atom(State) ->
+wait(Subs, State, Timeout) when
+ is_list(Subs),
+ is_atom(State)
+->
{Result, ResendQ} = wait_any(Subs, State, Timeout, []),
lists:foreach(fun(Msg) -> self() ! Msg end, ResendQ),
Result.
-
%% Job type timeout API
% These functions manipulate the activity timeout for each job type.
@@ -325,33 +310,31 @@ set_type_timeout(Type, Timeout) ->
end),
ok = couch_jobs_server:force_check_types().
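% Usage sketch (illustrative; the type and value are assumptions). The stored
% value is interpreted as seconds by the activity monitor:
%
%     ok = couch_jobs:set_type_timeout(<<"index">>, 30),
%     30 = couch_jobs:get_type_timeout(<<"index">>)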
-
-spec clear_type_timeout(job_type()) -> ok.
clear_type_timeout(Type) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
couch_jobs_fdb:clear_type_timeout(JTx, Type)
end).
-
-spec get_type_timeout(job_type()) -> timeout().
get_type_timeout(Type) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
couch_jobs_fdb:get_type_timeout(JTx, Type)
end).
-
%% Private utilities
accept_loop(Type, NoSched, MaxSchedTime, Timeout) ->
- TxFun = fun(JTx) ->
+ TxFun = fun(JTx) ->
couch_jobs_fdb:accept(JTx, Type, MaxSchedTime, NoSched)
end,
- AcceptResult = try
- couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), TxFun)
- catch
- error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
- retry
- end,
+ AcceptResult =
+ try
+ couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), TxFun)
+ catch
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ retry
+ end,
case AcceptResult of
{ok, Job, Data} ->
{ok, Job, Data};
@@ -368,15 +351,12 @@ accept_loop(Type, NoSched, MaxSchedTime, Timeout) ->
end
end.
-
job(Type, JobId) ->
#{job => true, type => Type, id => JobId}.
-
wait_pending(PendingWatch, _MaxSTime, _UserTimeout = 0, _NoSched) ->
erlfdb:cancel(PendingWatch, [flush]),
{error, not_found};
-
wait_pending(PendingWatch, MaxSTime, UserTimeout, NoSched) ->
NowMSec = erlang:system_time(millisecond),
Timeout0 = max(?MIN_ACCEPT_WAIT_MSEC, MaxSTime * 1000 - NowMSec),
@@ -393,7 +373,6 @@ wait_pending(PendingWatch, MaxSTime, UserTimeout, NoSched) ->
retry
end.
-
wait_any(Subs, Timeout0, ResendQ) when is_list(Subs) ->
Timeout = limit_timeout(Timeout0, false),
receive
@@ -405,13 +384,12 @@ wait_any(Subs, Timeout0, ResendQ) when is_list(Subs) ->
Data = couch_jobs_fdb:decode_data(Data0),
{{Type, Id, State, Data}, ResendQ}
end
- after
- Timeout -> {timeout, ResendQ}
+ after Timeout -> {timeout, ResendQ}
end.
-
wait_any(Subs, State, Timeout0, ResendQ) when
- is_list(Subs) ->
+ is_list(Subs)
+->
Timeout = limit_timeout(Timeout0, false),
receive
{?COUCH_JOBS_EVENT, Ref, Type, Id, MsgState, Data0} = Msg ->
@@ -427,25 +405,19 @@ wait_any(Subs, State, Timeout0, ResendQ) when
wait_any(Subs, Timeout, ResendQ)
end
end
- after
- Timeout -> {timeout, ResendQ}
+ after Timeout -> {timeout, ResendQ}
end.
-
limit_timeout(_Timeout, true) ->
infinity;
-
limit_timeout(Timeout, false) when is_integer(Timeout), Timeout < 16#FFFFFFFF ->
Timeout;
-
limit_timeout(_Timeout, false) ->
infinity.
-
flush_notifications(Ref) ->
receive
{?COUCH_JOBS_EVENT, Ref, _, _, _} ->
flush_notifications(Ref)
- after
- 0 -> ok
+ after 0 -> ok
end.
diff --git a/src/couch_jobs/src/couch_jobs_activity_monitor.erl b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
index 5cebcf946..ea1be55db 100644
--- a/src/couch_jobs/src/couch_jobs_activity_monitor.erl
+++ b/src/couch_jobs/src/couch_jobs_activity_monitor.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-
-export([
start_link/1
]).
@@ -28,11 +27,9 @@
code_change/3
]).
-
-include("couch_jobs.hrl").
-include_lib("kernel/include/logger.hrl").
-
-record(st, {
jtx,
type,
@@ -42,18 +39,15 @@
batch_size
}).
-
-define(MAX_JITTER_DEFAULT, "10000").
-define(INIT_BATCH_SIZE, "1000").
-define(BATCH_FACTOR, "0.75").
-define(BATCH_INCREMENT, "100").
-define(MISSING_TIMEOUT_CHECK, 5000).
-
start_link(Type) ->
gen_server:start_link(?MODULE, [Type], []).
-
%% gen_server callbacks
init([Type]) ->
@@ -64,37 +58,33 @@ init([Type]) ->
},
{ok, schedule_check(St)}.
-
terminate(_, _St) ->
ok.
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info(check_activity, St) ->
- St1 = try
- check_activity(St)
- catch
- error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
- ?LOG_ERROR(#{
- what => erlfdb_error,
- job_type => St#st.type,
- error_code => Err,
- details => "possible overload condition"
- }),
- LogMsg = "~p : type:~p got ~p error, possibly from overload",
- couch_log:error(LogMsg, [?MODULE, St#st.type, Err]),
- St
- end,
+ St1 =
+ try
+ check_activity(St)
+ catch
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ ?LOG_ERROR(#{
+ what => erlfdb_error,
+ job_type => St#st.type,
+ error_code => Err,
+ details => "possible overload condition"
+ }),
+ LogMsg = "~p : type:~p got ~p error, possibly from overload",
+ couch_log:error(LogMsg, [?MODULE, St#st.type, Err]),
+ St
+ end,
St2 = schedule_check(St1),
{noreply, St2};
-
handle_info({Ref, ready}, St) when is_reference(Ref) ->
    % Don't crash; that would take down couch_jobs_server and the whole
    % application. Proper cleanup should eventually be done in the
    % erlfdb:wait timeout code.
@@ -105,26 +95,21 @@ handle_info({Ref, ready}, St) when is_reference(Ref) ->
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
% Private helper functions
check_activity(#st{jtx = JTx, type = Type, vs = not_found} = St) ->
St#st{vs = get_activity_vs(JTx, Type)};
-
check_activity(#st{} = St) ->
#st{jtx = JTx, type = Type, vs = VS, batch_size = BatchSize} = St,
NewBatchSize = re_enqueue_inactive(JTx, Type, VS, BatchSize),
St#st{vs = get_activity_vs(JTx, Type), batch_size = NewBatchSize}.
-
get_timeout_msec(JTx, Type) ->
TimeoutVal = couch_jobs_fdb:tx(JTx, fun(JTx1) ->
couch_jobs_fdb:get_type_timeout(JTx1, Type)
@@ -134,37 +119,37 @@ get_timeout_msec(JTx, Type) ->
ValSeconds -> timer:seconds(ValSeconds)
end.
-
schedule_check(#st{jtx = JTx, type = Type, timeout = OldTimeout} = St) ->
% Reset versionstamp if timeout changed.
- St1 = case get_timeout_msec(JTx, Type) of
- not_found ->
- St#st{vs = not_found, timeout = ?MISSING_TIMEOUT_CHECK};
- OldTimeout ->
- St;
- NewTimeout ->
- St#st{vs = not_found, timeout = NewTimeout}
- end,
+ St1 =
+ case get_timeout_msec(JTx, Type) of
+ not_found ->
+ St#st{vs = not_found, timeout = ?MISSING_TIMEOUT_CHECK};
+ OldTimeout ->
+ St;
+ NewTimeout ->
+ St#st{vs = not_found, timeout = NewTimeout}
+ end,
#st{timeout = Timeout} = St1,
MaxJitter = min(Timeout div 2, get_max_jitter_msec()),
Wait = Timeout + rand:uniform(max(1, MaxJitter)),
St1#st{tref = erlang:send_after(Wait, self(), check_activity)}.
-
re_enqueue_inactive(JTx, Type, VS, BatchSize) ->
- Result = try
- couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- Opts = [{limit, BatchSize}],
- JobIds = couch_jobs_fdb:get_inactive_since(JTx1, Type, VS, Opts),
- couch_jobs_fdb:re_enqueue_inactive(JTx1, Type, JobIds),
- length(JobIds)
- end)
- catch
- error:{erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE} ->
- failed;
- error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
- failed
- end,
+ Result =
+ try
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ Opts = [{limit, BatchSize}],
+ JobIds = couch_jobs_fdb:get_inactive_since(JTx1, Type, VS, Opts),
+ couch_jobs_fdb:re_enqueue_inactive(JTx1, Type, JobIds),
+ length(JobIds)
+ end)
+ catch
+ error:{erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE} ->
+ failed;
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ failed
+ end,
case Result of
JobCnt when is_integer(JobCnt), JobCnt < BatchSize ->
BatchSize;
@@ -176,28 +161,31 @@ re_enqueue_inactive(JTx, Type, VS, BatchSize) ->
re_enqueue_inactive(JTx, Type, VS, NewBatchSize)
end.
-
get_activity_vs(JTx, Type) ->
couch_jobs_fdb:tx(JTx, fun(JTx1) ->
couch_jobs_fdb:get_activity_vs(JTx1, Type)
end).
-
-get_max_jitter_msec()->
- couch_jobs_util:get_non_neg_int(activity_monitor_max_jitter_msec,
- ?MAX_JITTER_DEFAULT).
-
+get_max_jitter_msec() ->
+ couch_jobs_util:get_non_neg_int(
+ activity_monitor_max_jitter_msec,
+ ?MAX_JITTER_DEFAULT
+ ).
init_batch_size() ->
- couch_jobs_util:get_non_neg_int(activity_monitor_init_batch_size,
- ?INIT_BATCH_SIZE).
-
+ couch_jobs_util:get_non_neg_int(
+ activity_monitor_init_batch_size,
+ ?INIT_BATCH_SIZE
+ ).
batch_increment() ->
- couch_jobs_util:get_non_neg_int(activity_monitor_batch_increment,
- ?BATCH_INCREMENT).
-
+ couch_jobs_util:get_non_neg_int(
+ activity_monitor_batch_increment,
+ ?BATCH_INCREMENT
+ ).
batch_factor() ->
- couch_jobs_util:get_float_0_1(activity_monitor_batch_factor,
- ?BATCH_FACTOR).
+ couch_jobs_util:get_float_0_1(
+ activity_monitor_batch_factor,
+ ?BATCH_FACTOR
+ ).
diff --git a/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl b/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl
index b11161a24..a49b73a1b 100644
--- a/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl
+++ b/src/couch_jobs/src/couch_jobs_activity_monitor_sup.erl
@@ -13,10 +13,8 @@
-module(couch_jobs_activity_monitor_sup).
-
-behaviour(supervisor).
-
-export([
start_link/0,
@@ -29,24 +27,22 @@
init/1
]).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
start_monitor(Type) ->
supervisor:start_child(?MODULE, [Type]).
-
stop_monitor(Pid) ->
supervisor:terminate_child(?MODULE, Pid).
-
get_child_pids() ->
- lists:map(fun({_Id, Pid, _Type, _Mod}) ->
- Pid
- end, supervisor:which_children(?MODULE)).
-
+ lists:map(
+ fun({_Id, Pid, _Type, _Mod}) ->
+ Pid
+ end,
+ supervisor:which_children(?MODULE)
+ ).
init(_) ->
Flags = #{
diff --git a/src/couch_jobs/src/couch_jobs_app.erl b/src/couch_jobs/src/couch_jobs_app.erl
index 720b94891..0b2769110 100644
--- a/src/couch_jobs/src/couch_jobs_app.erl
+++ b/src/couch_jobs/src/couch_jobs_app.erl
@@ -8,19 +8,15 @@
-module(couch_jobs_app).
-
-behaviour(application).
-
-export([
start/2,
stop/1
]).
-
start(_Type, []) ->
couch_jobs_sup:start_link().
-
stop([]) ->
ok.
diff --git a/src/couch_jobs/src/couch_jobs_fdb.erl b/src/couch_jobs/src/couch_jobs_fdb.erl
index cea138876..f7dde96ec 100644
--- a/src/couch_jobs/src/couch_jobs_fdb.erl
+++ b/src/couch_jobs/src/couch_jobs_fdb.erl
@@ -12,7 +12,6 @@
-module(couch_jobs_fdb).
-
-export([
add/5,
remove/2,
@@ -53,10 +52,8 @@
bump_metadata_version/1
]).
-
-include("couch_jobs.hrl").
-
-record(jv, {
seq,
jlock,
@@ -65,13 +62,11 @@
data
}).
-
-define(JOBS_ETS_KEY, jobs).
-define(MD_TIMESTAMP_ETS_KEY, md_timestamp).
-define(MD_VERSION_MAX_AGE_SEC, 10).
-define(PENDING_SEQ, 0).
-
% Data model
%
% (?JOBS, ?DATA, Type, JobId) = (Sequence, Lock, SchedTime, Resubmit, JobData)
@@ -86,7 +81,6 @@
% null - when the job is finished
% Versionstamp - when the job is running
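% Sketch of a data row under this model (illustrative; it mirrors the pack
% calls used by job_key/3 and set_job_val/3 below):
%
%     Key = erlfdb_tuple:pack({?DATA, Type, JobId}, Jobs),
%     Val = erlfdb_tuple:pack({Seq, JLock, STime, Resubmit, Data}),
%     erlfdb:set(Tx, Key, Val)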
-
% Job creation API
add(#{jtx := true} = JTx0, Type, JobId, Data, STime) ->
@@ -113,7 +107,6 @@ add(#{jtx := true} = JTx0, Type, JobId, Data, STime) ->
end
end.
-
remove(#{jtx := true} = JTx0, #{job := true} = Job) ->
#{tx := Tx} = JTx = get_jtx(JTx0),
#{type := Type, id := JobId} = Job,
@@ -129,7 +122,6 @@ remove(#{jtx := true} = JTx0, #{job := true} = Job) ->
{error, not_found}
end.
-
get_job_state_and_data(#{jtx := true} = JTx, #{job := true} = Job) ->
case get_job_val(get_jtx(JTx), Job) of
#jv{seq = Seq, jlock = JLock, data = Data} ->
@@ -138,32 +130,34 @@ get_job_state_and_data(#{jtx := true} = JTx, #{job := true} = Job) ->
{error, not_found}
end.
-
get_jobs(JTx, Type) ->
get_jobs(JTx, Type, fun(_) -> true end).
-
get_jobs(#{jtx := true} = JTx, Type, Filter) when is_function(Filter, 1) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Prefix = erlfdb_tuple:pack({?DATA, Type}, Jobs),
Opts = [{streaming_mode, want_all}],
Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
- lists:foldl(fun({K, V}, #{} = Acc) ->
- {JobId} = erlfdb_tuple:unpack(K, Prefix),
- case Filter(JobId) of
- true ->
- {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
- Acc#{JobId => {Seq, job_state(JLock, Seq), Data}};
- false ->
- Acc
- end
- end, #{}, Result).
-
+ lists:foldl(
+ fun({K, V}, #{} = Acc) ->
+ {JobId} = erlfdb_tuple:unpack(K, Prefix),
+ case Filter(JobId) of
+ true ->
+ {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
+ Acc#{JobId => {Seq, job_state(JLock, Seq), Data}};
+ false ->
+ Acc
+ end
+ end,
+ #{},
+ Result
+ ).
% Job processor API
-accept(#{jtx := true} = JTx0, Type, MaxSTime, NoSched)
- when is_integer(MaxSTime), is_boolean(NoSched) ->
+accept(#{jtx := true} = JTx0, Type, MaxSTime, NoSched) when
+ is_integer(MaxSTime), is_boolean(NoSched)
+->
#{jtx := true, tx := Tx} = JTx = get_jtx(JTx0),
case couch_jobs_pending:dequeue(JTx, Type, MaxSTime, NoSched) of
{not_found, PendingWatch} ->
@@ -185,17 +179,18 @@ accept(#{jtx := true} = JTx0, Type, MaxSTime, NoSched)
{ok, Job, decode_data(Data)}
end.
-
finish(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data) when
- is_map(Data) orelse Data =:= undefined ->
+ is_map(Data) orelse Data =:= undefined
+->
#{tx := Tx} = JTx = get_jtx(JTx0),
#{type := Type, jlock := JLock, id := JobId} = Job,
case get_job_or_halt(Tx, job_key(JTx, Job), JLock) of
#jv{seq = Seq, stime = STime, resubmit = Resubmit, data = OldData} ->
- NewData = case Data =:= undefined of
- true -> OldData;
- false -> Data
- end,
+ NewData =
+ case Data =:= undefined of
+ true -> OldData;
+ false -> Data
+ end,
try maybe_enqueue(JTx, Type, JobId, STime, Resubmit, NewData) of
ok ->
clear_activity(JTx, Type, Seq),
@@ -211,17 +206,17 @@ finish(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data) when
resubmit(JTx0, Job, NewSTime) ->
resubmit(JTx0, Job, NewSTime, undefined).
-
resubmit(#{jtx := true} = JTx0, #{job := true} = Job, NewSTime, NewData) ->
#{tx := Tx} = JTx = get_jtx(JTx0),
#{type := Type, id := JobId} = Job,
Key = job_key(JTx, Job),
case get_job_val(Tx, Key) of
#jv{seq = Seq, jlock = JLock, stime = OldSTime, data = Data} = JV ->
- STime = case NewSTime =:= undefined of
- true -> OldSTime;
- false -> NewSTime
- end,
+ STime =
+ case NewSTime =:= undefined of
+ true -> OldSTime;
+ false -> NewSTime
+ end,
case job_state(JLock, Seq) of
finished ->
ok = maybe_enqueue(JTx, Type, JobId, STime, true, NewData),
@@ -259,25 +254,31 @@ resubmit(#{jtx := true} = JTx0, #{job := true} = Job, NewSTime, NewData) ->
running ->
JV1 = JV#jv{stime = STime, resubmit = true},
set_job_val(Tx, Key, JV1),
- {ok, Job#{resubmit => true, stime => STime,
- state => running, seq => Seq, data => Data}}
+ {ok, Job#{
+ resubmit => true,
+ stime => STime,
+ state => running,
+ seq => Seq,
+ data => Data
+ }}
end;
not_found ->
{error, not_found}
end.
-
update(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data0) when
- is_map(Data0) orelse Data0 =:= undefined ->
+ is_map(Data0) orelse Data0 =:= undefined
+->
#{tx := Tx} = JTx = get_jtx(JTx0),
#{jlock := JLock, type := Type, id := JobId} = Job,
Key = job_key(JTx, Job),
case get_job_or_halt(Tx, Key, JLock) of
#jv{seq = Seq, stime = STime, resubmit = Resubmit} = JV0 ->
- Data = case Data0 =:= undefined of
- true -> JV0#jv.data;
- false -> Data0
- end,
+ Data =
+ case Data0 =:= undefined of
+ true -> JV0#jv.data;
+ false -> Data0
+ end,
JV = JV0#jv{seq = ?UNSET_VS, data = Data},
try set_job_val(Tx, Key, JV) of
ok ->
@@ -291,7 +292,6 @@ update(#{jtx := true} = JTx0, #{jlock := <<_/binary>>} = Job, Data0) when
{error, halt}
end.
-
% Type and activity monitoring API
set_type_timeout(#{jtx := true} = JTx, Type, Timeout) ->
@@ -300,13 +300,11 @@ set_type_timeout(#{jtx := true} = JTx, Type, Timeout) ->
Val = erlfdb_tuple:pack({Timeout}),
erlfdb:set(Tx, Key, Val).
-
clear_type_timeout(#{jtx := true} = JTx, Type) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
erlfdb:clear(Tx, Key).
-
get_type_timeout(#{jtx := true} = JTx, Type) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Key = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT, Type}, Jobs),
@@ -318,17 +316,18 @@ get_type_timeout(#{jtx := true} = JTx, Type) ->
Timeout
end.
-
get_types(#{jtx := true} = JTx) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Prefix = erlfdb_tuple:pack({?ACTIVITY_TIMEOUT}, Jobs),
Opts = [{streaming_mode, want_all}],
Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
- lists:map(fun({K, _V}) ->
- {Type} = erlfdb_tuple:unpack(K, Prefix),
- Type
- end, Result).
-
+ lists:map(
+ fun({K, _V}) ->
+ {Type} = erlfdb_tuple:unpack(K, Prefix),
+ Type
+ end,
+ Result
+ ).
get_activity_vs(#{jtx := true} = JTx, Type) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
@@ -341,7 +340,6 @@ get_activity_vs(#{jtx := true} = JTx, Type) ->
VS
end.
-
get_activity_vs_and_watch(#{jtx := true} = JTx, Type) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
@@ -355,7 +353,6 @@ get_activity_vs_and_watch(#{jtx := true} = JTx, Type) ->
{VS, Watch}
end.
-
get_active_since(#{jtx := true} = JTx, Type, Versionstamp, Opts) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
@@ -363,13 +360,16 @@ get_active_since(#{jtx := true} = JTx, Type, Versionstamp, Opts) ->
StartKeySel = erlfdb_key:first_greater_or_equal(StartKey),
{_, EndKey} = erlfdb_tuple:range({Type}, Prefix),
Future = erlfdb:get_range(Tx, StartKeySel, EndKey, Opts),
- {JobIdsData, LastSeq} = lists:mapfoldl(fun({K, V}, _PrevSeq) ->
- {Type, Seq} = erlfdb_tuple:unpack(K, Prefix),
- {erlfdb_tuple:unpack(V), Seq}
- end, Versionstamp, erlfdb:wait(Future)),
+ {JobIdsData, LastSeq} = lists:mapfoldl(
+ fun({K, V}, _PrevSeq) ->
+ {Type, Seq} = erlfdb_tuple:unpack(K, Prefix),
+ {erlfdb_tuple:unpack(V), Seq}
+ end,
+ Versionstamp,
+ erlfdb:wait(Future)
+ ),
{maps:from_list(JobIdsData), LastSeq}.
-
get_inactive_since(#{jtx := true} = JTx, Type, Versionstamp, Opts) ->
#{tx := Tx, jobs_path := Jobs} = get_jtx(JTx),
Prefix = erlfdb_tuple:pack({?ACTIVITY}, Jobs),
@@ -377,29 +377,33 @@ get_inactive_since(#{jtx := true} = JTx, Type, Versionstamp, Opts) ->
EndKey = erlfdb_tuple:pack({Type, Versionstamp}, Prefix),
EndKeySel = erlfdb_key:first_greater_than(EndKey),
Future = erlfdb:get_range(Tx, StartKey, EndKeySel, Opts),
- lists:map(fun({_K, V}) ->
- {JobId, _} = erlfdb_tuple:unpack(V),
- JobId
- end, erlfdb:wait(Future)).
-
+ lists:map(
+ fun({_K, V}) ->
+ {JobId, _} = erlfdb_tuple:unpack(V),
+ JobId
+ end,
+ erlfdb:wait(Future)
+ ).
re_enqueue_inactive(#{jtx := true} = JTx, Type, JobIds) when is_list(JobIds) ->
#{tx := Tx} = get_jtx(JTx),
- lists:foreach(fun(JobId) ->
- case get_job_val(Tx, job_key(JTx, Type, JobId)) of
- #jv{seq = Seq, stime = STime, data = Data} ->
- clear_activity(JTx, Type, Seq),
- maybe_enqueue(JTx, Type, JobId, STime, true, Data);
- not_found ->
- ok
- end
- end, JobIds),
+ lists:foreach(
+ fun(JobId) ->
+ case get_job_val(Tx, job_key(JTx, Type, JobId)) of
+ #jv{seq = Seq, stime = STime, data = Data} ->
+ clear_activity(JTx, Type, Seq),
+ maybe_enqueue(JTx, Type, JobId, STime, true, Data);
+ not_found ->
+ ok
+ end
+ end,
+ JobIds
+ ),
case length(JobIds) > 0 of
true -> update_watch(JTx, Type);
false -> ok
end.
-
% Cache initialization API. Called from the supervisor just to create the ETS
% table. It returns `ignore` to tell the supervisor it won't actually start any
% process, which is what we want here.
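% Child spec sketch (an assumption, not shown in this diff; the id is
% hypothetical): a child whose start function returns `ignore` creates the
% table once without adding a process to the supervision tree:
%
%     #{id => couch_jobs_fdb, start => {couch_jobs_fdb, init_cache, []}}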
@@ -409,7 +413,6 @@ init_cache() ->
ets:new(?MODULE, [public, named_table] ++ ConcurrencyOpts),
ignore.
-
% Functions to encode / decode JobData
%
encode_data(#{} = JobData) ->
@@ -423,17 +426,13 @@ encode_data(#{} = JobData) ->
error({json_encoding_error, Error})
end.
-
decode_data(not_found) ->
not_found;
-
decode_data(#{} = JobData) ->
JobData;
-
decode_data(<<_/binary>> = JobData) ->
jiffy:decode(JobData, [dedupe_keys, return_maps]).
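% Round-trip sketch (illustrative; the map contents are an assumption):
%
%     Data = #{<<"retries">> => 2},
%     Data = couch_jobs_fdb:decode_data(couch_jobs_fdb:encode_data(Data))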
-
% Cached job transaction object. This object wraps a transaction and caches
% the directory lookup path and the metadata version. The function can be used
% from inside or outside the transaction. When used from a transaction it will
@@ -442,10 +441,8 @@ decode_data(<<_/binary>> = JobData) ->
get_jtx() ->
get_jtx(undefined).
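% Usage sketch (illustrative), following the pattern used throughout
% couch_jobs: fetch the cached JTx, then run a function transactionally:
%
%     couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
%         couch_jobs_fdb:get_types(JTx)
%     end)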
-
get_jtx(#{tx := Tx} = _TxDb) ->
get_jtx(Tx);
-
get_jtx(undefined = _Tx) ->
case ets:lookup(?MODULE, ?JOBS_ETS_KEY) of
[{_, #{} = JTx}] ->
@@ -454,7 +451,6 @@ get_jtx(undefined = _Tx) ->
JTx = update_jtx_cache(init_jtx(undefined)),
JTx#{tx := undefined}
end;
-
get_jtx({erlfdb_transaction, _} = Tx) ->
case ets:lookup(?MODULE, ?JOBS_ETS_KEY) of
[{_, #{} = JTx}] ->
@@ -463,14 +459,12 @@ get_jtx({erlfdb_transaction, _} = Tx) ->
update_jtx_cache(init_jtx(Tx))
end.
-
% Transaction processing to be used with couch jobs' specific transaction
% contexts
%
tx(#{jtx := true} = JTx, Fun) when is_function(Fun, 1) ->
fabric2_fdb:transactional(JTx, Fun).
-
% Debug and testing API
get_job(Type, JobId) ->
@@ -494,22 +488,23 @@ get_job(Type, JobId) ->
end
end).
-
get_jobs() ->
fabric2_fdb:transactional(fun(Tx) ->
#{jobs_path := Jobs} = init_jtx(Tx),
Prefix = erlfdb_tuple:pack({?DATA}, Jobs),
Opts = [{streaming_mode, want_all}],
Result = erlfdb:wait(erlfdb:get_range_startswith(Tx, Prefix, Opts)),
- lists:map(fun({K, V}) ->
- {Type, JobId} = erlfdb_tuple:unpack(K, Prefix),
- {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
- JobState = job_state(JLock, Seq),
- {Type, JobId, JobState, decode_data(Data)}
- end, Result)
+ lists:map(
+ fun({K, V}) ->
+ {Type, JobId} = erlfdb_tuple:unpack(K, Prefix),
+ {Seq, JLock, _, _, Data} = erlfdb_tuple:unpack(V),
+ JobState = job_state(JLock, Seq),
+ {Type, JobId, JobState, decode_data(Data)}
+ end,
+ Result
+ )
end).
-
% Call this function if the top level "couchdb" FDB directory layer
% changes.
%
@@ -518,11 +513,9 @@ bump_metadata_version() ->
bump_metadata_version(Tx)
end).
-
bump_metadata_version(Tx) ->
erlfdb:set_versionstamped_value(Tx, ?COUCH_JOBS_MD_VERSION, <<0:112>>).
-
% Private helper functions
maybe_enqueue(#{jtx := true} = JTx, Type, JobId, STime, Resubmit, Data) ->
@@ -544,18 +537,14 @@ maybe_enqueue(#{jtx := true} = JTx, Type, JobId, STime, Resubmit, Data) ->
end,
ok.
-
job_key(#{jtx := true, jobs_path := Jobs}, Type, JobId) ->
erlfdb_tuple:pack({?DATA, Type, JobId}, Jobs).
-
job_key(JTx, #{type := Type, id := JobId}) ->
job_key(JTx, Type, JobId).
-
get_job_val(#{jtx := true, tx := Tx} = JTx, #{job := true} = Job) ->
get_job_val(Tx, job_key(JTx, Job));
-
get_job_val(Tx = {erlfdb_transaction, _}, Key) ->
case erlfdb:wait(erlfdb:get(Tx, Key)) of
<<_/binary>> = Val ->
@@ -571,7 +560,6 @@ get_job_val(Tx = {erlfdb_transaction, _}, Key) ->
not_found
end.
-
set_job_val(Tx = {erlfdb_transaction, _}, Key, #jv{} = JV) ->
#jv{
seq = Seq,
@@ -580,10 +568,11 @@ set_job_val(Tx = {erlfdb_transaction, _}, Key, #jv{} = JV) ->
resubmit = Resubmit,
data = Data0
} = JV,
- Data = case Data0 of
- #{} -> encode_data(Data0);
- <<_/binary>> -> Data0
- end,
+ Data =
+ case Data0 of
+ #{} -> encode_data(Data0);
+ <<_/binary>> -> Data0
+ end,
case Seq of
?UNSET_VS ->
Val = erlfdb_tuple:pack_vs({Seq, JLock, STime, Resubmit, Data}),
@@ -594,7 +583,6 @@ set_job_val(Tx = {erlfdb_transaction, _}, Key, #jv{} = JV) ->
end,
ok.
-
get_job_or_halt(Tx, Key, JLock) ->
case get_job_val(Tx, Key) of
#jv{jlock = CurJLock} when CurJLock =/= JLock ->
@@ -605,37 +593,34 @@ get_job_or_halt(Tx, Key, JLock) ->
halt
end.
-
update_activity(#{jtx := true} = JTx, Type, JobId, Seq, Data0) ->
- #{tx := Tx, jobs_path := Jobs} = JTx,
+ #{tx := Tx, jobs_path := Jobs} = JTx,
case Seq =/= null of
true -> clear_activity(JTx, Type, Seq);
false -> ok
end,
Key = erlfdb_tuple:pack_vs({?ACTIVITY, Type, ?UNSET_VS}, Jobs),
- Data = case Data0 of
- #{} -> encode_data(Data0);
- <<_/binary>> -> Data0
- end,
+ Data =
+ case Data0 of
+ #{} -> encode_data(Data0);
+ <<_/binary>> -> Data0
+ end,
Val = erlfdb_tuple:pack({JobId, Data}),
erlfdb:set_versionstamped_key(Tx, Key, Val),
update_watch(JTx, Type).
-
clear_activity(#{jtx := true} = JTx, Type, Seq) ->
- #{tx := Tx, jobs_path := Jobs} = JTx,
+ #{tx := Tx, jobs_path := Jobs} = JTx,
Key = erlfdb_tuple:pack({?ACTIVITY, Type, Seq}, Jobs),
erlfdb:clear(Tx, Key).
-
update_watch(#{jtx := true} = JTx, Type) ->
- #{tx := Tx, jobs_path := Jobs} = JTx,
+ #{tx := Tx, jobs_path := Jobs} = JTx,
Key = erlfdb_tuple:pack({?WATCHES_ACTIVITY, Type}, Jobs),
Val = erlfdb_tuple:pack_vs({?UNSET_VS}),
erlfdb:set_versionstamped_value(Tx, Key, Val),
ok.
-
job_state(JLock, Seq) ->
case {JLock, Seq} of
{null, null} -> finished;
@@ -643,7 +628,6 @@ job_state(JLock, Seq) ->
{null, Seq} when Seq =/= null -> pending
end.
-
% This is a transaction context object similar to the Db = #{} one from
% fabric2_fdb. It is used to cache the jobs path directory (to avoid extra
% lookups on every operation) and to check for metadata changes (in case
@@ -651,7 +635,6 @@ job_state(JLock, Seq) ->
%
init_jtx(undefined) ->
fabric2_fdb:transactional(fun(Tx) -> init_jtx(Tx) end);
-
init_jtx({erlfdb_transaction, _} = Tx) ->
LayerPrefix = fabric2_fdb:get_dir(Tx),
Jobs = erlfdb_tuple:pack({?JOBS}, LayerPrefix),
@@ -666,7 +649,6 @@ init_jtx({erlfdb_transaction, _} = Tx) ->
md_version => get_metadata_version(Tx)
}.
-
ensure_current(#{jtx := true, tx := Tx} = JTx) ->
case get(?COUCH_JOBS_CURRENT) of
Tx ->
@@ -677,11 +659,9 @@ ensure_current(#{jtx := true, tx := Tx} = JTx) ->
JTx1
end.
-
get_metadata_version({erlfdb_transaction, _} = Tx) ->
erlfdb:wait(erlfdb:get_ss(Tx, ?COUCH_JOBS_MD_VERSION)).
-
update_current(#{tx := Tx, md_version := Version} = JTx) ->
case get_md_version_age(Version) of
Age when Age =< ?MD_VERSION_MAX_AGE_SEC ->
@@ -697,29 +677,25 @@ update_current(#{tx := Tx, md_version := Version} = JTx) ->
end
end.
-
update_jtx_cache(#{jtx := true, md_version := Version} = JTx) ->
CachedJTx = JTx#{tx := undefined},
ets:insert(?MODULE, {?JOBS_ETS_KEY, CachedJTx}),
update_md_version_timestamp(Version),
JTx.
-
get_md_version_age(Version) ->
- Timestamp = case ets:lookup(?MODULE, ?MD_TIMESTAMP_ETS_KEY) of
- [{_, Version, Ts}] -> Ts;
- _ -> 0
- end,
+ Timestamp =
+ case ets:lookup(?MODULE, ?MD_TIMESTAMP_ETS_KEY) of
+ [{_, Version, Ts}] -> Ts;
+ _ -> 0
+ end,
erlang:system_time(second) - Timestamp.
-
update_md_version_timestamp(Version) ->
Ts = erlang:system_time(second),
ets:insert(?MODULE, {?MD_TIMESTAMP_ETS_KEY, Version, Ts}).
-
update_job_data(Data, undefined) ->
Data;
-
update_job_data(_Data, NewData) ->
NewData.
diff --git a/src/couch_jobs/src/couch_jobs_notifier.erl b/src/couch_jobs/src/couch_jobs_notifier.erl
index b47834f2f..b9e323378 100644
--- a/src/couch_jobs/src/couch_jobs_notifier.erl
+++ b/src/couch_jobs/src/couch_jobs_notifier.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-
-export([
start_link/1,
subscribe/4,
@@ -31,33 +30,31 @@
format_status/2
]).
-
-include("couch_jobs.hrl").
-include_lib("kernel/include/logger.hrl").
-
-define(TYPE_MONITOR_HOLDOFF_DEFAULT, "50").
-define(TYPE_MONITOR_TIMEOUT_DEFAULT, "infinity").
-define(INIT_BATCH_SIZE, "1000").
-define(BATCH_FACTOR, "0.75").
-define(BATCH_INCREMENT, "100").
-
-record(st, {
jtx,
type,
monitor_pid,
- subs, % #{JobId => #{Ref => {Pid, State, Seq}}}
- pidmap, % #{{Jobid, Pid} => Ref}
- refmap, % #{Ref => JobId}
+ % #{JobId => #{Ref => {Pid, State, Seq}}}
+ subs,
+ % #{{Jobid, Pid} => Ref}
+ pidmap,
+ % #{Ref => JobId}
+ refmap,
batch_size
}).
-
start_link(Type) ->
gen_server:start_link(?MODULE, [Type], []).
-
subscribe(Type, JobId, State, Seq) ->
case couch_jobs_server:get_notifier_server(Type) of
{ok, Server} ->
@@ -68,11 +65,9 @@ subscribe(Type, JobId, State, Seq) ->
{error, Error}
end.
-
unsubscribe(Server, Ref) when is_reference(Ref) ->
gen_server:call(Server, {unsubscribe, Ref, self()}, infinity).
-
init([Type]) ->
JTx = couch_jobs_fdb:get_jtx(),
St = #st{
@@ -89,11 +84,9 @@ init([Type]) ->
Pid = couch_jobs_type_monitor:start(Type, VS, HoldOff, Timeout),
{ok, St#st{monitor_pid = Pid}}.
-
terminate(_, _St) ->
ok.
-
handle_call({subscribe, JobId, State, Seq, Pid}, _From, #st{} = St) ->
#st{pidmap = PidMap, refmap = RefMap} = St,
case maps:get({JobId, Pid}, PidMap, not_found) of
@@ -107,22 +100,17 @@ handle_call({subscribe, JobId, State, Seq, Pid}, _From, #st{} = St) ->
St1 = update_sub(JobId, Ref, Pid, State, Seq, St),
{reply, Ref, St1}
end;
-
handle_call({unsubscribe, Ref, Pid}, _From, #st{} = St) ->
{reply, ok, unsubscribe_int(Ref, Pid, St)};
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info({type_updated, VS}, St) ->
VSMax = flush_type_updated_messages(VS),
{noreply, try_notify_subscribers(VSMax, St)};
-
handle_info({Ref, ready}, St) when is_reference(Ref) ->
    % Don't crash; that would take down couch_jobs_server and the whole
    % application. Proper cleanup should eventually be done in the
    % erlfdb:wait timeout code.
@@ -133,56 +121,51 @@ handle_info({Ref, ready}, St) when is_reference(Ref) ->
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
-
handle_info({'DOWN', Ref, process, Pid, _}, #st{} = St) ->
{noreply, unsubscribe_int(Ref, Pid, St)};
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
format_status(_Opt, [_PDict, State]) ->
#st{
- jtx=JTx,
- type=Type,
- monitor_pid=MonitorPid,
- subs=Subs,
- pidmap=PidMap,
- refmap=RefMap
+ jtx = JTx,
+ type = Type,
+ monitor_pid = MonitorPid,
+ subs = Subs,
+ pidmap = PidMap,
+ refmap = RefMap
} = State,
- [{data, [{"State", [
- {jtx, JTx},
- {type, Type},
- {monitor_pid, MonitorPid},
- {subs, {map_size, maps:size(Subs)}},
- {pidmap, {map_size, maps:size(PidMap)}},
- {refmap, {map_size, maps:size(RefMap)}}
- ]}]}].
-
+ [
+ {data, [
+ {"State", [
+ {jtx, JTx},
+ {type, Type},
+ {monitor_pid, MonitorPid},
+ {subs, {map_size, maps:size(Subs)}},
+ {pidmap, {map_size, maps:size(PidMap)}},
+ {refmap, {map_size, maps:size(RefMap)}}
+ ]}
+ ]}
+ ].
update_subs(JobId, Refs, #st{subs = Subs} = St) when map_size(Refs) =:= 0 ->
St#st{subs = maps:remove(JobId, Subs)};
-
update_subs(JobId, Refs, #st{subs = Subs} = St) when map_size(Refs) > 0 ->
St#st{subs = Subs#{JobId => Refs}}.
-
update_sub(JobId, Ref, Pid, State, Seq, #st{subs = Subs} = St) ->
- Refs = maps:get(JobId, Subs, #{}),
+ Refs = maps:get(JobId, Subs, #{}),
update_subs(JobId, Refs#{Ref => {Pid, State, Seq}}, St).
-
remove_sub(JobId, Ref, #st{subs = Subs} = St) ->
case maps:get(JobId, Subs, not_found) of
not_found -> St;
#{} = Refs -> update_subs(JobId, maps:remove(Ref, Refs), St)
end.
-
unsubscribe_int(Id, Ref, Pid, #st{pidmap = PidMap, refmap = RefMap} = St) ->
St1 = remove_sub(Id, Ref, St),
erlang:demonitor(Ref, [flush]),
@@ -191,53 +174,53 @@ unsubscribe_int(Id, Ref, Pid, #st{pidmap = PidMap, refmap = RefMap} = St) ->
refmap = maps:remove(Ref, RefMap)
}.
-
unsubscribe_int(Ref, Pid, #st{refmap = RefMap} = St) ->
case maps:get(Ref, RefMap, not_found) of
not_found -> St;
Id -> unsubscribe_int(Id, Ref, Pid, St)
end.
-
flush_type_updated_messages(VSMax) ->
receive
{type_updated, VS} ->
flush_type_updated_messages(max(VS, VSMax))
- after
- 0 -> VSMax
+ after 0 -> VSMax
end.
-
get_jobs(#st{} = St, Ids) when is_list(Ids) ->
#st{jtx = JTx, type = Type, batch_size = BatchSize} = St,
{Jobs, NewBatchSize} = get_jobs_iter(JTx, Type, Ids, BatchSize, #{}),
{Jobs, St#st{batch_size = NewBatchSize}}.
-
get_jobs_iter(_Jtx, _Type, [], BatchSize, #{} = Acc) ->
{Acc, BatchSize};
-
get_jobs_iter(JTx, Type, Ids, BatchSize, #{} = Acc0) ->
- {BatchIds, RestIds} = case length(Ids) < BatchSize of
- true -> {Ids, []};
- false -> lists:split(BatchSize, Ids)
- end,
- Result = try
- couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- lists:foldl(fun(JobId, #{} = Acc) ->
- Job = #{job => true, type => Type, id => JobId},
- case couch_jobs_fdb:get_job_state_and_data(JTx1, Job) of
- {ok, Seq, State, Data} ->
- Acc#{JobId => {Seq, State, Data}};
- {error, not_found} ->
- Acc#{JobId => {null, not_found, not_found}}
- end
- end, Acc0, BatchIds)
- end)
- catch
- error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
- failed
- end,
+ {BatchIds, RestIds} =
+ case length(Ids) < BatchSize of
+ true -> {Ids, []};
+ false -> lists:split(BatchSize, Ids)
+ end,
+ Result =
+ try
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ lists:foldl(
+ fun(JobId, #{} = Acc) ->
+ Job = #{job => true, type => Type, id => JobId},
+ case couch_jobs_fdb:get_job_state_and_data(JTx1, Job) of
+ {ok, Seq, State, Data} ->
+ Acc#{JobId => {Seq, State, Data}};
+ {error, not_found} ->
+ Acc#{JobId => {null, not_found, not_found}}
+ end
+ end,
+ Acc0,
+ BatchIds
+ )
+ end)
+ catch
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ failed
+ end,
case Result of
#{} = AccF ->
NewBatchSize = BatchSize + batch_increment(),
@@ -247,39 +230,39 @@ get_jobs_iter(JTx, Type, Ids, BatchSize, #{} = Acc0) ->
get_jobs_iter(JTx, Type, Ids, NewBatchSize, Acc0)
end.
-
get_type_vs(#st{jtx = JTx, type = Type}) ->
couch_jobs_fdb:tx(JTx, fun(JTx1) ->
couch_jobs_fdb:get_activity_vs(JTx1, Type)
end).
-
% "Active since" is the set of jobs that have been active (running)
% and updated at least once since the given versionstamp. These are relatively
% cheap to find as it's just a range read in the ?ACTIVITY subspace.
%
get_active_since(#st{} = St, not_found) ->
{#{}, St};
-
get_active_since(#st{} = St, VS) ->
#st{jtx = JTx, type = Type, subs = Subs, batch_size = BatchSize} = St,
{Updated, NewBatchSize} = get_active_iter(JTx, Type, VS, BatchSize, #{}),
- UpdatedSubs = maps:map(fun(_JobId, Data) ->
- {VS, running, Data}
- end, maps:with(maps:keys(Subs), Updated)),
+ UpdatedSubs = maps:map(
+ fun(_JobId, Data) ->
+ {VS, running, Data}
+ end,
+ maps:with(maps:keys(Subs), Updated)
+ ),
{UpdatedSubs, St#st{batch_size = NewBatchSize}}.
-
get_active_iter(JTx, Type, VS, BatchSize, #{} = Acc) ->
Opts = [{limit, BatchSize}],
- Result = try
- couch_jobs_fdb:tx(JTx, fun(JTx1) ->
- couch_jobs_fdb:get_active_since(JTx1, Type, VS, Opts)
- end)
- catch
- error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
- failed
- end,
+ Result =
+ try
+ couch_jobs_fdb:tx(JTx, fun(JTx1) ->
+ couch_jobs_fdb:get_active_since(JTx1, Type, VS, Opts)
+ end)
+ catch
+ error:{Tag, Err} when ?COUCH_JOBS_RETRYABLE(Tag, Err) ->
+ failed
+ end,
case Result of
{Updated, _FinalSeq} when map_size(Updated) < BatchSize ->
{maps:merge(Acc, Updated), BatchSize};
@@ -293,7 +276,6 @@ get_active_iter(JTx, Type, VS, BatchSize, #{} = Acc) ->
get_active_iter(JTx, Type, VS, NewBatchSize, Acc)
end.
-
try_notify_subscribers(ActiveVS, #st{} = St) ->
try
notify_subscribers(ActiveVS, St)
@@ -302,10 +284,8 @@ try_notify_subscribers(ActiveVS, #st{} = St) ->
try_notify_subscribers(ActiveVS, St)
end.
-
notify_subscribers(_, #st{subs = Subs} = St) when map_size(Subs) =:= 0 ->
St;
-
notify_subscribers(ActiveVS, #st{} = St1) ->
    % First gather the easy (cheap) active jobs. Then, with those out of the way,
% inspect each job to get its state.
@@ -314,58 +294,73 @@ notify_subscribers(ActiveVS, #st{} = St1) ->
ActiveIds = maps:keys(Active),
Subs = St3#st.subs,
InactiveIds = maps:keys(maps:without(ActiveIds, Subs)),
- {Inactive, St4} = get_jobs(St3, InactiveIds),
+ {Inactive, St4} = get_jobs(St3, InactiveIds),
notify_job_ids(Inactive, St4).
-
notify_job_ids(#{} = Jobs, #st{type = Type} = St0) ->
- maps:fold(fun(Id, {VS, State, Data}, #st{} = StAcc) ->
- DoUnsub = lists:member(State, [finished, not_found]),
- maps:fold(fun
- (_Ref, {_Pid, running, OldVS}, St) when State =:= running,
- OldVS >= VS ->
- St;
- (Ref, {Pid, running, OldVS}, St) when State =:= running,
- OldVS < VS ->
- % For running state send updates even if state doesn't change
- notify(Pid, Ref, Type, Id, State, Data),
- update_sub(Id, Ref, Pid, running, VS, St);
- (_Ref, {_Pid, OldState, _VS}, St) when OldState =:= State ->
- St;
- (Ref, {Pid, _State, _VS}, St) ->
- notify(Pid, Ref, Type, Id, State, Data),
- case DoUnsub of
- true -> unsubscribe_int(Id, Ref, Pid, St);
- false -> update_sub(Id, Ref, Pid, State, VS, St)
- end
- end, StAcc, maps:get(Id, StAcc#st.subs, #{}))
- end, St0, Jobs).
-
+ maps:fold(
+ fun(Id, {VS, State, Data}, #st{} = StAcc) ->
+ DoUnsub = lists:member(State, [finished, not_found]),
+ maps:fold(
+ fun
+ (_Ref, {_Pid, running, OldVS}, St) when
+ State =:= running,
+ OldVS >= VS
+ ->
+ St;
+ (Ref, {Pid, running, OldVS}, St) when
+ State =:= running,
+ OldVS < VS
+ ->
+ % For running state send updates even if state doesn't change
+ notify(Pid, Ref, Type, Id, State, Data),
+ update_sub(Id, Ref, Pid, running, VS, St);
+ (_Ref, {_Pid, OldState, _VS}, St) when OldState =:= State ->
+ St;
+ (Ref, {Pid, _State, _VS}, St) ->
+ notify(Pid, Ref, Type, Id, State, Data),
+ case DoUnsub of
+ true -> unsubscribe_int(Id, Ref, Pid, St);
+ false -> update_sub(Id, Ref, Pid, State, VS, St)
+ end
+ end,
+ StAcc,
+ maps:get(Id, StAcc#st.subs, #{})
+ )
+ end,
+ St0,
+ Jobs
+ ).
notify(Pid, Ref, Type, Id, State, Data) ->
Pid ! {?COUCH_JOBS_EVENT, Ref, Type, Id, State, Data}.
-
get_holdoff() ->
- couch_jobs_util:get_non_neg_int(type_monitor_holdoff_msec,
- ?TYPE_MONITOR_HOLDOFF_DEFAULT).
-
+ couch_jobs_util:get_non_neg_int(
+ type_monitor_holdoff_msec,
+ ?TYPE_MONITOR_HOLDOFF_DEFAULT
+ ).
get_timeout() ->
- couch_jobs_util:get_timeout(type_monitor_timeout_msec,
- ?TYPE_MONITOR_TIMEOUT_DEFAULT).
-
+ couch_jobs_util:get_timeout(
+ type_monitor_timeout_msec,
+ ?TYPE_MONITOR_TIMEOUT_DEFAULT
+ ).
init_batch_size() ->
- couch_jobs_util:get_non_neg_int(notifier_init_batch_size,
- ?INIT_BATCH_SIZE).
-
+ couch_jobs_util:get_non_neg_int(
+ notifier_init_batch_size,
+ ?INIT_BATCH_SIZE
+ ).
batch_increment() ->
- couch_jobs_util:get_non_neg_int(notifier_batch_increment,
- ?BATCH_INCREMENT).
-
+ couch_jobs_util:get_non_neg_int(
+ notifier_batch_increment,
+ ?BATCH_INCREMENT
+ ).
batch_factor() ->
- couch_jobs_util:get_float_0_1(notifier_batch_factor,
- ?BATCH_FACTOR).
+ couch_jobs_util:get_float_0_1(
+ notifier_batch_factor,
+ ?BATCH_FACTOR
+ ).
diff --git a/src/couch_jobs/src/couch_jobs_notifier_sup.erl b/src/couch_jobs/src/couch_jobs_notifier_sup.erl
index 81d93493b..29e81b5ca 100644
--- a/src/couch_jobs/src/couch_jobs_notifier_sup.erl
+++ b/src/couch_jobs/src/couch_jobs_notifier_sup.erl
@@ -13,10 +13,8 @@
-module(couch_jobs_notifier_sup).
-
-behaviour(supervisor).
-
-export([
start_link/0,
@@ -29,24 +27,22 @@
init/1
]).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
start_notifier(Type) ->
supervisor:start_child(?MODULE, [Type]).
-
stop_notifier(Pid) ->
supervisor:terminate_child(?MODULE, Pid).
-
get_child_pids() ->
- lists:map(fun({_Id, Pid, _Type, _Mod}) ->
- Pid
- end, supervisor:which_children(?MODULE)).
-
+ lists:map(
+ fun({_Id, Pid, _Type, _Mod}) ->
+ Pid
+ end,
+ supervisor:which_children(?MODULE)
+ ).
init(_) ->
Flags = #{
diff --git a/src/couch_jobs/src/couch_jobs_pending.erl b/src/couch_jobs/src/couch_jobs_pending.erl
index a85f2fc5c..41e670efa 100644
--- a/src/couch_jobs/src/couch_jobs_pending.erl
+++ b/src/couch_jobs/src/couch_jobs_pending.erl
@@ -12,7 +12,6 @@
-module(couch_jobs_pending).
-
-export([
enqueue/4,
dequeue/4,
@@ -20,13 +19,10 @@
pending_count/4
]).
-
-include("couch_jobs.hrl").
-
-define(RANGE_LIMIT, 1024).
-
enqueue(#{jtx := true} = JTx, Type, STime, JobId) ->
#{tx := Tx, jobs_path := Jobs} = JTx,
Key = erlfdb_tuple:pack({?PENDING, Type, STime, JobId}, Jobs),
@@ -35,7 +31,6 @@ enqueue(#{jtx := true} = JTx, Type, STime, JobId) ->
erlfdb:add(Tx, WatchKey, 1),
ok.
-
dequeue(#{jtx := true} = JTx, Type, _, true) ->
#{tx := Tx, jobs_path := Jobs} = JTx,
Prefix = erlfdb_tuple:pack({?PENDING, Type, 0}, Jobs),
@@ -47,7 +42,6 @@ dequeue(#{jtx := true} = JTx, Type, _, true) ->
{JobId} = erlfdb_tuple:unpack(PendingKey, Prefix),
{ok, JobId}
end;
-
dequeue(#{jtx := true} = JTx, Type, MaxSTime, _) ->
#{tx := Tx, jobs_path := Jobs} = JTx,
{StartKeySel, EndKeySel} = get_range_selectors(JTx, Type, MaxSTime),
@@ -60,13 +54,11 @@ dequeue(#{jtx := true} = JTx, Type, MaxSTime, _) ->
{ok, JobId}
end.
-
remove(#{jtx := true} = JTx, Type, JobId, STime) ->
#{tx := Tx, jobs_path := Jobs} = JTx,
Key = erlfdb_tuple:pack({?PENDING, Type, STime, JobId}, Jobs),
erlfdb:clear(Tx, Key).
-
pending_count(#{jtx := true} = JTx, Type, MaxSTime, Limit) ->
#{tx := Tx} = JTx,
Opts = [
@@ -78,7 +70,6 @@ pending_count(#{jtx := true} = JTx, Type, MaxSTime, Limit) ->
FoldFun = fun(_Row, Cnt) -> Cnt + 1 end,
erlfdb:fold_range(Tx, StartSel, EndSel, FoldFun, 0, Opts).
-
%% Private functions
% Get pending key selectors, taking into account max scheduled time value.
@@ -90,7 +81,6 @@ get_range_selectors(#{jtx := true} = JTx, Type, MaxSTime) ->
EndKeySel = erlfdb_key:first_greater_or_equal(End),
{StartKeySel, EndKeySel}.
-
% Pick a random item from the range without reading in the keys first. However,
% the constraint is that IDs should look like random UUIDs
get_random_item(Tx, Prefix) ->
@@ -113,16 +103,14 @@ get_random_item(Tx, Prefix) ->
end
end.
-
get_before(Snapshot, Prefix, Id) ->
KSel = erlfdb_key:last_less_or_equal(erlfdb_tuple:pack({Id}, Prefix)),
PrefixSize = byte_size(Prefix),
case erlfdb:wait(erlfdb:get_key(Snapshot, KSel)) of
- <<Prefix:PrefixSize/binary, _/binary>> = Key -> {ok, Key};
+ <<Prefix:PrefixSize/binary, _/binary>> = Key -> {ok, Key};
_ -> {error, not_found}
end.
-
get_after(Snapshot, Prefix, Id) ->
KSel = erlfdb_key:first_greater_or_equal(erlfdb_tuple:pack({Id}, Prefix)),
PrefixSize = byte_size(Prefix),
@@ -131,7 +119,6 @@ get_after(Snapshot, Prefix, Id) ->
_ -> {error, not_found}
end.
-
% Pick a random key from the range snapshot. Then randomly pick a key to clear.
% Before clearing, ensure there is a read conflict on the key in case other
% workers have picked the same key.
@@ -156,7 +143,6 @@ clear_random_key_from_range(Tx, Start, End) ->
{ok, Key}
end.
-
get_pending_watch(#{jtx := true} = JTx, Type) ->
#{tx := Tx, jobs_path := Jobs} = couch_jobs_fdb:get_jtx(JTx),
Key = erlfdb_tuple:pack({?WATCHES_PENDING, Type}, Jobs),
diff --git a/src/couch_jobs/src/couch_jobs_server.erl b/src/couch_jobs/src/couch_jobs_server.erl
index afa1fba7a..9447d5372 100644
--- a/src/couch_jobs/src/couch_jobs_server.erl
+++ b/src/couch_jobs/src/couch_jobs_server.erl
@@ -18,7 +18,6 @@
-include("couch_jobs.hrl").
-
-export([
start_link/0,
get_notifier_server/1,
@@ -34,15 +33,12 @@
code_change/3
]).
-
-define(TYPE_CHECK_PERIOD_DEFAULT, "15000").
-define(MAX_JITTER_DEFAULT, "5000").
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, nil, []).
-
get_notifier_server(Type) ->
case get_type_pid_refs(Type) of
{{_, _}, {NotifierPid, _}} ->
@@ -57,11 +53,9 @@ get_notifier_server(Type) ->
end
end.
-
force_check_types() ->
gen_server:call(?MODULE, check_types, infinity).
-
init(_) ->
    % couch_jobs_server starts after the notifier and activity monitor
    % supervisors. If it restarts, there could be some stale notifier or
    % activity monitors. Kill
@@ -73,28 +67,22 @@ init(_) ->
schedule_check(),
{ok, nil}.
-
terminate(_, _St) ->
ok.
-
handle_call(check_types, _From, St) ->
check_types(),
{reply, ok, St};
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info(check_types, St) ->
check_types(),
schedule_check(),
{noreply, St};
-
handle_info({'DOWN', _Ref, process, Pid, Reason}, St) ->
?LOG_ERROR(#{
what => monitored_process_crash,
@@ -104,7 +92,6 @@ handle_info({'DOWN', _Ref, process, Pid, Reason}, St) ->
LogMsg = "~p : process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{stop, {unexpected_process_exit, Pid, Reason}, St};
-
handle_info({Ref, ready}, St) when is_reference(Ref) ->
    % Don't crash; that would take down couch_jobs_server and the whole
    % application. Proper cleanup should eventually be done in the
    % erlfdb:wait timeout code.
@@ -115,15 +102,12 @@ handle_info({Ref, ready}, St) when is_reference(Ref) ->
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:error(LogMsg, [?MODULE, Ref]),
{noreply, St};
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
check_types() ->
FdbTypes = fdb_types(),
EtsTypes = ets_types(),
@@ -132,19 +116,19 @@ check_types() ->
lists:foreach(fun(Type) -> start_monitors(Type) end, ToStart),
lists:foreach(fun(Type) -> stop_monitors(Type) end, ToStop).
-
start_monitors(Type) ->
- MonPidRef = case couch_jobs_activity_monitor_sup:start_monitor(Type) of
- {ok, Pid1} -> {Pid1, monitor(process, Pid1)};
- {error, Error1} -> error({failed_to_start_monitor, Type, Error1})
- end,
- NotifierPidRef = case couch_jobs_notifier_sup:start_notifier(Type) of
- {ok, Pid2} -> {Pid2, monitor(process, Pid2)};
- {error, Error2} -> error({failed_to_start_notifier, Type, Error2})
- end,
+ MonPidRef =
+ case couch_jobs_activity_monitor_sup:start_monitor(Type) of
+ {ok, Pid1} -> {Pid1, monitor(process, Pid1)};
+ {error, Error1} -> error({failed_to_start_monitor, Type, Error1})
+ end,
+ NotifierPidRef =
+ case couch_jobs_notifier_sup:start_notifier(Type) of
+ {ok, Pid2} -> {Pid2, monitor(process, Pid2)};
+ {error, Error2} -> error({failed_to_start_notifier, Type, Error2})
+ end,
ets:insert_new(?MODULE, {Type, MonPidRef, NotifierPidRef}).
-
stop_monitors(Type) ->
{{MonPid, MonRef}, {NotifierPid, NotifierRef}} = get_type_pid_refs(Type),
ok = couch_jobs_activity_monitor_sup:stop_monitor(MonPid),
@@ -153,18 +137,21 @@ stop_monitors(Type) ->
demonitor(NotifierRef, [flush]),
ets:delete(?MODULE, Type).
-
reset_monitors() ->
- lists:foreach(fun(Pid) ->
- couch_jobs_activity_monitor_sup:stop_monitor(Pid)
- end, couch_jobs_activity_monitor_sup:get_child_pids()).
-
+ lists:foreach(
+ fun(Pid) ->
+ couch_jobs_activity_monitor_sup:stop_monitor(Pid)
+ end,
+ couch_jobs_activity_monitor_sup:get_child_pids()
+ ).
reset_notifiers() ->
- lists:foreach(fun(Pid) ->
- couch_jobs_notifier_sup:stop_notifier(Pid)
- end, couch_jobs_notifier_sup:get_child_pids()).
-
+ lists:foreach(
+ fun(Pid) ->
+ couch_jobs_notifier_sup:stop_notifier(Pid)
+ end,
+ couch_jobs_notifier_sup:get_child_pids()
+ ).
get_type_pid_refs(Type) ->
case ets:lookup(?MODULE, Type) of
@@ -172,11 +159,9 @@ get_type_pid_refs(Type) ->
[] -> not_found
end.
-
ets_types() ->
lists:flatten(ets:match(?MODULE, {'$1', '_', '_'})).
-
fdb_types() ->
try
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
@@ -194,19 +179,20 @@ fdb_types() ->
[]
end.
-
schedule_check() ->
Timeout = get_period_msec(),
MaxJitter = max(Timeout div 2, get_max_jitter_msec()),
Wait = Timeout + rand:uniform(max(1, MaxJitter)),
erlang:send_after(Wait, self(), check_types).
-
get_period_msec() ->
- couch_jobs_util:get_non_neg_int(type_check_period_msec,
- ?TYPE_CHECK_PERIOD_DEFAULT).
-
+ couch_jobs_util:get_non_neg_int(
+ type_check_period_msec,
+ ?TYPE_CHECK_PERIOD_DEFAULT
+ ).
get_max_jitter_msec() ->
- couch_jobs_util:get_non_neg_int(type_check_max_jitter_msec,
- ?MAX_JITTER_DEFAULT).
+ couch_jobs_util:get_non_neg_int(
+ type_check_max_jitter_msec,
+ ?MAX_JITTER_DEFAULT
+ ).
diff --git a/src/couch_jobs/src/couch_jobs_sup.erl b/src/couch_jobs/src/couch_jobs_sup.erl
index d79023777..21f545e4e 100644
--- a/src/couch_jobs/src/couch_jobs_sup.erl
+++ b/src/couch_jobs/src/couch_jobs_sup.erl
@@ -13,10 +13,8 @@
-module(couch_jobs_sup).
-
-behaviour(supervisor).
-
-export([
start_link/0
]).
@@ -25,11 +23,9 @@
init/1
]).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
Flags = #{
strategy => rest_for_one,
diff --git a/src/couch_jobs/src/couch_jobs_type_monitor.erl b/src/couch_jobs/src/couch_jobs_type_monitor.erl
index 95aee4e7a..d03326c34 100644
--- a/src/couch_jobs/src/couch_jobs_type_monitor.erl
+++ b/src/couch_jobs/src/couch_jobs_type_monitor.erl
@@ -12,15 +12,12 @@
-module(couch_jobs_type_monitor).
-
-export([
start/4
]).
-
-include("couch_jobs.hrl").
-
-record(st, {
jtx,
type,
@@ -31,7 +28,6 @@
timeout
}).
-
start(Type, VS, HoldOff, Timeout) ->
Parent = self(),
spawn_link(fun() ->
@@ -46,12 +42,12 @@ start(Type, VS, HoldOff, Timeout) ->
})
end).
-
loop(#st{vs = VS, timeout = Timeout} = St) ->
- {St1, Watch} = case get_vs_and_watch(St) of
- {VS1, W} when VS1 =/= VS -> {notify(St#st{vs = VS1}), W};
- {VS, W} -> {St, W}
- end,
+ {St1, Watch} =
+ case get_vs_and_watch(St) of
+ {VS1, W} when VS1 =/= VS -> {notify(St#st{vs = VS1}), W};
+ {VS, W} -> {St, W}
+ end,
try
erlfdb:wait(Watch, [{timeout, Timeout}])
catch
@@ -61,7 +57,6 @@ loop(#st{vs = VS, timeout = Timeout} = St) ->
end,
loop(St1).
-
notify(#st{} = St) ->
#st{holdoff = HoldOff, parent = Pid, timestamp = Ts, vs = VS} = St,
Now = erlang:system_time(millisecond),
@@ -74,7 +69,6 @@ notify(#st{} = St) ->
Pid ! {type_updated, VS},
St#st{timestamp = Now}.
-
get_vs_and_watch(#st{} = St) ->
#st{jtx = JTx, type = Type, holdoff = HoldOff} = St,
try
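
Note: loop/1 above notifies the parent only when the observed versionstamp changes, then blocks on an erlfdb watch with a timeout. A self-contained sketch of that notify-on-change loop, with the FDB watch replaced by a plain receive so it runs without FoundationDB (module name hypothetical):

    -module(vs_loop_demo).
    -export([start/1]).

    start(Parent) ->
        spawn_link(fun() -> loop(Parent, 0) end).

    loop(Parent, VS) ->
        receive
            {vs, VS1} when VS1 =/= VS ->
                %% mirrors notify/1 sending {type_updated, VS} to the parent
                Parent ! {type_updated, VS1},
                loop(Parent, VS1);
            {vs, VS} ->
                loop(Parent, VS)
        after 5000 ->
            loop(Parent, VS)
        end.
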
diff --git a/src/couch_jobs/src/couch_jobs_util.erl b/src/couch_jobs/src/couch_jobs_util.erl
index 747ab60d8..32d238ebd 100644
--- a/src/couch_jobs/src/couch_jobs_util.erl
+++ b/src/couch_jobs/src/couch_jobs_util.erl
@@ -12,47 +12,45 @@
-module(couch_jobs_util).
-
-export([
get_non_neg_int/2,
get_float_0_1/2,
get_timeout/2
]).
-
get_non_neg_int(Key, Default) when is_atom(Key), is_list(Default) ->
StrVal = config:get("couch_jobs", atom_to_list(Key), Default),
non_neg_int(Key, StrVal).
-
get_float_0_1(Key, Default) when is_atom(Key), is_list(Default) ->
StrVal = config:get("couch_jobs", atom_to_list(Key), Default),
float_0_1(Key, StrVal).
-
get_timeout(Key, Default) when is_atom(Key), is_list(Default) ->
case config:get("couch_jobs", atom_to_list(Key), Default) of
"infinity" -> infinity;
StrVal -> non_neg_int(Key, StrVal)
end.
-
non_neg_int(Name, Str) ->
try
Val = list_to_integer(Str),
true = Val > 0,
Val
- catch _:_ ->
- erlang:error({invalid_non_neg_integer, {couch_jobs, Name, Str}})
+ catch
+ _:_ ->
+ erlang:error({invalid_non_neg_integer, {couch_jobs, Name, Str}})
end.
-
float_0_1(Name, Str) ->
- Val = try
- list_to_float(Str)
- catch error:badarg ->
- erlang:error({invalid_float, {couch_jobs, Name, Str}})
- end,
- if Val >= 0.0 andalso Val =< 1.0 -> Val; true ->
- erlang:error({float_out_of_range, {couch_jobs, Name, Str}})
+ Val =
+ try
+ list_to_float(Str)
+ catch
+ error:badarg ->
+ erlang:error({invalid_float, {couch_jobs, Name, Str}})
+ end,
+ if
+ Val >= 0.0 andalso Val =< 1.0 -> Val;
+ true -> erlang:error({float_out_of_range, {couch_jobs, Name, Str}})
end.
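
Note: two details in couch_jobs_util worth flagging: get_timeout/2 special-cases the string "infinity", and non_neg_int/2, despite its name, asserts Val > 0 and therefore rejects zero. A standalone sketch of the parse-then-range-check idiom from float_0_1/2, detached from the config application so it runs on its own:

    Parse = fun(Str) ->
        Val =
            try
                list_to_float(Str)
            catch
                error:badarg -> erlang:error({invalid_float, Str})
            end,
        if
            Val >= 0.0 andalso Val =< 1.0 -> Val;
            true -> erlang:error({float_out_of_range, Str})
        end
    end,
    0.5 = Parse("0.5").
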
diff --git a/src/couch_js/src/couch_js.erl b/src/couch_js/src/couch_js.erl
index a9c974ef8..3a80a99c3 100644
--- a/src/couch_js/src/couch_js.erl
+++ b/src/couch_js/src/couch_js.erl
@@ -10,11 +10,9 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_js).
-behavior(couch_eval).
-
-export([
acquire_map_context/1,
release_map_context/1,
@@ -24,13 +22,10 @@
try_compile/4
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-define(JS, <<"javascript">>).
-
acquire_map_context(Opts) ->
#{
map_funs := MapFuns,
@@ -38,29 +33,32 @@ acquire_map_context(Opts) ->
} = Opts,
couch_js_query_servers:start_doc_map(?JS, MapFuns, Lib).
-
release_map_context(Proc) ->
couch_js_query_servers:stop_doc_map(Proc).
-
map_docs(Proc, Docs) ->
- {ok, lists:map(fun(Doc) ->
- {ok, RawResults} = couch_js_query_servers:map_doc_raw(Proc, Doc),
- Results = couch_js_query_servers:raw_to_ejson(RawResults),
- Tupled = lists:map(fun(ViewResult) ->
- lists:map(fun([K, V]) -> {K, V} end, ViewResult)
- end, Results),
- {Doc#doc.id, Tupled}
- end, Docs)}.
+ {ok,
+ lists:map(
+ fun(Doc) ->
+ {ok, RawResults} = couch_js_query_servers:map_doc_raw(Proc, Doc),
+ Results = couch_js_query_servers:raw_to_ejson(RawResults),
+ Tupled = lists:map(
+ fun(ViewResult) ->
+ lists:map(fun([K, V]) -> {K, V} end, ViewResult)
+ end,
+ Results
+ ),
+ {Doc#doc.id, Tupled}
+ end,
+ Docs
+ )}.
acquire_context() ->
Ctx = couch_query_servers:get_os_process(?JS),
{ok, Ctx}.
-
release_context(Proc) ->
couch_query_servers:ret_os_process(Proc).
-
try_compile(Proc, FunctionType, FunName, FunSrc) ->
couch_query_servers:try_compile(Proc, FunctionType, FunName, FunSrc).
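
Note: the reworked map_docs/2 above reshapes the raw map output: for each document, every view's [Key, Value] rows become {Key, Value} tuples, one list per view. The reshaping on literal data:

    %% One document, one view that emitted two rows:
    Results = [[[<<"k1">>, 1], [<<"k2">>, 2]]],
    Tupled = lists:map(
        fun(ViewResult) ->
            lists:map(fun([K, V]) -> {K, V} end, ViewResult)
        end,
        Results
    ),
    Tupled = [[{<<"k1">>, 1}, {<<"k2">>, 2}]].
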
diff --git a/src/couch_js/src/couch_js_app.erl b/src/couch_js/src/couch_js_app.erl
index b28f5852e..7ead4e8c6 100644
--- a/src/couch_js/src/couch_js_app.erl
+++ b/src/couch_js/src/couch_js_app.erl
@@ -10,22 +10,17 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_js_app).
-
-behaviour(application).
-
-export([
start/2,
stop/1
]).
-
start(_StartType, _StartArgs) ->
couch_js_sup:start_link().
-
stop(_State) ->
-    ok.
\ No newline at end of file
+ ok.
diff --git a/src/couch_js/src/couch_js_io_logger.erl b/src/couch_js/src/couch_js_io_logger.erl
index 5a1695c01..493da32a1 100644
--- a/src/couch_js/src/couch_js_io_logger.erl
+++ b/src/couch_js/src/couch_js_io_logger.erl
@@ -20,7 +20,6 @@
stop_error/1
]).
-
start(undefined) ->
ok;
start(Dir) ->
@@ -42,7 +41,6 @@ start(Dir) ->
ok
end.
-
stop_noerror() ->
case get(logger_path) of
undefined ->
@@ -51,7 +49,6 @@ stop_noerror() ->
close_logs()
end.
-
stop_error(Err) ->
case get(logger_path) of
undefined ->
@@ -61,21 +58,17 @@ stop_error(Err) ->
close_logs()
end.
-
log_output(Data) ->
log(get(logger_out_fd), Data).
-
log_input(Data) ->
log(get(logger_in_fd), Data).
-
unix_time() ->
{Mega, Sec, USec} = os:timestamp(),
UnixTs = (Mega * 1000000 + Sec) * 1000000 + USec,
integer_to_list(UnixTs).
-
log_name() ->
Ts = unix_time(),
Pid0 = erlang:pid_to_list(self()),
@@ -83,12 +76,10 @@ log_name() ->
Pid2 = string:strip(Pid1, right, $>),
lists:flatten(io_lib:format("~s_~s", [Ts, Pid2])).
-
close_logs() ->
file:close(get(logger_out_fd)),
file:close(get(logger_in_fd)).
-
save_error_logs(Path, Err) ->
Otp = erlang:system_info(otp_release),
Msg = io_lib:format("Error: ~p~nNode: ~p~nOTP: ~p~n", [Err, node(), Otp]),
@@ -97,10 +88,9 @@ save_error_logs(Path, Err) ->
OFd = get(logger_in_fd),
file:position(IFd, 0),
file:position(OFd, 0),
- file:copy(IFd, Path ++ ".out.log"),
+ file:copy(IFd, Path ++ ".out.log"),
file:copy(OFd, Path ++ ".in.log").
-
log(undefined, _Data) ->
ok;
log(Fd, Data) ->
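
Note: log_name/0 above derives a unique per-process file name from a microsecond unix timestamp plus the numeric part of the pid. The same construction, standalone:

    {Mega, Sec, USec} = os:timestamp(),
    Ts = integer_to_list((Mega * 1000000 + Sec) * 1000000 + USec),
    Pid0 = erlang:pid_to_list(self()),    % e.g. "<0.123.0>"
    Pid1 = string:strip(Pid0, left, $<),
    Pid2 = string:strip(Pid1, right, $>),
    Name = lists:flatten(io_lib:format("~s_~s", [Ts, Pid2])).
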
diff --git a/src/couch_js/src/couch_js_native_process.erl b/src/couch_js/src/couch_js_native_process.erl
index 8add3d5f2..4cbd9f053 100644
--- a/src/couch_js/src/couch_js_native_process.erl
+++ b/src/couch_js/src/couch_js_native_process.erl
@@ -41,8 +41,16 @@
-behaviour(gen_server).
-vsn(1).
--export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
- handle_info/2,format_status/2]).
+-export([
+ start_link/0,
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ code_change/3,
+ handle_info/2,
+ format_status/2
+]).
-export([set_timeout/2, prompt/2]).
-define(STATE, native_proc_state).
@@ -75,20 +83,20 @@ prompt(Pid, Data) when is_list(Data) ->
init([]) ->
V = config:get("query_server_config", "os_process_idle_limit", "300"),
Idle = list_to_integer(V) * 1000,
- {ok, #evstate{ddocs=dict:new(), idle=Idle}, Idle}.
+ {ok, #evstate{ddocs = dict:new(), idle = Idle}, Idle}.
handle_call({set_timeout, TimeOut}, _From, State) ->
- {reply, ok, State#evstate{timeout=TimeOut}, State#evstate.idle};
-
+ {reply, ok, State#evstate{timeout = TimeOut}, State#evstate.idle};
handle_call({prompt, Data}, _From, State) ->
?LOG_DEBUG(#{
what => prompt,
in => native_process,
msg => ?JSON_ENCODE(Data)
}),
- couch_log:debug("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
- {NewState, Resp} = try run(State, to_binary(Data)) of
- {S, R} -> {S, R}
+ couch_log:debug("Prompt native qs: ~s", [?JSON_ENCODE(Data)]),
+ {NewState, Resp} =
+ try run(State, to_binary(Data)) of
+ {S, R} -> {S, R}
catch
throw:{error, Why} ->
{State, [<<"error">>, Why, Why]}
@@ -124,9 +132,9 @@ handle_info(timeout, State) ->
gen_server:cast(couch_js_proc_manager, {os_proc_idle, self()}),
erlang:garbage_collect(),
{noreply, State, State#evstate.idle};
-handle_info({'EXIT',_,normal}, State) ->
+handle_info({'EXIT', _, normal}, State) ->
{noreply, State, State#evstate.idle};
-handle_info({'EXIT',_,Reason}, State) ->
+handle_info({'EXIT', _, Reason}, State) ->
{stop, Reason, State}.
terminate(_Reason, _State) -> ok.
code_change(_OldVersion, State, _Extra) -> {ok, State}.
@@ -142,12 +150,9 @@ format_status(_Opt, [_PDict, State]) ->
funs = {length, length(Funs)},
query_config = {length, length(Config)}
},
- [{data, [{"State",
- ?record_to_keyval(evstate, Scrubbed)
- }]}].
-
+ [{data, [{"State", ?record_to_keyval(evstate, Scrubbed)}]}].
-run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
+run(#evstate{list_pid = Pid} = State, [<<"list_row">>, Row]) when is_pid(Pid) ->
Pid ! {self(), list_row, Row},
receive
{Pid, chunks, Data} ->
@@ -159,61 +164,68 @@ run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
throw({timeout, list_cleanup})
end,
process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid=nil}, [<<"end">>, Data]}
+ {State#evstate{list_pid = nil}, [<<"end">>, Data]}
after State#evstate.timeout ->
throw({timeout, list_row})
end;
-run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
+run(#evstate{list_pid = Pid} = State, [<<"list_end">>]) when is_pid(Pid) ->
Pid ! {self(), list_end},
Resp =
- receive
- {Pid, list_end, Data} ->
- receive
- {'EXIT', Pid, normal} -> ok
- after State#evstate.timeout ->
- throw({timeout, list_cleanup})
- end,
- [<<"end">>, Data]
- after State#evstate.timeout ->
- throw({timeout, list_end})
- end,
+ receive
+ {Pid, list_end, Data} ->
+ receive
+ {'EXIT', Pid, normal} -> ok
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup})
+ end,
+ [<<"end">>, Data]
+ after State#evstate.timeout ->
+ throw({timeout, list_end})
+ end,
process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid=nil}, Resp};
-run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
+ {State#evstate{list_pid = nil}, Resp};
+run(#evstate{list_pid = Pid} = State, _Command) when is_pid(Pid) ->
{State, [<<"error">>, list_error, list_error]};
-run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
- {#evstate{ddocs=DDocs}, true};
-run(#evstate{ddocs=DDocs, idle=Idle}, [<<"reset">>, QueryConfig]) ->
+run(#evstate{ddocs = DDocs}, [<<"reset">>]) ->
+ {#evstate{ddocs = DDocs}, true};
+run(#evstate{ddocs = DDocs, idle = Idle}, [<<"reset">>, QueryConfig]) ->
NewState = #evstate{
ddocs = DDocs,
query_config = QueryConfig,
idle = Idle
},
{NewState, true};
-run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
+run(#evstate{funs = Funs} = State, [<<"add_fun">>, BinFunc]) ->
FunInfo = makefun(State, BinFunc),
- {State#evstate{funs=Funs ++ [FunInfo]}, true};
-run(State, [<<"map_doc">> , Doc]) ->
- Resp = lists:map(fun({Sig, Fun}) ->
- erlang:put(Sig, []),
- Fun(Doc),
- lists:reverse(erlang:get(Sig))
- end, State#evstate.funs),
+ {State#evstate{funs = Funs ++ [FunInfo]}, true};
+run(State, [<<"map_doc">>, Doc]) ->
+ Resp = lists:map(
+ fun({Sig, Fun}) ->
+ erlang:put(Sig, []),
+ Fun(Doc),
+ lists:reverse(erlang:get(Sig))
+ end,
+ State#evstate.funs
+ ),
{State, Resp};
run(State, [<<"reduce">>, Funs, KVs]) ->
{Keys, Vals} =
- lists:foldl(fun([K, V], {KAcc, VAcc}) ->
- {[K | KAcc], [V | VAcc]}
- end, {[], []}, KVs),
+ lists:foldl(
+ fun([K, V], {KAcc, VAcc}) ->
+ {[K | KAcc], [V | VAcc]}
+ end,
+ {[], []},
+ KVs
+ ),
Keys2 = lists:reverse(Keys),
Vals2 = lists:reverse(Vals),
{State, catch reduce(State, Funs, Keys2, Vals2, false)};
run(State, [<<"rereduce">>, Funs, Vals]) ->
{State, catch reduce(State, Funs, null, Vals, true)};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
+run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
- {State#evstate{ddocs=DDocs2}, true};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
+ {State#evstate{ddocs = DDocs2}, true};
+run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, DDocId | Rest]) ->
DDoc = load_ddoc(DDocs, DDocId),
ddoc(State, DDoc, Rest);
run(_, Unknown) ->
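
Note: the reduce clause above unzips [[Key, Value], ...] rows with a single foldl plus two reversals instead of traversing the list twice. On literal data:

    KVs = [[a, 1], [b, 2], [c, 3]],
    {Keys, Vals} =
        lists:foldl(
            fun([K, V], {KAcc, VAcc}) -> {[K | KAcc], [V | VAcc]} end,
            {[], []},
            KVs
        ),
    {[a, b, c], [1, 2, 3]} = {lists:reverse(Keys), lists:reverse(Vals)}.
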
@@ -227,73 +239,85 @@ run(_, Unknown) ->
ddoc(State, {DDoc}, [FunPath, Args]) ->
% load fun from the FunPath
- BFun = lists:foldl(fun
- (Key, {Props}) when is_list(Props) ->
- couch_util:get_value(Key, Props, nil);
- (_Key, Fun) when is_binary(Fun) ->
- Fun;
- (_Key, nil) ->
- throw({error, not_found});
- (_Key, _Fun) ->
- throw({error, malformed_ddoc})
- end, {DDoc}, FunPath),
+ BFun = lists:foldl(
+ fun
+ (Key, {Props}) when is_list(Props) ->
+ couch_util:get_value(Key, Props, nil);
+ (_Key, Fun) when is_binary(Fun) ->
+ Fun;
+ (_Key, nil) ->
+ throw({error, not_found});
+ (_Key, _Fun) ->
+ throw({error, malformed_ddoc})
+ end,
+ {DDoc},
+ FunPath
+ ),
ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
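
Note: the FunPath fold above walks nested {Props} objects in a design doc down to a binary function source, throwing not_found or malformed_ddoc on bad paths. A runnable sketch on literal data, using stdlib proplists:get_value in place of couch_util:get_value:

    DDoc = {[{<<"views">>, {[{<<"v1">>, {[{<<"map">>, <<"fun(_) -> ok end.">>}]}}]}}]},
    FunPath = [<<"views">>, <<"v1">>, <<"map">>],
    BFun = lists:foldl(
        fun
            (Key, {Props}) when is_list(Props) ->
                proplists:get_value(Key, Props, nil);
            (_Key, Fun) when is_binary(Fun) ->
                Fun;
            (_Key, nil) ->
                throw({error, not_found})
        end,
        DDoc,
        FunPath
    ),
    BFun = <<"fun(_) -> ok end.">>.
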
ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
{State, (catch apply(Fun, Args))};
ddoc(State, {_, Fun}, [<<"rewrites">>], Args) ->
{State, (catch apply(Fun, Args))};
-ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
+ddoc(State, {_, Fun}, [<<"filters">> | _], [Docs, Req]) ->
FilterFunWrapper = fun(Doc) ->
case catch Fun(Doc, Req) of
- true -> true;
- false -> false;
- {'EXIT', Error} ->
- ?LOG_ERROR(#{
- what => filter_fun_crash,
- in => native_process,
- details => Error
- }),
- couch_log:error("~p", [Error])
+ true ->
+ true;
+ false ->
+ false;
+ {'EXIT', Error} ->
+ ?LOG_ERROR(#{
+ what => filter_fun_crash,
+ in => native_process,
+ details => Error
+ }),
+ couch_log:error("~p", [Error])
end
end,
Resp = lists:map(FilterFunWrapper, Docs),
{State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) ->
+ddoc(State, {_, Fun}, [<<"views">> | _], [Docs]) ->
MapFunWrapper = fun(Doc) ->
case catch Fun(Doc) of
- undefined -> true;
- ok -> false;
- false -> false;
- [_|_] -> true;
- {'EXIT', Error} ->
- ?LOG_ERROR(#{
- what => view_fun_crash,
- in => native_process,
- details => Error
- }),
- couch_log:error("~p", [Error])
+ undefined ->
+ true;
+ ok ->
+ false;
+ false ->
+ false;
+ [_ | _] ->
+ true;
+ {'EXIT', Error} ->
+ ?LOG_ERROR(#{
+ what => view_fun_crash,
+ in => native_process,
+ details => Error
+ }),
+ couch_log:error("~p", [Error])
end
end,
Resp = lists:map(MapFunWrapper, Docs),
{State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
- Resp = case (catch apply(Fun, Args)) of
- FunResp when is_list(FunResp) ->
- FunResp;
- {FunResp} ->
- [<<"resp">>, {FunResp}];
- FunResp ->
- FunResp
- end,
+ddoc(State, {_, Fun}, [<<"shows">> | _], Args) ->
+ Resp =
+ case (catch apply(Fun, Args)) of
+ FunResp when is_list(FunResp) ->
+ FunResp;
+ {FunResp} ->
+ [<<"resp">>, {FunResp}];
+ FunResp ->
+ FunResp
+ end,
{State, Resp};
-ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
- Resp = case (catch apply(Fun, Args)) of
- [JsonDoc, JsonResp] ->
- [<<"up">>, JsonDoc, JsonResp]
- end,
+ddoc(State, {_, Fun}, [<<"updates">> | _], Args) ->
+ Resp =
+ case (catch apply(Fun, Args)) of
+ [JsonDoc, JsonResp] ->
+ [<<"up">>, JsonDoc, JsonResp]
+ end,
{State, Resp};
-ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
+ddoc(State, {Sig, Fun}, [<<"lists">> | _], Args) ->
Self = self(),
SpawnFun = fun() ->
LastChunk = (catch apply(Fun, Args)),
@@ -309,22 +333,22 @@ ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
ok
end,
LastChunks =
- case erlang:get(Sig) of
- undefined -> [LastChunk];
- OtherChunks -> [LastChunk | OtherChunks]
- end,
+ case erlang:get(Sig) of
+ undefined -> [LastChunk];
+ OtherChunks -> [LastChunk | OtherChunks]
+ end,
Self ! {self(), list_end, lists:reverse(LastChunks)}
end,
erlang:put(do_trap, process_flag(trap_exit, true)),
Pid = spawn_link(SpawnFun),
Resp =
- receive
- {Pid, start, Chunks, JsonResp} ->
- [<<"start">>, Chunks, JsonResp]
- after State#evstate.timeout ->
- throw({timeout, list_start})
- end,
- {State#evstate{list_pid=Pid}, Resp}.
+ receive
+ {Pid, start, Chunks, JsonResp} ->
+ [<<"start">>, Chunks, JsonResp]
+ after State#evstate.timeout ->
+ throw({timeout, list_start})
+ end,
+ {State#evstate{list_pid = Pid}, Resp}.
store_ddoc(DDocs, DDocId, DDoc) ->
dict:store(DDocId, DDoc, DDocs).
@@ -332,7 +356,11 @@ load_ddoc(DDocs, DDocId) ->
try dict:fetch(DDocId, DDocs) of
{DDoc} -> {DDoc}
catch
- _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
+ _:_Else ->
+ throw(
+ {error,
+ ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s", [DDocId]))}
+ )
end.
bindings(State, Sig) ->
@@ -361,10 +389,10 @@ bindings(State, Sig, DDoc) ->
Send = fun(Chunk) ->
Curr =
- case erlang:get(Sig) of
- undefined -> [];
- Else -> Else
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ Else -> Else
+ end,
erlang:put(Sig, [Chunk | Curr])
end,
@@ -374,10 +402,10 @@ bindings(State, Sig, DDoc) ->
ok;
_ ->
Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
Self ! {self(), chunks, lists:reverse(Chunks)}
end,
erlang:put(Sig, []),
@@ -402,7 +430,8 @@ bindings(State, Sig, DDoc) ->
case DDoc of
{_Props} ->
Bindings ++ [{'DDoc', DDoc}];
- _Else -> Bindings
+ _Else ->
+ Bindings
end.
% thanks to erlview, via:
@@ -418,37 +447,48 @@ makefun(State, Source, {DDoc}) ->
makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
FunStr = binary_to_list(Source),
{ok, Tokens, _} = erl_scan:string(FunStr),
- Form = case (catch erl_parse:parse_exprs(Tokens)) of
- {ok, [ParsedForm]} ->
- ParsedForm;
- {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
- ?LOG_ERROR(#{
- what => syntax_error,
- in => native_process,
- line => LineNum,
- details => Mesg,
- parameters => Params
- }),
- couch_log:error("Syntax error on line: ~p~n~s~p~n",
- [LineNum, Mesg, Params]),
- throw(Error)
- end,
- Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
- erl_eval:add_binding(Name, Fun, Acc)
- end, erl_eval:new_bindings(), BindFuns),
+ Form =
+ case (catch erl_parse:parse_exprs(Tokens)) of
+ {ok, [ParsedForm]} ->
+ ParsedForm;
+ {error, {LineNum, _Mod, [Mesg, Params]}} = Error ->
+ ?LOG_ERROR(#{
+ what => syntax_error,
+ in => native_process,
+ line => LineNum,
+ details => Mesg,
+ parameters => Params
+ }),
+ couch_log:error(
+ "Syntax error on line: ~p~n~s~p~n",
+ [LineNum, Mesg, Params]
+ ),
+ throw(Error)
+ end,
+ Bindings = lists:foldl(
+ fun({Name, Fun}, Acc) ->
+ erl_eval:add_binding(Name, Fun, Acc)
+ end,
+ erl_eval:new_bindings(),
+ BindFuns
+ ),
{value, Fun, _} = erl_eval:expr(Form, Bindings),
Fun.
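
Note: makefun/3 above turns a design-doc source string into a callable fun with the standard erl_scan, erl_parse, and erl_eval pipeline. The pipeline in isolation:

    Source = "fun(X) -> X + 1 end.",
    {ok, Tokens, _} = erl_scan:string(Source),
    {ok, [Form]} = erl_parse:parse_exprs(Tokens),
    {value, Fun, _} = erl_eval:expr(Form, erl_eval:new_bindings()),
    2 = Fun(1).
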
reduce(State, BinFuns, Keys, Vals, ReReduce) ->
- Funs = case is_list(BinFuns) of
- true ->
- lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
- _ ->
- [makefun(State, BinFuns)]
- end,
- Reds = lists:map(fun({_Sig, Fun}) ->
- Fun(Keys, Vals, ReReduce)
- end, Funs),
+ Funs =
+ case is_list(BinFuns) of
+ true ->
+ lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
+ _ ->
+ [makefun(State, BinFuns)]
+ end,
+ Reds = lists:map(
+ fun({_Sig, Fun}) ->
+ Fun(Keys, Vals, ReReduce)
+ end,
+ Funs
+ ),
[true, Reds].
foldrows(GetRow, ProcRow, Acc) ->
@@ -468,15 +508,15 @@ start_list_resp(Self, Sig) ->
case erlang:get(list_started) of
undefined ->
Headers =
- case erlang:get(list_headers) of
- undefined -> {[{<<"headers">>, {[]}}]};
- CurrHdrs -> CurrHdrs
- end,
+ case erlang:get(list_headers) of
+ undefined -> {[{<<"headers">>, {[]}}]};
+ CurrHdrs -> CurrHdrs
+ end,
Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
Self ! {self(), start, lists:reverse(Chunks), Headers},
erlang:put(list_started, true),
erlang:put(Sig, []),
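
Note: the Send closure above accumulates emitted chunks in the process dictionary under the function signature, prepending for constant-time appends and reversing on flush. The idiom in isolation:

    Sig = make_ref(),
    Send = fun(Chunk) ->
        Curr =
            case erlang:get(Sig) of
                undefined -> [];
                Else -> Else
            end,
        erlang:put(Sig, [Chunk | Curr])
    end,
    Send(<<"a">>),
    Send(<<"b">>),
    [<<"a">>, <<"b">>] = lists:reverse(erlang:get(Sig)).
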
diff --git a/src/couch_js/src/couch_js_os_process.erl b/src/couch_js/src/couch_js_os_process.erl
index 4ff01e74b..119e60bbc 100644
--- a/src/couch_js/src/couch_js_os_process.erl
+++ b/src/couch_js/src/couch_js_os_process.erl
@@ -24,14 +24,14 @@
-define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
--record(os_proc,
- {command,
- port,
- writer,
- reader,
- timeout=5000,
- idle
- }).
+-record(os_proc, {
+ command,
+ port,
+ writer,
+ reader,
+ timeout = 5000,
+ idle
+}).
start_link(Command) ->
start_link(Command, []).
@@ -62,7 +62,7 @@ prompt(Pid, Data) ->
pid => Pid,
details => Error
}),
- couch_log:error("OS Process Error ~p :: ~p",[Pid,Error]),
+ couch_log:error("OS Process Error ~p :: ~p", [Pid, Error]),
throw(Error)
end.
@@ -79,21 +79,21 @@ readline(#os_proc{} = OsProc) ->
Res.
readline(#os_proc{port = Port} = OsProc, Acc) ->
receive
- {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
- readline(OsProc, <<Acc/binary,Data/binary>>);
- {Port, {data, {noeol, Data}}} when is_binary(Data) ->
- readline(OsProc, Data);
- {Port, {data, {noeol, Data}}} ->
- readline(OsProc, [Data|Acc]);
- {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
- [<<Acc/binary,Data/binary>>];
- {Port, {data, {eol, Data}}} when is_binary(Data) ->
- [Data];
- {Port, {data, {eol, Data}}} ->
- lists:reverse(Acc, Data);
- {Port, Err} ->
- catch port_close(Port),
- throw({os_process_error, Err})
+ {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
+ readline(OsProc, <<Acc/binary, Data/binary>>);
+ {Port, {data, {noeol, Data}}} when is_binary(Data) ->
+ readline(OsProc, Data);
+ {Port, {data, {noeol, Data}}} ->
+ readline(OsProc, [Data | Acc]);
+ {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
+ [<<Acc/binary, Data/binary>>];
+ {Port, {data, {eol, Data}}} when is_binary(Data) ->
+ [Data];
+ {Port, {data, {eol, Data}}} ->
+ lists:reverse(Acc, Data);
+ {Port, Err} ->
+ catch port_close(Port),
+ throw({os_process_error, Err})
after OsProc#os_proc.timeout ->
catch port_close(Port),
throw({os_process_error, "OS process timed out."})
@@ -108,8 +108,10 @@ writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
port => OsProc#os_proc.port,
data => JsonData
}),
- couch_log:debug("OS Process ~p Input :: ~s",
- [OsProc#os_proc.port, JsonData]),
+ couch_log:debug(
+ "OS Process ~p Input :: ~s",
+ [OsProc#os_proc.port, JsonData]
+ ),
true = writeline(OsProc, JsonData).
readjson(OsProc) when is_record(OsProc, os_proc) ->
@@ -128,37 +130,41 @@ readjson(OsProc) when is_record(OsProc, os_proc) ->
% command, otherwise return the raw JSON line to the caller.
pick_command(Line)
catch
- throw:abort ->
- {json, Line};
- throw:{cmd, _Cmd} ->
- case ?JSON_DECODE(Line) of
- [<<"log">>, Msg] when is_binary(Msg) ->
- % we got a message to log. Log it and continue
- ?LOG_INFO(#{
- what => user_defined_log,
- in => os_process,
- port => OsProc#os_proc.port,
- msg => Msg
- }),
- couch_log:info("OS Process ~p Log :: ~s",
- [OsProc#os_proc.port, Msg]),
- readjson(OsProc);
- [<<"error">>, Id, Reason] ->
- throw({error, {couch_util:to_existing_atom(Id),Reason}});
- [<<"fatal">>, Id, Reason] ->
- ?LOG_INFO(#{
- what => fatal_error,
- in => os_process,
- port => OsProc#os_proc.port,
- tag => Id,
- details => Reason
- }),
- couch_log:info("OS Process ~p Fatal Error :: ~s ~p",
- [OsProc#os_proc.port, Id, Reason]),
- throw({couch_util:to_existing_atom(Id),Reason});
- _Result ->
- {json, Line}
- end
+ throw:abort ->
+ {json, Line};
+ throw:{cmd, _Cmd} ->
+ case ?JSON_DECODE(Line) of
+ [<<"log">>, Msg] when is_binary(Msg) ->
+ % we got a message to log. Log it and continue
+ ?LOG_INFO(#{
+ what => user_defined_log,
+ in => os_process,
+ port => OsProc#os_proc.port,
+ msg => Msg
+ }),
+ couch_log:info(
+ "OS Process ~p Log :: ~s",
+ [OsProc#os_proc.port, Msg]
+ ),
+ readjson(OsProc);
+ [<<"error">>, Id, Reason] ->
+ throw({error, {couch_util:to_existing_atom(Id), Reason}});
+ [<<"fatal">>, Id, Reason] ->
+ ?LOG_INFO(#{
+ what => fatal_error,
+ in => os_process,
+ port => OsProc#os_proc.port,
+ tag => Id,
+ details => Reason
+ }),
+ couch_log:info(
+ "OS Process ~p Fatal Error :: ~s ~p",
+ [OsProc#os_proc.port, Id, Reason]
+ ),
+ throw({couch_util:to_existing_atom(Id), Reason});
+ _Result ->
+ {json, Line}
+ end
end.
pick_command(Line) ->
@@ -178,7 +184,6 @@ pick_command1(<<"fatal">> = Cmd) ->
pick_command1(_) ->
throw(abort).
-
% gen_server API
init([Command, Options, PortOptions]) ->
couch_js_io_logger:start(os:getenv("COUCHDB_IO_LOG_DIR")),
@@ -187,11 +192,11 @@ init([Command, Options, PortOptions]) ->
V = config:get("query_server_config", "os_process_idle_limit", "300"),
IdleLimit = list_to_integer(V) * 1000,
BaseProc = #os_proc{
- command=Command,
- port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
- writer=fun ?MODULE:writejson/2,
- reader=fun ?MODULE:readjson/1,
- idle=IdleLimit
+ command = Command,
+ port = open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
+ writer = fun ?MODULE:writejson/2,
+ reader = fun ?MODULE:readjson/1,
+ idle = IdleLimit
},
KillCmd = iolist_to_binary(readline(BaseProc)),
Pid = self(),
@@ -202,24 +207,28 @@ init([Command, Options, PortOptions]) ->
}),
couch_log:debug("OS Process Start :: ~p", [BaseProc#os_proc.port]),
spawn(fun() ->
- % this ensure the real os process is killed when this process dies.
- erlang:monitor(process, Pid),
- killer(?b2l(KillCmd))
- end),
+        % this ensures the real os process is killed when this process dies.
+ erlang:monitor(process, Pid),
+ killer(?b2l(KillCmd))
+ end),
OsProc =
- lists:foldl(fun(Opt, Proc) ->
- case Opt of
- {writer, Writer} when is_function(Writer) ->
- Proc#os_proc{writer=Writer};
- {reader, Reader} when is_function(Reader) ->
- Proc#os_proc{reader=Reader};
- {timeout, TimeOut} when is_integer(TimeOut) ->
- Proc#os_proc{timeout=TimeOut}
- end
- end, BaseProc, Options),
+ lists:foldl(
+ fun(Opt, Proc) ->
+ case Opt of
+ {writer, Writer} when is_function(Writer) ->
+ Proc#os_proc{writer = Writer};
+ {reader, Reader} when is_function(Reader) ->
+ Proc#os_proc{reader = Reader};
+ {timeout, TimeOut} when is_integer(TimeOut) ->
+ Proc#os_proc{timeout = TimeOut}
+ end
+ end,
+ BaseProc,
+ Options
+ ),
{ok, OsProc, IdleLimit}.
-terminate(Reason, #os_proc{port=Port}) ->
+terminate(Reason, #os_proc{port = Port}) ->
catch port_close(Port),
case Reason of
normal ->
@@ -229,10 +238,10 @@ terminate(Reason, #os_proc{port=Port}) ->
end,
ok.
-handle_call({set_timeout, TimeOut}, _From, #os_proc{idle=Idle}=OsProc) ->
- {reply, ok, OsProc#os_proc{timeout=TimeOut}, Idle};
-handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
- #os_proc{writer=Writer, reader=Reader} = OsProc,
+handle_call({set_timeout, TimeOut}, _From, #os_proc{idle = Idle} = OsProc) ->
+ {reply, ok, OsProc#os_proc{timeout = TimeOut}, Idle};
+handle_call({prompt, Data}, _From, #os_proc{idle = Idle} = OsProc) ->
+ #os_proc{writer = Writer, reader = Reader} = OsProc,
try
Writer(OsProc, Data),
{reply, {ok, Reader(OsProc)}, OsProc, Idle}
@@ -247,7 +256,7 @@ handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
garbage_collect()
end.
-handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
+handle_cast({send, Data}, #os_proc{writer = Writer, idle = Idle} = OsProc) ->
try
Writer(OsProc, Data),
{noreply, OsProc, Idle}
@@ -262,12 +271,12 @@ handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
couch_log:error("Failed sending data: ~p -> ~p", [Data, OsError]),
{stop, normal, OsProc}
end;
-handle_cast(garbage_collect, #os_proc{idle=Idle}=OsProc) ->
+handle_cast(garbage_collect, #os_proc{idle = Idle} = OsProc) ->
erlang:garbage_collect(),
{noreply, OsProc, Idle};
handle_cast(stop, OsProc) ->
{stop, normal, OsProc};
-handle_cast(Msg, #os_proc{idle=Idle}=OsProc) ->
+handle_cast(Msg, #os_proc{idle = Idle} = OsProc) ->
?LOG_DEBUG(#{
what => unknown_message,
in => os_process,
@@ -276,11 +285,11 @@ handle_cast(Msg, #os_proc{idle=Idle}=OsProc) ->
couch_log:debug("OS Proc: Unknown cast: ~p", [Msg]),
{noreply, OsProc, Idle}.
-handle_info(timeout, #os_proc{idle=Idle}=OsProc) ->
+handle_info(timeout, #os_proc{idle = Idle} = OsProc) ->
gen_server:cast(couch_js_proc_manager, {os_proc_idle, self()}),
erlang:garbage_collect(),
{noreply, OsProc, Idle};
-handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
+handle_info({Port, {exit_status, 0}}, #os_proc{port = Port} = OsProc) ->
?LOG_INFO(#{
what => normal_termination,
in => os_process,
@@ -288,7 +297,7 @@ handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
}),
couch_log:info("OS Process terminated normally", []),
{stop, normal, OsProc};
-handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
+handle_info({Port, {exit_status, Status}}, #os_proc{port = Port} = OsProc) ->
?LOG_ERROR(#{
what => abnormal_termination,
in => os_process,
@@ -297,7 +306,7 @@ handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
}),
couch_log:error("OS Process died with status: ~p", [Status]),
{stop, {exit_status, Status}, OsProc};
-handle_info(Msg, #os_proc{idle=Idle}=OsProc) ->
+handle_info(Msg, #os_proc{idle = Idle} = OsProc) ->
?LOG_DEBUG(#{
what => unexpected_message,
in => os_process,
@@ -307,8 +316,8 @@ handle_info(Msg, #os_proc{idle=Idle}=OsProc) ->
couch_log:debug("OS Proc: Unknown info: ~p", [Msg]),
{noreply, OsProc, Idle}.
-code_change(_, {os_proc, Cmd, Port, W, R, Timeout} , _) ->
- V = config:get("query_server_config","os_process_idle_limit","300"),
+code_change(_, {os_proc, Cmd, Port, W, R, Timeout}, _) ->
+ V = config:get("query_server_config", "os_process_idle_limit", "300"),
State = #os_proc{
command = Cmd,
port = Port,
@@ -322,9 +331,9 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
killer(KillCmd) ->
- receive _ ->
- os:cmd(KillCmd)
+ receive
+ _ ->
+ os:cmd(KillCmd)
after 1000 ->
?MODULE:killer(KillCmd)
end.
-
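
Note: readline/2 above reassembles long lines from a port opened in {line, 4096} mode: chunks tagged noeol are buffered until an eol chunk completes the line. A self-contained sketch of the reassembly, with messages self-sent instead of coming from a real port:

    Assemble = fun Assemble(Acc) ->
        receive
            {data, {noeol, Part}} -> Assemble(<<Acc/binary, Part/binary>>);
            {data, {eol, Part}} -> <<Acc/binary, Part/binary>>
        after 1000 ->
            erlang:error(timeout)
        end
    end,
    self() ! {data, {noeol, <<"he">>}},
    self() ! {data, {eol, <<"llo">>}},
    <<"hello">> = Assemble(<<>>).
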
diff --git a/src/couch_js/src/couch_js_proc_manager.erl b/src/couch_js/src/couch_js_proc_manager.erl
index 0d3be22d6..b6ecd5506 100644
--- a/src/couch_js/src/couch_js_proc_manager.erl
+++ b/src/couch_js/src/couch_js_proc_manager.erl
@@ -61,7 +61,7 @@
-record(client, {
timestamp :: os:timestamp() | '_',
- from :: undefined | {pid(), reference()} | '_',
+ from :: undefined | {pid(), reference()} | '_',
lang :: binary() | '_',
ddoc :: #doc{} | '_',
ddoc_key :: undefined | {DDocId :: docid(), Rev :: revision()} | '_'
@@ -78,27 +78,21 @@
t0 = os:timestamp()
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
get_proc_count() ->
gen_server:call(?MODULE, get_proc_count).
-
get_stale_proc_count() ->
gen_server:call(?MODULE, get_stale_proc_count).
-
reload() ->
gen_server:call(?MODULE, set_threshold_ts).
-
terminate_stale_procs() ->
gen_server:call(?MODULE, terminate_stale_procs).
-
init([]) ->
process_flag(trap_exit, true),
ok = config:listen_for_changes(?MODULE, undefined),
@@ -120,50 +114,48 @@ init([]) ->
soft_limit = get_soft_limit()
}}.
-
terminate(_Reason, _State) ->
- ets:foldl(fun(#proc_int{pid=P}, _) ->
- couch_util:shutdown_sync(P)
- end, 0, ?PROCS),
+ ets:foldl(
+ fun(#proc_int{pid = P}, _) ->
+ couch_util:shutdown_sync(P)
+ end,
+ 0,
+ ?PROCS
+ ),
ok.
-
handle_call(get_proc_count, _From, State) ->
NumProcs = ets:info(?PROCS, size),
NumOpening = ets:info(?OPENING, size),
{reply, NumProcs + NumOpening, State};
-
handle_call(get_stale_proc_count, _From, State) ->
#state{threshold_ts = T0} = State,
- MatchSpec = [{#proc_int{t0='$1', _='_'}, [{'<', '$1', {T0}}], [true]}],
+ MatchSpec = [{#proc_int{t0 = '$1', _ = '_'}, [{'<', '$1', {T0}}], [true]}],
{reply, ets:select_count(?PROCS, MatchSpec), State};
-
-handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, From, State) ->
+handle_call({get_proc, #doc{body = {Props}} = DDoc, DDocKey}, From, State) ->
LangStr = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
Lang = couch_util:to_binary(LangStr),
- Client = #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey},
+ Client = #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey},
add_waiting_client(Client),
{noreply, flush_waiters(State, Lang)};
-
handle_call({get_proc, LangStr}, From, State) ->
Lang = couch_util:to_binary(LangStr),
- Client = #client{from=From, lang=Lang},
+ Client = #client{from = From, lang = Lang},
add_waiting_client(Client),
{noreply, flush_waiters(State, Lang)};
-
-handle_call({ret_proc, #proc{client=Ref} = Proc}, _From, State) ->
+handle_call({ret_proc, #proc{client = Ref} = Proc}, _From, State) ->
erlang:demonitor(Ref, [flush]),
- NewState = case ets:lookup(?PROCS, Proc#proc.pid) of
- [#proc_int{}=ProcInt] ->
- return_proc(State, ProcInt);
- [] ->
- % Proc must've died and we already
- % cleared it out of the table in
- % the handle_info clause.
- State
- end,
+ NewState =
+ case ets:lookup(?PROCS, Proc#proc.pid) of
+ [#proc_int{} = ProcInt] ->
+ return_proc(State, ProcInt);
+ [] ->
+ % Proc must've died and we already
+ % cleared it out of the table in
+ % the handle_info clause.
+ State
+ end,
{reply, true, NewState};
-
handle_call(set_threshold_ts, _From, State) ->
FoldFun = fun
(#proc_int{client = undefined} = Proc, StateAcc) ->
@@ -173,7 +165,6 @@ handle_call(set_threshold_ts, _From, State) ->
end,
NewState = ets:foldl(FoldFun, State, ?PROCS),
{reply, ok, NewState#state{threshold_ts = os:timestamp()}};
-
handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
FoldFun = fun
(#proc_int{client = undefined, t0 = Ts2} = Proc, StateAcc) ->
@@ -188,27 +179,25 @@ handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
end,
NewState = ets:foldl(FoldFun, State, ?PROCS),
{reply, ok, NewState};
-
handle_call(_Call, _From, State) ->
{reply, ignored, State}.
-
-handle_cast({os_proc_idle, Pid}, #state{counts=Counts}=State) ->
- NewState = case ets:lookup(?PROCS, Pid) of
- [#proc_int{client=undefined, lang=Lang}=Proc] ->
- case dict:find(Lang, Counts) of
- {ok, Count} when Count >= State#state.soft_limit ->
- ?LOG_INFO(#{what => close_idle_os_process, pid => Pid}),
- couch_log:info("Closing idle OS Process: ~p", [Pid]),
- remove_proc(State, Proc);
- {ok, _} ->
- State
- end;
- _ ->
- State
- end,
+handle_cast({os_proc_idle, Pid}, #state{counts = Counts} = State) ->
+ NewState =
+ case ets:lookup(?PROCS, Pid) of
+ [#proc_int{client = undefined, lang = Lang} = Proc] ->
+ case dict:find(Lang, Counts) of
+ {ok, Count} when Count >= State#state.soft_limit ->
+ ?LOG_INFO(#{what => close_idle_os_process, pid => Pid}),
+ couch_log:info("Closing idle OS Process: ~p", [Pid]),
+ remove_proc(State, Proc);
+ {ok, _} ->
+ State
+ end;
+ _ ->
+ State
+ end,
{noreply, NewState};
-
handle_cast(reload_config, State) ->
NewState = State#state{
config = get_proc_config(),
@@ -217,29 +206,24 @@ handle_cast(reload_config, State) ->
},
maybe_configure_erlang_native_servers(),
{noreply, flush_waiters(NewState)};
-
handle_cast(_Msg, State) ->
{noreply, State}.
-
handle_info(shutdown, State) ->
{stop, shutdown, State};
-
-handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid,_} = From}}, State) ->
+handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid, _} = From}}, State) ->
ets:delete(?OPENING, Pid),
link(Proc0#proc_int.pid),
Proc = assign_proc(ClientPid, Proc0),
gen_server:reply(From, {ok, Proc, State#state.config}),
{noreply, State};
-
handle_info({'EXIT', Pid, spawn_error}, State) ->
- [{Pid, #client{lang=Lang}}] = ets:lookup(?OPENING, Pid),
+ [{Pid, #client{lang = Lang}}] = ets:lookup(?OPENING, Pid),
ets:delete(?OPENING, Pid),
NewState = State#state{
counts = dict:update_counter(Lang, -1, State#state.counts)
},
{noreply, flush_waiters(NewState, Lang)};
-
handle_info({'EXIT', Pid, Reason}, State) ->
?LOG_INFO(#{what => os_process_failure, pid => Pid, details => Reason}),
couch_log:info("~p ~p died ~p", [?MODULE, Pid, Reason]),
@@ -250,28 +234,22 @@ handle_info({'EXIT', Pid, Reason}, State) ->
[] ->
{noreply, State}
end;
-
handle_info({'DOWN', Ref, _, _, _Reason}, State0) ->
- case ets:match_object(?PROCS, #proc_int{client=Ref, _='_'}) of
+ case ets:match_object(?PROCS, #proc_int{client = Ref, _ = '_'}) of
[#proc_int{} = Proc] ->
{noreply, return_proc(State0, Proc)};
[] ->
{noreply, State0}
end;
-
-
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State};
-
handle_info(_Msg, State) ->
{noreply, State}.
-
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
-
format_status(_Opt, [_PDict, State]) ->
#state{
counts = Counts
@@ -279,10 +257,7 @@ format_status(_Opt, [_PDict, State]) ->
Scrubbed = State#state{
counts = {dict_size, dict:size(Counts)}
},
- [{data, [{"State",
- ?record_to_keyval(state, Scrubbed)
- }]}].
-
+ [{data, [{"State", ?record_to_keyval(state, Scrubbed)}]}].
handle_config_terminate(_, stop, _) ->
ok;
@@ -299,7 +274,6 @@ handle_config_change("query_server_config", _, _, _, _) ->
handle_config_change(_, _, _, _, _) ->
{ok, undefined}.
-
find_proc(#client{lang = Lang, ddoc_key = undefined}) ->
Pred = fun(_) ->
true
@@ -311,7 +285,7 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
end,
case find_proc(Lang, Pred) of
not_found ->
- case find_proc(Client#client{ddoc_key=undefined}) of
+ case find_proc(Client#client{ddoc_key = undefined}) of
{ok, Proc} ->
teach_ddoc(DDoc, DDocKey, Proc);
Else ->
@@ -322,20 +296,21 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
end.
find_proc(Lang, Fun) ->
- try iter_procs(Lang, Fun)
- catch error:Reason:StackTrace ->
- ?LOG_ERROR(#{
- what => os_process_not_available,
- details => Reason,
- stacktrace => StackTrace
- }),
- couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
- {error, Reason}
+ try
+ iter_procs(Lang, Fun)
+ catch
+ error:Reason:StackTrace ->
+ ?LOG_ERROR(#{
+ what => os_process_not_available,
+ details => Reason,
+ stacktrace => StackTrace
+ }),
+ couch_log:error("~p ~p ~p", [?MODULE, Reason, StackTrace]),
+ {error, Reason}
end.
-
iter_procs(Lang, Fun) when is_binary(Lang) ->
- Pattern = #proc_int{lang=Lang, client=undefined, _='_'},
+ Pattern = #proc_int{lang = Lang, client = undefined, _ = '_'},
MSpec = [{Pattern, [], ['$_']}],
case ets:select_reverse(?PROCS, MSpec, 25) of
'$end_of_table' ->
@@ -344,7 +319,6 @@ iter_procs(Lang, Fun) when is_binary(Lang) ->
iter_procs_int(Continuation, Fun)
end.
-
iter_procs_int({[], Continuation0}, Fun) ->
case ets:select_reverse(Continuation0) of
'$end_of_table' ->
@@ -360,7 +334,6 @@ iter_procs_int({[Proc | Rest], Continuation}, Fun) ->
iter_procs_int({Rest, Continuation}, Fun)
end.
-
spawn_proc(State, Client) ->
Pid = spawn_link(?MODULE, new_proc, [Client]),
ets:insert(?OPENING, {Pid, Client}),
@@ -370,36 +343,38 @@ spawn_proc(State, Client) ->
counts = dict:update_counter(Lang, 1, Counts)
}.
-
-new_proc(#client{ddoc=undefined, ddoc_key=undefined}=Client) ->
- #client{from=From, lang=Lang} = Client,
- Resp = try
- case new_proc_int(From, Lang) of
- {ok, Proc} ->
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
+new_proc(#client{ddoc = undefined, ddoc_key = undefined} = Client) ->
+ #client{from = From, lang = Lang} = Client,
+ Resp =
+ try
+ case new_proc_int(From, Lang) of
+ {ok, Proc} ->
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch
+ _:_ ->
spawn_error
- end
- catch _:_ ->
- spawn_error
- end,
+ end,
exit(Resp);
-
new_proc(Client) ->
- #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey} = Client,
- Resp = try
- case new_proc_int(From, Lang) of
- {ok, NewProc} ->
- {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
- spawn_error
- end
- catch _:_ ->
- spawn_error
- end,
+ #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client,
+ Resp =
+ try
+ case new_proc_int(From, Lang) of
+ {ok, NewProc} ->
+ {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch
+ _:_ ->
+ spawn_error
+ end,
exit(Resp).
split_string_if_longer(String, Pos) ->
@@ -419,14 +394,17 @@ split_by_char(String, Char) ->
get_servers_from_env(Spec) ->
SpecLen = length(Spec),
% loop over os:getenv(), match SPEC_
- lists:filtermap(fun(EnvStr) ->
- case split_string_if_longer(EnvStr, SpecLen) of
- {Spec, Rest} ->
- {true, split_by_char(Rest, $=)};
- _ ->
- false
- end
- end, os:getenv()).
+ lists:filtermap(
+ fun(EnvStr) ->
+ case split_string_if_longer(EnvStr, SpecLen) of
+ {Spec, Rest} ->
+ {true, split_by_char(Rest, $=)};
+ _ ->
+ false
+ end
+ end,
+ os:getenv()
+ ).
get_query_server(LangStr) ->
case ets:lookup(?SERVERS, string:to_upper(LangStr)) of
@@ -445,39 +423,39 @@ native_query_server_enabled() ->
maybe_configure_erlang_native_servers() ->
case native_query_server_enabled() of
true ->
- ets:insert(?SERVERS, [
- {"ERLANG", {couch_js_native_process, start_link, []}}]);
+ ets:insert(?SERVERS, [
+ {"ERLANG", {couch_js_native_process, start_link, []}}
+ ]);
_Else ->
- ok
+ ok
end.
new_proc_int(From, Lang) when is_binary(Lang) ->
LangStr = binary_to_list(Lang),
case get_query_server(LangStr) of
- undefined ->
- gen_server:reply(From, {unknown_query_language, Lang});
- {M, F, A} ->
- {ok, Pid} = apply(M, F, A),
- make_proc(Pid, Lang, M);
- Command ->
- {ok, Pid} = couch_js_os_process:start_link(Command),
- make_proc(Pid, Lang, couch_js_os_process)
+ undefined ->
+ gen_server:reply(From, {unknown_query_language, Lang});
+ {M, F, A} ->
+ {ok, Pid} = apply(M, F, A),
+ make_proc(Pid, Lang, M);
+ Command ->
+ {ok, Pid} = couch_js_os_process:start_link(Command),
+ make_proc(Pid, Lang, couch_js_os_process)
end.
-
-teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc_int{ddoc_keys=Keys}=Proc) ->
+teach_ddoc(DDoc, {DDocId, _Rev} = DDocKey, #proc_int{ddoc_keys = Keys} = Proc) ->
% send ddoc over the wire
% we only share the rev with the client we know to update code
% but it only keeps the latest copy, per each ddoc, around.
true = couch_js_query_servers:proc_prompt(
export_proc(Proc),
- [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
+ [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]
+ ),
% we should remove any other ddocs keys for this docid
% because the query server overwrites without the rev
- Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
+ Keys2 = [{D, R} || {D, R} <- Keys, D /= DDocId],
% add ddoc to the proc
- {ok, Proc#proc_int{ddoc_keys=[DDocKey|Keys2]}}.
-
+ {ok, Proc#proc_int{ddoc_keys = [DDocKey | Keys2]}}.
make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
Proc = #proc_int{
@@ -490,42 +468,42 @@ make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
unlink(Pid),
{ok, Proc}.
-
-assign_proc(Pid, #proc_int{client=undefined}=Proc0) when is_pid(Pid) ->
+assign_proc(Pid, #proc_int{client = undefined} = Proc0) when is_pid(Pid) ->
Proc = Proc0#proc_int{client = erlang:monitor(process, Pid)},
ets:insert(?PROCS, Proc),
export_proc(Proc);
-assign_proc(#client{}=Client, #proc_int{client=undefined}=Proc) ->
+assign_proc(#client{} = Client, #proc_int{client = undefined} = Proc) ->
{Pid, _} = Client#client.from,
assign_proc(Pid, Proc).
-
return_proc(#state{} = State, #proc_int{} = ProcInt) ->
#proc_int{pid = Pid, lang = Lang} = ProcInt,
- NewState = case is_process_alive(Pid) of true ->
- case ProcInt#proc_int.t0 < State#state.threshold_ts of
+ NewState =
+ case is_process_alive(Pid) of
true ->
- remove_proc(State, ProcInt);
+ case ProcInt#proc_int.t0 < State#state.threshold_ts of
+ true ->
+ remove_proc(State, ProcInt);
+ false ->
+ gen_server:cast(Pid, garbage_collect),
+ true = ets:update_element(?PROCS, Pid, [
+ {#proc_int.client, undefined}
+ ]),
+ State
+ end;
false ->
- gen_server:cast(Pid, garbage_collect),
- true = ets:update_element(?PROCS, Pid, [
- {#proc_int.client, undefined}
- ]),
- State
- end;
- false ->
- remove_proc(State, ProcInt)
- end,
+ remove_proc(State, ProcInt)
+ end,
flush_waiters(NewState, Lang).
-
-remove_proc(State, #proc_int{}=Proc) ->
+remove_proc(State, #proc_int{} = Proc) ->
ets:delete(?PROCS, Proc#proc_int.pid),
- case is_process_alive(Proc#proc_int.pid) of true ->
- unlink(Proc#proc_int.pid),
- gen_server:cast(Proc#proc_int.pid, stop);
- false ->
- ok
+ case is_process_alive(Proc#proc_int.pid) of
+ true ->
+ unlink(Proc#proc_int.pid),
+ gen_server:cast(Proc#proc_int.pid, stop);
+ false ->
+ ok
end,
Counts = State#state.counts,
Lang = Proc#proc_int.lang,
@@ -533,7 +511,6 @@ remove_proc(State, #proc_int{}=Proc) ->
counts = dict:update_counter(Lang, -1, Counts)
}.
-
-spec export_proc(#proc_int{}) -> #proc{}.
export_proc(#proc_int{} = ProcInt) ->
ProcIntList = tuple_to_list(ProcInt),
@@ -541,17 +518,19 @@ export_proc(#proc_int{} = ProcInt) ->
[_ | Data] = lists:sublist(ProcIntList, ProcLen),
list_to_tuple([proc | Data]).
-
flush_waiters(State) ->
- dict:fold(fun(Lang, Count, StateAcc) ->
- case Count < State#state.hard_limit of
- true ->
- flush_waiters(StateAcc, Lang);
- false ->
- StateAcc
- end
- end, State, State#state.counts).
-
+ dict:fold(
+ fun(Lang, Count, StateAcc) ->
+ case Count < State#state.hard_limit of
+ true ->
+ flush_waiters(StateAcc, Lang);
+ false ->
+ StateAcc
+ end
+ end,
+ State,
+ State#state.counts
+ ).
flush_waiters(State, Lang) ->
CanSpawn = can_spawn(State, Lang),
@@ -578,31 +557,27 @@ flush_waiters(State, Lang) ->
State
end.
-
add_waiting_client(Client) ->
- ets:insert(?WAITERS, Client#client{timestamp=os:timestamp()}).
+ ets:insert(?WAITERS, Client#client{timestamp = os:timestamp()}).
-spec get_waiting_client(Lang :: binary()) -> undefined | #client{}.
get_waiting_client(Lang) ->
- case ets:match_object(?WAITERS, #client{lang=Lang, _='_'}, 1) of
+ case ets:match_object(?WAITERS, #client{lang = Lang, _ = '_'}, 1) of
'$end_of_table' ->
undefined;
- {[#client{}=Client], _} ->
+ {[#client{} = Client], _} ->
Client
end.
-
remove_waiting_client(#client{timestamp = Timestamp}) ->
ets:delete(?WAITERS, Timestamp).
-
can_spawn(#state{hard_limit = HardLimit, counts = Counts}, Lang) ->
case dict:find(Lang, Counts) of
{ok, Count} -> Count < HardLimit;
error -> true
end.
-
get_proc_config() ->
Limit = config:get_boolean("query_server_config", "reduce_limit", true),
Timeout = config:get_integer("couchdb", "os_process_timeout", 5000),
@@ -611,10 +586,8 @@ get_proc_config() ->
{<<"timeout">>, Timeout}
]}.
-
get_hard_limit() ->
config:get_integer("query_server_config", "os_process_limit", 100).
-
get_soft_limit() ->
config:get_integer("query_server_config", "os_process_soft_limit", 100).
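
Note: the proc manager keeps one counter per language in a dict; spawn_proc/2 increments it and can_spawn/2 gates new OS processes against the hard limit. The bookkeeping in isolation:

    HardLimit = 100,
    Counts = dict:update_counter(<<"javascript">>, 1, dict:new()),
    CanSpawn =
        case dict:find(<<"javascript">>, Counts) of
            {ok, Count} -> Count < HardLimit;
            error -> true
        end,
    true = CanSpawn.
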
diff --git a/src/couch_js/src/couch_js_query_servers.erl b/src/couch_js/src/couch_js_query_servers.erl
index 7c36f395f..d986690df 100644
--- a/src/couch_js/src/couch_js_query_servers.erl
+++ b/src/couch_js/src/couch_js_query_servers.erl
@@ -14,7 +14,7 @@
-export([try_compile/4]).
-export([start_doc_map/3, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]).
--export([reduce/3, rereduce/3,validate_doc_update/5]).
+-export([reduce/3, rereduce/3, validate_doc_update/5]).
-export([filter_docs/5]).
-export([filter_view/3]).
-export([finalize/2]).
@@ -27,14 +27,17 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("kernel/include/logger.hrl").
--define(SUMERROR, <<"The _sum function requires that map values be numbers, "
+-define(SUMERROR, <<
+ "The _sum function requires that map values be numbers, "
"arrays of numbers, or objects. Objects cannot be mixed with other "
"data structures. Objects can be arbitrarily nested, provided that the values "
- "for all fields are themselves numbers, arrays of numbers, or objects.">>).
-
--define(STATERROR, <<"The _stats function requires that map values be numbers "
- "or arrays of numbers, not '~p'">>).
+ "for all fields are themselves numbers, arrays of numbers, or objects."
+>>).
+-define(STATERROR, <<
+ "The _stats function requires that map values be numbers "
+ "or arrays of numbers, not '~p'"
+>>).
try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
try
@@ -54,20 +57,21 @@ try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
start_doc_map(Lang, Functions, Lib) ->
Proc = get_os_process(Lang),
case Lib of
- {[]} -> ok;
- Lib ->
- true = proc_prompt(Proc, [<<"add_lib">>, Lib])
+ {[]} -> ok;
+ Lib -> true = proc_prompt(Proc, [<<"add_lib">>, Lib])
end,
- lists:foreach(fun(FunctionSource) ->
- true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
- end, Functions),
+ lists:foreach(
+ fun(FunctionSource) ->
+ true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
+ end,
+ Functions
+ ),
{ok, Proc}.
map_doc_raw(Proc, Doc) ->
Json = couch_doc:to_json_obj(Doc, []),
{ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}.
-
stop_doc_map(nil) ->
ok;
stop_doc_map(Proc) ->
@@ -77,20 +81,24 @@ group_reductions_results([]) ->
[];
group_reductions_results(List) ->
{Heads, Tails} = lists:foldl(
- fun([H|T], {HAcc,TAcc}) ->
- {[H|HAcc], [T|TAcc]}
- end, {[], []}, List),
+ fun([H | T], {HAcc, TAcc}) ->
+ {[H | HAcc], [T | TAcc]}
+ end,
+ {[], []},
+ List
+ ),
case Tails of
- [[]|_] -> % no tails left
- [Heads];
- _ ->
- [Heads | group_reductions_results(Tails)]
+ % no tails left
+ [[] | _] ->
+ [Heads];
+ _ ->
+ [Heads | group_reductions_results(Tails)]
end.
-finalize(<<"_approx_count_distinct",_/binary>>, Reduction) ->
+finalize(<<"_approx_count_distinct", _/binary>>, Reduction) ->
true = hyper:is_hyper(Reduction),
{ok, round(hyper:card(Reduction))};
-finalize(<<"_stats",_/binary>>, Unpacked) ->
+finalize(<<"_stats", _/binary>>, Unpacked) ->
{ok, pack_stats(Unpacked)};
finalize(_RedSrc, Reduction) ->
{ok, Reduction}.
@@ -101,45 +109,51 @@ rereduce(Lang, RedSrcs, ReducedValues) ->
Grouped = group_reductions_results(ReducedValues),
Results = lists:zipwith(
fun
- (<<"_", _/binary>> = FunSrc, Values) ->
- {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
- Result;
- (FunSrc, Values) ->
- os_rereduce(Lang, [FunSrc], Values)
- end, RedSrcs, Grouped),
+ (<<"_", _/binary>> = FunSrc, Values) ->
+ {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
+ Result;
+ (FunSrc, Values) ->
+ os_rereduce(Lang, [FunSrc], Values)
+ end,
+ RedSrcs,
+ Grouped
+ ),
{ok, Results}.
reduce(_Lang, [], _KVs) ->
{ok, []};
reduce(Lang, RedSrcs, KVs) ->
- {OsRedSrcs, BuiltinReds} = lists:partition(fun
- (<<"_", _/binary>>) -> false;
- (_OsFun) -> true
- end, RedSrcs),
+ {OsRedSrcs, BuiltinReds} = lists:partition(
+ fun
+ (<<"_", _/binary>>) -> false;
+ (_OsFun) -> true
+ end,
+ RedSrcs
+ ),
{ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
{ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
-
recombine_reduce_results([], [], [], Acc) ->
{ok, lists:reverse(Acc)};
-recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
-recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
+recombine_reduce_results([<<"_", _/binary>> | RedSrcs], OsResults, [BRes | BuiltinResults], Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes | Acc]);
+recombine_reduce_results([_OsFun | RedSrcs], [OsR | OsResults], BuiltinResults, Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR | Acc]).
os_reduce(_Lang, [], _KVs) ->
{ok, []};
os_reduce(Lang, OsRedSrcs, KVs) ->
Proc = get_os_process(Lang),
- OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
- [true, Reductions] -> Reductions
- catch
- throw:{reduce_overflow_error, Msg} ->
- [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
- after
- ok = ret_os_process(Proc)
- end,
+ OsResults =
+ try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
+ [true, Reductions] -> Reductions
+ catch
+ throw:{reduce_overflow_error, Msg} ->
+ [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
+ after
+ ok = ret_os_process(Proc)
+ end,
{ok, OsResults}.
os_rereduce(Lang, OsRedSrcs, KVs) ->
@@ -158,7 +172,6 @@ os_rereduce(Lang, OsRedSrcs, KVs) ->
Error
end.
-
get_overflow_error([]) ->
undefined;
get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
@@ -166,26 +179,24 @@ get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
get_overflow_error([_ | Rest]) ->
get_overflow_error(Rest).
-
builtin_reduce(_Re, [], _KVs, Acc) ->
{ok, lists:reverse(Acc)};
-builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
+builtin_reduce(Re, [<<"_sum", _/binary>> | BuiltinReds], KVs, Acc) ->
Sum = builtin_sum_rows(KVs, 0),
Red = check_sum_overflow(?term_size(KVs), ?term_size(Sum), Sum),
- builtin_reduce(Re, BuiltinReds, KVs, [Red|Acc]);
-builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(Re, BuiltinReds, KVs, [Red | Acc]);
+builtin_reduce(reduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
Count = length(KVs),
- builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(reduce, BuiltinReds, KVs, [Count | Acc]);
+builtin_reduce(rereduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
Count = builtin_sum_rows(KVs, 0),
- builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(rereduce, BuiltinReds, KVs, [Count | Acc]);
+builtin_reduce(Re, [<<"_stats", _/binary>> | BuiltinReds], KVs, Acc) ->
Stats = builtin_stats(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]);
-builtin_reduce(Re, [<<"_approx_count_distinct",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(Re, BuiltinReds, KVs, [Stats | Acc]);
+builtin_reduce(Re, [<<"_approx_count_distinct", _/binary>> | BuiltinReds], KVs, Acc) ->
Distinct = approx_count_distinct(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Distinct|Acc]).
-
+ builtin_reduce(Re, BuiltinReds, KVs, [Distinct | Acc]).
builtin_sum_rows([], Acc) ->
Acc;
@@ -197,11 +208,13 @@ builtin_sum_rows([[_Key, Value] | RestKVs], Acc) ->
throw:{builtin_reduce_error, Obj} ->
Obj;
throw:{invalid_value, Reason, Cause} ->
- {[{<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, Reason}, {<<"caused_by">>, Cause}]}
+ {[
+ {<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, Reason},
+ {<<"caused_by">>, Cause}
+ ]}
end.
-
sum_values(Value, Acc) when is_number(Value), is_number(Acc) ->
Acc + Value;
sum_values(Value, Acc) when is_list(Value), is_list(Acc) ->
@@ -239,12 +252,12 @@ sum_objects(Rest, []) ->
sum_arrays([], []) ->
[];
-sum_arrays([_|_]=Xs, []) ->
+sum_arrays([_ | _] = Xs, []) ->
Xs;
-sum_arrays([], [_|_]=Ys) ->
+sum_arrays([], [_ | _] = Ys) ->
Ys;
-sum_arrays([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
- [X+Y | sum_arrays(Xs,Ys)];
+sum_arrays([X | Xs], [Y | Ys]) when is_number(X), is_number(Y) ->
+ [X + Y | sum_arrays(Xs, Ys)];
sum_arrays(Else, _) ->
throw_sum_error(Else).
@@ -271,37 +284,42 @@ log_sum_overflow(InSize, OutSize) ->
output_size => OutSize,
details => "reduce output must shrink more rapidly"
}),
- Fmt = "Reduce output must shrink more rapidly: "
- "input size: ~b "
- "output size: ~b",
+ Fmt =
+ "Reduce output must shrink more rapidly: "
+ "input size: ~b "
+ "output size: ~b",
Msg = iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])),
couch_log:error(Msg, []),
Msg.
builtin_stats(_, []) ->
{0, 0, 0, 0, 0};
-builtin_stats(_, [[_,First]|Rest]) ->
- lists:foldl(fun([_Key, Value], Acc) ->
- stat_values(Value, Acc)
- end, build_initial_accumulator(First), Rest).
+builtin_stats(_, [[_, First] | Rest]) ->
+ lists:foldl(
+ fun([_Key, Value], Acc) ->
+ stat_values(Value, Acc)
+ end,
+ build_initial_accumulator(First),
+ Rest
+ ).
stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
lists:zipwith(fun stat_values/2, Value, Acc);
stat_values({PreRed}, Acc) when is_list(PreRed) ->
stat_values(unpack_stats({PreRed}), Acc);
stat_values(Value, Acc) when is_number(Value) ->
- stat_values({Value, 1, Value, Value, Value*Value}, Acc);
+ stat_values({Value, 1, Value, Value, Value * Value}, Acc);
stat_values(Value, Acc) when is_number(Acc) ->
- stat_values(Value, {Acc, 1, Acc, Acc, Acc*Acc});
+ stat_values(Value, {Acc, 1, Acc, Acc, Acc * Acc});
stat_values(Value, Acc) when is_tuple(Value), is_tuple(Acc) ->
{Sum0, Cnt0, Min0, Max0, Sqr0} = Value,
{Sum1, Cnt1, Min1, Max1, Sqr1} = Acc,
{
- Sum0 + Sum1,
- Cnt0 + Cnt1,
- erlang:min(Min0, Min1),
- erlang:max(Max0, Max1),
- Sqr0 + Sqr1
+ Sum0 + Sum1,
+ Cnt0 + Cnt1,
+ erlang:min(Min0, Min1),
+ erlang:max(Max0, Max1),
+ Sqr0 + Sqr1
};
stat_values(Else, _Acc) ->
throw_stat_error(Else).
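
Note: stat_values/2 above merges two {Sum, Count, Min, Max, SumSqr} accumulators componentwise. Worked through for two rows with values 3 and 5:

    {S0, C0, Mn0, Mx0, Q0} = {3, 1, 3, 3, 9},      % build_initial_accumulator(3)
    {S1, C1, Mn1, Mx1, Q1} = {5, 1, 5, 5, 25},
    {8, 2, 3, 5, 34} =
        {S0 + S1, C0 + C1, erlang:min(Mn0, Mn1), erlang:max(Mx0, Mx1), Q0 + Q1}.
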
@@ -309,7 +327,7 @@ stat_values(Else, _Acc) ->
build_initial_accumulator(L) when is_list(L) ->
[build_initial_accumulator(X) || X <- L];
build_initial_accumulator(X) when is_number(X) ->
- {X, 1, X, X, X*X};
+ {X, 1, X, X, X * X};
build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) ->
AlreadyUnpacked;
build_initial_accumulator({Props}) ->
@@ -320,16 +338,21 @@ build_initial_accumulator(Else) ->
unpack_stats({PreRed}) when is_list(PreRed) ->
{
- get_number(<<"sum">>, PreRed),
- get_number(<<"count">>, PreRed),
- get_number(<<"min">>, PreRed),
- get_number(<<"max">>, PreRed),
- get_number(<<"sumsqr">>, PreRed)
+ get_number(<<"sum">>, PreRed),
+ get_number(<<"count">>, PreRed),
+ get_number(<<"min">>, PreRed),
+ get_number(<<"max">>, PreRed),
+ get_number(<<"sumsqr">>, PreRed)
}.
-
pack_stats({Sum, Cnt, Min, Max, Sqr}) ->
- {[{<<"sum">>,Sum}, {<<"count">>,Cnt}, {<<"min">>,Min}, {<<"max">>,Max}, {<<"sumsqr">>,Sqr}]};
+ {[
+ {<<"sum">>, Sum},
+ {<<"count">>, Cnt},
+ {<<"min">>, Min},
+ {<<"max">>, Max},
+ {<<"sumsqr">>, Sqr}
+ ]};
pack_stats({Packed}) ->
% Legacy code path before we had the finalize operation
{Packed};
@@ -338,35 +361,43 @@ pack_stats(Stats) when is_list(Stats) ->
get_number(Key, Props) ->
case couch_util:get_value(Key, Props) of
- X when is_number(X) ->
- X;
- undefined when is_binary(Key) ->
- get_number(binary_to_atom(Key, latin1), Props);
- undefined ->
- Msg = io_lib:format("user _stats input missing required field ~s (~p)",
- [Key, Props]),
- throw({invalid_value, iolist_to_binary(Msg)});
- Else ->
- Msg = io_lib:format("non-numeric _stats input received for ~s: ~w",
- [Key, Else]),
- throw({invalid_value, iolist_to_binary(Msg)})
+ X when is_number(X) ->
+ X;
+ undefined when is_binary(Key) ->
+ get_number(binary_to_atom(Key, latin1), Props);
+ undefined ->
+ Msg = io_lib:format(
+ "user _stats input missing required field ~s (~p)",
+ [Key, Props]
+ ),
+ throw({invalid_value, iolist_to_binary(Msg)});
+ Else ->
+ Msg = io_lib:format(
+ "non-numeric _stats input received for ~s: ~w",
+ [Key, Else]
+ ),
+ throw({invalid_value, iolist_to_binary(Msg)})
end.
% TODO allow customization of precision in the ddoc.
approx_count_distinct(reduce, KVs) ->
- lists:foldl(fun([[Key, _Id], _Value], Filter) ->
- hyper:insert(term_to_binary(Key), Filter)
- end, hyper:new(11), KVs);
+ lists:foldl(
+ fun([[Key, _Id], _Value], Filter) ->
+ hyper:insert(term_to_binary(Key), Filter)
+ end,
+ hyper:new(11),
+ KVs
+ );
approx_count_distinct(rereduce, Reds) ->
hyper:union([Filter || [_, Filter] <- Reds]).
% use the function stored in ddoc.validate_doc_update to test an update.
-spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when
- DDoc :: ddoc(),
+ DDoc :: ddoc(),
EditDoc :: doc(),
DiskDoc :: doc() | nil,
- Ctx :: user_ctx(),
- SecObj :: sec_obj().
+ Ctx :: user_ctx(),
+ SecObj :: sec_obj().
validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
@@ -376,8 +407,9 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
[<<"validate_doc_update">>],
[JsonEditDoc, JsonDiskDoc, Ctx, SecObj]
),
- if Resp == 1 -> ok; true ->
- couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
+ if
+ Resp == 1 -> ok;
+ true -> couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
end,
case Resp of
RespCode when RespCode =:= 1; RespCode =:= ok; RespCode =:= true ->
@@ -414,16 +446,20 @@ filter_view(DDoc, VName, Docs) ->
{ok, Passes}.
filter_docs(Req, Db, DDoc, FName, Docs) ->
- JsonReq = case Req of
- {json_req, JsonObj} ->
- JsonObj;
- #httpd{} = HttpReq ->
- couch_httpd_external:json_req_obj(HttpReq, Db)
- end,
+ JsonReq =
+ case Req of
+ {json_req, JsonObj} ->
+ JsonObj;
+ #httpd{} = HttpReq ->
+ couch_httpd_external:json_req_obj(HttpReq, Db)
+ end,
Options = json_doc_options(),
JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
- [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
- [JsonDocs, JsonReq]),
+ [true, Passes] = ddoc_prompt(
+ DDoc,
+ [<<"filters">>, FName],
+ [JsonDocs, JsonReq]
+ ),
{ok, Passes}.
ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
@@ -434,22 +470,23 @@ ddoc_prompt(DDoc, FunPath, Args) ->
proc_prompt(Proc, [<<"ddoc">>, DDocId, FunPath, Args])
end).
-with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
+with_ddoc_proc(#doc{id = DDocId, revs = {Start, [DiskRev | _]}} = DDoc, Fun) ->
Rev = couch_doc:rev_to_str({Start, DiskRev}),
DDocKey = {DDocId, Rev},
Proc = get_ddoc_process(DDoc, DDocKey),
- try Fun({Proc, DDocId})
+ try
+ Fun({Proc, DDocId})
after
ok = ret_os_process(Proc)
end.
proc_prompt(Proc, Args) ->
- case proc_prompt_raw(Proc, Args) of
- {json, Json} ->
- ?JSON_DECODE(Json);
- EJson ->
- EJson
- end.
+ case proc_prompt_raw(Proc, Args) of
+ {json, Json} ->
+ ?JSON_DECODE(Json);
+ EJson ->
+ EJson
+ end.
proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
apply(Mod, Func, [Proc#proc.pid, Args]).
@@ -472,34 +509,36 @@ get_os_process_timeout() ->
get_ddoc_process(#doc{} = DDoc, DDocKey) ->
% remove this case statement
- case gen_server:call(couch_js_proc_manager, {get_proc, DDoc, DDocKey}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- % process knows the ddoc
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_ddoc_process(DDoc, DDocKey)
- end;
- Error ->
- throw(Error)
+ case
+ gen_server:call(couch_js_proc_manager, {get_proc, DDoc, DDocKey}, get_os_process_timeout())
+ of
+ {ok, Proc, {QueryConfig}} ->
+ % process knows the ddoc
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_ddoc_process(DDoc, DDocKey)
+ end;
+ Error ->
+ throw(Error)
end.
get_os_process(Lang) ->
case gen_server:call(couch_js_proc_manager, {get_proc, Lang}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_os_process(Lang)
- end;
- Error ->
- throw(Error)
+ {ok, Proc, {QueryConfig}} ->
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_os_process(Lang)
+ end;
+ Error ->
+ throw(Error)
end.
ret_os_process(Proc) ->
@@ -513,7 +552,6 @@ throw_sum_error(Else) ->
throw_stat_error(Else) ->
throw({invalid_value, iolist_to_binary(io_lib:format(?STATERROR, [Else]))}).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -525,19 +563,38 @@ builtin_sum_rows_negative_test() ->
% it's only one document.
?assertEqual(A, builtin_sum_rows([["K", A]], [])),
{Result} = builtin_sum_rows([["K", A]], [1, 2, 3]),
- ?assertEqual({<<"error">>, <<"builtin_reduce_error">>},
- lists:keyfind(<<"error">>, 1, Result)).
+ ?assertEqual(
+ {<<"error">>, <<"builtin_reduce_error">>},
+ lists:keyfind(<<"error">>, 1, Result)
+ ).
sum_values_test() ->
?assertEqual(3, sum_values(1, 2)),
- ?assertEqual([2,4,6], sum_values(1, [1,4,6])),
- ?assertEqual([3,5,7], sum_values([3,2,4], [0,3,3])),
- X = {[{<<"a">>,1}, {<<"b">>,[1,2]}, {<<"c">>, {[{<<"d">>,3}]}},
- {<<"g">>,1}]},
- Y = {[{<<"a">>,2}, {<<"b">>,3}, {<<"c">>, {[{<<"e">>, 5}]}},
- {<<"f">>,1}, {<<"g">>,1}]},
- Z = {[{<<"a">>,3}, {<<"b">>,[4,2]}, {<<"c">>, {[{<<"d">>,3},{<<"e">>,5}]}},
- {<<"f">>,1}, {<<"g">>,2}]},
+ ?assertEqual([2, 4, 6], sum_values(1, [1, 4, 6])),
+ ?assertEqual([3, 5, 7], sum_values([3, 2, 4], [0, 3, 3])),
+ X =
+ {[
+ {<<"a">>, 1},
+ {<<"b">>, [1, 2]},
+ {<<"c">>, {[{<<"d">>, 3}]}},
+ {<<"g">>, 1}
+ ]},
+ Y =
+ {[
+ {<<"a">>, 2},
+ {<<"b">>, 3},
+ {<<"c">>, {[{<<"e">>, 5}]}},
+ {<<"f">>, 1},
+ {<<"g">>, 1}
+ ]},
+ Z =
+ {[
+ {<<"a">>, 3},
+ {<<"b">>, [4, 2]},
+ {<<"c">>, {[{<<"d">>, 3}, {<<"e">>, 5}]}},
+ {<<"f">>, 1},
+ {<<"g">>, 2}
+ ]},
?assertEqual(Z, sum_values(X, Y)),
?assertEqual(Z, sum_values(Y, X)).
@@ -546,8 +603,12 @@ sum_values_negative_test() ->
A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}],
B = ["error 1", "error 2"],
C = [<<"error 3">>, <<"error 4">>],
- KV = {[{<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, ?SUMERROR}, {<<"caused_by">>, <<"some cause">>}]},
+ KV =
+ {[
+ {<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, ?SUMERROR},
+ {<<"caused_by">>, <<"some cause">>}
+ ]},
?assertThrow({invalid_value, _, _}, sum_values(A, [1, 2, 3])),
?assertThrow({invalid_value, _, _}, sum_values(A, 0)),
?assertThrow({invalid_value, _, _}, sum_values(B, [1, 2])),
@@ -557,48 +618,103 @@ sum_values_negative_test() ->
stat_values_test() ->
?assertEqual({1, 2, 0, 1, 1}, stat_values(1, 0)),
?assertEqual({11, 2, 1, 10, 101}, stat_values(1, 10)),
- ?assertEqual([{9, 2, 2, 7, 53},
- {14, 2, 3, 11, 130},
- {18, 2, 5, 13, 194}
- ], stat_values([2,3,5], [7,11,13])).
+ ?assertEqual(
+ [
+ {9, 2, 2, 7, 53},
+ {14, 2, 3, 11, 130},
+ {18, 2, 5, 13, 194}
+ ],
+ stat_values([2, 3, 5], [7, 11, 13])
+ ).
reduce_stats_test() ->
- ?assertEqual([
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], test_reduce(<<"_stats">>, [[[null, key], 2]])),
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ test_reduce(<<"_stats">>, [[[null, key], 2]])
+ ),
- ?assertEqual([[
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ]], test_reduce(<<"_stats">>, [[[null, key],[1,2]]])),
+ ?assertEqual(
+ [
+ [
+ {[
+ {<<"sum">>, 1},
+ {<<"count">>, 1},
+ {<<"min">>, 1},
+ {<<"max">>, 1},
+ {<<"sumsqr">>, 1}
+ ]},
+ {[
+ {<<"sum">>, 2},
+ {<<"count">>, 1},
+ {<<"min">>, 2},
+ {<<"max">>, 2},
+ {<<"sumsqr">>, 4}
+ ]}
+ ]
+ ],
+ test_reduce(<<"_stats">>, [[[null, key], [1, 2]]])
+ ),
+
+ ?assertEqual(
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]},
+ element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))
+ ),
+
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {2, 1, 2, 2, 4}
+ ])
+ )
+ ),
+
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {[
+ {<<"sum">>, 2},
+ {<<"count">>, 1},
+ {<<"min">>, 2},
+ {<<"max">>, 2},
+ {<<"sumsqr">>, 4}
+ ]}
+ ])
+ )
+ ),
?assertEqual(
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- , element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {2, 1, 2, 2, 4}
- ]))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ]))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {2, 1, 2, 2, 4}
- ]))),
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {[
+ {<<"sum">>, 1},
+ {<<"count">>, 1},
+ {<<"min">>, 1},
+ {<<"max">>, 1},
+ {<<"sumsqr">>, 1}
+ ]},
+ {2, 1, 2, 2, 4}
+ ])
+ )
+ ),
ok.
test_reduce(Reducer, KVs) ->
diff --git a/src/couch_js/src/couch_js_sup.erl b/src/couch_js/src/couch_js_sup.erl
index e87546127..54b4da9eb 100644
--- a/src/couch_js/src/couch_js_sup.erl
+++ b/src/couch_js/src/couch_js_sup.erl
@@ -10,11 +10,9 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_js_sup).
-behaviour(supervisor).
-
-export([
start_link/0
]).
@@ -23,11 +21,9 @@
init/1
]).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
Flags = #{
strategy => one_for_one,
diff --git a/src/couch_lib/src/couch_lib_parse.erl b/src/couch_lib/src/couch_lib_parse.erl
index e0b6c85d8..f050c3aea 100644
--- a/src/couch_lib/src/couch_lib_parse.erl
+++ b/src/couch_lib/src/couch_lib_parse.erl
@@ -22,40 +22,38 @@ parse_boolean(true) ->
true;
parse_boolean(false) ->
false;
-
parse_boolean(Val) when is_binary(Val) ->
parse_boolean(binary_to_list(Val));
-
parse_boolean(Val) ->
case string:to_lower(Val) of
- "true" -> true;
- "false" -> false;
- _ ->
- Msg = io_lib:format("Invalid boolean: ~p", [Val]),
- {error, list_to_binary(Msg)}
+ "true" ->
+ true;
+ "false" ->
+ false;
+ _ ->
+ Msg = io_lib:format("Invalid boolean: ~p", [Val]),
+ {error, list_to_binary(Msg)}
end.
-
parse_integer(Val) when is_integer(Val) ->
Val;
parse_integer(Val) when is_list(Val) ->
case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
- {error, list_to_binary(Msg)}
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
+ {error, list_to_binary(Msg)}
end;
parse_integer(Val) when is_binary(Val) ->
binary_to_list(Val).
-
parse_non_neg_integer(Val) ->
case parse_integer(Val) of
- IntVal when IntVal >= 0 ->
- IntVal;
- _ ->
- Fmt = "Invalid value for non negative integer: ~p",
- Msg = io_lib:format(Fmt, [Val]),
- {error, list_to_binary(Msg)}
- end.
\ No newline at end of file
+ IntVal when IntVal >= 0 ->
+ IntVal;
+ _ ->
+ Fmt = "Invalid value for non negative integer: ~p",
+ Msg = io_lib:format(Fmt, [Val]),
+ {error, list_to_binary(Msg)}
+ end.
diff --git a/src/couch_log/src/couch_log.erl b/src/couch_log/src/couch_log.erl
index a8dc5d48d..b8a1ca4bd 100644
--- a/src/couch_log/src/couch_log.erl
+++ b/src/couch_log/src/couch_log.erl
@@ -12,7 +12,6 @@
-module(couch_log).
-
-export([
debug/2,
info/2,
@@ -26,44 +25,34 @@
set_level/1
]).
-
-spec debug(string(), list()) -> ok.
debug(Fmt, Args) -> log(debug, Fmt, Args).
-
-spec info(string(), list()) -> ok.
info(Fmt, Args) -> log(info, Fmt, Args).
-
-spec notice(string(), list()) -> ok.
notice(Fmt, Args) -> log(notice, Fmt, Args).
-
-spec warning(string(), list()) -> ok.
warning(Fmt, Args) -> log(warning, Fmt, Args).
-
-spec error(string(), list()) -> ok.
error(Fmt, Args) -> log(error, Fmt, Args).
-
-spec critical(string(), list()) -> ok.
critical(Fmt, Args) -> log(critical, Fmt, Args).
-
-spec alert(string(), list()) -> ok.
alert(Fmt, Args) -> log(alert, Fmt, Args).
-
-spec emergency(string(), list()) -> ok.
emergency(Fmt, Args) -> log(emergency, Fmt, Args).
-
-spec set_level(atom() | string() | integer()) -> true.
set_level(Level) ->
config:set("log", "level", couch_log_util:level_to_string(Level)).
-
-spec log(atom(), string(), list()) -> ok.
log(Level, Fmt, Args) ->
case couch_log_util:should_log(Level) of
diff --git a/src/couch_log/src/couch_log_app.erl b/src/couch_log/src/couch_log_app.erl
index 91a8ecc4d..28c8bb193 100644
--- a/src/couch_log/src/couch_log_app.erl
+++ b/src/couch_log/src/couch_log_app.erl
@@ -16,7 +16,6 @@
-export([start/2, stop/1]).
-
start(_Type, _StartArgs) ->
couch_log_sup:start_link().
diff --git a/src/couch_log/src/couch_log_config.erl b/src/couch_log/src/couch_log_config.erl
index 55925c39f..925973178 100644
--- a/src/couch_log/src/couch_log_config.erl
+++ b/src/couch_log/src/couch_log_config.erl
@@ -14,23 +14,19 @@
-module(couch_log_config).
-
-export([
init/0,
reconfigure/0,
get/1
]).
-
-define(MOD_NAME, couch_log_config_dyn).
-define(ERL_FILE, "couch_log_config_dyn.erl").
-
-spec init() -> ok.
init() ->
reconfigure().
-
-spec reconfigure() -> ok.
reconfigure() ->
{ok, ?MOD_NAME, Bin} = compile:forms(forms(), [verbose, report_errors]),
@@ -38,12 +34,10 @@ reconfigure() ->
{module, ?MOD_NAME} = code:load_binary(?MOD_NAME, ?ERL_FILE, Bin),
ok.
-
-spec get(atom()) -> term().
get(Key) ->
?MOD_NAME:get(Key).
-
-spec entries() -> [string()].
entries() ->
[
@@ -52,17 +46,19 @@ entries() ->
{max_message_size, "max_message_size", "16000"},
{strip_last_msg, "strip_last_msg", "true"},
{filter_fields, "filter_fields", "[pid, registered_name, error_info, messages]"}
- ].
-
+ ].
-spec forms() -> [erl_syntax:syntaxTree()].
forms() ->
- GetFunClauses = lists:map(fun({FunKey, CfgKey, Default}) ->
- FunVal = transform(FunKey, config:get("log", CfgKey, Default)),
- Patterns = [erl_syntax:abstract(FunKey)],
- Bodies = [erl_syntax:abstract(FunVal)],
- erl_syntax:clause(Patterns, none, Bodies)
- end, entries()),
+ GetFunClauses = lists:map(
+ fun({FunKey, CfgKey, Default}) ->
+ FunVal = transform(FunKey, config:get("log", CfgKey, Default)),
+ Patterns = [erl_syntax:abstract(FunKey)],
+ Bodies = [erl_syntax:abstract(FunVal)],
+ erl_syntax:clause(Patterns, none, Bodies)
+ end,
+ entries()
+ ),
Statements = [
% -module(?MOD_NAME)
@@ -74,11 +70,14 @@ forms() ->
% -export([lookup/1]).
erl_syntax:attribute(
erl_syntax:atom(export),
- [erl_syntax:list([
- erl_syntax:arity_qualifier(
- erl_syntax:atom(get),
- erl_syntax:integer(1))
- ])]
+ [
+ erl_syntax:list([
+ erl_syntax:arity_qualifier(
+ erl_syntax:atom(get),
+ erl_syntax:integer(1)
+ )
+ ])
+ ]
),
% list(Key) -> Value.
@@ -86,27 +85,22 @@ forms() ->
],
[erl_syntax:revert(X) || X <- Statements].
-
transform(level, LevelStr) ->
couch_log_util:level_to_atom(LevelStr);
-
transform(level_int, LevelStr) ->
Level = couch_log_util:level_to_atom(LevelStr),
couch_log_util:level_to_integer(Level);
-
transform(max_message_size, SizeStr) ->
try list_to_integer(SizeStr) of
Size -> Size
- catch _:_ ->
- 16000
+ catch
+ _:_ ->
+ 16000
end;
-
transform(strip_last_msg, "false") ->
false;
-
transform(strip_last_msg, _) ->
true;
-
transform(filter_fields, FieldsStr) ->
Default = [pid, registered_name, error_info, messages],
case parse_term(FieldsStr) of
@@ -121,7 +115,6 @@ transform(filter_fields, FieldsStr) ->
Default
end.
-
parse_term(List) ->
{ok, Tokens, _} = erl_scan:string(List ++ "."),
erl_parse:parse_term(Tokens).
diff --git a/src/couch_log/src/couch_log_config_dyn.erl b/src/couch_log/src/couch_log_config_dyn.erl
index 1e1c927ae..ff781d3a0 100644
--- a/src/couch_log/src/couch_log_config_dyn.erl
+++ b/src/couch_log/src/couch_log_config_dyn.erl
@@ -17,12 +17,10 @@
-module(couch_log_config_dyn).
-
-export([
get/1
]).
-
get(level) -> info;
get(level_int) -> 2;
get(max_message_size) -> 16000;
diff --git a/src/couch_log/src/couch_log_error_logger_h.erl b/src/couch_log/src/couch_log_error_logger_h.erl
index c0765c61a..ff7ae045f 100644
--- a/src/couch_log/src/couch_log_error_logger_h.erl
+++ b/src/couch_log/src/couch_log_error_logger_h.erl
@@ -14,10 +14,8 @@
% https://github.com/basho/lager which is available under the
% above marked ASFL v2 license.
-
-module(couch_log_error_logger_h).
-
-behaviour(gen_event).
-export([
@@ -29,29 +27,22 @@
code_change/3
]).
-
init(_) ->
{ok, undefined}.
-
terminate(_Reason, _St) ->
ok.
-
handle_call(_, St) ->
{ok, ignored, St}.
-
handle_event(Event, St) ->
Entry = couch_log_formatter:format(Event),
ok = couch_log_server:log(Entry),
{ok, St}.
-
handle_info(_, St) ->
{ok, St}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl
index 3553666f6..2ce0fba6d 100644
--- a/src/couch_log/src/couch_log_formatter.erl
+++ b/src/couch_log/src/couch_log_formatter.erl
@@ -14,10 +14,8 @@
% from lager's error_logger_lager_h.erl which is available
% under the ASFv2 license.
-
-module(couch_log_formatter).
-
-export([
format/4,
format/3,
@@ -29,13 +27,10 @@
format_args/3
]).
-
-include("couch_log.hrl").
-
-define(DEFAULT_TRUNCATION, 1024).
-
format(Level, Pid, Fmt, Args) ->
#log_entry{
level = couch_log_util:level_to_atom(Level),
@@ -45,7 +40,6 @@ format(Level, Pid, Fmt, Args) ->
time_stamp = couch_log_util:iso8601_timestamp()
}.
-
format(Level, Pid, Msg) ->
#log_entry{
level = couch_log_util:level_to_atom(Level),
@@ -55,79 +49,82 @@ format(Level, Pid, Msg) ->
time_stamp = couch_log_util:iso8601_timestamp()
}.
-
format(Event) ->
try
do_format(Event)
catch
Tag:Err ->
Msg = "Encountered error ~w when formatting ~w",
- format(error, self(), Msg, [{Tag, Err}, Event])
+ format(error, self(), Msg, [{Tag, Err}, Event])
end.
-
do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
%% gen_server terminate
[Name, LastMsg0, State, Reason | Extra] = Args,
- LastMsg = case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt = "gen_server ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p~n extra: ~p",
+ LastMsg =
+ case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
+ MsgFmt =
+ "gen_server ~w terminated with reason: ~s~n" ++
+ " last msg: ~p~n state: ~p~n extra: ~p",
MsgArgs = [Name, format_reason(Reason), LastMsg, State, Extra],
format(error, Pid, MsgFmt, MsgArgs);
-
do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
%% gen_fsm terminate
[Name, LastMsg0, StateName, State, Reason | Extra] = Args,
- LastMsg = case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt = "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p~n extra: ~p",
+ LastMsg =
+ case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
+ MsgFmt =
+ "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
+ " last msg: ~p~n state: ~p~n extra: ~p",
MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State, Extra],
format(error, Pid, MsgFmt, MsgArgs);
-
do_format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
%% gen_event handler terminate
[ID, Name, LastMsg0, State, Reason] = Args,
- LastMsg = case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt = "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p",
+ LastMsg =
+ case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
+ MsgFmt =
+ "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
+ " last msg: ~p~n state: ~p",
MsgArgs = [ID, Name, format_reason(Reason), LastMsg, State],
format(error, Pid, MsgFmt, MsgArgs);
-
do_format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) ->
% These messages are for whenever any process exits due
% to a throw or error. We intercept here to remove the
% extra newlines.
NewMsg = lists:sublist(Msg, length(Msg) - 1),
format(error, emulator, NewMsg);
-
do_format({error, _GL, {Pid, Fmt, Args}}) ->
format(error, Pid, Fmt, Args);
-
do_format({error_report, _GL, {Pid, std_error, D}}) ->
format(error, Pid, print_silly_list(D));
-
do_format({error_report, _GL, {Pid, supervisor_report, D}}) ->
case lists:sort(D) of
- [{errorContext, Ctx}, {offender, Off},
- {reason, Reason}, {supervisor, Name}] ->
+ [
+ {errorContext, Ctx},
+ {offender, Off},
+ {reason, Reason},
+ {supervisor, Name}
+ ] ->
Offender = format_offender(Off),
- MsgFmt = "Supervisor ~w had child ~s exit " ++
- "with reason ~s in context ~w",
+ MsgFmt =
+ "Supervisor ~w had child ~s exit " ++
+ "with reason ~s in context ~w",
Args = [
supervisor_name(Name),
Offender,
@@ -138,20 +135,15 @@ do_format({error_report, _GL, {Pid, supervisor_report, D}}) ->
_ ->
format(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D))
end;
-
do_format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) ->
Msg = "CRASH REPORT " ++ format_crash_report(Report, Neighbors),
format(error, Pid, Msg);
-
do_format({warning_msg, _GL, {Pid, Fmt, Args}}) ->
format(warning, Pid, Fmt, Args);
-
do_format({warning_report, _GL, {Pid, std_warning, Report}}) ->
format(warning, Pid, print_silly_list(Report));
-
do_format({info_msg, _GL, {Pid, Fmt, Args}}) ->
format(info, Pid, Fmt, Args);
-
do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
case lists:sort(D) of
[{application, App}, {exited, Reason}, {type, _Type}] ->
@@ -160,10 +152,8 @@ do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
_ ->
format(info, Pid, print_silly_list(D))
end;
-
do_format({info_report, _GL, {Pid, std_info, D}}) ->
format(info, Pid, "~w", [D]);
-
do_format({info_report, _GL, {Pid, progress, D}}) ->
case lists:sort(D) of
[{application, App}, {started_at, Node}] ->
@@ -177,25 +167,25 @@ do_format({info_report, _GL, {Pid, progress, D}}) ->
_ ->
format(info, Pid, "PROGRESS REPORT " ++ print_silly_list(D))
end;
-
do_format(Event) ->
format(warning, self(), "Unexpected error_logger event ~w", [Event]).
-
format_crash_report(Report, Neighbours) ->
Pid = get_value(pid, Report),
- Name = case get_value(registered_name, Report) of
- undefined ->
- pid_to_list(Pid);
- Atom ->
- io_lib:format("~s (~w)", [Atom, Pid])
- end,
+ Name =
+ case get_value(registered_name, Report) of
+ undefined ->
+ pid_to_list(Pid);
+ Atom ->
+ io_lib:format("~s (~w)", [Atom, Pid])
+ end,
{Class, Reason, Trace} = get_value(error_info, Report),
ReasonStr = format_reason({Reason, Trace}),
- Type = case Class of
- exit -> "exited";
- _ -> "crashed"
- end,
+ Type =
+ case Class of
+ exit -> "exited";
+ _ -> "crashed"
+ end,
MsgFmt = "Process ~s with ~w neighbors ~s with reason: ~s",
Args = [Name, length(Neighbours), Type, ReasonStr],
Msg = io_lib:format(MsgFmt, Args),
@@ -206,7 +196,6 @@ format_crash_report(Report, Neighbours) ->
Msg ++ "; " ++ print_silly_list(Rest)
end.
-
format_offender(Off) ->
case get_value(mfargs, Off) of
undefined ->
@@ -219,129 +208,131 @@ format_offender(Off) ->
%% In 2014 the error report changed from `name' to
%% `id', so try that first.
- Name = case get_value(id, Off) of
- undefined ->
- get_value(name, Off);
- Id ->
- Id
- end,
+ Name =
+ case get_value(id, Off) of
+ undefined ->
+ get_value(name, Off);
+ Id ->
+ Id
+ end,
Args = [Name, MFA, get_value(pid, Off)],
io_lib:format("~p started with ~s at ~w", Args)
end.
-
format_reason({'function not exported', [{M, F, A} | Trace]}) ->
- ["call to unexported function ", format_mfa({M, F, A}),
- " at ", format_trace(Trace)];
-
+ [
+ "call to unexported function ",
+ format_mfa({M, F, A}),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({'function not exported' = C, [{M, F, A, _Props} | Rest]}) ->
%% Drop line number from undefined function
format_reason({C, [{M, F, A} | Rest]});
-
format_reason({undef, [MFA | Trace]}) ->
- ["call to undefined function ", format_mfa(MFA),
- " at ", format_trace(Trace)];
-
+ [
+ "call to undefined function ",
+ format_mfa(MFA),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({bad_return, {MFA, Val}}) ->
["bad return value ", print_val(Val), " from ", format_mfa(MFA)];
-
format_reason({bad_return_value, Val}) ->
["bad return value ", print_val(Val)];
-
format_reason({{bad_return_value, Val}, MFA}) ->
["bad return value ", print_val(Val), " at ", format_mfa(MFA)];
-
format_reason({{badrecord, Record}, Trace}) ->
["bad record ", print_val(Record), " at ", format_trace(Trace)];
-
format_reason({{case_clause, Val}, Trace}) ->
["no case clause matching ", print_val(Val), " at ", format_trace(Trace)];
-
format_reason({function_clause, [MFA | Trace]}) ->
- ["no function clause matching ", format_mfa(MFA),
- " at ", format_trace(Trace)];
-
+ [
+ "no function clause matching ",
+ format_mfa(MFA),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({if_clause, Trace}) ->
- ["no true branch found while evaluating if expression at ",
- format_trace(Trace)];
-
+ [
+ "no true branch found while evaluating if expression at ",
+ format_trace(Trace)
+ ];
format_reason({{try_clause, Val}, Trace}) ->
["no try clause matching ", print_val(Val), " at ", format_trace(Trace)];
-
format_reason({badarith, Trace}) ->
["bad arithmetic expression at ", format_trace(Trace)];
-
format_reason({{badmatch, Val}, Trace}) ->
- ["no match of right hand value ", print_val(Val),
- " at ", format_trace(Trace)];
-
+ [
+ "no match of right hand value ",
+ print_val(Val),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({emfile, Trace}) ->
- ["maximum number of file descriptors exhausted, check ulimit -n; ",
- format_trace(Trace)];
-
+ [
+ "maximum number of file descriptors exhausted, check ulimit -n; ",
+ format_trace(Trace)
+ ];
format_reason({system_limit, [{M, F, A} | Trace]}) ->
- Limit = case {M, F} of
- {erlang, open_port} ->
- "maximum number of ports exceeded";
- {erlang, spawn} ->
- "maximum number of processes exceeded";
- {erlang, spawn_opt} ->
- "maximum number of processes exceeded";
- {erlang, list_to_atom} ->
- "tried to create an atom larger than 255, or maximum atom count exceeded";
- {ets, new} ->
- "maximum number of ETS tables exceeded";
- _ ->
- format_mfa({M, F, A})
- end,
+ Limit =
+ case {M, F} of
+ {erlang, open_port} ->
+ "maximum number of ports exceeded";
+ {erlang, spawn} ->
+ "maximum number of processes exceeded";
+ {erlang, spawn_opt} ->
+ "maximum number of processes exceeded";
+ {erlang, list_to_atom} ->
+ "tried to create an atom larger than 255, or maximum atom count exceeded";
+ {ets, new} ->
+ "maximum number of ETS tables exceeded";
+ _ ->
+ format_mfa({M, F, A})
+ end,
["system limit: ", Limit, " at ", format_trace(Trace)];
-
format_reason({badarg, [MFA | Trace]}) ->
- ["bad argument in call to ", format_mfa(MFA),
- " at ", format_trace(Trace)];
-
+ [
+ "bad argument in call to ",
+ format_mfa(MFA),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({{badarg, Stack}, _}) ->
format_reason({badarg, Stack});
-
format_reason({{badarity, {Fun, Args}}, Trace}) ->
{arity, Arity} = lists:keyfind(arity, 1, erlang:fun_info(Fun)),
MsgFmt = "function called with wrong arity of ~w instead of ~w at ",
[io_lib:format(MsgFmt, [length(Args), Arity]), format_trace(Trace)];
-
format_reason({noproc, MFA}) ->
["no such process or port in call to ", format_mfa(MFA)];
-
format_reason({{badfun, Term}, Trace}) ->
["bad function ", print_val(Term), " called at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A} | _] = Trace})
- when is_atom(M), is_atom(F), is_integer(A) ->
+format_reason({Reason, [{M, F, A} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_integer(A)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A} | _] = Trace})
- when is_atom(M), is_atom(F), is_list(A) ->
+format_reason({Reason, [{M, F, A} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_list(A)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A, Props} | _] = Trace})
- when is_atom(M), is_atom(F), is_integer(A), is_list(Props) ->
+format_reason({Reason, [{M, F, A, Props} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_integer(A), is_list(Props)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A, Props} | _] = Trace})
- when is_atom(M), is_atom(F), is_list(A), is_list(Props) ->
+format_reason({Reason, [{M, F, A, Props} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_list(A), is_list(Props)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
format_reason(Reason) ->
{Str, _} = couch_log_trunc_io:print(Reason, 500),
Str.
-
format_mfa({M, F, A}) when is_list(A) ->
{FmtStr, Args} = format_args(A, [], []),
io_lib:format("~w:~w(" ++ FmtStr ++ ")", [M, F | Args]);
-
format_mfa({M, F, A}) when is_integer(A) ->
io_lib:format("~w:~w/~w", [M, F, A]);
-
format_mfa({M, F, A, Props}) when is_list(Props) ->
case get_value(line, Props) of
undefined ->
@@ -349,47 +340,35 @@ format_mfa({M, F, A, Props}) when is_list(Props) ->
Line ->
[format_mfa({M, F, A}), io_lib:format("(line:~w)", [Line])]
end;
-
format_mfa(Trace) when is_list(Trace) ->
format_trace(Trace);
-
format_mfa(Other) ->
io_lib:format("~w", [Other]).
-
format_trace([MFA]) ->
[trace_mfa(MFA)];
-
format_trace([MFA | Rest]) ->
[trace_mfa(MFA), " <= ", format_trace(Rest)];
-
format_trace(Other) ->
io_lib:format("~w", [Other]).
-
trace_mfa({M, F, A}) when is_list(A) ->
format_mfa({M, F, length(A)});
-
trace_mfa({M, F, A, Props}) when is_list(A) ->
format_mfa({M, F, length(A), Props});
-
trace_mfa(Other) ->
format_mfa(Other).
-
format_args([], FmtAcc, ArgsAcc) ->
{string:join(lists:reverse(FmtAcc), ", "), lists:reverse(ArgsAcc)};
-
-format_args([H|T], FmtAcc, ArgsAcc) ->
+format_args([H | T], FmtAcc, ArgsAcc) ->
{Str, _} = couch_log_trunc_io:print(H, 100),
format_args(T, ["~s" | FmtAcc], [Str | ArgsAcc]).
-
maybe_truncate(Fmt, Args) ->
MaxMsgSize = couch_log_config:get(max_message_size),
couch_log_trunc_io:format(Fmt, Args, MaxMsgSize).
-
maybe_truncate(Msg) ->
MaxMsgSize = couch_log_config:get(max_message_size),
case iolist_size(Msg) > MaxMsgSize of
@@ -402,7 +381,6 @@ maybe_truncate(Msg) ->
Msg
end.
-
print_silly_list(L) when is_list(L) ->
case couch_log_util:string_p(L) of
true ->
@@ -410,23 +388,21 @@ print_silly_list(L) when is_list(L) ->
_ ->
print_silly_list(L, [], [])
end;
-
print_silly_list(L) ->
{Str, _} = couch_log_trunc_io:print(L, ?DEFAULT_TRUNCATION),
Str.
-
print_silly_list([], Fmt, Acc) ->
- couch_log_trunc_io:format(string:join(lists:reverse(Fmt), ", "),
- lists:reverse(Acc), ?DEFAULT_TRUNCATION);
-
+ couch_log_trunc_io:format(
+ string:join(lists:reverse(Fmt), ", "),
+ lists:reverse(Acc),
+ ?DEFAULT_TRUNCATION
+ );
print_silly_list([{K, V} | T], Fmt, Acc) ->
print_silly_list(T, ["~p: ~p" | Fmt], [V, K | Acc]);
-
print_silly_list([H | T], Fmt, Acc) ->
print_silly_list(T, ["~p" | Fmt], [H | Acc]).
-
print_val(Val) ->
{Str, _} = couch_log_trunc_io:print(Val, 500),
Str.
@@ -439,7 +415,6 @@ filter_silly_list(KV) ->
filter_silly_list([], _) ->
[];
-
filter_silly_list([{K, V} | T], Filter) ->
case lists:member(K, Filter) of
true ->
@@ -447,11 +422,9 @@ filter_silly_list([{K, V} | T], Filter) ->
false ->
[{K, V} | filter_silly_list(T, Filter)]
end;
-
filter_silly_list([H | T], Filter) ->
[H | filter_silly_list(T, Filter)].
-
get_value(Key, Value) ->
get_value(Key, Value, undefined).
diff --git a/src/couch_log/src/couch_log_monitor.erl b/src/couch_log/src/couch_log_monitor.erl
index d7620e290..eb8d13380 100644
--- a/src/couch_log/src/couch_log_monitor.erl
+++ b/src/couch_log/src/couch_log_monitor.erl
@@ -15,7 +15,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0
]).
@@ -29,14 +28,11 @@
code_change/3
]).
-
-define(HANDLER_MOD, couch_log_error_logger_h).
-
start_link() ->
gen_server:start_link(?MODULE, [], []).
-
% OTP_RELEASE defined in OTP >= 21 only
-ifdef(OTP_RELEASE).
@@ -45,7 +41,7 @@ init(_) ->
% however that call doesn't call a supervised handler so we do the same
% thing add_report_handler/1 does but call gen_event:add_sup_handler/3
% instead of gen_event:add_handler/3.
- Opts = #{level => info, filter_default => log},
+ Opts = #{level => info, filter_default => log},
_ = logger:add_handler(error_logger, error_logger, Opts),
ok = gen_event:add_sup_handler(error_logger, ?HANDLER_MOD, []),
{ok, nil}.
@@ -59,26 +55,19 @@ init(_) ->
-endif.
-
terminate(_, _) ->
ok.
-
handle_call(_Msg, _From, St) ->
{reply, ignored, St}.
-
handle_cast(_Msg, St) ->
{noreply, St}.
-
handle_info({gen_event_EXIT, ?HANDLER_MOD, Reason}, St) ->
{stop, Reason, St};
-
-
handle_info(_Msg, St) ->
{noreply, St}.
-
code_change(_, State, _) ->
{ok, State}.
diff --git a/src/couch_log/src/couch_log_server.erl b/src/couch_log/src/couch_log_server.erl
index 8432b9aa3..05cf92a75 100644
--- a/src/couch_log/src/couch_log_server.erl
+++ b/src/couch_log/src/couch_log_server.erl
@@ -13,7 +13,6 @@
-module(couch_log_server).
-behavior(gen_server).
-
-export([
start_link/0,
reconfigure/0,
@@ -21,42 +20,35 @@
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
]).
-
-include("couch_log.hrl").
-
-record(st, {
writer
}).
-
-ifdef(TEST).
-define(SEND(Entry), gen_server:call(?MODULE, {log, Entry})).
-else.
-define(SEND(Entry), gen_server:cast(?MODULE, {log, Entry})).
-endif.
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
reconfigure() ->
gen_server:call(?MODULE, reconfigure).
-
log(Entry) ->
?SEND(Entry).
-
init(_) ->
couch_util:set_mqd_off_heap(?MODULE),
process_flag(trap_exit, true),
@@ -64,17 +56,14 @@ init(_) ->
writer = couch_log_writer:init()
}}.
-
terminate(Reason, St) ->
ok = couch_log_writer:terminate(Reason, St#st.writer).
-
handle_call(reconfigure, _From, St) ->
ok = couch_log_writer:terminate(reconfiguring, St#st.writer),
{reply, ok, St#st{
writer = couch_log_writer:init()
}};
-
handle_call({log, Entry}, _From, St) ->
% We re-check if we should log here in case an operator
% adjusted the log level and then realized it was a bad
@@ -86,22 +75,18 @@ handle_call({log, Entry}, _From, St) ->
false ->
{reply, ok, St}
end;
-
handle_call(Ignore, From, St) ->
Args = [?MODULE, Ignore],
Entry = couch_log_formatter:format(error, ?MODULE, "~s ignored ~p", Args),
handle_call({log, Entry}, From, St).
-
handle_cast(Msg, St) ->
{reply, ok, NewSt} = handle_call(Msg, nil, St),
{noreply, NewSt}.
-
handle_info(Msg, St) ->
{reply, ok, NewSt} = handle_call(Msg, nil, St),
{noreply, NewSt}.
-
code_change(_Vsn, St, _Extra) ->
{ok, St}.
diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl
index 6cb8d7395..0167192d8 100644
--- a/src/couch_log/src/couch_log_sup.erl
+++ b/src/couch_log/src/couch_log_sup.erl
@@ -23,12 +23,10 @@
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
ok = couch_log_config:init(),
{ok, {{one_for_one, 10, 10}, children()}}.
-
children() ->
[
{
@@ -74,7 +72,6 @@ handle_config_change("log", Key, _, _, S) ->
end,
notify_listeners(),
{ok, S};
-
handle_config_change(_, _, _, _, S) ->
{ok, S}.
@@ -84,9 +81,12 @@ handle_config_terminate(_Server, _Reason, _State) ->
-ifdef(TEST).
notify_listeners() ->
Listeners = application:get_env(couch_log, config_listeners, []),
- lists:foreach(fun(L) ->
- L ! couch_log_config_change_finished
- end, Listeners).
+ lists:foreach(
+ fun(L) ->
+ L ! couch_log_config_change_finished
+ end,
+ Listeners
+ ).
-else.
notify_listeners() ->
ok.
diff --git a/src/couch_log/src/couch_log_trunc_io.erl b/src/couch_log/src/couch_log_trunc_io.erl
index 636dfdc1f..9736e87e1 100644
--- a/src/couch_log/src/couch_log_trunc_io.erl
+++ b/src/couch_log/src/couch_log_trunc_io.erl
@@ -36,33 +36,37 @@
-module(couch_log_trunc_io).
-author('matthias@corelatus.se').
%% And thanks to Chris Newcombe for a bug fix
--export([format/3, format/4, print/2, print/3, fprint/2, fprint/3, safe/2]). % interface functions
+
+% interface functions
+-export([format/3, format/4, print/2, print/3, fprint/2, fprint/3, safe/2]).
-version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
--type option() :: {'depth', integer()}
+-type option() ::
+ {'depth', integer()}
| {'lists_as_strings', boolean()}
| {'force_strings', boolean()}.
-type options() :: [option()].
-record(print_options, {
- %% negative depth means no depth limiting
- depth = -1 :: integer(),
- %% whether to print lists as strings, if possible
- lists_as_strings = true :: boolean(),
- %% force strings, or binaries to be printed as a string,
- %% even if they're not printable
- force_strings = false :: boolean()
- }).
+ %% negative depth means no depth limiting
+ depth = -1 :: integer(),
+ %% whether to print lists as strings, if possible
+ lists_as_strings = true :: boolean(),
+ %% force strings, or binaries to be printed as a string,
+ %% even if they're not printable
+ force_strings = false :: boolean()
+}).
format(Fmt, Args, Max) ->
format(Fmt, Args, Max, []).
format(Fmt, Args, Max, Options) ->
- try couch_log_trunc_io_fmt:format(Fmt, Args, Max, Options)
+ try
+ couch_log_trunc_io_fmt:format(Fmt, Args, Max, Options)
catch
_What:_Why ->
erlang:error(badarg, [Fmt, Args])
@@ -74,7 +78,6 @@ format(Fmt, Args, Max, Options) ->
fprint(Term, Max) ->
fprint(Term, Max, []).
-
%% @doc Returns an flattened list containing the ASCII representation of the given
%% term.
-spec fprint(term(), pos_integer(), options()) -> string().
@@ -108,30 +111,29 @@ print(Term, Max) ->
print(Term, Max, Options) when is_list(Options) ->
%% need to convert the proplist to a record
print(Term, Max, prepare_options(Options, #print_options{}));
-
-print(Term, _Max, #print_options{force_strings=true}) when not is_list(Term), not is_binary(Term), not is_atom(Term) ->
+print(Term, _Max, #print_options{force_strings = true}) when
+ not is_list(Term), not is_binary(Term), not is_atom(Term)
+->
erlang:error(badarg);
-
print(_, Max, _Options) when Max < 0 -> {"...", 3};
-print(_, _, #print_options{depth=0}) -> {"...", 3};
-
-
+print(_, _, #print_options{depth = 0}) ->
+ {"...", 3};
%% @doc We assume atoms, floats, funs, integers, PIDs, ports and refs never need
%% to be truncated. This isn't strictly true, someone could make an
%% arbitrarily long bignum. Let's assume that won't happen unless someone
%% is being malicious.
%%
-print(Atom, _Max, #print_options{force_strings=NoQuote}) when is_atom(Atom) ->
+print(Atom, _Max, #print_options{force_strings = NoQuote}) when is_atom(Atom) ->
L = atom_to_list(Atom),
- R = case atom_needs_quoting_start(L) andalso not NoQuote of
- true -> lists:flatten([$', L, $']);
- false -> L
- end,
+ R =
+ case atom_needs_quoting_start(L) andalso not NoQuote of
+ true -> lists:flatten([$', L, $']);
+ false -> L
+ end,
{R, length(R)};
-
-print(<<>>, _Max, #print_options{depth=1}) ->
+print(<<>>, _Max, #print_options{depth = 1}) ->
{"<<>>", 4};
-print(Bin, _Max, #print_options{depth=1}) when is_binary(Bin) ->
+print(Bin, _Max, #print_options{depth = 1}) when is_binary(Bin) ->
{"<<...>>", 7};
print(<<>>, _Max, Options) ->
case Options#print_options.force_strings of
@@ -140,70 +142,79 @@ print(<<>>, _Max, Options) ->
false ->
{"<<>>", 4}
end;
-
print(Binary, 0, _Options) when is_bitstring(Binary) ->
{"<<..>>", 6};
-
print(Bin, Max, _Options) when is_binary(Bin), Max < 2 ->
{"<<...>>", 7};
print(Binary, Max, Options) when is_binary(Binary) ->
B = binary_to_list(Binary, 1, lists:min([Max, byte_size(Binary)])),
- {Res, Length} = case Options#print_options.lists_as_strings orelse
- Options#print_options.force_strings of
- true ->
- Depth = Options#print_options.depth,
- MaxSize = (Depth - 1) * 4,
- %% check if we need to truncate based on depth
- In = case Depth > -1 andalso MaxSize < length(B) andalso
- not Options#print_options.force_strings of
- true ->
- string:substr(B, 1, MaxSize);
- false -> B
- end,
- MaxLen = case Options#print_options.force_strings of
- true ->
- Max;
- false ->
- %% make room for the leading doublequote
- Max - 1
- end,
- try alist(In, MaxLen, Options) of
- {L0, Len0} ->
- case Options#print_options.force_strings of
- false ->
- case B /= In of
- true ->
- {[$", L0, "..."], Len0+4};
- false ->
- {[$"|L0], Len0+1}
- end;
+ {Res, Length} =
+ case
+ Options#print_options.lists_as_strings orelse
+ Options#print_options.force_strings
+ of
+ true ->
+ Depth = Options#print_options.depth,
+ MaxSize = (Depth - 1) * 4,
+ %% check if we need to truncate based on depth
+ In =
+ case
+ Depth > -1 andalso MaxSize < length(B) andalso
+ not Options#print_options.force_strings
+ of
true ->
- {L0, Len0}
- end
- catch
- throw:{unprintable, C} ->
- Index = string:chr(In, C),
- case Index > 1 andalso Options#print_options.depth =< Index andalso
- Options#print_options.depth > -1 andalso
- not Options#print_options.force_strings of
+ string:substr(B, 1, MaxSize);
+ false ->
+ B
+ end,
+ MaxLen =
+ case Options#print_options.force_strings of
true ->
- %% print first Index-1 characters followed by ...
- {L0, Len0} = alist_start(string:substr(In, 1, Index - 1), Max - 1, Options),
- {L0++"...", Len0+3};
+ Max;
false ->
- list_body(In, Max-4, dec_depth(Options), true)
- end
- end;
- _ ->
- list_body(B, Max-4, dec_depth(Options), true)
- end,
+ %% make room for the leading doublequote
+ Max - 1
+ end,
+ try alist(In, MaxLen, Options) of
+ {L0, Len0} ->
+ case Options#print_options.force_strings of
+ false ->
+ case B /= In of
+ true ->
+ {[$", L0, "..."], Len0 + 4};
+ false ->
+ {[$" | L0], Len0 + 1}
+ end;
+ true ->
+ {L0, Len0}
+ end
+ catch
+ throw:{unprintable, C} ->
+ Index = string:chr(In, C),
+ case
+ Index > 1 andalso Options#print_options.depth =< Index andalso
+ Options#print_options.depth > -1 andalso
+ not Options#print_options.force_strings
+ of
+ true ->
+ %% print first Index-1 characters followed by ...
+ {L0, Len0} = alist_start(
+ string:substr(In, 1, Index - 1), Max - 1, Options
+ ),
+ {L0 ++ "...", Len0 + 3};
+ false ->
+ list_body(In, Max - 4, dec_depth(Options), true)
+ end
+ end;
+ _ ->
+ list_body(B, Max - 4, dec_depth(Options), true)
+ end,
case Options#print_options.force_strings of
true ->
{Res, Length};
_ ->
- {["<<", Res, ">>"], Length+4}
+ {["<<", Res, ">>"], Length + 4}
end;
-
%% bitstrings are binary's evil brother who doesn't end on an 8 bit boundary.
%% This makes printing them extremely annoying, so list_body/list_bodyc has
%% some magic for dealing with the output of bitstring_to_list, which returns
@@ -214,27 +225,26 @@ print({inline_bitstring, B}, _Max, _Options) when is_bitstring(B) ->
<<Value:Size>> = B,
ValueStr = integer_to_list(Value),
SizeStr = integer_to_list(Size),
- {[ValueStr, $:, SizeStr], length(ValueStr) + length(SizeStr) +1};
+ {[ValueStr, $:, SizeStr], length(ValueStr) + length(SizeStr) + 1};
print(BitString, Max, Options) when is_bitstring(BitString) ->
- BL = case byte_size(BitString) > Max of
- true ->
- binary_to_list(BitString, 1, Max);
- _ ->
- R = erlang:bitstring_to_list(BitString),
- {Bytes, [Bits]} = lists:splitwith(fun erlang:is_integer/1, R),
- %% tag the trailing bits with a special tuple we catch when
- %% list_body calls print again
- Bytes ++ [{inline_bitstring, Bits}]
- end,
+ BL =
+ case byte_size(BitString) > Max of
+ true ->
+ binary_to_list(BitString, 1, Max);
+ _ ->
+ R = erlang:bitstring_to_list(BitString),
+ {Bytes, [Bits]} = lists:splitwith(fun erlang:is_integer/1, R),
+ %% tag the trailing bits with a special tuple we catch when
+ %% list_body calls print again
+ Bytes ++ [{inline_bitstring, Bits}]
+ end,
{X, Len0} = list_body(BL, Max - 4, dec_depth(Options), true),
{["<<", X, ">>"], Len0 + 4};
-
print(Float, _Max, _Options) when is_float(Float) ->
%% use the same function io_lib:format uses to print floats
%% float_to_list is way too verbose.
L = io_lib_format:fwrite_g(Float),
{L, length(L)};
-
print(Fun, Max, _Options) when is_function(Fun) ->
L = erlang:fun_to_list(Fun),
case length(L) > Max of
@@ -245,42 +255,36 @@ print(Fun, Max, _Options) when is_function(Fun) ->
_ ->
{L, length(L)}
end;
-
print(Integer, _Max, _Options) when is_integer(Integer) ->
L = integer_to_list(Integer),
{L, length(L)};
-
print(Pid, _Max, _Options) when is_pid(Pid) ->
L = pid_to_list(Pid),
{L, length(L)};
-
print(Ref, _Max, _Options) when is_reference(Ref) ->
L = erlang:ref_to_list(Ref),
{L, length(L)};
-
print(Port, _Max, _Options) when is_port(Port) ->
L = erlang:port_to_list(Port),
{L, length(L)};
-
print({'$lager_record', Name, Fields}, Max, Options) ->
Leader = "#" ++ atom_to_list(Name) ++ "{",
{RC, Len} = record_fields(Fields, Max - length(Leader) + 1, dec_depth(Options)),
{[Leader, RC, "}"], Len + length(Leader) + 1};
-
print(Tuple, Max, Options) when is_tuple(Tuple) ->
- {TC, Len} = tuple_contents(Tuple, Max-2, Options),
+ {TC, Len} = tuple_contents(Tuple, Max - 2, Options),
{[${, TC, $}], Len + 2};
-
print(List, Max, Options) when is_list(List) ->
- case Options#print_options.lists_as_strings orelse
- Options#print_options.force_strings of
+ case
+ Options#print_options.lists_as_strings orelse
+ Options#print_options.force_strings
+ of
true ->
alist_start(List, Max, dec_depth(Options));
_ ->
{R, Len} = list_body(List, Max - 2, dec_depth(Options), false),
{[$[, R, $]], Len + 2}
end;
-
print(Map, Max, Options) ->
case erlang:is_builtin(erlang, is_map, 1) andalso erlang:is_map(Map) of
true ->
@@ -297,43 +301,52 @@ tuple_contents(Tuple, Max, Options) ->
%% Format the inside of a list, i.e. do not add a leading [ or trailing ].
%% Returns {List, Length}
-list_body([], _Max, _Options, _Tuple) -> {[], 0};
+list_body([], _Max, _Options, _Tuple) ->
+ {[], 0};
list_body(_, Max, _Options, _Tuple) when Max < 4 -> {"...", 3};
-list_body(_, _Max, #print_options{depth=0}, _Tuple) -> {"...", 3};
-list_body([H], Max, Options=#print_options{depth=1}, _Tuple) ->
+list_body(_, _Max, #print_options{depth = 0}, _Tuple) ->
+ {"...", 3};
+list_body([H], Max, Options = #print_options{depth = 1}, _Tuple) ->
print(H, Max, Options);
-list_body([H|_], Max, Options=#print_options{depth=1}, Tuple) ->
- {List, Len} = print(H, Max-4, Options),
- Sep = case Tuple of
- true -> $,;
- false -> $|
- end,
+list_body([H | _], Max, Options = #print_options{depth = 1}, Tuple) ->
+ {List, Len} = print(H, Max - 4, Options),
+ Sep =
+ case Tuple of
+ true -> $,;
+ false -> $|
+ end,
{[List ++ [Sep | "..."]], Len + 4};
-list_body([H|T], Max, Options, Tuple) ->
+list_body([H | T], Max, Options, Tuple) ->
{List, Len} = print(H, Max, Options),
{Final, FLen} = list_bodyc(T, Max - Len, Options, Tuple),
- {[List|Final], FLen + Len};
-list_body(X, Max, Options, _Tuple) -> %% improper list
+ {[List | Final], FLen + Len};
+%% improper list
+list_body(X, Max, Options, _Tuple) ->
{List, Len} = print(X, Max - 1, Options),
- {[$|,List], Len + 1}.
+ {[$|, List], Len + 1}.
-list_bodyc([], _Max, _Options, _Tuple) -> {[], 0};
+list_bodyc([], _Max, _Options, _Tuple) ->
+ {[], 0};
list_bodyc(_, Max, _Options, _Tuple) when Max < 5 -> {",...", 4};
-list_bodyc(_, _Max, #print_options{depth=1}, true) -> {",...", 4};
-list_bodyc(_, _Max, #print_options{depth=1}, false) -> {"|...", 4};
-list_bodyc([H|T], Max, #print_options{depth=Depth} = Options, Tuple) ->
+list_bodyc(_, _Max, #print_options{depth = 1}, true) ->
+ {",...", 4};
+list_bodyc(_, _Max, #print_options{depth = 1}, false) ->
+ {"|...", 4};
+list_bodyc([H | T], Max, #print_options{depth = Depth} = Options, Tuple) ->
{List, Len} = print(H, Max, dec_depth(Options)),
{Final, FLen} = list_bodyc(T, Max - Len - 1, dec_depth(Options), Tuple),
- Sep = case Depth == 1 andalso not Tuple of
- true -> $|;
- _ -> $,
- end,
- {[Sep, List|Final], FLen + Len + 1};
-list_bodyc(X, Max, Options, _Tuple) -> %% improper list
+ Sep =
+ case Depth == 1 andalso not Tuple of
+ true -> $|;
+ _ -> $,
+ end,
+ {[Sep, List | Final], FLen + Len + 1};
+%% improper list
+list_bodyc(X, Max, Options, _Tuple) ->
{List, Len} = print(X, Max - 1, Options),
- {[$|,List], Len + 1}.
+ {[$|, List], Len + 1}.
-map_body(Map, Max, #print_options{depth=Depth}) when Max < 4; Depth =:= 0 ->
+map_body(Map, Max, #print_options{depth = Depth}) when Max < 4; Depth =:= 0 ->
case erlang:map_size(Map) of
0 -> {[], 0};
_ -> {"...", 3}
@@ -353,7 +366,7 @@ map_body(Map, Max, Options) ->
map_bodyc([], _Max, _Options) ->
{[], 0};
-map_bodyc(_Rest, Max,#print_options{depth=Depth}) when Max < 5; Depth =:= 0 ->
+map_bodyc(_Rest, Max, #print_options{depth = Depth}) when Max < 5; Depth =:= 0 ->
{",...", 4};
map_bodyc([{Key, Value} | Rest], Max, Options) ->
{KeyStr, KeyLen} = print(Key, Max - 5, Options),
@@ -370,70 +383,86 @@ map_bodyc([{Key, Value} | Rest], Max, Options) ->
%% [0,65,66] -> [0,65,66]
%% [65,b,66] -> "A"[b,66]
%%
-alist_start([], _Max, #print_options{force_strings=true}) -> {"", 0};
-alist_start([], _Max, _Options) -> {"[]", 2};
+alist_start([], _Max, #print_options{force_strings = true}) ->
+ {"", 0};
+alist_start([], _Max, _Options) ->
+ {"[]", 2};
alist_start(_, Max, _Options) when Max < 4 -> {"...", 3};
-alist_start(_, _Max, #print_options{depth=0}) -> {"[...]", 5};
-alist_start(L, Max, #print_options{force_strings=true} = Options) ->
+alist_start(_, _Max, #print_options{depth = 0}) ->
+ {"[...]", 5};
+alist_start(L, Max, #print_options{force_strings = true} = Options) ->
alist(L, Max, Options);
%alist_start([H|_T], _Max, #print_options{depth=1}) when is_integer(H) -> {[$[, H, $|, $., $., $., $]], 7};
-alist_start([H|T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e -> % definitely printable
- try alist([H|T], Max -1, Options) of
+
+% definitely printable
+alist_start([H | T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e ->
+ try alist([H | T], Max - 1, Options) of
{L, Len} ->
- {[$"|L], Len + 1}
+ {[$" | L], Len + 1}
catch
throw:{unprintable, _} ->
- {R, Len} = list_body([H|T], Max-2, Options, false),
+ {R, Len} = list_body([H | T], Max - 2, Options, false),
{[$[, R, $]], Len + 2}
end;
-alist_start([H|T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff -> % definitely printable
- try alist([H|T], Max -1, Options) of
+% definitely printable
+alist_start([H | T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff ->
+ try alist([H | T], Max - 1, Options) of
{L, Len} ->
- {[$"|L], Len + 1}
+ {[$" | L], Len + 1}
catch
throw:{unprintable, _} ->
- {R, Len} = list_body([H|T], Max-2, Options, false),
+ {R, Len} = list_body([H | T], Max - 2, Options, false),
{[$[, R, $]], Len + 2}
end;
-alist_start([H|T], Max, Options) when H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H=:= $\f; H=:= $\b ->
- try alist([H|T], Max -1, Options) of
+alist_start([H | T], Max, Options) when
+ H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H =:= $\f; H =:= $\b
+->
+ try alist([H | T], Max - 1, Options) of
{L, Len} ->
- {[$"|L], Len + 1}
+ {[$" | L], Len + 1}
catch
throw:{unprintable, _} ->
- {R, Len} = list_body([H|T], Max-2, Options, false),
+ {R, Len} = list_body([H | T], Max - 2, Options, false),
{[$[, R, $]], Len + 2}
end;
alist_start(L, Max, Options) ->
- {R, Len} = list_body(L, Max-2, Options, false),
+ {R, Len} = list_body(L, Max - 2, Options, false),
{[$[, R, $]], Len + 2}.
-alist([], _Max, #print_options{force_strings=true}) -> {"", 0};
-alist([], _Max, _Options) -> {"\"", 1};
-alist(_, Max, #print_options{force_strings=true}) when Max < 4 -> {"...", 3};
-alist(_, Max, #print_options{force_strings=false}) when Max < 5 -> {"...\"", 4};
-alist([H|T], Max, Options = #print_options{force_strings=false,lists_as_strings=true}) when H =:= $"; H =:= $\\ ->
+alist([], _Max, #print_options{force_strings = true}) ->
+ {"", 0};
+alist([], _Max, _Options) ->
+ {"\"", 1};
+alist(_, Max, #print_options{force_strings = true}) when Max < 4 -> {"...", 3};
+alist(_, Max, #print_options{force_strings = false}) when Max < 5 -> {"...\"", 4};
+alist([H | T], Max, Options = #print_options{force_strings = false, lists_as_strings = true}) when
+ H =:= $"; H =:= $\\
+->
%% preserve escaping around quotes
- {L, Len} = alist(T, Max-1, Options),
- {[$\\,H|L], Len + 2};
-alist([H|T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e -> % definitely printable
- {L, Len} = alist(T, Max-1, Options),
- {[H|L], Len + 1};
-alist([H|T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff -> % definitely printable
- {L, Len} = alist(T, Max-1, Options),
- {[H|L], Len + 1};
-alist([H|T], Max, Options) when H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H=:= $\f; H=:= $\b ->
- {L, Len} = alist(T, Max-1, Options),
+ {L, Len} = alist(T, Max - 1, Options),
+ {[$\\, H | L], Len + 2};
+% definitely printable
+alist([H | T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e ->
+ {L, Len} = alist(T, Max - 1, Options),
+ {[H | L], Len + 1};
+% definitely printable
+alist([H | T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff ->
+ {L, Len} = alist(T, Max - 1, Options),
+ {[H | L], Len + 1};
+alist([H | T], Max, Options) when
+ H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H =:= $\f; H =:= $\b
+->
+ {L, Len} = alist(T, Max - 1, Options),
case Options#print_options.force_strings of
true ->
- {[H|L], Len + 1};
+ {[H | L], Len + 1};
_ ->
- {[escape(H)|L], Len + 1}
+ {[escape(H) | L], Len + 1}
end;
-alist([H|T], Max, #print_options{force_strings=true} = Options) when is_integer(H) ->
- {L, Len} = alist(T, Max-1, Options),
- {[H|L], Len + 1};
-alist([H|T], Max, Options = #print_options{force_strings=true}) when is_binary(H); is_list(H) ->
+alist([H | T], Max, #print_options{force_strings = true} = Options) when is_integer(H) ->
+ {L, Len} = alist(T, Max - 1, Options),
+ {[H | L], Len + 1};
+alist([H | T], Max, Options = #print_options{force_strings = true}) when is_binary(H); is_list(H) ->
{List, Len} = print(H, Max, Options),
case (Max - Len) =< 0 of
true ->
@@ -442,28 +471,31 @@ alist([H|T], Max, Options = #print_options{force_strings=true}) when is_binary(H
false ->
%% no need to decrement depth, as we're in printable string mode
{Final, FLen} = alist(T, Max - Len, Options),
- {[List|Final], FLen+Len}
+ {[List | Final], FLen + Len}
end;
-alist(_, _, #print_options{force_strings=true}) ->
+alist(_, _, #print_options{force_strings = true}) ->
erlang:error(badarg);
-alist([H|_L], _Max, _Options) ->
+alist([H | _L], _Max, _Options) ->
throw({unprintable, H});
alist(H, _Max, _Options) ->
%% improper list
throw({unprintable, H}).
%% is the first character in the atom alphabetic & lowercase?
-atom_needs_quoting_start([H|T]) when H >= $a, H =< $z ->
+atom_needs_quoting_start([H | T]) when H >= $a, H =< $z ->
atom_needs_quoting(T);
atom_needs_quoting_start(_) ->
true.
atom_needs_quoting([]) ->
false;
-atom_needs_quoting([H|T]) when (H >= $a andalso H =< $z);
- (H >= $A andalso H =< $Z);
- (H >= $0 andalso H =< $9);
- H == $@; H == $_ ->
+atom_needs_quoting([H | T]) when
+ (H >= $a andalso H =< $z);
+ (H >= $A andalso H =< $Z);
+ (H >= $0 andalso H =< $9);
+ H == $@;
+ H == $_
+->
atom_needs_quoting(T);
atom_needs_quoting(_) ->
true.
@@ -471,15 +503,15 @@ atom_needs_quoting(_) ->
-spec prepare_options(options(), #print_options{}) -> #print_options{}.
prepare_options([], Options) ->
Options;
-prepare_options([{depth, Depth}|T], Options) when is_integer(Depth) ->
- prepare_options(T, Options#print_options{depth=Depth});
-prepare_options([{lists_as_strings, Bool}|T], Options) when is_boolean(Bool) ->
+prepare_options([{depth, Depth} | T], Options) when is_integer(Depth) ->
+ prepare_options(T, Options#print_options{depth = Depth});
+prepare_options([{lists_as_strings, Bool} | T], Options) when is_boolean(Bool) ->
prepare_options(T, Options#print_options{lists_as_strings = Bool});
-prepare_options([{force_strings, Bool}|T], Options) when is_boolean(Bool) ->
+prepare_options([{force_strings, Bool} | T], Options) when is_boolean(Bool) ->
prepare_options(T, Options#print_options{force_strings = Bool}).
-dec_depth(#print_options{depth=Depth} = Options) when Depth > 0 ->
- Options#print_options{depth=Depth-1};
+dec_depth(#print_options{depth = Depth} = Options) when Depth > 0 ->
+ Options#print_options{depth = Depth - 1};
dec_depth(Options) ->
Options.
@@ -493,20 +525,20 @@ escape($\v) -> "\\v".
record_fields([], _, _) ->
{"", 0};
-record_fields(_, Max, #print_options{depth=D}) when Max < 4; D == 0 ->
+record_fields(_, Max, #print_options{depth = D}) when Max < 4; D == 0 ->
{"...", 3};
-record_fields([{Field, Value}|T], Max, Options) ->
- {ExtraChars, Terminator} = case T of
- [] ->
- {1, []};
- _ ->
- {2, ","}
- end,
+record_fields([{Field, Value} | T], Max, Options) ->
+ {ExtraChars, Terminator} =
+ case T of
+ [] ->
+ {1, []};
+ _ ->
+ {2, ","}
+ end,
{FieldStr, FieldLen} = print(Field, Max - ExtraChars, Options),
{ValueStr, ValueLen} = print(Value, Max - (FieldLen + ExtraChars), Options),
{Final, FLen} = record_fields(T, Max - (FieldLen + ValueLen + ExtraChars), dec_depth(Options)),
- {[FieldStr++"="++ValueStr++Terminator|Final], FLen + FieldLen + ValueLen + ExtraChars}.
-
+ {[FieldStr ++ "=" ++ ValueStr ++ Terminator | Final], FLen + FieldLen + ValueLen + ExtraChars}.
-ifdef(TEST).
%%--------------------
@@ -516,19 +548,27 @@ format_test() ->
?assertEqual("foobar", lists:flatten(format("~s", [["foo", $b, $a, $r]], 50))),
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~p", [["foo", $b, $a, $r]], 50))),
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~P", [["foo", $b, $a, $r], 10], 50))),
- ?assertEqual("[[102,111,111],98,97,114]", lists:flatten(format("~w", [["foo", $b, $a, $r]], 50))),
+ ?assertEqual(
+ "[[102,111,111],98,97,114]", lists:flatten(format("~w", [["foo", $b, $a, $r]], 50))
+ ),
%% complex ones
?assertEqual(" foobar", lists:flatten(format("~10s", [["foo", $b, $a, $r]], 50))),
?assertEqual("f", lists:flatten(format("~1s", [["foo", $b, $a, $r]], 50))),
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~22p", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~22P", [["foo", $b, $a, $r], 10], 50))),
+ ?assertEqual(
+ "[\"foo\",98,97,114]", lists:flatten(format("~22P", [["foo", $b, $a, $r], 10], 50))
+ ),
?assertEqual("**********", lists:flatten(format("~10W", [["foo", $b, $a, $r], 10], 50))),
- ?assertEqual("[[102,111,111],98,97,114]", lists:flatten(format("~25W", [["foo", $b, $a, $r], 10], 50))),
+ ?assertEqual(
+ "[[102,111,111],98,97,114]", lists:flatten(format("~25W", [["foo", $b, $a, $r], 10], 50))
+ ),
% Note these next two diverge from io_lib:format; the field width is
% ignored, when it should be used as max line length.
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~10p", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~10P", [["foo", $b, $a, $r], 10], 50))),
+ ?assertEqual(
+ "[\"foo\",98,97,114]", lists:flatten(format("~10P", [["foo", $b, $a, $r], 10], 50))
+ ),
ok.
atom_quoting_test() ->
@@ -545,12 +585,15 @@ sane_float_printing_test() ->
?assertEqual("1.0", lists:flatten(format("~p", [1.0], 50))),
?assertEqual("1.23456789", lists:flatten(format("~p", [1.23456789], 50))),
?assertEqual("1.23456789", lists:flatten(format("~p", [1.234567890], 50))),
- ?assertEqual("0.3333333333333333", lists:flatten(format("~p", [1/3], 50))),
+ ?assertEqual("0.3333333333333333", lists:flatten(format("~p", [1 / 3], 50))),
?assertEqual("0.1234567", lists:flatten(format("~p", [0.1234567], 50))),
ok.
float_inside_list_test() ->
- ?assertEqual("[97,38.233913133184835,99]", lists:flatten(format("~p", [[$a, 38.233913133184835, $c]], 50))),
+ ?assertEqual(
+ "[97,38.233913133184835,99]",
+ lists:flatten(format("~p", [[$a, 38.233913133184835, $c]], 50))
+ ),
?assertError(badarg, lists:flatten(format("~s", [[$a, 38.233913133184835, $c]], 50))),
ok.
@@ -572,14 +615,18 @@ binary_printing_test() ->
?assertEqual("<<\"hello\">>", lists:flatten(format("~p", [<<"hello">>], 50))),
?assertEqual("<<104,101,108,108,111>>", lists:flatten(format("~w", [<<"hello">>], 50))),
?assertEqual("<<1,2,3,4>>", lists:flatten(format("~p", [<<1, 2, 3, 4>>], 50))),
- ?assertEqual([1,2,3,4], lists:flatten(format("~s", [<<1, 2, 3, 4>>], 50))),
+ ?assertEqual([1, 2, 3, 4], lists:flatten(format("~s", [<<1, 2, 3, 4>>], 50))),
?assertEqual("hello", lists:flatten(format("~s", [<<"hello">>], 50))),
?assertEqual("hello\nworld", lists:flatten(format("~s", [<<"hello\nworld">>], 50))),
?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))),
- ?assertEqual("<<\"\\\"hello world\\\"\">>", lists:flatten(format("~p", [<<"\"hello world\"">>], 50))),
+ ?assertEqual(
+ "<<\"\\\"hello world\\\"\">>", lists:flatten(format("~p", [<<"\"hello world\"">>], 50))
+ ),
?assertEqual("<<\"hello\\\\world\">>", lists:flatten(format("~p", [<<"hello\\world">>], 50))),
?assertEqual("<<\"hello\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\world">>], 50))),
- ?assertEqual("<<\"hello\\\\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\\world">>], 50))),
+ ?assertEqual(
+ "<<\"hello\\\\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\\world">>], 50))
+ ),
?assertEqual("<<\"hello\\bworld\">>", lists:flatten(format("~p", [<<"hello\bworld">>], 50))),
?assertEqual("<<\"hello\\tworld\">>", lists:flatten(format("~p", [<<"hello\tworld">>], 50))),
?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))),
@@ -594,20 +641,68 @@ binary_printing_test() ->
ok.
bitstring_printing_test() ->
- ?assertEqual("<<1,2,3,1:7>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 100))),
- ?assertEqual("<<1:7>>", lists:flatten(format("~p",
- [<<1:7>>], 100))),
- ?assertEqual("<<1,2,3,...>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 12))),
- ?assertEqual("<<1,2,3,...>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 13))),
- ?assertEqual("<<1,2,3,1:7>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 14))),
+ ?assertEqual(
+ "<<1,2,3,1:7>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 100
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1:7>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1:7>>],
+ 100
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1,2,3,...>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 12
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1,2,3,...>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 13
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1,2,3,1:7>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 14
+ )
+ )
+ ),
?assertEqual("<<..>>", lists:flatten(format("~p", [<<1:7>>], 0))),
?assertEqual("<<...>>", lists:flatten(format("~p", [<<1:7>>], 1))),
- ?assertEqual("[<<1>>,<<2>>]", lists:flatten(format("~p", [[<<1>>, <<2>>]],
- 100))),
+ ?assertEqual(
+ "[<<1>>,<<2>>]",
+ lists:flatten(
+ format(
+ "~p",
+ [[<<1>>, <<2>>]],
+ 100
+ )
+ )
+ ),
?assertEqual("{<<1:7>>}", lists:flatten(format("~p", [{<<1:7>>}], 50))),
ok.
@@ -617,48 +712,91 @@ list_printing_test() ->
?assertEqual("", lists:flatten(format("~s", [[]], 50))),
?assertEqual("...", lists:flatten(format("~s", [[]], -1))),
?assertEqual("[[]]", lists:flatten(format("~p", [[[]]], 50))),
- ?assertEqual("[13,11,10,8,5,4]", lists:flatten(format("~p", [[13,11,10,8,5,4]], 50))),
- ?assertEqual("\"\\rabc\"", lists:flatten(format("~p", [[13,$a, $b, $c]], 50))),
- ?assertEqual("[1,2,3|4]", lists:flatten(format("~p", [[1, 2, 3|4]], 50))),
- ?assertEqual("[...]", lists:flatten(format("~p", [[1, 2, 3,4]], 4))),
+ ?assertEqual("[13,11,10,8,5,4]", lists:flatten(format("~p", [[13, 11, 10, 8, 5, 4]], 50))),
+ ?assertEqual("\"\\rabc\"", lists:flatten(format("~p", [[13, $a, $b, $c]], 50))),
+ ?assertEqual("[1,2,3|4]", lists:flatten(format("~p", [[1, 2, 3 | 4]], 50))),
+ ?assertEqual("[...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 4))),
?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 6))),
?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 7))),
?assertEqual("[1,2,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 8))),
- ?assertEqual("[1|4]", lists:flatten(format("~p", [[1|4]], 50))),
+ ?assertEqual("[1|4]", lists:flatten(format("~p", [[1 | 4]], 50))),
?assertEqual("[1]", lists:flatten(format("~p", [[1]], 50))),
- ?assertError(badarg, lists:flatten(format("~s", [[1|4]], 50))),
+ ?assertError(badarg, lists:flatten(format("~s", [[1 | 4]], 50))),
?assertEqual("\"hello...\"", lists:flatten(format("~p", ["hello world"], 10))),
?assertEqual("hello w...", lists:flatten(format("~s", ["hello world"], 10))),
?assertEqual("hello world\r\n", lists:flatten(format("~s", ["hello world\r\n"], 50))),
?assertEqual("\rhello world\r\n", lists:flatten(format("~s", ["\rhello world\r\n"], 50))),
- ?assertEqual("\"\\rhello world\\r\\n\"", lists:flatten(format("~p", ["\rhello world\r\n"], 50))),
- ?assertEqual("[13,104,101,108,108,111,32,119,111,114,108,100,13,10]", lists:flatten(format("~w", ["\rhello world\r\n"], 60))),
+ ?assertEqual(
+ "\"\\rhello world\\r\\n\"", lists:flatten(format("~p", ["\rhello world\r\n"], 50))
+ ),
+ ?assertEqual(
+ "[13,104,101,108,108,111,32,119,111,114,108,100,13,10]",
+ lists:flatten(format("~w", ["\rhello world\r\n"], 60))
+ ),
?assertEqual("...", lists:flatten(format("~s", ["\rhello world\r\n"], 3))),
- ?assertEqual("[22835963083295358096932575511191922182123945984,...]",
- lists:flatten(format("~p", [
- [22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984]], 9))),
- ?assertEqual("[22835963083295358096932575511191922182123945984,...]",
- lists:flatten(format("~p", [
- [22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984]], 53))),
+ ?assertEqual(
+ "[22835963083295358096932575511191922182123945984,...]",
+ lists:flatten(
+ format(
+ "~p",
+ [
+ [
+ 22835963083295358096932575511191922182123945984,
+ 22835963083295358096932575511191922182123945984
+ ]
+ ],
+ 9
+ )
+ )
+ ),
+ ?assertEqual(
+ "[22835963083295358096932575511191922182123945984,...]",
+ lists:flatten(
+ format(
+ "~p",
+ [
+ [
+ 22835963083295358096932575511191922182123945984,
+ 22835963083295358096932575511191922182123945984
+ ]
+ ],
+ 53
+ )
+ )
+ ),
%% improper list
- ?assertEqual("[1,2,3|4]", lists:flatten(format("~P", [[1|[2|[3|4]]], 5], 50))),
- ?assertEqual("[1|1]", lists:flatten(format("~P", [[1|1], 5], 50))),
- ?assertEqual("[9|9]", lists:flatten(format("~p", [[9|9]], 50))),
+ ?assertEqual("[1,2,3|4]", lists:flatten(format("~P", [[1 | [2 | [3 | 4]]], 5], 50))),
+ ?assertEqual("[1|1]", lists:flatten(format("~P", [[1 | 1], 5], 50))),
+ ?assertEqual("[9|9]", lists:flatten(format("~p", [[9 | 9]], 50))),
ok.
iolist_printing_test() ->
- ?assertEqual("iolist: HelloIamaniolist",
- lists:flatten(format("iolist: ~s", [[$H, $e, $l, $l, $o, "I", ["am", [<<"an">>], [$i, $o, $l, $i, $s, $t]]]], 1000))),
- ?assertEqual("123...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 6))),
- ?assertEqual("123456...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 9))),
- ?assertEqual("123456789H...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 13))),
- ?assertEqual("123456789HellIamaniolist",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 30))),
+ ?assertEqual(
+ "iolist: HelloIamaniolist",
+ lists:flatten(
+ format(
+ "iolist: ~s",
+ [[$H, $e, $l, $l, $o, "I", ["am", [<<"an">>], [$i, $o, $l, $i, $s, $t]]]],
+ 1000
+ )
+ )
+ ),
+ ?assertEqual(
+ "123...",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 6))
+ ),
+ ?assertEqual(
+ "123456...",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 9))
+ ),
+ ?assertEqual(
+ "123456789H...",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 13))
+ ),
+ ?assertEqual(
+ "123456789HellIamaniolist",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 30))
+ ),
ok.
@@ -671,22 +809,48 @@ tuple_printing_test() ->
?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 3))),
?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 4))),
?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 5))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 6))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 7))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 9))),
- ?assertEqual("{foo,bar}", lists:flatten(format("~p", [{foo,bar}], 10))),
- ?assertEqual("{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(format("~w", [
+ ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 6))),
+ ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 7))),
+ ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 9))),
+ ?assertEqual("{foo,bar}", lists:flatten(format("~p", [{foo, bar}], 10))),
+ ?assertEqual(
+ "{22835963083295358096932575511191922182123945984,...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
{22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}], 10))),
- ?assertEqual("{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(format("~w", [
+ 22835963083295358096932575511191922182123945984}
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "{22835963083295358096932575511191922182123945984,...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ {22835963083295358096932575511191922182123945984, bar}
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "{22835963083295358096932575511191922182123945984,...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
{22835963083295358096932575511191922182123945984,
- bar}], 10))),
- ?assertEqual("{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(format("~w", [
- {22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}], 53))),
+ 22835963083295358096932575511191922182123945984}
+ ],
+ 53
+ )
+ )
+ ),
ok.
map_printing_test() ->
@@ -698,40 +862,102 @@ map_printing_test() ->
?assertError(badarg, lists:flatten(format("~s", [maps:new()], 50))),
?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 1))),
?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 6))),
- ?assertEqual("#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 7))),
- ?assertEqual("#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 9))),
- ?assertEqual("#{bar => foo}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 10))),
- ?assertEqual("#{bar => ...,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 9))),
- ?assertEqual("#{bar => foo,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 10))),
- ?assertEqual("#{bar => foo,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 17))),
- ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 18))),
- ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 19))),
- ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 20))),
- ?assertEqual("#{bar => foo,foo => bar}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 21))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}])], 10))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- bar}])], 10))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- bar}])], 53))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => bar}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- bar}])], 54))),
+ ?assertEqual(
+ "#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 7))
+ ),
+ ?assertEqual(
+ "#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 9))
+ ),
+ ?assertEqual(
+ "#{bar => foo}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 10))
+ ),
+ ?assertEqual(
+ "#{bar => ...,...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 9))
+ ),
+ ?assertEqual(
+ "#{bar => foo,...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 10))
+ ),
+ ?assertEqual(
+ "#{bar => foo,...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 17))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => ...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 18))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => ...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 19))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => ...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 20))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => bar}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 21))
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => ...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([
+ {22835963083295358096932575511191922182123945984,
+ 22835963083295358096932575511191922182123945984}
+ ])
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => ...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => ...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
+ ],
+ 53
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => bar}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
+ ],
+ 54
+ )
+ )
+ ),
ok;
false ->
ok
end.
unicode_test() ->
- ?assertEqual([231,167,129], lists:flatten(format("~s", [<<231,167,129>>], 50))),
- ?assertEqual([31169], lists:flatten(format("~ts", [<<231,167,129>>], 50))),
+ ?assertEqual([231, 167, 129], lists:flatten(format("~s", [<<231, 167, 129>>], 50))),
+ ?assertEqual([31169], lists:flatten(format("~ts", [<<231, 167, 129>>], 50))),
ok.
depth_limit_test() ->
@@ -754,21 +980,54 @@ depth_limit_test() ->
case erlang:is_builtin(erlang, is_map, 1) of
true ->
- ?assertEqual("#{a => #{...}}",
- lists:flatten(format("~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 2], 50))),
- ?assertEqual("#{a => #{b => #{...}}}",
- lists:flatten(format("~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 3], 50))),
- ?assertEqual("#{a => #{b => #{c => d}}}",
- lists:flatten(format("~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 4], 50))),
+ ?assertEqual(
+ "#{a => #{...}}",
+ lists:flatten(
+ format(
+ "~P",
+ [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 2],
+ 50
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{a => #{b => #{...}}}",
+ lists:flatten(
+ format(
+ "~P",
+ [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 3],
+ 50
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{a => #{b => #{c => d}}}",
+ lists:flatten(
+ format(
+ "~P",
+ [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 4],
+ 50
+ )
+ )
+ ),
?assertEqual("#{}", lists:flatten(format("~P", [maps:new(), 1], 50))),
- ?assertEqual("#{...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 1], 50))),
- ?assertEqual("#{1 => 1,...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 2], 50))),
- ?assertEqual("#{1 => 1,2 => 2,...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 3], 50))),
- ?assertEqual("#{1 => 1,2 => 2,3 => 3}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 4], 50))),
+ ?assertEqual(
+ "#{...}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 1], 50))
+ ),
+ ?assertEqual(
+ "#{1 => 1,...}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 2], 50))
+ ),
+ ?assertEqual(
+ "#{1 => 1,2 => 2,...}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 3], 50))
+ ),
+ ?assertEqual(
+ "#{1 => 1,2 => 2,3 => 3}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 4], 50))
+ ),
ok;
false ->
@@ -776,8 +1035,14 @@ depth_limit_test() ->
end,
?assertEqual("{\"a\",[...]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 3], 50))),
- ?assertEqual("{\"a\",[\"b\",[[...]|...]]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 6], 50))),
- ?assertEqual("{\"a\",[\"b\",[\"c\",[\"d\"]]]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 9], 50))),
+ ?assertEqual(
+ "{\"a\",[\"b\",[[...]|...]]}",
+ lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 6], 50))
+ ),
+ ?assertEqual(
+ "{\"a\",[\"b\",[\"c\",[\"d\"]]]}",
+ lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 9], 50))
+ ),
?assertEqual("[...]", lists:flatten(format("~P", [[1, 2, 3], 1], 50))),
?assertEqual("[1|...]", lists:flatten(format("~P", [[1, 2, 3], 2], 50))),
@@ -808,21 +1073,23 @@ depth_limit_test() ->
%% depth limiting for some reason works in 4 byte chunks on printable binaries?
?assertEqual("<<\"hell\"...>>", lists:flatten(format("~P", [<<"hello world">>, 2], 50))),
- ?assertEqual("<<\"abcd\"...>>", lists:flatten(format("~P", [<<$a, $b, $c, $d, $e, 0>>, 2], 50))),
+ ?assertEqual(
+ "<<\"abcd\"...>>", lists:flatten(format("~P", [<<$a, $b, $c, $d, $e, 0>>, 2], 50))
+ ),
%% I don't even know...
?assertEqual("<<>>", lists:flatten(format("~P", [<<>>, 1], 50))),
?assertEqual("<<>>", lists:flatten(format("~W", [<<>>, 1], 50))),
- ?assertEqual("{abc,<<\"abc\\\"\">>}", lists:flatten(format("~P", [{abc,<<"abc\"">>}, 4], 50))),
+ ?assertEqual("{abc,<<\"abc\\\"\">>}", lists:flatten(format("~P", [{abc, <<"abc\"">>}, 4], 50))),
ok.
print_terms_without_format_string_test() ->
?assertError(badarg, format({hello, world}, [], 50)),
?assertError(badarg, format([{google, bomb}], [], 50)),
- ?assertError(badarg, format([$h,$e,$l,$l,$o, 3594], [], 50)),
- ?assertEqual("helloworld", lists:flatten(format([$h,$e,$l,$l,$o, "world"], [], 50))),
+ ?assertError(badarg, format([$h, $e, $l, $l, $o, 3594], [], 50)),
+ ?assertEqual("helloworld", lists:flatten(format([$h, $e, $l, $l, $o, "world"], [], 50))),
?assertEqual("hello", lists:flatten(format(<<"hello">>, [], 50))),
?assertEqual("hello", lists:flatten(format('hello', [], 50))),
?assertError(badarg, format(<<1, 2, 3, 1:7>>, [], 100)),
@@ -830,9 +1097,9 @@ print_terms_without_format_string_test() ->
ok.
improper_io_list_test() ->
- ?assertEqual(">hello", lists:flatten(format('~s', [[$>|<<"hello">>]], 50))),
- ?assertEqual(">hello", lists:flatten(format('~ts', [[$>|<<"hello">>]], 50))),
- ?assertEqual("helloworld", lists:flatten(format('~ts', [[<<"hello">>|<<"world">>]], 50))),
+ ?assertEqual(">hello", lists:flatten(format('~s', [[$> | <<"hello">>]], 50))),
+ ?assertEqual(">hello", lists:flatten(format('~ts', [[$> | <<"hello">>]], 50))),
+ ?assertEqual("helloworld", lists:flatten(format('~ts', [[<<"hello">> | <<"world">>]], 50))),
ok.
--endif.
\ No newline at end of file
+-endif.
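The hunks above show the two rewrites erlfmt applies most often in this patch: spacing around the cons operator and around arithmetic (`[H|T]` becomes `[H | T]`, `Max-1` becomes `Max - 1`), and long multi-condition guards broken one condition per line with the `->` on its own line. A minimal before/after sketch of the guard convention (hypothetical function, not taken from this patch; erlfmt only breaks a guard like this once the clause head exceeds its print width):

    %% before
    f([H|_T], Max) when H >= $a, H =< $z; H =:= $_ -> {H, Max-1}.

    %% after erlfmt, assuming the one-line form is over the width limit
    f([H | _T], Max) when
        H >= $a, H =< $z;
        H =:= $_
    ->
        {H, Max - 1}.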
diff --git a/src/couch_log/src/couch_log_trunc_io_fmt.erl b/src/couch_log/src/couch_log_trunc_io_fmt.erl
index 77f0b2e0d..cf18019ad 100644
--- a/src/couch_log/src/couch_log_trunc_io_fmt.erl
+++ b/src/couch_log/src/couch_log_trunc_io_fmt.erl
@@ -22,12 +22,11 @@
%% lager_Format.
-module(couch_log_trunc_io_fmt).
-
-export([format/3, format/4]).
-record(options, {
- chomp = false :: boolean()
- }).
+ chomp = false :: boolean()
+}).
format(FmtStr, Args, MaxLen) ->
format(FmtStr, Args, MaxLen, []).
@@ -46,11 +45,15 @@ format(FmtStr, Args, MaxLen, Opts) when is_list(FmtStr) ->
{Cs2, MaxLen2} = build(Cs, [], MaxLen, Options),
%% count how many terms remain
{Count, StrLen} = lists:foldl(
- fun({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
+ fun
+ ({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
{Terms + 1, Chars};
(_, {Terms, Chars}) ->
{Terms, Chars + 1}
- end, {0, 0}, Cs2),
+ end,
+ {0, 0},
+ Cs2
+ ),
build2(Cs2, Count, MaxLen2 - StrLen);
false ->
erlang:error(badarg)
@@ -58,114 +61,116 @@ format(FmtStr, Args, MaxLen, Opts) when is_list(FmtStr) ->
format(_FmtStr, _Args, _MaxLen, _Opts) ->
erlang:error(badarg).
-collect([$~|Fmt0], Args0) ->
- {C,Fmt1,Args1} = collect_cseq(Fmt0, Args0),
- [C|collect(Fmt1, Args1)];
-collect([C|Fmt], Args) ->
- [C|collect(Fmt, Args)];
-collect([], []) -> [].
+collect([$~ | Fmt0], Args0) ->
+ {C, Fmt1, Args1} = collect_cseq(Fmt0, Args0),
+ [C | collect(Fmt1, Args1)];
+collect([C | Fmt], Args) ->
+ [C | collect(Fmt, Args)];
+collect([], []) ->
+ [].
collect_cseq(Fmt0, Args0) ->
- {F,Ad,Fmt1,Args1} = field_width(Fmt0, Args0),
- {P,Fmt2,Args2} = precision(Fmt1, Args1),
- {Pad,Fmt3,Args3} = pad_char(Fmt2, Args2),
- {Encoding,Fmt4,Args4} = encoding(Fmt3, Args3),
- {C,As,Fmt5,Args5} = collect_cc(Fmt4, Args4),
- {{C,As,F,Ad,P,Pad,Encoding},Fmt5,Args5}.
-
-encoding([$t|Fmt],Args) ->
- {unicode,Fmt,Args};
-encoding(Fmt,Args) ->
- {latin1,Fmt,Args}.
-
-field_width([$-|Fmt0], Args0) ->
- {F,Fmt,Args} = field_value(Fmt0, Args0),
+ {F, Ad, Fmt1, Args1} = field_width(Fmt0, Args0),
+ {P, Fmt2, Args2} = precision(Fmt1, Args1),
+ {Pad, Fmt3, Args3} = pad_char(Fmt2, Args2),
+ {Encoding, Fmt4, Args4} = encoding(Fmt3, Args3),
+ {C, As, Fmt5, Args5} = collect_cc(Fmt4, Args4),
+ {{C, As, F, Ad, P, Pad, Encoding}, Fmt5, Args5}.
+
+encoding([$t | Fmt], Args) ->
+ {unicode, Fmt, Args};
+encoding(Fmt, Args) ->
+ {latin1, Fmt, Args}.
+
+field_width([$- | Fmt0], Args0) ->
+ {F, Fmt, Args} = field_value(Fmt0, Args0),
field_width(-F, Fmt, Args);
field_width(Fmt0, Args0) ->
- {F,Fmt,Args} = field_value(Fmt0, Args0),
+ {F, Fmt, Args} = field_value(Fmt0, Args0),
field_width(F, Fmt, Args).
field_width(F, Fmt, Args) when F < 0 ->
- {-F,left,Fmt,Args};
+ {-F, left, Fmt, Args};
field_width(F, Fmt, Args) when F >= 0 ->
- {F,right,Fmt,Args}.
+ {F, right, Fmt, Args}.
-precision([$.|Fmt], Args) ->
+precision([$. | Fmt], Args) ->
field_value(Fmt, Args);
precision(Fmt, Args) ->
- {none,Fmt,Args}.
+ {none, Fmt, Args}.
-field_value([$*|Fmt], [A|Args]) when is_integer(A) ->
- {A,Fmt,Args};
-field_value([C|Fmt], Args) when is_integer(C), C >= $0, C =< $9 ->
- field_value([C|Fmt], Args, 0);
+field_value([$* | Fmt], [A | Args]) when is_integer(A) ->
+ {A, Fmt, Args};
+field_value([C | Fmt], Args) when is_integer(C), C >= $0, C =< $9 ->
+ field_value([C | Fmt], Args, 0);
field_value(Fmt, Args) ->
- {none,Fmt,Args}.
+ {none, Fmt, Args}.
-field_value([C|Fmt], Args, F) when is_integer(C), C >= $0, C =< $9 ->
- field_value(Fmt, Args, 10*F + (C - $0));
-field_value(Fmt, Args, F) -> %Default case
- {F,Fmt,Args}.
+field_value([C | Fmt], Args, F) when is_integer(C), C >= $0, C =< $9 ->
+ field_value(Fmt, Args, 10 * F + (C - $0));
+%Default case
+field_value(Fmt, Args, F) ->
+ {F, Fmt, Args}.
-pad_char([$.,$*|Fmt], [Pad|Args]) -> {Pad,Fmt,Args};
-pad_char([$.,Pad|Fmt], Args) -> {Pad,Fmt,Args};
-pad_char(Fmt, Args) -> {$\s,Fmt,Args}.
+pad_char([$., $* | Fmt], [Pad | Args]) -> {Pad, Fmt, Args};
+pad_char([$., Pad | Fmt], Args) -> {Pad, Fmt, Args};
+pad_char(Fmt, Args) -> {$\s, Fmt, Args}.
%% collect_cc([FormatChar], [Argument]) ->
%% {Control,[ControlArg],[FormatChar],[Arg]}.
%% Here we collect the arguments for each control character.
%% Be explicit to cause failure early.
-collect_cc([$w|Fmt], [A|Args]) -> {$w,[A],Fmt,Args};
-collect_cc([$p|Fmt], [A|Args]) -> {$p,[A],Fmt,Args};
-collect_cc([$W|Fmt], [A,Depth|Args]) -> {$W,[A,Depth],Fmt,Args};
-collect_cc([$P|Fmt], [A,Depth|Args]) -> {$P,[A,Depth],Fmt,Args};
-collect_cc([$s|Fmt], [A|Args]) -> {$s,[A],Fmt,Args};
-collect_cc([$r|Fmt], [A|Args]) -> {$r,[A],Fmt,Args};
-collect_cc([$e|Fmt], [A|Args]) -> {$e,[A],Fmt,Args};
-collect_cc([$f|Fmt], [A|Args]) -> {$f,[A],Fmt,Args};
-collect_cc([$g|Fmt], [A|Args]) -> {$g,[A],Fmt,Args};
-collect_cc([$b|Fmt], [A|Args]) -> {$b,[A],Fmt,Args};
-collect_cc([$B|Fmt], [A|Args]) -> {$B,[A],Fmt,Args};
-collect_cc([$x|Fmt], [A,Prefix|Args]) -> {$x,[A,Prefix],Fmt,Args};
-collect_cc([$X|Fmt], [A,Prefix|Args]) -> {$X,[A,Prefix],Fmt,Args};
-collect_cc([$+|Fmt], [A|Args]) -> {$+,[A],Fmt,Args};
-collect_cc([$#|Fmt], [A|Args]) -> {$#,[A],Fmt,Args};
-collect_cc([$c|Fmt], [A|Args]) -> {$c,[A],Fmt,Args};
-collect_cc([$~|Fmt], Args) when is_list(Args) -> {$~,[],Fmt,Args};
-collect_cc([$n|Fmt], Args) when is_list(Args) -> {$n,[],Fmt,Args};
-collect_cc([$i|Fmt], [A|Args]) -> {$i,[A],Fmt,Args}.
-
+collect_cc([$w | Fmt], [A | Args]) -> {$w, [A], Fmt, Args};
+collect_cc([$p | Fmt], [A | Args]) -> {$p, [A], Fmt, Args};
+collect_cc([$W | Fmt], [A, Depth | Args]) -> {$W, [A, Depth], Fmt, Args};
+collect_cc([$P | Fmt], [A, Depth | Args]) -> {$P, [A, Depth], Fmt, Args};
+collect_cc([$s | Fmt], [A | Args]) -> {$s, [A], Fmt, Args};
+collect_cc([$r | Fmt], [A | Args]) -> {$r, [A], Fmt, Args};
+collect_cc([$e | Fmt], [A | Args]) -> {$e, [A], Fmt, Args};
+collect_cc([$f | Fmt], [A | Args]) -> {$f, [A], Fmt, Args};
+collect_cc([$g | Fmt], [A | Args]) -> {$g, [A], Fmt, Args};
+collect_cc([$b | Fmt], [A | Args]) -> {$b, [A], Fmt, Args};
+collect_cc([$B | Fmt], [A | Args]) -> {$B, [A], Fmt, Args};
+collect_cc([$x | Fmt], [A, Prefix | Args]) -> {$x, [A, Prefix], Fmt, Args};
+collect_cc([$X | Fmt], [A, Prefix | Args]) -> {$X, [A, Prefix], Fmt, Args};
+collect_cc([$+ | Fmt], [A | Args]) -> {$+, [A], Fmt, Args};
+collect_cc([$# | Fmt], [A | Args]) -> {$#, [A], Fmt, Args};
+collect_cc([$c | Fmt], [A | Args]) -> {$c, [A], Fmt, Args};
+collect_cc([$~ | Fmt], Args) when is_list(Args) -> {$~, [], Fmt, Args};
+collect_cc([$n | Fmt], Args) when is_list(Args) -> {$n, [], Fmt, Args};
+collect_cc([$i | Fmt], [A | Args]) -> {$i, [A], Fmt, Args}.
%% build([Control], Pc, Indentation) -> [Char].
%% Interpret the control structures. Count the number of prints
%% remaining and only calculate indentation when necessary. Must also
%% be smart when calculating indentation for characters in format.
-build([{$n, _, _, _, _, _, _}], Acc, MaxLen, #options{chomp=true}) ->
+build([{$n, _, _, _, _, _, _}], Acc, MaxLen, #options{chomp = true}) ->
%% trailing ~n, ignore
{lists:reverse(Acc), MaxLen};
-build([{C,As,F,Ad,P,Pad,Enc}|Cs], Acc, MaxLen, O) ->
+build([{C, As, F, Ad, P, Pad, Enc} | Cs], Acc, MaxLen, O) ->
{S, MaxLen2} = control(C, As, F, Ad, P, Pad, Enc, MaxLen),
- build(Cs, [S|Acc], MaxLen2, O);
-build([$\n], Acc, MaxLen, #options{chomp=true}) ->
+ build(Cs, [S | Acc], MaxLen2, O);
+build([$\n], Acc, MaxLen, #options{chomp = true}) ->
%% trailing \n, ignore
{lists:reverse(Acc), MaxLen};
-build([$\n|Cs], Acc, MaxLen, O) ->
- build(Cs, [$\n|Acc], MaxLen - 1, O);
-build([$\t|Cs], Acc, MaxLen, O) ->
- build(Cs, [$\t|Acc], MaxLen - 1, O);
-build([C|Cs], Acc, MaxLen, O) ->
- build(Cs, [C|Acc], MaxLen - 1, O);
+build([$\n | Cs], Acc, MaxLen, O) ->
+ build(Cs, [$\n | Acc], MaxLen - 1, O);
+build([$\t | Cs], Acc, MaxLen, O) ->
+ build(Cs, [$\t | Acc], MaxLen - 1, O);
+build([C | Cs], Acc, MaxLen, O) ->
+ build(Cs, [C | Acc], MaxLen - 1, O);
build([], Acc, MaxLen, _O) ->
{lists:reverse(Acc), MaxLen}.
-build2([{C,As,F,Ad,P,Pad,Enc}|Cs], Count, MaxLen) ->
+build2([{C, As, F, Ad, P, Pad, Enc} | Cs], Count, MaxLen) ->
{S, Len} = control2(C, As, F, Ad, P, Pad, Enc, MaxLen div Count),
- [S|build2(Cs, Count - 1, MaxLen - Len)];
-build2([C|Cs], Count, MaxLen) ->
- [C|build2(Cs, Count, MaxLen)];
-build2([], _, _) -> [].
+ [S | build2(Cs, Count - 1, MaxLen - Len)];
+build2([C | Cs], Count, MaxLen) ->
+ [C | build2(Cs, Count, MaxLen)];
+build2([], _, _) ->
+ [].
%% control(FormatChar, [Argument], FieldWidth, Adjust, Precision, PadChar,
%% Indentation) -> [Char]
@@ -187,20 +192,26 @@ control($b, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
control($B, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
Res = unprefixed_integer(A, F, Adj, base(P), Pad, false),
{Res, L - lists:flatlength(Res)};
-control($x, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A),
- is_atom(Prefix) ->
+control($x, [A, Prefix], F, Adj, P, Pad, _Enc, L) when
+ is_integer(A),
+ is_atom(Prefix)
+->
Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), true),
{Res, L - lists:flatlength(Res)};
-control($x, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
+control($x, [A, Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
+ %Check if Prefix is a character list
+ true = io_lib:deep_char_list(Prefix),
Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, true),
{Res, L - lists:flatlength(Res)};
-control($X, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A),
- is_atom(Prefix) ->
+control($X, [A, Prefix], F, Adj, P, Pad, _Enc, L) when
+ is_integer(A),
+ is_atom(Prefix)
+->
Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), false),
{Res, L - lists:flatlength(Res)};
-control($X, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
+control($X, [A, Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
+ %Check if Prefix is a character list
+ true = io_lib:deep_char_list(Prefix),
Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, false),
{Res, L - lists:flatlength(Res)};
control($+, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
@@ -241,11 +252,11 @@ control2($w, [A], F, Adj, P, Pad, _Enc, L) ->
control2($p, [A], _F, _Adj, _P, _Pad, _Enc, L) ->
Term = couch_log_trunc_io:fprint(A, L, [{lists_as_strings, true}]),
{Term, lists:flatlength(Term)};
-control2($W, [A,Depth], F, Adj, P, Pad, _Enc, L) when is_integer(Depth) ->
+control2($W, [A, Depth], F, Adj, P, Pad, _Enc, L) when is_integer(Depth) ->
Term = couch_log_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, false}]),
Res = term(Term, F, Adj, P, Pad),
{Res, lists:flatlength(Res)};
-control2($P, [A,Depth], _F, _Adj, _P, _Pad, _Enc, L) when is_integer(Depth) ->
+control2($P, [A, Depth], _F, _Adj, _P, _Pad, _Enc, L) when is_integer(Depth) ->
Term = couch_log_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, true}]),
{Term, lists:flatlength(Term)};
control2($s, [L0], F, Adj, P, Pad, latin1, L) ->
@@ -261,18 +272,18 @@ control2($r, [R], F, Adj, P, Pad, _Enc, _L) ->
Res = string(List, F, Adj, P, Pad),
{Res, lists:flatlength(Res)}.
-iolist_to_chars([C|Cs]) when is_integer(C), C >= $\000, C =< $\377 ->
+iolist_to_chars([C | Cs]) when is_integer(C), C >= $\000, C =< $\377 ->
[C | iolist_to_chars(Cs)];
-iolist_to_chars([I|Cs]) ->
+iolist_to_chars([I | Cs]) ->
[iolist_to_chars(I) | iolist_to_chars(Cs)];
iolist_to_chars([]) ->
[];
iolist_to_chars(B) when is_binary(B) ->
binary_to_list(B).
-cdata_to_chars([C|Cs]) when is_integer(C), C >= $\000 ->
+cdata_to_chars([C | Cs]) when is_integer(C), C >= $\000 ->
[C | cdata_to_chars(Cs)];
-cdata_to_chars([I|Cs]) ->
+cdata_to_chars([I | Cs]) ->
[cdata_to_chars(I) | cdata_to_chars(Cs)];
cdata_to_chars([]) ->
[];
@@ -284,12 +295,12 @@ cdata_to_chars(B) when is_binary(B) ->
make_options([], Options) ->
Options;
-make_options([{chomp, Bool}|T], Options) when is_boolean(Bool) ->
- make_options(T, Options#options{chomp=Bool}).
+make_options([{chomp, Bool} | T], Options) when is_boolean(Bool) ->
+ make_options(T, Options#options{chomp = Bool}).
-ifdef(UNICODE_AS_BINARIES).
uniconv(C) ->
- unicode:characters_to_binary(C,unicode).
+ unicode:characters_to_binary(C, unicode).
-else.
uniconv(C) ->
C.
@@ -305,21 +316,28 @@ base(B) when is_integer(B) ->
%% Adjust the characters within the field if length less than Max padding
%% with PadChar.
-term(T, none, _Adj, none, _Pad) -> T;
-term(T, none, Adj, P, Pad) -> term(T, P, Adj, P, Pad);
+term(T, none, _Adj, none, _Pad) ->
+ T;
+term(T, none, Adj, P, Pad) ->
+ term(T, P, Adj, P, Pad);
term(T, F, Adj, P0, Pad) ->
L = lists:flatlength(T),
- P = case P0 of none -> erlang:min(L, F); _ -> P0 end,
+ P =
+ case P0 of
+ none -> erlang:min(L, F);
+ _ -> P0
+ end,
if
L > P ->
- adjust(chars($*, P), chars(Pad, F-P), Adj);
+ adjust(chars($*, P), chars(Pad, F - P), Adj);
F >= P ->
- adjust(T, chars(Pad, F-L), Adj)
+ adjust(T, chars(Pad, F - L), Adj)
end.
%% fwrite_e(Float, Field, Adjust, Precision, PadChar)
-fwrite_e(Fl, none, Adj, none, Pad) -> %Default values
+%Default values
+fwrite_e(Fl, none, Adj, none, Pad) ->
fwrite_e(Fl, none, Adj, 6, Pad);
fwrite_e(Fl, none, _Adj, P, _Pad) when P >= 2 ->
float_e(Fl, float_data(Fl), P);
@@ -328,12 +346,13 @@ fwrite_e(Fl, F, Adj, none, Pad) ->
fwrite_e(Fl, F, Adj, P, Pad) when P >= 2 ->
term(float_e(Fl, float_data(Fl), P), F, Adj, F, Pad).
-float_e(Fl, Fd, P) when Fl < 0.0 -> %Negative numbers
- [$-|float_e(-Fl, Fd, P)];
-float_e(_Fl, {Ds,E}, P) ->
- case float_man(Ds, 1, P-1) of
- {[$0|Fs],true} -> [[$1|Fs]|float_exp(E)];
- {Fs,false} -> [Fs|float_exp(E-1)]
+%Negative numbers
+float_e(Fl, Fd, P) when Fl < 0.0 ->
+ [$- | float_e(-Fl, Fd, P)];
+float_e(_Fl, {Ds, E}, P) ->
+ case float_man(Ds, 1, P - 1) of
+ {[$0 | Fs], true} -> [[$1 | Fs] | float_exp(E)];
+ {Fs, false} -> [Fs | float_exp(E - 1)]
end.
%% float_man([Digit], Icount, Dcount) -> {[Chars],CarryFlag}.
@@ -342,38 +361,43 @@ float_e(_Fl, {Ds,E}, P) ->
%% caller decide what to do at top.
float_man(Ds, 0, Dc) ->
- {Cs,C} = float_man(Ds, Dc),
- {[$.|Cs],C};
-float_man([D|Ds], I, Dc) ->
- case float_man(Ds, I-1, Dc) of
- {Cs,true} when D =:= $9 -> {[$0|Cs],true};
- {Cs,true} -> {[D+1|Cs],false};
- {Cs,false} -> {[D|Cs],false}
+ {Cs, C} = float_man(Ds, Dc),
+ {[$. | Cs], C};
+float_man([D | Ds], I, Dc) ->
+ case float_man(Ds, I - 1, Dc) of
+ {Cs, true} when D =:= $9 -> {[$0 | Cs], true};
+ {Cs, true} -> {[D + 1 | Cs], false};
+ {Cs, false} -> {[D | Cs], false}
end;
-float_man([], I, Dc) -> %Pad with 0's
- {string:chars($0, I, [$.|string:chars($0, Dc)]),false}.
-
-float_man([D|_], 0) when D >= $5 -> {[],true};
-float_man([_|_], 0) -> {[],false};
-float_man([D|Ds], Dc) ->
- case float_man(Ds, Dc-1) of
- {Cs,true} when D =:= $9 -> {[$0|Cs],true};
- {Cs,true} -> {[D+1|Cs],false};
- {Cs,false} -> {[D|Cs],false}
+%Pad with 0's
+float_man([], I, Dc) ->
+ {string:chars($0, I, [$. | string:chars($0, Dc)]), false}.
+
+float_man([D | _], 0) when D >= $5 -> {[], true};
+float_man([_ | _], 0) ->
+ {[], false};
+float_man([D | Ds], Dc) ->
+ case float_man(Ds, Dc - 1) of
+ {Cs, true} when D =:= $9 -> {[$0 | Cs], true};
+ {Cs, true} -> {[D + 1 | Cs], false};
+ {Cs, false} -> {[D | Cs], false}
end;
-float_man([], Dc) -> {string:chars($0, Dc),false}. %Pad with 0's
+%Pad with 0's
+float_man([], Dc) ->
+ {string:chars($0, Dc), false}.
%% float_exp(Exponent) -> [Char].
%% Generate the exponent of a floating point number. Always include sign.
float_exp(E) when E >= 0 ->
- [$e,$+|integer_to_list(E)];
+ [$e, $+ | integer_to_list(E)];
float_exp(E) ->
- [$e|integer_to_list(E)].
+ [$e | integer_to_list(E)].
%% fwrite_f(FloatData, Field, Adjust, Precision, PadChar)
-fwrite_f(Fl, none, Adj, none, Pad) -> %Default values
+%Default values
+fwrite_f(Fl, none, Adj, none, Pad) ->
fwrite_f(Fl, none, Adj, 6, Pad);
fwrite_f(Fl, none, _Adj, P, _Pad) when P >= 1 ->
float_f(Fl, float_data(Fl), P);
@@ -383,13 +407,15 @@ fwrite_f(Fl, F, Adj, P, Pad) when P >= 1 ->
term(float_f(Fl, float_data(Fl), P), F, Adj, F, Pad).
float_f(Fl, Fd, P) when Fl < 0.0 ->
- [$-|float_f(-Fl, Fd, P)];
-float_f(Fl, {Ds,E}, P) when E =< 0 ->
- float_f(Fl, {string:chars($0, -E+1, Ds),1}, P); %Prepend enough 0's
-float_f(_Fl, {Ds,E}, P) ->
+ [$- | float_f(-Fl, Fd, P)];
+float_f(Fl, {Ds, E}, P) when E =< 0 ->
+ %Prepend enough 0's
+ float_f(Fl, {string:chars($0, -E + 1, Ds), 1}, P);
+float_f(_Fl, {Ds, E}, P) ->
case float_man(Ds, E, P) of
- {Fs,true} -> "1" ++ Fs; %Handle carry
- {Fs,false} -> Fs
+ %Handle carry
+ {Fs, true} -> "1" ++ Fs;
+ {Fs, false} -> Fs
end.
%% float_data([FloatChar]) -> {[Digit],Exponent}
@@ -397,11 +423,11 @@ float_f(_Fl, {Ds,E}, P) ->
float_data(Fl) ->
float_data(float_to_list(Fl), []).
-float_data([$e|E], Ds) ->
- {lists:reverse(Ds),list_to_integer(E)+1};
-float_data([D|Cs], Ds) when D >= $0, D =< $9 ->
- float_data(Cs, [D|Ds]);
-float_data([_|Cs], Ds) ->
+float_data([$e | E], Ds) ->
+ {lists:reverse(Ds), list_to_integer(E) + 1};
+float_data([D | Cs], Ds) when D >= $0, D =< $9 ->
+ float_data(Cs, [D | Ds]);
+float_data([_ | Cs], Ds) ->
float_data(Cs, Ds).
%% fwrite_g(Float, Field, Adjust, Precision, PadChar)
@@ -413,83 +439,98 @@ fwrite_g(Fl, F, Adj, none, Pad) ->
fwrite_g(Fl, F, Adj, 6, Pad);
fwrite_g(Fl, F, Adj, P, Pad) when P >= 1 ->
A = abs(Fl),
- E = if A < 1.0e-1 -> -2;
- A < 1.0e0 -> -1;
- A < 1.0e1 -> 0;
- A < 1.0e2 -> 1;
- A < 1.0e3 -> 2;
- A < 1.0e4 -> 3;
- true -> fwrite_f
- end,
- if P =< 1, E =:= -1;
- P-1 > E, E >= -1 ->
- fwrite_f(Fl, F, Adj, P-1-E, Pad);
- P =< 1 ->
- fwrite_e(Fl, F, Adj, 2, Pad);
- true ->
- fwrite_e(Fl, F, Adj, P, Pad)
+ E =
+ if
+ A < 1.0e-1 -> -2;
+ A < 1.0e0 -> -1;
+ A < 1.0e1 -> 0;
+ A < 1.0e2 -> 1;
+ A < 1.0e3 -> 2;
+ A < 1.0e4 -> 3;
+ true -> fwrite_f
+ end,
+ if
+ P =< 1, E =:= -1;
+ P - 1 > E, E >= -1 ->
+ fwrite_f(Fl, F, Adj, P - 1 - E, Pad);
+ P =< 1 ->
+ fwrite_e(Fl, F, Adj, 2, Pad);
+ true ->
+ fwrite_e(Fl, F, Adj, P, Pad)
end.
-
%% string(String, Field, Adjust, Precision, PadChar)
-string(S, none, _Adj, none, _Pad) -> S;
+string(S, none, _Adj, none, _Pad) ->
+ S;
string(S, F, Adj, none, Pad) ->
string_field(S, F, Adj, lists:flatlength(S), Pad);
string(S, none, _Adj, P, Pad) ->
string_field(S, P, left, lists:flatlength(S), Pad);
string(S, F, Adj, P, Pad) when F >= P ->
N = lists:flatlength(S),
- if F > P ->
- if N > P ->
- adjust(flat_trunc(S, P), chars(Pad, F-P), Adj);
+ if
+ F > P ->
+ if
+ N > P ->
+ adjust(flat_trunc(S, P), chars(Pad, F - P), Adj);
N < P ->
- adjust([S|chars(Pad, P-N)], chars(Pad, F-P), Adj);
- true -> % N == P
- adjust(S, chars(Pad, F-P), Adj)
+ adjust([S | chars(Pad, P - N)], chars(Pad, F - P), Adj);
+ % N == P
+ true ->
+ adjust(S, chars(Pad, F - P), Adj)
end;
- true -> % F == P
- string_field(S, F, Adj, N, Pad)
+ % F == P
+ true ->
+ string_field(S, F, Adj, N, Pad)
end.
string_field(S, F, _Adj, N, _Pad) when N > F ->
flat_trunc(S, F);
string_field(S, F, Adj, N, Pad) when N < F ->
- adjust(S, chars(Pad, F-N), Adj);
-string_field(S, _, _, _, _) -> % N == F
+ adjust(S, chars(Pad, F - N), Adj);
+% N == F
+string_field(S, _, _, _, _) ->
S.
%% unprefixed_integer(Int, Field, Adjust, Base, PadChar, Lowercase)
%% -> [Char].
-unprefixed_integer(Int, F, Adj, Base, Pad, Lowercase)
- when Base >= 2, Base =< 1+$Z-$A+10 ->
- if Int < 0 ->
+unprefixed_integer(Int, F, Adj, Base, Pad, Lowercase) when
+ Base >= 2, Base =< 1 + $Z - $A + 10
+->
+ if
+ Int < 0 ->
S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
- term([$-|S], F, Adj, none, Pad);
- true ->
- S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
- term(S, F, Adj, none, Pad)
+ term([$- | S], F, Adj, none, Pad);
+ true ->
+ S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
+ term(S, F, Adj, none, Pad)
end.
%% prefixed_integer(Int, Field, Adjust, Base, PadChar, Prefix, Lowercase)
%% -> [Char].
-prefixed_integer(Int, F, Adj, Base, Pad, Prefix, Lowercase)
- when Base >= 2, Base =< 1+$Z-$A+10 ->
- if Int < 0 ->
+prefixed_integer(Int, F, Adj, Base, Pad, Prefix, Lowercase) when
+ Base >= 2, Base =< 1 + $Z - $A + 10
+->
+ if
+ Int < 0 ->
S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
- term([$-,Prefix|S], F, Adj, none, Pad);
- true ->
- S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
- term([Prefix|S], F, Adj, none, Pad)
+ term([$-, Prefix | S], F, Adj, none, Pad);
+ true ->
+ S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
+ term([Prefix | S], F, Adj, none, Pad)
end.
%% char(Char, Field, Adjust, Precision, PadChar) -> [Char].
-char(C, none, _Adj, none, _Pad) -> [C];
-char(C, F, _Adj, none, _Pad) -> chars(C, F);
-char(C, none, _Adj, P, _Pad) -> chars(C, P);
+char(C, none, _Adj, none, _Pad) ->
+ [C];
+char(C, F, _Adj, none, _Pad) ->
+ chars(C, F);
+char(C, none, _Adj, P, _Pad) ->
+ chars(C, P);
char(C, F, Adj, P, Pad) when F >= P ->
adjust(chars(C, P), chars(Pad, F - P), Adj).
@@ -503,8 +544,8 @@ newline(F, right, _P, _Pad) -> chars($\n, F).
%%
adjust(Data, [], _) -> Data;
-adjust(Data, Pad, left) -> [Data|Pad];
-adjust(Data, Pad, right) -> [Pad|Data].
+adjust(Data, Pad, left) -> [Data | Pad];
+adjust(Data, Pad, right) -> [Pad | Data].
%% Flatten and truncate a deep list to at most N elements.
flat_trunc(List, N) when is_integer(N), N >= 0 ->
@@ -512,8 +553,8 @@ flat_trunc(List, N) when is_integer(N), N >= 0 ->
flat_trunc(L, 0, R) when is_list(L) ->
lists:reverse(R);
-flat_trunc([H|T], N, R) ->
- flat_trunc(T, N-1, [H|R]);
+flat_trunc([H | T], N, R) ->
+ flat_trunc(T, N - 1, [H | R]);
flat_trunc([], _, R) ->
lists:reverse(R).
@@ -524,15 +565,15 @@ chars(_C, 0) ->
chars(C, 1) ->
[C];
chars(C, 2) ->
- [C,C];
+ [C, C];
chars(C, 3) ->
- [C,C,C];
+ [C, C, C];
chars(C, N) when is_integer(N), (N band 1) =:= 0 ->
S = chars(C, N bsr 1),
- [S|S];
+ [S | S];
chars(C, N) when is_integer(N) ->
S = chars(C, N bsr 1),
- [C,S|S].
+ [C, S | S].
%chars(C, N, Tail) ->
% [chars(C, N)|Tail].
@@ -541,12 +582,12 @@ chars(C, N) when is_integer(N) ->
cond_lowercase(String, true) ->
lowercase(String);
-cond_lowercase(String,false) ->
+cond_lowercase(String, false) ->
String.
-lowercase([H|T]) when is_integer(H), H >= $A, H =< $Z ->
- [(H-$A+$a)|lowercase(T)];
-lowercase([H|T]) ->
- [H|lowercase(T)];
+lowercase([H | T]) when is_integer(H), H >= $A, H =< $Z ->
+ [(H - $A + $a) | lowercase(T)];
+lowercase([H | T]) ->
+ [H | lowercase(T)];
lowercase([]) ->
- [].
\ No newline at end of file
+ [].
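field_value/3 in the hunk above parses a decimal field width from the format string one character at a time: `10 * F + (C - $0)` shifts the accumulator left one decimal place and adds the new digit, so "~10s" yields a width of 10. A standalone sketch of the same idiom (hypothetical helper, not part of this patch):

    %% Parse a leading run of ASCII digits; return {Value, Rest}.
    parse_int(Str) -> parse_int(Str, 0).

    parse_int([C | Rest], Acc) when C >= $0, C =< $9 ->
        parse_int(Rest, 10 * Acc + (C - $0));
    parse_int(Rest, Acc) ->
        {Acc, Rest}.

    %% parse_int("10s") =:= {10, "s"}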
diff --git a/src/couch_log/src/couch_log_util.erl b/src/couch_log/src/couch_log_util.erl
index c8b8e54ea..8be11e12d 100644
--- a/src/couch_log/src/couch_log_util.erl
+++ b/src/couch_log/src/couch_log_util.erl
@@ -12,7 +12,6 @@
-module(couch_log_util).
-
-export([
should_log/1,
iso8601_timestamp/0,
@@ -25,26 +24,21 @@
string_p/1
]).
-
-include("couch_log.hrl").
-
-spec should_log(#log_entry{} | atom()) -> boolean().
should_log(#log_entry{level = Level}) ->
should_log(Level);
-
should_log(Level) ->
level_to_integer(Level) >= couch_log_config:get(level_int).
-
-spec iso8601_timestamp() -> string().
iso8601_timestamp() ->
- {_,_,Micro} = Now = os:timestamp(),
- {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
+ {_, _, Micro} = Now = os:timestamp(),
+ {{Year, Month, Date}, {Hour, Minute, Second}} = calendar:now_to_datetime(Now),
Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
-
-spec get_msg_id() -> string().
get_msg_id() ->
case erlang:get(nonce) of
@@ -52,78 +46,73 @@ get_msg_id() ->
MsgId -> MsgId
end.
-
-spec level_to_integer(atom() | string() | integer()) -> integer().
level_to_integer(L) when L >= 0, L =< 9 -> L;
-level_to_integer(debug) -> 1;
-level_to_integer(info) -> 2;
-level_to_integer(notice) -> 3;
-level_to_integer(warning) -> 4;
-level_to_integer(warn) -> 4;
-level_to_integer(error) -> 5;
-level_to_integer(err) -> 5;
-level_to_integer(critical) -> 6;
-level_to_integer(crit) -> 6;
-level_to_integer(alert) -> 7;
-level_to_integer(emergency) -> 8;
-level_to_integer(emerg) -> 8;
-level_to_integer(none) -> 9;
-level_to_integer("debug") -> 1;
-level_to_integer("info") -> 2;
-level_to_integer("notice") -> 3;
-level_to_integer("warning") -> 4;
-level_to_integer("warn") -> 4;
-level_to_integer("error") -> 5;
-level_to_integer("err") -> 5;
-level_to_integer("critical") -> 6;
-level_to_integer("crit") -> 6;
-level_to_integer("alert") -> 7;
-level_to_integer("emergency") -> 8;
-level_to_integer("emerg") -> 8;
-level_to_integer("none") -> 9;
-level_to_integer("1") -> 1;
-level_to_integer("2") -> 2;
-level_to_integer("3") -> 3;
-level_to_integer("4") -> 4;
-level_to_integer("5") -> 5;
-level_to_integer("6") -> 6;
-level_to_integer("7") -> 7;
-level_to_integer("8") -> 8;
-level_to_integer("9") -> 9.
-
+level_to_integer(debug) -> 1;
+level_to_integer(info) -> 2;
+level_to_integer(notice) -> 3;
+level_to_integer(warning) -> 4;
+level_to_integer(warn) -> 4;
+level_to_integer(error) -> 5;
+level_to_integer(err) -> 5;
+level_to_integer(critical) -> 6;
+level_to_integer(crit) -> 6;
+level_to_integer(alert) -> 7;
+level_to_integer(emergency) -> 8;
+level_to_integer(emerg) -> 8;
+level_to_integer(none) -> 9;
+level_to_integer("debug") -> 1;
+level_to_integer("info") -> 2;
+level_to_integer("notice") -> 3;
+level_to_integer("warning") -> 4;
+level_to_integer("warn") -> 4;
+level_to_integer("error") -> 5;
+level_to_integer("err") -> 5;
+level_to_integer("critical") -> 6;
+level_to_integer("crit") -> 6;
+level_to_integer("alert") -> 7;
+level_to_integer("emergency") -> 8;
+level_to_integer("emerg") -> 8;
+level_to_integer("none") -> 9;
+level_to_integer("1") -> 1;
+level_to_integer("2") -> 2;
+level_to_integer("3") -> 3;
+level_to_integer("4") -> 4;
+level_to_integer("5") -> 5;
+level_to_integer("6") -> 6;
+level_to_integer("7") -> 7;
+level_to_integer("8") -> 8;
+level_to_integer("9") -> 9.
-spec level_to_atom(atom() | string() | integer()) -> atom().
-level_to_atom(L) when is_atom(L) -> L;
-level_to_atom("1") -> debug;
-level_to_atom("debug") -> debug;
-level_to_atom("2") -> info;
-level_to_atom("info") -> info;
-level_to_atom("3") -> notice;
-level_to_atom("notice") -> notice;
-level_to_atom("4") -> warning;
-level_to_atom("warning") -> warning;
-level_to_atom("warn") -> warning;
-level_to_atom("5") -> error;
-level_to_atom("error") -> error;
-level_to_atom("err") -> error;
-level_to_atom("6") -> critical;
-level_to_atom("critical") -> critical;
-level_to_atom("crit") -> critical;
-level_to_atom("7") -> alert;
-level_to_atom("alert") -> alert;
-level_to_atom("8") -> emergency;
-level_to_atom("emergency") -> emergency;
-level_to_atom("emerg") -> emergency;
-level_to_atom("9") -> none;
-level_to_atom("none") -> none;
+level_to_atom(L) when is_atom(L) -> L;
+level_to_atom("1") -> debug;
+level_to_atom("debug") -> debug;
+level_to_atom("2") -> info;
+level_to_atom("info") -> info;
+level_to_atom("3") -> notice;
+level_to_atom("notice") -> notice;
+level_to_atom("4") -> warning;
+level_to_atom("warning") -> warning;
+level_to_atom("warn") -> warning;
+level_to_atom("5") -> error;
+level_to_atom("error") -> error;
+level_to_atom("err") -> error;
+level_to_atom("6") -> critical;
+level_to_atom("critical") -> critical;
+level_to_atom("crit") -> critical;
+level_to_atom("7") -> alert;
+level_to_atom("alert") -> alert;
+level_to_atom("8") -> emergency;
+level_to_atom("emergency") -> emergency;
+level_to_atom("emerg") -> emergency;
+level_to_atom("9") -> none;
+level_to_atom("none") -> none;
level_to_atom(V) when is_integer(V) -> level_to_atom(integer_to_list(V));
-level_to_atom(V) when is_list(V) -> info.
-
-
-level_to_string(L) when is_atom(L) -> atom_to_list(L);
-level_to_string(L) -> atom_to_list(level_to_atom(L)).
-
+level_to_atom(V) when is_list(V) -> info.
+level_to_string(L) when is_atom(L) -> atom_to_list(L);
+level_to_string(L) -> atom_to_list(level_to_atom(L)).
% From error_logger_file_h via lager_stdlib.erl
string_p([]) ->
@@ -131,19 +120,28 @@ string_p([]) ->
string_p(Term) ->
string_p1(Term).
-string_p1([H|T]) when is_integer(H), H >= $\s, H < 256 ->
+string_p1([H | T]) when is_integer(H), H >= $\s, H < 256 ->
+ string_p1(T);
+string_p1([$\n | T]) ->
+ string_p1(T);
+string_p1([$\r | T]) ->
+ string_p1(T);
+string_p1([$\t | T]) ->
+ string_p1(T);
+string_p1([$\v | T]) ->
+ string_p1(T);
+string_p1([$\b | T]) ->
+ string_p1(T);
+string_p1([$\f | T]) ->
+ string_p1(T);
+string_p1([$\e | T]) ->
string_p1(T);
-string_p1([$\n|T]) -> string_p1(T);
-string_p1([$\r|T]) -> string_p1(T);
-string_p1([$\t|T]) -> string_p1(T);
-string_p1([$\v|T]) -> string_p1(T);
-string_p1([$\b|T]) -> string_p1(T);
-string_p1([$\f|T]) -> string_p1(T);
-string_p1([$\e|T]) -> string_p1(T);
-string_p1([H|T]) when is_list(H) ->
+string_p1([H | T]) when is_list(H) ->
case string_p1(H) of
true -> string_p1(T);
- _ -> false
+ _ -> false
end;
-string_p1([]) -> true;
-string_p1(_) -> false.
+string_p1([]) ->
+ true;
+string_p1(_) ->
+ false.
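couch_log_util normalizes log levels given as atoms, strings, or digit strings onto a single integer scale (debug = 1 up to none = 9), so should_log/1 reduces to one integer comparison against the configured threshold. Assuming level_to_integer/1 and level_to_atom/1 are exported (the export list is only partially visible in this hunk), usage looks like:

    1> couch_log_util:level_to_integer("warn").
    4
    2> couch_log_util:level_to_atom("5").
    error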
diff --git a/src/couch_log/src/couch_log_writer.erl b/src/couch_log/src/couch_log_writer.erl
index 5e28a0775..18bb557ae 100644
--- a/src/couch_log/src/couch_log_writer.erl
+++ b/src/couch_log/src/couch_log_writer.erl
@@ -13,28 +13,22 @@
% @doc Modules wishing to handle writing log
% messages should implement this behavior.
-
-module(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
-define(DEFAULT_WRITER, couch_log_writer_stderr).
-
--callback init() -> {ok, State::term()}.
--callback terminate(Reason::term(), State::term()) -> ok.
--callback write(LogEntry::#log_entry{}, State::term()) ->
- {ok, NewState::term()}.
-
+-callback init() -> {ok, State :: term()}.
+-callback terminate(Reason :: term(), State :: term()) -> ok.
+-callback write(LogEntry :: #log_entry{}, State :: term()) ->
+ {ok, NewState :: term()}.
-spec init() -> {atom(), term()}.
init() ->
@@ -42,18 +36,15 @@ init() ->
{ok, St} = Writer:init(),
{Writer, St}.
-
-spec terminate(term(), {atom(), term()}) -> ok.
terminate(Reason, {Writer, St}) ->
ok = Writer:terminate(Reason, St).
-
-spec write(#log_entry{}, {atom(), term()}) -> {atom(), term()}.
write(Entry, {Writer, St}) ->
{ok, NewSt} = Writer:write(Entry, St),
{Writer, NewSt}.
-
get_writer_mod() ->
WriterStr = config:get("log", "writer", "stderr"),
ModName1 = to_atom("couch_log_writer_" ++ WriterStr),
@@ -70,14 +61,13 @@ get_writer_mod() ->
end
end.
-
to_atom(Str) ->
try list_to_existing_atom(Str) of
Atom -> Atom
- catch _:_ ->
- undefined
+ catch
+ _:_ ->
+ undefined
end.
-
mod_exists(ModName) ->
code:which(ModName) /= non_existing.
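The couch_log_writer module above is a behaviour: init/0 must return {ok, State}, terminate/2 returns ok, and write/2 returns {ok, NewState}, with the concrete module resolved from the [log] writer config key as "couch_log_writer_" ++ Name in get_writer_mod/0. A minimal no-op writer satisfying the contract (hypothetical module, shown only to illustrate the callback shape; the stderr and file writers below follow the same pattern):

    -module(couch_log_writer_null).
    -behaviour(couch_log_writer).

    -export([init/0, terminate/2, write/2]).

    %% No state is needed for a writer that discards everything.
    init() -> {ok, nil}.

    terminate(_Reason, _St) -> ok.

    %% Accept the entry and keep the state unchanged.
    write(_Entry, St) -> {ok, St}.

Setting writer = null in the [log] section would then select this module, per get_writer_mod/0 above.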
diff --git a/src/couch_log/src/couch_log_writer_file.erl b/src/couch_log/src/couch_log_writer_file.erl
index 1fe35a8ab..9b7255050 100644
--- a/src/couch_log/src/couch_log_writer_file.erl
+++ b/src/couch_log/src/couch_log_writer_file.erl
@@ -13,18 +13,15 @@
-module(couch_log_writer_file).
-behaviour(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include_lib("kernel/include/file.hrl").
-include("couch_log.hrl").
-
-record(st, {
file_path,
fd,
@@ -32,17 +29,14 @@
last_check
}).
-
-define(CHECK_INTERVAL, 30000000).
-
-ifdef(TEST).
-export([
maybe_reopen/1
]).
-endif.
-
init() ->
FilePath = config:get("log", "file", "./couch.log"),
Opts = [append, raw] ++ buffer_opt(),
@@ -69,14 +63,12 @@ init() ->
EnsureDirError
end.
-
terminate(_, St) ->
% Apparently delayed_write can require two closes
file:close(St#st.fd),
file:close(St#st.fd),
ok.
-
write(Entry, St) ->
{ok, NewSt} = maybe_reopen(St),
#log_entry{
@@ -99,7 +91,6 @@ write(Entry, St) ->
ok = file:write(NewSt#st.fd, [Data, Msg, "\n"]),
{ok, NewSt}.
-
buffer_opt() ->
WriteBuffer = config:get_integer("log", "write_buffer", 0),
WriteDelay = config:get_integer("log", "write_delay", 0),
@@ -110,7 +101,6 @@ buffer_opt() ->
[]
end.
-
maybe_reopen(St) ->
#st{
last_check = LastCheck
@@ -121,7 +111,6 @@ maybe_reopen(St) ->
false -> {ok, St}
end.
-
reopen(St) ->
case file:read_file_info(St#st.file_path) of
{ok, FInfo} ->
diff --git a/src/couch_log/src/couch_log_writer_journald.erl b/src/couch_log/src/couch_log_writer_journald.erl
index 02a9c6900..c2bdd940c 100644
--- a/src/couch_log/src/couch_log_writer_journald.erl
+++ b/src/couch_log/src/couch_log_writer_journald.erl
@@ -13,25 +13,20 @@
-module(couch_log_writer_journald).
-behaviour(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
init() ->
{ok, nil}.
-
terminate(_, _St) ->
ok.
-
write(Entry, St) ->
#log_entry{
level = Level,
@@ -51,19 +46,18 @@ write(Entry, St) ->
io:format(standard_error, [Data, Msg, "\n"], []),
{ok, St}.
-
% log level mapping from sd-daemon(3)
% https://www.freedesktop.org/software/systemd/man/sd-daemon.html
-spec level_for_journald(atom()) -> integer().
level_for_journald(Level) when is_atom(Level) ->
case Level of
- debug -> 7;
- info -> 6;
- notice -> 5;
- warning -> 4;
- error -> 3;
- critical -> 2;
- alert -> 1;
- emergency -> 0;
- _ -> 3
+ debug -> 7;
+ info -> 6;
+ notice -> 5;
+ warning -> 4;
+ error -> 3;
+ critical -> 2;
+ alert -> 1;
+ emergency -> 0;
+ _ -> 3
end.
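
A worked example of the mapping above: journald reads the priority from a <N> prefix on each stderr line (see sd-daemon(3)), so

    1> level_for_journald(warning).
    4

means a warning entry goes out on standard_error as a line starting with "<4>" — presumably via the Data iolist assembled in the elided part of write/2.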
diff --git a/src/couch_log/src/couch_log_writer_stderr.erl b/src/couch_log/src/couch_log_writer_stderr.erl
index 7c5fc6ca0..01e350971 100644
--- a/src/couch_log/src/couch_log_writer_stderr.erl
+++ b/src/couch_log/src/couch_log_writer_stderr.erl
@@ -13,25 +13,20 @@
-module(couch_log_writer_stderr).
-behaviour(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
init() ->
{ok, nil}.
-
terminate(_, _St) ->
ok.
-
write(Entry, St) ->
#log_entry{
level = Level,
diff --git a/src/couch_log/src/couch_log_writer_syslog.erl b/src/couch_log/src/couch_log_writer_syslog.erl
index e3a6fc4b6..b95cf018c 100644
--- a/src/couch_log/src/couch_log_writer_syslog.erl
+++ b/src/couch_log/src/couch_log_writer_syslog.erl
@@ -13,17 +13,14 @@
-module(couch_log_writer_syslog).
-behavior(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
-record(st, {
socket,
host,
@@ -34,10 +31,8 @@
facility
}).
-
-define(SYSLOG_VERSION, 1).
-
-ifdef(TEST).
-export([
get_facility/1,
@@ -45,20 +40,20 @@
]).
-endif.
-
init() ->
{ok, Socket} = gen_udp:open(0),
- Host = case config:get("log", "syslog_host") of
- undefined ->
- undefined;
- SysLogHost ->
- case inet:getaddr(SysLogHost, inet) of
- {ok, Address} ->
- Address;
- _ ->
- undefined
- end
+ Host =
+ case config:get("log", "syslog_host") of
+ undefined ->
+ undefined;
+ SysLogHost ->
+ case inet:getaddr(SysLogHost, inet) of
+ {ok, Address} ->
+ Address;
+ _ ->
+ undefined
+ end
end,
{ok, #st{
@@ -71,11 +66,9 @@ init() ->
facility = get_facility(config:get("log", "syslog_facility", "local2"))
}}.
-
terminate(_Reason, St) ->
gen_udp:close(St#st.socket).
-
write(Entry, St) ->
#log_entry{
level = Level,
@@ -98,10 +91,8 @@ write(Entry, St) ->
ok = send(St, [Pre, Msg, $\n]),
{ok, St}.
-
-send(#st{host=undefined}, Packet) ->
+send(#st{host = undefined}, Packet) ->
io:format(standard_error, "~s", [Packet]);
-
send(St, Packet) ->
#st{
socket = Socket,
@@ -110,53 +101,101 @@ send(St, Packet) ->
} = St,
gen_udp:send(Socket, Host, Port, Packet).
-
get_facility(Name) ->
- FacId = case Name of
- "kern" -> 0; % Kernel messages
- "user" -> 1; % Random user-level messages
- "mail" -> 2; % Mail system
- "daemon" -> 3; % System daemons
- "auth" -> 4; % Security/Authorization messages
- "syslog" -> 5; % Internal Syslog messages
- "lpr" -> 6; % Line printer subsystem
- "news" -> 7; % Network news subsystems
- "uucp" -> 8; % UUCP subsystem
- "clock" -> 9; % Clock daemon
- "authpriv" -> 10; % Security/Authorization messages
- "ftp" -> 11; % FTP daemon
- "ntp" -> 12; % NTP subsystem
- "audit" -> 13; % Log audit
- "alert" -> 14; % Log alert
- "cron" -> 15; % Scheduling daemon
- "local0" -> 16; % Local use 0
- "local1" -> 17; % Local use 1
- "local2" -> 18; % Local use 2
- "local3" -> 19; % Local use 3
- "local4" -> 20; % Local use 4
- "local5" -> 21; % Local use 5
- "local6" -> 22; % Local use 6
- "local7" -> 23; % Local use 7
- _ ->
- try list_to_integer(Name) of
- N when N >= 0, N =< 23 -> N;
- _ -> 23
- catch _:_ ->
- 23
- end
- end,
+ FacId =
+ case Name of
+ % Kernel messages
+ "kern" ->
+ 0;
+ % Random user-level messages
+ "user" ->
+ 1;
+ % Mail system
+ "mail" ->
+ 2;
+ % System daemons
+ "daemon" ->
+ 3;
+ % Security/Authorization messages
+ "auth" ->
+ 4;
+ % Internal Syslog messages
+ "syslog" ->
+ 5;
+ % Line printer subsystem
+ "lpr" ->
+ 6;
+ % Network news subsystems
+ "news" ->
+ 7;
+ % UUCP subsystem
+ "uucp" ->
+ 8;
+ % Clock daemon
+ "clock" ->
+ 9;
+ % Security/Authorization messages
+ "authpriv" ->
+ 10;
+ % FTP daemon
+ "ftp" ->
+ 11;
+ % NTP subsystem
+ "ntp" ->
+ 12;
+ % Log audit
+ "audit" ->
+ 13;
+ % Log alert
+ "alert" ->
+ 14;
+ % Scheduling daemon
+ "cron" ->
+ 15;
+ % Local use 0
+ "local0" ->
+ 16;
+ % Local use 1
+ "local1" ->
+ 17;
+ % Local use 2
+ "local2" ->
+ 18;
+ % Local use 3
+ "local3" ->
+ 19;
+ % Local use 4
+ "local4" ->
+ 20;
+ % Local use 5
+ "local5" ->
+ 21;
+ % Local use 6
+ "local6" ->
+ 22;
+ % Local use 7
+ "local7" ->
+ 23;
+ _ ->
+ try list_to_integer(Name) of
+ N when N >= 0, N =< 23 -> N;
+ _ -> 23
+ catch
+ _:_ ->
+ 23
+ end
+ end,
FacId bsl 3.
-
get_level(Name) when is_atom(Name) ->
case Name of
- debug -> 7;
- info -> 6;
- notice -> 5;
- warning -> 4;
- error -> 3;
- critical -> 2;
- alert -> 1;
- emergency -> 0;
- _ -> 3
+ debug -> 7;
+ info -> 6;
+ notice -> 5;
+ warning -> 4;
+ error -> 3;
+ critical -> 2;
+ alert -> 1;
+ emergency -> 0;
+ _ -> 3
end.
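
Together these two tables implement the standard syslog PRI encoding (RFC 5424: PRI = facility * 8 + severity); get_facility/1 bakes the multiplication in with FacId bsl 3. A worked example with the default "local2" facility, assuming the elided header formatting in write/2 combines the two values with bor:

    1> get_facility("local2").
    144    % 18 bsl 3
    2> get_facility("local2") bor get_level(error).
    147    % the packet header would read <147>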
diff --git a/src/couch_prometheus/src/couch_prometheus_http.erl b/src/couch_prometheus/src/couch_prometheus_http.erl
index bd0c4c6f9..b3df1ea4b 100644
--- a/src/couch_prometheus/src/couch_prometheus_http.erl
+++ b/src/couch_prometheus/src/couch_prometheus_http.erl
@@ -23,10 +23,11 @@
-include_lib("couch/include/couch_db.hrl").
start_link() ->
- IP = case config:get("prometheus", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
+ IP =
+ case config:get("prometheus", "bind_address", "any") of
+ "any" -> any;
+ Else -> Else
+ end,
Port = config:get("prometheus", "port"),
ok = couch_httpd:validate_bind_address(IP),
@@ -47,7 +48,7 @@ start_link() ->
handle_request(MochiReq) ->
RawUri = MochiReq:get(raw_path),
{"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
- PathParts = string:tokens(Path, "/"),
+ PathParts = string:tokens(Path, "/"),
try
case PathParts of
["_node", Node, "_prometheus"] ->
@@ -55,16 +56,19 @@ handle_request(MochiReq) ->
_ ->
send_error(MochiReq, 404, <<"not_found">>, <<>>)
end
- catch T:R ->
- Body = list_to_binary(io_lib:format("~p:~p", [T, R])),
- send_error(MochiReq, 500, <<"server_error">>, Body)
+ catch
+ T:R ->
+ Body = list_to_binary(io_lib:format("~p:~p", [T, R])),
+ send_error(MochiReq, 500, <<"server_error">>, Body)
end.
send_prometheus(MochiReq, Node) ->
Type = "text/plain; version=" ++ ?PROMETHEUS_VERSION,
- Headers = couch_httpd:server_header() ++ [
- {<<"Content-Type">>, ?l2b(Type)}
- ],
+ Headers =
+ couch_httpd:server_header() ++
+ [
+ {<<"Content-Type">>, ?l2b(Type)}
+ ],
Body = call_node(Node, couch_prometheus_server, scrape, []),
send_resp(MochiReq, 200, Headers, Body).
@@ -73,23 +77,29 @@ send_resp(MochiReq, Status, ExtraHeaders, Body) ->
MochiReq:respond({Status, Headers, Body}).
send_error(MochiReq, Code, Error, Reason) ->
- Headers = couch_httpd:server_header() ++ [
- {<<"Content-Type">>, <<"application/json">>}
- ],
- JsonError = {[{<<"error">>, Error},
- {<<"reason">>, Reason}]},
+ Headers =
+ couch_httpd:server_header() ++
+ [
+ {<<"Content-Type">>, <<"application/json">>}
+ ],
+ JsonError =
+ {[
+ {<<"error">>, Error},
+ {<<"reason">>, Reason}
+ ]},
Body = ?JSON_ENCODE(JsonError),
MochiReq:respond({Code, Headers, Body}).
call_node("_local", Mod, Fun, Args) ->
call_node(node(), Mod, Fun, Args);
call_node(Node0, Mod, Fun, Args) when is_list(Node0) ->
- Node1 = try
- list_to_existing_atom(Node0)
- catch
- error:badarg ->
- NoNode = list_to_binary(Node0),
- throw({not_found, <<"no such node: ", NoNode/binary>>})
+ Node1 =
+ try
+ list_to_existing_atom(Node0)
+ catch
+ error:badarg ->
+ NoNode = list_to_binary(Node0),
+ throw({not_found, <<"no such node: ", NoNode/binary>>})
end,
call_node(Node1, Mod, Fun, Args);
call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
diff --git a/src/couch_prometheus/src/couch_prometheus_server.erl b/src/couch_prometheus/src/couch_prometheus_server.erl
index e97df04a4..701483a38 100644
--- a/src/couch_prometheus/src/couch_prometheus_server.erl
+++ b/src/couch_prometheus/src/couch_prometheus_server.erl
@@ -48,7 +48,7 @@ start_link() ->
init([]) ->
Metrics = refresh_metrics(),
RT = update_refresh_timer(),
- {ok, #st{metrics=Metrics, refresh=RT}}.
+ {ok, #st{metrics = Metrics, refresh = RT}}.
scrape() ->
{ok, Metrics} = gen_server:call(?MODULE, scrape),
@@ -57,13 +57,13 @@ scrape() ->
version() ->
?PROMETHEUS_VERSION.
-handle_call(scrape, _from, #st{metrics = Metrics}=State) ->
+handle_call(scrape, _from, #st{metrics = Metrics} = State) ->
{reply, {ok, Metrics}, State};
-handle_call(refresh, _from, #st{refresh=OldRT} = State) ->
+handle_call(refresh, _from, #st{refresh = OldRT} = State) ->
timer:cancel(OldRT),
Metrics = refresh_metrics(),
RT = update_refresh_timer(),
- {reply, ok, State#st{metrics=Metrics, refresh=RT}};
+ {reply, ok, State#st{metrics = Metrics, refresh = RT}};
handle_call(Msg, _From, State) ->
{stop, {unknown_call, Msg}, error, State}.
@@ -73,7 +73,7 @@ handle_cast(Msg, State) ->
handle_info(refresh, State) ->
Metrics = refresh_metrics(),
RT = update_refresh_timer(),
- {noreply, State#st{metrics=Metrics, refresh=RT}};
+ {noreply, State#st{metrics = Metrics, refresh = RT}};
handle_info(Msg, State) ->
{stop, {unknown_info, Msg}, State}.
@@ -86,15 +86,23 @@ code_change(_OldVsn, State, _Extra) ->
refresh_metrics() ->
CouchDB = get_couchdb_stats(),
System = couch_stats_httpd:to_ejson(get_system_stats()),
- couch_prometheus_util:to_bin(lists:map(fun(Line) ->
- io_lib:format("~s~n", [Line])
- end, CouchDB ++ System)).
+ couch_prometheus_util:to_bin(
+ lists:map(
+ fun(Line) ->
+ io_lib:format("~s~n", [Line])
+ end,
+ CouchDB ++ System
+ )
+ ).
get_couchdb_stats() ->
Stats = lists:sort(couch_stats:fetch()),
- lists:flatmap(fun({Path, Info}) ->
- couch_to_prom(Path, Info, Stats)
- end, Stats).
+ lists:flatmap(
+ fun({Path, Info}) ->
+ couch_to_prom(Path, Info, Stats)
+ end,
+ Stats
+ ).
get_system_stats() ->
lists:flatten([
@@ -111,9 +119,12 @@ get_uptime_stat() ->
to_prom(uptime_seconds, counter, couch_app:uptime() div 1000).
get_vm_stats() ->
- MemLabels = lists:map(fun({Type, Value}) ->
- {[{memory_type, Type}], Value}
- end, erlang:memory()),
+ MemLabels = lists:map(
+ fun({Type, Value}) ->
+ {[{memory_type, Type}], Value}
+ end,
+ erlang:memory()
+ ),
{NumGCs, WordsReclaimed, _} = erlang:statistics(garbage_collection),
CtxSwitches = element(1, erlang:statistics(context_switches)),
Reds = element(1, erlang:statistics(reductions)),
@@ -137,14 +148,17 @@ get_io_stats() ->
].
get_message_queue_stats() ->
- Queues = lists:map(fun(Name) ->
- case process_info(whereis(Name), message_queue_len) of
- {message_queue_len, N} ->
- N;
- _ ->
- 0
- end
- end, registered()),
+ Queues = lists:map(
+ fun(Name) ->
+ case process_info(whereis(Name), message_queue_len) of
+ {message_queue_len, N} ->
+ N;
+ _ ->
+ 0
+ end
+ end,
+ registered()
+ ),
[
to_prom(erlang_message_queues, gauge, lists:sum(Queues)),
to_prom(erlang_message_queue_min, gauge, lists:min(Queues)),
@@ -153,13 +167,14 @@ get_message_queue_stats() ->
get_run_queue_stats() ->
%% Workaround for https://bugs.erlang.org/browse/ERL-1355
- {Normal, Dirty} = case erlang:system_info(dirty_cpu_schedulers) > 0 of
- false ->
- {statistics(run_queue), 0};
- true ->
- [DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
- {lists:sum(SQs), DCQ}
- end,
+ {Normal, Dirty} =
+ case erlang:system_info(dirty_cpu_schedulers) > 0 of
+ false ->
+ {statistics(run_queue), 0};
+ true ->
+ [DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
+ {lists:sum(SQs), DCQ}
+ end,
[
to_prom(erlang_scheduler_queues, gauge, Normal),
to_prom(erlang_dirty_cpu_scheduler_queues, gauge, Dirty)
diff --git a/src/couch_prometheus/src/couch_prometheus_sup.erl b/src/couch_prometheus/src/couch_prometheus_sup.erl
index 102ed9454..de5bd6676 100644
--- a/src/couch_prometheus/src/couch_prometheus_sup.erl
+++ b/src/couch_prometheus/src/couch_prometheus_sup.erl
@@ -26,7 +26,8 @@ start_link() ->
init([]) ->
{ok, {
- {one_for_one, 5, 10}, [
+ {one_for_one, 5, 10},
+ [
?CHILD(couch_prometheus_server, worker)
] ++ maybe_start_prometheus_http()
}}.
diff --git a/src/couch_prometheus/src/couch_prometheus_util.erl b/src/couch_prometheus/src/couch_prometheus_util.erl
index c3b58cb3a..ea2cdf737 100644
--- a/src/couch_prometheus/src/couch_prometheus_util.erl
+++ b/src/couch_prometheus/src/couch_prometheus_util.erl
@@ -10,7 +10,7 @@
% License for the specific language governing permissions and limitations under
% the License.
--module(couch_prometheus_util ).
+-module(couch_prometheus_util).
-export([
couch_to_prom/3,
@@ -25,7 +25,6 @@ couch_to_prom([couch_log, level, alert], Info, _All) ->
to_prom(couch_log_requests_total, counter, {[{level, alert}], val(Info)});
couch_to_prom([couch_log, level, Level], Info, _All) ->
to_prom(couch_log_requests_total, {[{level, Level}], val(Info)});
-
couch_to_prom([couch_replicator, checkpoints, failure], Info, _All) ->
to_prom(couch_replicator_checkpoints_failure_total, counter, val(Info));
couch_to_prom([couch_replicator, checkpoints, success], Info, All) ->
@@ -41,7 +40,6 @@ couch_to_prom([couch_replicator, stream_responses, failure], Info, _All) ->
couch_to_prom([couch_replicator, stream_responses, success], Info, All) ->
Total = val(Info) + val([couch_replicator, stream_responses, failure], All),
to_prom(couch_replicator_stream_responses_total, counter, Total);
-
couch_to_prom([couchdb, auth_cache_hits], Info, All) ->
Total = val(Info) + val([couchdb, auth_cache_misses], All),
to_prom(auth_cache_requests_total, counter, Total);
@@ -53,7 +51,6 @@ couch_to_prom([couchdb, httpd_request_methods, Method], Info, _All) ->
to_prom(httpd_request_methods, {[{method, Method}], val(Info)});
couch_to_prom([couchdb, httpd_status_codes, Code], Info, _All) ->
to_prom(httpd_status_codes, {[{code, Code}], val(Info)});
-
couch_to_prom([ddoc_cache, hit], Info, All) ->
Total = val(Info) + val([ddoc_cache, miss], All),
to_prom(ddoc_cache_requests_total, counter, Total);
@@ -61,21 +58,17 @@ couch_to_prom([ddoc_cache, miss], Info, _All) ->
to_prom(ddoc_cache_requests_failures_total, counter, val(Info));
couch_to_prom([ddoc_cache, recovery], Info, _All) ->
to_prom(ddoc_cache_requests_recovery_total, counter, val(Info));
-
couch_to_prom([fabric, read_repairs, failure], Info, _All) ->
to_prom(fabric_read_repairs_failures_total, counter, val(Info));
couch_to_prom([fabric, read_repairs, success], Info, All) ->
Total = val(Info) + val([fabric, read_repairs, failure], All),
to_prom(fabric_read_repairs_total, counter, Total);
-
couch_to_prom([rexi, streams, timeout, init_stream], Info, _All) ->
to_prom(rexi_streams_timeout_total, counter, {[{stage, init_stream}], val(Info)});
couch_to_prom([rexi_streams, timeout, Stage], Info, _All) ->
to_prom(rexi_streams_timeout_total, {[{stage, Stage}], val(Info)});
-
couch_to_prom([couchdb | Rest], Info, All) ->
couch_to_prom(Rest, Info, All);
-
couch_to_prom(Path, Info, _All) ->
case lists:keyfind(type, 1, Info) of
{type, counter} ->
@@ -94,16 +87,20 @@ to_prom(Metric, Type, Data) ->
to_prom(Metric, Instances) when is_list(Instances) ->
lists:flatmap(fun(Inst) -> to_prom(Metric, Inst) end, Instances);
to_prom(Metric, {Labels, Value}) ->
- LabelParts = lists:map(fun({K, V}) ->
- lists:flatten(io_lib:format("~s=\"~s\"", [to_bin(K), to_bin(V)]))
- end, Labels),
- MetricStr = case length(LabelParts) > 0 of
- true ->
- LabelStr = string:join(LabelParts, ", "),
- lists:flatten(io_lib:format("~s{~s}", [to_prom_name(Metric), LabelStr]));
- false ->
- lists:flatten(io_lib:format("~s", [to_prom_name(Metric)]))
- end,
+ LabelParts = lists:map(
+ fun({K, V}) ->
+ lists:flatten(io_lib:format("~s=\"~s\"", [to_bin(K), to_bin(V)]))
+ end,
+ Labels
+ ),
+ MetricStr =
+ case length(LabelParts) > 0 of
+ true ->
+ LabelStr = string:join(LabelParts, ", "),
+ lists:flatten(io_lib:format("~s{~s}", [to_prom_name(Metric), LabelStr]));
+ false ->
+ lists:flatten(io_lib:format("~s", [to_prom_name(Metric)]))
+ end,
[to_bin(io_lib:format("~s ~p", [MetricStr, Value]))];
to_prom(Metric, Value) ->
[to_bin(io_lib:format("~s ~p", [to_prom_name(Metric), Value]))].
@@ -114,18 +111,21 @@ to_prom_summary(Path, Info) ->
{arithmetic_mean, Mean} = lists:keyfind(arithmetic_mean, 1, Value),
{percentile, Percentiles} = lists:keyfind(percentile, 1, Value),
{n, Count} = lists:keyfind(n, 1, Value),
- Quantiles = lists:map(fun({Perc, Val0}) ->
- % Prometheus uses seconds, so we need to covert milliseconds to seconds
- Val = Val0/1000,
- case Perc of
- 50 -> {[{quantile, <<"0.5">>}], Val};
- 75 -> {[{quantile, <<"0.75">>}], Val};
- 90 -> {[{quantile, <<"0.9">>}], Val};
- 95 -> {[{quantile, <<"0.95">>}], Val};
- 99 -> {[{quantile, <<"0.99">>}], Val};
- 999 -> {[{quantile, <<"0.999">>}], Val}
- end
- end, Percentiles),
+ Quantiles = lists:map(
+ fun({Perc, Val0}) ->
+ % Prometheus uses seconds, so we need to convert milliseconds to seconds
+ Val = Val0 / 1000,
+ case Perc of
+ 50 -> {[{quantile, <<"0.5">>}], Val};
+ 75 -> {[{quantile, <<"0.75">>}], Val};
+ 90 -> {[{quantile, <<"0.9">>}], Val};
+ 95 -> {[{quantile, <<"0.95">>}], Val};
+ 99 -> {[{quantile, <<"0.99">>}], Val};
+ 999 -> {[{quantile, <<"0.999">>}], Val}
+ end
+ end,
+ Percentiles
+ ),
SumMetric = path_to_name(Path ++ ["seconds", "sum"]),
SumStat = to_prom(SumMetric, Count * Mean),
CountMetric = path_to_name(Path ++ ["seconds", "count"]),
@@ -136,9 +136,12 @@ to_prom_name(Metric) ->
to_bin(io_lib:format("couchdb_~s", [Metric])).
path_to_name(Path) ->
- Parts = lists:map(fun(Part) ->
- io_lib:format("~s", [Part])
- end, Path),
+ Parts = lists:map(
+ fun(Part) ->
+ io_lib:format("~s", [Part])
+ end,
+ Path
+ ),
string:join(Parts, "_").
counter_metric(Path) ->
@@ -163,4 +166,4 @@ val(Data) ->
val(Key, Stats) ->
{Key, Data} = lists:keyfind(Key, 1, Stats),
- val(Data). \ No newline at end of file
+ val(Data).
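
For reference, to_prom/2 above emits one Prometheus text-exposition line per metric instance. A hypothetical call (value made up, and assuming to_bin/1 accepts an integer label value):

    1> couch_prometheus_util:to_prom(httpd_status_codes, {[{code, 200}], 42}).
    [<<"couchdb_httpd_status_codes{code=\"200\"} 42">>]

to_prom_name/1 contributes the couchdb_ prefix, and to_prom_summary/2 reuses the same path to emit the quantile-labelled lines plus the _sum and _count series.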
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index a0c6d2b8a..0560d3f30 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -34,26 +34,25 @@
get_job_ids/0
]).
-
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
-include_lib("kernel/include/logger.hrl").
-
-spec replicate({[_]}, any()) ->
- {ok, {continuous, binary()}} |
- {ok, #{}} |
- {ok, {cancelled, binary()}} |
- {error, any()} |
- no_return().
+ {ok, {continuous, binary()}}
+ | {ok, #{}}
+ | {ok, {cancelled, binary()}}
+ | {error, any()}
+ | no_return().
replicate(Body, #user_ctx{name = User} = UserCtx) ->
{ok, Id, Rep} = couch_replicator_parse:parse_transient_rep(Body, User),
#{?OPTIONS := Options} = Rep,
- JobId = case couch_replicator_jobs:get_job_id(undefined, Id) of
- {ok, JobId0} -> JobId0;
- {error, not_found} -> Id
- end,
+ JobId =
+ case couch_replicator_jobs:get_job_id(undefined, Id) of
+ {ok, JobId0} -> JobId0;
+ {error, not_found} -> Id
+ end,
case maps:get(<<"cancel">>, Options, false) of
true ->
case check_authorization(JobId, UserCtx) of
@@ -85,7 +84,6 @@ replicate(Body, #user_ctx{name = User} = UserCtx) ->
end
end.
-
jobs() ->
FoldFun = fun(_JTx, _JobId, CouchJobsState, JobData, Acc) ->
case CouchJobsState of
@@ -96,27 +94,26 @@ jobs() ->
end,
couch_replicator_jobs:fold_jobs(undefined, FoldFun, []).
-
job(Id0) when is_binary(Id0) ->
Id1 = couch_replicator_ids:convert(Id0),
- JobId = case couch_replicator_jobs:get_job_id(undefined, Id1) of
- {ok, JobId0} -> JobId0;
- {error, not_found} -> Id1
- end,
+ JobId =
+ case couch_replicator_jobs:get_job_id(undefined, Id1) of
+ {ok, JobId0} -> JobId0;
+ {error, not_found} -> Id1
+ end,
case couch_replicator_jobs:get_job_data(undefined, JobId) of
{ok, #{} = JobData} -> {ok, job_ejson(JobData)};
{error, not_found} -> {error, not_found}
end.
-
docs(#{} = Db, States) when is_list(States) ->
DbName = fabric2_db:name(Db),
FoldFun = fun(_JTx, _JobId, _, JobData, Acc) ->
case JobData of
#{?DB_NAME := DbName, ?STATE := State} ->
case {States, lists:member(State, States)} of
- {[], _} -> [doc_ejson(JobData) | Acc];
- {[_ | _], true} -> [doc_ejson(JobData) | Acc];
+ {[], _} -> [doc_ejson(JobData) | Acc];
+ {[_ | _], true} -> [doc_ejson(JobData) | Acc];
{[_ | _], false} -> Acc
end;
#{} ->
@@ -125,17 +122,15 @@ docs(#{} = Db, States) when is_list(States) ->
end,
couch_replicator_jobs:fold_jobs(undefined, FoldFun, []).
-
doc(#{} = Db, DocId) when is_binary(DocId) ->
DbUUID = fabric2_db:get_uuid(Db),
JobId = couch_replicator_ids:job_id(DbUUID, DocId),
case couch_replicator_jobs:get_job_data(undefined, JobId) of
{ok, #{} = JobData} -> {ok, doc_ejson(JobData)};
- {error, not_found} -> {error, not_found}
+ {error, not_found} -> {error, not_found}
end.
-
-after_db_create(DbName, DbUUID) when ?IS_REP_DB(DbName)->
+after_db_create(DbName, DbUUID) when ?IS_REP_DB(DbName) ->
couch_stats:increment_counter([couch_replicator, docs, dbs_created]),
try fabric2_db:open(DbName, [{uuid, DbUUID}, ?ADMIN_CTX]) of
{ok, Db} ->
@@ -146,11 +141,9 @@ after_db_create(DbName, DbUUID) when ?IS_REP_DB(DbName)->
error:database_does_not_exist ->
ok
end;
-
after_db_create(_DbName, _DbUUID) ->
ok.
-
after_db_delete(DbName, DbUUID) when ?IS_REP_DB(DbName) ->
couch_stats:increment_counter([couch_replicator, docs, dbs_deleted]),
FoldFun = fun(JTx, JobId, _, JobData, ok) ->
@@ -162,13 +155,17 @@ after_db_delete(DbName, DbUUID) when ?IS_REP_DB(DbName) ->
end
end,
couch_replicator_jobs:fold_jobs(undefined, FoldFun, ok);
-
after_db_delete(_DbName, _DbUUID) ->
ok.
-
-after_doc_write(#{name := DbName} = Db, #doc{} = Doc, _NewWinner, _OldWinner,
- _NewRevId, _Seq) when ?IS_REP_DB(DbName) ->
+after_doc_write(
+ #{name := DbName} = Db,
+ #doc{} = Doc,
+ _NewWinner,
+ _OldWinner,
+ _NewRevId,
+ _Seq
+) when ?IS_REP_DB(DbName) ->
couch_stats:increment_counter([couch_replicator, docs, db_changes]),
{Props} = Doc#doc.body,
case couch_util:get_value(?REPLICATION_STATE, Props) of
@@ -176,18 +173,16 @@ after_doc_write(#{name := DbName} = Db, #doc{} = Doc, _NewWinner, _OldWinner,
?ST_FAILED -> ok;
_ -> process_change(Db, Doc)
end;
-
after_doc_write(_Db, _Doc, _NewWinner, _OldWinner, _NewRevId, _Seq) ->
ok.
-
% This is called from supervisor, must return ignore.
-spec ensure_rep_db_exists() -> ignore.
ensure_rep_db_exists() ->
couch_replicator_jobs:set_timeout(),
case config:get_boolean("replicator", "create_replicator_db", false) of
true ->
- UserCtx = #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]},
+ UserCtx = #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]},
Opts = [{user_ctx, UserCtx}, sys_db],
case fabric2_db:create(?REP_DB_NAME, Opts) of
{error, file_exists} -> ok;
@@ -198,13 +193,11 @@ ensure_rep_db_exists() ->
end,
ignore.
-
% Testing and debug functions
rescan_jobs() ->
rescan_jobs(?REP_DB_NAME).
-
rescan_jobs(DbName) when is_binary(DbName), ?IS_REP_DB(DbName) ->
try fabric2_db:open(DbName, [?ADMIN_CTX]) of
{ok, Db} ->
@@ -214,11 +207,9 @@ rescan_jobs(DbName) when is_binary(DbName), ?IS_REP_DB(DbName) ->
database_does_not_exist
end.
-
reenqueue_jobs() ->
reenqueue_jobs(?REP_DB_NAME).
-
reenqueue_jobs(DbName) when is_binary(DbName), ?IS_REP_DB(DbName) ->
try fabric2_db:open(DbName, [?ADMIN_CTX]) of
{ok, Db} ->
@@ -230,7 +221,6 @@ reenqueue_jobs(DbName) when is_binary(DbName), ?IS_REP_DB(DbName) ->
database_does_not_exist
end.
-
remove_jobs() ->
% If we clear a large number of jobs, make sure to use batching so we don't
% take too long if we use individual transactions, and also don't time out if
@@ -245,17 +235,22 @@ remove_jobs() ->
[] = couch_replicator_jobs:remove_jobs(undefined, Acc),
ok.
-
get_job_ids() ->
couch_replicator_jobs:get_job_ids(undefined).
-
% Private functions
-spec start_transient_job(binary(), #{}) -> ok.
start_transient_job(JobId, #{} = Rep) ->
- JobData = couch_replicator_jobs:new_job(Rep, null, null, null,
- ?ST_PENDING, null, null),
+ JobData = couch_replicator_jobs:new_job(
+ Rep,
+ null,
+ null,
+ null,
+ ?ST_PENDING,
+ null,
+ null
+ ),
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
case couch_replicator_jobs:get_job_data(JTx, JobId) of
{ok, #{?REP := OldRep, ?STATE := State}} ->
@@ -277,17 +272,17 @@ start_transient_job(JobId, #{} = Rep) ->
end
end).
-
-spec cancel_replication(job_id()) ->
{ok, {cancelled, binary()}} | {error, not_found}.
cancel_replication(JobId) when is_binary(JobId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
- Id = case couch_replicator_jobs:get_job_data(JTx, JobId) of
- {ok, #{?REP_ID := RepId}} when is_binary(RepId) ->
- RepId;
- _ ->
- JobId
- end,
+ Id =
+ case couch_replicator_jobs:get_job_data(JTx, JobId) of
+ {ok, #{?REP_ID := RepId}} when is_binary(RepId) ->
+ RepId;
+ _ ->
+ JobId
+ end,
?LOG_NOTICE(#{what => cancel_replication, in => replicator, id => Id}),
couch_log:notice("Canceling replication '~s'", [Id]),
case couch_replicator_jobs:remove_job(JTx, JobId) of
@@ -298,36 +293,49 @@ cancel_replication(JobId) when is_binary(JobId) ->
end
end).
-
process_change(_Db, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) ->
ok;
-
process_change(#{} = Db, #doc{deleted = true} = Doc) ->
DbUUID = fabric2_db:get_uuid(Db),
JobId = couch_replicator_ids:job_id(DbUUID, Doc#doc.id),
couch_replicator_jobs:remove_job(undefined, JobId);
-
process_change(#{} = Db, #doc{deleted = false} = Doc) ->
#doc{id = DocId, body = {Props} = Body} = Doc,
DbName = fabric2_db:name(Db),
DbUUID = fabric2_db:get_uuid(Db),
- {Rep, DocState, Error} = try
- Rep0 = couch_replicator_parse:parse_rep_doc(Body),
- DocState0 = couch_util:get_value(?REPLICATION_STATE, Props, null),
- {Rep0, DocState0, null}
- catch
- throw:{bad_rep_doc, Reason} ->
- {null, null, couch_replicator_utils:rep_error_to_binary(Reason)}
- end,
+ {Rep, DocState, Error} =
+ try
+ Rep0 = couch_replicator_parse:parse_rep_doc(Body),
+ DocState0 = couch_util:get_value(?REPLICATION_STATE, Props, null),
+ {Rep0, DocState0, null}
+ catch
+ throw:{bad_rep_doc, Reason} ->
+ {null, null, couch_replicator_utils:rep_error_to_binary(Reason)}
+ end,
JobId = couch_replicator_ids:job_id(DbUUID, DocId),
- JobData = case Rep of
- null ->
- couch_relicator_jobs:new_job(Rep, DbName, DbUUID, DocId,
- ?ST_FAILED, Error, null);
- #{} ->
- couch_replicator_jobs:new_job(Rep, DbName, DbUUID, DocId,
- ?ST_PENDING, null, DocState)
- end,
+ JobData =
+ case Rep of
+ null ->
+ couch_replicator_jobs:new_job(
+ Rep,
+ DbName,
+ DbUUID,
+ DocId,
+ ?ST_FAILED,
+ Error,
+ null
+ );
+ #{} ->
+ couch_replicator_jobs:new_job(
+ Rep,
+ DbName,
+ DbUUID,
+ DocId,
+ ?ST_PENDING,
+ null,
+ DocState
+ )
+ end,
?LOG_NOTICE(#{
what => replication_update,
@@ -362,13 +370,11 @@ process_change(#{} = Db, #doc{deleted = false} = Doc) ->
{error, not_found} ->
couch_replicator_jobs:add_job(JTx, JobId, JobData)
end
-
end).
-
-spec add_jobs_from_db(#{}) -> ok.
add_jobs_from_db(#{} = TxDb) ->
- FoldFun = fun
+ FoldFun = fun
({meta, _Meta}, ok) ->
{ok, ok};
(complete, ok) ->
@@ -382,14 +388,12 @@ add_jobs_from_db(#{} = TxDb) ->
{ok, ok} = fabric2_db:fold_docs(TxDb, FoldFun, ok, Opts),
ok.
-
-spec get_doc(#{}, list()) -> #doc{}.
get_doc(TxDb, Row) ->
{_, DocId} = lists:keyfind(id, 1, Row),
{ok, #doc{deleted = false} = Doc} = fabric2_db:open_doc(TxDb, DocId, []),
Doc.
-
doc_ejson(#{} = JobData) ->
#{
?REP := Rep,
@@ -414,11 +418,12 @@ doc_ejson(#{} = JobData) ->
LastUpdatedISO8601 = couch_replicator_utils:iso8601(LastUpdatedSec),
StartISO8601 = couch_replicator_utils:iso8601(StartSec),
- Info = case State of
- ?ST_RUNNING -> Stats;
- ?ST_PENDING -> Stats;
- _Other -> Info0
- end,
+ Info =
+ case State of
+ ?ST_RUNNING -> Stats;
+ ?ST_PENDING -> Stats;
+ _Other -> Info0
+ end,
#{
<<"id">> => RepId,
@@ -437,7 +442,6 @@ doc_ejson(#{} = JobData) ->
<<"pid">> => Pid
}.
-
job_ejson(#{} = JobData) ->
#{
?REP := Rep,
@@ -461,15 +465,19 @@ job_ejson(#{} = JobData) ->
StartISO8601 = couch_replicator_utils:iso8601(StartSec),
- History1 = lists:map(fun(#{?HIST_TIMESTAMP := Ts} = Evt) ->
- Evt#{?HIST_TIMESTAMP := couch_replicator_utils:iso8601(Ts)}
- end, History),
-
- Info = case State of
- ?ST_RUNNING -> Stats;
- ?ST_PENDING -> Stats;
- _Other -> Info0
- end,
+ History1 = lists:map(
+ fun(#{?HIST_TIMESTAMP := Ts} = Evt) ->
+ Evt#{?HIST_TIMESTAMP := couch_replicator_utils:iso8601(Ts)}
+ end,
+ History
+ ),
+
+ Info =
+ case State of
+ ?ST_RUNNING -> Stats;
+ ?ST_PENDING -> Stats;
+ _Other -> Info0
+ end,
#{
<<"id">> => RepId,
@@ -486,14 +494,11 @@ job_ejson(#{} = JobData) ->
<<"pid">> => Pid
}.
-
ejson_url(Url) when is_binary(Url) ->
strip_url_creds(Url);
-
ejson_url(null) ->
null.
-
-spec strip_url_creds(binary()) -> binary() | null.
strip_url_creds(Url) ->
try
@@ -509,7 +514,6 @@ strip_url_creds(Url) ->
null
end.
-
-spec check_authorization(rep_id(), #user_ctx{}) -> ok | not_found.
check_authorization(JobId, #user_ctx{} = Ctx) when is_binary(JobId) ->
#user_ctx{name = Name} = Ctx,
@@ -524,18 +528,16 @@ check_authorization(JobId, #user_ctx{} = Ctx) when is_binary(JobId) ->
couch_httpd:verify_is_server_admin(Ctx)
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
-
authorization_test_() ->
{
foreach,
- fun () -> ok end,
- fun (_) -> meck:unload() end,
+ fun() -> ok end,
+ fun(_) -> meck:unload() end,
[
?TDEF_FE(t_admin_is_always_authorized),
?TDEF_FE(t_username_must_match),
@@ -543,21 +545,23 @@ authorization_test_() ->
]
}.
-
t_admin_is_always_authorized(_) ->
expect_job_data({ok, #{?REP => #{?REP_USER => <<"someuser">>}}}),
UserCtx = #user_ctx{name = <<"adm">>, roles = [<<"_admin">>]},
?assertEqual(ok, check_authorization(<<"RepId">>, UserCtx)).
-
t_username_must_match(_) ->
expect_job_data({ok, #{?REP => #{?REP_USER => <<"user1">>}}}),
UserCtx1 = #user_ctx{name = <<"user1">>, roles = [<<"somerole">>]},
?assertEqual(ok, check_authorization(<<"RepId">>, UserCtx1)),
UserCtx2 = #user_ctx{name = <<"other">>, roles = [<<"somerole">>]},
- ?assertThrow({unauthorized, _}, check_authorization(<<"RepId">>,
- UserCtx2)).
-
+ ?assertThrow(
+ {unauthorized, _},
+ check_authorization(
+ <<"RepId">>,
+ UserCtx2
+ )
+ ).
t_replication_not_found(_) ->
expect_job_data({error, not_found}),
@@ -566,11 +570,9 @@ t_replication_not_found(_) ->
UserCtx2 = #user_ctx{name = <<"adm">>, roles = [<<"_admin">>]},
?assertEqual(not_found, check_authorization(<<"RepId">>, UserCtx2)).
-
expect_job_data(JobDataRes) ->
meck:expect(couch_replicator_jobs, get_job_data, 2, JobDataRes).
-
strip_url_creds_test_() ->
{
setup,
@@ -586,7 +588,6 @@ strip_url_creds_test_() ->
])
}.
-
t_strip_http_basic_creds(_) ->
Url1 = <<"http://adm:pass@host/db/">>,
?assertEqual(<<"http://adm:*****@host/db/">>, strip_url_creds(Url1)),
@@ -595,9 +596,10 @@ t_strip_http_basic_creds(_) ->
Url3 = <<"http://adm:pass@host:80/db/">>,
?assertEqual(<<"http://adm:*****@host:80/db/">>, strip_url_creds(Url3)),
Url4 = <<"http://adm:pass@host/db?a=b&c=d">>,
- ?assertEqual(<<"http://adm:*****@host/db?a=b&c=d">>,
- strip_url_creds(Url4)).
-
+ ?assertEqual(
+ <<"http://adm:*****@host/db?a=b&c=d">>,
+ strip_url_creds(Url4)
+ ).
t_strip_url_creds_errors(_) ->
Bad1 = <<"http://adm:pass/bad">>,
@@ -614,5 +616,4 @@ t_strip_url_creds_errors(_) ->
Bad5 = <<"http://adm:pass/bad">>,
?assertEqual(null, strip_url_creds(Bad5)).
-
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index fd592a87f..f182ecd10 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -40,8 +40,7 @@
changes_since/5,
db_uri/1,
db_from_json/1
- ]).
-
+]).
-define(MAX_WAIT, 5 * 60 * 1000).
@@ -50,53 +49,58 @@
db_uri(#{<<"url">> := Url}) ->
couch_util:url_strip_password(Url);
-
db_uri(#httpdb{url = Url}) ->
couch_util:url_strip_password(Url).
-
db_open(#{} = Db) ->
db_open(Db, false, #{}).
-
db_open(#{} = Db0, Create, #{} = CreateParams) when is_boolean(Create) ->
{ok, Db} = couch_replicator_httpc:setup(db_from_json(Db0)),
try
case Create of
- false ->
- ok;
- true ->
- Db2 = maybe_append_create_query_params(Db, CreateParams),
- send_req(Db2, [{method, put}],
- fun(401, _, _) ->
- throw({unauthorized, ?l2b(db_uri(Db2))});
+ false ->
+ ok;
+ true ->
+ Db2 = maybe_append_create_query_params(Db, CreateParams),
+ send_req(
+ Db2,
+ [{method, put}],
+ fun
+ (401, _, _) ->
+ throw({unauthorized, ?l2b(db_uri(Db2))});
+ (403, _, _) ->
+ throw({forbidden, ?l2b(db_uri(Db2))});
+ (_, _, _) ->
+ ok
+ end
+ )
+ end,
+ send_req(
+ Db,
+ [{method, get}],
+ fun
+ (200, _, {Props}) ->
+ UpdateSeq = get_value(<<"update_seq">>, Props),
+ InstanceStart = get_value(<<"instance_start_time">>, Props),
+ case {UpdateSeq, InstanceStart} of
+ {undefined, _} ->
+ throw({db_not_found, ?l2b(db_uri(Db))});
+ {_, undefined} ->
+ throw({db_not_found, ?l2b(db_uri(Db))});
+ _ ->
+ {ok, Db}
+ end;
+ (200, _, _Body) ->
+ throw({db_not_found, ?l2b(db_uri(Db))});
+ (401, _, _) ->
+ throw({unauthorized, ?l2b(db_uri(Db))});
(403, _, _) ->
- throw({forbidden, ?l2b(db_uri(Db2))});
+ throw({forbidden, ?l2b(db_uri(Db))});
(_, _, _) ->
- ok
- end)
- end,
- send_req(Db, [{method, get}],
- fun(200, _, {Props}) ->
- UpdateSeq = get_value(<<"update_seq">>, Props),
- InstanceStart = get_value(<<"instance_start_time">>, Props),
- case {UpdateSeq, InstanceStart} of
- {undefined, _} ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- {_, undefined} ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- _ ->
- {ok, Db}
- end;
- (200, _, _Body) ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- (401, _, _) ->
- throw({unauthorized, ?l2b(db_uri(Db))});
- (403, _, _) ->
- throw({forbidden, ?l2b(db_uri(Db))});
- (_, _, _) ->
- throw({db_not_found, ?l2b(db_uri(Db))})
- end)
+ throw({db_not_found, ?l2b(db_uri(Db))})
+ end
+ )
catch
throw:Error ->
db_close(Db),
@@ -114,12 +118,14 @@ db_close(#httpdb{httpc_pool = Pool} = HttpDb) ->
unlink(Pool),
ok = couch_replicator_httpc_pool:stop(Pool).
-
get_db_info(#httpdb{} = Db) ->
- send_req(Db, [],
+ send_req(
+ Db,
+ [],
fun(200, _, {Props}) ->
{ok, Props}
- end).
+ end
+ ).
get_pending_count(#httpdb{} = Db, Seq) when is_number(Seq) ->
% Source looks like Apache CouchDB and not Cloudant so we fall
@@ -138,46 +144,54 @@ get_pending_count(#httpdb{} = Db, Seq) ->
{ok, couch_util:get_value(<<"pending">>, Props, null)}
end).
-
ensure_full_commit(#httpdb{} = Db) ->
send_req(
Db,
- [{method, post}, {path, "_ensure_full_commit"},
- {headers, [{"Content-Type", "application/json"}]}],
- fun(201, _, {Props}) ->
- {ok, get_value(<<"instance_start_time">>, Props)};
- (_, _, {Props}) ->
- {error, get_value(<<"error">>, Props)}
- end).
-
+ [
+ {method, post},
+ {path, "_ensure_full_commit"},
+ {headers, [{"Content-Type", "application/json"}]}
+ ],
+ fun
+ (201, _, {Props}) ->
+ {ok, get_value(<<"instance_start_time">>, Props)};
+ (_, _, {Props}) ->
+ {error, get_value(<<"error">>, Props)}
+ end
+ ).
get_missing_revs(#httpdb{} = Db, IdRevs) ->
JsonBody = {[{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- IdRevs]},
send_req(
Db,
- [{method, post}, {path, "_revs_diff"}, {body, ?JSON_ENCODE(JsonBody)},
- {headers, [{"Content-Type", "application/json"}]}],
- fun(200, _, {Props}) ->
- ConvertToNativeFun = fun({Id, {Result}}) ->
- MissingRevs = couch_doc:parse_revs(
- get_value(<<"missing">>, Result)
- ),
- PossibleAncestors = couch_doc:parse_revs(
- get_value(<<"possible_ancestors">>, Result, [])
- ),
- {Id, MissingRevs, PossibleAncestors}
- end,
- {ok, lists:map(ConvertToNativeFun, Props)};
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
- {error, {revs_diff_failed, ErrCode, ErrMsg}}
- end).
-
+ [
+ {method, post},
+ {path, "_revs_diff"},
+ {body, ?JSON_ENCODE(JsonBody)},
+ {headers, [{"Content-Type", "application/json"}]}
+ ],
+ fun
+ (200, _, {Props}) ->
+ ConvertToNativeFun = fun({Id, {Result}}) ->
+ MissingRevs = couch_doc:parse_revs(
+ get_value(<<"missing">>, Result)
+ ),
+ PossibleAncestors = couch_doc:parse_revs(
+ get_value(<<"possible_ancestors">>, Result, [])
+ ),
+ {Id, MissingRevs, PossibleAncestors}
+ end,
+ {ok, lists:map(ConvertToNativeFun, Props)};
+ (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
+ {error, {revs_diff_failed, ErrCode, ErrMsg}}
+ end
+ ).
open_doc_revs(#httpdb{retries = 0} = HttpDb, Id, Revs, Options, _Fun, _Acc) ->
Path = encode_doc_id(Id),
QS = options_to_query_args(HttpDb, Path, [revs, {open_revs, Revs} | Options]),
Url = couch_util:url_strip_password(
- couch_replicator_httpc:full_url(HttpDb, [{path,Path}, {qs,QS}])
+ couch_replicator_httpc:full_url(HttpDb, [{path, Path}, {qs, QS}])
),
?LOG_ERROR(#{
what => permanent_request_failure,
@@ -193,15 +207,15 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
{Pid, Ref} = spawn_monitor(fun() ->
Self = self(),
Callback = fun
- (200, Headers, StreamDataFun) ->
- remote_open_doc_revs_streamer_start(Self),
- {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
- header_value("Content-Type", Headers),
- StreamDataFun,
- fun mp_parse_mixed/1
- );
- (414, _, _) ->
- exit(request_uri_too_long)
+ (200, Headers, StreamDataFun) ->
+ remote_open_doc_revs_streamer_start(Self),
+ {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
+ header_value("Content-Type", Headers),
+ StreamDataFun,
+ fun mp_parse_mixed/1
+ );
+ (414, _, _) ->
+ exit(request_uri_too_long)
end,
Streamer = spawn_link(fun() ->
Params = [
@@ -228,12 +242,12 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
exit(Streamer, {streamer_parent_died, Self});
{'DOWN', Ref, process, Self, _} ->
ok
- end
+ end
end),
receive
- {started_open_doc_revs, Ref} ->
- Ret = receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc),
- exit({exit_ok, Ret})
+ {started_open_doc_revs, Ref} ->
+ Ret = receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc),
+ exit({exit_ok, Ret})
end
end),
receive
@@ -241,7 +255,7 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
Ret;
{'DOWN', Ref, process, Pid, {{nocatch, missing_doc}, _}} ->
throw(missing_doc);
- {'DOWN', Ref, process, Pid, {{nocatch, {missing_stub,_} = Stub}, _}} ->
+ {'DOWN', Ref, process, Pid, {{nocatch, {missing_stub, _} = Stub}, _}} ->
throw(Stub);
{'DOWN', Ref, process, Pid, {http_request_failed, _, _, max_backoff}} ->
exit(max_backoff);
@@ -258,15 +272,22 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
new_max_length => NewMaxLen,
details => "reducing url length because of 414 response"
}),
- couch_log:info("Reducing url length to ~B because of"
- " 414 response", [NewMaxLen]),
- Options1 = lists:keystore(max_url_len, 1, Options,
- {max_url_len, NewMaxLen}),
+ couch_log:info(
+ "Reducing url length to ~B because of"
+ " 414 response",
+ [NewMaxLen]
+ ),
+ Options1 = lists:keystore(
+ max_url_len,
+ 1,
+ Options,
+ {max_url_len, NewMaxLen}
+ ),
open_doc_revs(HttpDb, Id, Revs, Options1, Fun, Acc)
end;
{'DOWN', Ref, process, Pid, Else} ->
Url = couch_util:url_strip_password(
- couch_replicator_httpc:full_url(HttpDb, [{path,Path}, {qs,QS}])
+ couch_replicator_httpc:full_url(HttpDb, [{path, Path}, {qs, QS}])
),
#httpdb{retries = Retries, wait = Wait0} = HttpDb,
Wait = 2 * erlang:min(Wait0 * 2, ?MAX_WAIT),
@@ -277,7 +298,8 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
delay_sec => Wait / 1000,
details => error_reason(Else)
}),
- couch_log:notice("Retrying GET to ~s in ~p seconds due to error ~w",
+ couch_log:notice(
+ "Retrying GET to ~s in ~p seconds due to error ~w",
[Url, Wait / 1000, error_reason(Else)]
),
ok = timer:sleep(Wait),
@@ -288,7 +310,6 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
open_doc_revs(RetryDb, Id, Revs, Options, Fun, Acc)
end.
-
error_reason({http_request_failed, "GET", _Url, {error, timeout}}) ->
timeout;
error_reason({http_request_failed, "GET", _Url, {error, {_, req_timedout}}}) ->
@@ -302,35 +323,44 @@ open_doc(#httpdb{} = Db, Id, Options) ->
send_req(
Db,
[{path, encode_doc_id(Id)}, {qs, options_to_query_args(Options, [])}],
- fun(200, _, Body) ->
- {ok, couch_doc:from_json_obj(Body)};
- (_, _, {Props}) ->
- {error, get_value(<<"error">>, Props)}
- end).
-
+ fun
+ (200, _, Body) ->
+ {ok, couch_doc:from_json_obj(Body)};
+ (_, _, {Props}) ->
+ {error, get_value(<<"error">>, Props)}
+ end
+ ).
update_doc(Db, Doc, Options) ->
update_doc(Db, Doc, Options, interactive_edit).
update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) ->
- QArgs = case Type of
- replicated_changes ->
- [{"new_edits", "false"}];
- _ ->
- []
- end ++ options_to_query_args(Options, []),
+ QArgs =
+ case Type of
+ replicated_changes ->
+ [{"new_edits", "false"}];
+ _ ->
+ []
+ end ++ options_to_query_args(Options, []),
Boundary = couch_uuids:random(),
JsonBytes = ?JSON_ENCODE(
couch_doc:to_json_obj(
- Doc, [revs, attachments, follows, att_encoding_info | Options])),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(Boundary,
- JsonBytes, Doc#doc.atts, true),
- Headers = case lists:member(delay_commit, Options) of
- true ->
- [{"X-Couch-Full-Commit", "false"}];
- false ->
- []
- end ++ [{"Content-Type", ?b2l(ContentType)}, {"Content-Length", Len}],
+ Doc, [revs, attachments, follows, att_encoding_info | Options]
+ )
+ ),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Doc#doc.atts,
+ true
+ ),
+ Headers =
+ case lists:member(delay_commit, Options) of
+ true ->
+ [{"X-Couch-Full-Commit", "false"}];
+ false ->
+ []
+ end ++ [{"Content-Type", ?b2l(ContentType)}, {"Content-Length", Len}],
Body = {fun stream_doc/1, {JsonBytes, Doc#doc.atts, Boundary, Len}},
send_req(
% A crash here bubbles all the way back up to run_user_fun inside
% open_doc_revs, which will retry the request. That is the
% appropriate course of action, since we've already started streaming
% the response body from the GET request.
% the response body from the GET request.
HttpDb#httpdb{retries = 0},
- [{method, put}, {path, encode_doc_id(DocId)},
- {qs, QArgs}, {headers, Headers}, {body, Body}],
- fun(Code, _, {Props}) when Code =:= 200 orelse Code =:= 201 orelse Code =:= 202 ->
+ [
+ {method, put},
+ {path, encode_doc_id(DocId)},
+ {qs, QArgs},
+ {headers, Headers},
+ {body, Body}
+ ],
+ fun
+ (Code, _, {Props}) when Code =:= 200 orelse Code =:= 201 orelse Code =:= 202 ->
{ok, couch_doc:parse_rev(get_value(<<"rev">>, Props))};
(409, _, _) ->
throw(conflict);
(Code, _, {Props}) ->
case {Code, get_value(<<"error">>, Props)} of
- {401, <<"unauthorized">>} ->
- throw({unauthorized, get_value(<<"reason">>, Props)});
- {403, <<"forbidden">>} ->
- throw({forbidden, get_value(<<"reason">>, Props)});
- {412, <<"missing_stub">>} ->
- throw({missing_stub, get_value(<<"reason">>, Props)});
- {413, _} ->
- {error, request_body_too_large};
- {_, Error} ->
- {error, Error}
+ {401, <<"unauthorized">>} ->
+ throw({unauthorized, get_value(<<"reason">>, Props)});
+ {403, <<"forbidden">>} ->
+ throw({forbidden, get_value(<<"reason">>, Props)});
+ {412, <<"missing_stub">>} ->
+ throw({missing_stub, get_value(<<"reason">>, Props)});
+ {413, _} ->
+ {error, request_body_too_large};
+ {_, Error} ->
+ {error, Error}
end
- end).
-
+ end
+ ).
update_docs(Db, DocList, Options) ->
update_docs(Db, DocList, Options, interactive_edit).
@@ -367,26 +403,30 @@ update_docs(_Db, [], _Options, _UpdateType) ->
{ok, []};
update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
FullCommit = atom_to_list(not lists:member(delay_commit, Options)),
- Prefix = case UpdateType of
- replicated_changes ->
- <<"{\"new_edits\":false,\"docs\":[">>;
- interactive_edit ->
- <<"{\"docs\":[">>
- end,
+ Prefix =
+ case UpdateType of
+ replicated_changes ->
+ <<"{\"new_edits\":false,\"docs\":[">>;
+ interactive_edit ->
+ <<"{\"docs\":[">>
+ end,
Suffix = <<"]}">>,
% Note: nginx and other servers don't like PUT/POST requests without
% a Content-Length header, so we can't use chunked transfer encoding
% and JSON-encode each doc just before sending it through the socket.
{Docs, Len} = lists:mapfoldl(
- fun(#doc{} = Doc, Acc) ->
- Json = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
- {Json, Acc + iolist_size(Json)};
- (Doc, Acc) ->
- {Doc, Acc + iolist_size(Doc)}
+ fun
+ (#doc{} = Doc, Acc) ->
+ Json = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
+ {Json, Acc + iolist_size(Json)};
+ (Doc, Acc) ->
+ {Doc, Acc + iolist_size(Doc)}
end,
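% Acc0: bytes for the JSON prefix and suffix, plus one byte for each of the N-1 commas between docs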
byte_size(Prefix) + byte_size(Suffix) + length(DocList) - 1,
- DocList),
- BodyFun = fun(eof) ->
+ DocList
+ ),
+ BodyFun = fun
+ (eof) ->
eof;
([]) ->
{ok, Suffix, eof};
@@ -404,154 +444,196 @@ update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
],
send_req(
HttpDb,
- [{method, post}, {path, "_bulk_docs"},
- {body, {BodyFun, [prefix | Docs]}}, {headers, Headers}],
- fun(201, _, Results) when is_list(Results) ->
+ [
+ {method, post},
+ {path, "_bulk_docs"},
+ {body, {BodyFun, [prefix | Docs]}},
+ {headers, Headers}
+ ],
+ fun
+ (201, _, Results) when is_list(Results) ->
{ok, bulk_results_to_errors(DocList, Results, remote)};
- (413, _, _) ->
+ (413, _, _) ->
{error, request_body_too_large};
- (417, _, Results) when is_list(Results) ->
+ (417, _, Results) when is_list(Results) ->
{ok, bulk_results_to_errors(DocList, Results, remote)};
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
+ (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
{error, {bulk_docs_failed, ErrCode, ErrMsg}}
- end).
-
-
-changes_since(#httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
- Style, StartSeq, UserFun, Options) ->
+ end
+ ).
+
+changes_since(
+ #httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
+ Style,
+ StartSeq,
+ UserFun,
+ Options
+) ->
Timeout = erlang:max(1000, InactiveTimeout div 3),
- BaseQArgs = case get_value(continuous, Options, false) of
- false ->
- [{"feed", "normal"}];
- true ->
- [{"feed", "continuous"}]
- end ++ [
- {"style", atom_to_list(Style)}, {"since", ?JSON_ENCODE(StartSeq)},
- {"timeout", integer_to_list(Timeout)}
- ],
+ BaseQArgs =
+ case get_value(continuous, Options, false) of
+ false ->
+ [{"feed", "normal"}];
+ true ->
+ [{"feed", "continuous"}]
+ end ++
+ [
+ {"style", atom_to_list(Style)},
+ {"since", ?JSON_ENCODE(StartSeq)},
+ {"timeout", integer_to_list(Timeout)}
+ ],
DocIds = get_value(doc_ids, Options),
Selector = get_value(selector, Options),
- {QArgs, Method, Body, Headers} = case {DocIds, Selector} of
- {undefined, undefined} ->
- QArgs1 = maybe_add_changes_filter_q_args(BaseQArgs, Options),
- {QArgs1, get, [], Headers1};
- {undefined, #{}} ->
- Headers2 = [{"Content-Type", "application/json"} | Headers1],
- JsonSelector = ?JSON_ENCODE(#{<<"selector">> => Selector}),
- {[{"filter", "_selector"} | BaseQArgs], post, JsonSelector, Headers2};
- {_, undefined} when is_list(DocIds) ->
- Headers2 = [{"Content-Type", "application/json"} | Headers1],
- JsonDocIds = ?JSON_ENCODE({[{<<"doc_ids">>, DocIds}]}),
- {[{"filter", "_doc_ids"} | BaseQArgs], post, JsonDocIds, Headers2}
- end,
+ {QArgs, Method, Body, Headers} =
+ case {DocIds, Selector} of
+ {undefined, undefined} ->
+ QArgs1 = maybe_add_changes_filter_q_args(BaseQArgs, Options),
+ {QArgs1, get, [], Headers1};
+ {undefined, #{}} ->
+ Headers2 = [{"Content-Type", "application/json"} | Headers1],
+ JsonSelector = ?JSON_ENCODE(#{<<"selector">> => Selector}),
+ {[{"filter", "_selector"} | BaseQArgs], post, JsonSelector, Headers2};
+ {_, undefined} when is_list(DocIds) ->
+ Headers2 = [{"Content-Type", "application/json"} | Headers1],
+ JsonDocIds = ?JSON_ENCODE({[{<<"doc_ids">>, DocIds}]}),
+ {[{"filter", "_doc_ids"} | BaseQArgs], post, JsonDocIds, Headers2}
+ end,
try
send_req(
HttpDb,
- [{method, Method}, {path, "_changes"}, {qs, QArgs},
- {headers, Headers}, {body, Body},
- {ibrowse_options, [{stream_to, {self(), once}}]}],
- fun(200, _, DataStreamFun) ->
+ [
+ {method, Method},
+ {path, "_changes"},
+ {qs, QArgs},
+ {headers, Headers},
+ {body, Body},
+ {ibrowse_options, [{stream_to, {self(), once}}]}
+ ],
+ fun
+ (200, _, DataStreamFun) ->
parse_changes_feed(Options, UserFun, DataStreamFun);
(405, _, _) when is_list(DocIds) ->
% CouchDB versions < 1.1.0 don't have the builtin
% _changes feed filter "_doc_ids", nor do they support POST
- send_req(HttpDb, [{method, get}, {path, "_changes"},
- {qs, BaseQArgs}, {headers, Headers1},
- {ibrowse_options, [{stream_to, {self(), once}}]}],
+ send_req(
+ HttpDb,
+ [
+ {method, get},
+ {path, "_changes"},
+ {qs, BaseQArgs},
+ {headers, Headers1},
+ {ibrowse_options, [{stream_to, {self(), once}}]}
+ ],
fun(200, _, DataStreamFun2) ->
- UserFun2 = fun(#doc_info{id = Id} = DocInfo) ->
- case lists:member(Id, DocIds) of
- true ->
- UserFun(DocInfo);
- false ->
- ok
- end;
- (LastSeq) ->
- UserFun(LastSeq)
+ UserFun2 = fun
+ (#doc_info{id = Id} = DocInfo) ->
+ case lists:member(Id, DocIds) of
+ true ->
+ UserFun(DocInfo);
+ false ->
+ ok
+ end;
+ (LastSeq) ->
+ UserFun(LastSeq)
end,
- parse_changes_feed(Options, UserFun2,
- DataStreamFun2)
- end);
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
+ parse_changes_feed(
+ Options,
+ UserFun2,
+ DataStreamFun2
+ )
+ end
+ );
+ (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
throw({retry_limit, {changes_req_failed, ErrCode, ErrMsg}})
- end)
+ end
+ )
catch
exit:{http_request_failed, _, _, max_backoff} ->
exit(max_backoff);
- exit:{http_request_failed, _, _, {error, {connection_closed,
- mid_stream}}} ->
+ exit:{http_request_failed, _, _, {error, {connection_closed, mid_stream}}} ->
throw(retry_no_limit);
exit:{http_request_failed, _, _, _} = Error ->
throw({retry_limit, Error})
end.
-
% internal functions
maybe_add_changes_filter_q_args(BaseQS, Options) ->
case get_value(filter, Options) of
- undefined ->
- BaseQS;
- FilterName ->
- %% get list of view attributes
- ViewFields0 = [atom_to_list(F) || F <- record_info(fields, mrargs)],
- ViewFields = ["key" | ViewFields0],
-
- ParamsMap = #{} = get_value(query_params, Options, #{}),
- Params = maps:to_list(ParamsMap),
- [{"filter", ?b2l(FilterName)} | lists:foldl(
- fun({K, V}, QSAcc) ->
- Ks = couch_util:to_list(K),
- case lists:keymember(Ks, 1, QSAcc) of
- true ->
- QSAcc;
- false when FilterName =:= <<"_view">> ->
- V1 = case lists:member(Ks, ViewFields) of
- true -> ?JSON_ENCODE(V);
- false -> couch_util:to_list(V)
+ undefined ->
+ BaseQS;
+ FilterName ->
+ %% get list of view attributes
+ ViewFields0 = [atom_to_list(F) || F <- record_info(fields, mrargs)],
+ ViewFields = ["key" | ViewFields0],
+
+ ParamsMap = #{} = get_value(query_params, Options, #{}),
+ Params = maps:to_list(ParamsMap),
+ [
+ {"filter", ?b2l(FilterName)}
+ | lists:foldl(
+ fun({K, V}, QSAcc) ->
+ Ks = couch_util:to_list(K),
+ case lists:keymember(Ks, 1, QSAcc) of
+ true ->
+ QSAcc;
+ false when FilterName =:= <<"_view">> ->
+ V1 =
+ case lists:member(Ks, ViewFields) of
+ true -> ?JSON_ENCODE(V);
+ false -> couch_util:to_list(V)
+ end,
+ [{Ks, V1} | QSAcc];
+ false ->
+ [{Ks, couch_util:to_list(V)} | QSAcc]
+ end
end,
- [{Ks, V1} | QSAcc];
- false ->
- [{Ks, couch_util:to_list(V)} | QSAcc]
- end
- end,
- BaseQS, Params)]
+ BaseQS,
+ Params
+ )
+ ]
end.
parse_changes_feed(Options, UserFun, DataStreamFun) ->
case get_value(continuous, Options, false) of
- true ->
- continuous_changes(DataStreamFun, UserFun);
- false ->
- EventFun = fun(Ev) ->
- changes_ev1(Ev, fun(DocInfo, _) -> UserFun(DocInfo) end, [])
- end,
- json_stream_parse:events(DataStreamFun, EventFun)
+ true ->
+ continuous_changes(DataStreamFun, UserFun);
+ false ->
+ EventFun = fun(Ev) ->
+ changes_ev1(Ev, fun(DocInfo, _) -> UserFun(DocInfo) end, [])
+ end,
+ json_stream_parse:events(DataStreamFun, EventFun)
end.
-
options_to_query_args(HttpDb, Path, Options0) ->
case lists:keytake(max_url_len, 1, Options0) of
- false -> MaxLen = ?MAX_URL_LEN, Options = Options0;
- {value, {max_url_len, MaxLen}, Options} -> ok
+ false ->
+ MaxLen = ?MAX_URL_LEN,
+ Options = Options0;
+ {value, {max_url_len, MaxLen}, Options} ->
+ ok
end,
case lists:keytake(atts_since, 1, Options) of
- false ->
- options_to_query_args(Options, []);
- {value, {atts_since, []}, Options2} ->
- options_to_query_args(Options2, []);
- {value, {atts_since, PAs}, Options2} ->
- QueryArgs1 = options_to_query_args(Options2, []),
- FullUrl = couch_replicator_httpc:full_url(
- HttpDb, [{path, Path}, {qs, QueryArgs1}]),
- RevList = atts_since_arg(
- length("GET " ++ FullUrl ++ " HTTP/1.1\r\n") +
- length("&atts_since=") + 6, % +6 = % encoded [ and ]
- PAs, MaxLen, []),
- [{"atts_since", ?b2l(iolist_to_binary(?JSON_ENCODE(RevList)))} | QueryArgs1]
+ false ->
+ options_to_query_args(Options, []);
+ {value, {atts_since, []}, Options2} ->
+ options_to_query_args(Options2, []);
+ {value, {atts_since, PAs}, Options2} ->
+ QueryArgs1 = options_to_query_args(Options2, []),
+ FullUrl = couch_replicator_httpc:full_url(
+ HttpDb, [{path, Path}, {qs, QueryArgs1}]
+ ),
+ RevList = atts_since_arg(
+ length("GET " ++ FullUrl ++ " HTTP/1.1\r\n") +
+ % +6 = percent-encoded "[" and "]" (%5B%5D)
+ length("&atts_since=") + 6,
+ PAs,
+ MaxLen,
+ []
+ ),
+ [{"atts_since", ?b2l(iolist_to_binary(?JSON_ENCODE(RevList)))} | QueryArgs1]
end.
-
options_to_query_args([], Acc) ->
lists:reverse(Acc);
options_to_query_args([ejson_body | Rest], Acc) ->
@@ -572,22 +654,22 @@ atts_since_arg(_UrlLen, [], _MaxLen, Acc) ->
lists:reverse(Acc);
atts_since_arg(UrlLen, [PA | Rest], MaxLen, Acc) ->
RevStr = couch_doc:rev_to_str(PA),
- NewUrlLen = case Rest of
- [] ->
- % plus 2 double quotes (% encoded)
- UrlLen + size(RevStr) + 6;
- _ ->
- % plus 2 double quotes and a comma (% encoded)
- UrlLen + size(RevStr) + 9
- end,
+ NewUrlLen =
+ case Rest of
+ [] ->
+ % plus 2 percent-encoded double quotes (%22%22 = 6 chars)
+ UrlLen + size(RevStr) + 6;
+ _ ->
+ % plus 2 percent-encoded double quotes and a comma (%22%22%2C = 9 chars)
+ UrlLen + size(RevStr) + 9
+ end,
case NewUrlLen >= MaxLen of
- true ->
- lists:reverse(Acc);
- false ->
- atts_since_arg(NewUrlLen, Rest, MaxLen, [RevStr | Acc])
+ true ->
+ lists:reverse(Acc);
+ false ->
+ atts_since_arg(NewUrlLen, Rest, MaxLen, [RevStr | Acc])
end.
-
% TODO: A less verbose, more elegant and automatic restart strategy for
% the exported open_doc_revs/6 function. The restart should be
% transparent to the caller like any other Couch API function exported
@@ -598,51 +680,55 @@ receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc) ->
erlang:put(open_doc_revs, {Id, Revs, Ref, Streamer}),
receive_docs(Streamer, Fun, Ref, Acc)
catch
- error:{restart_open_doc_revs, NewRef} ->
- receive_docs_loop(Streamer, Fun, Id, Revs, NewRef, Acc)
+ error:{restart_open_doc_revs, NewRef} ->
+ receive_docs_loop(Streamer, Fun, Id, Revs, NewRef, Acc)
end.
receive_docs(Streamer, UserFun, Ref, UserAcc) ->
Streamer ! {get_headers, Ref, self()},
receive
- {started_open_doc_revs, NewRef} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {headers, Ref, Headers} ->
- case header_value("content-type", Headers) of
- {"multipart/related", _} = ContentType ->
- % Skip document body and attachment size limits validation here
- % since these should be validated by the replication target
- case couch_doc:doc_from_multi_part_stream(
- ContentType,
- fun() -> receive_doc_data(Streamer, Ref) end,
- Ref, _ValidateDocLimits = false) of
- {ok, Doc, WaitFun, Parser} ->
- case run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref) of
- {ok, UserAcc2} ->
- ok;
- {skip, UserAcc2} ->
- couch_httpd_multipart:abort_multipart_stream(Parser)
- end,
- WaitFun(),
- receive_docs(Streamer, UserFun, Ref, UserAcc2)
+ {started_open_doc_revs, NewRef} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {headers, Ref, Headers} ->
+ case header_value("content-type", Headers) of
+ {"multipart/related", _} = ContentType ->
+ % Skip document body and attachment size limits validation here
+ % since these should be validated by the replication target
+ case
+ couch_doc:doc_from_multi_part_stream(
+ ContentType,
+ fun() -> receive_doc_data(Streamer, Ref) end,
+ Ref,
+ _ValidateDocLimits = false
+ )
+ of
+ {ok, Doc, WaitFun, Parser} ->
+ case run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref) of
+ {ok, UserAcc2} ->
+ ok;
+ {skip, UserAcc2} ->
+ couch_httpd_multipart:abort_multipart_stream(Parser)
+ end,
+ WaitFun(),
+ receive_docs(Streamer, UserFun, Ref, UserAcc2)
+ end;
+ {"application/json", []} ->
+ Doc = couch_doc:from_json_obj(
+ ?JSON_DECODE(receive_all(Streamer, Ref, []))
+ ),
+ {_, UserAcc2} = run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref),
+ receive_docs(Streamer, UserFun, Ref, UserAcc2);
+ {"application/json", [{"error", "true"}]} ->
+ {ErrorProps} = ?JSON_DECODE(receive_all(Streamer, Ref, [])),
+ Rev = get_value(<<"missing">>, ErrorProps),
+ Result = {{not_found, missing}, couch_doc:parse_rev(Rev)},
+ {_, UserAcc2} = run_user_fun(UserFun, Result, UserAcc, Ref),
+ receive_docs(Streamer, UserFun, Ref, UserAcc2)
end;
- {"application/json", []} ->
- Doc = couch_doc:from_json_obj(
- ?JSON_DECODE(receive_all(Streamer, Ref, []))),
- {_, UserAcc2} = run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref),
- receive_docs(Streamer, UserFun, Ref, UserAcc2);
- {"application/json", [{"error","true"}]} ->
- {ErrorProps} = ?JSON_DECODE(receive_all(Streamer, Ref, [])),
- Rev = get_value(<<"missing">>, ErrorProps),
- Result = {{not_found, missing}, couch_doc:parse_rev(Rev)},
- {_, UserAcc2} = run_user_fun(UserFun, Result, UserAcc, Ref),
- receive_docs(Streamer, UserFun, Ref, UserAcc2)
- end;
- {done, Ref} ->
- {ok, UserAcc}
+ {done, Ref} ->
+ {ok, UserAcc}
end.
-
run_user_fun(UserFun, Arg, UserAcc, OldRef) ->
{Pid, Ref} = spawn_monitor(fun() ->
try UserFun(Arg, UserAcc) of
@@ -672,78 +758,76 @@ run_user_fun(UserFun, Arg, UserAcc, OldRef) ->
erlang:exit(Reason)
end.
-
restart_remote_open_doc_revs(Ref, NewRef) ->
receive
- {body_bytes, Ref, _} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {body_done, Ref} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {done, Ref} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {headers, Ref, _} ->
- restart_remote_open_doc_revs(Ref, NewRef)
+ {body_bytes, Ref, _} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {body_done, Ref} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {done, Ref} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {headers, Ref, _} ->
+ restart_remote_open_doc_revs(Ref, NewRef)
after 0 ->
erlang:error({restart_open_doc_revs, NewRef})
end.
-
remote_open_doc_revs_streamer_start(Parent) ->
receive
- {get_headers, _Ref, Parent} ->
- remote_open_doc_revs_streamer_start(Parent);
- {next_bytes, _Ref, Parent} ->
- remote_open_doc_revs_streamer_start(Parent)
+ {get_headers, _Ref, Parent} ->
+ remote_open_doc_revs_streamer_start(Parent);
+ {next_bytes, _Ref, Parent} ->
+ remote_open_doc_revs_streamer_start(Parent)
after 0 ->
Parent ! {started_open_doc_revs, make_ref()}
end.
-
receive_all(Streamer, Ref, Acc) ->
Streamer ! {next_bytes, Ref, self()},
receive
- {started_open_doc_revs, NewRef} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {body_bytes, Ref, Bytes} ->
- receive_all(Streamer, Ref, [Bytes | Acc]);
- {body_done, Ref} ->
- lists:reverse(Acc)
+ {started_open_doc_revs, NewRef} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {body_bytes, Ref, Bytes} ->
+ receive_all(Streamer, Ref, [Bytes | Acc]);
+ {body_done, Ref} ->
+ lists:reverse(Acc)
end.
-
mp_parse_mixed(eof) ->
- receive {get_headers, Ref, From} ->
- From ! {done, Ref}
+ receive
+ {get_headers, Ref, From} ->
+ From ! {done, Ref}
end;
mp_parse_mixed({headers, H}) ->
- receive {get_headers, Ref, From} ->
- From ! {headers, Ref, H}
+ receive
+ {get_headers, Ref, From} ->
+ From ! {headers, Ref, H}
end,
fun mp_parse_mixed/1;
mp_parse_mixed({body, Bytes}) ->
- receive {next_bytes, Ref, From} ->
- From ! {body_bytes, Ref, Bytes}
+ receive
+ {next_bytes, Ref, From} ->
+ From ! {body_bytes, Ref, Bytes}
end,
fun mp_parse_mixed/1;
mp_parse_mixed(body_end) ->
- receive {next_bytes, Ref, From} ->
- From ! {body_done, Ref};
- {get_headers, Ref, From} ->
- self() ! {get_headers, Ref, From}
+ receive
+ {next_bytes, Ref, From} ->
+ From ! {body_done, Ref};
+ {get_headers, Ref, From} ->
+ self() ! {get_headers, Ref, From}
end,
fun mp_parse_mixed/1.
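The clauses above implement a small pull protocol between the document reader and the multipart parser; a simplified sketch of the message flow (message names taken from the code):

%% reader                           mp_parse_mixed/1
%% {get_headers, Ref, self()} --->
%%                            <---  {headers, Ref, H} | {done, Ref}
%% {next_bytes, Ref, self()}  --->
%%                            <---  {body_bytes, Ref, Bytes} | {body_done, Ref}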
-
receive_doc_data(Streamer, Ref) ->
Streamer ! {next_bytes, Ref, self()},
receive
- {body_bytes, Ref, Bytes} ->
- {Bytes, fun() -> receive_doc_data(Streamer, Ref) end};
- {body_done, Ref} ->
- {<<>>, fun() -> receive_doc_data(Streamer, Ref) end}
+ {body_bytes, Ref, Bytes} ->
+ {Bytes, fun() -> receive_doc_data(Streamer, Ref) end};
+ {body_done, Ref} ->
+ {<<>>, fun() -> receive_doc_data(Streamer, Ref) end}
end.
-
changes_ev1(object_start, UserFun, UserAcc) ->
fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
@@ -757,11 +841,13 @@ changes_ev3(array_start, UserFun, UserAcc) ->
changes_ev_loop(object_start, UserFun, UserAcc) ->
fun(Ev) ->
- json_stream_parse:collect_object(Ev,
+ json_stream_parse:collect_object(
+ Ev,
fun(Obj) ->
UserAcc2 = UserFun(json_to_doc_info(Obj), UserAcc),
fun(Ev2) -> changes_ev_loop(Ev2, UserFun, UserAcc2) end
- end)
+ end
+ )
end;
changes_ev_loop(array_end, _UserFun, _UserAcc) ->
fun(_Ev) -> changes_ev_done() end.
@@ -772,80 +858,106 @@ changes_ev_done() ->
continuous_changes(DataFun, UserFun) ->
{DataFun2, _, Rest} = json_stream_parse:events(
DataFun,
- fun(Ev) -> parse_changes_line(Ev, UserFun) end),
+ fun(Ev) -> parse_changes_line(Ev, UserFun) end
+ ),
continuous_changes(fun() -> {Rest, DataFun2} end, UserFun).
parse_changes_line(object_start, UserFun) ->
fun(Ev) ->
- json_stream_parse:collect_object(Ev,
- fun(Obj) -> UserFun(json_to_doc_info(Obj)) end)
+ json_stream_parse:collect_object(
+ Ev,
+ fun(Obj) -> UserFun(json_to_doc_info(Obj)) end
+ )
end.
json_to_doc_info({Props}) ->
case get_value(<<"changes">>, Props) of
- undefined ->
- {last_seq, get_value(<<"last_seq">>, Props)};
- Changes ->
- RevsInfo0 = lists:map(
- fun({Change}) ->
- Rev = couch_doc:parse_rev(get_value(<<"rev">>, Change)),
- Del = get_value(<<"deleted">>, Change, false),
- #rev_info{rev=Rev, deleted=Del}
- end, Changes),
-
- RevsInfo = case get_value(<<"removed">>, Props) of
- true ->
- [_ | RevsInfo1] = RevsInfo0,
- RevsInfo1;
- _ ->
- RevsInfo0
- end,
+ undefined ->
+ {last_seq, get_value(<<"last_seq">>, Props)};
+ Changes ->
+ RevsInfo0 = lists:map(
+ fun({Change}) ->
+ Rev = couch_doc:parse_rev(get_value(<<"rev">>, Change)),
+ Del = get_value(<<"deleted">>, Change, false),
+ #rev_info{rev = Rev, deleted = Del}
+ end,
+ Changes
+ ),
+
+ RevsInfo =
+ case get_value(<<"removed">>, Props) of
+ true ->
+ [_ | RevsInfo1] = RevsInfo0,
+ RevsInfo1;
+ _ ->
+ RevsInfo0
+ end,
- #doc_info{
- id = get_value(<<"id">>, Props),
- high_seq = get_value(<<"seq">>, Props),
- revs = RevsInfo
- }
+ #doc_info{
+ id = get_value(<<"id">>, Props),
+ high_seq = get_value(<<"seq">>, Props),
+ revs = RevsInfo
+ }
end.
bulk_results_to_errors(Docs, {ok, Results}, interactive_edit) ->
- lists:reverse(lists:foldl(
- fun({_, {ok, _}}, Acc) ->
- Acc;
- ({#doc{id = Id, revs = {Pos, [RevId | _]}}, Error}, Acc) ->
- {_, Error, Reason} = couch_httpd:error_info(Error),
- [ {[{id, Id}, {rev, rev_to_str({Pos, RevId})},
- {error, Error}, {reason, Reason}]} | Acc ]
- end,
- [], lists:zip(Docs, Results)));
-
+ lists:reverse(
+ lists:foldl(
+ fun
+ ({_, {ok, _}}, Acc) ->
+ Acc;
+ ({#doc{id = Id, revs = {Pos, [RevId | _]}}, Error}, Acc) ->
+ {_, Error, Reason} = couch_httpd:error_info(Error),
+ [
+ {[
+ {id, Id},
+ {rev, rev_to_str({Pos, RevId})},
+ {error, Error},
+ {reason, Reason}
+ ]}
+ | Acc
+ ]
+ end,
+ [],
+ lists:zip(Docs, Results)
+ )
+ );
bulk_results_to_errors(Docs, {ok, Results}, replicated_changes) ->
bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit);
-
bulk_results_to_errors(_Docs, {aborted, Results}, interactive_edit) ->
lists:map(
fun({{Id, Rev}, Err}) ->
{_, Error, Reason} = couch_httpd:error_info(Err),
{[{id, Id}, {rev, rev_to_str(Rev)}, {error, Error}, {reason, Reason}]}
end,
- Results);
-
+ Results
+ );
bulk_results_to_errors(_Docs, Results, remote) ->
- lists:reverse(lists:foldl(
- fun({Props}, Acc) ->
- case get_value(<<"error">>, Props, get_value(error, Props)) of
- undefined ->
- Acc;
- Error ->
- Id = get_value(<<"id">>, Props, get_value(id, Props)),
- Rev = get_value(<<"rev">>, Props, get_value(rev, Props)),
- Reason = get_value(<<"reason">>, Props, get_value(reason, Props)),
- [ {[{id, Id}, {rev, rev_to_str(Rev)},
- {error, Error}, {reason, Reason}]} | Acc ]
- end
- end,
- [], Results)).
-
+ lists:reverse(
+ lists:foldl(
+ fun({Props}, Acc) ->
+ case get_value(<<"error">>, Props, get_value(error, Props)) of
+ undefined ->
+ Acc;
+ Error ->
+ Id = get_value(<<"id">>, Props, get_value(id, Props)),
+ Rev = get_value(<<"rev">>, Props, get_value(rev, Props)),
+ Reason = get_value(<<"reason">>, Props, get_value(reason, Props)),
+ [
+ {[
+ {id, Id},
+ {rev, rev_to_str(Rev)},
+ {error, Error},
+ {reason, Reason}
+ ]}
+ | Acc
+ ]
+ end
+ end,
+ [],
+ Results
+ )
+ ).
rev_to_str({_Pos, _Id} = Rev) ->
couch_doc:rev_to_str(Rev);
@@ -854,18 +966,19 @@ rev_to_str(Rev) ->
write_fun() ->
fun(Data) ->
- receive {get_data, Ref, From} ->
- From ! {data, Ref, Data}
+ receive
+ {get_data, Ref, From} ->
+ From ! {data, Ref, Data}
end
end.
stream_doc({JsonBytes, Atts, Boundary, Len}) ->
case erlang:erase({doc_streamer, Boundary}) of
- Pid when is_pid(Pid) ->
- unlink(Pid),
- exit(Pid, kill);
- _ ->
- ok
+ Pid when is_pid(Pid) ->
+ unlink(Pid),
+ exit(Pid, kill);
+ _ ->
+ ok
end,
DocStreamer = spawn_link(
couch_doc,
@@ -880,8 +993,9 @@ stream_doc({0, Id}) ->
stream_doc({LenLeft, Id}) when LenLeft > 0 ->
Ref = make_ref(),
erlang:get({doc_streamer, Id}) ! {get_data, Ref, self()},
- receive {data, Ref, Data} ->
- {ok, Data, {LenLeft - iolist_size(Data), Id}}
+ receive
+ {data, Ref, Data} ->
+ {ok, Data, {LenLeft - iolist_size(Data), Id}}
end.
header_value(Key, Headers) ->
@@ -896,16 +1010,13 @@ header_value(Key, Headers, Default) ->
Default
end.
-
maybe_append_create_query_params(Db, Params) when map_size(Params) == 0 ->
Db;
-
maybe_append_create_query_params(Db, #{} = Params) ->
ParamList = maps:to_list(Params),
NewUrl = Db#httpdb.url ++ "?" ++ mochiweb_util:urlencode(ParamList),
Db#httpdb{url = NewUrl}.
-
db_from_json(#{} = DbMap) ->
#{
<<"url">> := Url,
@@ -917,25 +1028,34 @@ db_from_json(#{} = DbMap) ->
<<"retries">> := Retries,
<<"proxy_url">> := ProxyUrl0
} = DbMap,
- Headers = maps:fold(fun(K, V, Acc) ->
- [{binary_to_list(K), binary_to_list(V)} | Acc]
- end, [], Headers0),
- IBrowseOptions = maps:fold(fun
- (<<"socket_options">>, #{} = SockOpts, Acc) ->
- SockOptsKVs = maps:fold(fun sock_opts_fold/3, [], SockOpts),
- [{socket_options, SockOptsKVs} | Acc];
- (<<"ssl_options">>, #{} = SslOpts, Acc) ->
- SslOptsKVs = maps:fold(fun ssl_opts_fold/3, [], SslOpts),
- [{ssl_options, SslOptsKVs} | Acc];
- (K, V, Acc) when is_binary(V) ->
- [{binary_to_atom(K, utf8), binary_to_list(V)} | Acc];
- (K, V, Acc) ->
- [{binary_to_atom(K, utf8), V} | Acc]
- end, [], IBrowseOptions0),
- ProxyUrl = case ProxyUrl0 of
- null -> undefined;
- V when is_binary(V) -> binary_to_list(V)
- end,
+ Headers = maps:fold(
+ fun(K, V, Acc) ->
+ [{binary_to_list(K), binary_to_list(V)} | Acc]
+ end,
+ [],
+ Headers0
+ ),
+ IBrowseOptions = maps:fold(
+ fun
+ (<<"socket_options">>, #{} = SockOpts, Acc) ->
+ SockOptsKVs = maps:fold(fun sock_opts_fold/3, [], SockOpts),
+ [{socket_options, SockOptsKVs} | Acc];
+ (<<"ssl_options">>, #{} = SslOpts, Acc) ->
+ SslOptsKVs = maps:fold(fun ssl_opts_fold/3, [], SslOpts),
+ [{ssl_options, SslOptsKVs} | Acc];
+ (K, V, Acc) when is_binary(V) ->
+ [{binary_to_atom(K, utf8), binary_to_list(V)} | Acc];
+ (K, V, Acc) ->
+ [{binary_to_atom(K, utf8), V} | Acc]
+ end,
+ [],
+ IBrowseOptions0
+ ),
+ ProxyUrl =
+ case ProxyUrl0 of
+ null -> undefined;
+ V when is_binary(V) -> binary_to_list(V)
+ end,
#httpdb{
url = binary_to_list(Url),
auth_props = Auth,
@@ -947,45 +1067,34 @@ db_from_json(#{} = DbMap) ->
proxy_url = ProxyUrl
}.
-
send_req(#httpdb{} = HttpDb, Opts, Callback) when is_function(Callback) ->
couch_replicator_httpc:send_req(HttpDb, Opts, Callback).
-
get_value(K, Props) ->
couch_util:get_value(K, Props).
-
get_value(K, Props, Default) ->
couch_util:get_value(K, Props, Default).
-
encode_doc_id(DocId) ->
couch_util:encode_doc_id(DocId).
-
-
% See couch_replicator_docs:ssl_params/1 for how ssl options are parsed,
% and http://erlang.org/doc/man/ssl.html#type-server_option for the full
% list of SSL server options
%
ssl_opts_fold(K, V, Acc) when is_boolean(V); is_integer(V) ->
[{binary_to_atom(K, utf8), V} | Acc];
-
ssl_opts_fold(K, null, Acc) ->
[{binary_to_atom(K, utf8), undefined} | Acc];
-
ssl_opts_fold(<<"verify">>, V, Acc) ->
[{verify, binary_to_atom(V, utf8)} | Acc];
-
ssl_opts_fold(K, V, Acc) when is_list(V) ->
[{binary_to_atom(K, utf8), binary_to_list(V)} | Acc].
-
% See ?VALID_SOCK_OPTS in couch_replicator_docs for accepted socket options
%
sock_opts_fold(K, V, Acc) when is_binary(V) ->
- [{binary_to_atom(K, utf8), binary_to_atom(V, utf8)} | Acc];
-
+ [{binary_to_atom(K, utf8), binary_to_atom(V, utf8)} | Acc];
sock_opts_fold(K, V, Acc) when is_boolean(V); is_integer(V) ->
[{binary_to_atom(K, utf8), V} | Acc].
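A minimal sketch of what the two folds produce when invoked as in db_from_json/1 above (input values are illustrative):

%% maps:fold(fun ssl_opts_fold/3, [], #{<<"verify">> => <<"verify_peer">>, <<"depth">> => 3})
%% yields, in some order: [{verify, verify_peer}, {depth, 3}]
%% maps:fold(fun sock_opts_fold/3, [], #{<<"nodelay">> => true})
%% yields: [{nodelay, true}]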
diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl
index 272e10af5..e5c024f7e 100644
--- a/src/couch_replicator/src/couch_replicator_auth.erl
+++ b/src/couch_replicator/src/couch_replicator_auth.erl
@@ -12,7 +12,6 @@
-module(couch_replicator_auth).
-
-export([
initialize/1,
update_headers/2,
@@ -20,17 +19,13 @@
cleanup/1
]).
-
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
-type headers() :: [{string(), string()}].
-type code() :: non_neg_integer().
-
-define(DEFAULT_PLUGINS, "couch_replicator_auth_session,couch_replicator_auth_noop").
-
% Behavior API
% Note for plugin developers: consider using the "auth" field in the source and
@@ -49,7 +44,6 @@
-callback cleanup(term()) -> ok.
-
% Main API
-spec initialize(#httpdb{}) -> {ok, #httpdb{}} | {error, term()}.
@@ -61,13 +55,11 @@ initialize(#httpdb{auth_context = nil} = HttpDb) ->
{error, Error}
end.
-
-spec update_headers(#httpdb{}, headers()) -> {headers(), #httpdb{}}.
update_headers(#httpdb{auth_context = {Mod, Context}} = HttpDb, Headers) ->
{Headers1, Context1} = Mod:update_headers(Context, Headers),
{Headers1, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
-
-spec handle_response(#httpdb{}, code(), headers()) ->
{continue | retry, term()}.
handle_response(#httpdb{} = HttpDb, Code, Headers) ->
@@ -75,13 +67,11 @@ handle_response(#httpdb{} = HttpDb, Code, Headers) ->
{Res, Context1} = Mod:handle_response(Context, Code, Headers),
{Res, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
-
-spec cleanup(#httpdb{}) -> #httpdb{}.
cleanup(#httpdb{auth_context = {Module, Context}} = HttpDb) ->
ok = Module:cleanup(Context),
HttpDb#httpdb{auth_context = nil}.
-
% Private helper functions
-spec get_plugin_modules() -> [atom()].
@@ -89,7 +79,6 @@ get_plugin_modules() ->
Plugins1 = config:get("replicator", "auth_plugins", ?DEFAULT_PLUGINS),
[list_to_atom(Plugin) || Plugin <- string:tokens(Plugins1, ",")].
-
try_initialize([], _HttpDb) ->
{error, no_more_auth_plugins_left_to_try};
try_initialize([Mod | Modules], HttpDb) ->
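The plugin list read by get_plugin_modules/0 above is a comma-separated config value; each module is tried in order until one accepts the endpoint. A hypothetical configuration (my_auth_plugin is an assumed module name, placed ahead of the defaults):

%% In local.ini (or via the config API):
%% [replicator]
%% auth_plugins = my_auth_plugin,couch_replicator_auth_session,couch_replicator_auth_noop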
diff --git a/src/couch_replicator/src/couch_replicator_auth_noop.erl b/src/couch_replicator/src/couch_replicator_auth_noop.erl
index 5dbf13335..e2a7ee839 100644
--- a/src/couch_replicator/src/couch_replicator_auth_noop.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_noop.erl
@@ -12,10 +12,8 @@
-module(couch_replicator_auth_noop).
-
-behavior(couch_replicator_auth).
-
-export([
initialize/1,
update_headers/2,
@@ -23,30 +21,24 @@
cleanup/1
]).
-
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
-type headers() :: [{string(), string()}].
-type code() :: non_neg_integer().
-
-spec initialize(#httpdb{}) -> {ok, #httpdb{}, term()} | ignore.
initialize(#httpdb{} = HttpDb) ->
{ok, HttpDb, nil}.
-
-spec update_headers(term(), headers()) -> {headers(), term()}.
update_headers(Context, Headers) ->
{Headers, Context}.
-
-spec handle_response(term(), code(), headers()) ->
{continue | retry, term()}.
handle_response(Context, _Code, _Headers) ->
{continue, Context}.
-
-spec cleanup(term()) -> ok.
cleanup(_Context) ->
ok.
diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
index b2bf31722..acd74a3d5 100644
--- a/src/couch_replicator/src/couch_replicator_auth_session.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_session.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
% This is the replicator session auth plugin. It implements session based
% authentication for the replicator. The only public API are the functions from
% the couch_replicator_auth behaviour. Most of the logic and state is in the
@@ -46,14 +45,11 @@
% ensure that if something goes wrong and one of the endpoints issues invalid
% cookies, the replicator won't be stuck in a busy loop refreshing them.
-
-module(couch_replicator_auth_session).
-
-behaviour(couch_replicator_auth).
-behaviour(gen_server).
-
-export([
initialize/1,
update_headers/2,
@@ -71,12 +67,10 @@
format_status/2
]).
-
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include_lib("kernel/include/logger.hrl").
-
-type headers() :: [{string(), string()}].
-type code() :: non_neg_integer().
-type time_sec() :: non_neg_integer().
@@ -85,7 +79,6 @@
-define(MIN_UPDATE_INTERVAL_SEC, 5).
-define(DEFAULT_REFRESH_INTERVAL_SEC, 550).
-
-record(state, {
epoch = 0 :: non_neg_integer(),
cookie :: string() | undefined,
@@ -95,12 +88,11 @@
httpdb_pool :: pid(),
httpdb_ibrowse_options = [] :: list(),
session_url :: string(),
- next_refresh = infinity :: infinity | non_neg_integer(),
+ next_refresh = infinity :: infinity | non_neg_integer(),
refresh_tstamp = 0 :: non_neg_integer(),
require_valid_user = false :: boolean()
}).
-
% Behavior API callbacks
-spec initialize(#httpdb{}) ->
@@ -118,37 +110,31 @@ initialize(#httpdb{} = HttpDb) ->
ignore
end.
-
-spec update_headers(term(), headers()) -> {headers(), term()}.
update_headers({Pid, Epoch, Timeout}, Headers) ->
Args = {update_headers, Headers, Epoch},
{Headers1, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
{Headers1, {Pid, Epoch1, Timeout}}.
-
-spec handle_response(term(), code(), headers()) ->
{continue | retry, term()}.
handle_response({Pid, Epoch, Timeout}, Code, Headers) ->
- Args = {handle_response, Code, Headers, Epoch},
+ Args = {handle_response, Code, Headers, Epoch},
{Retry, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
{Retry, {Pid, Epoch1, Timeout}}.
-
-spec cleanup(term()) -> ok.
cleanup({Pid, _Epoch, Timeout}) ->
gen_server:call(Pid, stop, Timeout * 10).
-
%% gen_server functions
init([#state{} = State]) ->
{ok, State}.
-
terminate(_Reason, _State) ->
ok.
-
handle_call({update_headers, Headers, _Epoch}, _From, State) ->
case maybe_refresh(State) of
{ok, State1} ->
@@ -165,31 +151,25 @@ handle_call({update_headers, Headers, _Epoch}, _From, State) ->
couch_log:error(LogMsg, [?MODULE, Error]),
{stop, Error, State}
end;
-
handle_call({handle_response, Code, Headers, Epoch}, _From, State) ->
{Retry, State1} = process_response(Code, Headers, Epoch, State),
{reply, {Retry, State1#state.epoch}, State1};
-
handle_call(stop, _From, State) ->
{stop, normal, ok, State}.
-
handle_cast(Msg, State) ->
?LOG_ERROR(#{what => unexpected_cast, in => replicator, msg => Msg}),
couch_log:error("~p: Received un-expected cast ~p", [?MODULE, Msg]),
{noreply, State}.
-
handle_info(Msg, State) ->
?LOG_ERROR(#{what => unexpected_message, in => replicator, msg => Msg}),
couch_log:error("~p : Received un-expected message ~p", [?MODULE, Msg]),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
format_status(_Opt, [_PDict, State]) ->
[
{epoch, State#state.epoch},
@@ -198,10 +178,8 @@ format_status(_Opt, [_PDict, State]) ->
{refresh_tstamp, State#state.refresh_tstamp}
].
-
%% Private helper functions
-
-spec init_state(#httpdb{}) ->
{ok, #httpdb{}, #state{}} | {error, term()} | ignore.
init_state(#httpdb{} = HttpDb) ->
@@ -242,7 +220,6 @@ init_state(#httpdb{} = HttpDb) ->
{error, Error}
end.
-
-spec extract_creds(#httpdb{}) ->
{ok, string(), string(), #httpdb{}} | {error, term()}.
extract_creds(#httpdb{} = HttpDb) ->
@@ -256,9 +233,12 @@ extract_creds(#httpdb{} = HttpDb) ->
{ok, User, Pass, HttpDb1}
end.
-
--spec process_response(non_neg_integer(), headers(),
- non_neg_integer(), #state{}) -> {retry | continue, #state{}}.
+-spec process_response(
+ non_neg_integer(),
+ headers(),
+ non_neg_integer(),
+ #state{}
+) -> {retry | continue, #state{}}.
process_response(403, _Headers, Epoch, State) ->
process_auth_failure(Epoch, State);
process_response(401, _Headers, Epoch, State) ->
@@ -266,30 +246,31 @@ process_response(401, _Headers, Epoch, State) ->
process_response(Code, Headers, _Epoch, State) when Code >= 200, Code < 300 ->
% If the server notices the cookie is about to time out, it can send a new
% cookie in the response headers. Take advantage of that and refresh the cookie.
- State1 = case maybe_update_cookie(Headers, State) of
- {ok, UpdatedState} ->
- UpdatedState;
- {error, cookie_not_found} ->
- State;
- {error, Other} ->
- ?LOG_ERROR(#{
- what => cookie_parse_error,
- in => replicator,
- details => Other
- }),
- LogMsg = "~p : Could not parse cookie from response headers ~p",
- couch_log:error(LogMsg, [?MODULE, Other]),
- State
- end,
+ State1 =
+ case maybe_update_cookie(Headers, State) of
+ {ok, UpdatedState} ->
+ UpdatedState;
+ {error, cookie_not_found} ->
+ State;
+ {error, Other} ->
+ ?LOG_ERROR(#{
+ what => cookie_parse_error,
+ in => replicator,
+ details => Other
+ }),
+ LogMsg = "~p : Could not parse cookie from response headers ~p",
+ couch_log:error(LogMsg, [?MODULE, Other]),
+ State
+ end,
{continue, State1};
process_response(_Code, _Headers, _Epoch, State) ->
{continue, State}.
-
-spec process_auth_failure(non_neg_integer(), #state{}) ->
{retry | continue, #state{}}.
-process_auth_failure(Epoch, #state{epoch = StateEpoch} = State)
- when StateEpoch > Epoch ->
+process_auth_failure(Epoch, #state{epoch = StateEpoch} = State) when
+ StateEpoch > Epoch
+->
% This request used an outdated cookie, tell it to immediately retry
% and it will pick up the current cookie when its headers are updated
{retry, State};
@@ -306,7 +287,6 @@ process_auth_failure(Epoch, #state{epoch = Epoch} = State) ->
{retry, schedule_refresh(now_sec(), State)}
end.
-
-spec get_session_url(string()) -> string().
get_session_url(Url) ->
#url{
@@ -324,14 +304,12 @@ get_session_url(Url) ->
lists:concat([Proto, "://", Host, "/_session"])
end.
-
-spec schedule_refresh(non_neg_integer(), #state{}) -> #state{}.
schedule_refresh(T, #state{next_refresh = Tc} = State) when T < Tc ->
State#state{next_refresh = T};
schedule_refresh(_, #state{} = State) ->
State.
-
-spec maybe_refresh(#state{}) -> {ok, #state{}} | {error, term()}.
maybe_refresh(#state{next_refresh = T} = State) ->
case now_sec() >= T of
@@ -341,21 +319,20 @@ maybe_refresh(#state{next_refresh = T} = State) ->
{ok, State}
end.
-
-spec refresh(#state{}) -> {ok, #state{}} | {error, term()}.
refresh(#state{session_url = Url, user = User, pass = Pass} = State) ->
- Body = mochiweb_util:urlencode([{name, User}, {password, Pass}]),
+ Body = mochiweb_util:urlencode([{name, User}, {password, Pass}]),
Headers0 = [{"Content-Type", "application/x-www-form-urlencoded"}],
- Headers = case State#state.require_valid_user of
- true ->
- Headers0 ++ [{"Authorization", "Basic " ++ b64creds(User, Pass)}];
- false ->
- Headers0
- end,
+ Headers =
+ case State#state.require_valid_user of
+ true ->
+ Headers0 ++ [{"Authorization", "Basic " ++ b64creds(User, Pass)}];
+ false ->
+ Headers0
+ end,
Result = http_request(State, Url, Headers, post, Body),
http_response(Result, State).
-
-spec http_request(#state{}, string(), headers(), atom(), iolist()) ->
{ok, string(), headers(), binary()} | {error, term()}.
http_request(#state{httpdb_pool = Pool} = State, Url, Headers, Method, Body) ->
@@ -367,20 +344,26 @@ http_request(#state{httpdb_pool = Pool} = State, Url, Headers, Method, Body) ->
],
{ok, Wrk} = couch_replicator_httpc_pool:get_worker(Pool),
try
- Result = ibrowse:send_req_direct(Wrk, Url, Headers, Method, Body, Opts,
- Timeout),
+ Result = ibrowse:send_req_direct(
+ Wrk,
+ Url,
+ Headers,
+ Method,
+ Body,
+ Opts,
+ Timeout
+ ),
case Result of
{ok, _, ResultHeaders, _} ->
stop_worker_if_server_requested(ResultHeaders, Wrk);
_Other ->
ok
- end,
- Result
+ end,
+ Result
after
ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Wrk)
end.
-
-spec stop_worker_if_server_requested(headers(), pid()) -> ok.
stop_worker_if_server_requested(ResultHeaders0, Worker) ->
ResultHeaders = mochiweb_headers:make(ResultHeaders0),
@@ -396,13 +379,16 @@ stop_worker_if_server_requested(ResultHeaders0, Worker) ->
ok
end.
-
--spec http_response({ok, string(), headers(), binary()} | {error, term()},
- #state{}) -> {ok, #state{}} | {error, term()}.
+-spec http_response(
+ {ok, string(), headers(), binary()} | {error, term()},
+ #state{}
+) -> {ok, #state{}} | {error, term()}.
http_response({ok, "200", Headers, _}, State) ->
maybe_update_cookie(Headers, State);
-http_response({ok, "401", Headers0, _}, #state{session_url = Url,
- user = User}) ->
+http_response({ok, "401", Headers0, _}, #state{
+ session_url = Url,
+ user = User
+}) ->
Headers = mochiweb_headers:make(Headers0),
case mochiweb_headers:get_value("WWW-Authenticate", Headers) of
undefined ->
@@ -419,7 +405,6 @@ http_response({ok, Code, _, _}, #state{session_url = Url, user = User}) ->
http_response({error, Error}, #state{session_url = Url, user = User}) ->
{error, {session_request_failed, Url, User, Error}}.
-
-spec parse_cookie(list()) -> {ok, age(), string()} | {error, term()}.
parse_cookie(Headers0) ->
Headers = mochiweb_headers:make(Headers0),
@@ -438,14 +423,11 @@ parse_cookie(Headers0) ->
end
end.
-
-spec parse_max_age(list()) -> age().
parse_max_age(CaseInsKVs) ->
case mochiweb_headers:get_value("Max-Age", CaseInsKVs) of
String when is_list(String) ->
- try
- list_to_integer(String)
- of
+ try list_to_integer(String) of
MaxAge when MaxAge >= 0 ->
MaxAge;
_ ->
@@ -458,7 +440,6 @@ parse_max_age(CaseInsKVs) ->
undefined
end.
-
-spec maybe_update_cookie(headers(), #state{}) ->
{ok, string()} | {error, term()}.
maybe_update_cookie(ResponseHeaders, State) ->
@@ -469,7 +450,6 @@ maybe_update_cookie(ResponseHeaders, State) ->
{error, Error}
end.
-
-spec update_cookie(#state{}, string(), time_sec(), age()) -> #state{}.
update_cookie(#state{cookie = Cookie} = State, Cookie, _, _) ->
State;
@@ -482,77 +462,75 @@ update_cookie(#state{epoch = Epoch} = State, Cookie, NowSec, MaxAge) ->
},
schedule_refresh(NextRefresh, NewState).
-
-spec next_refresh(time_sec(), age(), time_sec()) -> time_sec().
next_refresh(NowSec, undefined, RefreshInterval) ->
NowSec + RefreshInterval;
-
next_refresh(NowSec, MaxAge, _) when is_integer(MaxAge) ->
% Apply a fudge factor to account for delays in receiving the cookie
% and/or time adjustments happening over a longer period of time
NowSec + trunc(MaxAge * 0.9).
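Worked example with illustrative numbers: given Max-Age=100 and NowSec=1000, next_refresh(1000, 100, _) is 1000 + trunc(100 * 0.9) = 1090, i.e. the refresh fires after 90 of the cookie's 100 seconds, consistent with the 80-90 second window asserted in t_do_refresh_with_max_age/0 further below.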
-
-spec cookie_age_sec(#state{}, time_sec()) -> time_sec().
cookie_age_sec(#state{refresh_tstamp = RefreshTs}, Now) ->
max(0, Now - RefreshTs).
-
-spec now_sec() -> time_sec().
now_sec() ->
{Mega, Sec, _Micro} = os:timestamp(),
Mega * 1000000 + Sec.
-
-spec min_update_interval() -> time_sec().
min_update_interval() ->
- config:get_integer("replicator", "session_min_update_interval",
- ?MIN_UPDATE_INTERVAL_SEC).
-
+ config:get_integer(
+ "replicator",
+ "session_min_update_interval",
+ ?MIN_UPDATE_INTERVAL_SEC
+ ).
-spec refresh_interval() -> integer().
refresh_interval() ->
- config:get_integer("replicator", "session_refresh_interval_sec",
- ?DEFAULT_REFRESH_INTERVAL_SEC).
-
-
+ config:get_integer(
+ "replicator",
+ "session_refresh_interval_sec",
+ ?DEFAULT_REFRESH_INTERVAL_SEC
+ ).
-spec b64creds(string(), string()) -> string().
b64creds(User, Pass) ->
base64:encode_to_string(User ++ ":" ++ Pass).
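For example, b64creds("u", "p") returns "dTpw", so refresh/1 above sends {"Authorization", "Basic dTpw"} when require_valid_user is in effect.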
-
remove_basic_auth_creds(#httpdb{auth_props = Props} = HttpDb) ->
Props1 = maps:remove(<<"basic">>, Props),
HttpDb#httpdb{auth_props = Props1}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
get_session_url_test_() ->
- [?_assertEqual(SessionUrl, get_session_url(Url)) || {Url, SessionUrl} <- [
- {"http://host/db", "http://host/_session"},
- {"http://127.0.0.1/db", "http://127.0.0.1/_session"},
- {"http://host/x/y/z", "http://host/_session"},
- {"http://host:5984/db", "http://host:5984/_session"},
- {"https://host/db?q=1", "https://host/_session"}
- ]].
-
+ [
+ ?_assertEqual(SessionUrl, get_session_url(Url))
+ || {Url, SessionUrl} <- [
+ {"http://host/db", "http://host/_session"},
+ {"http://127.0.0.1/db", "http://127.0.0.1/_session"},
+ {"http://host/x/y/z", "http://host/_session"},
+ {"http://host:5984/db", "http://host:5984/_session"},
+ {"https://host/db?q=1", "https://host/_session"}
+ ]
+ ].
extract_creds_success_test() ->
- HttpDb = #httpdb{auth_props = #{
- <<"basic">> => #{
- <<"username">> => <<"u2">>,
- <<"password">> => <<"p2">>
+ HttpDb = #httpdb{
+ auth_props = #{
+ <<"basic">> => #{
+ <<"username">> => <<"u2">>,
+ <<"password">> => <<"p2">>
+ }
}
- }},
+ },
?assertEqual({ok, "u2", "p2", #httpdb{}}, extract_creds(HttpDb)),
?assertEqual({error, missing_credentials}, extract_creds(#httpdb{})).
-
cookie_update_test_() ->
{
setup,
@@ -580,7 +558,6 @@ cookie_update_test_() ->
}
}.
-
t_do_refresh_without_max_age() ->
?_test(begin
State = #state{next_refresh = 0},
@@ -591,7 +568,6 @@ t_do_refresh_without_max_age() ->
?assert(540 < RefreshInterval andalso RefreshInterval =< 550)
end).
-
t_do_refresh_with_max_age() ->
?_test(begin
State = #state{next_refresh = 0},
@@ -603,7 +579,6 @@ t_do_refresh_with_max_age() ->
?assert(80 < RefreshInterval andalso RefreshInterval =< 90)
end).
-
t_dont_refresh() ->
?_test(begin
State = #state{
@@ -620,7 +595,6 @@ t_dont_refresh() ->
?assertMatch(State2, State3)
end).
-
t_process_auth_failure() ->
?_test(begin
State = #state{epoch = 1, refresh_tstamp = 0},
@@ -629,21 +603,18 @@ t_process_auth_failure() ->
?assert(NextRefresh =< now_sec())
end).
-
t_process_auth_failure_stale_epoch() ->
?_test(begin
State = #state{epoch = 3},
?assertMatch({retry, State}, process_auth_failure(2, State))
end).
-
t_process_auth_failure_too_frequent() ->
?_test(begin
State = #state{epoch = 4, refresh_tstamp = now_sec()},
?assertMatch({continue, _}, process_auth_failure(4, State))
end).
-
t_process_ok_update_cookie() ->
?_test(begin
Headers = [{"set-CookiE", "AuthSession=xyz; Path=/;"}, {"X", "y"}],
@@ -654,7 +625,6 @@ t_process_ok_update_cookie() ->
?assertMatch({continue, #state{cookie = "xyz", epoch = 2}}, Res2)
end).
-
t_process_ok_no_cookie() ->
?_test(begin
Headers = [{"X", "y"}],
@@ -663,37 +633,34 @@ t_process_ok_no_cookie() ->
?assertMatch({continue, State}, Res)
end).
-
t_init_state_fails_on_401() ->
?_test(begin
mock_http_401_response(),
{error, Error} = init_state(httpdb("http://u:p@h")),
- SessionUrl = "http://h/_session",
+ SessionUrl = "http://h/_session",
?assertEqual({session_request_unauthorized, SessionUrl, "u"}, Error)
end).
-
t_init_state_401_with_require_valid_user() ->
?_test(begin
mock_http_401_response_with_require_valid_user(),
- ?assertMatch({ok, #httpdb{}, #state{cookie = "Cookie"}},
- init_state(httpdb("http://u:p@h")))
+ ?assertMatch(
+ {ok, #httpdb{}, #state{cookie = "Cookie"}},
+ init_state(httpdb("http://u:p@h"))
+ )
end).
-
t_init_state_404() ->
?_test(begin
mock_http_404_response(),
?assertEqual(ignore, init_state(httpdb("http://u:p@h")))
end).
-
t_init_state_no_creds() ->
?_test(begin
?_assertEqual(ignore, init_state(httpdb("http://h")))
end).
-
t_init_state_http_error() ->
?_test(begin
mock_http_error_response(),
@@ -702,7 +669,6 @@ t_init_state_http_error() ->
?assertEqual({session_request_failed, SessionUrl, "u", x}, Error)
end).
-
httpdb(Url) when is_list(Url) ->
FakeDoc = #{
<<"source">> => list_to_binary(Url),
@@ -712,7 +678,6 @@ httpdb(Url) when is_list(Url) ->
HttpDb = maps:get(<<"source">>, Rep),
couch_replicator_api_wrap:db_from_json(HttpDb).
-
setup_all() ->
meck:expect(couch_replicator_httpc_pool, get_worker, 1, {ok, worker}),
meck:expect(couch_replicator_httpc_pool, release_worker_sync, 2, ok),
@@ -720,11 +685,9 @@ setup_all() ->
mock_http_cookie_response("Abc"),
ok.
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([
config,
@@ -732,44 +695,37 @@ setup() ->
ibrowse
]).
-
teardown(_) ->
ok.
-
mock_http_cookie_response(Cookie) ->
Resp = {ok, "200", [{"Set-Cookie", "AuthSession=" ++ Cookie}], []},
meck:expect(ibrowse, send_req_direct, 7, Resp).
-
mock_http_cookie_response_with_age(Cookie, Age) ->
AgeKV = "Max-Age=" ++ Age,
CookieKV = "AuthSession=" ++ Cookie,
Resp = {ok, "200", [{"Set-Cookie", CookieKV ++ ";" ++ AgeKV}], []},
meck:expect(ibrowse, send_req_direct, 7, Resp).
-
mock_http_401_response() ->
meck:expect(ibrowse, send_req_direct, 7, {ok, "401", [], []}).
-
mock_http_401_response_with_require_valid_user() ->
Resp1 = {ok, "401", [{"WWW-Authenticate", "Basic realm=\"server\""}], []},
Resp2 = {ok, "200", [{"Set-Cookie", "AuthSession=Cookie"}], []},
meck:expect(ibrowse, send_req_direct, 7, meck:seq([Resp1, Resp2])).
-
mock_http_404_response() ->
meck:expect(ibrowse, send_req_direct, 7, {ok, "404", [], []}).
-
mock_http_error_response() ->
meck:expect(ibrowse, send_req_direct, 7, {error, x}).
-
parse_max_age_test_() ->
- [?_assertEqual(R, parse_max_age(mochiweb_headers:make([{"Max-Age", A}])))
- || {A, R} <- [
+ [
+ ?_assertEqual(R, parse_max_age(mochiweb_headers:make([{"Max-Age", A}])))
+ || {A, R} <- [
{"-10", undefined},
{"\ufeff", undefined},
{"*", undefined},
@@ -782,7 +738,6 @@ parse_max_age_test_() ->
]
].
-
remove_basic_auth_creds_test() ->
Check = fun(Props) ->
HttpDb = remove_basic_auth_creds(#httpdb{auth_props = Props}),
@@ -793,19 +748,25 @@ remove_basic_auth_creds_test() ->
?assertEqual(#{<<"other">> => #{}}, Check(#{<<"other">> => #{}})),
- ?assertEqual(#{}, Check(#{
- <<"basic">> => #{
- <<"username">> => <<"u">>,
- <<"password">> => <<"p">>
- }
- })),
-
- ?assertEqual(#{<<"other">> => #{}}, Check(#{
- <<"basic">> => #{
- <<"username">> => <<"u">>,
- <<"password">> => <<"p">>
- },
- <<"other">> => #{}
- })).
+ ?assertEqual(
+ #{},
+ Check(#{
+ <<"basic">> => #{
+ <<"username">> => <<"u">>,
+ <<"password">> => <<"p">>
+ }
+ })
+ ),
+
+ ?assertEqual(
+ #{<<"other">> => #{}},
+ Check(#{
+ <<"basic">> => #{
+ <<"username">> => <<"u">>,
+ <<"password">> => <<"p">>
+ },
+ <<"other">> => #{}
+ })
+ ).
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_changes_reader.erl b/src/couch_replicator/src/couch_replicator_changes_reader.erl
index 97c728971..c3bd926ee 100644
--- a/src/couch_replicator/src/couch_replicator_changes_reader.erl
+++ b/src/couch_replicator/src/couch_replicator_changes_reader.erl
@@ -23,28 +23,39 @@
-include("couch_replicator.hrl").
-include_lib("kernel/include/logger.hrl").
-
start_link(StartSeq, #httpdb{} = Db, ChangesQueue, #{} = Options) ->
Parent = self(),
- {ok, spawn_link(fun() ->
- put(last_seq, StartSeq),
- put(retries_left, Db#httpdb.retries),
- ?MODULE:read_changes(Parent, StartSeq, Db#httpdb{retries = 0},
- ChangesQueue, Options)
- end)};
+ {ok,
+ spawn_link(fun() ->
+ put(last_seq, StartSeq),
+ put(retries_left, Db#httpdb.retries),
+ ?MODULE:read_changes(
+ Parent,
+ StartSeq,
+ Db#httpdb{retries = 0},
+ ChangesQueue,
+ Options
+ )
+ end)};
start_link(StartSeq, Db, ChangesQueue, Options) ->
Parent = self(),
- {ok, spawn_link(fun() ->
- ?MODULE:read_changes(Parent, StartSeq, Db, ChangesQueue, Options)
- end)}.
+ {ok,
+ spawn_link(fun() ->
+ ?MODULE:read_changes(Parent, StartSeq, Db, ChangesQueue, Options)
+ end)}.
read_changes(Parent, StartSeq, Db, ChangesQueue, Options) ->
Continuous = maps:get(<<"continuous">>, Options, false),
try
- couch_replicator_api_wrap:changes_since(Db, all_docs, StartSeq,
+ couch_replicator_api_wrap:changes_since(
+ Db,
+ all_docs,
+ StartSeq,
fun(Item) ->
process_change(Item, {Parent, Db, ChangesQueue, Continuous})
- end, couch_replicator_utils:proplist_options(Options)),
+ end,
+ couch_replicator_utils:proplist_options(Options)
+ ),
couch_work_queue:close(ChangesQueue)
catch
throw:recurse ->
@@ -54,41 +65,50 @@ read_changes(Parent, StartSeq, Db, ChangesQueue, Options) ->
LS = get(last_seq),
read_changes(Parent, LS, Db, ChangesQueue, Options);
throw:{retry_limit, Error} ->
- couch_stats:increment_counter(
- [couch_replicator, changes_read_failures]
- ),
- case get(retries_left) of
- N when N > 0 ->
- put(retries_left, N - 1),
- LastSeq = get(last_seq),
- LogMsg = #{
- what => retry_changes_feed,
- in => replicator,
- source => couch_replicator_api_wrap:db_uri(Db),
- sequence => LastSeq,
- retries_remaining => N
- },
- Db2 = case LastSeq of
- StartSeq ->
- ?LOG_NOTICE(LogMsg#{delay_sec => Db#httpdb.wait / 1000}),
- couch_log:notice("Retrying _changes request to source database ~s"
- " with since=~p in ~p seconds",
- [couch_replicator_api_wrap:db_uri(Db), LastSeq, Db#httpdb.wait / 1000]),
- ok = timer:sleep(Db#httpdb.wait),
- Db#httpdb{wait = 2 * Db#httpdb.wait};
- _ ->
- ?LOG_NOTICE(LogMsg),
- couch_log:notice("Retrying _changes request to source database ~s"
- " with since=~p", [couch_replicator_api_wrap:db_uri(Db), LastSeq]),
- Db
- end,
- read_changes(Parent, LastSeq, Db2, ChangesQueue, Options);
- _ ->
- exit(Error)
- end
+ couch_stats:increment_counter(
+ [couch_replicator, changes_read_failures]
+ ),
+ case get(retries_left) of
+ N when N > 0 ->
+ put(retries_left, N - 1),
+ LastSeq = get(last_seq),
+ LogMsg = #{
+ what => retry_changes_feed,
+ in => replicator,
+ source => couch_replicator_api_wrap:db_uri(Db),
+ sequence => LastSeq,
+ retries_remaining => N
+ },
+ Db2 =
+ case LastSeq of
+ StartSeq ->
+ ?LOG_NOTICE(LogMsg#{delay_sec => Db#httpdb.wait / 1000}),
+ couch_log:notice(
+ "Retrying _changes request to source database ~s"
+ " with since=~p in ~p seconds",
+ [
+ couch_replicator_api_wrap:db_uri(Db),
+ LastSeq,
+ Db#httpdb.wait / 1000
+ ]
+ ),
+ ok = timer:sleep(Db#httpdb.wait),
+ Db#httpdb{wait = 2 * Db#httpdb.wait};
+ _ ->
+ ?LOG_NOTICE(LogMsg),
+ couch_log:notice(
+ "Retrying _changes request to source database ~s"
+ " with since=~p",
+ [couch_replicator_api_wrap:db_uri(Db), LastSeq]
+ ),
+ Db
+ end,
+ read_changes(Parent, LastSeq, Db2, ChangesQueue, Options);
+ _ ->
+ exit(Error)
+ end
end.
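Note the backoff shape in the retry clause above: retries stuck at the same sequence sleep Db#httpdb.wait milliseconds and then double it (e.g., assuming an initial wait of 250 ms: 250, 500, 1000, ...), while retries that advanced past StartSeq retry immediately with the wait unchanged.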
-
process_change(#doc_info{id = <<>>} = DocInfo, {_, Db, _, _}) ->
% Previous CouchDB releases had a bug which allowed a doc with an empty ID
% to be inserted into databases. Such a doc is impossible to GET.
@@ -98,10 +118,11 @@ process_change(#doc_info{id = <<>>} = DocInfo, {_, Db, _, _}) ->
source => couch_replicator_api_wrap:db_uri(Db),
sequence => DocInfo#doc_info.high_seq
}),
- couch_log:error("Replicator: ignoring document with empty ID in "
+ couch_log:error(
+ "Replicator: ignoring document with empty ID in "
"source database `~s` (_changes sequence ~p)",
- [couch_replicator_api_wrap:db_uri(Db), DocInfo#doc_info.high_seq]);
-
+ [couch_replicator_api_wrap:db_uri(Db), DocInfo#doc_info.high_seq]
+ );
process_change(#doc_info{id = Id} = DocInfo, {Parent, Db, ChangesQueue, _}) ->
case is_doc_id_too_long(byte_size(Id)) of
true ->
@@ -113,26 +134,32 @@ process_change(#doc_info{id = Id} = DocInfo, {Parent, Db, ChangesQueue, _}) ->
docid => Id,
details => "document ID too long"
}),
- couch_log:error("Replicator: document id `~s...` from source db "
- " `~64s` is too long, ignoring.", [Id, SourceDb]),
+ couch_log:error(
+ "Replicator: document id `~s...` from source db "
+ " `~64s` is too long, ignoring.",
+ [Id, SourceDb]
+ ),
Stats = couch_replicator_stats:new([{doc_write_failures, 1}]),
ok = gen_server:call(Parent, {add_stats, Stats}, infinity);
false ->
ok = couch_work_queue:queue(ChangesQueue, DocInfo),
put(last_seq, DocInfo#doc_info.high_seq)
end;
-
process_change({last_seq, LS}, {_Parent, _, ChangesQueue, true = _Continuous}) ->
% LS should never be undefined, but it doesn't hurt to be defensive inside
% the replicator.
- Seq = case LS of undefined -> get(last_seq); _ -> LS end,
+ Seq =
+ case LS of
+ undefined -> get(last_seq);
+ _ -> LS
+ end,
OldSeq = get(last_seq),
- if Seq == OldSeq -> ok; true ->
- ok = couch_work_queue:queue(ChangesQueue, {last_seq, Seq})
+ if
+ Seq == OldSeq -> ok;
+ true -> ok = couch_work_queue:queue(ChangesQueue, {last_seq, Seq})
end,
put(last_seq, Seq),
throw(recurse);
-
process_change({last_seq, _}, _) ->
% This clause is unreachable today, but let's plan ahead for the future
% where we checkpoint against last_seq instead of the sequence of the last
diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl
index 8082ef438..988353f22 100644
--- a/src/couch_replicator/src/couch_replicator_connection.erl
+++ b/src/couch_replicator/src/couch_replicator_connection.erl
@@ -20,18 +20,18 @@
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3
]).
-export([
- acquire/1,
- acquire/2,
- release/1
+ acquire/1,
+ acquire/2,
+ release/1
]).
-export([
@@ -45,7 +45,6 @@
-define(DEFAULT_CLOSE_INTERVAL, 90000).
-define(RELISTEN_DELAY, 5000).
-
-record(state, {
close_interval,
timer
@@ -60,40 +59,43 @@
mref
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init([]) ->
process_flag(trap_exit, true),
- ?MODULE = ets:new(?MODULE, [named_table, public,
- {keypos, #connection.worker}]),
+ ?MODULE = ets:new(?MODULE, [
+ named_table,
+ public,
+ {keypos, #connection.worker}
+ ]),
ok = config:listen_for_changes(?MODULE, nil),
- Interval = config:get_integer("replicator", "connection_close_interval",
- ?DEFAULT_CLOSE_INTERVAL),
+ Interval = config:get_integer(
+ "replicator",
+ "connection_close_interval",
+ ?DEFAULT_CLOSE_INTERVAL
+ ),
Timer = erlang:send_after(Interval, self(), close_idle_connections),
ibrowse:add_config([
{inactivity_timeout, Interval},
{worker_trap_exits, false}
]),
- {ok, #state{close_interval=Interval, timer=Timer}}.
+ {ok, #state{close_interval = Interval, timer = Timer}}.
acquire(Url) ->
acquire(Url, undefined).
acquire(Url, ProxyUrl) when is_binary(Url) ->
acquire(binary_to_list(Url), ProxyUrl);
-
acquire(Url, ProxyUrl) when is_binary(ProxyUrl) ->
acquire(Url, binary_to_list(ProxyUrl));
-
acquire(Url0, ProxyUrl0) ->
Url = couch_util:url_strip_password(Url0),
- ProxyUrl = case ProxyUrl0 of
- undefined -> undefined;
- _ -> couch_util:url_strip_password(ProxyUrl0)
- end,
+ ProxyUrl =
+ case ProxyUrl0 of
+ undefined -> undefined;
+ _ -> couch_util:url_strip_password(ProxyUrl0)
+ end,
case gen_server:call(?MODULE, {acquire, Url, ProxyUrl}) of
{ok, Worker} ->
link(Worker),
@@ -106,28 +108,37 @@ acquire(Url0, ProxyUrl0) ->
{error, Reason}
end.
-
release(Worker) ->
unlink(Worker),
gen_server:cast(?MODULE, {release, Worker}).
-
handle_call({acquire, Url, ProxyUrl}, From, State) ->
{Pid, _Ref} = From,
case {ibrowse_lib:parse_url(Url), parse_proxy_url(ProxyUrl)} of
- {#url{host=Host, port=Port}, #url{host=ProxyHost, port=ProxyPort}} ->
+ {#url{host = Host, port = Port}, #url{host = ProxyHost, port = ProxyPort}} ->
Pat = #connection{
- host=Host, port=Port,
- proxy_host=ProxyHost, proxy_port=ProxyPort,
- mref=undefined, _='_'},
+ host = Host,
+ port = Port,
+ proxy_host = ProxyHost,
+ proxy_port = ProxyPort,
+ mref = undefined,
+ _ = '_'
+ },
case ets:match_object(?MODULE, Pat, 1) of
'$end_of_table' ->
{reply, {error, all_allocated}, State};
{[Worker], _Cont} ->
- couch_stats:increment_counter([couch_replicator, connection,
- acquires]),
- ets:insert(?MODULE, Worker#connection{mref=monitor(process,
- Pid)}),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ acquires
+ ]),
+ ets:insert(?MODULE, Worker#connection{
+ mref = monitor(
+ process,
+ Pid
+ )
+ }),
{reply, {ok, Worker#connection.worker}, State}
end;
{{error, invalid_uri}, _} ->
@@ -135,26 +146,30 @@ handle_call({acquire, Url, ProxyUrl}, From, State) ->
{_, {error, invalid_uri}} ->
{reply, {error, invalid_uri}, State}
end;
-
handle_call({create, Url, ProxyUrl, Worker}, From, State) ->
{Pid, _Ref} = From,
case {ibrowse_lib:parse_url(Url), parse_proxy_url(ProxyUrl)} of
- {#url{host=Host, port=Port}, #url{host=ProxyHost, port=ProxyPort}} ->
+ {#url{host = Host, port = Port}, #url{host = ProxyHost, port = ProxyPort}} ->
link(Worker),
- couch_stats:increment_counter([couch_replicator, connection,
- creates]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ creates
+ ]),
true = ets:insert_new(
?MODULE,
#connection{
- host=Host, port=Port,
- proxy_host=ProxyHost, proxy_port=ProxyPort,
- worker=Worker,
- mref=monitor(process, Pid)}
+ host = Host,
+ port = Port,
+ proxy_host = ProxyHost,
+ proxy_port = ProxyPort,
+ worker = Worker,
+ mref = monitor(process, Pid)
+ }
),
{reply, ok, State}
end.
-
handle_cast({release, WorkerPid}, State) ->
couch_stats:increment_counter([couch_replicator, connection, releases]),
case ets:lookup(?MODULE, WorkerPid) of
@@ -163,39 +178,45 @@ handle_cast({release, WorkerPid}, State) ->
MRef when is_reference(MRef) -> demonitor(MRef, [flush]);
undefined -> ok
end,
- ets:insert(?MODULE, Worker#connection{mref=undefined});
+ ets:insert(?MODULE, Worker#connection{mref = undefined});
[] ->
ok
end,
{noreply, State};
-
handle_cast({connection_close_interval, V}, State) ->
erlang:cancel_timer(State#state.timer),
NewTimer = erlang:send_after(V, self(), close_idle_connections),
ibrowse:add_config([{inactivity_timeout, V}]),
- {noreply, State#state{close_interval=V, timer=NewTimer}}.
-
+ {noreply, State#state{close_interval = V, timer = NewTimer}}.
% owner crashed
handle_info({'DOWN', Ref, process, _Pid, _Reason}, State) ->
- couch_stats:increment_counter([couch_replicator, connection,
- owner_crashes]),
- Conns = ets:match_object(?MODULE, #connection{mref = Ref, _='_'}),
- lists:foreach(fun(Conn) ->
- couch_stats:increment_counter([couch_replicator, connection, closes]),
- delete_worker(Conn)
- end, Conns),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ owner_crashes
+ ]),
+ Conns = ets:match_object(?MODULE, #connection{mref = Ref, _ = '_'}),
+ lists:foreach(
+ fun(Conn) ->
+ couch_stats:increment_counter([couch_replicator, connection, closes]),
+ delete_worker(Conn)
+ end,
+ Conns
+ ),
{noreply, State};
-
% worker crashed
handle_info({'EXIT', Pid, Reason}, State) ->
- couch_stats:increment_counter([couch_replicator, connection,
- worker_crashes]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ worker_crashes
+ ]),
case ets:lookup(?MODULE, Pid) of
[] ->
ok;
[Worker] ->
- #connection{host=Host, port=Port} = Worker,
+ #connection{host = Host, port = Port} = Worker,
maybe_log_worker_death(Host, Port, Reason),
case Worker#connection.mref of
MRef when is_reference(MRef) -> demonitor(MRef, [flush]);
@@ -204,37 +225,34 @@ handle_info({'EXIT', Pid, Reason}, State) ->
ets:delete(?MODULE, Pid)
end,
{noreply, State};
-
handle_info(close_idle_connections, State) ->
#state{
- close_interval=Interval,
- timer=Timer
+ close_interval = Interval,
+ timer = Timer
} = State,
- Conns = ets:match_object(?MODULE, #connection{mref=undefined, _='_'}),
- lists:foreach(fun(Conn) ->
- couch_stats:increment_counter([couch_replicator, connection, closes]),
- delete_worker(Conn)
- end, Conns),
+ Conns = ets:match_object(?MODULE, #connection{mref = undefined, _ = '_'}),
+ lists:foreach(
+ fun(Conn) ->
+ couch_stats:increment_counter([couch_replicator, connection, closes]),
+ delete_worker(Conn)
+ end,
+ Conns
+ ),
erlang:cancel_timer(Timer),
NewTimer = erlang:send_after(Interval, self(), close_idle_connections),
- {noreply, State#state{timer=NewTimer}};
-
+ {noreply, State#state{timer = NewTimer}};
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
terminate(_Reason, _State) ->
ok.
-
maybe_log_worker_death(_Host, _Port, normal) ->
ok;
-
maybe_log_worker_death(Host, Port, Reason) ->
?LOG_INFO(#{
what => connection_failed,
@@ -246,7 +264,6 @@ maybe_log_worker_death(Host, Port, Reason) ->
ErrMsg = "Replication connection to: ~p:~p died with reason ~p",
couch_log:info(ErrMsg, [Host, Port, Reason]).
-
-spec delete_worker(#connection{}) -> ok.
delete_worker(Worker) ->
ets:delete(?MODULE, Worker#connection.worker),
@@ -254,25 +271,19 @@ delete_worker(Worker) ->
spawn(fun() -> ibrowse_http_client:stop(Worker#connection.worker) end),
ok.
-
handle_config_change("replicator", "connection_close_interval", V, _, S) ->
- ok = gen_server:cast(?MODULE, {connection_close_interval,
- list_to_integer(V)}),
+ ok = gen_server:cast(?MODULE, {connection_close_interval, list_to_integer(V)}),
{ok, S};
-
handle_config_change(_, _, _, _, S) ->
{ok, S}.
-
handle_config_terminate(_, stop, _) ->
ok;
-
handle_config_terminate(_, _, _) ->
Pid = whereis(?MODULE),
erlang:send_after(?RELISTEN_DELAY, Pid, restart_config_listener).
-
parse_proxy_url(undefined) ->
- #url{host=undefined, port=undefined};
+ #url{host = undefined, port = undefined};
parse_proxy_url(ProxyUrl) ->
ibrowse_lib:parse_url(ProxyUrl).
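A usage sketch of the pool above (URL illustrative; error handling elided):

%% {ok, Worker} = couch_replicator_connection:acquire("http://127.0.0.1:5984/"),
%% ... issue requests over Worker, e.g. via ibrowse:send_req_direct/7 ...
%% ok = couch_replicator_connection:release(Worker).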
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index d70ad1cc0..a4c4ccc00 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -20,21 +20,17 @@
after_doc_read/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
-include_lib("kernel/include/logger.hrl").
-
-define(OWNER, <<"owner">>).
--define(CTX, {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}).
+-define(CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]}}).
-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
remove_state_fields(null, null, null) ->
ok;
-
remove_state_fields(DbName, DbUUID, DocId) ->
update_rep_doc(DbName, DbUUID, DocId, [
{?REPLICATION_STATE, undefined},
@@ -45,27 +41,26 @@ remove_state_fields(DbName, DbUUID, DocId) ->
]),
ok.
-
-spec update_completed(binary(), binary(), binary(), [_]) -> ok.
update_completed(null, null, _, _) ->
ok;
-
update_completed(DbName, DbUUID, DocId, #{} = Stats0) ->
Stats = {maps:to_list(Stats0)},
update_rep_doc(DbName, DbUUID, DocId, [
{?REPLICATION_STATE, ?ST_COMPLETED},
{?REPLICATION_STATE_REASON, undefined},
- {?REPLICATION_STATS, Stats}]),
- couch_stats:increment_counter([couch_replicator, docs,
+ {?REPLICATION_STATS, Stats}
+ ]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ docs,
completed_state_updates
]),
ok.
-
-spec update_failed(binary(), binary(), binary(), any()) -> ok.
update_failed(null, null, null, _) ->
ok;
-
update_failed(DbName, DbUUID, DocId, Error) ->
Reason = error_reason(Error),
?LOG_ERROR(#{
@@ -75,88 +70,111 @@ update_failed(DbName, DbUUID, DocId, Error) ->
replicator_doc => DocId,
details => Reason
}),
- couch_log:error("Error processing replication doc `~s` from `~s`: ~s",
- [DocId, DbName, Reason]),
+ couch_log:error(
+ "Error processing replication doc `~s` from `~s`: ~s",
+ [DocId, DbName, Reason]
+ ),
update_rep_doc(DbName, DbUUID, DocId, [
{?REPLICATION_STATE, ?ST_FAILED},
{?REPLICATION_STATS, undefined},
{?REPLICATION_STATE_REASON, Reason}
]),
- couch_stats:increment_counter([couch_replicator, docs,
- failed_state_updates]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ docs,
+ failed_state_updates
+ ]),
ok.
-
--spec before_doc_update(#doc{}, Db::any(), couch_db:update_type()) -> #doc{}.
+-spec before_doc_update(#doc{}, Db :: any(), couch_db:update_type()) -> #doc{}.
before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _, _) ->
Doc;
before_doc_update(#doc{body = {Body}} = Doc, Db, _UpdateType) ->
#user_ctx{roles = Roles, name = Name} = fabric2_db:get_user_ctx(Db),
IsReplicator = lists:member(<<"_replicator">>, Roles),
- Doc1 = case IsReplicator of true -> Doc; false ->
- case couch_util:get_value(?OWNER, Body) of
- undefined ->
- Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
- Name ->
+ Doc1 =
+ case IsReplicator of
+ true ->
Doc;
- Other ->
- case (catch fabric2_db:check_is_admin(Db)) of
- ok when Other =:= null ->
+ false ->
+ case couch_util:get_value(?OWNER, Body) of
+ undefined ->
Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
- ok ->
+ Name ->
Doc;
- _ ->
- throw({forbidden, <<"Can't update replication",
- "documents from other users.">>})
+ Other ->
+ case (catch fabric2_db:check_is_admin(Db)) of
+ ok when Other =:= null ->
+ Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
+ ok ->
+ Doc;
+ _ ->
+ throw(
+ {forbidden,
+ <<"Can't update replication",
+ "documents from other users.">>}
+ )
+ end
end
- end
- end,
+ end,
Deleted = Doc1#doc.deleted,
IsFailed = couch_util:get_value(?REPLICATION_STATE, Body) == ?ST_FAILED,
- case IsReplicator orelse Deleted orelse IsFailed of true -> ok; false ->
- try
- couch_replicator_parse:parse_rep_doc(Doc1#doc.body)
- catch
- throw:{bad_rep_doc, Error} ->
- throw({forbidden, Error})
- end
+ case IsReplicator orelse Deleted orelse IsFailed of
+ true ->
+ ok;
+ false ->
+ try
+ couch_replicator_parse:parse_rep_doc(Doc1#doc.body)
+ catch
+ throw:{bad_rep_doc, Error} ->
+ throw({forbidden, Error})
+ end
end,
Doc1.
-
--spec after_doc_read(#doc{}, Db::any()) -> #doc{}.
+-spec after_doc_read(#doc{}, Db :: any()) -> #doc{}.
after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
Doc;
after_doc_read(#doc{body = {Body}} = Doc, Db) ->
#user_ctx{name = Name} = fabric2_db:get_user_ctx(Db),
- case (catch fabric2_db:check_is_admin(Db)) of ok -> Doc; _ ->
- case couch_util:get_value(?OWNER, Body) of Name -> Doc; _ ->
- Source0 = couch_util:get_value(<<"source">>, Body),
- Target0 = couch_util:get_value(<<"target">>, Body),
- Source = strip_credentials(Source0),
- Target = strip_credentials(Target0),
- NewBody0 = ?replace(Body, <<"source">>, Source),
- NewBody = ?replace(NewBody0, <<"target">>, Target),
- #doc{revs = {Pos, [_ | Revs]}} = Doc,
- NewDoc = Doc#doc{body = {NewBody}, revs = {Pos - 1, Revs}},
- fabric2_db:new_revid(Db, NewDoc)
- end
+ case (catch fabric2_db:check_is_admin(Db)) of
+ ok ->
+ Doc;
+ _ ->
+ case couch_util:get_value(?OWNER, Body) of
+ Name ->
+ Doc;
+ _ ->
+ Source0 = couch_util:get_value(<<"source">>, Body),
+ Target0 = couch_util:get_value(<<"target">>, Body),
+ Source = strip_credentials(Source0),
+ Target = strip_credentials(Target0),
+ NewBody0 = ?replace(Body, <<"source">>, Source),
+ NewBody = ?replace(NewBody0, <<"target">>, Target),
+ #doc{revs = {Pos, [_ | Revs]}} = Doc,
+ NewDoc = Doc#doc{body = {NewBody}, revs = {Pos - 1, Revs}},
+ fabric2_db:new_revid(Db, NewDoc)
+ end
end.
-
update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs) ->
update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs, 1).
-
-update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs, Wait)
- when is_binary(RepDocId) ->
+update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs, Wait) when
+ is_binary(RepDocId)
+->
try
case open_rep_doc(RepDbName, RepDbUUID, RepDocId) of
{ok, LastRepDoc} ->
- update_rep_doc(RepDbName, RepDbUUID, LastRepDoc, KVs,
- Wait * 2);
+ update_rep_doc(
+ RepDbName,
+ RepDbUUID,
+ LastRepDoc,
+ KVs,
+ Wait * 2
+ );
_ ->
ok
end
@@ -175,38 +193,45 @@ update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs, Wait)
ok = timer:sleep(Delay),
update_rep_doc(RepDbName, RepDbUUID, RepDocId, KVs, Wait * 2)
end;
-
update_rep_doc(RepDbName, RepDbUUID, #doc{body = {RepDocBody}} = RepDoc, KVs, _Try) ->
NewRepDocBody = lists:foldl(
- fun({K, undefined}, Body) when is_binary(K) ->
+ fun
+ ({K, undefined}, Body) when is_binary(K) ->
lists:keydelete(K, 1, Body);
- ({?REPLICATION_STATE = K, State} = KV, Body) when is_binary(K) ->
+ ({?REPLICATION_STATE = K, State} = KV, Body) when is_binary(K) ->
case couch_util:get_value(K, Body) of
- State ->
- Body;
- _ ->
- Body1 = lists:keystore(K, 1, Body, KV),
- Timestamp = couch_replicator_utils:iso8601(),
- lists:keystore(
- ?REPLICATION_STATE_TIME, 1, Body1,
- {?REPLICATION_STATE_TIME, Timestamp})
+ State ->
+ Body;
+ _ ->
+ Body1 = lists:keystore(K, 1, Body, KV),
+ Timestamp = couch_replicator_utils:iso8601(),
+ lists:keystore(
+ ?REPLICATION_STATE_TIME,
+ 1,
+ Body1,
+ {?REPLICATION_STATE_TIME, Timestamp}
+ )
end;
({K, _V} = KV, Body) when is_binary(K) ->
lists:keystore(K, 1, Body, KV)
end,
- RepDocBody, KVs),
+ RepDocBody,
+ KVs
+ ),
case NewRepDocBody of
- RepDocBody ->
- ok;
- _ ->
- % Might not succeed - when the replication doc is deleted right
- % before this update (not an error, ignore).
- save_rep_doc(RepDbName, RepDbUUID, RepDoc#doc{body = {NewRepDocBody}})
+ RepDocBody ->
+ ok;
+ _ ->
+ % This might not succeed when the replication doc is deleted right
+ % before this update (not an error; ignore it).
+ save_rep_doc(RepDbName, RepDbUUID, RepDoc#doc{body = {NewRepDocBody}})
end.
-
-open_rep_doc(DbName, DbUUID, DocId) when is_binary(DbName), is_binary(DbUUID),
- is_binary(DocId) ->
+open_rep_doc(DbName, DbUUID, DocId) when
+ is_binary(DbName),
+ is_binary(DbUUID),
+ is_binary(DocId)
+->
try
case fabric2_db:open(DbName, [?CTX, sys_db, {uuid, DbUUID}]) of
{ok, Db} -> fabric2_db:open_doc(Db, DocId, [ejson_body]);
@@ -217,7 +242,6 @@ open_rep_doc(DbName, DbUUID, DocId) when is_binary(DbName), is_binary(DbUUID),
{not_found, database_does_not_exist}
end.
-
save_rep_doc(DbName, DbUUID, Doc) when is_binary(DbName), is_binary(DbUUID) ->
try
{ok, Db} = fabric2_db:open(DbName, [?CTX, sys_db, {uuid, DbUUID}]),
@@ -241,17 +265,19 @@ save_rep_doc(DbName, DbUUID, Doc) when is_binary(DbName), is_binary(DbUUID) ->
{ok, forbidden}
end.
-
--spec strip_credentials(undefined) -> undefined;
+-spec strip_credentials
+ (undefined) -> undefined;
(binary()) -> binary();
({[_]}) -> {[_]}.
strip_credentials(undefined) ->
undefined;
strip_credentials(Url) when is_binary(Url) ->
- re:replace(Url,
+ re:replace(
+ Url,
"http(s)?://(?:[^:]+):[^@]+@(.*)$",
"http\\1://\\2",
- [{return, binary}]);
+ [{return, binary}]
+ );
strip_credentials({Props0}) ->
Props1 = lists:keydelete(<<"headers">>, 1, Props0),
% Strip "auth" just like headers, for replication plugins it can be a place
@@ -259,16 +285,17 @@ strip_credentials({Props0}) ->
Props2 = lists:keydelete(<<"auth">>, 1, Props1),
{Props2}.
-
error_reason({shutdown, Error}) ->
error_reason(Error);
error_reason({bad_rep_doc, Reason}) ->
couch_util:to_binary(Reason);
-error_reason(#{<<"error">> := Error, <<"reason">> := Reason})
- when is_binary(Error), is_binary(Reason) ->
+error_reason(#{<<"error">> := Error, <<"reason">> := Reason}) when
+ is_binary(Error), is_binary(Reason)
+->
    couch_util:to_binary(io_lib:format("~s: ~s", [Error, Reason]));
-error_reason({error, {Error, Reason}})
- when is_atom(Error), is_binary(Reason) ->
+error_reason({error, {Error, Reason}}) when
+ is_atom(Error), is_binary(Reason)
+->
couch_util:to_binary(io_lib:format("~s: ~s", [Error, Reason]));
error_reason({error, Reason}) ->
couch_util:to_binary(Reason);
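
As a hedged aside, here is what the strip_credentials/1 URL clause reformatted above does; the URL is invented and the transcript is a sketch, not output captured from this change:

    1> re:replace(<<"https://user:secret@db.example.com/mydb">>,
           "http(s)?://(?:[^:]+):[^@]+@(.*)$", "http\\1://\\2",
           [{return, binary}]).
    <<"https://db.example.com/mydb">>
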
diff --git a/src/couch_replicator/src/couch_replicator_epi.erl b/src/couch_replicator/src/couch_replicator_epi.erl
index 9fb1790b5..760ebd7db 100644
--- a/src/couch_replicator/src/couch_replicator_epi.erl
+++ b/src/couch_replicator/src/couch_replicator_epi.erl
@@ -10,13 +10,10 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_replicator_epi).
-
-behaviour(couch_epi_plugin).
-
-export([
app/0,
providers/0,
@@ -27,32 +24,25 @@
notify/3
]).
-
app() ->
couch_replicator.
-
providers() ->
[
{fabric2_db, couch_replicator_fabric2_plugin}
].
-
services() ->
[].
-
data_subscriptions() ->
[].
-
data_providers() ->
[].
-
processes() ->
[].
-
notify(_Key, _Old, _New) ->
ok.
diff --git a/src/couch_replicator/src/couch_replicator_fabric2_plugin.erl b/src/couch_replicator/src/couch_replicator_fabric2_plugin.erl
index 7bf614512..5ba78e038 100644
--- a/src/couch_replicator/src/couch_replicator_fabric2_plugin.erl
+++ b/src/couch_replicator/src/couch_replicator_fabric2_plugin.erl
@@ -10,27 +10,22 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_replicator_fabric2_plugin).
-
-export([
after_db_create/2,
after_db_delete/2,
after_doc_write/6
]).
-
after_db_create(DbName, DbUUID) ->
couch_replicator:after_db_create(DbName, DbUUID),
[DbName, DbUUID].
-
after_db_delete(DbName, DbUUID) ->
couch_replicator:after_db_delete(DbName, DbUUID),
[DbName, DbUUID].
-
-after_doc_write(Db, Doc, Winner, OldWinner, RevId, Seq)->
+after_doc_write(Db, Doc, Winner, OldWinner, RevId, Seq) ->
couch_replicator:after_doc_write(Db, Doc, Winner, OldWinner, RevId, Seq),
[Db, Doc, Winner, OldWinner, RevId, Seq].
diff --git a/src/couch_replicator/src/couch_replicator_filters.erl b/src/couch_replicator/src/couch_replicator_filters.erl
index 1cadce1dd..ec9e1bdf7 100644
--- a/src/couch_replicator/src/couch_replicator_filters.erl
+++ b/src/couch_replicator/src/couch_replicator_filters.erl
@@ -22,19 +22,18 @@
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
-
% Parse the filter from the replication options.
% Return {ok, {FilterType, ...}} | {error, ParseError}.
% For a `user` filter, i.e. a filter specified as user code
% in the source database, this code doesn't fetch the filter
% code, but only returns the name of the filter.
-spec parse(#{}) ->
- {ok, nil} |
- {ok, {view, binary(), {[_]}}} |
- {ok, {user, {binary(), binary()}, {[_]}}} |
- {ok, {docids, [_]}} |
- {ok, {mango, {[_]}}} |
- {error, binary()}.
+ {ok, nil}
+ | {ok, {view, binary(), {[_]}}}
+ | {ok, {user, {binary(), binary()}, {[_]}}}
+ | {ok, {docids, [_]}}
+ | {ok, {mango, {[_]}}}
+ | {error, binary()}.
parse(#{} = Options) ->
Filter = maps:get(<<"filter">>, Options, undefined),
DocIds = maps:get(<<"doc_ids">>, Options, undefined),
@@ -63,7 +62,6 @@ parse(#{} = Options) ->
{error, list_to_binary(Err)}
end.
-
% Fetches the body of the filter function from the source database. Guaranteed
% to either return {ok, Body} or an {error, Reason}. Also assume this function
% might block due to network / socket issues for an undetermined amount of time.
@@ -90,7 +88,6 @@ fetch(DDocName, FilterName, Source) ->
{error, couch_util:to_binary(Reason)}
end.
-
% Get replication type and view (if any) from replication document props
-spec view_type(#{}, #{}) ->
{binary(), #{}} | {error, binary()}.
@@ -100,68 +97,80 @@ view_type(#{?FILTER := <<"_view">>}, #{} = Options) ->
case re:split(ViewParam, <<"/">>) of
[DName, ViewName] ->
DDocMap = #{
- <<"ddoc">> => <<"_design/",DName/binary>>,
+ <<"ddoc">> => <<"_design/", DName/binary>>,
<<"view">> => ViewName
},
{<<"view">>, DDocMap};
_ ->
{error, <<"Invalid `view` parameter.">>}
end;
-
view_type(#{}, #{}) ->
{<<"db">>, #{}}.
-
% Private functions
fetch_internal(DDocName, FilterName, Source) ->
- Db = case (catch couch_replicator_api_wrap:db_open(Source)) of
- {ok, Db0} ->
- Db0;
- DbError ->
- DbErrorMsg = io_lib:format("Could not open source database `~s`: ~s",
- [couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(DbError)]),
- throw({fetch_error, iolist_to_binary(DbErrorMsg)})
- end,
- try
- Body = case (catch couch_replicator_api_wrap:open_doc(
- Db, <<"_design/", DDocName/binary>>, [ejson_body])) of
- {ok, #doc{body = Body0}} ->
- Body0;
- DocError ->
- DocErrorMsg = io_lib:format(
- "Couldn't open document `_design/~s` from source "
- "database `~s`: ~s", [DDocName,
- couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(DocError)]
- ),
- throw({fetch_error, iolist_to_binary(DocErrorMsg)})
+ Db =
+ case (catch couch_replicator_api_wrap:db_open(Source)) of
+ {ok, Db0} ->
+ Db0;
+ DbError ->
+ DbErrorMsg = io_lib:format(
+ "Could not open source database `~s`: ~s",
+ [
+ couch_replicator_api_wrap:db_uri(Source),
+ couch_util:to_binary(DbError)
+ ]
+ ),
+ throw({fetch_error, iolist_to_binary(DbErrorMsg)})
end,
+ try
+ Body =
+ case
+ (catch couch_replicator_api_wrap:open_doc(
+ Db, <<"_design/", DDocName/binary>>, [ejson_body]
+ ))
+ of
+ {ok, #doc{body = Body0}} ->
+ Body0;
+ DocError ->
+ DocErrorMsg = io_lib:format(
+ "Couldn't open document `_design/~s` from source "
+ "database `~s`: ~s",
+ [
+ DDocName,
+ couch_replicator_api_wrap:db_uri(Source),
+ couch_util:to_binary(DocError)
+ ]
+ ),
+ throw({fetch_error, iolist_to_binary(DocErrorMsg)})
+ end,
try
Code = couch_util:get_nested_json_value(
- Body, [<<"filters">>, FilterName]),
+ Body, [<<"filters">>, FilterName]
+ ),
re:replace(Code, [$^, "\s*(.*?)\s*", $$], "\\1", [{return, binary}])
- catch
- _Tag:CodeError ->
- CodeErrorMsg = io_lib:format(
- "Couldn't parse filter code from document ~s on `~s` "
- " Error: ~s", [DDocName,
- couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(CodeError)]
- ),
- throw({fetch_error, iolist_to_binary(CodeErrorMsg)})
- end
+ catch
+ _Tag:CodeError ->
+ CodeErrorMsg = io_lib:format(
+ "Couldn't parse filter code from document ~s on `~s` "
+ " Error: ~s",
+ [
+ DDocName,
+ couch_replicator_api_wrap:db_uri(Source),
+ couch_util:to_binary(CodeError)
+ ]
+ ),
+ throw({fetch_error, iolist_to_binary(CodeErrorMsg)})
+ end
after
couch_replicator_api_wrap:db_close(Db)
end.
-
-spec query_params(#{}) -> #{}.
-query_params(#{} = Options)->
+query_params(#{} = Options) ->
maps:get(<<"query_params">>, Options, #{}).
-
parse_user_filter(Filter) ->
case re:run(Filter, "(.*?)/(.*)", [{capture, [1, 2], binary}]) of
{match, [DDocName0, FilterName0]} ->
@@ -170,31 +179,27 @@ parse_user_filter(Filter) ->
{error, <<"Invalid filter. Must match `ddocname/filtername`.">>}
end.
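
For orientation, a hedged shell sketch (invented input) of the re:run/3 call above, which splits a user filter specification on the first slash:

    1> re:run(<<"mydesign/myfilter">>, "(.*?)/(.*)", [{capture, [1, 2], binary}]).
    {match,[<<"mydesign">>,<<"myfilter">>]}
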
-
% Sort an EJSON object's properties to attempt
% to generate a unique representation. This is used
% to reduce the chance of getting different
% replication checkpoints for the same Mango selector.
-ejsort({V})->
+ejsort({V}) ->
ejsort_props(V, []);
ejsort(V) when is_list(V) ->
ejsort_array(V, []);
ejsort(V) ->
V.
-
-ejsort_props([], Acc)->
+ejsort_props([], Acc) ->
{lists:keysort(1, Acc)};
-ejsort_props([{K, V}| R], Acc) ->
+ejsort_props([{K, V} | R], Acc) ->
ejsort_props(R, [{K, ejsort(V)} | Acc]).
-
-ejsort_array([], Acc)->
+ejsort_array([], Acc) ->
lists:reverse(Acc);
ejsort_array([V | R], Acc) ->
ejsort_array(R, [ejsort(V) | Acc]).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -206,14 +211,15 @@ ejsort_basic_values_test() ->
?assertEqual(ejsort([]), []),
?assertEqual(ejsort({[]}), {[]}).
-
ejsort_compound_values_test() ->
?assertEqual(ejsort([2, 1, 3, <<"a">>]), [2, 1, 3, <<"a">>]),
- Ej1 = {[{<<"a">>, 0}, {<<"c">>, 0}, {<<"b">>, 0}]},
- Ej1s = {[{<<"a">>, 0}, {<<"b">>, 0}, {<<"c">>, 0}]},
+ Ej1 = {[{<<"a">>, 0}, {<<"c">>, 0}, {<<"b">>, 0}]},
+ Ej1s = {[{<<"a">>, 0}, {<<"b">>, 0}, {<<"c">>, 0}]},
?assertEqual(ejsort(Ej1), Ej1s),
Ej2 = {[{<<"x">>, Ej1}, {<<"z">>, Ej1}, {<<"y">>, [Ej1, Ej1]}]},
- ?assertEqual(ejsort(Ej2),
- {[{<<"x">>, Ej1s}, {<<"y">>, [Ej1s, Ej1s]}, {<<"z">>, Ej1s}]}).
+ ?assertEqual(
+ ejsort(Ej2),
+ {[{<<"x">>, Ej1s}, {<<"y">>, [Ej1s, Ej1s]}, {<<"z">>, Ej1s}]}
+ ).
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index 59a79cf93..9fd79a39f 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -40,7 +40,6 @@
% where we may end up processing an unbounded number of messages.
-define(MAX_DISCARDED_MESSAGES, 16).
-
setup(Db) ->
#httpdb{
httpc_pool = nil,
@@ -48,8 +47,11 @@ setup(Db) ->
http_connections = MaxConns,
proxy_url = ProxyUrl
} = Db,
- {ok, Pid} = couch_replicator_httpc_pool:start_link(Url, ProxyUrl,
- [{max_connections, MaxConns}]),
+ {ok, Pid} = couch_replicator_httpc_pool:start_link(
+ Url,
+ ProxyUrl,
+ [{max_connections, MaxConns}]
+ ),
case couch_replicator_auth:initialize(Db#httpdb{httpc_pool = Pid}) of
{ok, Db1} ->
{ok, Db1};
@@ -66,42 +68,47 @@ setup(Db) ->
throw({replication_auth_error, Error})
end.
-
send_req(HttpDb, Params1, Callback) ->
put(?STREAM_STATUS, init),
couch_stats:increment_counter([couch_replicator, requests]),
- Params2 = ?replace(Params1, qs,
- [{K, ?b2l(iolist_to_binary(V))} || {K, V} <- get_value(qs, Params1, [])]),
- Params = ?replace(Params2, ibrowse_options,
- lists:keysort(1, get_value(ibrowse_options, Params2, []))),
+ Params2 = ?replace(
+ Params1,
+ qs,
+ [{K, ?b2l(iolist_to_binary(V))} || {K, V} <- get_value(qs, Params1, [])]
+ ),
+ Params = ?replace(
+ Params2,
+ ibrowse_options,
+ lists:keysort(1, get_value(ibrowse_options, Params2, []))
+ ),
{Worker, Response, HttpDb1} = send_ibrowse_req(HttpDb, Params),
- Ret = try
- process_response(Response, Worker, HttpDb1, Params, Callback)
- catch
- throw:{retry, NewHttpDb0, NewParams0} ->
- {retry, NewHttpDb0, NewParams0}
- after
- Pool = HttpDb1#httpdb.httpc_pool,
- case get(?STOP_HTTP_WORKER) of
- stop ->
- ok = stop_and_release_worker(Pool, Worker),
- erase(?STOP_HTTP_WORKER);
- undefined ->
- ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
+ Ret =
+ try
+ process_response(Response, Worker, HttpDb1, Params, Callback)
+ catch
+ throw:{retry, NewHttpDb0, NewParams0} ->
+ {retry, NewHttpDb0, NewParams0}
+ after
+ Pool = HttpDb1#httpdb.httpc_pool,
+ case get(?STOP_HTTP_WORKER) of
+ stop ->
+ ok = stop_and_release_worker(Pool, Worker),
+ erase(?STOP_HTTP_WORKER);
+ undefined ->
+ ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
+ end,
+ clean_mailbox(Response)
end,
- clean_mailbox(Response)
- end,
    % This is necessary to keep this function tail-recursive. Calling
    % send_req in the catch clause would accidentally turn it into a
    % body-recursive call.
case Ret of
- {retry, #httpdb{}=NewHttpDb, NewParams} ->
+ {retry, #httpdb{} = NewHttpDb, NewParams} ->
send_req(NewHttpDb, NewParams, Callback);
_ ->
Ret
end.
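
The comment above points at a general Erlang pattern: catch the retry signal, return it as a tagged value, and loop on that value after the try/catch so the looping call stays a tail call. A minimal, hypothetical sketch of the pattern; the module and function names are invented for illustration and are not part of this change:

    -module(retry_sketch).
    -export([loop/1]).

    %% Fails once, then succeeds: input 0 throws {retry, 1}.
    do_work(0) -> throw({retry, 1});
    do_work(N) -> {done, N}.

    loop(Arg) ->
        Ret =
            try
                do_work(Arg)
            catch
                throw:{retry, NewArg} ->
                    %% Return a tag instead of recursing here; a call inside
                    %% the catch clause would be body-recursive.
                    {retry, NewArg}
            end,
        case Ret of
            %% Looping outside the try/catch keeps this a proper tail call.
            {retry, Arg2} -> loop(Arg2);
            Other -> Other
        end.
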
-
send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb0, Params) ->
Method = get_value(method, Params, get),
UserHeaders = get_value(headers, Params, []),
@@ -110,32 +117,40 @@ send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb0, Params) ->
Url = full_url(HttpDb, Params),
Body = get_value(body, Params, []),
case get_value(path, Params) == "_changes" of
- true ->
- Timeout = infinity;
- false ->
- Timeout = case config:get("replicator", "request_timeout", "infinity") of
- "infinity" -> infinity;
- Milliseconds -> list_to_integer(Milliseconds)
- end
+ true ->
+ Timeout = infinity;
+ false ->
+ Timeout =
+ case config:get("replicator", "request_timeout", "infinity") of
+ "infinity" -> infinity;
+ Milliseconds -> list_to_integer(Milliseconds)
+ end
end,
{ok, Worker} = couch_replicator_httpc_pool:get_worker(HttpDb#httpdb.httpc_pool),
- BasicAuthOpts = case couch_replicator_utils:get_basic_auth_creds(HttpDb) of
- {undefined, undefined} ->
- [];
- {User, Pass} when is_list(User), is_list(Pass) ->
- [{basic_auth, {User, Pass}}]
- end,
- IbrowseOptions = BasicAuthOpts ++ [
- {response_format, binary}, {inactivity_timeout, HttpDb#httpdb.timeout} |
- lists:ukeymerge(1, get_value(ibrowse_options, Params, []),
- HttpDb#httpdb.ibrowse_options)
- ],
+ BasicAuthOpts =
+ case couch_replicator_utils:get_basic_auth_creds(HttpDb) of
+ {undefined, undefined} ->
+ [];
+ {User, Pass} when is_list(User), is_list(Pass) ->
+ [{basic_auth, {User, Pass}}]
+ end,
+ IbrowseOptions =
+ BasicAuthOpts ++
+ [
+ {response_format, binary},
+ {inactivity_timeout, HttpDb#httpdb.timeout}
+ | lists:ukeymerge(
+ 1,
+ get_value(ibrowse_options, Params, []),
+ HttpDb#httpdb.ibrowse_options
+ )
+ ],
backoff_before_request(Worker, HttpDb, Params),
Response = ibrowse:send_req_direct(
- Worker, Url, Headers2, Method, Body, IbrowseOptions, Timeout),
+ Worker, Url, Headers2, Method, Body, IbrowseOptions, Timeout
+ ),
{Worker, Response, HttpDb}.
-
%% Stop the worker, wait for it to die, then release it. Make sure it is dead
%% before releasing it to the pool, so there is no race triggered by recycling it
%% again. The reason is that recycling a dying worker could end up with that worker returning
@@ -153,8 +168,6 @@ stop_and_release_worker(Pool, Worker) ->
process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
put(?STOP_HTTP_WORKER, stop),
maybe_retry(sel_conn_closed, Worker, HttpDb, Params);
-
-
%% This clause handles unexpected connection closing during pipelined requests.
%% For example, if the server responds to a request, sets a Connection: close header
%% and closes the socket, ibrowse will detect that error when it sends
@@ -162,78 +175,85 @@ process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
process_response({error, connection_closing}, Worker, HttpDb, Params, _Cb) ->
put(?STOP_HTTP_WORKER, stop),
maybe_retry({error, connection_closing}, Worker, HttpDb, Params);
-
process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
process_stream_response(ReqId, Worker, HttpDb, Params, Callback);
-
process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
case list_to_integer(Code) of
- R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- 429 ->
- backoff(HttpDb, Params);
- Error when Error =:= 408 ; Error >= 500 ->
- couch_stats:increment_counter([couch_replicator, responses, failure]),
- maybe_retry({code, Error}, Worker, HttpDb, Params);
- Ok when Ok >= 200 , Ok < 500 ->
- backoff_success(HttpDb, Params),
- couch_stats:increment_counter([couch_replicator, responses, success]),
- EJson = case Body of
- <<>> ->
- null;
- Json ->
- ?JSON_DECODE(Json)
- end,
- process_auth_response(HttpDb, Ok, Headers, Params),
- if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
- Callback(Ok, Headers, EJson)
+ R when R =:= 301; R =:= 302; R =:= 303 ->
+ backoff_success(HttpDb, Params),
+ do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
+ 429 ->
+ backoff(HttpDb, Params);
+ Error when Error =:= 408; Error >= 500 ->
+ couch_stats:increment_counter([couch_replicator, responses, failure]),
+ maybe_retry({code, Error}, Worker, HttpDb, Params);
+ Ok when Ok >= 200, Ok < 500 ->
+ backoff_success(HttpDb, Params),
+ couch_stats:increment_counter([couch_replicator, responses, success]),
+ EJson =
+ case Body of
+ <<>> ->
+ null;
+ Json ->
+ ?JSON_DECODE(Json)
+ end,
+ process_auth_response(HttpDb, Ok, Headers, Params),
+ if
+ Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop);
+ true -> ok
+ end,
+ Callback(Ok, Headers, EJson)
end;
-
process_response(Error, Worker, HttpDb, Params, _Callback) ->
maybe_retry(Error, Worker, HttpDb, Params).
-
process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
receive
- {ibrowse_async_headers, ReqId, Code, Headers} ->
- case list_to_integer(Code) of
- R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- 429 ->
- Timeout = couch_replicator_rate_limiter:max_interval(),
- backoff(HttpDb#httpdb{timeout = Timeout}, Params);
- Error when Error =:= 408 ; Error >= 500 ->
+ {ibrowse_async_headers, ReqId, Code, Headers} ->
+ case list_to_integer(Code) of
+ R when R =:= 301; R =:= 302; R =:= 303 ->
+ backoff_success(HttpDb, Params),
+ do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
+ 429 ->
+ Timeout = couch_replicator_rate_limiter:max_interval(),
+ backoff(HttpDb#httpdb{timeout = Timeout}, Params);
+ Error when Error =:= 408; Error >= 500 ->
+ couch_stats:increment_counter(
+ [couch_replicator, stream_responses, failure]
+ ),
+ report_error(Worker, HttpDb, Params, {code, Error});
+ Ok when Ok >= 200, Ok < 500 ->
+ backoff_success(HttpDb, Params),
+ HttpDb1 = process_auth_response(HttpDb, Ok, Headers, Params),
+ StreamDataFun = fun() ->
+ stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
+ end,
+ put(?STREAM_STATUS, {streaming, Worker}),
+ if
+ Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop);
+ true -> ok
+ end,
+ ibrowse:stream_next(ReqId),
+ try
+ Ret = Callback(Ok, Headers, StreamDataFun),
+ Ret
+ catch
+ throw:{maybe_retry_req, connection_closed} ->
+ maybe_retry(
+ {connection_closed, mid_stream},
+ Worker,
+ HttpDb1,
+ Params
+ );
+ throw:{maybe_retry_req, Err} ->
+ maybe_retry(Err, Worker, HttpDb1, Params)
+ end
+ end;
+ {ibrowse_async_response, ReqId, {error, _} = Error} ->
couch_stats:increment_counter(
[couch_replicator, stream_responses, failure]
),
- report_error(Worker, HttpDb, Params, {code, Error});
- Ok when Ok >= 200 , Ok < 500 ->
- backoff_success(HttpDb, Params),
- HttpDb1 = process_auth_response(HttpDb, Ok, Headers, Params),
- StreamDataFun = fun() ->
- stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
- end,
- put(?STREAM_STATUS, {streaming, Worker}),
- if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
- ibrowse:stream_next(ReqId),
- try
- Ret = Callback(Ok, Headers, StreamDataFun),
- Ret
- catch
- throw:{maybe_retry_req, connection_closed} ->
- maybe_retry({connection_closed, mid_stream},
- Worker, HttpDb1, Params);
- throw:{maybe_retry_req, Err} ->
- maybe_retry(Err, Worker, HttpDb1, Params)
- end
- end;
- {ibrowse_async_response, ReqId, {error, _} = Error} ->
- couch_stats:increment_counter(
- [couch_replicator, stream_responses, failure]
- ),
- maybe_retry(Error, Worker, HttpDb, Params)
+ maybe_retry(Error, Worker, HttpDb, Params)
after HttpDb#httpdb.timeout + 500 ->
% Note: ibrowse should always reply with timeouts, but this doesn't
        % always seem to be true when there's a very high rate of requests
@@ -241,7 +261,6 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
maybe_retry(timeout, Worker, HttpDb, Params)
end.
-
process_auth_response(HttpDb, Code, Headers, Params) ->
case couch_replicator_auth:handle_response(HttpDb, Code, Headers) of
{continue, HttpDb1} ->
@@ -251,7 +270,6 @@ process_auth_response(HttpDb, Code, Headers, Params) ->
throw({retry, HttpDb1, Params})
end.
-
% Only streaming HTTP requests send messages back from
% the ibrowse worker process. We can detect that based
% on the ibrowse_req_id format. This just drops all
@@ -261,7 +279,6 @@ process_auth_response(HttpDb, Code, Headers, Params) ->
clean_mailbox(ReqId) ->
clean_mailbox(ReqId, ?MAX_DISCARDED_MESSAGES).
-
clean_mailbox(_ReqId, 0) ->
case get(?STREAM_STATUS) of
{streaming, Worker} ->
@@ -292,14 +309,13 @@ clean_mailbox({ibrowse_req_id, ReqId}, Count) when Count > 0 ->
{ibrowse_async_response_end, ReqId} ->
put(?STREAM_STATUS, ended),
ok
- after 0 ->
- ok
+ after 0 ->
+ ok
end
end;
clean_mailbox(_, Count) when Count > 0 ->
ok.
-
discard_message(ReqId, Worker, Count) ->
ibrowse:stream_next(ReqId),
receive
@@ -313,12 +329,14 @@ discard_message(ReqId, Worker, Count) ->
exit({timeout, ibrowse_stream_cleanup})
end.
-
maybe_retry(Error, Worker, #httpdb{retries = 0} = HttpDb, Params) ->
report_error(Worker, HttpDb, Params, {error, Error});
-
-maybe_retry(Error, Worker, #httpdb{retries = Retries, wait = Wait} = HttpDb,
- Params) ->
+maybe_retry(
+ Error,
+ Worker,
+ #httpdb{retries = Retries, wait = Wait} = HttpDb,
+ Params
+) ->
case total_error_time_exceeded(HttpDb) of
true ->
report_error(Worker, HttpDb, Params, {error, Error});
@@ -331,14 +349,12 @@ maybe_retry(Error, Worker, #httpdb{retries = Retries, wait = Wait} = HttpDb,
throw({retry, HttpDb2, Params})
end.
-
% When retrying, check that the total time spent retrying a request is below
% the current scheduler health threshold. The goal is to not exceed the
% threshold, otherwise a job which keeps retrying for too long would still be
% considered healthy.
total_error_time_exceeded(#httpdb{first_error_timestamp = nil}) ->
false;
-
total_error_time_exceeded(#httpdb{first_error_timestamp = ErrorTimestamp}) ->
HealthThresholdSec = couch_replicator_job:health_threshold(),
    % Threshold value is halved because in the calling code the next step
@@ -347,17 +363,14 @@ total_error_time_exceeded(#httpdb{first_error_timestamp = ErrorTimestamp}) ->
ThresholdUSec = (HealthThresholdSec / 2) * 1000000,
timer:now_diff(os:timestamp(), ErrorTimestamp) > ThresholdUSec.
-
% Remember the first time an error occurs. This value is used later to check
% the total time spent retrying a request. Because retrying is recursive, on a
% successful result the #httpdb{} record is reset back to the original value.
update_first_error_timestamp(#httpdb{first_error_timestamp = nil} = HttpDb) ->
HttpDb#httpdb{first_error_timestamp = os:timestamp()};
-
update_first_error_timestamp(HttpDb) ->
HttpDb.
-
log_retry_error(Params, HttpDb, Wait, Error) ->
Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
@@ -369,9 +382,10 @@ log_retry_error(Params, HttpDb, Wait, Error) ->
retry_delay_sec => Wait / 1000,
details => error_cause(Error)
}),
- couch_log:notice("Retrying ~s request to ~s in ~p seconds due to error ~s",
- [Method, Url, Wait / 1000, error_cause(Error)]).
-
+ couch_log:notice(
+ "Retrying ~s request to ~s in ~p seconds due to error ~s",
+ [Method, Url, Wait / 1000, error_cause(Error)]
+ ).
report_error(_Worker, HttpDb, Params, Error) ->
Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
@@ -379,7 +393,6 @@ report_error(_Worker, HttpDb, Params, Error) ->
do_report_error(Url, Method, Error),
exit({http_request_failed, Method, Url, Error}).
-
do_report_error(Url, Method, {code, Code}) ->
?LOG_ERROR(#{
what => failed_request,
@@ -388,9 +401,11 @@ do_report_error(Url, Method, {code, Code}) ->
url => Url,
status_code => Code
}),
- couch_log:error("Replicator, request ~s to ~p failed. The received "
- "HTTP error code is ~p", [Method, Url, Code]);
-
+ couch_log:error(
+ "Replicator, request ~s to ~p failed. The received "
+ "HTTP error code is ~p",
+ [Method, Url, Code]
+ );
do_report_error(FullUrl, Method, Error) ->
?LOG_ERROR(#{
what => failed_request,
@@ -399,54 +414,53 @@ do_report_error(FullUrl, Method, Error) ->
url => FullUrl,
details => error_cause(Error)
}),
- couch_log:error("Replicator, request ~s to ~p failed due to error ~s",
- [Method, FullUrl, error_cause(Error)]).
-
+ couch_log:error(
+ "Replicator, request ~s to ~p failed due to error ~s",
+ [Method, FullUrl, error_cause(Error)]
+ ).
error_cause({error, Cause}) ->
lists:flatten(io_lib:format("~p", [Cause]));
error_cause(Cause) ->
lists:flatten(io_lib:format("~p", [Cause])).
-
stream_data_self(#httpdb{timeout = T} = HttpDb, Params, Worker, ReqId, Cb) ->
case accumulate_messages(ReqId, [], T + 500) of
- {Data, ibrowse_async_response} ->
- ibrowse:stream_next(ReqId),
- {Data, fun() -> stream_data_self(HttpDb, Params, Worker, ReqId, Cb) end};
- {Data, ibrowse_async_response_end} ->
- put(?STREAM_STATUS, ended),
- {Data, fun() -> throw({maybe_retry_req, more_data_expected}) end}
+ {Data, ibrowse_async_response} ->
+ ibrowse:stream_next(ReqId),
+ {Data, fun() -> stream_data_self(HttpDb, Params, Worker, ReqId, Cb) end};
+ {Data, ibrowse_async_response_end} ->
+ put(?STREAM_STATUS, ended),
+ {Data, fun() -> throw({maybe_retry_req, more_data_expected}) end}
end.
accumulate_messages(ReqId, Acc, Timeout) ->
receive
- {ibrowse_async_response, ReqId, {error, Error}} ->
- throw({maybe_retry_req, Error});
- {ibrowse_async_response, ReqId, <<>>} ->
- accumulate_messages(ReqId, Acc, Timeout);
- {ibrowse_async_response, ReqId, Data} ->
- accumulate_messages(ReqId, [Data | Acc], 0);
- {ibrowse_async_response_end, ReqId} ->
- {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response_end}
+ {ibrowse_async_response, ReqId, {error, Error}} ->
+ throw({maybe_retry_req, Error});
+ {ibrowse_async_response, ReqId, <<>>} ->
+ accumulate_messages(ReqId, Acc, Timeout);
+ {ibrowse_async_response, ReqId, Data} ->
+ accumulate_messages(ReqId, [Data | Acc], 0);
+ {ibrowse_async_response_end, ReqId} ->
+ {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response_end}
after Timeout ->
% Note: ibrowse should always reply with timeouts, but this doesn't
        % always seem to be true when there's a very high rate of requests
% and many open connections.
- if Acc =:= [] ->
- throw({maybe_retry_req, timeout});
- true ->
- {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response}
+ if
+ Acc =:= [] ->
+ throw({maybe_retry_req, timeout});
+ true ->
+ {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response}
end
end.
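
Note the idiom above: the first receive waits up to the full timeout, but once a chunk arrives the recursive call passes 0, so chunks already queued in the mailbox are drained without blocking. A standalone, hypothetical sketch of that drain-with-zero-timeout idiom (names invented):

    -module(drain_sketch).
    -export([drain/0]).

    %% Collect every {data, D} message already sitting in the mailbox and
    %% return as soon as none are left; `after 0` never blocks.
    drain() ->
        drain([]).

    drain(Acc) ->
        receive
            {data, D} -> drain([D | Acc])
        after 0 ->
            lists:reverse(Acc)
        end.
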
-
full_url(#httpdb{url = BaseUrl}, Params) ->
Path = get_value(path, Params, []),
QueryArgs = get_value(qs, Params, []),
BaseUrl ++ Path ++ query_args_to_string(QueryArgs, []).
-
query_args_to_string([], []) ->
"";
query_args_to_string([], Acc) ->
@@ -454,13 +468,11 @@ query_args_to_string([], Acc) ->
query_args_to_string([{K, V} | Rest], Acc) ->
query_args_to_string(Rest, [K ++ "=" ++ couch_httpd:quote(V) | Acc]).
-
do_redirect(_Worker, Code, Headers, #httpdb{url = Url} = HttpDb, Params, _Cb) ->
RedirectUrl = redirect_url(Headers, Url),
{HttpDb2, Params2} = after_redirect(RedirectUrl, Code, HttpDb, Params),
throw({retry, HttpDb2, Params2}).
-
redirect_url(RespHeaders, OrigUrl) ->
MochiHeaders = mochiweb_headers:make(RespHeaders),
RedUrl = mochiweb_headers:get_value("Location", MochiHeaders),
@@ -468,25 +480,28 @@ redirect_url(RespHeaders, OrigUrl) ->
host = Host,
host_type = HostType,
port = Port,
- path = Path, % includes query string
+ % includes query string
+ path = Path,
protocol = Proto
} = ibrowse_lib:parse_url(RedUrl),
#url{
username = User,
password = Passwd
} = ibrowse_lib:parse_url(OrigUrl),
- Creds = case is_list(User) andalso is_list(Passwd) of
- true ->
- User ++ ":" ++ Passwd ++ "@";
- false ->
- []
- end,
- HostPart = case HostType of
- ipv6_address ->
- "[" ++ Host ++ "]";
- _ ->
- Host
- end,
+ Creds =
+ case is_list(User) andalso is_list(Passwd) of
+ true ->
+ User ++ ":" ++ Passwd ++ "@";
+ false ->
+ []
+ end,
+ HostPart =
+ case HostType of
+ ipv6_address ->
+ "[" ++ Host ++ "]";
+ _ ->
+ Host
+ end,
atom_to_list(Proto) ++ "://" ++ Creds ++ HostPart ++ ":" ++
integer_to_list(Port) ++ Path.
@@ -499,24 +514,20 @@ after_redirect(RedirectUrl, HttpDb, Params) ->
Params2 = lists:keydelete(path, 1, lists:keydelete(qs, 1, Params)),
{HttpDb#httpdb{url = RedirectUrl}, Params2}.
-
backoff_key(HttpDb, Params) ->
Method = get_value(method, Params, get),
Url = HttpDb#httpdb.url,
{Url, Method}.
-
backoff(HttpDb, Params) ->
Key = backoff_key(HttpDb, Params),
couch_replicator_rate_limiter:failure(Key),
throw({retry, HttpDb, Params}).
-
backoff_success(HttpDb, Params) ->
Key = backoff_key(HttpDb, Params),
couch_replicator_rate_limiter:success(Key).
-
backoff_before_request(Worker, HttpDb, Params) ->
Key = backoff_key(HttpDb, Params),
Limit = couch_replicator_rate_limiter:max_interval(),
@@ -529,25 +540,27 @@ backoff_before_request(Worker, HttpDb, Params) ->
ok
end.
-
merge_headers(Headers1, Headers2) when is_list(Headers1), is_list(Headers2) ->
Empty = mochiweb_headers:empty(),
Merged = mochiweb_headers:enter_from_list(Headers1 ++ Headers2, Empty),
mochiweb_headers:to_list(Merged).
-
-ifdef(TEST).
-include_lib("couch/include/couch_eunit.hrl").
-
merge_headers_test() ->
?assertEqual([], merge_headers([], [])),
?assertEqual([{"a", "x"}], merge_headers([], [{"a", "x"}])),
?assertEqual([{"a", "x"}], merge_headers([{"a", "x"}], [])),
?assertEqual([{"a", "y"}], merge_headers([{"A", "x"}], [{"a", "y"}])),
- ?assertEqual([{"a", "y"}, {"B", "x"}], merge_headers([{"B", "x"}],
- [{"a", "y"}])),
+ ?assertEqual(
+ [{"a", "y"}, {"B", "x"}],
+ merge_headers(
+ [{"B", "x"}],
+ [{"a", "y"}]
+ )
+ ),
?assertEqual([{"a", "y"}], merge_headers([{"A", "z"}, {"a", "y"}], [])),
?assertEqual([{"a", "y"}], merge_headers([], [{"A", "z"}, {"a", "y"}])).
diff --git a/src/couch_replicator/src/couch_replicator_httpc_pool.erl b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
index c63a5efa6..30e601f9b 100644
--- a/src/couch_replicator/src/couch_replicator_httpc_pool.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
@@ -31,13 +31,15 @@
-record(state, {
url,
proxy_url,
- limit, % max # of workers allowed
+ % max # of workers allowed
+ limit,
workers = [],
- waiting = queue:new(), % blocked clients waiting for a worker
- callers = [] % clients who've been given a worker
+ % blocked clients waiting for a worker
+ waiting = queue:new(),
+ % clients who've been given a worker
+ callers = []
}).
-
start_link(Url, Options) ->
start_link(Url, undefined, Options).
@@ -47,11 +49,9 @@ start_link(Url, ProxyUrl, Options) ->
stop(Pool) ->
ok = gen_server:call(Pool, stop, infinity).
-
get_worker(Pool) ->
{ok, _Worker} = gen_server:call(Pool, get_worker, infinity).
-
release_worker(Pool, Worker) ->
ok = gen_server:cast(Pool, {release_worker, Worker}).
@@ -67,7 +67,6 @@ init({Url, ProxyUrl, Options}) ->
},
{ok, State}.
-
handle_call(get_worker, From, State) ->
#state{
waiting = Waiting,
@@ -78,22 +77,20 @@ handle_call(get_worker, From, State) ->
workers = Workers
} = State,
case length(Workers) >= Limit of
- true ->
- {noreply, State#state{waiting = queue:in(From, Waiting)}};
- false ->
- % If the call to acquire fails, the worker pool will crash with a
- % badmatch.
- {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
- NewState = State#state{
- workers = [Worker | Workers],
- callers = monitor_client(Callers, Worker, From)
- },
- {reply, {ok, Worker}, NewState}
+ true ->
+ {noreply, State#state{waiting = queue:in(From, Waiting)}};
+ false ->
+ % If the call to acquire fails, the worker pool will crash with a
+ % badmatch.
+ {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
+ NewState = State#state{
+ workers = [Worker | Workers],
+ callers = monitor_client(Callers, Worker, From)
+ },
+ {reply, {ok, Worker}, NewState}
end;
-
handle_call(stop, _From, State) ->
{stop, normal, ok, State};
-
handle_call({release_worker_sync, Worker}, _From, State) ->
{reply, ok, release_worker_internal(Worker, State)}.
@@ -115,8 +112,10 @@ handle_info({'EXIT', Pid, _Reason}, State) ->
Workers2 ->
case queue:out(Waiting) of
{empty, _} ->
- {noreply, State#state{workers = Workers2,
- callers = NewCallers0}};
+ {noreply, State#state{
+ workers = Workers2,
+ callers = NewCallers0
+ }};
{{value, From}, Waiting2} ->
{ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
NewCallers1 = monitor_client(NewCallers0, Worker, From),
@@ -129,7 +128,6 @@ handle_info({'EXIT', Pid, _Reason}, State) ->
{noreply, NewState}
end
end;
-
handle_info({'DOWN', Ref, process, _, _}, #state{callers = Callers} = State) ->
case lists:keysearch(Ref, 2, Callers) of
{value, {Worker, Ref}} ->
@@ -138,10 +136,9 @@ handle_info({'DOWN', Ref, process, _, _}, #state{callers = Callers} = State) ->
{noreply, State}
end.
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
-
terminate(_Reason, _State) ->
ok.
@@ -172,25 +169,28 @@ demonitor_client(Callers, Worker) ->
release_worker_internal(Worker, State) ->
#state{waiting = Waiting, callers = Callers} = State,
NewCallers0 = demonitor_client(Callers, Worker),
- case is_process_alive(Worker) andalso
- lists:member(Worker, State#state.workers) of
- true ->
- Workers = case queue:out(Waiting) of
- {empty, Waiting2} ->
- NewCallers1 = NewCallers0,
- couch_replicator_connection:release(Worker),
- State#state.workers -- [Worker];
- {{value, From}, Waiting2} ->
- NewCallers1 = monitor_client(NewCallers0, Worker, From),
- gen_server:reply(From, {ok, Worker}),
- State#state.workers
- end,
- NewState = State#state{
- workers = Workers,
- waiting = Waiting2,
- callers = NewCallers1
- },
- NewState;
- false ->
- State#state{callers = NewCallers0}
- end.
+ case
+ is_process_alive(Worker) andalso
+ lists:member(Worker, State#state.workers)
+ of
+ true ->
+ Workers =
+ case queue:out(Waiting) of
+ {empty, Waiting2} ->
+ NewCallers1 = NewCallers0,
+ couch_replicator_connection:release(Worker),
+ State#state.workers -- [Worker];
+ {{value, From}, Waiting2} ->
+ NewCallers1 = monitor_client(NewCallers0, Worker, From),
+ gen_server:reply(From, {ok, Worker}),
+ State#state.workers
+ end,
+ NewState = State#state{
+ workers = Workers,
+ waiting = Waiting2,
+ callers = NewCallers1
+ },
+ NewState;
+ false ->
+ State#state{callers = NewCallers0}
+ end.
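
For orientation, a hedged usage sketch of the pool API reformatted in this file; the URL and option values are illustrative and error handling is omitted:

    {ok, Pool} = couch_replicator_httpc_pool:start_link(
        "http://127.0.0.1:5984/", undefined, [{max_connections, 20}]),
    {ok, Worker} = couch_replicator_httpc_pool:get_worker(Pool),
    %% ... hand Worker to ibrowse:send_req_direct/7, as send_ibrowse_req/2 does ...
    ok = couch_replicator_httpc_pool:release_worker(Pool, Worker),
    ok = couch_replicator_httpc_pool:stop(Pool).
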
diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl
index 0934ffe66..4e14bb7f1 100644
--- a/src/couch_replicator/src/couch_replicator_httpd.erl
+++ b/src/couch_replicator/src/couch_replicator_httpd.erl
@@ -23,19 +23,26 @@
send_method_not_allowed/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
-
-define(DEFAULT_TASK_LIMIT, 100).
-
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"jobs">>]}=Req) ->
- Limit = couch_replicator_utils:parse_int_param(Req, "limit",
- ?DEFAULT_TASK_LIMIT, 0, infinity),
- Skip = couch_replicator_utils:parse_int_param(Req, "skip", 0, 0,
- infinity),
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"jobs">>]} = Req) ->
+ Limit = couch_replicator_utils:parse_int_param(
+ Req,
+ "limit",
+ ?DEFAULT_TASK_LIMIT,
+ 0,
+ infinity
+ ),
+ Skip = couch_replicator_utils:parse_int_param(
+ Req,
+ "skip",
+ 0,
+ 0,
+ infinity
+ ),
Jobs1 = couch_replicator:jobs(),
Total = length(Jobs1),
Offset = min(Skip, Total),
@@ -45,24 +52,29 @@ handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"jobs">>]}=Req) ->
<<"offset">> => Offset,
<<"jobs">> => Jobs2
});
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"jobs">>,JobId]}=Req) ->
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"jobs">>, JobId]} = Req) ->
case couch_replicator:job(JobId) of
- {ok, JobInfo} -> send_json(Req, JobInfo);
+ {ok, JobInfo} -> send_json(Req, JobInfo);
{error, not_found} -> throw(not_found)
end;
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>]}=Req) ->
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"docs">>]} = Req) ->
handle_scheduler_docs(?REP_DB_NAME, Req);
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>,Db]}=Req)
- when ?IS_REP_DB(Db) ->
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"docs">>, Db]} = Req) when
+ ?IS_REP_DB(Db)
+->
handle_scheduler_docs(Db, Req);
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>,Db,DocId]}
- = Req) when ?IS_REP_DB(Db) ->
+handle_scheduler_req(
+ #httpd{method = 'GET', path_parts = [_, <<"docs">>, Db, DocId]} =
+ Req
+) when ?IS_REP_DB(Db) ->
handle_scheduler_doc(Db, DocId, Req);
% Allow users to pass in unencoded _replicator database names (/ are not
% escaped). This is possible here because _replicator is not a valid document
% ID, so we can disambiguate between an element of a db path and the document ID.
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>|Unquoted]}
- = Req) ->
+handle_scheduler_req(
+ #httpd{method = 'GET', path_parts = [_, <<"docs">> | Unquoted]} =
+ Req
+) ->
case parse_unquoted_docs_path(Unquoted) of
{db_only, Db} ->
handle_scheduler_docs(Db, Req);
@@ -71,12 +83,11 @@ handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>|Unquoted]}
{error, invalid} ->
throw(bad_request)
end;
-handle_scheduler_req(#httpd{method='GET'} = _Req) ->
+handle_scheduler_req(#httpd{method = 'GET'} = _Req) ->
throw(not_found);
handle_scheduler_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-
handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
RepDoc = couch_httpd:json_body_obj(Req),
@@ -97,12 +108,10 @@ handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
{ok, #{} = CheckpointHistory} ->
Res = maps:merge(#{<<"ok">> => true}, CheckpointHistory),
send_json(Req, Res)
- end;
-
+ end;
handle_req(Req) ->
send_method_not_allowed(Req, "POST").
-
handle_scheduler_docs(DbName, #httpd{user_ctx = UserCtx} = Req) ->
try fabric2_db:open(DbName, [{user_ctx, UserCtx}]) of
{ok, Db} ->
@@ -120,20 +129,18 @@ handle_scheduler_docs(DbName, #httpd{user_ctx = UserCtx} = Req) ->
throw(not_found)
end.
-
handle_scheduler_doc(DbName, DocId, #httpd{user_ctx = UserCtx} = Req) ->
- try fabric2_db:open(DbName, [{user_ctx, UserCtx}]) of
+ try fabric2_db:open(DbName, [{user_ctx, UserCtx}]) of
{ok, Db} ->
- ok = fabric2_db:check_is_member(Db),
- case couch_replicator:doc(Db, DocId) of
- {ok, DocInfo} -> send_json(Req, DocInfo);
+ ok = fabric2_db:check_is_member(Db),
+ case couch_replicator:doc(Db, DocId) of
+ {ok, DocInfo} -> send_json(Req, DocInfo);
{error, not_found} -> throw(not_found)
- end
- catch
- error:database_does_not_exist ->
- throw(not_found)
- end.
-
+ end
+ catch
+ error:database_does_not_exist ->
+ throw(not_found)
+ end.
parse_unquoted_docs_path([_, _ | _] = Unquoted) ->
DbAndAfter = lists:dropwhile(fun(E) -> E =/= ?REP_DB_NAME end, Unquoted),
@@ -147,22 +154,29 @@ parse_unquoted_docs_path([_, _ | _] = Unquoted) ->
{db_and_doc, filename:join(BeforeRDb ++ [?REP_DB_NAME]), DocId}
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
unquoted_scheduler_docs_path_test_() ->
- [?_assertEqual(Res, parse_unquoted_docs_path(Path)) || {Res, Path} <- [
- {{error, invalid}, [<<"a">>,<< "b">>]},
- {{db_only, <<"a/_replicator">>}, [<<"a">>, ?REP_DB_NAME]},
- {{db_only, <<"a/b/_replicator">>}, [<<"a">>, <<"b">>,
- ?REP_DB_NAME]},
- {{db_and_doc, <<"_replicator">>, <<"x">>},
- [?REP_DB_NAME, <<"x">>]},
- {{db_and_doc, <<"a/_replicator">>, <<"x">>}, [<<"a">>,
- ?REP_DB_NAME, <<"x">>]},
- {{error, invalid}, [<<"a/_replicator">>,<<"x">>]}
- ]].
+ [
+ ?_assertEqual(Res, parse_unquoted_docs_path(Path))
+ || {Res, Path} <- [
+ {{error, invalid}, [<<"a">>, <<"b">>]},
+ {{db_only, <<"a/_replicator">>}, [<<"a">>, ?REP_DB_NAME]},
+ {{db_only, <<"a/b/_replicator">>}, [
+ <<"a">>,
+ <<"b">>,
+ ?REP_DB_NAME
+ ]},
+ {{db_and_doc, <<"_replicator">>, <<"x">>}, [?REP_DB_NAME, <<"x">>]},
+ {{db_and_doc, <<"a/_replicator">>, <<"x">>}, [
+ <<"a">>,
+ ?REP_DB_NAME,
+ <<"x">>
+ ]},
+ {{error, invalid}, [<<"a/_replicator">>, <<"x">>]}
+ ]
+ ].
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index b78140432..434e444aa 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -39,7 +39,6 @@ replication_id(#{?OPTIONS := Options} = Rep) ->
RepId = iolist_to_binary([BaseId, ExtId]),
{RepId, BaseId}.
-
% Versioned clauses for generating replication IDs.
% If a change is made to how replications are identified,
% please add a new clause and increase ?REP_ID_VERSION.
@@ -49,37 +48,34 @@ base_id(#{?SOURCE := Src, ?TARGET := Tgt} = Rep, 4) ->
SrcInfo = get_v4_endpoint(Src),
TgtInfo = get_v4_endpoint(Tgt),
maybe_append_filters([UUID, SrcInfo, TgtInfo], Rep);
-
base_id(#{?SOURCE := Src0, ?TARGET := Tgt0} = Rep, 3) ->
UUID = couch_server:get_uuid(),
Src = get_rep_endpoint(Src0),
Tgt = get_rep_endpoint(Tgt0),
maybe_append_filters([UUID, Src, Tgt], Rep);
-
base_id(#{?SOURCE := Src0, ?TARGET := Tgt0} = Rep, 2) ->
{ok, HostName} = inet:gethostname(),
- Port = case (catch mochiweb_socket_server:get(chttpd, port)) of
- P when is_number(P) ->
- P;
- _ ->
- % On restart we might be called before the couch_httpd process is
- % started.
- % TODO: we might be under an SSL socket server only, or both under
- % SSL and a non-SSL socket.
- % ... mochiweb_socket_server:get(https, port)
- list_to_integer(config:get("httpd", "port", "5984"))
- end,
+ Port =
+ case (catch mochiweb_socket_server:get(chttpd, port)) of
+ P when is_number(P) ->
+ P;
+ _ ->
+ % On restart we might be called before the couch_httpd process is
+ % started.
+ % TODO: we might be under an SSL socket server only, or both under
+ % SSL and a non-SSL socket.
+ % ... mochiweb_socket_server:get(https, port)
+ list_to_integer(config:get("httpd", "port", "5984"))
+ end,
Src = get_rep_endpoint(Src0),
Tgt = get_rep_endpoint(Tgt0),
maybe_append_filters([HostName, Port, Src, Tgt], Rep);
-
base_id(#{?SOURCE := Src0, ?TARGET := Tgt0} = Rep, 1) ->
{ok, HostName} = inet:gethostname(),
Src = get_rep_endpoint(Src0),
Tgt = get_rep_endpoint(Tgt0),
maybe_append_filters([HostName, Src, Tgt], Rep).
-
-spec job_id(#{}, binary() | null, binary() | null) -> binary().
job_id(#{} = Rep, null = _DbUUID, null = _DocId) ->
#{
@@ -95,16 +91,13 @@ job_id(#{} = Rep, null = _DbUUID, null = _DocId) ->
Opts = maybe_append_options(UseOpts, Options),
IdParts = [UUID, SrcInfo, TgtInfo, UserName, Opts],
maybe_append_filters(IdParts, Rep, false);
-
job_id(#{} = _Rep, DbUUID, DocId) when is_binary(DbUUID), is_binary(DocId) ->
job_id(DbUUID, DocId).
-
-spec job_id(binary(), binary()) -> binary().
job_id(DbUUID, DocId) when is_binary(DbUUID), is_binary(DocId) ->
<<DbUUID/binary, "|", DocId/binary>>.
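
An illustrative evaluation of the clause above (values invented): the job ID is just the database UUID and the document ID joined with a pipe:

    %% job_id(<<"dbuuid">>, <<"mydoc">>) =:= <<"dbuuid|mydoc">>
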
-
-spec convert(binary()) -> binary().
convert(Id0) when is_binary(Id0) ->
% Spaces can result from mochiweb incorrectly unquoting + characters from
@@ -112,75 +105,78 @@ convert(Id0) when is_binary(Id0) ->
% users to url encode + characters.
binary:replace(Id0, <<" ">>, <<"+">>, [global]).
-
% Private functions
maybe_append_filters(Base, #{} = Rep) ->
maybe_append_filters(Base, Rep, true).
-
maybe_append_filters(Base, #{} = Rep, FetchFilter) ->
#{
?SOURCE := Source,
?OPTIONS := Options
} = Rep,
- Base2 = Base ++
- case couch_replicator_filters:parse(Options) of
- {ok, nil} ->
- [];
- {ok, {view, Filter, QueryParams}} ->
- [Filter, QueryParams];
- {ok, {user, {Doc, Filter}, QueryParams}} when FetchFilter =:= true ->
- case couch_replicator_filters:fetch(Doc, Filter, Source) of
- {ok, Code} ->
- [Code, QueryParams];
- {error, Error} ->
- throw({filter_fetch_error, Error})
- end;
- {ok, {user, {Doc, Filter}, QueryParams}} when FetchFilter =:= false ->
- [Doc, Filter, QueryParams];
- {ok, {docids, DocIds}} ->
- [DocIds];
- {ok, {mango, Selector}} ->
- [Selector];
- {error, FilterParseError} ->
- throw({error, FilterParseError})
- end,
+ Base2 =
+ Base ++
+ case couch_replicator_filters:parse(Options) of
+ {ok, nil} ->
+ [];
+ {ok, {view, Filter, QueryParams}} ->
+ [Filter, QueryParams];
+ {ok, {user, {Doc, Filter}, QueryParams}} when FetchFilter =:= true ->
+ case couch_replicator_filters:fetch(Doc, Filter, Source) of
+ {ok, Code} ->
+ [Code, QueryParams];
+ {error, Error} ->
+ throw({filter_fetch_error, Error})
+ end;
+ {ok, {user, {Doc, Filter}, QueryParams}} when FetchFilter =:= false ->
+ [Doc, Filter, QueryParams];
+ {ok, {docids, DocIds}} ->
+ [DocIds];
+ {ok, {mango, Selector}} ->
+ [Selector];
+ {error, FilterParseError} ->
+ throw({error, FilterParseError})
+ end,
Res = couch_util:to_hex(couch_hash:md5_hash(term_to_binary(Base2))),
list_to_binary(Res).
-
maybe_append_options(Options, #{} = RepOptions) ->
- lists:foldl(fun(Option, Acc) ->
- Acc ++
- case maps:get(Option, RepOptions, false) of
- true -> "+" ++ binary_to_list(Option);
- false -> ""
- end
- end, [], Options).
-
+ lists:foldl(
+ fun(Option, Acc) ->
+ Acc ++
+ case maps:get(Option, RepOptions, false) of
+ true -> "+" ++ binary_to_list(Option);
+ false -> ""
+ end
+ end,
+ [],
+ Options
+ ).
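
A hedged sketch of what the fold above yields; the option names and map contents are invented, and only options whose value is true contribute a "+name" suffix:

    %% maybe_append_options([<<"continuous">>, <<"create_target">>],
    %%                      #{<<"continuous">> => true}) =:= "+continuous"
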
get_rep_endpoint(#{<<"url">> := Url0, <<"headers">> := Headers0}) ->
% We turn everything to lists and proplists to calculate the same
% replication ID as CouchDB <= 3.x
Url = binary_to_list(Url0),
- Headers1 = maps:fold(fun(K, V, Acc) ->
- [{binary_to_list(K), binary_to_list(V)} | Acc]
- end, [], Headers0),
+ Headers1 = maps:fold(
+ fun(K, V, Acc) ->
+ [{binary_to_list(K), binary_to_list(V)} | Acc]
+ end,
+ [],
+ Headers0
+ ),
Headers2 = lists:keysort(1, Headers1),
DefaultHeaders = (#httpdb{})#httpdb.headers,
{remote, Url, Headers2 -- DefaultHeaders}.
-
get_v4_endpoint(#{} = HttpDb) ->
{remote, Url, Headers} = get_rep_endpoint(HttpDb),
{User, _} = couch_replicator_utils:get_basic_auth_creds(HttpDb),
{Host, NonDefaultPort, Path} = get_v4_url_info(Url),
- OAuth = undefined, % Keep this to ensure checkpoints don't change
+ % Keep this to ensure checkpoints don't change
+ OAuth = undefined,
{remote, User, Host, NonDefaultPort, Path, Headers, OAuth}.
-
-
get_v4_url_info(Url) when is_binary(Url) ->
get_v4_url_info(binary_to_list(Url));
get_v4_url_info(Url) ->
@@ -200,7 +196,6 @@ get_v4_url_info(Url) ->
{Host, NonDefaultPort, Path}
end.
-
get_non_default_port(https, 443) ->
default;
get_non_default_port(http, 80) ->
@@ -210,71 +205,73 @@ get_non_default_port(http, 5984) ->
get_non_default_port(_Schema, Port) ->
Port.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
-
replication_id_convert_test_() ->
- [?_assertEqual(Expected, convert(Id)) || {Expected, Id} <- [
- {<<"abc">>, <<"abc">>},
- {<<"abc+x">>, <<"abc+x">>},
- {<<"abc+x">>, <<"abc x">>},
- {<<"abc+x+y">>, <<"abc+x+y">>},
- {<<"abc+x+y">>, <<"abc x y">>}
- ]].
-
+ [
+ ?_assertEqual(Expected, convert(Id))
+ || {Expected, Id} <- [
+ {<<"abc">>, <<"abc">>},
+ {<<"abc+x">>, <<"abc+x">>},
+ {<<"abc+x">>, <<"abc x">>},
+ {<<"abc+x+y">>, <<"abc+x+y">>},
+ {<<"abc+x+y">>, <<"abc x y">>}
+ ]
+ ].
http_v4_endpoint_test_() ->
- [?_assertMatch({remote, User, Host, Port, Path, HeadersNoAuth, undefined},
- get_v4_endpoint(HttpDb)) ||
- {{User, Host, Port, Path, HeadersNoAuth}, HttpDb} <- [
- {
- {undefined, "host", default, "/", []},
- httpdb("http://host")
- },
- {
- {undefined, "host", default, "/", []},
- httpdb("https://host")
- },
- {
- {undefined, "host", default, "/", []},
- httpdb("http://host:5984")
- },
- {
- {undefined, "host", 1, "/", []},
- httpdb("http://host:1")
- },
- {
- {undefined, "host", 2, "/", []},
- httpdb("https://host:2")
- },
- {
- {undefined, "host", default, "/", [{"h", "v"}]},
- httpdb("http://host", undefined, undefined, #{"h" => "v"})
- },
- {
- {undefined, "host", default, "/a/b", []},
- httpdb("http://host/a/b")
- },
- {
- {"user", "host", default, "/", []},
- httpdb("http://host", "user", "pass")
- },
- {
- {"user", "host", default, "/", []},
- httpdb("http://host", "user", "newpass")
- },
- {
- {"user2", "host", default, "/", [{"h", "v"}]},
- httpdb("http://host", "user2", "pass2", #{"h" => "v"})
- }
+ [
+ ?_assertMatch(
+ {remote, User, Host, Port, Path, HeadersNoAuth, undefined},
+ get_v4_endpoint(HttpDb)
+ )
+ || {{User, Host, Port, Path, HeadersNoAuth}, HttpDb} <- [
+ {
+ {undefined, "host", default, "/", []},
+ httpdb("http://host")
+ },
+ {
+ {undefined, "host", default, "/", []},
+ httpdb("https://host")
+ },
+ {
+ {undefined, "host", default, "/", []},
+ httpdb("http://host:5984")
+ },
+ {
+ {undefined, "host", 1, "/", []},
+ httpdb("http://host:1")
+ },
+ {
+ {undefined, "host", 2, "/", []},
+ httpdb("https://host:2")
+ },
+ {
+ {undefined, "host", default, "/", [{"h", "v"}]},
+ httpdb("http://host", undefined, undefined, #{"h" => "v"})
+ },
+ {
+ {undefined, "host", default, "/a/b", []},
+ httpdb("http://host/a/b")
+ },
+ {
+ {"user", "host", default, "/", []},
+ httpdb("http://host", "user", "pass")
+ },
+ {
+ {"user", "host", default, "/", []},
+ httpdb("http://host", "user", "newpass")
+ },
+ {
+ {"user2", "host", default, "/", [{"h", "v"}]},
+ httpdb("http://host", "user2", "pass2", #{"h" => "v"})
+ }
]
].
-
httpdb(Url) ->
#{
<<"url">> => list_to_binary(Url),
@@ -282,7 +279,6 @@ httpdb(Url) ->
<<"headers">> => #{}
}.
-
httpdb(Url, User, Pass) ->
#{
<<"url">> => list_to_binary(Url),
@@ -295,18 +291,21 @@ httpdb(Url, User, Pass) ->
<<"headers">> => #{}
}.
-
httpdb(Url, User, Pass, #{} = Headers) ->
- HttpDb1 = case {User, Pass} of
- {undefined, undefined} -> httpdb(Url);
- {User, Pass} -> httpdb(Url, User, Pass)
- end,
- Headers1 = maps:fold(fun(K, V, Acc) ->
- Acc#{list_to_binary(K) => list_to_binary(V)}
- end, #{}, Headers),
+ HttpDb1 =
+ case {User, Pass} of
+ {undefined, undefined} -> httpdb(Url);
+ {User, Pass} -> httpdb(Url, User, Pass)
+ end,
+ Headers1 = maps:fold(
+ fun(K, V, Acc) ->
+ Acc#{list_to_binary(K) => list_to_binary(V)}
+ end,
+ #{},
+ Headers
+ ),
HttpDb1#{<<"headers">> => Headers1}.
-
version4_matches_couchdb3_test_() ->
{
foreach,
@@ -317,22 +316,22 @@ version4_matches_couchdb3_test_() ->
]
}.
-
setup() ->
meck:expect(config, get, fun(_, _, Default) -> Default end).
-
teardown(_) ->
meck:unload().
-
id_matches_couchdb3(_) ->
- {ok, Rep} = couch_replicator_parse:parse_rep(#{
- <<"source">> => <<"http://adm:pass@127.0.0.1/abc">>,
- <<"target">> => <<"http://adm:pass@127.0.0.1/xyz">>,
- <<"create_target">> => true,
- <<"continuous">> => true
- }, null),
+ {ok, Rep} = couch_replicator_parse:parse_rep(
+ #{
+ <<"source">> => <<"http://adm:pass@127.0.0.1/abc">>,
+ <<"target">> => <<"http://adm:pass@127.0.0.1/xyz">>,
+ <<"create_target">> => true,
+ <<"continuous">> => true
+ },
+ null
+ ),
meck:expect(couch_server, get_uuid, 0, "somefixedid"),
{RepId, BaseId} = replication_id(Rep),
% Calculated on CouchDB 3.x
@@ -341,11 +340,13 @@ id_matches_couchdb3(_) ->
?assertEqual(RepId3x, RepId),
?assertEqual(BaseId3x, BaseId).
-
auth_props(User, Pass) when is_list(User), is_list(Pass) ->
- [{<<"basic">>, {[
- {<<"username">>, list_to_binary(User)},
- {<<"password">>, list_to_binary(Pass)}
- ]}}].
+ [
+ {<<"basic">>,
+ {[
+ {<<"username">>, list_to_binary(User)},
+ {<<"password">>, list_to_binary(Pass)}
+ ]}}
+ ].
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_job.erl b/src/couch_replicator/src/couch_replicator_job.erl
index 381fe5739..1281ec5ac 100644
--- a/src/couch_replicator/src/couch_replicator_job.erl
+++ b/src/couch_replicator/src/couch_replicator_job.erl
@@ -12,10 +12,8 @@
-module(couch_replicator_job).
-
-behaviour(gen_server).
-
-export([
start_link/0
]).
@@ -35,13 +33,11 @@
health_threshold/0
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include("couch_replicator.hrl").
-include_lib("kernel/include/logger.hrl").
-
-define(LOWEST_SEQ, 0).
-define(DEFAULT_CHECKPOINT_INTERVAL, 30000).
-define(STARTUP_JITTER_DEFAULT, 5000).
@@ -51,7 +47,6 @@
-define(DEFAULT_MAX_HISTORY, 10).
-define(DEFAULT_STATS_UPDATE_INTERVAL_SEC, 10).
-
-record(rep_state, {
job,
job_data,
@@ -91,16 +86,13 @@
options = #{}
}).
-
start_link() ->
gen_server:start_link(?MODULE, [], []).
-
init(_) ->
process_flag(trap_exit, true),
{ok, delayed_init, 0}.
-
terminate(normal, #rep_state{} = State) ->
#rep_state{
job = Job,
@@ -109,28 +101,27 @@ terminate(normal, #rep_state{} = State) ->
} = State,
ok = complete_job(undefined, Job, JobData, History),
close_endpoints(State);
-
terminate(shutdown, #rep_state{} = State0) ->
% Replication stopped by the job server
State1 = cancel_timers(State0),
- State3 = case do_checkpoint(State1) of
- {ok, State2} ->
- State2;
- Error ->
- ?LOG_ERROR(#{
- what => checkpoint_failure,
- in => replicator,
- jobid => State1#rep_state.id,
- details => Error
- }),
- Msg = "~p : Failed last checkpoint. Job: ~p Error: ~p",
- couch_log:error(Msg, [?MODULE, State1#rep_state.id, Error]),
- State1
- end,
+ State3 =
+ case do_checkpoint(State1) of
+ {ok, State2} ->
+ State2;
+ Error ->
+ ?LOG_ERROR(#{
+ what => checkpoint_failure,
+ in => replicator,
+ jobid => State1#rep_state.id,
+ details => Error
+ }),
+ Msg = "~p : Failed last checkpoint. Job: ~p Error: ~p",
+ couch_log:error(Msg, [?MODULE, State1#rep_state.id, Error]),
+ State1
+ end,
#rep_state{job = Job, job_data = JobData} = State3,
ok = reschedule(undefined, Job, JobData),
ok = close_endpoints(State3);
-
terminate({shutdown, Error}, {init_error, Stack}) ->
% Termination in init, before the job had initialized
case Error of
@@ -150,11 +141,9 @@ terminate({shutdown, Error}, {init_error, Stack}) ->
couch_log:error("~p job failed ~p ~p", [?MODULE, Error, Stack])
end,
ok;
-
terminate({shutdown, finished}, #rep_state{} = State) ->
% Job state was already updated and job is marked as finished
ok = close_endpoints(State);
-
terminate({shutdown, halt}, #rep_state{} = State) ->
% Job is re-enqueued and possibly already running somewhere else
?LOG_ERROR(#{
@@ -164,13 +153,13 @@ terminate({shutdown, halt}, #rep_state{} = State) ->
}),
couch_log:error("~p job ~p halted", [?MODULE, State#rep_state.id]),
ok = close_endpoints(State);
-
terminate(Reason0, #rep_state{} = State0) ->
State = update_job_state(State0),
- Reason = case Reason0 of
- {shutdown, Err} -> Err;
- _ -> Reason0
- end,
+ Reason =
+ case Reason0 of
+ {shutdown, Err} -> Err;
+ _ -> Reason0
+ end,
#rep_state{
id = RepId,
job = Job,
@@ -186,17 +175,17 @@ terminate(Reason0, #rep_state{} = State0) ->
target => Target,
details => Reason
}),
- couch_log:error("Replication `~s` (`~s` -> `~s`) failed: ~p",
- [RepId, Source, Target, Reason]),
+ couch_log:error(
+ "Replication `~s` (`~s` -> `~s`) failed: ~p",
+ [RepId, Source, Target, Reason]
+ ),
ok = reschedule_on_error(undefined, Job, JobData, Reason),
ok = close_endpoints(State).
-
handle_call({add_stats, Stats}, From, State) ->
gen_server:reply(From, ok),
NewStats = couch_replicator_stats:sum_stats(State#rep_state.stats, Stats),
{noreply, State#rep_state{stats = NewStats}};
-
handle_call({report_seq_done, Seq, StatsInc}, From, #rep_state{} = State) ->
#rep_state{
seqs_in_progress = SeqsInProgress,
@@ -205,21 +194,23 @@ handle_call({report_seq_done, Seq, StatsInc}, From, #rep_state{} = State) ->
stats = Stats
} = State,
gen_server:reply(From, ok),
- {NewThroughSeq0, NewSeqsInProgress} = case SeqsInProgress of
- [] ->
- {Seq, []};
- [Seq | Rest] ->
- {Seq, Rest};
- [_ | _] ->
- {ThroughSeq, ordsets:del_element(Seq, SeqsInProgress)}
- end,
+ {NewThroughSeq0, NewSeqsInProgress} =
+ case SeqsInProgress of
+ [] ->
+ {Seq, []};
+ [Seq | Rest] ->
+ {Seq, Rest};
+ [_ | _] ->
+ {ThroughSeq, ordsets:del_element(Seq, SeqsInProgress)}
+ end,
NewHighestDone = lists:max([HighestDone, Seq]),
- NewThroughSeq = case NewSeqsInProgress of
- [] ->
- lists:max([NewThroughSeq0, NewHighestDone]);
- _ ->
- NewThroughSeq0
- end,
+ NewThroughSeq =
+ case NewSeqsInProgress of
+ [] ->
+ lists:max([NewThroughSeq0, NewHighestDone]);
+ _ ->
+ NewThroughSeq0
+ end,
?LOG_DEBUG(#{
what => progress_report,
in => replicator,
@@ -235,12 +226,21 @@ handle_call({report_seq_done, Seq, StatsInc}, From, #rep_state{} = State) ->
},
worker_reported_seq => Seq
}),
- couch_log:debug("Worker reported seq ~p, through seq was ~p, "
+ couch_log:debug(
+ "Worker reported seq ~p, through seq was ~p, "
"new through seq is ~p, highest seq done was ~p, "
"new highest seq done is ~p~n"
"Seqs in progress were: ~p~nSeqs in progress are now: ~p",
- [Seq, ThroughSeq, NewThroughSeq, HighestDone,
- NewHighestDone, SeqsInProgress, NewSeqsInProgress]),
+ [
+ Seq,
+ ThroughSeq,
+ NewThroughSeq,
+ HighestDone,
+ NewHighestDone,
+ SeqsInProgress,
+ NewSeqsInProgress
+ ]
+ ),
NewState = State#rep_state{
stats = couch_replicator_stats:sum_stats(Stats, StatsInc),
current_through_seq = NewThroughSeq,
@@ -248,20 +248,18 @@ handle_call({report_seq_done, Seq, StatsInc}, From, #rep_state{} = State) ->
highest_seq_done = NewHighestDone
},
{noreply, maybe_update_job_state(NewState)};
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
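%% [Editor's note] A worked trace of the through-seq bookkeeping in the
%% report_seq_done clause above, assuming ThroughSeq = 2, HighestDone = 2
%% and SeqsInProgress = [3, 5, 7]:
%%   worker reports 5 -> not the head: through seq stays 2, in progress [3, 7]
%%   worker reports 3 -> head of the ordset: through seq becomes 3, in progress [7]
%%   worker reports 7 -> head, and the set empties: through seq jumps to
%%                       max(7, HighestDone) = 7
%% The checkpointable sequence therefore only advances over contiguously
%% completed work and never past a sequence a worker is still processing.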
-
-handle_cast({report_seq, Seq},
- #rep_state{seqs_in_progress = SeqsInProgress} = State) ->
+handle_cast(
+ {report_seq, Seq},
+ #rep_state{seqs_in_progress = SeqsInProgress} = State
+) ->
NewSeqsInProgress = ordsets:add_element(Seq, SeqsInProgress),
{noreply, State#rep_state{seqs_in_progress = NewSeqsInProgress}};
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info(timeout, delayed_init) ->
try delayed_init() of
{ok, State} -> {noreply, State};
@@ -273,76 +271,71 @@ handle_info(timeout, delayed_init) ->
ShutdownReason = {error, replication_start_error(Error)},
{stop, {shutdown, ShutdownReason}, {init_error, Stack}}
end;
-
handle_info(stats_update, #rep_state{} = State) ->
State1 = cancel_stats_timer(State),
State2 = update_job_state(State1),
{noreply, State2};
-
handle_info(checkpoint, State0) ->
State = cancel_checkpoint_timer(State0),
ok = check_user_filter(State),
case do_checkpoint(State) of
{ok, State1} ->
- couch_stats:increment_counter([couch_replicator, checkpoints,
- success]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ checkpoints,
+ success
+ ]),
{noreply, start_checkpoint_timer(State1)};
Error ->
- couch_stats:increment_counter([couch_replicator, checkpoints,
- failure]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ checkpoints,
+ failure
+ ]),
{stop, Error, State}
end;
-
handle_info(shutdown, St) ->
{stop, shutdown, St};
-
handle_info({'EXIT', Pid, max_backoff}, State) ->
?LOG_ERROR(#{what => max_backoff, in => replicator, pid => Pid}),
couch_log:error("Max backoff reached child process ~p", [Pid]),
{stop, {shutdown, max_backoff}, State};
-
handle_info({'EXIT', Pid, {shutdown, max_backoff}}, State) ->
?LOG_ERROR(#{what => max_backoff, in => replicator, pid => Pid}),
couch_log:error("Max backoff reached child process ~p", [Pid]),
{stop, {shutdown, max_backoff}, State};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_reader=Pid} = State) ->
+handle_info({'EXIT', Pid, normal}, #rep_state{changes_reader = Pid} = State) ->
{noreply, State};
-
-handle_info({'EXIT', Pid, Reason0}, #rep_state{changes_reader=Pid} = State) ->
+handle_info({'EXIT', Pid, Reason0}, #rep_state{changes_reader = Pid} = State) ->
couch_stats:increment_counter([couch_replicator, changes_reader_deaths]),
- Reason = case Reason0 of
- {changes_req_failed, _, _} = HttpFail ->
- HttpFail;
- {http_request_failed, _, _, {error, {code, Code}}} ->
- {changes_req_failed, Code};
- {http_request_failed, _, _, {error, Err}} ->
- {changes_req_failed, Err};
- Other ->
- {changes_reader_died, Other}
- end,
+ Reason =
+ case Reason0 of
+ {changes_req_failed, _, _} = HttpFail ->
+ HttpFail;
+ {http_request_failed, _, _, {error, {code, Code}}} ->
+ {changes_req_failed, Code};
+ {http_request_failed, _, _, {error, Err}} ->
+ {changes_req_failed, Err};
+ Other ->
+ {changes_reader_died, Other}
+ end,
?LOG_ERROR(#{what => changes_reader_crash, in => replicator, details => Reason}),
couch_log:error("ChangesReader process died with reason: ~p", [Reason]),
{stop, {shutdown, Reason}, cancel_timers(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_manager=Pid} = State) ->
+handle_info({'EXIT', Pid, normal}, #rep_state{changes_manager = Pid} = State) ->
{noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{changes_manager=Pid} = State) ->
+handle_info({'EXIT', Pid, Reason}, #rep_state{changes_manager = Pid} = State) ->
couch_stats:increment_counter([couch_replicator, changes_manager_deaths]),
?LOG_ERROR(#{what => changes_manager_crash, in => replicator, details => Reason}),
couch_log:error("ChangesManager process died with reason: ~p", [Reason]),
{stop, {shutdown, {changes_manager_died, Reason}}, cancel_timers(State)};
-
-handle_info({'EXIT', Pid, normal}, #rep_state{changes_queue=Pid} = State) ->
+handle_info({'EXIT', Pid, normal}, #rep_state{changes_queue = Pid} = State) ->
{noreply, State};
-
-handle_info({'EXIT', Pid, Reason}, #rep_state{changes_queue=Pid} = State) ->
+handle_info({'EXIT', Pid, Reason}, #rep_state{changes_queue = Pid} = State) ->
couch_stats:increment_counter([couch_replicator, changes_queue_deaths]),
?LOG_ERROR(#{what => changes_queue_crash, in => replicator, details => Reason}),
couch_log:error("ChangesQueue process died with reason: ~p", [Reason]),
{stop, {shutdown, {changes_queue_died, Reason}}, cancel_timers(State)};
-
handle_info({'EXIT', Pid, normal}, #rep_state{workers = Workers} = State) ->
case Workers -- [Pid] of
Workers ->
@@ -364,7 +357,6 @@ handle_info({'EXIT', Pid, normal}, #rep_state{workers = Workers} = State) ->
Workers2 ->
{noreply, State#rep_state{workers = Workers2}}
end;
-
handle_info({'EXIT', Pid, Reason}, #rep_state{workers = Workers} = State) ->
State2 = cancel_timers(State),
case lists:member(Pid, Workers) of
@@ -372,23 +364,23 @@ handle_info({'EXIT', Pid, Reason}, #rep_state{workers = Workers} = State) ->
{stop, {unknown_process_died, Pid, Reason}, State2};
true ->
couch_stats:increment_counter([couch_replicator, worker_deaths]),
- StopReason = case Reason of
- {shutdown, _} = Err ->
- Err;
- Other ->
- ?LOG_ERROR(#{
- what => worker_crash,
- in => replicator,
- pid => Pid,
- details => Reason
- }),
- ErrLog = "Worker ~p died with reason: ~p",
- couch_log:error(ErrLog, [Pid, Reason]),
- {worker_died, Pid, Other}
- end,
+ StopReason =
+ case Reason of
+ {shutdown, _} = Err ->
+ Err;
+ Other ->
+ ?LOG_ERROR(#{
+ what => worker_crash,
+ in => replicator,
+ pid => Pid,
+ details => Reason
+ }),
+ ErrLog = "Worker ~p died with reason: ~p",
+ couch_log:error(ErrLog, [Pid, Reason]),
+ {worker_died, Pid, Other}
+ end,
{stop, StopReason, State2}
end;
-
handle_info({Ref, ready}, St) when is_reference(Ref) ->
?LOG_NOTICE(#{
what => spurious_future_ready_message,
@@ -398,11 +390,9 @@ handle_info({Ref, ready}, St) when is_reference(Ref) ->
LogMsg = "~p : spurious erlfdb future ready message ~p",
couch_log:notice(LogMsg, [?MODULE, Ref]),
{noreply, St};
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
format_status(_Opt, [_PDict, State]) ->
#rep_state{
id = Id,
@@ -433,22 +423,21 @@ format_status(_Opt, [_PDict, State]) ->
{highest_seq_done, HighestSeqDone}
].
-
-code_change(_OldVsn, #rep_state{}=State, _Extra) ->
+code_change(_OldVsn, #rep_state{} = State, _Extra) ->
{ok, State}.
-
accept() ->
couch_stats:increment_counter([couch_replicator, jobs, accepts]),
Now = erlang:system_time(second),
case couch_replicator_jobs:accept_job(Now + 5) of
{ok, Job, #{?REP := Rep} = JobData} ->
- Normal = case Rep of
- #{?OPTIONS := #{} = Options} ->
- not maps:get(<<"continuous">>, Options, false);
- _ ->
- true
- end,
+ Normal =
+ case Rep of
+ #{?OPTIONS := #{} = Options} ->
+ not maps:get(<<"continuous">>, Options, false);
+ _ ->
+ true
+ end,
couch_replicator_job_server:accepted(self(), Normal),
{ok, Job, JobData};
{error, not_found} ->
@@ -456,16 +445,17 @@ accept() ->
?MODULE:accept()
end.
-
% Health threshold is the minimum amount of time an unhealthy job should run
% without crashing before it is considered to be healthy again. HealthThreshold
% should not be 0, as jobs could start and immediately crash, and it shouldn't
% be infinity, since then consecutive crashes would accumulate forever even if
% the job is back to normal.
health_threshold() ->
- config:get_integer("replicator", "health_threshold_sec",
- ?DEFAULT_HEALTH_THRESHOLD_SEC).
-
+ config:get_integer(
+ "replicator",
+ "health_threshold_sec",
+ ?DEFAULT_HEALTH_THRESHOLD_SEC
+ ).
delayed_init() ->
{ok, Job, JobData} = accept(),
@@ -491,7 +481,6 @@ delayed_init() ->
{stop, {shutdown, Reason}, {init_error, Stack}}
end.
-
do_init(Job, #{} = JobData) ->
couch_stats:increment_counter([couch_replicator, jobs, starts]),
% This may make a network request, then may fail and reschedule the job
@@ -555,25 +544,32 @@ do_init(Job, #{} = JobData) ->
% the target, and for the missing ones, it copies them from the source to
% the target.
MaxConns = maps:get(<<"http_connections">>, Options),
- Workers = lists:map(fun(_) ->
- couch_stats:increment_counter([couch_replicator, workers_started]),
- {ok, Pid} = couch_replicator_worker:start_link(self(), Source, Target,
- ChangesManager, MaxConns),
- Pid
- end, lists:seq(1, NumWorkers)),
+ Workers = lists:map(
+ fun(_) ->
+ couch_stats:increment_counter([couch_replicator, workers_started]),
+ {ok, Pid} = couch_replicator_worker:start_link(
+ self(),
+ Source,
+ Target,
+ ChangesManager,
+ MaxConns
+ ),
+ Pid
+ end,
+ lists:seq(1, NumWorkers)
+ ),
log_replication_start(State),
State1 = State#rep_state{
- changes_queue = ChangesQueue,
- changes_manager = ChangesManager,
- changes_reader = ChangesReader,
- workers = Workers
+ changes_queue = ChangesQueue,
+ changes_manager = ChangesManager,
+ changes_reader = ChangesReader,
+ workers = Workers
},
update_job_state(State1).
-
init_job_data(#{jtx := true} = JTx, Job, #{} = JobData, RepId, BaseId) ->
#{
?REP := Rep,
@@ -593,17 +589,18 @@ init_job_data(#{jtx := true} = JTx, Job, #{} = JobData, RepId, BaseId) ->
?REP_PID := list_to_binary(pid_to_list(self())),
?LAST_UPDATED := Now
},
- JobData2 = case is_binary(OldRepId) andalso OldRepId =/= RepId of
- true ->
- % Handle Replication ID change
- ok = couch_replicator_jobs:clear_old_rep_id(JTx, JobId, OldRepId),
- JobData1#{
- ?REP_STATS := #{},
- ?JOB_HISTORY := []
- };
- false ->
- JobData1
- end,
+ JobData2 =
+ case is_binary(OldRepId) andalso OldRepId =/= RepId of
+ true ->
+ % Handle Replication ID change
+ ok = couch_replicator_jobs:clear_old_rep_id(JTx, JobId, OldRepId),
+ JobData1#{
+ ?REP_STATS := #{},
+ ?JOB_HISTORY := []
+ };
+ false ->
+ JobData1
+ end,
JobData3 = hist_append(?HIST_STARTED, Now, JobData2, undefined),
case check_ownership(JTx, Job, JobData3) of
owner ->
@@ -614,7 +611,6 @@ init_job_data(#{jtx := true} = JTx, Job, #{} = JobData, RepId, BaseId) ->
{Job, JobData3, not_owner}
end.
-
check_ownership(#{jtx := true} = JTx, Job, JobData) ->
#{
?REP_ID := RepId,
@@ -629,7 +625,8 @@ check_ownership(#{jtx := true} = JTx, Job, JobData) ->
{error, {replication_job_conflict, OtherJobId}} ->
case couch_replicator_jobs:get_job_data(JTx, OtherJobId) of
{ok, #{?STATE := S, ?DB_NAME := null}} when
- S == ?ST_RUNNING; S == ?ST_PENDING ->
+ S == ?ST_RUNNING; S == ?ST_PENDING
+ ->
% Conflicting job is a transient job, not associated with a
% _replicator doc, so we let this job retry. This is also
            % partly done for compatibility with previous replicator
@@ -638,7 +635,8 @@ check_ownership(#{jtx := true} = JTx, Job, JobData) ->
reschedule_on_error(JTx, Job, JobData, Error),
not_owner;
{ok, #{?STATE := S, ?DB_NAME := <<_/binary>>}} when
- S == ?ST_RUNNING; S == ?ST_PENDING ->
+ S == ?ST_RUNNING; S == ?ST_PENDING
+ ->
% Conflicting job is a permanent replication job, so this
% job is marked as failed.
Error = <<"Duplicate job running: ", OtherJobId/binary>>,
@@ -653,8 +651,12 @@ check_ownership(#{jtx := true} = JTx, Job, JobData) ->
replication_id => RepId
}),
LogMsg = "~p : Job ~p usurping job ~p for replication ~p",
- couch_log:warning(LogMsg, [?MODULE, JobId, OtherJobId,
- RepId]),
+ couch_log:warning(LogMsg, [
+ ?MODULE,
+ JobId,
+ OtherJobId,
+ RepId
+ ]),
couch_replicator_jobs:update_rep_id(JTx, JobId, RepId),
owner;
{error, not_found} ->
@@ -671,13 +673,11 @@ check_ownership(#{jtx := true} = JTx, Job, JobData) ->
end
end.
-
update_job_data(Tx, #rep_state{} = State) ->
#rep_state{job = Job, job_data = JobData} = State,
{Job1, JobData1} = update_job_data(Tx, Job, JobData),
State#rep_state{job = Job1, job_data = JobData1}.
-
update_job_data(Tx, Job, #{} = JobData) ->
case couch_replicator_jobs:update_job_data(Tx, Job, JobData) of
{ok, Job1} ->
@@ -686,7 +686,6 @@ update_job_data(Tx, Job, #{} = JobData) ->
exit({shutdown, halt})
end.
-
update_active_task_info(#rep_state{} = State) ->
#rep_state{
job_data = JobData,
@@ -729,14 +728,13 @@ update_active_task_info(#rep_state{} = State) ->
JobData1 = fabric2_active_tasks:update_active_task_info(JobData, Info),
State#rep_state{job_data = JobData1}.
-
% Transient jobs don't get rescheduled on error with the exception of
% max_backoff errors.
%
reschedule_on_error(JTx, Job, #{?DB_NAME := null} = JobData, Error) when
- Error =/= max_backoff ->
+ Error =/= max_backoff
+->
fail_job(JTx, Job, JobData, Error);
-
reschedule_on_error(JTx, Job, #{} = JobData0, Error0) ->
Error = error_info(Error0),
@@ -764,7 +762,6 @@ reschedule_on_error(JTx, Job, #{} = JobData0, Error0) ->
{error, halt} -> exit({shutdown, halt})
end.
-
reschedule(JTx, Job, #{} = JobData) ->
Now = erlang:system_time(second),
@@ -787,7 +784,6 @@ reschedule(JTx, Job, #{} = JobData) ->
{error, halt} -> exit({shutdown, halt})
end.
-
fail_job(JTx, Job, #{} = JobData, Error0) ->
Error = error_info(Error0),
@@ -820,7 +816,6 @@ fail_job(JTx, Job, #{} = JobData, Error0) ->
exit({shutdown, halt})
end.
-
complete_job(JTx, Job, #{} = JobData, CheckpointHistory) ->
#{
?DB_NAME := Db,
@@ -855,7 +850,6 @@ complete_job(JTx, Job, #{} = JobData, CheckpointHistory) ->
exit({shutdown, halt})
end.
-
error_info(Error0) ->
case Error0 of
<<_/binary>> ->
@@ -882,7 +876,6 @@ error_info(Error0) ->
couch_replicator_utils:rep_error_to_binary(Error0)
end.
-
get_rep_id(JTx, Job, #{} = JobData) ->
#{?REP := Rep} = JobData,
try
@@ -893,7 +886,6 @@ get_rep_id(JTx, Job, #{} = JobData) ->
exit({shutdown, finished})
end.
-
% After a job runs continuously for some time we consider it "healed" and reset
% its consecutive error count.
maybe_heal(#{} = JobData, Now) ->
@@ -903,7 +895,6 @@ maybe_heal(#{} = JobData, Now) ->
false -> JobData
end.
-
get_backoff_time(ErrCnt) ->
Max = min(max_backoff_penalty_sec(), 3600 * 24 * 30),
Min = max(min_backoff_penalty_sec(), 2),
@@ -918,35 +909,30 @@ get_backoff_time(ErrCnt) ->
WaitJittered = Wait * 0.75 + rand:uniform(trunc(Wait * 0.25) + 1),
erlang:system_time(second) + trunc(WaitJittered).
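%% [Editor's note] A minimal sketch of the jitter step above; the growth of
%% Wait with ErrCnt happens in the part of get_backoff_time/1 elided from
%% this hunk. Assuming Wait = 64 (seconds):
%%   WaitJittered = 64 * 0.75 + rand:uniform(trunc(64 * 0.25) + 1)
%% which is 48.0 plus a random integer in 1..17, i.e. a penalty between 49
%% and 65 seconds. The penalty keeps at least 75% of the deterministic wait
%% and randomizes the rest, so jobs that crashed together do not all retry
%% at the same instant.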
-
headers_strip_creds([], Acc) ->
lists:reverse(Acc);
-
headers_strip_creds([{Key, Value0} | Rest], Acc) ->
- Value = case string:to_lower(Key) of
- "authorization" -> "****";
- _ -> Value0
- end,
+ Value =
+ case string:to_lower(Key) of
+ "authorization" -> "****";
+ _ -> Value0
+ end,
headers_strip_creds(Rest, [{Key, Value} | Acc]).
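%% [Editor's note] Illustration of the redaction above, e.g. in a shell:
%%   1> headers_strip_creds([{"Authorization", "Basic c2VjcmV0"},
%%                           {"Accept", "application/json"}], []).
%%   [{"Authorization","****"},{"Accept","application/json"}]
%% The comparison is case-insensitive because the key is lowercased first,
%% while the original key casing is preserved in the result.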
-
httpdb_strip_creds(#httpdb{url = Url, headers = Headers} = HttpDb) ->
HttpDb#httpdb{
url = couch_util:url_strip_password(Url),
headers = headers_strip_creds(Headers, [])
};
-
httpdb_strip_creds(LocalDb) ->
LocalDb.
-
state_strip_creds(#rep_state{source = Source, target = Target} = State) ->
State#rep_state{
source = httpdb_strip_creds(Source),
target = httpdb_strip_creds(Target)
}.
-
adjust_maxconn(Src = #{<<"http_connections">> := 1}, RepId) ->
?LOG_NOTICE(#{
what => minimum_source_connections_override,
@@ -957,31 +943,41 @@ adjust_maxconn(Src = #{<<"http_connections">> := 1}, RepId) ->
Msg = "Adjusting minimum number of HTTP source connections to 2 for ~p",
couch_log:notice(Msg, [RepId]),
Src#{<<"http_connections">> := 2};
-
adjust_maxconn(Src, _RepId) ->
Src.
-
-do_last_checkpoint(#rep_state{seqs_in_progress = [],
- highest_seq_done = {_Ts, ?LOWEST_SEQ}} = State) ->
+do_last_checkpoint(
+ #rep_state{
+ seqs_in_progress = [],
+ highest_seq_done = {_Ts, ?LOWEST_SEQ}
+ } = State
+) ->
{stop, normal, cancel_timers(State)};
-
-do_last_checkpoint(#rep_state{seqs_in_progress = [],
- highest_seq_done = Seq} = State) ->
+do_last_checkpoint(
+ #rep_state{
+ seqs_in_progress = [],
+ highest_seq_done = Seq
+ } = State
+) ->
State1 = State#rep_state{current_through_seq = Seq},
State2 = cancel_timers(State1),
case do_checkpoint(State2) of
{ok, State3} ->
- couch_stats:increment_counter([couch_replicator, checkpoints,
- success]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ checkpoints,
+ success
+ ]),
{stop, normal, State3};
Error ->
- couch_stats:increment_counter([couch_replicator, checkpoints,
- failure]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ checkpoints,
+ failure
+ ]),
{stop, Error, State2}
end.
-
start_checkpoint_timer(#rep_state{} = State) ->
CheckpointAfterMSec = State#rep_state.checkpoint_interval,
JobTimeoutMSec = couch_replicator_jobs:get_timeout() * 1000,
@@ -990,33 +986,31 @@ start_checkpoint_timer(#rep_state{} = State) ->
TRef = erlang:send_after(Wait2, self(), checkpoint),
State#rep_state{checkpoint_timer = TRef}.
-
cancel_checkpoint_timer(#rep_state{checkpoint_timer = nil} = State) ->
State;
cancel_checkpoint_timer(#rep_state{checkpoint_timer = Timer} = State) ->
erlang:cancel_timer(Timer),
State#rep_state{checkpoint_timer = nil}.
-
start_stats_timer(#rep_state{} = State) ->
MSec = stats_update_interval_sec() * 1000,
TRef = erlang:send_after(MSec, self(), stats_update),
State#rep_state{stats_timer = TRef}.
-
cancel_stats_timer(#rep_state{stats_timer = nil} = State) ->
State;
cancel_stats_timer(#rep_state{stats_timer = Timer} = State) ->
erlang:cancel_timer(Timer),
- receive stats_update -> ok after 0 -> ok end,
+ receive
+ stats_update -> ok
+ after 0 -> ok
+ end,
State#rep_state{stats_timer = nil}.
-
cancel_timers(#rep_state{} = State) ->
State1 = cancel_checkpoint_timer(State),
cancel_stats_timer(State1).
-
init_state(#{} = Job, #{} = JobData) ->
#{
?REP := Rep,
@@ -1050,31 +1044,36 @@ init_state(#{} = Job, #{} = JobData) ->
{ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source),
{ok, TargetInfo} = couch_replicator_api_wrap:get_db_info(Target),
- [SourceLog, TargetLog] = find_and_migrate_logs([Source, Target], Rep,
- BaseId),
+ [SourceLog, TargetLog] = find_and_migrate_logs(
+ [Source, Target],
+ Rep,
+ BaseId
+ ),
{StartSeq0, History, MatchedSessionIds} = compare_replication_logs(SourceLog, TargetLog),
- if not MatchedSessionIds ->
- ?LOG_NOTICE(#{
- what => session_history_mismatch,
- in => replicator,
- calculated_start_seq => StartSeq0,
- source => couch_replicator_api_wrap:db_uri(Source),
- target => couch_replicator_api_wrap:db_uri(Target),
- replication_id => Id,
- details => "scanned histories to find common ancestor"
- });
- true ->
- ok
+ if
+ not MatchedSessionIds ->
+ ?LOG_NOTICE(#{
+ what => session_history_mismatch,
+ in => replicator,
+ calculated_start_seq => StartSeq0,
+ source => couch_replicator_api_wrap:db_uri(Source),
+ target => couch_replicator_api_wrap:db_uri(Target),
+ replication_id => Id,
+ details => "scanned histories to find common ancestor"
+ });
+ true ->
+ ok
end,
#{?REP_STATS := Stats0} = JobData,
Stats1 = couch_replicator_stats:new(Stats0),
- HistoryStats = case History of
- [{[_ | _] = HProps} | _] -> couch_replicator_stats:new(HProps);
- _ -> couch_replicator_stats:new()
- end,
+ HistoryStats =
+ case History of
+ [{[_ | _] = HProps} | _] -> couch_replicator_stats:new(HProps);
+ _ -> couch_replicator_stats:new()
+ end,
Stats2 = couch_replicator_stats:max_stats(Stats1, HistoryStats),
StartSeq1 = maps:get(<<"since_seq">>, Options, StartSeq0),
@@ -1082,7 +1081,7 @@ init_state(#{} = Job, #{} = JobData) ->
SourceSeq = get_value(<<"update_seq">>, SourceInfo, ?LOWEST_SEQ),
- #doc{body={CheckpointHistory}} = SourceLog,
+ #doc{body = {CheckpointHistory}} = SourceLog,
State = #rep_state{
job = Job,
@@ -1117,35 +1116,50 @@ init_state(#{} = Job, #{} = JobData) ->
},
start_checkpoint_timer(State).
-
find_and_migrate_logs(DbList, #{} = Rep, BaseId) when is_binary(BaseId) ->
LogId = ?l2b(?LOCAL_DOC_PREFIX ++ BaseId),
fold_replication_logs(DbList, ?REP_ID_VERSION, LogId, LogId, Rep, []).
-
fold_replication_logs([], _Vsn, _LogId, _NewId, _Rep, Acc) ->
lists:reverse(Acc);
-
fold_replication_logs([Db | Rest] = Dbs, Vsn, LogId, NewId, #{} = Rep, Acc) ->
case couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body]) of
{error, <<"not_found">>} when Vsn > 1 ->
OldRepId = couch_replicator_ids:base_id(Rep, Vsn - 1),
- fold_replication_logs(Dbs, Vsn - 1,
- ?l2b(?LOCAL_DOC_PREFIX ++ OldRepId), NewId, Rep, Acc);
+ fold_replication_logs(
+ Dbs,
+ Vsn - 1,
+ ?l2b(?LOCAL_DOC_PREFIX ++ OldRepId),
+ NewId,
+ Rep,
+ Acc
+ );
{error, <<"not_found">>} ->
- fold_replication_logs(Rest, ?REP_ID_VERSION, NewId, NewId, Rep,
- [#doc{id = NewId} | Acc]);
+ fold_replication_logs(
+ Rest,
+ ?REP_ID_VERSION,
+ NewId,
+ NewId,
+ Rep,
+ [#doc{id = NewId} | Acc]
+ );
{ok, Doc} when LogId =:= NewId ->
fold_replication_logs(
- Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [Doc | Acc]);
+ Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [Doc | Acc]
+ );
{ok, Doc} ->
MigratedLog = #doc{id = NewId, body = Doc#doc.body},
maybe_save_migrated_log(Rep, Db, MigratedLog, Doc#doc.id),
- fold_replication_logs(Rest, ?REP_ID_VERSION, NewId, NewId, Rep,
- [MigratedLog | Acc])
+ fold_replication_logs(
+ Rest,
+ ?REP_ID_VERSION,
+ NewId,
+ NewId,
+ Rep,
+ [MigratedLog | Acc]
+ )
end.
-
maybe_save_migrated_log(#{?OPTIONS := Options}, Db, #doc{} = Doc, OldId) ->
case maps:get(<<"use_checkpoints">>, Options) of
true ->
@@ -1163,13 +1177,11 @@ maybe_save_migrated_log(#{?OPTIONS := Options}, Db, #doc{} = Doc, OldId) ->
ok
end.
-
spawn_changes_manager(Parent, ChangesQueue, BatchSize) ->
spawn_link(fun() ->
changes_manager_loop_open(Parent, ChangesQueue, BatchSize, 1)
end).
-
changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts) ->
receive
{get_changes, From} ->
@@ -1177,32 +1189,35 @@ changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts) ->
closed ->
From ! {closed, self()};
{ok, ChangesOrLastSeqs} ->
- ReportSeq = case lists:last(ChangesOrLastSeqs) of
- {last_seq, Seq} -> {Ts, Seq};
- #doc_info{high_seq = Seq} -> {Ts, Seq}
- end,
- Changes = lists:filter(fun
- (#doc_info{}) -> true;
- ({last_seq, _Seq}) -> false
- end, ChangesOrLastSeqs),
+ ReportSeq =
+ case lists:last(ChangesOrLastSeqs) of
+ {last_seq, Seq} -> {Ts, Seq};
+ #doc_info{high_seq = Seq} -> {Ts, Seq}
+ end,
+ Changes = lists:filter(
+ fun
+ (#doc_info{}) -> true;
+ ({last_seq, _Seq}) -> false
+ end,
+ ChangesOrLastSeqs
+ ),
ok = gen_server:cast(Parent, {report_seq, ReportSeq}),
From ! {changes, self(), Changes, ReportSeq}
end,
changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts + 1)
end.
-
-do_checkpoint(#rep_state{use_checkpoints=false} = State) ->
+do_checkpoint(#rep_state{use_checkpoints = false} = State) ->
NewState = State#rep_state{
checkpoint_history = {[{<<"use_checkpoints">>, false}]}
},
{ok, update_job_state(NewState)};
-do_checkpoint(#rep_state{current_through_seq=S, committed_seq=S} = State) ->
+do_checkpoint(#rep_state{current_through_seq = S, committed_seq = S} = State) ->
{ok, update_job_state(State)};
do_checkpoint(State) ->
#rep_state{
- source_name=SourceName,
- target_name=TargetName,
+ source_name = SourceName,
+ target_name = TargetName,
source = Source,
target = Target,
history = OldHistory,
@@ -1219,11 +1234,11 @@ do_checkpoint(State) ->
} = State,
case commit_to_both(Source, Target) of
{source_error, Reason} ->
- {checkpoint_commit_failure, <<"Failure on source commit: ",
- (couch_util:to_binary(Reason))/binary>>};
+ {checkpoint_commit_failure,
+ <<"Failure on source commit: ", (couch_util:to_binary(Reason))/binary>>};
{target_error, Reason} ->
- {checkpoint_commit_failure, <<"Failure on target commit: ",
- (couch_util:to_binary(Reason))/binary>>};
+ {checkpoint_commit_failure,
+ <<"Failure on target commit: ", (couch_util:to_binary(Reason))/binary>>};
{SrcInstanceStartTime, TgtInstanceStartTime} ->
?LOG_NOTICE(#{
what => checkpoint,
@@ -1232,117 +1247,129 @@ do_checkpoint(State) ->
target => TargetName,
sequence => NewSeq
}),
- couch_log:notice("recording a checkpoint for `~s` -> `~s` at "
- "source update_seq ~p", [SourceName, TargetName, NewSeq]),
+ couch_log:notice(
+ "recording a checkpoint for `~s` -> `~s` at "
+ "source update_seq ~p",
+ [SourceName, TargetName, NewSeq]
+ ),
StartTime = couch_replicator_utils:rfc1123_local(RepStartTime),
EndTime = couch_replicator_utils:rfc1123_local(),
- NewHistoryEntry = {[
- {<<"session_id">>, SessionId},
- {<<"start_time">>, StartTime},
- {<<"end_time">>, EndTime},
- {<<"start_last_seq">>, StartSeq},
- {<<"end_last_seq">>, NewSeq},
- {<<"recorded_seq">>, NewSeq},
- {<<"missing_checked">>,
- couch_replicator_stats:missing_checked(Stats)},
- {<<"missing_found">>,
- couch_replicator_stats:missing_found(Stats)},
- {<<"docs_read">>,
- couch_replicator_stats:docs_read(Stats)},
- {<<"docs_written">>,
- couch_replicator_stats:docs_written(Stats)},
- {<<"doc_write_failures">>,
- couch_replicator_stats:doc_write_failures(Stats)}
- ]},
- BaseHistory = [
- {<<"session_id">>, SessionId},
- {<<"source_last_seq">>, NewSeq},
- {<<"replication_id_version">>, ?REP_ID_VERSION}
- ] ++ case maps:get(<<"doc_ids">>, Options, undefined) of
- undefined ->
- [];
- _DocIds ->
- % backwards compatibility with the result of a replication
-        % by doc IDs in versions 0.11.x and 1.0.x. TODO: deprecate
- % (use same history format, simplify code)
- [
- {<<"start_time">>, StartTime},
- {<<"end_time">>, EndTime},
- {<<"docs_read">>,
- couch_replicator_stats:docs_read(Stats)},
- {<<"docs_written">>,
- couch_replicator_stats:docs_written(Stats)},
- {<<"doc_write_failures">>,
- couch_replicator_stats:doc_write_failures(Stats)}
- ]
- end,
+ NewHistoryEntry =
+ {[
+ {<<"session_id">>, SessionId},
+ {<<"start_time">>, StartTime},
+ {<<"end_time">>, EndTime},
+ {<<"start_last_seq">>, StartSeq},
+ {<<"end_last_seq">>, NewSeq},
+ {<<"recorded_seq">>, NewSeq},
+ {<<"missing_checked">>, couch_replicator_stats:missing_checked(Stats)},
+ {<<"missing_found">>, couch_replicator_stats:missing_found(Stats)},
+ {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
+ {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
+ {<<"doc_write_failures">>, couch_replicator_stats:doc_write_failures(Stats)}
+ ]},
+ BaseHistory =
+ [
+ {<<"session_id">>, SessionId},
+ {<<"source_last_seq">>, NewSeq},
+ {<<"replication_id_version">>, ?REP_ID_VERSION}
+ ] ++
+ case maps:get(<<"doc_ids">>, Options, undefined) of
+ undefined ->
+ [];
+ _DocIds ->
+ % backwards compatibility with the result of a replication
+                    % by doc IDs in versions 0.11.x and 1.0.x. TODO: deprecate
+ % (use same history format, simplify code)
+ [
+ {<<"start_time">>, StartTime},
+ {<<"end_time">>, EndTime},
+ {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
+ {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
+ {<<"doc_write_failures">>,
+ couch_replicator_stats:doc_write_failures(Stats)}
+ ]
+ end,
% limit history to 50 entries
NewRepHistory = {
- BaseHistory ++ [{<<"history">>,
- lists:sublist([NewHistoryEntry | OldHistory], 50)}]
+ BaseHistory ++ [{<<"history">>, lists:sublist([NewHistoryEntry | OldHistory], 50)}]
},
try
- {SrcRevPos, SrcRevId} = update_checkpoint(Source,
- SourceLog#doc{body = NewRepHistory}, source),
- {TgtRevPos, TgtRevId} = update_checkpoint(Target,
- TargetLog#doc{body = NewRepHistory}, target),
+ {SrcRevPos, SrcRevId} = update_checkpoint(
+ Source,
+ SourceLog#doc{body = NewRepHistory},
+ source
+ ),
+ {TgtRevPos, TgtRevId} = update_checkpoint(
+ Target,
+ TargetLog#doc{body = NewRepHistory},
+ target
+ ),
NewState = State#rep_state{
checkpoint_history = NewRepHistory,
committed_seq = NewTsSeq,
- source_log = SourceLog#doc{revs={SrcRevPos, [SrcRevId]}},
- target_log = TargetLog#doc{revs={TgtRevPos, [TgtRevId]}}
+ source_log = SourceLog#doc{revs = {SrcRevPos, [SrcRevId]}},
+ target_log = TargetLog#doc{revs = {TgtRevPos, [TgtRevId]}}
},
{ok, update_job_state(NewState)}
- catch throw:{checkpoint_commit_failure, _} = Failure ->
- Failure
+ catch
+ throw:{checkpoint_commit_failure, _} = Failure ->
+ Failure
end;
{SrcInstanceStartTime, _NewTgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<"Target database out of sync. "
- "Try to increase max_dbs_open at the target's server.">>};
+ {checkpoint_commit_failure, <<
+ "Target database out of sync. "
+ "Try to increase max_dbs_open at the target's server."
+ >>};
{_NewSrcInstanceStartTime, TgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<"Source database out of sync. "
- "Try to increase max_dbs_open at the source's server.">>};
+ {checkpoint_commit_failure, <<
+ "Source database out of sync. "
+ "Try to increase max_dbs_open at the source's server."
+ >>};
{_NewSrcInstanceStartTime, _NewTgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<"Source and target databases out of "
- "sync. Try to increase max_dbs_open at both servers.">>}
+ {checkpoint_commit_failure, <<
+ "Source and target databases out of "
+ "sync. Try to increase max_dbs_open at both servers."
+ >>}
end.
-
update_checkpoint(Db, Doc, DbType) ->
try
update_checkpoint(Db, Doc)
- catch throw:{checkpoint_commit_failure, Reason} ->
- throw({checkpoint_commit_failure, <<"Error updating the ",
- (couch_util:to_binary(DbType))/binary, " checkpoint document: ",
- (couch_util:to_binary(Reason))/binary>>})
+ catch
+ throw:{checkpoint_commit_failure, Reason} ->
+ throw(
+ {checkpoint_commit_failure,
+ <<"Error updating the ", (couch_util:to_binary(DbType))/binary,
+ " checkpoint document: ", (couch_util:to_binary(Reason))/binary>>}
+ )
end.
-
update_checkpoint(Db, #doc{id = LogId, body = LogBody} = Doc) ->
try
case couch_replicator_api_wrap:update_doc(Db, Doc, [delay_commit]) of
{ok, PosRevId} -> PosRevId;
{error, Reason} -> throw({checkpoint_commit_failure, Reason})
end
- catch throw:conflict ->
- Opts = [ejson_body],
- case (catch couch_replicator_api_wrap:open_doc(Db, LogId, Opts)) of
- {ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} ->
-        % This means that we were able to successfully update the
-        % checkpoint doc in a previous attempt but got a connection
-        % error (e.g. a timeout) before receiving the success
-        % response. Therefore the request was retried and we got a
-        % conflict, as the revision we sent is not the current one. We
-        % confirm this by verifying that the doc body we just got is
-        % the same as the one we just sent.
- {Pos, RevId};
- _ ->
- throw({checkpoint_commit_failure, conflict})
- end
+ catch
+ throw:conflict ->
+ Opts = [ejson_body],
+ case (catch couch_replicator_api_wrap:open_doc(Db, LogId, Opts)) of
+ {ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} ->
+                    % This means that we were able to successfully update the
+                    % checkpoint doc in a previous attempt but got a connection
+                    % error (e.g. a timeout) before receiving the success
+                    % response. Therefore the request was retried and we got a
+                    % conflict, as the revision we sent is not the current one. We
+                    % confirm this by verifying that the doc body we just got is
+                    % the same as the one we just sent.
+ {Pos, RevId};
+ _ ->
+ throw({checkpoint_commit_failure, conflict})
+ end
end.
-
commit_to_both(Source, Target) ->
% commit the src async
ParentPid = self(),
@@ -1354,19 +1381,19 @@ commit_to_both(Source, Target) ->
% commit tgt sync
TgtResult = (catch couch_replicator_api_wrap:ensure_full_commit(Target)),
- SrcResult = receive
- {SrcCommitPid, Result} ->
- unlink(SrcCommitPid),
- receive
- {'EXIT', SrcCommitPid, _} ->
- ok
- after
- 0 -> ok
- end,
- Result;
- {'EXIT', SrcCommitPid, Reason} ->
- {error, Reason}
- end,
+ SrcResult =
+ receive
+ {SrcCommitPid, Result} ->
+ unlink(SrcCommitPid),
+ receive
+ {'EXIT', SrcCommitPid, _} ->
+ ok
+ after 0 -> ok
+ end,
+ Result;
+ {'EXIT', SrcCommitPid, Reason} ->
+ {error, Reason}
+ end,
case TgtResult of
{ok, TargetStartTime} ->
case SrcResult of
@@ -1379,69 +1406,80 @@ commit_to_both(Source, Target) ->
{target_error, TargetError}
end.
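%% [Editor's note] A minimal sketch of the concurrency pattern used by
%% commit_to_both/2: run one call in a linked helper process, run the other
%% inline, then collect the helper's result while unlinking and flushing any
%% EXIT it may already have delivered. Names are illustrative, and the
%% caller is assumed to trap exits (as this gen_server does):
run_both(FunA, FunB) ->
    Parent = self(),
    Pid = spawn_link(fun() -> Parent ! {self(), catch FunA()} end),
    ResB = (catch FunB()),
    ResA =
        receive
            {Pid, Res} ->
                unlink(Pid),
                % Flush an EXIT that may have raced with the result message
                receive
                    {'EXIT', Pid, _} -> ok
                after 0 -> ok
                end,
                Res;
            {'EXIT', Pid, Reason} ->
                {error, Reason}
        end,
    {ResA, ResB}.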
-
compare_replication_logs(SrcDoc, TgtDoc) ->
- #doc{body={RepRecProps}} = SrcDoc,
- #doc{body={RepRecPropsTgt}} = TgtDoc,
+ #doc{body = {RepRecProps}} = SrcDoc,
+ #doc{body = {RepRecPropsTgt}} = TgtDoc,
SrcSession = get_value(<<"session_id">>, RepRecProps),
TgtSession = get_value(<<"session_id">>, RepRecPropsTgt),
case SrcSession == TgtSession of
true ->
% if the records have the same session id,
% then we have a valid replication history
- OldSeqNum = get_value(<<"source_last_seq">>, RepRecProps,
- ?LOWEST_SEQ),
+ OldSeqNum = get_value(
+ <<"source_last_seq">>,
+ RepRecProps,
+ ?LOWEST_SEQ
+ ),
OldHistory = get_value(<<"history">>, RepRecProps, []),
{OldSeqNum, OldHistory, true};
false ->
SourceHistory = get_value(<<"history">>, RepRecProps, []),
TargetHistory = get_value(<<"history">>, RepRecPropsTgt, []),
- couch_log:notice("Replication records differ. "
- "Scanning histories to find a common ancestor.", []),
- couch_log:debug("Record on source:~p~nRecord on target:~p~n",
- [RepRecProps, RepRecPropsTgt]),
+ couch_log:notice(
+ "Replication records differ. "
+ "Scanning histories to find a common ancestor.",
+ []
+ ),
+ couch_log:debug(
+ "Record on source:~p~nRecord on target:~p~n",
+ [RepRecProps, RepRecPropsTgt]
+ ),
{StartSeq, History} = compare_rep_history(SourceHistory, TargetHistory),
{StartSeq, History, false}
end.
-
compare_rep_history(S, T) when S =:= [] orelse T =:= [] ->
couch_log:notice("no common ancestry -- performing full replication", []),
{?LOWEST_SEQ, []};
-
compare_rep_history([{S} | SourceRest], [{T} | TargetRest] = Target) ->
SourceId = get_value(<<"session_id">>, S),
case has_session_id(SourceId, Target) of
true ->
RecordSeqNum = get_value(<<"recorded_seq">>, S, ?LOWEST_SEQ),
- couch_log:notice("found a common replication record with "
- "source_seq ~p", [RecordSeqNum]),
+ couch_log:notice(
+ "found a common replication record with "
+ "source_seq ~p",
+ [RecordSeqNum]
+ ),
{RecordSeqNum, SourceRest};
false ->
TargetId = get_value(<<"session_id">>, T),
case has_session_id(TargetId, SourceRest) of
true ->
- RecordSeqNum = get_value(<<"recorded_seq">>, T,
- ?LOWEST_SEQ),
- couch_log:notice("found a common replication record with "
- "source_seq ~p", [RecordSeqNum]),
+ RecordSeqNum = get_value(
+ <<"recorded_seq">>,
+ T,
+ ?LOWEST_SEQ
+ ),
+ couch_log:notice(
+ "found a common replication record with "
+ "source_seq ~p",
+ [RecordSeqNum]
+ ),
{RecordSeqNum, TargetRest};
false ->
compare_rep_history(SourceRest, TargetRest)
end
end.
-
has_session_id(_SessionId, []) ->
false;
-
has_session_id(SessionId, [{Props} | Rest]) ->
case get_value(<<"session_id">>, Props, nil) of
SessionId -> true;
_Else -> has_session_id(SessionId, Rest)
end.
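%% [Editor's note] A worked example of the ancestor scan above, with
%% histories listed newest first and session ids shortened to atoms for
%% readability:
%%   Source = [s3, s2, s1]   (with recorded_seq 30, 20, 10)
%%   Target = [s4, s2, s1]
%% s3 does not occur in Target and s4 does not occur in the rest of Source,
%% so both heads are discarded and the scan recurses; s2 is then found on
%% both sides, giving {20, [s1]}: restart from recorded_seq 20, keeping the
%% older source history as the common tail.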
-
get_pending_count(#rep_state{} = St) ->
#rep_state{
highest_seq_done = HighestSeqDone,
@@ -1456,14 +1494,12 @@ get_pending_count(#rep_state{} = St) ->
null
end.
-
maybe_update_job_state(#rep_state{} = State) ->
case State#rep_state.stats_timer of
nil -> start_stats_timer(State);
Ref when is_reference(Ref) -> State
end.
-
update_job_state(#rep_state{} = State0) ->
State = cancel_stats_timer(State0),
#rep_state{
@@ -1506,30 +1542,25 @@ update_job_state(#rep_state{} = State0) ->
State2 = update_active_task_info(State1),
update_job_data(undefined, State2).
-
replication_start_error({unauthorized, DbUri}) ->
- {unauthorized, <<"unauthorized to access or create database ",
- DbUri/binary>>};
-
+ {unauthorized, <<"unauthorized to access or create database ", DbUri/binary>>};
replication_start_error({db_not_found, DbUri}) ->
{db_not_found, <<"could not open ", DbUri/binary>>};
-
-replication_start_error({http_request_failed, _Method, Url0,
- {error, {error, {conn_failed, {error, nxdomain}}}}}) ->
+replication_start_error(
+ {http_request_failed, _Method, Url0, {error, {error, {conn_failed, {error, nxdomain}}}}}
+) ->
Url = ?l2b(couch_util:url_strip_password(Url0)),
{nxdomain, <<"could not resolve ", Url/binary>>};
-
-replication_start_error({http_request_failed, Method0, Url0,
- {error, {code, Code}}}) when is_integer(Code) ->
+replication_start_error({http_request_failed, Method0, Url0, {error, {code, Code}}}) when
+ is_integer(Code)
+->
Url = ?l2b(couch_util:url_strip_password(Url0)),
Method = ?l2b(Method0),
CodeBin = integer_to_binary(Code),
{http_error_code, <<CodeBin/binary, " ", Method/binary, " ", Url/binary>>};
-
replication_start_error(Error) ->
Error.
-
log_replication_start(#rep_state{} = RepState) ->
#rep_state{
id = Id,
@@ -1542,12 +1573,13 @@ log_replication_start(#rep_state{} = RepState) ->
} = RepState,
Workers = maps:get(<<"worker_processes">>, Options),
BatchSize = maps:get(<<"worker_batch_size">>, Options),
- From = case DbName of
- Name when is_binary(Name) ->
- io_lib:format("from doc ~s:~s", [Name, DocId]);
- _ ->
- "from _replicate endpoint"
- end,
+ From =
+ case DbName of
+ Name when is_binary(Name) ->
+ io_lib:format("from doc ~s:~s", [Name, DocId]);
+ _ ->
+ "from _replicate endpoint"
+ end,
?LOG_NOTICE(#{
what => starting_replication,
in => replicator,
@@ -1559,11 +1591,11 @@ log_replication_start(#rep_state{} = RepState) ->
worker_processes => Workers,
worker_batch_size => BatchSize
}),
-    Msg = "Starting replication ~s (~s -> ~s) ~s worker_processes:~p"
+    Msg =
+        "Starting replication ~s (~s -> ~s) ~s worker_processes:~p"
" worker_batch_size:~p session_id:~s",
couch_log:notice(Msg, [Id, Source, Target, From, Workers, BatchSize, Sid]).
-
check_user_filter(#rep_state{} = State) ->
#rep_state{
id = RepId,
@@ -1588,27 +1620,30 @@ check_user_filter(#rep_state{} = State) ->
exit({shutdown, finished})
end.
-
-hist_append(Type, Now, #{} = JobData, Info) when is_integer(Now),
- is_binary(Type) ->
+hist_append(Type, Now, #{} = JobData, Info) when
+ is_integer(Now),
+ is_binary(Type)
+->
#{?JOB_HISTORY := Hist} = JobData,
Evt1 = #{?HIST_TYPE => Type, ?HIST_TIMESTAMP => Now},
- Evt2 = case Info of
- undefined ->
- Evt1;
- null ->
- Evt1#{?HIST_REASON => null};
- <<_/binary>> ->
- Evt1#{?HIST_REASON => Info};
- #{<<"error">> := Err, <<"reason">> := Reason} when is_binary(Err),
- is_binary(Reason) ->
- Evt1#{?HIST_REASON => Reason}
- end,
+ Evt2 =
+ case Info of
+ undefined ->
+ Evt1;
+ null ->
+ Evt1#{?HIST_REASON => null};
+ <<_/binary>> ->
+ Evt1#{?HIST_REASON => Info};
+ #{<<"error">> := Err, <<"reason">> := Reason} when
+ is_binary(Err),
+ is_binary(Reason)
+ ->
+ Evt1#{?HIST_REASON => Reason}
+ end,
Hist1 = [Evt2 | Hist],
Hist2 = lists:sublist(Hist1, max_history()),
JobData#{?JOB_HISTORY := Hist2}.
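%% [Editor's note] The job history above is a bounded, newest-first list:
%% each event is prepended and anything beyond max_history() entries is
%% dropped. The core is just (sketch):
%%   prepend_bounded(Event, Hist, Max) ->
%%       lists:sublist([Event | Hist], Max).
%% e.g. prepend_bounded(e4, [e3, e2, e1], 3) =:= [e4, e3, e2].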
-
optimize_rate_limited_job(#{} = Options, <<"max_backoff">>) ->
OptimizedSettings = #{
<<"checkpoint_interval">> => 5000,
@@ -1617,73 +1652,83 @@ optimize_rate_limited_job(#{} = Options, <<"max_backoff">>) ->
<<"http_connections">> => 2
},
maps:merge(Options, OptimizedSettings);
-
optimize_rate_limited_job(#{} = Options, _Other) ->
Options.
-
close_endpoints(State) ->
State1 = cancel_timers(State),
couch_replicator_api_wrap:db_close(State1#rep_state.source),
couch_replicator_api_wrap:db_close(State1#rep_state.target),
ok.
-
get_value(K, Props) ->
couch_util:get_value(K, Props).
-
get_value(K, Props, Default) ->
couch_util:get_value(K, Props, Default).
-
accept_jitter_msec() ->
couch_rand:uniform(erlang:max(1, max_startup_jitter_msec())).
-
max_startup_jitter_msec() ->
- config:get_integer("replicator", "startup_jitter",
- ?STARTUP_JITTER_DEFAULT).
-
+ config:get_integer(
+ "replicator",
+ "startup_jitter",
+ ?STARTUP_JITTER_DEFAULT
+ ).
min_backoff_penalty_sec() ->
- config:get_integer("replicator", "min_backoff_penalty_sec",
- ?DEFAULT_MIN_BACKOFF_PENALTY_SEC).
-
+ config:get_integer(
+ "replicator",
+ "min_backoff_penalty_sec",
+ ?DEFAULT_MIN_BACKOFF_PENALTY_SEC
+ ).
max_backoff_penalty_sec() ->
- config:get_integer("replicator", "max_backoff_penalty_sec",
- ?DEFAULT_MAX_BACKOFF_PENALTY_SEC).
-
+ config:get_integer(
+ "replicator",
+ "max_backoff_penalty_sec",
+ ?DEFAULT_MAX_BACKOFF_PENALTY_SEC
+ ).
max_history() ->
config:get_integer("replicator", "max_history", ?DEFAULT_MAX_HISTORY).
-
stats_update_interval_sec() ->
- config:get_integer("replicator", "stats_update_interval_sec",
- ?DEFAULT_STATS_UPDATE_INTERVAL_SEC).
-
+ config:get_integer(
+ "replicator",
+ "stats_update_interval_sec",
+ ?DEFAULT_STATS_UPDATE_INTERVAL_SEC
+ ).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
-
replication_start_error_test() ->
- ?assertEqual({unauthorized, <<"unauthorized to access or create database"
- " http://x/y">>}, replication_start_error({unauthorized,
- <<"http://x/y">>})),
- ?assertEqual({db_not_found, <<"could not open http://x/y">>},
- replication_start_error({db_not_found, <<"http://x/y">>})),
- ?assertEqual({nxdomain, <<"could not resolve http://x/y">>},
- replication_start_error({http_request_failed, "GET", "http://x/y",
- {error, {error, {conn_failed, {error, nxdomain}}}}})),
- ?assertEqual({http_error_code, <<"503 GET http://x/y">>},
- replication_start_error({http_request_failed, "GET", "http://x/y",
- {error, {code, 503}}})).
-
+ ?assertEqual(
+ {unauthorized, <<
+ "unauthorized to access or create database"
+ " http://x/y"
+ >>},
+ replication_start_error({unauthorized, <<"http://x/y">>})
+ ),
+ ?assertEqual(
+ {db_not_found, <<"could not open http://x/y">>},
+ replication_start_error({db_not_found, <<"http://x/y">>})
+ ),
+ ?assertEqual(
+ {nxdomain, <<"could not resolve http://x/y">>},
+ replication_start_error(
+ {http_request_failed, "GET", "http://x/y",
+ {error, {error, {conn_failed, {error, nxdomain}}}}}
+ )
+ ),
+ ?assertEqual(
+ {http_error_code, <<"503 GET http://x/y">>},
+ replication_start_error({http_request_failed, "GET", "http://x/y", {error, {code, 503}}})
+ ).
scheduler_job_format_status_test_() ->
{
@@ -1695,21 +1740,21 @@ scheduler_job_format_status_test_() ->
]
}.
-
setup() ->
meck:expect(config, get, fun(_, _, Default) -> Default end).
-
teardown(_) ->
meck:unload().
-
t_format_status(_) ->
- {ok, Rep} = couch_replicator_parse:parse_rep(#{
- <<"source">> => <<"http://u:p@h1/d1">>,
- <<"target">> => <<"http://u:p@h2/d2">>,
- <<"create_target">> => true
- }, null),
+ {ok, Rep} = couch_replicator_parse:parse_rep(
+ #{
+ <<"source">> => <<"http://u:p@h1/d1">>,
+ <<"target">> => <<"http://u:p@h2/d2">>,
+ <<"create_target">> => true
+ },
+ null
+ ),
State = #rep_state{
id = <<"base+ext">>,
job_data = #{?REP => Rep},
@@ -1740,5 +1785,4 @@ t_format_status(_) ->
?assertEqual(<<"4">>, proplists:get_value(current_through_seq, Format)),
?assertEqual(<<"5">>, proplists:get_value(highest_seq_done, Format)).
-
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_job_server.erl b/src/couch_replicator/src/couch_replicator_job_server.erl
index 2452a222d..b96f6d461 100644
--- a/src/couch_replicator/src/couch_replicator_job_server.erl
+++ b/src/couch_replicator/src/couch_replicator_job_server.erl
@@ -12,10 +12,8 @@
-module(couch_replicator_job_server).
-
-behaviour(gen_server).
-
-export([
start_link/1
]).
@@ -36,23 +34,20 @@
reschedule/0
]).
-
-include("couch_replicator.hrl").
-include_lib("kernel/include/logger.hrl").
-
-define(MAX_ACCEPTORS, 2).
-define(MAX_JOBS, 500).
-define(MAX_CHURN, 100).
-define(INTERVAL_SEC, 15).
-define(MIN_RUN_TIME_SEC, 60).
--define(TRANSIENT_JOB_MAX_AGE_SEC, 86400). % 1 day
-
+% 1 day
+-define(TRANSIENT_JOB_MAX_AGE_SEC, 86400).
start_link(Timeout) when is_integer(Timeout) ->
gen_server:start_link({local, ?MODULE}, ?MODULE, Timeout, []).
-
init(Timeout) when is_integer(Timeout) ->
process_flag(trap_exit, true),
couch_replicator_jobs:set_timeout(),
@@ -68,7 +63,6 @@ init(Timeout) when is_integer(Timeout) ->
St2 = do_send_after(St1),
{ok, St2}.
-
terminate(_, #{} = St) ->
#{
workers := Workers,
@@ -79,7 +73,6 @@ terminate(_, #{} = St) ->
wait_jobs_exit(Workers, Timeout),
ok.
-
handle_call({accepted, Pid, Normal}, _From, #{} = St) ->
#{
acceptors := Acceptors,
@@ -105,21 +98,16 @@ handle_call({accepted, Pid, Normal}, _From, #{} = St) ->
couch_log:error(LogMsg, [?MODULE, Pid]),
{stop, {unknown_acceptor_pid, Pid}, St}
end;
-
handle_call(reschedule, _From, St) ->
{reply, ok, reschedule(St)};
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info(reschedule, #{} = St) ->
{noreply, reschedule(St)};
-
handle_info({'EXIT', Pid, Reason}, #{} = St) ->
#{
acceptors := Acceptors,
@@ -130,11 +118,9 @@ handle_info({'EXIT', Pid, Reason}, #{} = St) ->
{false, true} -> handle_worker_exit(St, Pid, Reason);
{false, false} -> handle_unknown_exit(St, Pid, Reason)
end;
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
format_status(_Opt, [_PDict, #{} = St]) ->
#{
acceptors := Acceptors,
@@ -149,23 +135,18 @@ format_status(_Opt, [_PDict, #{} = St]) ->
{config, Config}
].
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
accepted(Worker, Normal) when is_pid(Worker), is_boolean(Normal) ->
gen_server:call(?MODULE, {accepted, Worker, Normal}, infinity).
-
scheduling_interval_sec() ->
config:get_integer("replicator", "interval_sec", ?INTERVAL_SEC).
-
reschedule() ->
gen_server:call(?MODULE, reschedule, infinity).
-
% Scheduling logic
do_send_after(#{} = St) ->
@@ -176,15 +157,12 @@ do_send_after(#{} = St) ->
TRef = erlang:send_after(WaitMSec, self(), reschedule),
St#{timer := TRef}.
-
cancel_timer(#{timer := undefined} = St) ->
St;
-
cancel_timer(#{timer := TRef} = St) when is_reference(TRef) ->
erlang:cancel_timer(TRef),
St#{timer := undefined}.
-
reschedule(#{} = St) ->
St1 = cancel_timer(St),
St2 = St1#{config := get_config()},
@@ -195,7 +173,6 @@ reschedule(#{} = St) ->
St7 = do_send_after(St6),
St7#{churn := 0}.
-
start_excess_acceptors(#{} = St) ->
#{
churn := Churn,
@@ -211,10 +188,14 @@ start_excess_acceptors(#{} = St) ->
Slots = (MaxJobs + MaxChurn) - (ACnt + WCnt),
MinSlotsChurn = min(Slots, ChurnLeft),
- Pending = if MinSlotsChurn =< 0 -> 0; true ->
- % Don't fetch pending if we don't have enough slots or churn budget
- couch_replicator_jobs:pending_count(undefined, MinSlotsChurn)
- end,
+ Pending =
+ if
+ MinSlotsChurn =< 0 ->
+ 0;
+ true ->
+ % Don't fetch pending if we don't have enough slots or churn budget
+ couch_replicator_jobs:pending_count(undefined, MinSlotsChurn)
+ end,
couch_stats:update_gauge([couch_replicator, jobs, pending], Pending),
@@ -222,12 +203,15 @@ start_excess_acceptors(#{} = St) ->
% and we won't start more than max jobs + churn total acceptors
ToStart = max(0, lists:min([ChurnLeft, Pending, Slots])),
- lists:foldl(fun(_, #{} = StAcc) ->
- #{acceptors := AccAcceptors} = StAcc,
- {ok, Pid} = couch_replicator_job:start_link(),
- StAcc#{acceptors := AccAcceptors#{Pid => true}}
- end, St, lists:seq(1, ToStart)).
-
+ lists:foldl(
+ fun(_, #{} = StAcc) ->
+ #{acceptors := AccAcceptors} = StAcc,
+ {ok, Pid} = couch_replicator_job:start_link(),
+ StAcc#{acceptors := AccAcceptors#{Pid => true}}
+ end,
+ St,
+ lists:seq(1, ToStart)
+ ).
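%% [Editor's note] Worked numbers for the slot arithmetic above, assuming
%% max_jobs = 500, max_churn = 100, 2 acceptors and 480 workers running,
%% and ChurnLeft = 60 (its derivation from the churn counter is elided
%% from this hunk):
%%   Slots   = (500 + 100) - (2 + 480) = 118
%%   ToStart = max(0, lists:min([60, Pending, 118]))
%% so with 10 pending jobs only 10 acceptors are started, and on a busy
%% scheduler the churn budget, not the raw slot count, is the bound.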
transient_job_cleanup(#{} = St) ->
#{
@@ -254,7 +238,6 @@ transient_job_cleanup(#{} = St) ->
ok = couch_replicator_jobs:fold_jobs(undefined, FoldFun, ok),
St.
-
update_stats(#{} = St) ->
ACnt = maps:size(maps:get(acceptors, St)),
WCnt = maps:size(maps:get(workers, St)),
@@ -263,7 +246,6 @@ update_stats(#{} = St) ->
couch_stats:increment_counter([couch_replicator, jobs, reschedules]),
St.
-
trim_jobs(#{} = St) ->
#{
workers := Workers,
@@ -274,55 +256,52 @@ trim_jobs(#{} = St) ->
lists:foreach(fun stop_job/1, stop_candidates(St, Excess)),
St#{churn := Churn + Excess}.
-
stop_candidates(#{}, Top) when is_integer(Top), Top =< 0 ->
[];
-
stop_candidates(#{} = St, Top) when is_integer(Top), Top > 0 ->
#{
workers := Workers,
config := #{min_run_time_sec := MinRunTime}
} = St,
- WList1 = maps:to_list(Workers), % [{Pid, {Normal, StartTime}},...]
+ % [{Pid, {Normal, StartTime}},...]
+ WList1 = maps:to_list(Workers),
% Filter out normal jobs and those which have just started running
MaxT = erlang:system_time(second) - MinRunTime,
- WList2 = lists:filter(fun({_Pid, {Normal, T}}) ->
- not Normal andalso T =< MaxT
- end, WList1),
+ WList2 = lists:filter(
+ fun({_Pid, {Normal, T}}) ->
+ not Normal andalso T =< MaxT
+ end,
+ WList1
+ ),
Sorted = lists:keysort(2, WList2),
Pids = lists:map(fun({Pid, _}) -> Pid end, Sorted),
lists:sublist(Pids, Top).
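%% [Editor's note] A worked example of the candidate selection above.
%% lists:keysort(2, ...) orders the {Normal, StartTime} values, and since
%% only Normal =:= false entries survive the filter this is simply
%% oldest-first by start time. Assuming MaxT = 120 and
%%   Workers = #{p1 => {false, 100}, p2 => {true, 90}, p3 => {false, 50}}
%% the filter keeps p1 and p3 (p2 is a normal one-shot job), keysort yields
%% [{p3, {false, 50}}, {p1, {false, 100}}], and stop_candidates(St, 1)
%% would stop p3, the longest-running continuous job.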
-
stop_job(Pid) when is_pid(Pid) ->
% Replication jobs handle the shutdown signal and then checkpoint in
% terminate handler
exit(Pid, shutdown).
-
wait_jobs_exit(#{} = Jobs, _) when map_size(Jobs) =:= 0 ->
ok;
-
wait_jobs_exit(#{} = Jobs, Timeout) ->
receive
{'EXIT', Pid, _} ->
wait_jobs_exit(maps:remove(Pid, Jobs), Timeout)
- after
- Timeout ->
- ?LOG_ERROR(#{
- what => unclean_job_termination,
- in => replicator,
- job_count => map_size(Jobs)
- }),
- LogMsg = "~p : ~p jobs didn't terminate cleanly",
- couch_log:error(LogMsg, [?MODULE, map_size(Jobs)]),
- ok
+ after Timeout ->
+ ?LOG_ERROR(#{
+ what => unclean_job_termination,
+ in => replicator,
+ job_count => map_size(Jobs)
+ }),
+ LogMsg = "~p : ~p jobs didn't terminate cleanly",
+ couch_log:error(LogMsg, [?MODULE, map_size(Jobs)]),
+ ok
end.
-
spawn_acceptors(St) ->
#{
workers := Workers,
@@ -340,7 +319,6 @@ spawn_acceptors(St) ->
St
end.
-
% Worker process exit handlers
handle_acceptor_exit(#{acceptors := Acceptors} = St, Pid, Reason) ->
@@ -355,7 +333,6 @@ handle_acceptor_exit(#{acceptors := Acceptors} = St, Pid, Reason) ->
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{noreply, spawn_acceptors(St1)}.
-
handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
St1 = St#{workers := maps:remove(Pid, Workers)},
case Reason of
@@ -377,7 +354,6 @@ handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
end,
{noreply, spawn_acceptors(St1)}.
-
handle_unknown_exit(St, Pid, Reason) ->
?LOG_ERROR(#{
what => unknown_process_crash,
@@ -388,7 +364,6 @@ handle_unknown_exit(St, Pid, Reason) ->
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{stop, {unknown_pid_exit, Pid}, St}.
-
get_config() ->
Defaults = #{
max_acceptors => ?MAX_ACCEPTORS,
@@ -398,6 +373,9 @@ get_config() ->
min_run_time_sec => ?MIN_RUN_TIME_SEC,
transient_job_max_age_sec => ?TRANSIENT_JOB_MAX_AGE_SEC
},
- maps:map(fun(K, Default) ->
- config:get_integer("replicator", atom_to_list(K), Default)
- end, Defaults).
+ maps:map(
+ fun(K, Default) ->
+ config:get_integer("replicator", atom_to_list(K), Default)
+ end,
+ Defaults
+ ).
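%% [Editor's note] Each key in Defaults doubles as its config name, so every
%% scheduler knob can be overridden from the [replicator] section, e.g.
%%   [replicator]
%%   max_jobs = 1000
%% after which get_config() would return (sketch; part of the default map
%% is elided from this hunk):
%%   #{max_acceptors => 2, max_jobs => 1000, max_churn => 100,
%%     min_run_time_sec => 60, transient_job_max_age_sec => 86400, ...}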
diff --git a/src/couch_replicator/src/couch_replicator_jobs.erl b/src/couch_replicator/src/couch_replicator_jobs.erl
index 51f441caf..06ccc4493 100644
--- a/src/couch_replicator/src/couch_replicator_jobs.erl
+++ b/src/couch_replicator/src/couch_replicator_jobs.erl
@@ -12,7 +12,6 @@
-module(couch_replicator_jobs).
-
-export([
% couch_jobs type timeouts
set_timeout/0,
@@ -47,15 +46,12 @@
get_job_ids/1
]).
-
-include("couch_replicator.hrl").
-include_lib("fabric/include/fabric2.hrl").
-
-define(REP_JOBS, <<"rep_jobs">>).
-define(REP_JOBS_TIMEOUT_SEC, 61).
-
% Data model
% ----------
%
@@ -73,11 +69,9 @@
set_timeout() ->
couch_jobs:set_type_timeout(?REP_JOBS, ?REP_JOBS_TIMEOUT_SEC).
-
get_timeout() ->
?REP_JOBS_TIMEOUT_SEC.
-
new_job(#{} = Rep, DbName, DbUUID, DocId, State, StateInfo, DocState) ->
NowSec = erlang:system_time(second),
AddedEvent = #{?HIST_TYPE => ?HIST_ADDED, ?HIST_TIMESTAMP => NowSec},
@@ -102,7 +96,6 @@ new_job(#{} = Rep, DbName, DbUUID, DocId, State, StateInfo, DocState) ->
?CHECKPOINT_HISTORY => []
}.
-
add_job(Tx, JobId, JobData) ->
couch_stats:increment_counter([couch_replicator, jobs, adds]),
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
@@ -115,7 +108,6 @@ add_job(Tx, JobId, JobData) ->
ok = couch_jobs:add(JTx, ?REP_JOBS, JobId, JobData)
end).
-
remove_job(Tx, JobId) ->
couch_stats:increment_counter([couch_replicator, jobs, removes]),
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
@@ -127,13 +119,11 @@ remove_job(Tx, JobId) ->
end
end).
-
get_job_data(Tx, JobId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs:get_job_data(JTx, ?REP_JOBS, JobId)
end).
-
% UserFun = fun(JTx, JobId, JobState, JobData, UserAcc)
%
fold_jobs(Tx, UserFun, Acc) when is_function(UserFun, 5) ->
@@ -141,10 +131,8 @@ fold_jobs(Tx, UserFun, Acc) when is_function(UserFun, 5) ->
couch_jobs:fold_jobs(JTx, ?REP_JOBS, UserFun, Acc)
end).
-
pending_count(_Tx, Limit) when is_integer(Limit), Limit =< 0 ->
0;
-
pending_count(Tx, Limit) when is_integer(Limit), Limit > 0 ->
Opts = #{
max_sched_time => erlang:system_time(second),
@@ -152,7 +140,6 @@ pending_count(Tx, Limit) when is_integer(Limit), Limit > 0 ->
},
couch_jobs:pending_count(Tx, ?REP_JOBS, Opts).
-
wait_running(JobId) ->
case couch_jobs:subscribe(?REP_JOBS, JobId) of
{ok, finished, JobData} ->
@@ -168,7 +155,6 @@ wait_running(JobId) ->
{error, Error}
end.
-
wait_running(JobId, SubId) ->
case couch_jobs:wait(SubId, infinity) of
{?REP_JOBS, _, running, #{?STATE := ?ST_PENDING}} ->
@@ -183,44 +169,41 @@ wait_running(JobId, SubId) ->
{ok, JobData}
end.
-
wait_result(JobId) ->
case couch_jobs:subscribe(?REP_JOBS, JobId) of
{ok, finished, JobData} ->
{ok, JobData};
{ok, SubId, _, _} ->
- {?REP_JOBS, _, finished, JobData} = couch_jobs:wait(SubId,
- finished, infinity),
+ {?REP_JOBS, _, finished, JobData} = couch_jobs:wait(
+ SubId,
+ finished,
+ infinity
+ ),
{ok, JobData};
{error, Error} ->
{error, Error}
end.
-
accept_job(MaxSchedTime) when is_integer(MaxSchedTime) ->
Opts = #{max_sched_time => MaxSchedTime},
couch_jobs:accept(?REP_JOBS, Opts).
-
update_job_data(Tx, #{} = Job, #{} = JobData) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs:update(JTx, Job, JobData)
end).
-
finish_job(Tx, #{} = Job, #{} = JobData) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
couch_jobs:finish(JTx, Job, JobData)
end).
-
reschedule_job(Tx, #{} = Job, #{} = JobData, Time) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
{ok, Job1} = couch_jobs:resubmit(JTx, Job, Time),
ok = couch_jobs:finish(JTx, Job1, JobData)
end).
-
try_update_rep_id(Tx, JobId, RepId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
#{tx := ErlFdbTx, layer_prefix := LayerPrefix} = JTx,
@@ -235,7 +218,6 @@ try_update_rep_id(Tx, JobId, RepId) ->
end
end).
-
update_rep_id(Tx, JobId, RepId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
#{tx := ErlFdbTx, layer_prefix := LayerPrefix} = JTx,
@@ -243,10 +225,8 @@ update_rep_id(Tx, JobId, RepId) ->
ok = erlfdb:set(ErlFdbTx, Key, JobId)
end).
-
clear_old_rep_id(_, _, null) ->
ok;
-
clear_old_rep_id(Tx, JobId, RepId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
#{tx := ErlFdbTx, layer_prefix := LayerPrefix} = JTx,
@@ -261,7 +241,6 @@ clear_old_rep_id(Tx, JobId, RepId) ->
end
end).
-
get_job_id(Tx, RepId) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
#{tx := ErlFdbTx, layer_prefix := LayerPrefix} = JTx,
@@ -274,7 +253,6 @@ get_job_id(Tx, RepId) ->
end
end).
-
% Debug functions
remove_jobs(Tx, JobIds) when is_list(JobIds) ->
@@ -283,19 +261,20 @@ remove_jobs(Tx, JobIds) when is_list(JobIds) ->
end),
[].
-
get_job_ids(Tx) ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(Tx), fun(JTx) ->
#{tx := ErlFdbTx, layer_prefix := LayerPrefix} = JTx,
Prefix = erlfdb_tuple:pack({?REPLICATION_IDS}, LayerPrefix),
KVs = erlfdb:wait(erlfdb:get_range_startswith(ErlFdbTx, Prefix)),
- lists:map(fun({K, JobId}) ->
- {RepId} = erlfdb_tuple:unpack(K, Prefix),
- {RepId, JobId}
- end, KVs)
+ lists:map(
+ fun({K, JobId}) ->
+ {RepId} = erlfdb_tuple:unpack(K, Prefix),
+ {RepId, JobId}
+ end,
+ KVs
+ )
end).
-
% Private functions
remove_job(#{jtx := true} = JTx, JobId, OldJobData) ->
diff --git a/src/couch_replicator/src/couch_replicator_parse.erl b/src/couch_replicator/src/couch_replicator_parse.erl
index 38d50d437..654a369b1 100644
--- a/src/couch_replicator/src/couch_replicator_parse.erl
+++ b/src/couch_replicator/src/couch_replicator_parse.erl
@@ -12,7 +12,6 @@
-module(couch_replicator_parse).
-
-export([
parse_rep_doc/1,
parse_transient_rep/2,
@@ -20,12 +19,10 @@
parse_rep_db/3
]).
-
-include_lib("ibrowse/include/ibrowse.hrl").
-include("couch_replicator.hrl").
-include_lib("kernel/include/logger.hrl").
-
-define(DEFAULT_SOCK_OPTS, "[{keepalive, true}, {nodelay, false}]").
-define(VALID_SOCK_OPTS, [
buffer,
@@ -54,69 +51,68 @@
{"socket_options", ?DEFAULT_SOCK_OPTS, fun parse_sock_opts/1}
]).
-
-spec parse_rep_doc({[_]}) -> #{}.
parse_rep_doc(RepDoc) ->
- {ok, Rep} = try
- parse_rep(RepDoc, null)
- catch
- throw:{error, Reason}:Stack ->
- ?LOG_ERROR(#{
- what => replication_doc_parse_error,
- in => replicator,
- details => Reason,
- stacktrace => Stack
- }),
- LogErr1 = "~p parse_rep_doc fail ~p ~p",
- couch_log:error(LogErr1, [?MODULE, Reason, Stack]),
- throw({bad_rep_doc, Reason});
- Tag:Err:Stack ->
- ?LOG_ERROR(#{
- what => replication_doc_parse_error,
- in => replicator,
- tag => Tag,
- details => Err,
- stacktrace => Stack
- }),
- LogErr2 = "~p parse_rep_doc fail ~p:~p ~p",
- couch_log:error(LogErr2, [?MODULE, Tag, Err, Stack]),
- throw({bad_rep_doc, couch_util:to_binary({Tag, Err})})
- end,
+ {ok, Rep} =
+ try
+ parse_rep(RepDoc, null)
+ catch
+ throw:{error, Reason}:Stack ->
+ ?LOG_ERROR(#{
+ what => replication_doc_parse_error,
+ in => replicator,
+ details => Reason,
+ stacktrace => Stack
+ }),
+ LogErr1 = "~p parse_rep_doc fail ~p ~p",
+ couch_log:error(LogErr1, [?MODULE, Reason, Stack]),
+ throw({bad_rep_doc, Reason});
+ Tag:Err:Stack ->
+ ?LOG_ERROR(#{
+ what => replication_doc_parse_error,
+ in => replicator,
+ tag => Tag,
+ details => Err,
+ stacktrace => Stack
+ }),
+ LogErr2 = "~p parse_rep_doc fail ~p:~p ~p",
+ couch_log:error(LogErr2, [?MODULE, Tag, Err, Stack]),
+ throw({bad_rep_doc, couch_util:to_binary({Tag, Err})})
+ end,
Rep.
-
-spec parse_transient_rep({[_]} | #{}, user_name()) -> {ok, #{}}.
parse_transient_rep({Props} = EJson, UserName) when is_list(Props) ->
Str = couch_util:json_encode(EJson),
Map = couch_util:json_decode(Str, [return_maps]),
parse_transient_rep(Map, UserName);
-
parse_transient_rep(#{} = Body, UserName) ->
- {ok, Rep} = try
- parse_rep(Body, UserName)
- catch
- throw:{error, Reason}:Stack ->
- ?LOG_ERROR(#{
- what => transient_replication_parse_error,
- in => replicator,
- details => Reason,
- stacktrace => Stack
- }),
- LogErr1 = "~p parse_transient_rep fail ~p ~p",
- couch_log:error(LogErr1, [?MODULE, Reason, Stack]),
- throw({bad_request, Reason});
- Tag:Err:Stack ->
- ?LOG_ERROR(#{
- what => transient_replication_parse_error,
- in => replicator,
- tag => Tag,
- details => Err,
- stacktrace => Stack
- }),
- LogErr2 = "~p parse_transient_rep fail ~p ~p",
- couch_log:error(LogErr2, [?MODULE, Tag, Err, Stack]),
- throw({bad_request, couch_util:to_binary({Tag, Err})})
- end,
+ {ok, Rep} =
+ try
+ parse_rep(Body, UserName)
+ catch
+ throw:{error, Reason}:Stack ->
+ ?LOG_ERROR(#{
+ what => transient_replication_parse_error,
+ in => replicator,
+ details => Reason,
+ stacktrace => Stack
+ }),
+ LogErr1 = "~p parse_transient_rep fail ~p ~p",
+ couch_log:error(LogErr1, [?MODULE, Reason, Stack]),
+ throw({bad_request, Reason});
+ Tag:Err:Stack ->
+ ?LOG_ERROR(#{
+ what => transient_replication_parse_error,
+ in => replicator,
+ tag => Tag,
+ details => Err,
+ stacktrace => Stack
+ }),
+                LogErr2 = "~p parse_transient_rep fail ~p:~p ~p",
+ couch_log:error(LogErr2, [?MODULE, Tag, Err, Stack]),
+ throw({bad_request, couch_util:to_binary({Tag, Err})})
+ end,
#{?OPTIONS := Options} = Rep,
Cancel = maps:get(<<"cancel">>, Options, false),
Id = maps:get(<<"id">>, Options, nil),
@@ -134,13 +130,11 @@ parse_transient_rep(#{} = Body, UserName) ->
{ok, JobId, Rep}
end.
-
-spec parse_rep({[_]} | #{}, user_name()) -> {ok, #{}}.
parse_rep({Props} = EJson, UserName) when is_list(Props) ->
Str = couch_util:json_encode(EJson),
Map = couch_util:json_decode(Str, [return_maps]),
parse_rep(Map, UserName);
-
parse_rep(#{} = Doc, UserName) ->
{SrcProxy, TgtProxy} = parse_proxy_settings(Doc),
Opts = make_options(Doc),
@@ -176,33 +170,38 @@ parse_rep(#{} = Doc, UserName) ->
{ok, Rep}
end.
-
-spec parse_rep_db(#{}, #{}, #{}) -> #{}.
parse_rep_db(#{} = Endpoint, #{} = ProxyParams, #{} = Options) ->
- ProxyUrl = case ProxyParams of
- #{<<"proxy_url">> := PUrl} -> PUrl;
- _ -> null
- end,
+ ProxyUrl =
+ case ProxyParams of
+ #{<<"proxy_url">> := PUrl} -> PUrl;
+ _ -> null
+ end,
Url0 = maps:get(<<"url">>, Endpoint),
Url = maybe_add_trailing_slash(Url0),
AuthProps = maps:get(<<"auth">>, Endpoint, #{}),
- if is_map(AuthProps) -> ok; true ->
- throw({error, "if defined, `auth` must be an object"})
+ if
+ is_map(AuthProps) -> ok;
+ true -> throw({error, "if defined, `auth` must be an object"})
end,
Headers0 = maps:get(<<"headers">>, Endpoint, #{}),
- if is_map(Headers0) -> ok; true ->
- throw({error, "if defined `headers` must be an object"})
+ if
+ is_map(Headers0) -> ok;
+ true -> throw({error, "if defined `headers` must be an object"})
end,
DefaultHeaders = couch_replicator_utils:default_headers_map(),
Headers = maps:merge(DefaultHeaders, Headers0),
SockOpts = maps:get(<<"socket_options">>, Options, #{}),
- SockAndProxy = maps:merge(#{
- <<"socket_options">> => SockOpts
- }, ProxyParams),
+ SockAndProxy = maps:merge(
+ #{
+ <<"socket_options">> => SockOpts
+ },
+ ProxyParams
+ ),
SslParams = ssl_params(Url),
HttpDb = #{
@@ -216,20 +215,15 @@ parse_rep_db(#{} = Endpoint, #{} = ProxyParams, #{} = Options) ->
<<"proxy_url">> => ProxyUrl
},
normalize_basic_auth(HttpDb);
-
parse_rep_db(<<"http://", _/binary>> = Url, Proxy, Options) ->
parse_rep_db(#{<<"url">> => Url}, Proxy, Options);
-
parse_rep_db(<<"https://", _/binary>> = Url, Proxy, Options) ->
parse_rep_db(#{<<"url">> => Url}, Proxy, Options);
-
parse_rep_db(<<_/binary>>, _Proxy, _Options) ->
throw({error, local_endpoints_not_supported});
-
parse_rep_db(undefined, _Proxy, _Options) ->
throw({error, <<"Missing replication endpoint">>}).
-
parse_proxy_settings(#{} = Doc) ->
Proxy = maps:get(?PROXY, Doc, <<>>),
SrcProxy = maps:get(?SOURCE_PROXY, Doc, <<>>),
@@ -239,7 +233,7 @@ parse_proxy_settings(#{} = Doc) ->
true when SrcProxy =/= <<>> ->
Error = "`proxy` is mutually exclusive with `source_proxy`",
throw({error, Error});
- true when TgtProxy =/= <<>> ->
+ true when TgtProxy =/= <<>> ->
Error = "`proxy` is mutually exclusive with `target_proxy`",
throw({error, Error});
true ->
@@ -248,46 +242,49 @@ parse_proxy_settings(#{} = Doc) ->
{parse_proxy_params(SrcProxy), parse_proxy_params(TgtProxy)}
end.
-
-spec maybe_add_trailing_slash(binary()) -> binary().
maybe_add_trailing_slash(<<>>) ->
<<>>;
-
maybe_add_trailing_slash(Url) when is_binary(Url) ->
case binary:match(Url, <<"?">>) of
nomatch ->
case binary:last(Url) of
- $/ -> Url;
+ $/ -> Url;
_ -> <<Url/binary, "/">>
end;
_ ->
- Url % skip if there are query params
+ % skip if there are query params
+ Url
end.
-
-spec make_options(#{}) -> #{}.
make_options(#{} = RepDoc) ->
Options0 = convert_options(RepDoc),
Options = check_options(Options0),
- ConfigOptions = lists:foldl(fun({K, Default, ConversionFun}, Acc) ->
- V = ConversionFun(config:get("replicator", K, Default)),
- Acc#{list_to_binary(K) => V}
- end, #{}, ?CONFIG_DEFAULTS),
+ ConfigOptions = lists:foldl(
+ fun({K, Default, ConversionFun}, Acc) ->
+ V = ConversionFun(config:get("replicator", K, Default)),
+ Acc#{list_to_binary(K) => V}
+ end,
+ #{},
+ ?CONFIG_DEFAULTS
+ ),
maps:merge(ConfigOptions, Options).
-
-spec convert_options(#{}) -> #{} | no_return().
convert_options(#{} = Doc) ->
maps:fold(fun convert_fold/3, #{}, Doc).
-
-spec convert_fold(binary(), any(), #{}) -> #{}.
convert_fold(<<"cancel">>, V, Acc) when is_boolean(V) ->
Acc#{<<"cancel">> => V};
convert_fold(<<"cancel">>, _, _) ->
throw({error, <<"`cancel` must be a boolean">>});
-convert_fold(IdOpt, V, Acc) when IdOpt =:= <<"_local_id">>;
- IdOpt =:= <<"replication_id">>; IdOpt =:= <<"id">> ->
+convert_fold(IdOpt, V, Acc) when
+ IdOpt =:= <<"_local_id">>;
+ IdOpt =:= <<"replication_id">>;
+ IdOpt =:= <<"id">>
+->
Acc#{<<"id">> => couch_replicator_ids:convert(V)};
convert_fold(<<"create_target">>, V, Acc) when is_boolean(V) ->
Acc#{<<"create_target">> => V};
@@ -313,12 +310,15 @@ convert_fold(<<"doc_ids">>, null, Acc) ->
Acc;
convert_fold(<<"doc_ids">>, V, Acc) when is_list(V) ->
% Compatibility behaviour: accept a list of percent-encoded doc IDs
- Ids = lists:map(fun(Id) ->
- case is_binary(Id) andalso byte_size(Id) > 0 of
- true -> list_to_binary(couch_httpd:unquote(Id));
- false -> throw({error, <<"`doc_ids` array must contain strings">>})
- end
- end, V),
+ Ids = lists:map(
+ fun(Id) ->
+ case is_binary(Id) andalso byte_size(Id) > 0 of
+ true -> list_to_binary(couch_httpd:unquote(Id));
+ false -> throw({error, <<"`doc_ids` array must contain strings">>})
+ end
+ end,
+ V
+ ),
Acc#{<<"doc_ids">> => lists:usort(Ids)};
convert_fold(<<"doc_ids">>, _, _) ->
throw({error, <<"`doc_ids` must be an array">>});
@@ -346,13 +346,12 @@ convert_fold(<<"use_checkpoints">>, _, _) ->
throw({error, <<"`use_checkpoints` must be a boolean">>});
convert_fold(<<"checkpoint_interval">>, V, Acc) ->
Acc#{<<"checkpoint_interval">> => bin2int(V, <<"checkpoint_interval">>)};
-convert_fold(_K, _V, Acc) -> % skip unknown option
+% skip unknown option
+convert_fold(_K, _V, Acc) ->
Acc.
-
bin2int(V, _Field) when is_integer(V) ->
V;
-
bin2int(V, Field) when is_binary(V) ->
try
erlang:binary_to_integer(V)
@@ -360,43 +359,52 @@ bin2int(V, Field) when is_binary(V) ->
error:badarg ->
throw({error, <<"`", Field/binary, "` must be an integer">>})
end;
-
bin2int(_V, Field) ->
throw({error, <<"`", Field/binary, "` must be an integer">>}).
-
-spec check_options(#{}) -> #{}.
check_options(Options) ->
DocIds = maps:is_key(<<"doc_ids">>, Options),
Filter = maps:is_key(<<"filter">>, Options),
Selector = maps:is_key(<<"selector">>, Options),
case {DocIds, Filter, Selector} of
- {false, false, false} -> Options;
- {false, false, _} -> Options;
- {false, _, false} -> Options;
- {_, false, false} -> Options;
- _ -> throw({error, <<"`doc_ids`,`filter`,`selector` are mutually "
- " exclusive">>})
+ {false, false, false} ->
+ Options;
+ {false, false, _} ->
+ Options;
+ {false, _, false} ->
+ Options;
+ {_, false, false} ->
+ Options;
+ _ ->
+ throw(
+ {error, <<
+ "`doc_ids`,`filter`,`selector` are mutually "
+                    "exclusive"
+ >>}
+ )
end.
-
parse_sock_opts(Term) ->
{ok, SocketOptions} = couch_util:parse_term(Term),
- lists:foldl(fun
- ({K, V}, Acc) when is_atom(K) ->
- case lists:member(K, ?VALID_SOCK_OPTS) of
- true -> Acc#{atom_to_binary(K, utf8) => V};
- false -> Acc
- end;
- (_, Acc) ->
- Acc
- end, #{}, SocketOptions).
-
+ lists:foldl(
+ fun
+ ({K, V}, Acc) when is_atom(K) ->
+ case lists:member(K, ?VALID_SOCK_OPTS) of
+ true -> Acc#{atom_to_binary(K, utf8) => V};
+ false -> Acc
+ end;
+ (_, Acc) ->
+ Acc
+ end,
+ #{},
+ SocketOptions
+ ).
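
A usage sketch for parse_sock_opts/1, assuming keepalive and nodelay are in
?VALID_SOCK_OPTS (the ?DEFAULT_SOCK_OPTS value above suggests they are) and
bogus is not:

    % Whitelisted atoms keep their values under binary keys; others drop:
    #{<<"keepalive">> := true, <<"nodelay">> := false} =
        parse_sock_opts("[{keepalive, true}, {nodelay, false}, {bogus, 1}]").
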
-spec parse_proxy_params(binary() | #{}) -> #{}.
parse_proxy_params(<<>>) ->
#{};
-parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl)->
+parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl) ->
#url{
host = Host,
port = Port,
@@ -404,10 +412,11 @@ parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl)->
password = Passwd,
protocol = Prot0
} = ibrowse_lib:parse_url(binary_to_list(ProxyUrl)),
- Prot = case lists:member(Prot0, ?VALID_PROXY_PROTOCOLS) of
- true -> atom_to_binary(Prot0, utf8);
- false -> throw({error, <<"Unsupported proxy protocol">>})
- end,
+ Prot =
+ case lists:member(Prot0, ?VALID_PROXY_PROTOCOLS) of
+ true -> atom_to_binary(Prot0, utf8);
+ false -> throw({error, <<"Unsupported proxy protocol">>})
+ end,
ProxyParams = #{
<<"proxy_url">> => ProxyUrl,
<<"proxy_protocol">> => Prot,
@@ -424,7 +433,6 @@ parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl)->
ProxyParams
end.
-
-spec ssl_params(binary()) -> #{}.
ssl_params(Url) ->
case ibrowse_lib:parse_url(binary_to_list(Url)) of
@@ -439,22 +447,27 @@ ssl_params(Url) ->
VerifySslOptions = ssl_verify_options(VerifyCerts =:= "true"),
SslOpts = maps:merge(VerifySslOptions, #{<<"depth">> => Depth}),
HaveCertAndKey = CertFile /= null andalso KeyFile /= null,
- SslOpts1 = case HaveCertAndKey of false -> SslOpts; true ->
- CertOpts0 = #{
- <<"certfile">> => list_to_binary(CertFile),
- <<"keyfile">> => list_to_binary(KeyFile)
- },
- CertOpts = case Password of null -> CertOpts0; _ ->
- CertOpts0#{<<"password">> => list_to_binary(Password)}
+ SslOpts1 =
+ case HaveCertAndKey of
+ false ->
+ SslOpts;
+ true ->
+ CertOpts0 = #{
+ <<"certfile">> => list_to_binary(CertFile),
+ <<"keyfile">> => list_to_binary(KeyFile)
+ },
+ CertOpts =
+ case Password of
+ null -> CertOpts0;
+ _ -> CertOpts0#{<<"password">> => list_to_binary(Password)}
+ end,
+ maps:merge(SslOpts, CertOpts)
end,
- maps:merge(SslOpts, CertOpts)
- end,
#{<<"is_ssl">> => true, <<"ssl_options">> => SslOpts1};
#url{protocol = http} ->
#{}
end.
-
-spec ssl_verify_options(true | false) -> [_].
ssl_verify_options(true) ->
case config:get("replicator", "ssl_trusted_certificates_file") of
@@ -469,19 +482,18 @@ ssl_verify_options(true) ->
<<"cacertfile">> => list_to_binary(CAFile)
}
end;
-
ssl_verify_options(false) ->
#{
<<"verify">> => <<"verify_none">>
}.
-
-spec set_basic_auth_creds(string(), string(), map()) -> map().
-set_basic_auth_creds(undefined, undefined, #{}= HttpDb) ->
+set_basic_auth_creds(undefined, undefined, #{} = HttpDb) ->
HttpDb;
-set_basic_auth_creds(User, Pass, #{} = HttpDb)
- when is_list(User), is_list(Pass) ->
- #{<<"auth_props">> := AuthProps} = HttpDb,
+set_basic_auth_creds(User, Pass, #{} = HttpDb) when
+ is_list(User), is_list(Pass)
+->
+ #{<<"auth_props">> := AuthProps} = HttpDb,
UserPass = #{
<<"username">> => list_to_binary(User),
<<"password">> => list_to_binary(Pass)
@@ -489,10 +501,9 @@ set_basic_auth_creds(User, Pass, #{} = HttpDb)
AuthProps1 = AuthProps#{<<"basic">> => UserPass},
HttpDb#{<<"auth_props">> := AuthProps1}.
-
-spec extract_creds_from_url(binary()) ->
- {ok, {string() | undefined, string() | undefined}, string()} |
- {error, term()}.
+ {ok, {string() | undefined, string() | undefined}, string()}
+ | {error, term()}.
extract_creds_from_url(Url0) ->
Url = binary_to_list(Url0),
case ibrowse_lib:parse_url(Url) of
@@ -509,7 +520,6 @@ extract_creds_from_url(Url0) ->
{ok, {User, Pass}, list_to_binary(NoCreds)}
end.
-
% Normalize basic auth credentials so they are set only in the auth props
% object. If multiple basic auth credentials are provided, the resulting
% credentials are picked in the following order.
@@ -524,33 +534,38 @@ normalize_basic_auth(#{} = HttpDb) ->
<<"headers">> := Headers
} = HttpDb,
{HeaderCreds, HeadersNoCreds} = remove_basic_auth_from_headers(Headers),
- {UrlCreds, UrlWithoutCreds} = case extract_creds_from_url(Url) of
- {ok, Creds = {_, _}, UrlNoCreds} ->
- {Creds, UrlNoCreds};
- {error, _Error} ->
- % Don't crash replicator if user provided an invalid
- % userinfo part
- {undefined, undefined}
- end,
+ {UrlCreds, UrlWithoutCreds} =
+ case extract_creds_from_url(Url) of
+ {ok, Creds = {_, _}, UrlNoCreds} ->
+ {Creds, UrlNoCreds};
+ {error, _Error} ->
+ % Don't crash replicator if user provided an invalid
+ % userinfo part
+ {undefined, undefined}
+ end,
AuthCreds = {_, _} = couch_replicator_utils:get_basic_auth_creds(HttpDb),
HttpDb1 = HttpDb#{
<<"url">> := UrlWithoutCreds,
<<"headers">> := HeadersNoCreds
},
- {User, Pass} = case {AuthCreds, UrlCreds, HeaderCreds} of
- {{U, P}, {_, _}, {_, _}} when is_list(U), is_list(P) -> {U, P};
- {{_, _}, {U, P}, {_, _}} when is_list(U), is_list(P) -> {U, P};
- {{_, _}, {_, _}, {U, P}} -> {U, P}
- end,
+ {User, Pass} =
+ case {AuthCreds, UrlCreds, HeaderCreds} of
+ {{U, P}, {_, _}, {_, _}} when is_list(U), is_list(P) -> {U, P};
+ {{_, _}, {U, P}, {_, _}} when is_list(U), is_list(P) -> {U, P};
+ {{_, _}, {_, _}, {U, P}} -> {U, P}
+ end,
set_basic_auth_creds(User, Pass, HttpDb1).
-
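
The case clause above encodes the documented precedence: credentials already
in auth_props win, then userinfo embedded in the URL, then an Authorization
header. A minimal sketch, ignoring the default headers real callers merge in
(values illustrative; "dTI6cDI=" is base64("u2:p2")):

    In = #{
        <<"url">> => <<"http://u1:p1@host/db">>,
        <<"auth_props">> => #{},
        <<"headers">> => #{<<"Authorization">> => <<"basic dTI6cDI=">>}
    },
    % URL creds (u1/p1) outrank the header creds (u2/p2); both sources are
    % stripped and the winner lands under auth_props:
    %   url        => <<"http://host/db">>
    %   auth_props => #{<<"basic">> => #{<<"username">> => <<"u1">>,
    %                                    <<"password">> => <<"p1">>}}
    Out = normalize_basic_auth(In).
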
remove_basic_auth_from_headers(#{} = HeadersMap) ->
% Headers are passed in a map; however, mochiweb_headers expects them to be
% lists, so we transform them to lists first, then back to maps.
- Headers = maps:fold(fun(K, V, Acc) ->
- [{binary_to_list(K), binary_to_list(V)} | Acc]
- end, [], HeadersMap),
+ Headers = maps:fold(
+ fun(K, V, Acc) ->
+ [{binary_to_list(K), binary_to_list(V)} | Acc]
+ end,
+ [],
+ HeadersMap
+ ),
Headers1 = mochiweb_headers:make(Headers),
case mochiweb_headers:get_value("Authorization", Headers1) of
undefined ->
@@ -560,20 +575,22 @@ remove_basic_auth_from_headers(#{} = HeadersMap) ->
BasicLower = string:to_lower(Basic),
Result = maybe_remove_basic_auth(BasicLower, B64, Headers1),
{{User, Pass}, Headers2} = Result,
- HeadersMapResult = lists:foldl(fun({K, V}, Acc) ->
- Acc#{list_to_binary(K) => list_to_binary(V)}
- end, #{}, Headers2),
+ HeadersMapResult = lists:foldl(
+ fun({K, V}, Acc) ->
+ Acc#{list_to_binary(K) => list_to_binary(V)}
+ end,
+ #{},
+ Headers2
+ ),
{{User, Pass}, HeadersMapResult}
end.
-
maybe_remove_basic_auth("basic", " " ++ Base64, Headers) ->
Headers1 = mochiweb_headers:delete_any("Authorization", Headers),
{decode_basic_creds(Base64), mochiweb_headers:to_list(Headers1)};
maybe_remove_basic_auth(_, _, Headers) ->
{{undefined, undefined}, mochiweb_headers:to_list(Headers)}.
-
decode_basic_creds(Base64) ->
try re:split(base64:decode(Base64), ":", [{return, list}, {parts, 2}]) of
[User, Pass] ->
@@ -586,67 +603,97 @@ decode_basic_creds(Base64) ->
{undefined, undefined}
end.
-
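
A round-trip sketch for decode_basic_creds/1, assuming the elided success
clause returns the {User, Pass} pair, as its {undefined, undefined} failure
counterpart suggests:

    "dXNlcjpwYXNz" = base64:encode_to_string("user:pass"),
    {"user", "pass"} = decode_basic_creds("dXNlcjpwYXNz").
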
-ifdef(TEST).
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("fabric/test/fabric2_test.hrl").
-
check_options_pass_values_test() ->
?assertEqual(check_options(#{}), #{}),
- ?assertEqual(check_options(#{<<"baz">> => <<"foo">>}),
- #{<<"baz">> => <<"foo">>}),
- ?assertEqual(check_options(#{<<"doc_ids">> => [<<"x">>]}),
- #{<<"doc_ids">> => [<<"x">>]}),
- ?assertEqual(check_options(#{<<"filter">> => <<"f">>}),
- #{<<"filter">> => <<"f">>}),
- ?assertEqual(check_options(#{<<"selector">> => <<"s">>}),
- #{<<"selector">> => <<"s">>}).
-
+ ?assertEqual(
+ check_options(#{<<"baz">> => <<"foo">>}),
+ #{<<"baz">> => <<"foo">>}
+ ),
+ ?assertEqual(
+ check_options(#{<<"doc_ids">> => [<<"x">>]}),
+ #{<<"doc_ids">> => [<<"x">>]}
+ ),
+ ?assertEqual(
+ check_options(#{<<"filter">> => <<"f">>}),
+ #{<<"filter">> => <<"f">>}
+ ),
+ ?assertEqual(
+ check_options(#{<<"selector">> => <<"s">>}),
+ #{<<"selector">> => <<"s">>}
+ ).
check_options_fail_values_test() ->
- ?assertThrow({error, _},
- check_options(#{<<"doc_ids">> => [], <<"filter">> => <<"f">>})),
- ?assertThrow({error, _},
- check_options(#{<<"doc_ids">> => [], <<"selector">> => <<"s">>})),
- ?assertThrow({error, _},
- check_options(#{<<"filter">> => <<"f">>, <<"selector">> => <<"s">>})),
- ?assertThrow({error, _},
+ ?assertThrow(
+ {error, _},
+ check_options(#{<<"doc_ids">> => [], <<"filter">> => <<"f">>})
+ ),
+ ?assertThrow(
+ {error, _},
+ check_options(#{<<"doc_ids">> => [], <<"selector">> => <<"s">>})
+ ),
+ ?assertThrow(
+ {error, _},
+ check_options(#{<<"filter">> => <<"f">>, <<"selector">> => <<"s">>})
+ ),
+ ?assertThrow(
+ {error, _},
check_options(#{
<<"doc_ids">> => [],
<<"filter">> => <<"f">>,
- <<"selector">> => <<"s">>}
- )).
-
+ <<"selector">> => <<"s">>
+ })
+ ).
check_convert_options_pass_test() ->
?assertEqual(#{}, convert_options(#{})),
?assertEqual(#{}, convert_options(#{<<"random">> => 42})),
- ?assertEqual(#{<<"cancel">> => true},
- convert_options(#{<<"cancel">> => true})),
- ?assertEqual(#{<<"create_target">> => true},
- convert_options(#{<<"create_target">> => true})),
- ?assertEqual(#{<<"continuous">> => true},
- convert_options(#{<<"continuous">> => true})),
- ?assertEqual(#{<<"doc_ids">> => [<<"id">>]},
- convert_options(#{<<"doc_ids">> => [<<"id">>]})),
- ?assertEqual(#{<<"selector">> => #{<<"key">> => <<"value">>}},
- convert_options(#{<<"selector">> => #{<<"key">> => <<"value">>}})).
-
+ ?assertEqual(
+ #{<<"cancel">> => true},
+ convert_options(#{<<"cancel">> => true})
+ ),
+ ?assertEqual(
+ #{<<"create_target">> => true},
+ convert_options(#{<<"create_target">> => true})
+ ),
+ ?assertEqual(
+ #{<<"continuous">> => true},
+ convert_options(#{<<"continuous">> => true})
+ ),
+ ?assertEqual(
+ #{<<"doc_ids">> => [<<"id">>]},
+ convert_options(#{<<"doc_ids">> => [<<"id">>]})
+ ),
+ ?assertEqual(
+ #{<<"selector">> => #{<<"key">> => <<"value">>}},
+ convert_options(#{<<"selector">> => #{<<"key">> => <<"value">>}})
+ ).
check_convert_options_fail_test() ->
- ?assertThrow({error, _},
- convert_options(#{<<"cancel">> => <<"true">>})),
- ?assertThrow({error, _},
- convert_options(#{<<"create_target">> => <<"true">>})),
- ?assertThrow({error, _},
- convert_options(#{<<"continuous">> => <<"true">>})),
- ?assertThrow({error, _},
- convert_options(#{<<"doc_ids">> => <<"not_a_list">>})),
- ?assertThrow({error, _},
- convert_options(#{<<"selector">> => <<"bad">>})).
-
+ ?assertThrow(
+ {error, _},
+ convert_options(#{<<"cancel">> => <<"true">>})
+ ),
+ ?assertThrow(
+ {error, _},
+ convert_options(#{<<"create_target">> => <<"true">>})
+ ),
+ ?assertThrow(
+ {error, _},
+ convert_options(#{<<"continuous">> => <<"true">>})
+ ),
+ ?assertThrow(
+ {error, _},
+ convert_options(#{<<"doc_ids">> => <<"not_a_list">>})
+ ),
+ ?assertThrow(
+ {error, _},
+ convert_options(#{<<"selector">> => <<"bad">>})
+ ).
local_replication_endpoint_error_test_() ->
{
@@ -658,30 +705,30 @@ local_replication_endpoint_error_test_() ->
]
}.
-
setup() ->
meck:expect(config, get, fun(_, _, Default) -> Default end).
-
teardown(_) ->
meck:unload().
-
t_error_on_local_endpoint(_) ->
- RepDoc = {[
- {<<"_id">>, <<"someid">>},
- {<<"source">>, <<"localdb">>},
- {<<"target">>, <<"http://somehost.local/tgt">>}
- ]},
+ RepDoc =
+ {[
+ {<<"_id">>, <<"someid">>},
+ {<<"source">>, <<"localdb">>},
+ {<<"target">>, <<"http://somehost.local/tgt">>}
+ ]},
Expect = local_endpoints_not_supported,
?assertThrow({bad_rep_doc, Expect}, parse_rep_doc(RepDoc)).
-
remove_basic_auth_from_headers_test_() ->
B64 = list_to_binary(b64creds("user", "pass")),
- [?_assertEqual({{User, Pass}, NoAuthHeaders},
- remove_basic_auth_from_headers(Headers)) ||
- {{User, Pass, NoAuthHeaders}, Headers} <- [
+ [
+ ?_assertEqual(
+ {{User, Pass}, NoAuthHeaders},
+ remove_basic_auth_from_headers(Headers)
+ )
+ || {{User, Pass, NoAuthHeaders}, Headers} <- [
{
{undefined, undefined, #{}},
#{}
@@ -720,11 +767,9 @@ remove_basic_auth_from_headers_test_() ->
]
].
-
b64creds(User, Pass) ->
base64:encode_to_string(User ++ ":" ++ Pass).
-
set_basic_auth_creds_test() ->
Check = fun(User, Pass, Props) ->
HttpDb = #{<<"auth_props">> => Props},
@@ -734,180 +779,192 @@ set_basic_auth_creds_test() ->
?assertEqual(#{}, Check(undefined, undefined, #{})),
- ?assertEqual(#{<<"other">> => #{}}, Check(undefined, undefined,
- #{<<"other">> => #{}})),
-
- ?assertEqual(#{
- <<"basic">> => #{
- <<"username">> => <<"u">>,
- <<"password">> => <<"p">>
- }
- }, Check("u", "p", #{})),
-
- ?assertEqual(#{
- <<"other">> => #{},
- <<"basic">> => #{
- <<"username">> => <<"u">>,
- <<"password">> => <<"p">>
- }
- }, Check("u", "p", #{<<"other">> => #{}})).
-
-
-normalize_basic_creds_test_() ->
- DefaultHeaders = couch_replicator_utils:default_headers_map(),
- [?_assertEqual(Expect, normalize_basic_auth(Input)) || {Input, Expect} <- [
- {
- #{
- <<"url">> => <<"http://u:p@x.y/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => DefaultHeaders
- },
- #{
- <<"url">> => <<"http://x.y/db">>,
- <<"auth_props">> => auth_props("u", "p"),
- <<"headers">> => DefaultHeaders
+ ?assertEqual(
+ #{<<"other">> => #{}},
+ Check(
+ undefined,
+ undefined,
+ #{<<"other">> => #{}}
+ )
+ ),
+
+ ?assertEqual(
+ #{
+ <<"basic">> => #{
+ <<"username">> => <<"u">>,
+ <<"password">> => <<"p">>
}
},
- {
- #{
- <<"url">> => <<"http://u:p@h:80/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => DefaultHeaders
- },
- #{
- <<"url">> => <<"http://h:80/db">>,
- <<"auth_props">> => auth_props("u", "p"),
- <<"headers">> => DefaultHeaders
+ Check("u", "p", #{})
+ ),
+
+ ?assertEqual(
+ #{
+ <<"other">> => #{},
+ <<"basic">> => #{
+ <<"username">> => <<"u">>,
+ <<"password">> => <<"p">>
}
},
- {
- #{
- <<"url">> => <<"https://u:p@h/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => DefaultHeaders
+ Check("u", "p", #{<<"other">> => #{}})
+ ).
+
+normalize_basic_creds_test_() ->
+ DefaultHeaders = couch_replicator_utils:default_headers_map(),
+ [
+ ?_assertEqual(Expect, normalize_basic_auth(Input))
+ || {Input, Expect} <- [
+ {
+ #{
+ <<"url">> => <<"http://u:p@x.y/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => DefaultHeaders
+ },
+ #{
+ <<"url">> => <<"http://x.y/db">>,
+ <<"auth_props">> => auth_props("u", "p"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"https://h/db">>,
- <<"auth_props">> => auth_props("u", "p"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://u:p@[2001:db8:a1b:12f9::1]/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => DefaultHeaders
+ {
+ #{
+ <<"url">> => <<"http://u:p@h:80/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => DefaultHeaders
+ },
+ #{
+ <<"url">> => <<"http://h:80/db">>,
+ <<"auth_props">> => auth_props("u", "p"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://[2001:db8:a1b:12f9::1]/db">>,
- <<"auth_props">> => auth_props("u", "p"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => maps:merge(DefaultHeaders, #{
- <<"authorization">> => basic_b64("u", "p")
- })
+ {
+ #{
+ <<"url">> => <<"https://u:p@h/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => DefaultHeaders
+ },
+ #{
+ <<"url">> => <<"https://h/db">>,
+ <<"auth_props">> => auth_props("u", "p"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => auth_props("u", "p"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => maps:merge(DefaultHeaders, #{
- <<"authorization">> => basic_b64("u", "p@")
- })
+ {
+ #{
+ <<"url">> => <<"http://u:p@[2001:db8:a1b:12f9::1]/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => DefaultHeaders
+ },
+ #{
+ <<"url">> => <<"http://[2001:db8:a1b:12f9::1]/db">>,
+ <<"auth_props">> => auth_props("u", "p"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => auth_props("u", "p@"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => maps:merge(DefaultHeaders, #{
- <<"authorization">> => basic_b64("u", "p@%40")
- })
+ {
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => maps:merge(DefaultHeaders, #{
+ <<"authorization">> => basic_b64("u", "p")
+ })
+ },
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => auth_props("u", "p"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => auth_props("u", "p@%40"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => maps:merge(DefaultHeaders, #{
- <<"aUthoriZation">> => basic_b64("U", "p")
- })
+ {
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => maps:merge(DefaultHeaders, #{
+ <<"authorization">> => basic_b64("u", "p@")
+ })
+ },
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => auth_props("u", "p@"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => auth_props("U", "p"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://u1:p1@h/db">>,
- <<"auth_props">> => #{},
- <<"headers">> => maps:merge(DefaultHeaders, #{
- <<"Authorization">> => basic_b64("u2", "p2")
- })
+ {
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => maps:merge(DefaultHeaders, #{
+ <<"authorization">> => basic_b64("u", "p@%40")
+ })
+ },
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => auth_props("u", "p@%40"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => auth_props("u1", "p1"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://u1:p1@h/db">>,
- <<"auth_props">> => auth_props("u2", "p2"),
- <<"headers">> => DefaultHeaders
+ {
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => maps:merge(DefaultHeaders, #{
+ <<"aUthoriZation">> => basic_b64("U", "p")
+ })
+ },
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => auth_props("U", "p"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => auth_props("u2", "p2"),
- <<"headers">> => DefaultHeaders
- }
- },
- {
- #{
- <<"url">> => <<"http://u1:p1@h/db">>,
- <<"auth_props">> => auth_props("u2", "p2"),
- <<"headers">> => maps:merge(DefaultHeaders, #{
- <<"Authorization">> => basic_b64("u3", "p3")
- })
+ {
+ #{
+ <<"url">> => <<"http://u1:p1@h/db">>,
+ <<"auth_props">> => #{},
+ <<"headers">> => maps:merge(DefaultHeaders, #{
+ <<"Authorization">> => basic_b64("u2", "p2")
+ })
+ },
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => auth_props("u1", "p1"),
+ <<"headers">> => DefaultHeaders
+ }
},
- #{
- <<"url">> => <<"http://h/db">>,
- <<"auth_props">> => auth_props("u2", "p2"),
- <<"headers">> => DefaultHeaders
+ {
+ #{
+ <<"url">> => <<"http://u1:p1@h/db">>,
+ <<"auth_props">> => auth_props("u2", "p2"),
+ <<"headers">> => DefaultHeaders
+ },
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => auth_props("u2", "p2"),
+ <<"headers">> => DefaultHeaders
+ }
+ },
+ {
+ #{
+ <<"url">> => <<"http://u1:p1@h/db">>,
+ <<"auth_props">> => auth_props("u2", "p2"),
+ <<"headers">> => maps:merge(DefaultHeaders, #{
+ <<"Authorization">> => basic_b64("u3", "p3")
+ })
+ },
+ #{
+ <<"url">> => <<"http://h/db">>,
+ <<"auth_props">> => auth_props("u2", "p2"),
+ <<"headers">> => DefaultHeaders
+ }
}
- }
- ]].
-
+ ]
+ ].
basic_b64(User, Pass) when is_list(User), is_list(Pass) ->
B64Creds = list_to_binary(b64creds(User, Pass)),
<<"basic ", B64Creds/binary>>.
-
auth_props(User, Pass) when is_list(User), is_list(Pass) ->
#{
<<"basic">> => #{
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter.erl b/src/couch_replicator/src/couch_replicator_rate_limiter.erl
index b7b70945c..5d2c184b8 100644
--- a/src/couch_replicator/src/couch_replicator_rate_limiter.erl
+++ b/src/couch_replicator/src/couch_replicator_rate_limiter.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
% This module implements rate limiting based on a variation of the additive
% increase / multiplicative decrease feedback control algorithm.
%
@@ -36,29 +35,28 @@
% function is the current period value. The caller might then decide to sleep
% that amount of time before or after each request.
-
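
A minimal caller-side sketch of the feedback loop described above.
request_with_rate_limit/2, ReqFun and the key shape are illustrative; only
interval/1, success/1, failure/1 and max_interval/0 are real exports of this
module:

    request_with_rate_limit(Key, ReqFun) ->
        % Sleep for the backoff interval currently recommended for this key.
        timer:sleep(couch_replicator_rate_limiter:interval(Key)),
        case ReqFun() of
            {ok, _} = Ok ->
                % Successes feed the additive side of the controller.
                couch_replicator_rate_limiter:success(Key),
                Ok;
            {error, _} = Err ->
                % Failures back off multiplicatively, capped at max_interval().
                couch_replicator_rate_limiter:failure(Key),
                Err
        end.
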
-module(couch_replicator_rate_limiter).
-behaviour(gen_server).
-export([
- start_link/0
+ start_link/0
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3
]).
-export([
- interval/1,
- max_interval/0,
- failure/1,
- success/1
+ interval/1,
+ max_interval/0,
+ failure/1,
+ success/1
]).
% Types
@@ -66,7 +64,6 @@
-type interval() :: non_neg_integer().
-type msec() :: non_neg_integer().
-
% Definitions
% Main parameters of the algorithm. The factor is the multiplicative part and
@@ -98,79 +95,67 @@
% use something similar to solve the ACK compression problem).
-define(SENSITIVITY_TIME_WINDOW, 80).
-
-record(state, {timer}).
-record(rec, {id, backoff, ts}).
-
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-spec interval(key()) -> interval().
interval(Key) ->
{Interval, _Timestamp} = interval_and_timestamp(Key),
Interval.
-
-spec max_interval() -> interval().
max_interval() ->
?MAX_INTERVAL.
-
-spec failure(key()) -> interval().
failure(Key) ->
{Interval, Timestamp} = interval_and_timestamp(Key),
update_failure(Key, Interval, Timestamp, now_msec()).
-
-spec success(key()) -> interval().
success(Key) ->
{Interval, Timestamp} = interval_and_timestamp(Key),
update_success(Key, Interval, Timestamp, now_msec()).
-
% gen_server callbacks
init([]) ->
couch_replicator_rate_limiter_tables:create(#rec.id),
{ok, #state{timer = new_timer()}}.
-
terminate(_Reason, _State) ->
ok.
-
handle_call(_Msg, _From, State) ->
{reply, invalid, State}.
-
handle_cast(_, State) ->
{noreply, State}.
-
handle_info(cleanup, #state{timer = Timer}) ->
erlang:cancel_timer(Timer),
TIds = couch_replicator_rate_limiter_tables:tids(),
[cleanup_table(TId, now_msec() - ?MAX_INTERVAL) || TId <- TIds],
{noreply, #state{timer = new_timer()}}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
% Private functions
-spec update_success(any(), interval(), msec(), msec()) -> interval().
update_success(_Key, _Interval, _Timestamp = 0, _Now) ->
- 0; % No ets entry. Keep it that way and don't insert a new one.
-
-update_success(_Key, Interval, Timestamp, Now)
- when Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW ->
- Interval; % Ignore too frequent updates.
-
+ % No ets entry. Keep it that way and don't insert a new one.
+ 0;
+update_success(_Key, Interval, Timestamp, Now) when
+ Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW
+->
+ % Ignore too frequent updates.
+ Interval;
update_success(Key, Interval, Timestamp, Now) ->
DecayedInterval = time_decay(Now - Timestamp, Interval),
AdditiveFactor = additive_factor(DecayedInterval),
@@ -186,19 +171,18 @@ update_success(Key, Interval, Timestamp, Now) ->
insert(Key, NewInterval, Now)
end.
-
-spec update_failure(any(), interval(), msec(), msec()) -> interval().
-update_failure(_Key, Interval, Timestamp, Now)
- when Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW ->
- Interval; % Ignore too frequent updates.
-
+update_failure(_Key, Interval, Timestamp, Now) when
+ Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW
+->
+ % Ignore too frequent updates.
+ Interval;
update_failure(Key, Interval, _Timestamp, Now) ->
Interval1 = erlang:max(Interval, ?BASE_INTERVAL),
Interval2 = round(Interval1 * ?BACKOFF_FACTOR),
Interval3 = erlang:min(Interval2, ?MAX_INTERVAL),
insert(Key, Interval3, Now).
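
A worked example of the failure path above, with placeholder macro values
(the actual ?BASE_INTERVAL, ?BACKOFF_FACTOR and ?MAX_INTERVAL are defined
earlier in this module and may differ):

    % Assume ?BASE_INTERVAL = 20, ?BACKOFF_FACTOR = 1.25, ?MAX_INTERVAL = 25000
    % (all msec). First failure with Interval = 0:
    %   Interval1 = max(0, 20)       = 20
    %   Interval2 = round(20 * 1.25) = 25
    %   Interval3 = min(25, 25000)   = 25
    % Repeated failures grow the interval geometrically (25, 31, 39, ...)
    % until it saturates at ?MAX_INTERVAL.
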
-
-spec insert(any(), interval(), msec()) -> interval().
insert(Key, Interval, Timestamp) ->
Entry = #rec{id = Key, backoff = Interval, ts = Timestamp},
@@ -206,7 +190,6 @@ insert(Key, Interval, Timestamp) ->
ets:insert(Table, Entry),
Interval.
-
-spec interval_and_timestamp(key()) -> {interval(), msec()}.
interval_and_timestamp(Key) ->
Table = couch_replicator_rate_limiter_tables:term_to_table(Key),
@@ -217,16 +200,13 @@ interval_and_timestamp(Key) ->
{Interval, Timestamp}
end.
-
-spec time_decay(msec(), interval()) -> interval().
time_decay(Dt, Interval) when Dt > ?TIME_DECAY_THRESHOLD ->
DecayedInterval = Interval - ?TIME_DECAY_FACTOR * Dt,
erlang:max(round(DecayedInterval), 0);
-
time_decay(_Dt, Interval) ->
Interval.
-
% Calculate additive factor. Ideally it would be a constant but in this case
% it is a step function to help handle larger values as they are approaching
% the backoff limit. Large success values closer to the limit add some
@@ -243,18 +223,15 @@ additive_factor(Interval) when Interval > 100 ->
additive_factor(_Interval) ->
?BASE_INTERVAL.
-
-spec new_timer() -> reference().
new_timer() ->
erlang:send_after(?MAX_INTERVAL * 2, self(), cleanup).
-
-spec now_msec() -> msec().
now_msec() ->
{Mega, Sec, Micro} = os:timestamp(),
((Mega * 1000000) + Sec) * 1000 + Micro div 1000.
-
-spec cleanup_table(atom(), msec()) -> non_neg_integer().
cleanup_table(Tid, LimitMSec) ->
Head = #rec{ts = '$1', _ = '_'},
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl b/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
index 72892b410..2e2556888 100644
--- a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
+++ b/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
% Create and look up the sharded ETS tables used by the replicator's rate
% limiter. Keys are hashed onto one of ?SHARDS_N named tables so that
% concurrent updates are spread across shards.
@@ -26,37 +25,32 @@
-module(couch_replicator_rate_limiter_tables).
-export([
- create/1,
- tids/0,
- term_to_table/1
+ create/1,
+ tids/0,
+ term_to_table/1
]).
-define(SHARDS_N, 16).
-
-spec create(non_neg_integer()) -> ok.
create(KeyPos) ->
Opts = [named_table, public, {keypos, KeyPos}, {read_concurrency, true}],
[ets:new(list_to_atom(TableName), Opts) || TableName <- table_names()],
ok.
-
-spec tids() -> [atom()].
tids() ->
[list_to_existing_atom(TableName) || TableName <- table_names()].
-
-spec term_to_table(any()) -> atom().
term_to_table(Term) ->
PHash = erlang:phash2(Term),
list_to_existing_atom(table_name(PHash rem ?SHARDS_N)).
-
-spec table_names() -> [string()].
table_names() ->
[table_name(N) || N <- lists:seq(0, ?SHARDS_N - 1)].
-
-spec table_name(non_neg_integer()) -> string().
table_name(Id) when is_integer(Id), Id >= 0 andalso Id < ?SHARDS_N ->
atom_to_list(?MODULE) ++ "_" ++ integer_to_list(Id).
diff --git a/src/couch_replicator/src/couch_replicator_stats.erl b/src/couch_replicator/src/couch_replicator_stats.erl
index 69e60a05c..ab96123f5 100644
--- a/src/couch_replicator/src/couch_replicator_stats.erl
+++ b/src/couch_replicator/src/couch_replicator_stats.erl
@@ -65,31 +65,25 @@ increment(Field, Stats) ->
orddict:update_counter(Field, 1, Stats).
sum_stats(S1, S2) ->
- orddict:merge(fun(_, V1, V2) -> V1+V2 end, S1, S2).
+ orddict:merge(fun(_, V1, V2) -> V1 + V2 end, S1, S2).
max_stats(S1, S2) ->
orddict:merge(fun(_, V1, V2) -> max(V1, V2) end, S1, S2).
-
% Handle initializing from a status object, which uses the same values but
% different field names, as well as from ejson props from the checkpoint
% history.
%
-fmap({missing_found, _}) -> true;
-fmap({missing_revisions_found, V}) -> {true, {missing_found, V}};
-fmap({<<"missing_found">>, V}) -> {true, {missing_found, V}};
-
-fmap({missing_checked, _}) -> true;
-fmap({revisions_checked, V}) -> {true, {missing_checked, V}};
-fmap({<<"missing_checked">>, V}) -> {true, {missing_checked, V}};
-
-fmap({docs_read, _}) -> true;
-fmap({<<"docs_read">>, V}) -> {true, {docs_read, V}};
-
-fmap({docs_written, _}) -> true;
-fmap({<<"docs_written">>, V}) -> {true, {docs_written, V}};
-
-fmap({doc_write_failures, _}) -> true;
-fmap({<<"doc_write_failures">>, V}) -> {true, {doc_write_failures, V}};
-
-fmap({_, _}) -> false.
+fmap({missing_found, _}) -> true;
+fmap({missing_revisions_found, V}) -> {true, {missing_found, V}};
+fmap({<<"missing_found">>, V}) -> {true, {missing_found, V}};
+fmap({missing_checked, _}) -> true;
+fmap({revisions_checked, V}) -> {true, {missing_checked, V}};
+fmap({<<"missing_checked">>, V}) -> {true, {missing_checked, V}};
+fmap({docs_read, _}) -> true;
+fmap({<<"docs_read">>, V}) -> {true, {docs_read, V}};
+fmap({docs_written, _}) -> true;
+fmap({<<"docs_written">>, V}) -> {true, {docs_written, V}};
+fmap({doc_write_failures, _}) -> true;
+fmap({<<"doc_write_failures">>, V}) -> {true, {doc_write_failures, V}};
+fmap({_, _}) -> false.
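
fmap/1 follows the lists:filtermap/2 convention: true keeps the pair as-is,
{true, New} rewrites it, false drops it. A hedged usage sketch (the actual
call site is not part of this hunk):

    % Normalize checkpoint-history props into internal stat names:
    Props = [{<<"docs_read">>, 4}, {revisions_checked, 7}, {other, 1}],
    [{docs_read, 4}, {missing_checked, 7}] = lists:filtermap(fun fmap/1, Props).
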
diff --git a/src/couch_replicator/src/couch_replicator_sup.erl b/src/couch_replicator/src/couch_replicator_sup.erl
index 49d412aaa..e1cae9200 100644
--- a/src/couch_replicator/src/couch_replicator_sup.erl
+++ b/src/couch_replicator/src/couch_replicator_sup.erl
@@ -13,10 +13,8 @@
-module(couch_replicator_sup).
-
-behaviour(supervisor).
-
-export([
start_link/0
]).
@@ -25,29 +23,27 @@
init/1
]).
-
start_link() ->
Backend = fabric2_node_types:is_type(replication),
Frontend = fabric2_node_types:is_type(api_frontend),
Arg = {Backend, Frontend},
supervisor:start_link({local, ?MODULE}, ?MODULE, Arg).
-
init({Backend, Frontend}) ->
- Children = case {Backend, Frontend} of
- {true, true} -> backend() ++ frontend();
- {true, false} -> backend();
- {false, true} -> frontend();
- {false, false} -> []
- end,
- Flags = #{
+ Children =
+ case {Backend, Frontend} of
+ {true, true} -> backend() ++ frontend();
+ {true, false} -> backend();
+ {false, true} -> frontend();
+ {false, false} -> []
+ end,
+ Flags = #{
strategy => rest_for_one,
intensity => 1,
period => 5
},
{ok, {Flags, Children}}.
-
backend() ->
Timeout = 5000,
[
@@ -66,7 +62,6 @@ backend() ->
}
].
-
frontend() ->
[
#{
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
index c60cf5682..570644037 100644
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ b/src/couch_replicator/src/couch_replicator_utils.erl
@@ -13,46 +13,42 @@
-module(couch_replicator_utils).
-export([
- rep_error_to_binary/1,
- iso8601/0,
- iso8601/1,
- rfc1123_local/0,
- rfc1123_local/1,
- normalize_rep/1,
- compare_reps/2,
- default_headers_map/0,
- parse_replication_states/1,
- parse_int_param/5,
- get_basic_auth_creds/1,
- proplist_options/1
+ rep_error_to_binary/1,
+ iso8601/0,
+ iso8601/1,
+ rfc1123_local/0,
+ rfc1123_local/1,
+ normalize_rep/1,
+ compare_reps/2,
+ default_headers_map/0,
+ parse_replication_states/1,
+ parse_int_param/5,
+ get_basic_auth_creds/1,
+ proplist_options/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
rep_error_to_binary(Error) ->
couch_util:to_binary(error_reason(Error)).
-
error_reason({shutdown, Error}) ->
error_reason(Error);
-error_reason({error, {Error, Reason}})
- when is_atom(Error), is_binary(Reason) ->
+error_reason({error, {Error, Reason}}) when
+ is_atom(Error), is_binary(Reason)
+->
io_lib:format("~s: ~s", [Error, Reason]);
error_reason({error, Reason}) ->
Reason;
error_reason(Reason) ->
Reason.
-
-spec iso8601() -> binary().
iso8601() ->
iso8601(erlang:system_time(second)).
-
-spec iso8601(integer()) -> binary().
iso8601(Sec) when is_integer(Sec) ->
Time = unix_sec_to_timestamp(Sec),
@@ -60,32 +56,27 @@ iso8601(Sec) when is_integer(Sec) ->
Format = "~B-~2..0B-~2..0BT~2..0B:~2..0B:~2..0BZ",
iolist_to_binary(io_lib:format(Format, [Y, Mon, D, H, Min, S])).
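
For example (a sketch; epoch seconds render as zero-padded UTC):

    iso8601(0).
    % => <<"1970-01-01T00:00:00Z">>
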
-
rfc1123_local() ->
list_to_binary(httpd_util:rfc1123_date()).
-
rfc1123_local(Sec) ->
Time = unix_sec_to_timestamp(Sec),
Local = calendar:now_to_local_time(Time),
list_to_binary(httpd_util:rfc1123_date(Local)).
-
-spec compare_reps(#{} | null, #{} | null) -> boolean().
compare_reps(Rep1, Rep2) ->
NormRep1 = normalize_rep(Rep1),
NormRep2 = normalize_rep(Rep2),
NormRep1 =:= NormRep2.
-
% Normalize a rep map such that it doesn't contain time-dependent fields or
% pids (like httpc pools), and options / props are sorted. This function is
% used during comparisons.
-spec normalize_rep(#{} | null) -> #{} | null.
normalize_rep(null) ->
null;
-
-normalize_rep(#{} = Rep)->
+normalize_rep(#{} = Rep) ->
#{
?SOURCE := Source,
?TARGET := Target,
@@ -97,10 +88,8 @@ normalize_rep(#{} = Rep)->
?OPTIONS => Options
}.
-
normalize_endpoint(<<DbName/binary>>) ->
DbName;
-
normalize_endpoint(#{} = Endpoint) ->
Ks = [
<<"url">>,
@@ -114,16 +103,18 @@ normalize_endpoint(#{} = Endpoint) ->
],
maps:with(Ks, Endpoint).
-
default_headers_map() ->
- lists:foldl(fun({K, V}, Acc) ->
- Acc#{list_to_binary(K) => list_to_binary(V)}
- end, #{}, (#httpdb{})#httpdb.headers).
-
+ lists:foldl(
+ fun({K, V}, Acc) ->
+ Acc#{list_to_binary(K) => list_to_binary(V)}
+ end,
+ #{},
+ (#httpdb{})#httpdb.headers
+ ).
parse_replication_states(undefined) ->
- []; % This is the default (wildcard) filter
-
+ % This is the default (wildcard) filter
+ [];
parse_replication_states(States) when is_list(States) ->
All = [?ST_RUNNING, ?ST_FAILED, ?ST_COMPLETED, ?ST_PENDING, ?ST_CRASHING],
AllSet = sets:from_list(All),
@@ -139,40 +130,41 @@ parse_replication_states(States) when is_list(States) ->
throw({query_parse_error, ?l2b(Msg2)})
end.
-
parse_int_param(Req, Param, Default, Min, Max) ->
- IntVal = try
- list_to_integer(chttpd:qs_value(Req, Param, integer_to_list(Default)))
- catch error:badarg ->
- Msg1 = io_lib:format("~s must be an integer", [Param]),
- throw({query_parse_error, ?l2b(Msg1)})
- end,
+ IntVal =
+ try
+ list_to_integer(chttpd:qs_value(Req, Param, integer_to_list(Default)))
+ catch
+ error:badarg ->
+ Msg1 = io_lib:format("~s must be an integer", [Param]),
+ throw({query_parse_error, ?l2b(Msg1)})
+ end,
case IntVal >= Min andalso IntVal =< Max of
- true ->
- IntVal;
- false ->
- Msg2 = io_lib:format("~s not in range of [~w,~w]", [Param, Min, Max]),
- throw({query_parse_error, ?l2b(Msg2)})
+ true ->
+ IntVal;
+ false ->
+ Msg2 = io_lib:format("~s not in range of [~w,~w]", [Param, Min, Max]),
+ throw({query_parse_error, ?l2b(Msg2)})
end.
-
proplist_options(#{} = OptionsMap) ->
- maps:fold(fun(K, V, Acc) ->
- [{binary_to_atom(K, utf8), V} | Acc]
- end, [], OptionsMap).
-
+ maps:fold(
+ fun(K, V, Acc) ->
+ [{binary_to_atom(K, utf8), V} | Acc]
+ end,
+ [],
+ OptionsMap
+ ).
unix_sec_to_timestamp(Sec) when is_integer(Sec) ->
MegaSecPart = Sec div 1000000,
SecPart = Sec - MegaSecPart * 1000000,
{MegaSecPart, SecPart, 0}.
-
-spec get_basic_auth_creds(#httpdb{} | map()) ->
{string(), string()} | {undefined, undefined}.
get_basic_auth_creds(#httpdb{auth_props = AuthProps}) ->
get_basic_auth_creds(#{<<"auth_props">> => AuthProps});
-
get_basic_auth_creds(#{<<"auth_props">> := Props}) ->
case Props of
#{<<"basic">> := Basic} ->
@@ -188,7 +180,6 @@ get_basic_auth_creds(#{<<"auth_props">> := Props}) ->
{undefined, undefined}
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -196,52 +187,56 @@ get_basic_auth_creds(#{<<"auth_props">> := Props}) ->
normalize_rep_test_() ->
{
setup,
- fun() -> meck:expect(config, get,
- fun(_, _, Default) -> Default end)
+ fun() ->
+ meck:expect(
+ config,
+ get,
+ fun(_, _, Default) -> Default end
+ )
end,
fun(_) -> meck:unload() end,
?_test(begin
- EJson1 = {[
- {<<"source">>, <<"http://host.com/source_db">>},
- {<<"target">>, <<"http://target.local/db">>},
- {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]},
- {<<"other_field">>, <<"some_value">>}
- ]},
+ EJson1 =
+ {[
+ {<<"source">>, <<"http://host.com/source_db">>},
+ {<<"target">>, <<"http://target.local/db">>},
+ {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]},
+ {<<"other_field">>, <<"some_value">>}
+ ]},
Rep1 = couch_replicator_parse:parse_rep_doc(EJson1),
- EJson2 = {[
- {<<"other_field">>, <<"unrelated">>},
- {<<"target">>, <<"http://target.local/db">>},
- {<<"source">>, <<"http://host.com/source_db">>},
- {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]},
- {<<"other_field2">>, <<"unrelated2">>}
- ]},
+ EJson2 =
+ {[
+ {<<"other_field">>, <<"unrelated">>},
+ {<<"target">>, <<"http://target.local/db">>},
+ {<<"source">>, <<"http://host.com/source_db">>},
+ {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]},
+ {<<"other_field2">>, <<"unrelated2">>}
+ ]},
Rep2 = couch_replicator_parse:parse_rep_doc(EJson2),
?assertEqual(normalize_rep(Rep1), normalize_rep(Rep2))
end)
}.
-
normalize_endpoint() ->
- HttpDb = #httpdb{
+ HttpDb = #httpdb{
url = "http://host/db",
auth_props = #{
"key" => "val",
"nested" => #{<<"other_key">> => "other_val"}
},
- headers = [{"k2","v2"}, {"k1","v1"}],
+ headers = [{"k2", "v2"}, {"k1", "v1"}],
timeout = 30000,
ibrowse_options = [{k2, v2}, {k1, v1}],
retries = 10,
http_connections = 20
},
Expected = HttpDb#httpdb{
- headers = [{"k1","v1"}, {"k2","v2"}],
+ headers = [{"k1", "v1"}, {"k2", "v2"}],
ibrowse_options = [{k1, v1}, {k2, v2}]
},
?assertEqual(Expected, normalize_endpoint(HttpDb)),
?assertEqual(<<"local">>, normalize_endpoint(<<"local">>)).
-
get_basic_auth_creds_from_httpdb_test() ->
Check = fun(Props) ->
get_basic_auth_creds(#{<<"auth_props">> => Props})
@@ -261,5 +256,4 @@ get_basic_auth_creds_from_httpdb_test() ->
UserPass2 = #{<<"username">> => <<"u">>, <<"password">> => null},
?assertEqual({undefined, undefined}, Check(#{<<"basic">> => UserPass2})).
-
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index b57727f98..acf1730ed 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -28,8 +28,11 @@
-include_lib("kernel/include/logger.hrl").
% TODO: maybe make both buffer max sizes configurable
--define(DOC_BUFFER_BYTE_SIZE, 512 * 1024). % for remote targets
--define(STATS_DELAY, 10000000). % 10 seconds (in microseconds)
+
+% for remote targets
+-define(DOC_BUFFER_BYTE_SIZE, 512 * 1024).
+% 10 seconds (in microseconds)
+-define(STATS_DELAY, 10000000).
-define(MISSING_DOC_RETRY_MSEC, 2000).
-import(couch_util, [
@@ -37,7 +40,6 @@
get_value/3
]).
-
-record(batch, {
docs = [],
size = 0
@@ -57,12 +59,10 @@
batch = #batch{}
}).
-
-
start_link(Cp, #httpdb{} = Source, Target, ChangesManager, MaxConns) ->
gen_server:start_link(
- ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []).
-
+ ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []
+ ).
init({Cp, Source, Target, ChangesManager, MaxConns}) ->
process_flag(trap_exit, true),
@@ -80,108 +80,122 @@ init({Cp, Source, Target, ChangesManager, MaxConns}) ->
},
{ok, State}.
-
-handle_call({fetch_doc, {_Id, _Revs, _PAs} = Params}, {Pid, _} = From,
- #state{loop = Pid, readers = Readers, pending_fetch = nil,
- source = Src, target = Tgt, max_parallel_conns = MaxConns} = State) ->
+handle_call(
+ {fetch_doc, {_Id, _Revs, _PAs} = Params},
+ {Pid, _} = From,
+ #state{
+ loop = Pid,
+ readers = Readers,
+ pending_fetch = nil,
+ source = Src,
+ target = Tgt,
+ max_parallel_conns = MaxConns
+ } = State
+) ->
case length(Readers) of
- Size when Size < MaxConns ->
- Reader = spawn_doc_reader(Src, Tgt, Params),
- NewState = State#state{
- readers = [Reader | Readers]
- },
- {reply, ok, NewState};
- _ ->
- NewState = State#state{
- pending_fetch = {From, Params}
- },
- {noreply, NewState}
+ Size when Size < MaxConns ->
+ Reader = spawn_doc_reader(Src, Tgt, Params),
+ NewState = State#state{
+ readers = [Reader | Readers]
+ },
+ {reply, ok, NewState};
+ _ ->
+ NewState = State#state{
+ pending_fetch = {From, Params}
+ },
+ {noreply, NewState}
end;
-
handle_call({batch_doc, Doc}, From, State) ->
gen_server:reply(From, ok),
{noreply, maybe_flush_docs(Doc, State)};
-
handle_call({add_stats, IncStats}, From, #state{stats = Stats} = State) ->
gen_server:reply(From, ok),
NewStats = couch_replicator_stats:sum_stats(Stats, IncStats),
NewStats2 = maybe_report_stats(State#state.cp, NewStats),
{noreply, State#state{stats = NewStats2}};
-
-handle_call(flush, {Pid, _} = From,
- #state{loop = Pid, writer = nil, flush_waiter = nil,
- target = Target, batch = Batch} = State) ->
- State2 = case State#state.readers of
- [] ->
- State#state{writer = spawn_writer(Target, Batch)};
- _ ->
- State
- end,
+handle_call(
+ flush,
+ {Pid, _} = From,
+ #state{
+ loop = Pid,
+ writer = nil,
+ flush_waiter = nil,
+ target = Target,
+ batch = Batch
+ } = State
+) ->
+ State2 =
+ case State#state.readers of
+ [] ->
+ State#state{writer = spawn_writer(Target, Batch)};
+ _ ->
+ State
+ end,
{noreply, State2#state{flush_waiter = From}}.
-
handle_cast(Msg, State) ->
{stop, {unexpected_async_call, Msg}, State}.
-
handle_info({'EXIT', Pid, normal}, #state{loop = Pid} = State) ->
#state{
- batch = #batch{docs = []}, readers = [], writer = nil,
- pending_fetch = nil, flush_waiter = nil
+ batch = #batch{docs = []},
+ readers = [],
+ writer = nil,
+ pending_fetch = nil,
+ flush_waiter = nil
} = State,
{stop, normal, State};
-
handle_info({'EXIT', Pid, normal}, #state{writer = Pid} = State) ->
{noreply, after_full_flush(State)};
-
handle_info({'EXIT', Pid, normal}, #state{writer = nil} = State) ->
#state{
- readers = Readers, writer = Writer, batch = Batch,
- source = Source, target = Target,
- pending_fetch = Fetch, flush_waiter = FlushWaiter
+ readers = Readers,
+ writer = Writer,
+ batch = Batch,
+ source = Source,
+ target = Target,
+ pending_fetch = Fetch,
+ flush_waiter = FlushWaiter
} = State,
case Readers -- [Pid] of
- Readers ->
- {noreply, State};
- Readers2 ->
- State2 = case Fetch of
- nil ->
- case (FlushWaiter =/= nil) andalso (Writer =:= nil) andalso
- (Readers2 =:= []) of
- true ->
- State#state{
- readers = Readers2,
- writer = spawn_writer(Target, Batch)
- };
- false ->
- State#state{readers = Readers2}
- end;
- {From, FetchParams} ->
- Reader = spawn_doc_reader(Source, Target, FetchParams),
- gen_server:reply(From, ok),
- State#state{
- readers = [Reader | Readers2],
- pending_fetch = nil
- }
- end,
- {noreply, State2}
+ Readers ->
+ {noreply, State};
+ Readers2 ->
+ State2 =
+ case Fetch of
+ nil ->
+ case
+ (FlushWaiter =/= nil) andalso (Writer =:= nil) andalso
+ (Readers2 =:= [])
+ of
+ true ->
+ State#state{
+ readers = Readers2,
+ writer = spawn_writer(Target, Batch)
+ };
+ false ->
+ State#state{readers = Readers2}
+ end;
+ {From, FetchParams} ->
+ Reader = spawn_doc_reader(Source, Target, FetchParams),
+ gen_server:reply(From, ok),
+ State#state{
+ readers = [Reader | Readers2],
+ pending_fetch = nil
+ }
+ end,
+ {noreply, State2}
end;
-
handle_info({'EXIT', _Pid, max_backoff}, State) ->
{stop, {shutdown, max_backoff}, State};
-
handle_info({'EXIT', _Pid, {bulk_docs_failed, _, _} = Err}, State) ->
{stop, {shutdown, Err}, State};
-
handle_info({'EXIT', _Pid, {revs_diff_failed, _, _} = Err}, State) ->
{stop, {shutdown, Err}, State};
-
handle_info({'EXIT', _Pid, {http_request_failed, _, _, _} = Err}, State) ->
{stop, {shutdown, Err}, State};
-
handle_info({'EXIT', Pid, Reason}, State) ->
- {stop, {process_died, Pid, Reason}, State}.
-
+ {stop, {process_died, Pid, Reason}, State}.
terminate(_Reason, _State) ->
ok.
@@ -209,36 +223,33 @@ format_status(_Opt, [_PDict, State]) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
ChangesManager ! {get_changes, self()},
receive
- {closed, ChangesManager} ->
- ok;
- {changes, ChangesManager, [], ReportSeq} ->
- Stats = couch_replicator_stats:new(),
- ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
- queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager);
- {changes, ChangesManager, Changes, ReportSeq} ->
- {IdRevs, Stats0} = find_missing(Changes, Target),
- ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
- remote_process_batch(IdRevs, Parent),
- {ok, Stats} = gen_server:call(Parent, flush, infinity),
- ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
- erlang:put(last_stats_report, os:timestamp()),
- ?LOG_DEBUG(#{
- what => worker_progress_report,
- in => replicator,
- seq => ReportSeq
- }),
- couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
- queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
+ {closed, ChangesManager} ->
+ ok;
+ {changes, ChangesManager, [], ReportSeq} ->
+ Stats = couch_replicator_stats:new(),
+ ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
+ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager);
+ {changes, ChangesManager, Changes, ReportSeq} ->
+ {IdRevs, Stats0} = find_missing(Changes, Target),
+ ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
+ remote_process_batch(IdRevs, Parent),
+ {ok, Stats} = gen_server:call(Parent, flush, infinity),
+ ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
+ erlang:put(last_stats_report, os:timestamp()),
+ ?LOG_DEBUG(#{
+ what => worker_progress_report,
+ in => replicator,
+ seq => ReportSeq
+ }),
+ couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
+ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
end.
-
remote_process_batch([], _Parent) ->
ok;
-
remote_process_batch([{Id, Revs, PAs} | Rest], Parent) ->
% When the source is a remote database, we fetch a single document revision
% per HTTP request. This is mostly to facilitate retrying of HTTP requests
@@ -248,60 +259,72 @@ remote_process_batch([{Id, Revs, PAs} | Rest], Parent) ->
fun(Rev) ->
ok = gen_server:call(Parent, {fetch_doc, {Id, [Rev], PAs}}, infinity)
end,
- Revs),
+ Revs
+ ),
remote_process_batch(Rest, Parent).
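
Concretely, a change with two revisions unrolls into two sequential calls
(illustrative values):

    % {<<"docA">>, [R1, R2], PAs} expands into:
    %   gen_server:call(Parent, {fetch_doc, {<<"docA">>, [R1], PAs}}, infinity),
    %   gen_server:call(Parent, {fetch_doc, {<<"docA">>, [R2], PAs}}, infinity),
    % before recursing on the rest of the batch.
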
-
spawn_doc_reader(Source, Target, FetchParams) ->
Parent = self(),
spawn_link(fun() ->
fetch_doc(
- Source, FetchParams, fun remote_doc_handler/2, {Parent, Target})
+ Source, FetchParams, fun remote_doc_handler/2, {Parent, Target}
+ )
end).
-
fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
try
couch_replicator_api_wrap:open_doc_revs(
- Source, Id, Revs, [{atts_since, PAs}, latest], DocHandler, Acc)
+ Source, Id, Revs, [{atts_since, PAs}, latest], DocHandler, Acc
+ )
catch
- throw:missing_doc ->
- couch_log:error("Retrying fetch and update of document `~s` as it is "
- "unexpectedly missing. Missing revisions are: ~s",
- [Id, couch_doc:revs_to_strs(Revs)]),
- WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
- ?MISSING_DOC_RETRY_MSEC),
- ?LOG_ERROR(#{
- what => missing_document,
- in => replicator,
- source => couch_replicator_api_wrap:db_uri(Source),
- docid => Id,
- revisions => couch_doc:revs_to_strs(Revs),
- retry_delay_sec => WaitMSec / 1000
- }),
- timer:sleep(WaitMSec),
- couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc);
- throw:{missing_stub, _} ->
- couch_log:error("Retrying fetch and update of document `~s` due to out of "
- "sync attachment stubs. Missing revisions are: ~s",
- [Id, couch_doc:revs_to_strs(Revs)]),
- WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
- ?MISSING_DOC_RETRY_MSEC),
- ?LOG_ERROR(#{
- what => missing_attachment_stub,
- in => replicator,
- source => couch_replicator_api_wrap:db_uri(Source),
- docid => Id,
- revisions => couch_doc:revs_to_strs(Revs),
- retry_delay_sec => WaitMSec / 1000
- }),
- timer:sleep(WaitMSec),
- couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
+ throw:missing_doc ->
+ couch_log:error(
+ "Retrying fetch and update of document `~s` as it is "
+ "unexpectedly missing. Missing revisions are: ~s",
+ [Id, couch_doc:revs_to_strs(Revs)]
+ ),
+ WaitMSec = config:get_integer(
+ "replicator",
+ "missing_doc_retry_msec",
+ ?MISSING_DOC_RETRY_MSEC
+ ),
+ ?LOG_ERROR(#{
+ what => missing_document,
+ in => replicator,
+ source => couch_replicator_api_wrap:db_uri(Source),
+ docid => Id,
+ revisions => couch_doc:revs_to_strs(Revs),
+ retry_delay_sec => WaitMSec / 1000
+ }),
+ timer:sleep(WaitMSec),
+ couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc);
+ throw:{missing_stub, _} ->
+ couch_log:error(
+ "Retrying fetch and update of document `~s` due to out of "
+ "sync attachment stubs. Missing revisions are: ~s",
+ [Id, couch_doc:revs_to_strs(Revs)]
+ ),
+ WaitMSec = config:get_integer(
+ "replicator",
+ "missing_doc_retry_msec",
+ ?MISSING_DOC_RETRY_MSEC
+ ),
+ ?LOG_ERROR(#{
+ what => missing_attachment_stub,
+ in => replicator,
+ source => couch_replicator_api_wrap:db_uri(Source),
+ docid => Id,
+ revisions => couch_doc:revs_to_strs(Revs),
+ retry_delay_sec => WaitMSec / 1000
+ }),
+ timer:sleep(WaitMSec),
+ couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
end.
-
-remote_doc_handler({ok, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc},
- Acc) ->
+remote_doc_handler(
+ {ok, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc},
+ Acc
+) ->
% Flush design docs in their own PUT requests to correctly process
% authorization failures for design doc updates.
?LOG_DEBUG(#{what => flush_ddoc, in => replicator}),
@@ -321,39 +344,38 @@ remote_doc_handler({ok, #doc{atts = []} = Doc}, {Parent, _} = Acc) ->
remote_doc_handler({{not_found, missing}, _}, _Acc) ->
throw(missing_doc).
-
doc_handler_flush_doc(#doc{} = Doc, {Parent, Target} = Acc) ->
Stats = couch_replicator_stats:new([{docs_read, 1}]),
Success = (flush_doc(Target, Doc) =:= ok),
- {Result, Stats2} = case Success of
- true ->
- {{ok, Acc}, couch_replicator_stats:increment(docs_written, Stats)};
- false ->
- {{skip, Acc}, couch_replicator_stats:increment(doc_write_failures, Stats)}
- end,
+ {Result, Stats2} =
+ case Success of
+ true ->
+ {{ok, Acc}, couch_replicator_stats:increment(docs_written, Stats)};
+ false ->
+ {{skip, Acc}, couch_replicator_stats:increment(doc_write_failures, Stats)}
+ end,
ok = gen_server:call(Parent, {add_stats, Stats2}, infinity),
Result.
-
spawn_writer(Target, #batch{docs = DocList, size = Size}) ->
case {Target, Size > 0} of
- {#httpdb{}, true} ->
- ?LOG_DEBUG(#{
- what => flush_doc_batch,
- in => replicator,
- batch_size_bytes => Size
- }),
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
- _ ->
- ok
+ {#httpdb{}, true} ->
+ ?LOG_DEBUG(#{
+ what => flush_doc_batch,
+ in => replicator,
+ batch_size_bytes => Size
+ }),
+ couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
+ _ ->
+ ok
end,
Parent = self(),
spawn_link(
fun() ->
Stats = flush_docs(Target, DocList),
ok = gen_server:call(Parent, {add_stats, Stats}, infinity)
- end).
-
+ end
+ ).
after_full_flush(#state{stats = Stats, flush_waiter = Waiter} = State) ->
gen_server:reply(Waiter, {ok, Stats}),
@@ -365,11 +387,12 @@ after_full_flush(#state{stats = Stats, flush_waiter = Waiter} = State) ->
batch = #batch{}
}.
-
-maybe_flush_docs(Doc,State) ->
+maybe_flush_docs(Doc, State) ->
#state{
- target = Target, batch = Batch,
- stats = Stats, cp = Cp
+ target = Target,
+ batch = Batch,
+ stats = Stats,
+ cp = Cp
} = State,
{Batch2, WStats} = maybe_flush_docs(Target, Batch, Doc),
Stats2 = couch_replicator_stats:sum_stats(Stats, WStats),
@@ -377,34 +400,35 @@ maybe_flush_docs(Doc,State) ->
Stats4 = maybe_report_stats(Cp, Stats3),
State#state{stats = Stats4, batch = Batch2}.
-
maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
#batch{docs = DocAcc, size = SizeAcc} = Batch,
JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
case SizeAcc + iolist_size(JsonDoc) of
- SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
- ?LOG_DEBUG(#{
- what => flush_doc_batch,
- in => replicator,
- batch_size_bytes => SizeAcc2
- }),
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
- Stats = flush_docs(Target, [JsonDoc | DocAcc]),
- {#batch{}, Stats};
- SizeAcc2 ->
- Stats = couch_replicator_stats:new(),
- {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
+ SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
+ ?LOG_DEBUG(#{
+ what => flush_doc_batch,
+ in => replicator,
+ batch_size_bytes => SizeAcc2
+ }),
+ couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
+ Stats = flush_docs(Target, [JsonDoc | DocAcc]),
+ {#batch{}, Stats};
+ SizeAcc2 ->
+ Stats = couch_replicator_stats:new(),
+ {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
end.
-
flush_docs(_Target, []) ->
couch_replicator_stats:new();
flush_docs(Target, DocList) ->
- FlushResult = couch_replicator_api_wrap:update_docs(Target, DocList,
- [delay_commit], replicated_changes),
+ FlushResult = couch_replicator_api_wrap:update_docs(
+ Target,
+ DocList,
+ [delay_commit],
+ replicated_changes
+ ),
handle_flush_docs_result(FlushResult, Target, DocList).
-
handle_flush_docs_result({error, request_body_too_large}, Target, [Doc]) ->
?LOG_ERROR(#{
what => doc_write_failure,
@@ -426,10 +450,17 @@ handle_flush_docs_result({error, request_body_too_large}, Target, DocList) ->
original_batch_size_bytes => Len,
details => "splitting into two smaller batches and retrying"
}),
- couch_log:notice("Replicator: couldn't write batch of size ~p to ~p because"
+ couch_log:notice(
+ "Replicator: couldn't write batch of size ~p to ~p because"
" request body is too large. Splitting batch into 2 separate batches of"
- " sizes ~p and ~p", [Len, couch_replicator_api_wrap:db_uri(Target),
- length(DocList1), length(DocList2)]),
+ " sizes ~p and ~p",
+ [
+ Len,
+ couch_replicator_api_wrap:db_uri(Target),
+ length(DocList1),
+ length(DocList2)
+ ]
+ ),
Stats1 = flush_docs(Target, DocList1),
Stats2 = flush_docs(Target, DocList2),
couch_replicator_stats:sum_stats(Stats1, Stats2);
@@ -446,11 +477,20 @@ handle_flush_docs_result({ok, Errors}, Target, DocList) ->
error => get_value(error, Props, undefined),
details => get_value(reason, Props, undefined)
}),
- couch_log:error("Replicator: couldn't write document `~s`, revision"
- " `~s`, to target database `~s`. Error: `~s`, reason: `~s`.", [
- get_value(id, Props, ""), get_value(rev, Props, ""), DbUri,
- get_value(error, Props, ""), get_value(reason, Props, "")])
- end, Errors),
+ couch_log:error(
+ "Replicator: couldn't write document `~s`, revision"
+ " `~s`, to target database `~s`. Error: `~s`, reason: `~s`.",
+ [
+ get_value(id, Props, ""),
+ get_value(rev, Props, ""),
+ DbUri,
+ get_value(error, Props, ""),
+ get_value(reason, Props, "")
+ ]
+ )
+ end,
+ Errors
+ ),
couch_replicator_stats:new([
{docs_written, length(DocList) - length(Errors)},
{doc_write_failures, length(Errors)}
@@ -462,104 +502,122 @@ extract_value(Prop, Json) when is_binary(Json) ->
try
{Props} = ?JSON_DECODE(Json),
get_value(Prop, Props, undefined)
- catch _:_ ->
- undefined
+ catch
+ _:_ ->
+ undefined
end;
extract_value(_, _) ->
undefined.
flush_doc(Target, #doc{id = Id, revs = {Pos, [RevId | _]}} = Doc) ->
try couch_replicator_api_wrap:update_doc(Target, Doc, [], replicated_changes) of
- {ok, _} ->
- ok;
- Error ->
- ?LOG_ERROR(#{
- what => doc_write_failure,
- in => replicator,
- target => couch_replicator_api_wrap:db_uri(Target),
- docid => Id,
- revision => couch_doc:rev_to_str({Pos, RevId}),
- details => Error
- }),
- couch_log:error("Replicator: error writing document `~s` to `~s`: ~s",
- [Id, couch_replicator_api_wrap:db_uri(Target), couch_util:to_binary(Error)]),
- Error
+ {ok, _} ->
+ ok;
+ Error ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ docid => Id,
+ revision => couch_doc:rev_to_str({Pos, RevId}),
+ details => Error
+ }),
+ couch_log:error(
+ "Replicator: error writing document `~s` to `~s`: ~s",
+ [Id, couch_replicator_api_wrap:db_uri(Target), couch_util:to_binary(Error)]
+ ),
+ Error
catch
- throw:{missing_stub, _} = MissingStub ->
- throw(MissingStub);
- throw:{Error, Reason} ->
- ?LOG_ERROR(#{
- what => doc_write_failure,
- in => replicator,
- target => couch_replicator_api_wrap:db_uri(Target),
- docid => Id,
- revision => couch_doc:rev_to_str({Pos, RevId}),
- error => Error,
- details => Reason
- }),
- couch_log:error("Replicator: couldn't write document `~s`, revision `~s`,"
- " to target database `~s`. Error: `~s`, reason: `~s`.",
- [Id, couch_doc:rev_to_str({Pos, RevId}),
- couch_replicator_api_wrap:db_uri(Target), to_binary(Error), to_binary(Reason)]),
- {error, Error};
- throw:Err ->
- ?LOG_ERROR(#{
- what => doc_write_failure,
- in => replicator,
- target => couch_replicator_api_wrap:db_uri(Target),
- docid => Id,
- revision => couch_doc:rev_to_str({Pos, RevId}),
- details => Err
- }),
- couch_log:error("Replicator: couldn't write document `~s`, revision `~s`,"
- " to target database `~s`. Error: `~s`.",
- [Id, couch_doc:rev_to_str({Pos, RevId}),
- couch_replicator_api_wrap:db_uri(Target), to_binary(Err)]),
- {error, Err}
+ throw:{missing_stub, _} = MissingStub ->
+ throw(MissingStub);
+ throw:{Error, Reason} ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ docid => Id,
+ revision => couch_doc:rev_to_str({Pos, RevId}),
+ error => Error,
+ details => Reason
+ }),
+ couch_log:error(
+ "Replicator: couldn't write document `~s`, revision `~s`,"
+ " to target database `~s`. Error: `~s`, reason: `~s`.",
+ [
+ Id,
+ couch_doc:rev_to_str({Pos, RevId}),
+ couch_replicator_api_wrap:db_uri(Target),
+ to_binary(Error),
+ to_binary(Reason)
+ ]
+ ),
+ {error, Error};
+ throw:Err ->
+ ?LOG_ERROR(#{
+ what => doc_write_failure,
+ in => replicator,
+ target => couch_replicator_api_wrap:db_uri(Target),
+ docid => Id,
+ revision => couch_doc:rev_to_str({Pos, RevId}),
+ details => Err
+ }),
+ couch_log:error(
+ "Replicator: couldn't write document `~s`, revision `~s`,"
+ " to target database `~s`. Error: `~s`.",
+ [
+ Id,
+ couch_doc:rev_to_str({Pos, RevId}),
+ couch_replicator_api_wrap:db_uri(Target),
+ to_binary(Err)
+ ]
+ ),
+ {error, Err}
end.
-
find_missing(DocInfos, Target) ->
- {IdRevs, AllRevsCount} = lists:foldr(fun
- (#doc_info{revs = []}, {IdRevAcc, CountAcc}) ->
- {IdRevAcc, CountAcc};
- (#doc_info{id = Id, revs = RevsInfo}, {IdRevAcc, CountAcc}) ->
- Revs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
- {[{Id, Revs} | IdRevAcc], CountAcc + length(Revs)}
- end, {[], 0}, DocInfos),
-
-
- Missing = case couch_replicator_api_wrap:get_missing_revs(Target, IdRevs) of
- {ok, Result} -> Result;
- {error, Error} -> exit(Error)
- end,
+ {IdRevs, AllRevsCount} = lists:foldr(
+ fun
+ (#doc_info{revs = []}, {IdRevAcc, CountAcc}) ->
+ {IdRevAcc, CountAcc};
+ (#doc_info{id = Id, revs = RevsInfo}, {IdRevAcc, CountAcc}) ->
+ Revs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
+ {[{Id, Revs} | IdRevAcc], CountAcc + length(Revs)}
+ end,
+ {[], 0},
+ DocInfos
+ ),
+
+ Missing =
+ case couch_replicator_api_wrap:get_missing_revs(Target, IdRevs) of
+ {ok, Result} -> Result;
+ {error, Error} -> exit(Error)
+ end,
MissingRevsCount = lists:foldl(
fun({_Id, MissingRevs, _PAs}, Acc) -> Acc + length(MissingRevs) end,
- 0, Missing),
+ 0,
+ Missing
+ ),
Stats = couch_replicator_stats:new([
{missing_checked, AllRevsCount},
{missing_found, MissingRevsCount}
]),
{Missing, Stats}.
-
maybe_report_stats(Cp, Stats) ->
Now = os:timestamp(),
case timer:now_diff(erlang:get(last_stats_report), Now) >= ?STATS_DELAY of
- true ->
- ok = gen_server:call(Cp, {add_stats, Stats}, infinity),
- erlang:put(last_stats_report, Now),
- couch_replicator_stats:new();
- false ->
- Stats
+ true ->
+ ok = gen_server:call(Cp, {add_stats, Stats}, infinity),
+ erlang:put(last_stats_report, Now),
+ couch_replicator_stats:new();
+ false ->
+ Stats
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
replication_worker_format_status_test() ->
State = #state{
cp = self(),
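
A worked illustration of the batching rule reformatted above: maybe_flush_docs/3 JSON-encodes each document, accumulates it, and flushes once the running byte size crosses ?DOC_BUFFER_BYTE_SIZE. A minimal, self-contained Erlang sketch of the same rule (the module name and the 1 MiB limit are illustrative, not CouchDB's actual values):

    -module(batch_sketch).
    -export([new/0, add/2]).

    -define(BUFFER_BYTES, 1048576).

    -record(batch, {docs = [], size = 0}).

    new() ->
        #batch{}.

    %% Returns {flush, DocsToWrite, EmptyBatch} once the threshold is
    %% crossed, otherwise {ok, BatchWithDocAppended}.
    add(JsonDoc, #batch{docs = Docs, size = Size}) ->
        case Size + iolist_size(JsonDoc) of
            Size2 when Size2 > ?BUFFER_BYTES ->
                {flush, [JsonDoc | Docs], #batch{}};
            Size2 ->
                {ok, #batch{docs = [JsonDoc | Docs], size = Size2}}
        end.
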
diff --git a/src/couch_replicator/src/json_stream_parse.erl b/src/couch_replicator/src/json_stream_parse.erl
index b63e01152..3478b9830 100644
--- a/src/couch_replicator/src/json_stream_parse.erl
+++ b/src/couch_replicator/src/json_stream_parse.erl
@@ -12,15 +12,12 @@
-module(json_stream_parse).
-
-export([events/2, to_ejson/1, collect_object/2]).
-define(IS_WS(X), (X == $\ orelse X == $\t orelse X == $\n orelse X == $\r)).
-define(IS_DELIM(X), (X == $} orelse X == $] orelse X == $,)).
-define(IS_DIGIT(X), (X >= $0 andalso X =< $9)).
-
-
% Parses the JSON into events.
%
% The DataFun param is a function that produces the data for parsing. When
@@ -49,11 +46,11 @@
% {key, KeyString}, and the following event is the value, or start of the
% value (array_start, object_start).
%
-events(Data,EventFun) when is_list(Data)->
- events(list_to_binary(Data),EventFun);
-events(Data,EventFun) when is_binary(Data)->
- events(fun() -> {Data, fun() -> done end} end,EventFun);
-events(DataFun,EventFun) ->
+events(Data, EventFun) when is_list(Data) ->
+ events(list_to_binary(Data), EventFun);
+events(Data, EventFun) when is_binary(Data) ->
+ events(fun() -> {Data, fun() -> done end} end, EventFun);
+events(DataFun, EventFun) ->
parse_one(DataFun, EventFun, <<>>).
% converts the JSON directly to the Erlang representation of JSON
@@ -62,7 +59,6 @@ to_ejson(DF) ->
[[EJson]] = make_ejson(EF(get_results), [[]]),
EJson.
-
% This function is used to return complete objects while parsing streams.
%
% Return this function from inside an event function right after getting an
@@ -98,228 +94,224 @@ to_ejson(DF) ->
collect_object(Ev, ReturnControl) ->
collect_object(Ev, 0, ReturnControl, [object_start]).
-
-
% internal methods
-parse_one(DF,EF,Acc) ->
+parse_one(DF, EF, Acc) ->
case toke(DF, Acc) of
- none ->
- none;
- {Token, DF2, Rest} ->
- case Token of
- "{" ->
- EF2 = EF(object_start),
- {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
- {DF3, EF3(object_end), Rest2};
- "[" ->
- EF2 = EF(array_start),
- {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
- {DF3, EF3(array_end), Rest2};
- Int when is_integer(Int)->
- {DF2, EF(Int), Rest};
- Float when is_float(Float)->
- {DF2, EF(Float), Rest};
- Atom when is_atom(Atom)->
- {DF2, EF(Atom), Rest};
- String when is_binary(String)->
- {DF2, EF(String), Rest};
- _OtherToken ->
- err(unexpected_token)
- end
+ none ->
+ none;
+ {Token, DF2, Rest} ->
+ case Token of
+ "{" ->
+ EF2 = EF(object_start),
+ {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
+ {DF3, EF3(object_end), Rest2};
+ "[" ->
+ EF2 = EF(array_start),
+ {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
+ {DF3, EF3(array_end), Rest2};
+ Int when is_integer(Int) ->
+ {DF2, EF(Int), Rest};
+ Float when is_float(Float) ->
+ {DF2, EF(Float), Rest};
+ Atom when is_atom(Atom) ->
+ {DF2, EF(Atom), Rest};
+ String when is_binary(String) ->
+ {DF2, EF(String), Rest};
+ _OtherToken ->
+ err(unexpected_token)
+ end
end.
-must_parse_one(DF,EF,Acc,Error)->
+must_parse_one(DF, EF, Acc, Error) ->
case parse_one(DF, EF, Acc) of
- none ->
- err(Error);
- Else ->
- Else
+ none ->
+ err(Error);
+ Else ->
+ Else
end.
must_toke(DF, Data, Error) ->
case toke(DF, Data) of
- none ->
- err(Error);
- Result ->
- Result
+ none ->
+ err(Error);
+ Result ->
+ Result
end.
toke(DF, <<>>) ->
case DF() of
- done ->
- none;
- {Data, DF2} ->
- toke(DF2, Data)
+ done ->
+ none;
+ {Data, DF2} ->
+ toke(DF2, Data)
end;
-toke(DF, <<C,Rest/binary>>) when ?IS_WS(C)->
+toke(DF, <<C, Rest/binary>>) when ?IS_WS(C) ->
toke(DF, Rest);
-toke(DF, <<${,Rest/binary>>) ->
+toke(DF, <<${, Rest/binary>>) ->
{"{", DF, Rest};
-toke(DF, <<$},Rest/binary>>) ->
+toke(DF, <<$}, Rest/binary>>) ->
{"}", DF, Rest};
-toke(DF, <<$[,Rest/binary>>) ->
+toke(DF, <<$[, Rest/binary>>) ->
{"[", DF, Rest};
-toke(DF, <<$],Rest/binary>>) ->
+toke(DF, <<$], Rest/binary>>) ->
{"]", DF, Rest};
-toke(DF, <<$",Rest/binary>>) ->
- toke_string(DF,Rest,[]);
-toke(DF, <<$,,Rest/binary>>) ->
+toke(DF, <<$", Rest/binary>>) ->
+ toke_string(DF, Rest, []);
+toke(DF, <<$,, Rest/binary>>) ->
{",", DF, Rest};
-toke(DF, <<$:,Rest/binary>>) ->
+toke(DF, <<$:, Rest/binary>>) ->
{":", DF, Rest};
-toke(DF, <<$-,Rest/binary>>) ->
- {<<C,_/binary>> = Data, DF2} = must_df(DF,1,Rest,expected_number),
+toke(DF, <<$-, Rest/binary>>) ->
+ {<<C, _/binary>> = Data, DF2} = must_df(DF, 1, Rest, expected_number),
case ?IS_DIGIT(C) of
- true ->
- toke_number_leading(DF2, Data, "-");
- false ->
- err(expected_number)
+ true ->
+ toke_number_leading(DF2, Data, "-");
+ false ->
+ err(expected_number)
end;
-toke(DF, <<C,_/binary>> = Data) when ?IS_DIGIT(C) ->
+toke(DF, <<C, _/binary>> = Data) when ?IS_DIGIT(C) ->
toke_number_leading(DF, Data, []);
-toke(DF, <<$t,Rest/binary>>) ->
+toke(DF, <<$t, Rest/binary>>) ->
{Data, DF2} = must_match(<<"rue">>, DF, Rest),
{true, DF2, Data};
-toke(DF, <<$f,Rest/binary>>) ->
+toke(DF, <<$f, Rest/binary>>) ->
{Data, DF2} = must_match(<<"alse">>, DF, Rest),
{false, DF2, Data};
-toke(DF, <<$n,Rest/binary>>) ->
+toke(DF, <<$n, Rest/binary>>) ->
{Data, DF2} = must_match(<<"ull">>, DF, Rest),
{null, DF2, Data};
toke(_, _) ->
err(bad_token).
-
must_match(Pattern, DF, Data) ->
Size = size(Pattern),
case must_df(DF, Size, Data, bad_token) of
- {<<Pattern:Size/binary,Data2/binary>>, DF2} ->
- {Data2, DF2};
- {_, _} ->
- err(bad_token)
+ {<<Pattern:Size/binary, Data2/binary>>, DF2} ->
+ {Data2, DF2};
+ {_, _} ->
+ err(bad_token)
end.
-must_df(DF,Error)->
+must_df(DF, Error) ->
case DF() of
- done ->
- err(Error);
- {Data, DF2} ->
- {Data, DF2}
- end.
-
-
-must_df(DF,NeedLen,Acc,Error)->
- if size(Acc) >= NeedLen ->
- {Acc, DF};
- true ->
- case DF() of
done ->
err(Error);
{Data, DF2} ->
- must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
- end
+ {Data, DF2}
end.
+must_df(DF, NeedLen, Acc, Error) ->
+ if
+ size(Acc) >= NeedLen ->
+ {Acc, DF};
+ true ->
+ case DF() of
+ done ->
+ err(Error);
+ {Data, DF2} ->
+ must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
+ end
+ end.
-parse_object(DF,EF,Acc) ->
+parse_object(DF, EF, Acc) ->
case must_toke(DF, Acc, unterminated_object) of
- {String, DF2, Rest} when is_binary(String)->
- EF2 = EF({key,String}),
- case must_toke(DF2,Rest,unterminated_object) of
- {":", DF3, Rest2} ->
- {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
- case must_toke(DF4,Rest3, unterminated_object) of
- {",", DF5, Rest4} ->
- parse_object(DF5, EF3, Rest4);
- {"}", DF5, Rest4} ->
- {DF5, EF3, Rest4};
- {_, _, _} ->
- err(unexpected_token)
+ {String, DF2, Rest} when is_binary(String) ->
+ EF2 = EF({key, String}),
+ case must_toke(DF2, Rest, unterminated_object) of
+ {":", DF3, Rest2} ->
+ {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
+ case must_toke(DF4, Rest3, unterminated_object) of
+ {",", DF5, Rest4} ->
+ parse_object(DF5, EF3, Rest4);
+ {"}", DF5, Rest4} ->
+ {DF5, EF3, Rest4};
+ {_, _, _} ->
+ err(unexpected_token)
+ end;
+ _Else ->
+ err(expected_colon)
end;
- _Else ->
- err(expected_colon)
- end;
- {"}", DF2, Rest} ->
- {DF2, EF, Rest};
- {_, _, _} ->
- err(unexpected_token)
- end.
-
-parse_array0(DF,EF,Acc) ->
- case toke(DF, Acc) of
- none ->
- err(unterminated_array);
- {",", DF2, Rest} ->
- parse_array(DF2,EF,Rest);
- {"]", DF2, Rest} ->
- {DF2,EF,Rest};
- _ ->
- err(unexpected_token)
+ {"}", DF2, Rest} ->
+ {DF2, EF, Rest};
+ {_, _, _} ->
+ err(unexpected_token)
end.
-parse_array(DF,EF,Acc) ->
+parse_array0(DF, EF, Acc) ->
case toke(DF, Acc) of
- none ->
- err(unterminated_array);
- {Token, DF2, Rest} ->
- case Token of
- "{" ->
- EF2 = EF(object_start),
- {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
- parse_array0(DF3, EF3(object_end), Rest2);
- "[" ->
- EF2 = EF(array_start),
- {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
- parse_array0(DF3, EF3(array_end), Rest2);
- Int when is_integer(Int)->
- parse_array0(DF2, EF(Int), Rest);
- Float when is_float(Float)->
- parse_array0(DF2, EF(Float), Rest);
- Atom when is_atom(Atom)->
- parse_array0(DF2, EF(Atom), Rest);
- String when is_binary(String)->
- parse_array0(DF2, EF(String), Rest);
- "]" ->
+ none ->
+ err(unterminated_array);
+ {",", DF2, Rest} ->
+ parse_array(DF2, EF, Rest);
+ {"]", DF2, Rest} ->
{DF2, EF, Rest};
_ ->
err(unexpected_token)
- end
end.
+parse_array(DF, EF, Acc) ->
+ case toke(DF, Acc) of
+ none ->
+ err(unterminated_array);
+ {Token, DF2, Rest} ->
+ case Token of
+ "{" ->
+ EF2 = EF(object_start),
+ {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
+ parse_array0(DF3, EF3(object_end), Rest2);
+ "[" ->
+ EF2 = EF(array_start),
+ {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
+ parse_array0(DF3, EF3(array_end), Rest2);
+ Int when is_integer(Int) ->
+ parse_array0(DF2, EF(Int), Rest);
+ Float when is_float(Float) ->
+ parse_array0(DF2, EF(Float), Rest);
+ Atom when is_atom(Atom) ->
+ parse_array0(DF2, EF(Atom), Rest);
+ String when is_binary(String) ->
+ parse_array0(DF2, EF(String), Rest);
+ "]" ->
+ {DF2, EF, Rest};
+ _ ->
+ err(unexpected_token)
+ end
+ end.
toke_string(DF, <<>>, Acc) ->
{Data, DF2} = must_df(DF, unterminated_string),
toke_string(DF2, Data, Acc);
-toke_string(DF, <<$\\,$",Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $", Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$" | Acc]);
-toke_string(DF, <<$\\,$\\,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $\\, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\\ | Acc]);
-toke_string(DF, <<$\\,$/,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $/, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$/ | Acc]);
-toke_string(DF, <<$\\,$b,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $b, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\b | Acc]);
-toke_string(DF, <<$\\,$f,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $f, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\f | Acc]);
-toke_string(DF, <<$\\,$n,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $n, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\n | Acc]);
-toke_string(DF, <<$\\,$r,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $r, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\r | Acc]);
-toke_string(DF, <<$\\,$t,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $t, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\t | Acc]);
-toke_string(DF, <<$\\,$u,Rest/binary>>, Acc) ->
- {<<A,B,C,D,Data/binary>>, DF2} = must_df(DF,4,Rest,missing_hex),
+toke_string(DF, <<$\\, $u, Rest/binary>>, Acc) ->
+ {<<A, B, C, D, Data/binary>>, DF2} = must_df(DF, 4, Rest, missing_hex),
UTFChar = erlang:list_to_integer([A, B, C, D], 16),
- if UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
- err(invalid_utf_char);
- true ->
- ok
+ if
+ UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
+ err(invalid_utf_char);
+ true ->
+ ok
end,
Chars = xmerl_ucs:to_utf8(UTFChar),
toke_string(DF2, Data, lists:reverse(Chars) ++ Acc);
toke_string(DF, <<$\\>>, Acc) ->
{Data, DF2} = must_df(DF, unterminated_string),
- toke_string(DF2, <<$\\,Data/binary>>, Acc);
+ toke_string(DF2, <<$\\, Data/binary>>, Acc);
toke_string(_DF, <<$\\, _/binary>>, _Acc) ->
err(bad_escape);
toke_string(DF, <<$", Rest/binary>>, Acc) ->
@@ -327,72 +319,74 @@ toke_string(DF, <<$", Rest/binary>>, Acc) ->
toke_string(DF, <<C, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [C | Acc]).
-
-toke_number_leading(DF, <<Digit,Rest/binary>>, Acc)
- when ?IS_DIGIT(Digit) ->
+toke_number_leading(DF, <<Digit, Rest/binary>>, Acc) when
+ ?IS_DIGIT(Digit)
+->
toke_number_leading(DF, Rest, [Digit | Acc]);
-toke_number_leading(DF, <<C,_/binary>>=Rest, Acc)
- when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+toke_number_leading(DF, <<C, _/binary>> = Rest, Acc) when
+ ?IS_WS(C) orelse ?IS_DELIM(C)
+->
{list_to_integer(lists:reverse(Acc)), DF, Rest};
toke_number_leading(DF, <<>>, Acc) ->
case DF() of
- done ->
- {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_leading(DF2, Data, Acc)
+ done ->
+ {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
+ {Data, DF2} ->
+ toke_number_leading(DF2, Data, Acc)
end;
toke_number_leading(DF, <<$., Rest/binary>>, Acc) ->
- toke_number_trailing(DF, Rest, [$.|Acc]);
+ toke_number_trailing(DF, Rest, [$. | Acc]);
toke_number_leading(DF, <<$e, Rest/binary>>, Acc) ->
- toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
+ toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
toke_number_leading(DF, <<$E, Rest/binary>>, Acc) ->
- toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
+ toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
toke_number_leading(_, _, _) ->
err(unexpected_character_in_number).
-toke_number_trailing(DF, <<Digit,Rest/binary>>, Acc)
- when ?IS_DIGIT(Digit) ->
+toke_number_trailing(DF, <<Digit, Rest/binary>>, Acc) when
+ ?IS_DIGIT(Digit)
+->
toke_number_trailing(DF, Rest, [Digit | Acc]);
-toke_number_trailing(DF, <<C,_/binary>>=Rest, Acc)
- when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+toke_number_trailing(DF, <<C, _/binary>> = Rest, Acc) when
+ ?IS_WS(C) orelse ?IS_DELIM(C)
+->
{list_to_float(lists:reverse(Acc)), DF, Rest};
toke_number_trailing(DF, <<>>, Acc) ->
case DF() of
- done ->
- {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_trailing(DF2, Data, Acc)
+ done ->
+ {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
+ {Data, DF2} ->
+ toke_number_trailing(DF2, Data, Acc)
end;
-toke_number_trailing(DF, <<"e", Rest/binary>>, [C|_]=Acc) when C /= $. ->
- toke_number_exponent(DF, Rest, [$e|Acc]);
-toke_number_trailing(DF, <<"E", Rest/binary>>, [C|_]=Acc) when C /= $. ->
- toke_number_exponent(DF, Rest, [$e|Acc]);
+toke_number_trailing(DF, <<"e", Rest/binary>>, [C | _] = Acc) when C /= $. ->
+ toke_number_exponent(DF, Rest, [$e | Acc]);
+toke_number_trailing(DF, <<"E", Rest/binary>>, [C | _] = Acc) when C /= $. ->
+ toke_number_exponent(DF, Rest, [$e | Acc]);
toke_number_trailing(_, _, _) ->
err(unexpected_character_in_number).
-
-toke_number_exponent(DF, <<Digit,Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
+toke_number_exponent(DF, <<Digit, Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
toke_number_exponent(DF, Rest, [Digit | Acc]);
-toke_number_exponent(DF, <<Sign,Rest/binary>>, [$e|_]=Acc)
- when Sign == $+ orelse Sign == $- ->
+toke_number_exponent(DF, <<Sign, Rest/binary>>, [$e | _] = Acc) when
+ Sign == $+ orelse Sign == $-
+->
toke_number_exponent(DF, Rest, [Sign | Acc]);
-toke_number_exponent(DF, <<C,_/binary>>=Rest, Acc)
- when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+toke_number_exponent(DF, <<C, _/binary>> = Rest, Acc) when
+ ?IS_WS(C) orelse ?IS_DELIM(C)
+->
{list_to_float(lists:reverse(Acc)), DF, Rest};
toke_number_exponent(DF, <<>>, Acc) ->
case DF() of
- done ->
- {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_exponent(DF2, Data, Acc)
+ done ->
+ {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
+ {Data, DF2} ->
+ toke_number_exponent(DF2, Data, Acc)
end;
toke_number_exponent(_, _, _) ->
- err(unexpected_character_in_number).
-
-
-err(Error)->
- throw({parse_error,Error}).
+ err(unexpected_character_in_number).
+err(Error) ->
+ throw({parse_error, Error}).
make_ejson([], Stack) ->
Stack;
@@ -404,8 +398,8 @@ make_ejson([object_start | RevEvs], [ObjValues, PrevValues | RestStack]) ->
make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
make_ejson([object_end | RevEvs], Stack) ->
make_ejson(RevEvs, [[] | Stack]);
-make_ejson([{key, String} | RevEvs], [[PrevValue|RestObject] | RestStack] = _Stack) ->
- make_ejson(RevEvs, [[{String, PrevValue}|RestObject] | RestStack]);
+make_ejson([{key, String} | RevEvs], [[PrevValue | RestObject] | RestStack] = _Stack) ->
+ make_ejson(RevEvs, [[{String, PrevValue} | RestObject] | RestStack]);
make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
make_ejson(RevEvs, [[Value | Vals] | RestStack]).
@@ -414,7 +408,6 @@ collect_events(get_results, Acc) ->
collect_events(Ev, Acc) ->
fun(NextEv) -> collect_events(NextEv, [Ev | Acc]) end.
-
collect_object(object_end, 0, ReturnControl, Acc) ->
[[Obj]] = make_ejson([object_end | Acc], [[]]),
ReturnControl(Obj);
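
A hypothetical shell session tying the pieces of json_stream_parse together (assuming the module is compiled and loaded; the tuple returned by events/2 is elided). to_ejson/1 folds the event stream back into EJSON, and events/2 feeds each parse event to a callback that returns the callback for the next event:

    1> json_stream_parse:to_ejson(<<"{\"a\": [1, true, null]}">>).
    {[{<<"a">>,[1,true,null]}]}
    2> Print = fun Loop(Ev) -> io:format("~p~n", [Ev]), Loop end.
    3> json_stream_parse:events(<<"[1, 2]">>, Print).
    array_start
    1
    2
    array_end
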
diff --git a/src/couch_stats/src/couch_stats.erl b/src/couch_stats/src/couch_stats.erl
index 4fde14acb..e0303fc0f 100644
--- a/src/couch_stats/src/couch_stats.erl
+++ b/src/couch_stats/src/couch_stats.erl
@@ -29,10 +29,8 @@
update_gauge/2
]).
-
-include("couch_stats.hrl").
-
-type response() :: ok | {error, unknown_metric}.
-type stat() :: {any(), [{atom(), any()}]}.
@@ -95,8 +93,9 @@ decrement_counter(Name) ->
decrement_counter(Name, Value) ->
notify_existing_metric(Name, {dec, Value}, counter).
--spec update_histogram(any(), number()) -> response();
- (any(), function()) -> any().
+-spec update_histogram
+ (any(), number()) -> response();
+ (any(), function()) -> any().
update_histogram(Name, Fun) when is_function(Fun, 0) ->
Begin = os:timestamp(),
Result = Fun(),
@@ -118,9 +117,10 @@ update_gauge(Name, Value) ->
notify_existing_metric(Name, Op, Type) ->
try
ok = folsom_metrics:notify_existing_metric(Name, Op, Type)
- catch _:_ ->
- error_logger:error_msg("unknown metric: ~p", [Name]),
- {error, unknown_metric}
+ catch
+ _:_ ->
+ error_logger:error_msg("unknown metric: ~p", [Name]),
+ {error, unknown_metric}
end.
-spec sample_type(any(), atom()) -> stat().
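
The reformatted multi-clause -spec above documents that update_histogram/2 is overloaded: pass a number to record it directly, or a zero-arity fun to have the call timed and its duration recorded, with the fun's own result returned. Illustrative calls (the metric name is made up; per notify_existing_metric/3 above, an unregistered name logs an error and yields {error, unknown_metric}):

    couch_stats:update_histogram([my_app, request_time], 42),
    couch_stats:update_histogram([my_app, request_time], fun() ->
        timer:sleep(10)
    end).
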
diff --git a/src/couch_stats/src/couch_stats_aggregator.erl b/src/couch_stats/src/couch_stats_aggregator.erl
index 8d8cdf7e5..a3b0b0d45 100644
--- a/src/couch_stats/src/couch_stats_aggregator.erl
+++ b/src/couch_stats/src/couch_stats_aggregator.erl
@@ -31,7 +31,6 @@
format_status/2
]).
-
-include("couch_stats.hrl").
-record(st, {
@@ -58,17 +57,17 @@ init([]) ->
{ok, Descs} = reload_metrics(),
CT = erlang:send_after(get_interval(collect), self(), collect),
RT = erlang:send_after(get_interval(reload), self(), reload),
- {ok, #st{descriptions=Descs, stats=[], collect_timer=CT, reload_timer=RT}}.
+ {ok, #st{descriptions = Descs, stats = [], collect_timer = CT, reload_timer = RT}}.
-handle_call(fetch, _from, #st{stats = Stats}=State) ->
+handle_call(fetch, _from, #st{stats = Stats} = State) ->
{reply, {ok, Stats}, State};
handle_call(flush, _From, State) ->
{reply, ok, collect(State)};
-handle_call(reload, _from, #st{reload_timer=OldRT} = State) ->
+handle_call(reload, _from, #st{reload_timer = OldRT} = State) ->
timer:cancel(OldRT),
{ok, Descriptions} = reload_metrics(),
RT = update_timer(reload),
- {reply, ok, State#st{descriptions=Descriptions, reload_timer=RT}};
+ {reply, ok, State#st{descriptions = Descriptions, reload_timer = RT}};
handle_call(Msg, _From, State) ->
{stop, {unknown_call, Msg}, error, State}.
@@ -79,7 +78,7 @@ handle_info(collect, State) ->
{noreply, collect(State)};
handle_info(reload, State) ->
{ok, Descriptions} = reload_metrics(),
- {noreply, State#st{descriptions=Descriptions}};
+ {noreply, State#st{descriptions = Descriptions}};
handle_info(Msg, State) ->
{stop, {unknown_info, Msg}, State}.
@@ -91,17 +90,21 @@ code_change(_OldVsn, State, _Extra) ->
format_status(_Opt, [_PDict, State]) ->
#st{
- descriptions=Descs,
- stats=Stats,
- collect_timer=CollectT,
- reload_timer=ReloadT
+ descriptions = Descs,
+ stats = Stats,
+ collect_timer = CollectT,
+ reload_timer = ReloadT
} = State,
- [{data, [{"State", [
- {descriptions, {set_size, sets:size(Descs)}},
- {stats, {length, length(Stats)}},
- {collect_timer,CollectT},
- {reload_timer,ReloadT}
- ]}]}].
+ [
+ {data, [
+ {"State", [
+ {descriptions, {set_size, sets:size(Descs)}},
+ {stats, {length, length(Stats)}},
+ {collect_timer, CollectT},
+ {reload_timer, ReloadT}
+ ]}
+ ]}
+ ].
comparison_set(Metrics) ->
sets:from_list(
@@ -116,7 +119,10 @@ reload_metrics() ->
ToDelete = sets:subtract(ExistingSet, CurrentSet),
ToCreate = sets:subtract(CurrentSet, ExistingSet),
sets:fold(
- fun({Name, _}, _) -> couch_stats:delete(Name), nil end,
+ fun({Name, _}, _) ->
+ couch_stats:delete(Name),
+ nil
+ end,
nil,
ToDelete
),
@@ -156,16 +162,16 @@ load_metrics_for_application(AppName) ->
end
end.
-collect(#st{collect_timer=OldCT} = State) ->
+collect(#st{collect_timer = OldCT} = State) ->
timer:cancel(OldCT),
Stats = lists:map(
fun({Name, Props}) ->
- {Name, [{value, couch_stats:sample(Name)}|Props]}
+ {Name, [{value, couch_stats:sample(Name)} | Props]}
end,
State#st.descriptions
),
CT = update_timer(collect),
- State#st{stats=Stats, collect_timer=CT}.
+ State#st{stats = Stats, collect_timer = CT}.
update_timer(Type) ->
Interval = get_interval(Type),
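
format_status/2 above illustrates a useful gen_server convention: summarize bulky state fields ({set_size, ...}, {length, ...}) so that sys:get_status/1 output and crash logs stay small. A hypothetical callback fragment using the same idea (the record and its fields are made up):

    format_status(_Opt, [_PDict, State]) ->
        [{data, [
            {"State", [
                {queue, {length, queue:len(State#st.queue)}},
                {cache, {set_size, sets:size(State#st.cache)}}
            ]}
        ]}].
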
diff --git a/src/couch_stats/src/couch_stats_httpd.erl b/src/couch_stats/src/couch_stats_httpd.erl
index 0c24d8856..b40ba6094 100644
--- a/src/couch_stats/src/couch_stats_httpd.erl
+++ b/src/couch_stats/src/couch_stats_httpd.erl
@@ -18,7 +18,7 @@
%% exported for use by chttpd_misc
-export([transform_stats/1, nest/1, to_ejson/1, extract_path/2]).
-handle_stats_req(#httpd{method='GET', path_parts=[_ | Path]}=Req) ->
+handle_stats_req(#httpd{method = 'GET', path_parts = [_ | Path]} = Req) ->
flush(Req),
Stats0 = couch_stats:fetch(),
Stats = transform_stats(Stats0),
@@ -27,7 +27,6 @@ handle_stats_req(#httpd{method='GET', path_parts=[_ | Path]}=Req) ->
EJSON1 = extract_path(Path, EJSON0),
couch_httpd:send_json(Req, EJSON1).
-
transform_stats(Stats) ->
transform_stats(Stats, []).
@@ -37,51 +36,55 @@ transform_stats([{Key, Props} | Rest], Acc) ->
{_, Type} = proplists:lookup(type, Props),
transform_stats(Rest, [{Key, transform_stat(Type, Props)} | Acc]).
-
transform_stat(counter, Props) ->
Props;
transform_stat(gauge, Props) ->
Props;
transform_stat(histogram, Props) ->
- lists:map(fun
- ({value, Value}) ->
- {value, lists:map(fun
- ({Key, List}) when Key == percentile; Key == histogram ->
- {Key, [tuple_to_list(Item) || Item <- List]};
- (Else) ->
- Else
- end, Value)};
- (Else) ->
- Else
- end, Props).
-
+ lists:map(
+ fun
+ ({value, Value}) ->
+ {value,
+ lists:map(
+ fun
+ ({Key, List}) when Key == percentile; Key == histogram ->
+ {Key, [tuple_to_list(Item) || Item <- List]};
+ (Else) ->
+ Else
+ end,
+ Value
+ )};
+ (Else) ->
+ Else
+ end,
+ Props
+ ).
nest(Proplist) ->
nest(Proplist, []).
nest([], Acc) ->
Acc;
-nest([{[Key|Keys], Value}|Rest], Acc) ->
- Acc1 = case proplists:lookup(Key, Acc) of
- {Key, Old} ->
- [{Key, nest([{Keys, Value}], Old)}|proplists:delete(Key, Acc)];
- none ->
- Term = lists:foldr(fun(K, A) -> [{K, A}] end, Value, Keys),
- [{Key, Term}|Acc]
- end,
+nest([{[Key | Keys], Value} | Rest], Acc) ->
+ Acc1 =
+ case proplists:lookup(Key, Acc) of
+ {Key, Old} ->
+ [{Key, nest([{Keys, Value}], Old)} | proplists:delete(Key, Acc)];
+ none ->
+ Term = lists:foldr(fun(K, A) -> [{K, A}] end, Value, Keys),
+ [{Key, Term} | Acc]
+ end,
nest(Rest, Acc1).
-
-to_ejson([{_, _}|_]=Proplist) ->
+to_ejson([{_, _} | _] = Proplist) ->
EJSONProps = lists:map(
- fun({Key, Value}) -> {maybe_format_key(Key), to_ejson(Value)} end,
- Proplist
+ fun({Key, Value}) -> {maybe_format_key(Key), to_ejson(Value)} end,
+ Proplist
),
{EJSONProps};
to_ejson(NotAProplist) ->
NotAProplist.
-
extract_path([], EJSON) ->
EJSON;
extract_path([Key | Rest], {Props}) ->
@@ -94,7 +97,6 @@ extract_path([Key | Rest], {Props}) ->
extract_path([_ | _], _NotAnObject) ->
null.
-
maybe_format_key(Key) when is_list(Key) ->
list_to_binary(Key);
maybe_format_key(Key) when is_atom(Key) ->
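
nest/1 and to_ejson/1 above turn the flat, path-keyed stats list into the nested object the _stats endpoint returns. A sketch of the transformation in the shell (output shapes inferred from the code above; the metric path is illustrative):

    1> Nested = couch_stats_httpd:nest([{[couchdb, open_databases], 12}]).
    [{couchdb,[{open_databases,12}]}]
    2> couch_stats_httpd:to_ejson(Nested).
    {[{<<"couchdb">>,{[{<<"open_databases">>,12}]}}]}
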
diff --git a/src/couch_stats/src/couch_stats_process_tracker.erl b/src/couch_stats/src/couch_stats_process_tracker.erl
index fef955efd..c53f0f887 100644
--- a/src/couch_stats/src/couch_stats_process_tracker.erl
+++ b/src/couch_stats/src/couch_stats_process_tracker.erl
@@ -28,9 +28,7 @@
terminate/2
]).
--record(st, {
-
-}).
+-record(st, {}).
-spec track(any()) -> ok.
track(Name) ->
@@ -60,7 +58,7 @@ handle_cast(Msg, State) ->
error_logger:error_msg("~p received unknown cast ~p", [?MODULE, Msg]),
{noreply, State}.
-handle_info({'DOWN', Ref, _, _, _}=Msg, State) ->
+handle_info({'DOWN', Ref, _, _, _} = Msg, State) ->
case ets:lookup(?MODULE, Ref) of
[] ->
error_logger:error_msg(
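
couch_stats_process_tracker above follows the standard monitor-based cleanup pattern: remember the monitor reference when tracking starts and drop the entry when the corresponding 'DOWN' message arrives. The same pattern as a tiny self-contained sketch, using a map in place of the ets table:

    %% Track a process under Name; the monitor ref is the map key.
    track(Pid, Name, Tracked) ->
        Ref = erlang:monitor(process, Pid),
        Tracked#{Ref => Name}.

    %% Forget the entry when the monitor fires.
    untrack({'DOWN', Ref, process, _Pid, _Reason}, Tracked) ->
        maps:remove(Ref, Tracked).
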
diff --git a/src/couch_stats/src/couch_stats_sup.erl b/src/couch_stats/src/couch_stats_sup.erl
index 55755bb83..e47539901 100644
--- a/src/couch_stats/src/couch_stats_sup.erl
+++ b/src/couch_stats/src/couch_stats_sup.erl
@@ -24,12 +24,12 @@
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
- {ok, {
- {one_for_one, 5, 10}, [
- ?CHILD(couch_stats_aggregator, worker),
- ?CHILD(couch_stats_process_tracker, worker)
- ]
- }}.
-
+ {ok,
+ {
+ {one_for_one, 5, 10},
+ [
+ ?CHILD(couch_stats_aggregator, worker),
+ ?CHILD(couch_stats_process_tracker, worker)
+ ]
+ }}.
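
The init/1 result above uses the legacy tuple form of supervisor flags; {one_for_one, 5, 10} is equivalent to the modern map form, shown here for reference:

    %% Same restart strategy, map syntax (OTP 18+):
    #{strategy => one_for_one, intensity => 5, period => 10}
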
diff --git a/src/couch_tests/src/couch_tests.erl b/src/couch_tests/src/couch_tests.erl
index 5dff3c5e1..de80addf5 100644
--- a/src/couch_tests/src/couch_tests.erl
+++ b/src/couch_tests/src/couch_tests.erl
@@ -116,19 +116,22 @@ validate_fixture(#couch_tests_fixture{} = Fixture0, Args, Opts) ->
StartedAppsAfterTeardown = Ctx1#couch_tests_ctx.started_apps,
validate_and_report([
- {equal, "Expected applications before calling fixture (~p) "
- "to be equal to applications after its calling",
- AppsBefore, AppsAfter},
- {equal, "Expected list of started applications (~p) "
- "to be equal to #couch_tests_fixture.apps (~p)",
- AppsStarted, FixtureApps},
- {equal, "Expected list of started applications (~p) "
- "to be equal to #couch_tests_ctx.started_apps (~p)",
- AppsStarted, StartedAppsBeforeTeardown},
- {equal, "Expected list of stopped applications (~p) "
- "to be equal to #couch_tests_ctx.stopped_apps (~p)",
- AppsStarted, StoppedAppsAfterTeardown},
- {equal, "Expected empty list ~i of #couch_tests_ctx.started_apps (~p) "
+ {equal,
+ "Expected applications before calling fixture (~p) "
+ "to be equal to applications after its calling", AppsBefore, AppsAfter},
+ {equal,
+ "Expected list of started applications (~p) "
+ "to be equal to #couch_tests_fixture.apps (~p)", AppsStarted, FixtureApps},
+ {equal,
+ "Expected list of started applications (~p) "
+ "to be equal to #couch_tests_ctx.started_apps (~p)", AppsStarted,
+ StartedAppsBeforeTeardown},
+ {equal,
+ "Expected list of stopped applications (~p) "
+ "to be equal to #couch_tests_ctx.stopped_apps (~p)", AppsStarted,
+ StoppedAppsAfterTeardown},
+ {equal,
+ "Expected empty list ~i of #couch_tests_ctx.started_apps (~p) "
"after teardown", [], StartedAppsAfterTeardown}
]).
@@ -151,16 +154,19 @@ validate_and_report(Sheet) ->
%% Helper functions definitions
%% ------------------------------------------------------------------
-
do_setup([#couch_tests_fixture{setup = Setup} = Fixture | Rest], Ctx0, Acc) ->
Ctx1 = Ctx0#couch_tests_ctx{started_apps = []},
#couch_tests_ctx{started_apps = Apps} = Ctx2 = Setup(Fixture, Ctx1),
Ctx3 = Ctx2#couch_tests_ctx{started_apps = []},
do_setup(Rest, Ctx3, [Fixture#couch_tests_fixture{apps = Apps} | Acc]);
do_setup([], Ctx, Acc) ->
- Apps = lists:foldl(fun(#couch_tests_fixture{apps = A}, AppsAcc) ->
- A ++ AppsAcc
- end, [], Acc),
+ Apps = lists:foldl(
+ fun(#couch_tests_fixture{apps = A}, AppsAcc) ->
+ A ++ AppsAcc
+ end,
+ [],
+ Acc
+ ),
Ctx#couch_tests_ctx{chain = lists:reverse(Acc), started_apps = Apps}.
do_teardown(Fixture, Ctx0) ->
@@ -175,14 +181,14 @@ do_start_applications([], Acc) ->
lists:reverse(Acc);
do_start_applications([App | Apps], Acc) ->
case application:start(App) of
- {error, {already_started, _}} ->
- do_start_applications(Apps, Acc);
- {error, {not_started, Dep}} ->
- do_start_applications([Dep, App | Apps], Acc);
- {error, {not_running, Dep}} ->
- do_start_applications([Dep, App | Apps], Acc);
- ok ->
- do_start_applications(Apps, [App | Acc])
+ {error, {already_started, _}} ->
+ do_start_applications(Apps, Acc);
+ {error, {not_started, Dep}} ->
+ do_start_applications([Dep, App | Apps], Acc);
+ {error, {not_running, Dep}} ->
+ do_start_applications([Dep, App | Apps], Acc);
+ ok ->
+ do_start_applications(Apps, [App | Acc])
end.
stop_applications(Apps) ->
@@ -192,26 +198,25 @@ do_stop_applications([], Acc) ->
lists:reverse(Acc);
do_stop_applications([App | Apps], Acc) ->
case application:stop(App) of
- {error, _} ->
- do_stop_applications(Apps, Acc);
- ok ->
- do_stop_applications(Apps, [App | Acc])
+ {error, _} ->
+ do_stop_applications(Apps, Acc);
+ ok ->
+ do_stop_applications(Apps, [App | Acc])
end.
-remove_duplicates([]) ->
+remove_duplicates([]) ->
[];
remove_duplicates([H | T]) ->
[H | [X || X <- remove_duplicates(T), X /= H]].
applications() ->
- lists:usort([App || {App, _, _} <-application:which_applications()]).
+ lists:usort([App || {App, _, _} <- application:which_applications()]).
do_validate({equal, _Message, Arg, Arg}, Acc) ->
Acc;
do_validate({equal, Message, Arg1, Arg2}, Acc) ->
[io_lib:format(Message, [Arg1, Arg2]) | Acc].
-
%% ------------------------------------------------------------------
%% Tests
%% ------------------------------------------------------------------
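
do_start_applications/2 above chases missing dependencies: when application:start/1 reports {error, {not_started, Dep}} (or {not_running, Dep}), the dependency is pushed in front of the app and the loop retries. A stand-alone sketch of the same loop:

    %% Start every app in Apps, prepending missing dependencies and
    %% tolerating apps that are already running.
    start_all([]) ->
        ok;
    start_all([App | Apps]) ->
        case application:start(App) of
            ok ->
                start_all(Apps);
            {error, {already_started, App}} ->
                start_all(Apps);
            {error, {not_started, Dep}} ->
                start_all([Dep, App | Apps])
        end.
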
diff --git a/src/couch_tests/src/couch_tests_combinatorics.erl b/src/couch_tests/src/couch_tests_combinatorics.erl
index 343336277..f1ee6dd2e 100644
--- a/src/couch_tests/src/couch_tests_combinatorics.erl
+++ b/src/couch_tests/src/couch_tests_combinatorics.erl
@@ -65,7 +65,7 @@ powerset(X, [H | T], Acc) ->
permutations([]) ->
[[]];
-permutations(L) ->
+permutations(L) ->
[[H | T] || H <- L, T <- permutations(L -- [H])].
%% @doc product({Items1, Items2, ..., ItemsN})
@@ -83,7 +83,7 @@ permutations(L) ->
%% ]
-spec product(Elements :: list()) -> [list()].
-product([H]) ->
+product([H]) ->
[[A] || A <- H];
product([H | T]) ->
[[A | B] || A <- H, B <- product(T)].
@@ -109,7 +109,6 @@ product([H | T]) ->
binary_combinations(NBits) ->
product(lists:duplicate(NBits, [true, false])).
-
%% @doc combinations(N, Items).
%% Generate all combinations by choosing N values from a given list of Items
%% in sorted order. Each combination is sorted and the entire table is sorted.
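
The @doc comments above already describe the shapes these helpers produce; a quick shell check (assuming the module is loaded):

    1> couch_tests_combinatorics:permutations([1, 2]).
    [[1,2],[2,1]]
    2> couch_tests_combinatorics:product([[a, b], [1, 2]]).
    [[a,1],[a,2],[b,1],[b,2]]
    3> couch_tests_combinatorics:binary_combinations(2).
    [[true,true],[true,false],[false,true],[false,false]]
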
diff --git a/src/couch_views/src/couch_views.erl b/src/couch_views/src/couch_views.erl
index 5804db092..8b193cee2 100644
--- a/src/couch_views/src/couch_views.erl
+++ b/src/couch_views/src/couch_views.erl
@@ -12,10 +12,8 @@
-module(couch_views).
-
-behavior(fabric2_index).
-
-export([
query/6,
@@ -27,7 +25,6 @@
-include("couch_views.hrl").
-
query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
case fabric2_db:is_users_db(Db) of
true ->
@@ -55,41 +52,50 @@ query(Db, DDoc, ViewName, Callback, Acc0, Args0) ->
IdxVStamps = {?VIEW_CURRENT_VSN, ?VIEW_CURRENT_VSN},
read_view(TxDb, Mrst, ViewName, Callback, Acc0, Args3, IdxVStamps)
end)
- catch throw:{build_view, WaitSeq} ->
- {ok, IdxVStamps} = couch_views_jobs:build_view(Db, Mrst, WaitSeq),
- read_view(Db, Mrst, ViewName, Callback, Acc0, Args3, IdxVStamps)
+ catch
+ throw:{build_view, WaitSeq} ->
+ {ok, IdxVStamps} = couch_views_jobs:build_view(Db, Mrst, WaitSeq),
+ read_view(Db, Mrst, ViewName, Callback, Acc0, Args3, IdxVStamps)
end.
-
build_indices(#{} = Db, DDocs) when is_list(DDocs) ->
DbName = fabric2_db:name(Db),
- lists:filtermap(fun(DDoc) ->
- try couch_views_util:ddoc_to_mrst(DbName, DDoc) of
- {ok, #mrst{} = Mrst} ->
- {true, couch_views_jobs:build_view_async(Db, Mrst)}
- catch _:_ ->
- false
- end
- end, DDocs).
-
+ lists:filtermap(
+ fun(DDoc) ->
+ try couch_views_util:ddoc_to_mrst(DbName, DDoc) of
+ {ok, #mrst{} = Mrst} ->
+ {true, couch_views_jobs:build_view_async(Db, Mrst)}
+ catch
+ _:_ ->
+ false
+ end
+ end,
+ DDocs
+ ).
cleanup_indices(#{} = Db, DDocs) when is_list(DDocs) ->
DbName = fabric2_db:name(Db),
- ActiveSigs = lists:filtermap(fun(DDoc) ->
- try couch_views_util:ddoc_to_mrst(DbName, DDoc) of
- {ok, #mrst{sig = Sig}} ->
- {true, Sig}
- catch _:_ ->
- false
- end
- end, DDocs),
+ ActiveSigs = lists:filtermap(
+ fun(DDoc) ->
+ try couch_views_util:ddoc_to_mrst(DbName, DDoc) of
+ {ok, #mrst{sig = Sig}} ->
+ {true, Sig}
+ catch
+ _:_ ->
+ false
+ end
+ end,
+ DDocs
+ ),
ExistingSigs = couch_views_fdb:list_signatures(Db),
StaleSigs = ExistingSigs -- ActiveSigs,
- lists:foreach(fun(Sig) ->
- couch_views_jobs:remove(Db, Sig),
- couch_views_fdb:clear_index(Db, Sig)
- end, StaleSigs).
-
+ lists:foreach(
+ fun(Sig) ->
+ couch_views_jobs:remove(Db, Sig),
+ couch_views_fdb:clear_index(Db, Sig)
+ end,
+ StaleSigs
+ ).
get_info(Db, DDoc) ->
DbName = fabric2_db:name(Db),
@@ -99,32 +105,36 @@ get_info(Db, DDoc) ->
Mrst1 = couch_views_trees:open(TxDb, Mrst),
Seq = couch_views_fdb:get_update_seq(TxDb, Mrst1),
DataSize = get_total_view_size(TxDb, Mrst1),
- JobStatus = case couch_views_jobs:job_state(TxDb, Mrst1) of
- {ok, pending} -> true;
- {ok, running} -> true;
- {ok, finished} -> false;
- {error, not_found} -> false
- end,
+ JobStatus =
+ case couch_views_jobs:job_state(TxDb, Mrst1) of
+ {ok, pending} -> true;
+ {ok, running} -> true;
+ {ok, finished} -> false;
+ {error, not_found} -> false
+ end,
{Seq, DataSize, JobStatus}
end),
UpdateOptions = get_update_options(Mrst),
{ok, [
{language, Mrst#mrst.language},
{signature, Sig},
- {sizes, {[
- {active, DataSize}
- ]}},
+ {sizes,
+ {[
+ {active, DataSize}
+ ]}},
{update_seq, UpdateSeq},
{updater_running, Status},
{update_options, UpdateOptions}
]}.
-
get_total_view_size(TxDb, Mrst) ->
- lists:foldl(fun(View, Total) ->
- Total + couch_views_trees:get_kv_size(TxDb, View)
- end, 0, Mrst#mrst.views).
-
+ lists:foldl(
+ fun(View, Total) ->
+ Total + couch_views_trees:get_kv_size(TxDb, View)
+ end,
+ 0,
+ Mrst#mrst.views
+ ).
read_view(Db, Mrst, ViewName, Callback, Acc0, Args, {_, _} = IdxVStamps) ->
{DbReadVsn, ViewReadVsn} = IdxVStamps,
@@ -139,37 +149,46 @@ read_view(Db, Mrst, ViewName, Callback, Acc0, Args, {_, _} = IdxVStamps) ->
erlfdb:set_read_version(maps:get(tx, TxDb), ViewReadVsn)
end,
try
- couch_views_reader:read(TxDb, Mrst, ViewName, Callback, Acc0, Args,
- DbReadVsn)
+ couch_views_reader:read(
+ TxDb,
+ Mrst,
+ ViewName,
+ Callback,
+ Acc0,
+ Args,
+ DbReadVsn
+ )
after
UpdateAfter = Args#mrargs.update == lazy,
- if UpdateAfter == false -> ok; true ->
- % Make sure to use a separate transaction if we are
- % reading from a stale snapshot
- case ViewReadVsn of
- ?VIEW_CURRENT_VSN ->
- couch_views_jobs:build_view_async(TxDb, Mrst);
- _ ->
- couch_views_jobs:build_view_async(Db, Mrst)
- end
+ if
+ UpdateAfter == false ->
+ ok;
+ true ->
+ % Make sure to use a separate transaction if we are
+ % reading from a stale snapshot
+ case ViewReadVsn of
+ ?VIEW_CURRENT_VSN ->
+ couch_views_jobs:build_view_async(TxDb, Mrst);
+ _ ->
+ couch_views_jobs:build_view_async(Db, Mrst)
+ end
end
end
end).
-
maybe_update_view(_Db, _Mrst, _, #mrargs{update = false}) ->
ok;
-
maybe_update_view(_Db, _Mrst, _, #mrargs{update = lazy}) ->
ok;
-
maybe_update_view(TxDb, Mrst, true, _Args) ->
BuildState = couch_views_fdb:get_build_status(TxDb, Mrst),
- if BuildState == ?INDEX_READY -> ok; true ->
- VS = couch_views_fdb:get_creation_vs(TxDb, Mrst),
- throw({build_view, fabric2_fdb:vs_to_seq(VS)})
+ if
+ BuildState == ?INDEX_READY ->
+ ok;
+ true ->
+ VS = couch_views_fdb:get_creation_vs(TxDb, Mrst),
+ throw({build_view, fabric2_fdb:vs_to_seq(VS)})
end;
-
maybe_update_view(TxDb, Mrst, false, _Args) ->
DbSeq = fabric2_db:get_update_seq(TxDb),
ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst),
@@ -178,43 +197,41 @@ maybe_update_view(TxDb, Mrst, false, _Args) ->
false -> throw({build_view, DbSeq})
end.
-
to_mrargs(#mrargs{} = Args) ->
Args;
-
to_mrargs(#{} = Args) ->
Fields = record_info(fields, mrargs),
Indexes = lists:seq(2, record_info(size, mrargs)),
LU = lists:zip(Fields, Indexes),
- maps:fold(fun(Key, Value, Acc) ->
- Index = fabric2_util:get_value(couch_util:to_existing_atom(Key), LU),
- setelement(Index, Acc, Value)
- end, #mrargs{}, Args).
-
+ maps:fold(
+ fun(Key, Value, Acc) ->
+ Index = fabric2_util:get_value(couch_util:to_existing_atom(Key), LU),
+ setelement(Index, Acc, Value)
+ end,
+ #mrargs{},
+ Args
+ ).
check_range(Mrst, ViewName, Args) ->
#mrst{
language = Lang,
views = Views
} = Mrst,
- View = case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
- {map, V, _} -> V;
- {red, {_, _, V}, _} -> V
- end,
+ View =
+ case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
+ {map, V, _} -> V;
+ {red, {_, _, V}, _} -> V
+ end,
Cmp = couch_views_util:collate_fun(View),
check_range(Args, Cmp).
-
check_range(#mrargs{start_key = undefined}, _Cmp) ->
ok;
-
check_range(#mrargs{end_key = undefined}, _Cmp) ->
ok;
-
check_range(#mrargs{start_key = K, end_key = K}, _Cmp) ->
ok;
-
check_range(Args, Cmp) ->
#mrargs{
direction = Dir,
@@ -233,17 +250,21 @@ check_range(Args, Cmp) ->
ok
end.
-
check_range_error(Descending) ->
{query_parse_error,
<<"No rows can match your key range, reverse your ",
- "start_key and end_key or set descending=",
- Descending/binary>>}.
-
+ "start_key and end_key or set descending=", Descending/binary>>}.
get_update_options(#mrst{design_opts = Opts}) ->
IncDesign = couch_util:get_value(<<"include_design">>, Opts, false),
LocalSeq = couch_util:get_value(<<"local_seq">>, Opts, false),
- UpdateOptions = if IncDesign -> [include_design]; true -> [] end
- ++ if LocalSeq -> [local_seq]; true -> [] end,
+ UpdateOptions =
+ if
+ IncDesign -> [include_design];
+ true -> []
+ end ++
+ if
+ LocalSeq -> [local_seq];
+ true -> []
+ end,
[atom_to_binary(O, latin1) || O <- UpdateOptions].
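
to_mrargs/1 above uses a compact map-to-record conversion: record_info(fields, ...) yields the field names at compile time, zipping them with element indexes gives a lookup table, and setelement/3 writes each map value into its slot. The same trick as a self-contained sketch (the record and its fields are illustrative; unknown keys crash here, where the original resolves them via fabric2_util:get_value/2):

    -module(map_to_record).
    -export([to_rec/1]).

    -record(opts, {limit, skip}).

    to_rec(#{} = Map) ->
        Fields = record_info(fields, opts),
        Indexes = lists:seq(2, record_info(size, opts)),
        LU = lists:zip(Fields, Indexes),
        maps:fold(
            fun(Key, Value, Acc) ->
                %% A badmatch here means an unknown field name.
                {Key, Index} = lists:keyfind(Key, 1, LU),
                setelement(Index, Acc, Value)
            end,
            #opts{},
            Map
        ).

    %% map_to_record:to_rec(#{limit => 10}) =:= #opts{limit = 10}
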
diff --git a/src/couch_views/src/couch_views_app.erl b/src/couch_views/src/couch_views_app.erl
index 7337d0580..9b2ed0673 100644
--- a/src/couch_views/src/couch_views_app.erl
+++ b/src/couch_views/src/couch_views_app.erl
@@ -10,22 +10,17 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_views_app).
-
-behaviour(application).
-
-export([
start/2,
stop/1
]).
-
start(_StartType, _StartArgs) ->
couch_views_sup:start_link().
-
stop(_State) ->
ok.
diff --git a/src/couch_views/src/couch_views_batch.erl b/src/couch_views/src/couch_views_batch.erl
index 555eac9ed..e5151eb1b 100644
--- a/src/couch_views/src/couch_views_batch.erl
+++ b/src/couch_views/src/couch_views_batch.erl
@@ -12,14 +12,12 @@
-module(couch_views_batch).
-
-export([
start/1,
success/2,
failure/1
]).
-
-include_lib("couch_views/include/couch_views.hrl").
-type update_stats() :: #{
@@ -31,45 +29,42 @@
-export_type([update_stats/0]).
-callback start(
- Mrst::#mrst{},
- State::term()
- ) -> {NewState::term(), BatchSize::pos_integer()}.
+ Mrst :: #mrst{},
+ State :: term()
+) -> {NewState :: term(), BatchSize :: pos_integer()}.
-callback success(
- Mrst::#mrst{},
- UpdateStats::update_stats(),
- State::term()
- ) -> NewState::term().
-
--callback failure(Mrst::#mrst{}, State::term()) -> NewState::term().
+ Mrst :: #mrst{},
+ UpdateStats :: update_stats(),
+ State :: term()
+) -> NewState :: term().
+-callback failure(Mrst :: #mrst{}, State :: term()) -> NewState :: term().
-define(DEFAULT_MOD, "couch_views_batch_impl").
-
-spec start(#mrst{}) -> pos_integer().
start(#mrst{} = Mrst) ->
- {Mod, State} = case load_state() of
- {M, S} ->
- {M, S};
- undefined ->
- ModStr = config:get("couch_views", "batch_module", ?DEFAULT_MOD),
- ModAtom = list_to_existing_atom(ModStr),
- {ModAtom, undefined}
- end,
+ {Mod, State} =
+ case load_state() of
+ {M, S} ->
+ {M, S};
+ undefined ->
+ ModStr = config:get("couch_views", "batch_module", ?DEFAULT_MOD),
+ ModAtom = list_to_existing_atom(ModStr),
+ {ModAtom, undefined}
+ end,
{NewState, BatchSize} = Mod:start(Mrst, State),
save_state(Mod, NewState),
BatchSize.
-
--spec success(#mrst{}, UpdateStats::update_stats()) -> ok.
+-spec success(#mrst{}, UpdateStats :: update_stats()) -> ok.
success(#mrst{} = Mrst, UpdateStats) ->
{Mod, State} = load_state(),
NewState = Mod:success(Mrst, UpdateStats, State),
save_state(Mod, NewState),
ok.
-
-spec failure(#mrst{}) -> ok.
failure(#mrst{} = Mrst) ->
{Mod, State} = load_state(),
@@ -77,10 +72,8 @@ failure(#mrst{} = Mrst) ->
save_state(Mod, NewState),
ok.
-
load_state() ->
get(?MODULE).
-
save_state(Mod, State) ->
put(?MODULE, {Mod, State}).
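
The -callback attributes above define the couch_views_batch behaviour, and start/1 selects the implementation via the couch_views/batch_module config key. A hypothetical minimal implementation that always requests a fixed batch size and ignores the feedback callbacks (the module name and the size 500 are made up):

    -module(fixed_batch).
    -behavior(couch_views_batch).
    -export([start/2, success/3, failure/2]).

    %% Always run batches of 500 documents.
    start(_Mrst, State) ->
        {State, 500}.

    %% Ignore the success/failure feedback entirely.
    success(_Mrst, _UpdateStats, State) ->
        State.

    failure(_Mrst, State) ->
        State.
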
diff --git a/src/couch_views/src/couch_views_batch_impl.erl b/src/couch_views/src/couch_views_batch_impl.erl
index d17b5b1ec..1bc6014f3 100644
--- a/src/couch_views/src/couch_views_batch_impl.erl
+++ b/src/couch_views/src/couch_views_batch_impl.erl
@@ -14,17 +14,14 @@
-behavior(couch_views_batch).
-
-export([
start/2,
success/3,
failure/2
]).
-
-include_lib("couch_views/include/couch_views.hrl").
-
-record(batch_st, {
start_time,
state,
@@ -36,11 +33,10 @@
threshold_penalty
}).
-
-spec start(
- Mrst::#mrst{},
- State::term()
- ) -> {NewState::term(), BatchSize::pos_integer()}.
+ Mrst :: #mrst{},
+ State :: term()
+) -> {NewState :: term(), BatchSize :: pos_integer()}.
start(Mrst, undefined) ->
St = #batch_st{
state = search,
@@ -50,25 +46,23 @@ start(Mrst, undefined) ->
max_tx_size_bytes = get_config(batch_max_tx_size_bytes, "9000000"),
max_tx_time_msec = get_config(batch_max_tx_time_msec, "1500"),
threshold_penalty = get_config(
- batch_threshold_penalty,
- "0.2",
- fun float_0_to_1/2
- )
+ batch_threshold_penalty,
+ "0.2",
+ fun float_0_to_1/2
+ )
},
start(Mrst, St);
-
start(_Mrst, #batch_st{size = Size} = St) ->
NewSt = St#batch_st{
start_time = erlang:monotonic_time()
},
{NewSt, Size}.
-
-spec success(
- Mrst::#mrst{},
- UpdateStats::couch_views_batch:update_stats(),
- State::term()
- ) -> NewState::term().
+ Mrst :: #mrst{},
+ UpdateStats :: couch_views_batch:update_stats(),
+ State :: term()
+) -> NewState :: term().
success(_Mrst, #{tx_size := TxSize}, #batch_st{} = St) ->
#batch_st{
start_time = StartTime,
@@ -84,63 +78,61 @@ success(_Mrst, #{tx_size := TxSize}, #batch_st{} = St) ->
TxTimeNative = erlang:monotonic_time() - StartTime,
TxTime = erlang:convert_time_unit(TxTimeNative, native, millisecond),
- {NewSize, NewState} = case TxSize > MaxTxSize orelse TxTime > MaxTxTime of
- true ->
- {round(Size * (1.0 - ThresholdPenalty)), sense};
- false when State == search ->
- {Size + SearchIncr, State};
- false when State == sense ->
- {Size + SenseIncr, State}
- end,
+ {NewSize, NewState} =
+ case TxSize > MaxTxSize orelse TxTime > MaxTxTime of
+ true ->
+ {round(Size * (1.0 - ThresholdPenalty)), sense};
+ false when State == search ->
+ {Size + SearchIncr, State};
+ false when State == sense ->
+ {Size + SenseIncr, State}
+ end,
St#batch_st{
size = erlang:max(1, NewSize),
state = NewState
}.
-
--spec failure(Mrst::#mrst{}, State::term()) -> NewState::term().
+-spec failure(Mrst :: #mrst{}, State :: term()) -> NewState :: term().
failure(_Mrst, #batch_st{} = St) ->
St#batch_st{
size = erlang:max(1, St#batch_st.size div 2),
state = sense
}.
-
get_config(Key, Default) ->
get_config(Key, Default, fun non_neg_integer/2).
-
get_config(Key, Default, Validator) ->
StrVal = config:get("couch_views", atom_to_list(Key), Default),
Validator(Key, StrVal).
-
non_neg_integer(Name, Str) ->
try
Val = list_to_integer(Str),
true = Val > 0,
Val
- catch _:_ ->
- erlang:error({invalid_non_neg_integer, {couch_views, Name, Str}})
+ catch
+ _:_ ->
+ erlang:error({invalid_non_neg_integer, {couch_views, Name, Str}})
end.
-
float_0_to_1(Name, Str) ->
- Val = try
- list_to_float(Str)
- catch error:badarg ->
- erlang:error({invalid_float, {couch_views, Name, Str}})
- end,
- if Val >= 0.0 andalso Val =< 1.0 -> Val; true ->
- erlang:error({float_out_of_range, {couch_views, Name, Str}})
+ Val =
+ try
+ list_to_float(Str)
+ catch
+ error:badarg ->
+ erlang:error({invalid_float, {couch_views, Name, Str}})
+ end,
+ if
+ Val >= 0.0 andalso Val =< 1.0 -> Val;
+ true -> erlang:error({float_out_of_range, {couch_views, Name, Str}})
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
good_config_test() ->
with_good_config(fun() ->
{St, 1} = start(#mrst{}, undefined),
@@ -158,7 +150,6 @@ good_config_test() ->
)
end).
-
bad_config_test() ->
Fields = [
{batch_initial_size, invalid_non_neg_integer},
@@ -168,27 +159,31 @@ bad_config_test() ->
{batch_max_tx_time_msec, invalid_non_neg_integer},
{batch_threshold_penalty, invalid_float}
],
- lists:foreach(fun({Field, Error}) ->
- with_bad_config(atom_to_list(Field), fun() ->
- ?assertError(
- {Error, {couch_views, Field, _}},
- start(#mrst{}, undefined)
- )
- end)
- end, Fields).
-
+ lists:foreach(
+ fun({Field, Error}) ->
+ with_bad_config(atom_to_list(Field), fun() ->
+ ?assertError(
+ {Error, {couch_views, Field, _}},
+ start(#mrst{}, undefined)
+ )
+ end)
+ end,
+ Fields
+ ).
float_range_test() ->
with_bad_float_config("batch_threshold_penalty", fun() ->
- lists:foreach(fun(_) ->
- ?assertError(
- {float_out_of_range, {couch_views, batch_threshold_penalty, _}},
- start(#mrst{}, undefined)
- )
- end, lists:seq(1, 10))
+ lists:foreach(
+ fun(_) ->
+ ?assertError(
+ {float_out_of_range, {couch_views, batch_threshold_penalty, _}},
+ start(#mrst{}, undefined)
+ )
+ end,
+ lists:seq(1, 10)
+ )
end).
-
with_good_config(Fun) ->
meck:new(config),
meck:expect(config, get, fun
@@ -205,7 +200,6 @@ with_good_config(Fun) ->
meck:unload()
end.
-
with_bad_config(FieldName, Fun) ->
meck:new(config),
meck:expect(config, get, fun("couch_views", Field, Default) ->
@@ -225,7 +219,6 @@ with_bad_config(FieldName, Fun) ->
meck:unload()
end.
-
with_bad_float_config(FieldName, Fun) ->
meck:new(config),
meck:expect(config, get, fun("couch_views", Field, Default) ->
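
The couch_views_batch hunks above reformat the module's adaptive batch sizing: grow quickly in search mode, creep upward in sense mode, and shrink multiplicatively whenever a transaction overshoots its size or time budget. A minimal, self-contained sketch of that scheme follows; the module and function names are illustrative, and the 0.2 penalty is an assumed example value, not the actual couch_views setting.

    % Sketch of the search/sense sizing above; not part of the diff.
    -module(batch_sizing_sketch).
    -export([next/5]).

    % Overshot is true when the last transaction exceeded its byte or
    % time budget: shrink multiplicatively and fall back to sense mode.
    next(Size, _State, true, _SearchIncr, _SenseIncr) ->
        {erlang:max(1, round(Size * (1.0 - 0.2))), sense};
    % While searching, grow the batch aggressively.
    next(Size, search, false, SearchIncr, _SenseIncr) ->
        {Size + SearchIncr, search};
    % Once in sense mode, creep upward slowly to hug the threshold.
    next(Size, sense, false, _SearchIncr, SenseIncr) ->
        {Size + SenseIncr, sense}.
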
diff --git a/src/couch_views/src/couch_views_ddoc.erl b/src/couch_views/src/couch_views_ddoc.erl
index fae4a3433..bf9a2b2fd 100644
--- a/src/couch_views/src/couch_views_ddoc.erl
+++ b/src/couch_views/src/couch_views_ddoc.erl
@@ -11,30 +11,28 @@
% the License.
-module(couch_views_ddoc).
-
-export([
get_interactive_list/1,
get_mango_list/1,
is_interactive/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-
% TODO: build a ddoc cache that checks the md_version
get_interactive_list(Db) ->
DDocs = fabric2_db:get_design_docs(Db),
lists:filter(fun is_interactive/1, DDocs).
-
get_mango_list(Db) ->
DDocs = fabric2_db:get_design_docs(Db),
- lists:filter(fun (DDoc) ->
- {Props} = couch_doc:to_json_obj(DDoc, []),
- fabric2_util:get_value(<<"language">>, Props) == <<"query">>
- end, DDocs).
-
+ lists:filter(
+ fun(DDoc) ->
+ {Props} = couch_doc:to_json_obj(DDoc, []),
+ fabric2_util:get_value(<<"language">>, Props) == <<"query">>
+ end,
+ DDocs
+ ).
is_interactive(#doc{} = DDoc) ->
{Props} = couch_doc:to_json_obj(DDoc, []),
diff --git a/src/couch_views/src/couch_views_encoding.erl b/src/couch_views/src/couch_views_encoding.erl
index 2f69db306..19b51ac34 100644
--- a/src/couch_views/src/couch_views_encoding.erl
+++ b/src/couch_views/src/couch_views_encoding.erl
@@ -12,7 +12,6 @@
-module(couch_views_encoding).
-
-export([
max/0,
encode/1,
@@ -20,7 +19,6 @@
decode/1
]).
-
-define(NULL, 0).
-define(FALSE, 1).
-define(TRUE, 2).
@@ -30,88 +28,75 @@
-define(OBJECT, 6).
-define(MAX, 255).
-
max() ->
max_encoding_value.
-
encode(X) ->
encode(X, value).
-
encode(X, Type) when Type == key; Type == value ->
erlfdb_tuple:pack(encode_int(X, Type)).
-
decode(Encoded) ->
Val = erlfdb_tuple:unpack(Encoded),
decode_int(Val).
-
encode_int(null, _Type) ->
{?NULL};
-
encode_int(false, _Type) ->
{?FALSE};
-
encode_int(true, _Type) ->
{?TRUE};
-
encode_int(max_encoding_value, _Type) ->
{?MAX};
-
encode_int(Num, key) when is_number(Num) ->
{?NUMBER, float(Num)};
-
encode_int(Num, value) when is_number(Num) ->
{?NUMBER, Num};
-
encode_int(Bin, key) when is_binary(Bin) ->
{?STRING, couch_util:get_sort_key(Bin)};
-
encode_int(Bin, value) when is_binary(Bin) ->
{?STRING, Bin};
-
encode_int(List, Type) when is_list(List) ->
- Encoded = lists:map(fun(Item) ->
- encode_int(Item, Type)
- end, List),
+ Encoded = lists:map(
+ fun(Item) ->
+ encode_int(Item, Type)
+ end,
+ List
+ ),
{?LIST, list_to_tuple(Encoded)};
-
encode_int({Props}, Type) when is_list(Props) ->
- Encoded = lists:map(fun({K, V}) ->
- EK = encode_int(K, Type),
- EV = encode_int(V, Type),
- {EK, EV}
- end, Props),
+ Encoded = lists:map(
+ fun({K, V}) ->
+ EK = encode_int(K, Type),
+ EV = encode_int(V, Type),
+ {EK, EV}
+ end,
+ Props
+ ),
{?OBJECT, list_to_tuple(Encoded)}.
-
decode_int({?NULL}) ->
null;
-
decode_int({?FALSE}) ->
false;
-
decode_int({?TRUE}) ->
true;
-
decode_int({?MAX}) ->
max_encoding_value;
-
decode_int({?STRING, Bin}) ->
Bin;
-
decode_int({?NUMBER, Num}) ->
Num;
-
decode_int({?LIST, List}) ->
lists:map(fun decode_int/1, tuple_to_list(List));
-
decode_int({?OBJECT, Object}) ->
- Props = lists:map(fun({EK, EV}) ->
- K = decode_int(EK),
- V = decode_int(EV),
- {K, V}
- end, tuple_to_list(Object)),
+ Props = lists:map(
+ fun({EK, EV}) ->
+ K = decode_int(EK),
+ V = decode_int(EV),
+ {K, V}
+ end,
+ tuple_to_list(Object)
+ ),
{Props}.
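
couch_views_encoding tags every JSON type with a small integer (?NULL = 0 up through ?MAX = 255) before erlfdb tuple packing, so byte-wise comparison of encoded keys reproduces CouchDB's view collation order: null < false < true < numbers < strings < arrays < objects. A hedged shell example of the value round-trip, assuming a build with erlfdb on the code path:

    1> E = couch_views_encoding:encode([null, true, 1, <<"a">>]).
    2> couch_views_encoding:decode(E).
    [null,true,1,<<"a">>]

Note that only the value encoding is reversible; the key encoding maps strings through couch_util:get_sort_key/1, which is one-way by design.
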
diff --git a/src/couch_views/src/couch_views_epi.erl b/src/couch_views/src/couch_views_epi.erl
index 127b09f13..0564f2b50 100644
--- a/src/couch_views/src/couch_views_epi.erl
+++ b/src/couch_views/src/couch_views_epi.erl
@@ -10,13 +10,10 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_views_epi).
-
-behaviour(couch_epi_plugin).
-
-export([
app/0,
providers/0,
@@ -27,34 +24,27 @@
notify/3
]).
-
app() ->
couch_views.
-
providers() ->
[
{fabric2_db, couch_views_fabric2_plugin}
].
-
services() ->
[
{couch_views, couch_views_plugin}
].
-
data_subscriptions() ->
[].
-
data_providers() ->
[].
-
processes() ->
[].
-
notify(_Key, _Old, _New) ->
ok.
diff --git a/src/couch_views/src/couch_views_fabric2_plugin.erl b/src/couch_views/src/couch_views_fabric2_plugin.erl
index cae0e1f75..481f215ff 100644
--- a/src/couch_views/src/couch_views_fabric2_plugin.erl
+++ b/src/couch_views/src/couch_views_fabric2_plugin.erl
@@ -10,15 +10,12 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_views_fabric2_plugin).
-
-export([
after_doc_write/6
]).
-
-after_doc_write(Db, Doc, NewWinner, OldWinner, NewRevId, Seq)->
+after_doc_write(Db, Doc, NewWinner, OldWinner, NewRevId, Seq) ->
couch_views_updater:index(Db, Doc, NewWinner, OldWinner, NewRevId, Seq),
[Db, Doc, NewWinner, OldWinner, NewRevId, Seq].
diff --git a/src/couch_views/src/couch_views_fdb.erl b/src/couch_views/src/couch_views_fdb.erl
index d8c981300..4f5fc8d08 100644
--- a/src/couch_views/src/couch_views_fdb.erl
+++ b/src/couch_views/src/couch_views_fdb.erl
@@ -36,14 +36,11 @@
-compile(nowarn_export_all).
-endif.
-
-include("couch_views.hrl").
-include_lib("fabric/include/fabric2.hrl").
-
get_view_state(Db, #mrst{} = Mrst) ->
get_view_state(Db, Mrst#mrst.sig);
-
get_view_state(Db, Sig) when is_binary(Sig) ->
#{
tx := Tx
@@ -54,20 +51,23 @@ get_view_state(Db, Sig) when is_binary(Sig) ->
ViewVSF = erlfdb:get(Tx, creation_vs_key(Db, Sig)),
BuildStatusF = erlfdb:get(Tx, build_status_key(Db, Sig)),
- Version = case erlfdb:wait(VersionF) of
- not_found -> not_found;
- VsnVal -> element(1, erlfdb_tuple:unpack(VsnVal))
- end,
+ Version =
+ case erlfdb:wait(VersionF) of
+ not_found -> not_found;
+ VsnVal -> element(1, erlfdb_tuple:unpack(VsnVal))
+ end,
- ViewSeq = case erlfdb:wait(ViewSeqF) of
- not_found -> <<>>;
- SeqVal -> SeqVal
- end,
+ ViewSeq =
+ case erlfdb:wait(ViewSeqF) of
+ not_found -> <<>>;
+ SeqVal -> SeqVal
+ end,
- ViewVS = case erlfdb:wait(ViewVSF) of
- not_found -> not_found;
- VSVal -> element(1, erlfdb_tuple:unpack(VSVal))
- end,
+ ViewVS =
+ case erlfdb:wait(ViewVSF) of
+ not_found -> not_found;
+ VSVal -> element(1, erlfdb_tuple:unpack(VSVal))
+ end,
State = #{
version => Version,
@@ -78,22 +78,18 @@ get_view_state(Db, Sig) when is_binary(Sig) ->
maybe_upgrade_view(Db, Sig, State).
-
new_interactive_index(Db, #mrst{} = Mrst, VS) ->
new_interactive_index(Db, Mrst#mrst.sig, VS);
-
new_interactive_index(Db, Sig, VS) ->
set_version(Db, Sig),
new_creation_vs(Db, Sig, VS),
set_build_status(Db, Sig, ?INDEX_BUILDING).
-
% Interactive View Creation Versionstamp
% (<db>, ?DB_VIEWS, ?VIEW_INFO, ?VIEW_CREATION_VS, Sig) = VS
new_creation_vs(TxDb, #mrst{} = Mrst, VS) ->
new_creation_vs(TxDb, Mrst#mrst.sig, VS);
-
new_creation_vs(TxDb, Sig, VS) ->
#{
tx := Tx
@@ -102,14 +98,12 @@ new_creation_vs(TxDb, Sig, VS) ->
Value = erlfdb_tuple:pack_vs({VS}),
ok = erlfdb:set_versionstamped_value(Tx, Key, Value).
-
get_creation_vs(TxDb, MrstOrSig) ->
#{
view_vs := ViewVS
} = get_view_state(TxDb, MrstOrSig),
ViewVS.
-
% Interactive View Build Status
% (<db>, ?DB_VIEWS, ?VIEW_INFO, ?VIEW_BUILD_STATUS, Sig) = INDEX_BUILDING | INDEX_READY
@@ -119,10 +113,8 @@ get_build_status(TxDb, MrstOrSig) ->
} = get_view_state(TxDb, MrstOrSig),
BuildStatus.
-
set_build_status(TxDb, #mrst{} = Mrst, State) ->
set_build_status(TxDb, Mrst#mrst.sig, State);
-
set_build_status(TxDb, Sig, State) ->
#{
tx := Tx
@@ -131,36 +123,37 @@ set_build_status(TxDb, Sig, State) ->
Key = build_status_key(TxDb, Sig),
ok = erlfdb:set(Tx, Key, State).
-
% View Build Sequence Access
% (<db>, ?DB_VIEWS, Sig, ?VIEW_UPDATE_SEQ) = Sequence
-
get_update_seq(TxDb, MrstOrSig) ->
#{
view_seq := ViewSeq
} = get_view_state(TxDb, MrstOrSig),
ViewSeq.
-
set_update_seq(TxDb, Sig, Seq) ->
#{
tx := Tx
} = TxDb,
ok = erlfdb:set(Tx, seq_key(TxDb, Sig), Seq).
-
list_signatures(Db) ->
#{
db_prefix := DbPrefix
} = Db,
ViewSeqRange = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ},
RangePrefix = erlfdb_tuple:pack(ViewSeqRange, DbPrefix),
- fabric2_fdb:fold_range(Db, RangePrefix, fun({Key, _Val}, Acc) ->
- {Sig} = erlfdb_tuple:unpack(Key, RangePrefix),
- [Sig | Acc]
- end, [], []).
-
+ fabric2_fdb:fold_range(
+ Db,
+ RangePrefix,
+ fun({Key, _Val}, Acc) ->
+ {Sig} = erlfdb_tuple:unpack(Key, RangePrefix),
+ [Sig | Acc]
+ end,
+ [],
+ []
+ ).
clear_index(Db, Signature) ->
#{
@@ -171,10 +164,11 @@ clear_index(Db, Signature) ->
% Get view size to remove from global counter
SizeTuple = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_KV_SIZE, Signature},
SizeKey = erlfdb_tuple:pack(SizeTuple, DbPrefix),
- ViewSize = case erlfdb:wait(erlfdb:get(Tx, SizeKey)) of
- not_found -> 0;
- SizeVal -> ?bin2uint(SizeVal)
- end,
+ ViewSize =
+ case erlfdb:wait(erlfdb:get(Tx, SizeKey)) of
+ not_found -> 0;
+ SizeVal -> ?bin2uint(SizeVal)
+ end,
% Clear index info keys
Keys = [
@@ -182,10 +176,13 @@ clear_index(Db, Signature) ->
{?DB_VIEWS, ?VIEW_INFO, ?VIEW_ROW_COUNT, Signature},
{?DB_VIEWS, ?VIEW_INFO, ?VIEW_KV_SIZE, Signature}
],
- lists:foreach(fun(Key) ->
- FDBKey = erlfdb_tuple:pack(Key, DbPrefix),
- erlfdb:clear(Tx, FDBKey)
- end, Keys),
+ lists:foreach(
+ fun(Key) ->
+ FDBKey = erlfdb_tuple:pack(Key, DbPrefix),
+ erlfdb:clear(Tx, FDBKey)
+ end,
+ Keys
+ ),
% Clear index data
DataTuple = {?DB_VIEWS, ?VIEW_DATA, Signature},
@@ -202,14 +199,17 @@ clear_index(Db, Signature) ->
DbSizeKey = erlfdb_tuple:pack(DbSizeTuple, DbPrefix),
erlfdb:add(Tx, DbSizeKey, -ViewSize).
-
persist_chunks(Tx, set, [Key, Value]) ->
Chunks = fabric2_fdb:chunkify_binary(Value),
- LastId = lists:foldl(fun(Chunk, Id) ->
- ChunkKey = erlfdb_tuple:pack({Id}, Key),
- erlfdb:set(Tx, ChunkKey, Chunk),
- Id + 1
- end, 0, Chunks),
+ LastId = lists:foldl(
+ fun(Chunk, Id) ->
+ ChunkKey = erlfdb_tuple:pack({Id}, Key),
+ erlfdb:set(Tx, ChunkKey, Chunk),
+ Id + 1
+ end,
+ 0,
+ Chunks
+ ),
    % We update nodes in place, so it's possible that
% a node shrank. This clears any keys that we haven't
@@ -217,16 +217,13 @@ persist_chunks(Tx, set, [Key, Value]) ->
LastIdKey = erlfdb_tuple:pack({LastId}, Key),
EndRange = <<Key/binary, 16#FF>>,
erlfdb:clear_range(Tx, LastIdKey, EndRange);
-
persist_chunks(Tx, get, Key) ->
Rows = erlfdb:get_range_startswith(Tx, Key),
Values = [V || {_K, V} <- Rows],
iolist_to_binary(Values);
-
persist_chunks(Tx, clear, Key) ->
erlfdb:clear_range_startswith(Tx, Key).
-
update_kv_size(TxDb, Sig, OldSize, NewSize) ->
#{
tx := Tx,
@@ -241,7 +238,6 @@ update_kv_size(TxDb, Sig, OldSize, NewSize) ->
DbKey = erlfdb_tuple:pack(DbTuple, DbPrefix),
erlfdb:add(Tx, DbKey, NewSize - OldSize).
-
maybe_upgrade_view(_Db, _Sig, #{version := ?CURRENT_VIEW_IMPL_VERSION} = St) ->
St;
maybe_upgrade_view(Db, Sig, #{version := not_found, view_seq := <<>>} = St) ->
@@ -264,7 +260,6 @@ maybe_upgrade_view(Db, Sig, #{version := not_found} = St) ->
build_status => BuildStatus
}.
-
set_version(Db, Sig) ->
#{
tx := Tx
@@ -273,7 +268,6 @@ set_version(Db, Sig) ->
Val = erlfdb_tuple:pack({?CURRENT_VIEW_IMPL_VERSION}),
erlfdb:set(Tx, Key, Val).
-
reset_interactive_index(_Db, _Sig, #{view_vs := not_found}) ->
% Not an interactive index
{not_found, not_found};
@@ -297,7 +291,6 @@ reset_interactive_index(Db, Sig, _St) ->
{VS, ?INDEX_BUILDING}.
-
version_key(Db, Sig) ->
#{
db_prefix := DbPrefix
@@ -305,7 +298,6 @@ version_key(Db, Sig) ->
Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_IMPL_VERSION, Sig},
erlfdb_tuple:pack(Key, DbPrefix).
-
seq_key(Db, Sig) ->
#{
db_prefix := DbPrefix
@@ -313,7 +305,6 @@ seq_key(Db, Sig) ->
Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_UPDATE_SEQ, Sig},
erlfdb_tuple:pack(Key, DbPrefix).
-
creation_vs_key(Db, Sig) ->
#{
db_prefix := DbPrefix
@@ -321,7 +312,6 @@ creation_vs_key(Db, Sig) ->
Key = {?DB_VIEWS, ?VIEW_INFO, ?VIEW_CREATION_VS, Sig},
erlfdb_tuple:pack(Key, DbPrefix).
-
build_status_key(Db, Sig) ->
#{
db_prefix := DbPrefix
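
persist_chunks/3 above splits values across numbered subkeys because FoundationDB caps individual values at 100 KB; since nodes are rewritten in place, it also clears any trailing chunk ids left over from a previously larger value. A standalone sketch of the same pattern, assuming erlfdb is available and Key is an already-packed prefix (the chunk-size argument is an arbitrary example, not the fabric2 constant):

    % Sketch only: write numbered chunks, then clear stale trailing ids.
    persist_set(Tx, Key, Value, MaxChunk) ->
        LastId = lists:foldl(
            fun(Chunk, Id) ->
                erlfdb:set(Tx, erlfdb_tuple:pack({Id}, Key), Chunk),
                Id + 1
            end,
            0,
            chunkify(Value, MaxChunk)
        ),
        LastIdKey = erlfdb_tuple:pack({LastId}, Key),
        erlfdb:clear_range(Tx, LastIdKey, <<Key/binary, 16#FF>>).

    chunkify(Bin, Max) when byte_size(Bin) =< Max ->
        [Bin];
    chunkify(Bin, Max) ->
        <<Head:Max/binary, Rest/binary>> = Bin,
        [Head | chunkify(Rest, Max)].
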
diff --git a/src/couch_views/src/couch_views_http.erl b/src/couch_views/src/couch_views_http.erl
index 67e2a7708..14620a578 100644
--- a/src/couch_views/src/couch_views_http.erl
+++ b/src/couch_views/src/couch_views_http.erl
@@ -30,34 +30,38 @@
-define(BOOKMARK_VSN, 1).
-parse_body_and_query(#httpd{method='POST'} = Req, Keys) ->
+parse_body_and_query(#httpd{method = 'POST'} = Req, Keys) ->
Props = chttpd:json_body_obj(Req),
parse_body_and_query(Req, Props, Keys);
-
parse_body_and_query(Req, Keys) ->
- parse_params(chttpd:qs(Req), Keys, #mrargs{keys=Keys, group=undefined,
- group_level=undefined}, [keep_group_level]).
+ parse_params(
+ chttpd:qs(Req),
+ Keys,
+ #mrargs{
+ keys = Keys,
+ group = undefined,
+ group_level = undefined
+ },
+ [keep_group_level]
+ ).
parse_body_and_query(Req, {Props}, Keys) ->
- Args = #mrargs{keys=Keys, group=undefined, group_level=undefined},
+ Args = #mrargs{keys = Keys, group = undefined, group_level = undefined},
BodyArgs = parse_params(Props, Keys, Args, [decoded]),
parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
-parse_params(#httpd{}=Req, Keys) ->
+parse_params(#httpd{} = Req, Keys) ->
parse_params(chttpd:qs(Req), Keys);
parse_params(Props, Keys) ->
Args = #mrargs{},
parse_params(Props, Keys, Args).
-
parse_params(Props, Keys, Args) ->
parse_params(Props, Keys, Args, []).
-
parse_params([{"bookmark", Bookmark}], _Keys, #mrargs{}, _Options) ->
bookmark_decode(Bookmark);
-
-parse_params(Props, Keys, #mrargs{}=Args, Options) ->
+parse_params(Props, Keys, #mrargs{} = Args, Options) ->
case couch_util:get_value("bookmark", Props, nil) of
nil ->
ok;
@@ -66,44 +70,40 @@ parse_params(Props, Keys, #mrargs{}=Args, Options) ->
end,
couch_views_http_util:parse_params(Props, Keys, Args, Options).
-
row_to_obj(Row) ->
Id = couch_util:get_value(id, Row),
row_to_obj(Id, Row).
-
row_to_obj(Id, Row) ->
couch_views_http_util:row_to_obj(Id, Row).
-
-view_cb(Msg, #vacc{paginated = false}=Acc) ->
+view_cb(Msg, #vacc{paginated = false} = Acc) ->
couch_views_http_util:view_cb(Msg, Acc);
-view_cb(Msg, #vacc{paginated = true}=Acc) ->
+view_cb(Msg, #vacc{paginated = true} = Acc) ->
paginated_cb(Msg, Acc).
-
-paginated_cb({row, Row}, #vacc{buffer=Buf}=Acc) ->
+paginated_cb({row, Row}, #vacc{buffer = Buf} = Acc) ->
{ok, Acc#vacc{buffer = [row_to_obj(Row) | Buf]}};
-
-paginated_cb({error, Reason}, #vacc{}=_Acc) ->
+paginated_cb({error, Reason}, #vacc{} = _Acc) ->
throw({error, Reason});
-
-paginated_cb(complete, #vacc{buffer=Buf}=Acc) ->
- {ok, Acc#vacc{buffer=lists:reverse(Buf)}};
-
-paginated_cb({meta, Meta}, #vacc{}=VAcc) ->
- MetaMap = lists:foldl(fun(MetaData, Acc) ->
- case MetaData of
- {_Key, undefined} ->
- Acc;
- {total, Value} ->
- maps:put(total_rows, Value, Acc);
- {Key, Value} ->
- maps:put(list_to_binary(atom_to_list(Key)), Value, Acc)
- end
- end, #{}, Meta),
- {ok, VAcc#vacc{meta=MetaMap}}.
-
+paginated_cb(complete, #vacc{buffer = Buf} = Acc) ->
+ {ok, Acc#vacc{buffer = lists:reverse(Buf)}};
+paginated_cb({meta, Meta}, #vacc{} = VAcc) ->
+ MetaMap = lists:foldl(
+ fun(MetaData, Acc) ->
+ case MetaData of
+ {_Key, undefined} ->
+ Acc;
+ {total, Value} ->
+ maps:put(total_rows, Value, Acc);
+ {Key, Value} ->
+ maps:put(list_to_binary(atom_to_list(Key)), Value, Acc)
+ end
+ end,
+ #{},
+ Meta
+ ),
+ {ok, VAcc#vacc{meta = MetaMap}}.
paginated(Req, EtagTerm, #mrargs{page_size = PageSize} = Args, KeyFun, Fun) ->
Etag = couch_httpd:make_etag(EtagTerm),
@@ -111,7 +111,6 @@ paginated(Req, EtagTerm, #mrargs{page_size = PageSize} = Args, KeyFun, Fun) ->
hd(do_paginated(PageSize, [Args], KeyFun, Fun))
end).
-
paginated(Req, EtagTerm, PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
Etag = couch_httpd:make_etag(EtagTerm),
chttpd:etag_respond(Req, Etag, fun() ->
@@ -119,29 +118,32 @@ paginated(Req, EtagTerm, PageSize, QueriesArgs, KeyFun, Fun) when is_list(Querie
#{results => Results}
end).
-
do_paginated(PageSize, QueriesArgs, KeyFun, Fun) when is_list(QueriesArgs) ->
- {_N, Results} = lists:foldl(fun(Args0, {Limit, Acc}) ->
- case Limit > 0 of
- true ->
- {OriginalLimit, Args} = set_limit(Args0#mrargs{page_size = Limit}),
- {Meta, Items} = Fun(Args),
- Result0 = maybe_add_next_bookmark(
- OriginalLimit, PageSize, Args, Meta, Items, KeyFun),
- Result = maybe_add_previous_bookmark(Args, Result0, KeyFun),
- {Limit - length(maps:get(rows, Result)), [Result | Acc]};
- false ->
- Bookmark = bookmark_encode(Args0),
- Result = #{
- rows => [],
- next => Bookmark
- },
- {Limit, [Result | Acc]}
- end
- end, {PageSize, []}, QueriesArgs),
+ {_N, Results} = lists:foldl(
+ fun(Args0, {Limit, Acc}) ->
+ case Limit > 0 of
+ true ->
+ {OriginalLimit, Args} = set_limit(Args0#mrargs{page_size = Limit}),
+ {Meta, Items} = Fun(Args),
+ Result0 = maybe_add_next_bookmark(
+ OriginalLimit, PageSize, Args, Meta, Items, KeyFun
+ ),
+ Result = maybe_add_previous_bookmark(Args, Result0, KeyFun),
+ {Limit - length(maps:get(rows, Result)), [Result | Acc]};
+ false ->
+ Bookmark = bookmark_encode(Args0),
+ Result = #{
+ rows => [],
+ next => Bookmark
+ },
+ {Limit, [Result | Acc]}
+ end
+ end,
+ {PageSize, []},
+ QueriesArgs
+ ),
lists:reverse(Results).
-
maybe_add_next_bookmark(OriginalLimit, PageSize, Args0, Response, Items, KeyFun) ->
#mrargs{
page_size = RequestedLimit,
@@ -170,7 +172,6 @@ maybe_add_next_bookmark(OriginalLimit, PageSize, Args0, Response, Items, KeyFun)
})
end.
-
maybe_add_previous_bookmark(#mrargs{extra = Extra} = Args, #{rows := Rows} = Result, KeyFun) ->
StartKey = couch_util:get_value(fk, Extra),
StartId = couch_util:get_value(fid, Extra),
@@ -196,32 +197,28 @@ maybe_add_previous_bookmark(#mrargs{extra = Extra} = Args, #{rows := Rows} = Res
maps:put(previous, Bookmark, Result)
end.
-
first_key(_KeyFun, []) ->
{undefined, undefined};
-
first_key(KeyFun, [First | _]) ->
KeyFun(First).
-
-set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
- when is_integer(PageSize) andalso Limit > PageSize ->
+set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args) when
+ is_integer(PageSize) andalso Limit > PageSize
+->
{Limit, Args#mrargs{limit = PageSize + 1}};
-
-set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args)
- when is_integer(PageSize) ->
+set_limit(#mrargs{page_size = PageSize, limit = Limit} = Args) when
+ is_integer(PageSize)
+->
{Limit, Args#mrargs{limit = Limit + 1}}.
-
-check_completion(OriginalLimit, RequestedLimit, Items)
- when is_integer(OriginalLimit) andalso OriginalLimit =< RequestedLimit ->
+check_completion(OriginalLimit, RequestedLimit, Items) when
+ is_integer(OriginalLimit) andalso OriginalLimit =< RequestedLimit
+->
{Rows, _} = split(OriginalLimit, Items),
{Rows, nil};
-
check_completion(_OriginalLimit, RequestedLimit, Items) ->
split(RequestedLimit, Items).
-
split(Limit, Items) when length(Items) > Limit ->
case lists:split(Limit, Items) of
{Head, [NextItem | _]} ->
@@ -229,32 +226,33 @@ split(Limit, Items) when length(Items) > Limit ->
{Head, []} ->
{Head, nil}
end;
-
split(_Limit, Items) ->
{Items, nil}.
-
bookmark_encode(Args0) ->
Defaults = #mrargs{},
- {RevTerms, Mask, _} = lists:foldl(fun(Value, {Acc, Mask, Idx}) ->
- case element(Idx, Defaults) of
- Value ->
- {Acc, Mask, Idx + 1};
- _Default when Idx == #mrargs.bookmark ->
- {Acc, Mask, Idx + 1};
- _Default ->
-            % It's `(Idx - 1)` because the initial `1`
- % value already accounts for one bit.
- {[Value | Acc], (1 bsl (Idx - 1)) bor Mask, Idx + 1}
- end
- end, {[], 0, 1}, tuple_to_list(Args0)),
+ {RevTerms, Mask, _} = lists:foldl(
+ fun(Value, {Acc, Mask, Idx}) ->
+ case element(Idx, Defaults) of
+ Value ->
+ {Acc, Mask, Idx + 1};
+ _Default when Idx == #mrargs.bookmark ->
+ {Acc, Mask, Idx + 1};
+ _Default ->
+                % It's `(Idx - 1)` because the initial `1`
+ % value already accounts for one bit.
+ {[Value | Acc], (1 bsl (Idx - 1)) bor Mask, Idx + 1}
+ end
+ end,
+ {[], 0, 1},
+ tuple_to_list(Args0)
+ ),
Terms = lists:reverse(RevTerms),
TermBin = term_to_binary(Terms, [compressed, {minor_version, 2}]),
MaskBin = binary:encode_unsigned(Mask),
RawBookmark = <<?BOOKMARK_VSN, MaskBin/binary, TermBin/binary>>,
couch_util:encodeBase64Url(RawBookmark).
-
bookmark_decode(Bookmark) ->
try
RawBin = couch_util:decodeBase64Url(Bookmark),
@@ -262,42 +260,40 @@ bookmark_decode(Bookmark) ->
Mask = binary:decode_unsigned(MaskBin),
Index = mask_to_index(Mask, 1, []),
Terms = binary_to_term(TermBin, [safe]),
- lists:foldl(fun({Idx, Value}, Acc) ->
- setelement(Idx, Acc, Value)
- end, #mrargs{}, lists:zip(Index, Terms))
- catch _:_ ->
- throw({bad_request, <<"Invalid bookmark">>})
+ lists:foldl(
+ fun({Idx, Value}, Acc) ->
+ setelement(Idx, Acc, Value)
+ end,
+ #mrargs{},
+ lists:zip(Index, Terms)
+ )
+ catch
+ _:_ ->
+ throw({bad_request, <<"Invalid bookmark">>})
end.
-
mask_to_index(0, _Pos, Acc) ->
lists:reverse(Acc);
mask_to_index(Mask, Pos, Acc) when is_integer(Mask), Mask > 0 ->
- NewAcc = case Mask band 1 of
- 0 -> Acc;
- 1 -> [Pos | Acc]
- end,
+ NewAcc =
+ case Mask band 1 of
+ 0 -> Acc;
+ 1 -> [Pos | Acc]
+ end,
mask_to_index(Mask bsr 1, Pos + 1, NewAcc).
-
-transform_row(#view_row{value={[{reduce_overflow_error, Msg}]}}) ->
- {row, [{key,null}, {id,error}, {value,reduce_overflow_error}, {reason,Msg}]};
-
-transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
- {row, [{key,Key}, {value,Value}]};
-
-transform_row(#view_row{key=Key, id=undefined}) ->
- {row, [{key,Key}, {id,error}, {value,not_found}]};
-
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
- {row, [{id,Id}, {key,Key}, {value,Value}]};
-
-transform_row(#view_row{key=Key, id=_Id, value=_Value, doc={error,Reason}}) ->
- {row, [{id,error}, {key,Key}, {value,Reason}]};
-
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
- {row, [{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}.
-
+transform_row(#view_row{value = {[{reduce_overflow_error, Msg}]}}) ->
+ {row, [{key, null}, {id, error}, {value, reduce_overflow_error}, {reason, Msg}]};
+transform_row(#view_row{key = Key, id = reduced, value = Value}) ->
+ {row, [{key, Key}, {value, Value}]};
+transform_row(#view_row{key = Key, id = undefined}) ->
+ {row, [{key, Key}, {id, error}, {value, not_found}]};
+transform_row(#view_row{key = Key, id = Id, value = Value, doc = undefined}) ->
+ {row, [{id, Id}, {key, Key}, {value, Value}]};
+transform_row(#view_row{key = Key, id = _Id, value = _Value, doc = {error, Reason}}) ->
+ {row, [{id, error}, {key, Key}, {value, Reason}]};
+transform_row(#view_row{key = Key, id = Id, value = Value, doc = Doc}) ->
+ {row, [{id, Id}, {key, Key}, {value, Value}, {doc, Doc}]}.
-ifdef(TEST).
@@ -309,18 +305,22 @@ bookmark_encode_decode_test() ->
bookmark_decode(bookmark_encode(#mrargs{page_size = 5}))
),
- Randomized = lists:foldl(fun(Idx, Acc) ->
- if Idx == #mrargs.bookmark -> Acc; true ->
- setelement(Idx, Acc, couch_uuids:random())
- end
- end, #mrargs{}, lists:seq(1, record_info(size, mrargs))),
+ Randomized = lists:foldl(
+ fun(Idx, Acc) ->
+ if
+ Idx == #mrargs.bookmark -> Acc;
+ true -> setelement(Idx, Acc, couch_uuids:random())
+ end
+ end,
+ #mrargs{},
+ lists:seq(1, record_info(size, mrargs))
+ ),
?assertEqual(
Randomized,
bookmark_decode(bookmark_encode(Randomized))
).
-
check_completion_test() ->
?assertEqual(
{[], nil},
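
The bookmark scheme above serializes only the #mrargs fields that differ from the record defaults: bit (Idx - 1) of an integer mask records which record positions were kept, and decode zips those positions back together with the stored terms. A worked example of the mask arithmetic, using the mask_to_index/3 shown in the diff:

    % A mask of 2#1011 marks record positions 1, 2 and 4 as non-default.
    1> mask_to_index(2#1011, 1, []).
    [1,2,4]
    % bookmark_decode/1 then folds setelement/3 over lists:zip(Index,
    % Terms) to rebuild the #mrargs{} from a fresh default record.
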
diff --git a/src/couch_views/src/couch_views_http_util.erl b/src/couch_views/src/couch_views_http_util.erl
index b3fd7efc5..3862e118d 100644
--- a/src/couch_views/src/couch_views_http_util.erl
+++ b/src/couch_views/src/couch_views_http_util.erl
@@ -35,80 +35,82 @@
-include_lib("couch_views/include/couch_views.hrl").
%% these clauses start (and possibly end) the response
-view_cb({error, Reason}, #vacc{resp=undefined}=Acc) ->
+view_cb({error, Reason}, #vacc{resp = undefined} = Acc) ->
{ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
- {ok, Acc#vacc{resp=Resp}};
-
-view_cb(complete, #vacc{resp=undefined}=Acc) ->
+ {ok, Acc#vacc{resp = Resp}};
+view_cb(complete, #vacc{resp = undefined} = Acc) ->
% Nothing in view
{ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
- {ok, Acc#vacc{resp=Resp}};
-
-view_cb(Msg, #vacc{resp=undefined}=Acc) ->
+ {ok, Acc#vacc{resp = Resp}};
+view_cb(Msg, #vacc{resp = undefined} = Acc) ->
%% Start response
Headers = [],
{ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
- view_cb(Msg, Acc#vacc{resp=Resp, should_close=true});
-
+ view_cb(Msg, Acc#vacc{resp = Resp, should_close = true});
%% ---------------------------------------------------
%% From here on down, the response has been started.
-view_cb({error, Reason}, #vacc{resp=Resp}=Acc) ->
+view_cb({error, Reason}, #vacc{resp = Resp} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
- {ok, Acc#vacc{resp=Resp1}};
-
-view_cb(complete, #vacc{resp=Resp, buffer=Buf, threshold=Max}=Acc) ->
+ {ok, Acc#vacc{resp = Resp1}};
+view_cb(complete, #vacc{resp = Resp, buffer = Buf, threshold = Max} = Acc) ->
% Finish view output and possibly end the response
{ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
case Acc#vacc.should_close of
true ->
{ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp=Resp2}};
+ {ok, Acc#vacc{resp = Resp2}};
_ ->
- {ok, Acc#vacc{resp=Resp1, meta_sent=false, row_sent=false,
- prepend=",\r\n", buffer=[], bufsize=0}}
+ {ok, Acc#vacc{
+ resp = Resp1,
+ meta_sent = false,
+ row_sent = false,
+ prepend = ",\r\n",
+ buffer = [],
+ bufsize = 0
+ }}
end;
-
-view_cb({meta, Meta}, #vacc{meta_sent=false, row_sent=false}=Acc) ->
+view_cb({meta, Meta}, #vacc{meta_sent = false, row_sent = false} = Acc) ->
% Sending metadata as we've not sent it or any row yet
- Parts = case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [io_lib:format("\"total_rows\":~p", [Total])]
- end ++ case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [io_lib:format("\"offset\":~p", [Offset])]
- end ++ case couch_util:get_value(update_seq, Meta) of
- undefined -> [];
- null ->
- ["\"update_seq\":null"];
- UpdateSeq when is_integer(UpdateSeq) ->
- [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
- UpdateSeq when is_binary(UpdateSeq) ->
- [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
- end ++ ["\"rows\":["],
+ Parts =
+ case couch_util:get_value(total, Meta) of
+ undefined -> [];
+ Total -> [io_lib:format("\"total_rows\":~p", [Total])]
+ end ++
+ case couch_util:get_value(offset, Meta) of
+ undefined -> [];
+ Offset -> [io_lib:format("\"offset\":~p", [Offset])]
+ end ++
+ case couch_util:get_value(update_seq, Meta) of
+ undefined ->
+ [];
+ null ->
+ ["\"update_seq\":null"];
+ UpdateSeq when is_integer(UpdateSeq) ->
+ [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
+ UpdateSeq when is_binary(UpdateSeq) ->
+ [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
+ end ++ ["\"rows\":["],
Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
{ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
- {ok, AccOut#vacc{prepend="", meta_sent=true}};
-
-view_cb({meta, _Meta}, #vacc{}=Acc) ->
+ {ok, AccOut#vacc{prepend = "", meta_sent = true}};
+view_cb({meta, _Meta}, #vacc{} = Acc) ->
%% ignore metadata
{ok, Acc};
-
-view_cb({row, Row}, #vacc{meta_sent=false}=Acc) ->
+view_cb({row, Row}, #vacc{meta_sent = false} = Acc) ->
%% sorted=false and row arrived before meta
% Adding another row
Chunk = [prepend_val(Acc), "{\"rows\":[\r\n", row_to_json(Row)],
- maybe_flush_response(Acc#vacc{meta_sent=true, row_sent=true}, Chunk, iolist_size(Chunk));
-
-view_cb({row, Row}, #vacc{meta_sent=true}=Acc) ->
+ maybe_flush_response(Acc#vacc{meta_sent = true, row_sent = true}, Chunk, iolist_size(Chunk));
+view_cb({row, Row}, #vacc{meta_sent = true} = Acc) ->
% Adding another row
Chunk = [prepend_val(Acc), row_to_json(Row)],
- maybe_flush_response(Acc#vacc{row_sent=true}, Chunk, iolist_size(Chunk)).
-
+ maybe_flush_response(Acc#vacc{row_sent = true}, Chunk, iolist_size(Chunk)).
-maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
+maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
+ Size > 0 andalso (Size + Len) > Max
+->
#vacc{buffer = Buffer, resp = Resp} = Acc,
{ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
{ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
@@ -121,7 +123,7 @@ maybe_flush_response(Acc0, Data, Len) ->
},
{ok, Acc}.
-prepend_val(#vacc{prepend=Prepend}) ->
+prepend_val(#vacc{prepend = Prepend}) ->
case Prepend of
undefined ->
"";
@@ -129,78 +131,87 @@ prepend_val(#vacc{prepend=Prepend}) ->
Prepend
end.
-
row_to_json(Row) ->
?JSON_ENCODE(row_to_obj(Row)).
-
row_to_json(Kind, Row) ->
?JSON_ENCODE(row_to_obj(Kind, Row)).
-
row_to_obj(Row) ->
Id = couch_util:get_value(id, Row),
row_to_obj(Id, Row).
-
row_to_obj(error, Row) ->
% Special case for _all_docs request with KEYS to
% match prior behavior.
Key = couch_util:get_value(key, Row),
Val = couch_util:get_value(value, Row),
Reason = couch_util:get_value(reason, Row),
- ReasonProp = if Reason == undefined -> []; true ->
- [{reason, Reason}]
- end,
+ ReasonProp =
+ if
+ Reason == undefined -> [];
+ true -> [{reason, Reason}]
+ end,
{[{key, Key}, {error, Val}] ++ ReasonProp};
row_to_obj(Id0, Row) ->
- Id = case Id0 of
- undefined -> [];
- Id0 -> [{id, Id0}]
- end,
+ Id =
+ case Id0 of
+ undefined -> [];
+ Id0 -> [{id, Id0}]
+ end,
Key = couch_util:get_value(key, Row, null),
Val = couch_util:get_value(value, Row),
- Doc = case couch_util:get_value(doc, Row) of
- undefined -> [];
- Doc0 -> [{doc, Doc0}]
- end,
+ Doc =
+ case couch_util:get_value(doc, Row) of
+ undefined -> [];
+ Doc0 -> [{doc, Doc0}]
+ end,
{Id ++ [{key, Key}, {value, Val}] ++ Doc}.
-
-parse_params(#httpd{}=Req, Keys) ->
+parse_params(#httpd{} = Req, Keys) ->
parse_params(chttpd:qs(Req), Keys);
parse_params(Props, Keys) ->
Args = #mrargs{},
parse_params(Props, Keys, Args).
-
parse_params(Props, Keys, Args) ->
parse_params(Props, Keys, Args, []).
-parse_params(Props, Keys, #mrargs{}=Args0, Options) ->
+parse_params(Props, Keys, #mrargs{} = Args0, Options) ->
IsDecoded = lists:member(decoded, Options),
- Args1 = case lists:member(keep_group_level, Options) of
- true ->
- Args0;
- _ ->
- % group_level set to undefined to detect if explicitly set by user
- Args0#mrargs{keys=Keys, group=undefined, group_level=undefined}
- end,
- lists:foldl(fun({K, V}, Acc) ->
- parse_param(K, V, Acc, IsDecoded)
- end, Args1, Props).
-
-
-parse_body_and_query(#httpd{method='POST'} = Req, Keys) ->
+ Args1 =
+ case lists:member(keep_group_level, Options) of
+ true ->
+ Args0;
+ _ ->
+ % group_level set to undefined to detect if explicitly set by user
+ Args0#mrargs{keys = Keys, group = undefined, group_level = undefined}
+ end,
+ lists:foldl(
+ fun({K, V}, Acc) ->
+ parse_param(K, V, Acc, IsDecoded)
+ end,
+ Args1,
+ Props
+ ).
+
+parse_body_and_query(#httpd{method = 'POST'} = Req, Keys) ->
Props = chttpd:json_body_obj(Req),
parse_body_and_query(Req, Props, Keys);
-
parse_body_and_query(Req, Keys) ->
- parse_params(chttpd:qs(Req), Keys, #mrargs{keys=Keys, group=undefined,
- group_level=undefined}, [keep_group_level]).
+ parse_params(
+ chttpd:qs(Req),
+ Keys,
+ #mrargs{
+ keys = Keys,
+ group = undefined,
+ group_level = undefined
+ },
+ [keep_group_level]
+ ).
parse_body_and_query(Req, {Props}, Keys) ->
- Args = #mrargs{keys=Keys, group=undefined, group_level=undefined},
+ Args = #mrargs{keys = Keys, group = undefined, group_level = undefined},
BodyArgs = parse_params(Props, Keys, Args, [decoded]),
parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
@@ -211,103 +222,103 @@ parse_param(Key, Val, Args, IsDecoded) ->
"" ->
Args;
"reduce" ->
- Args#mrargs{reduce=parse_boolean(Val)};
+ Args#mrargs{reduce = parse_boolean(Val)};
"key" when IsDecoded ->
- Args#mrargs{start_key=Val, end_key=Val};
+ Args#mrargs{start_key = Val, end_key = Val};
"key" ->
JsonKey = ?JSON_DECODE(Val),
- Args#mrargs{start_key=JsonKey, end_key=JsonKey};
+ Args#mrargs{start_key = JsonKey, end_key = JsonKey};
"keys" when IsDecoded ->
- Args#mrargs{keys=Val};
+ Args#mrargs{keys = Val};
"keys" ->
- Args#mrargs{keys=?JSON_DECODE(Val)};
+ Args#mrargs{keys = ?JSON_DECODE(Val)};
"startkey" when IsDecoded ->
- Args#mrargs{start_key=Val};
+ Args#mrargs{start_key = Val};
"start_key" when IsDecoded ->
- Args#mrargs{start_key=Val};
+ Args#mrargs{start_key = Val};
"startkey" ->
- Args#mrargs{start_key=?JSON_DECODE(Val)};
+ Args#mrargs{start_key = ?JSON_DECODE(Val)};
"start_key" ->
- Args#mrargs{start_key=?JSON_DECODE(Val)};
+ Args#mrargs{start_key = ?JSON_DECODE(Val)};
"startkey_docid" ->
- Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{start_key_docid = couch_util:to_binary(Val)};
"start_key_doc_id" ->
- Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{start_key_docid = couch_util:to_binary(Val)};
"endkey" when IsDecoded ->
- Args#mrargs{end_key=Val};
+ Args#mrargs{end_key = Val};
"end_key" when IsDecoded ->
- Args#mrargs{end_key=Val};
+ Args#mrargs{end_key = Val};
"endkey" ->
- Args#mrargs{end_key=?JSON_DECODE(Val)};
+ Args#mrargs{end_key = ?JSON_DECODE(Val)};
"end_key" ->
- Args#mrargs{end_key=?JSON_DECODE(Val)};
+ Args#mrargs{end_key = ?JSON_DECODE(Val)};
"endkey_docid" ->
- Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{end_key_docid = couch_util:to_binary(Val)};
"end_key_doc_id" ->
- Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{end_key_docid = couch_util:to_binary(Val)};
"limit" ->
- Args#mrargs{limit=parse_pos_int(Val)};
+ Args#mrargs{limit = parse_pos_int(Val)};
"page_size" ->
- Args#mrargs{page_size=parse_pos_int(Val)};
+ Args#mrargs{page_size = parse_pos_int(Val)};
"stale" when Val == "ok" orelse Val == <<"ok">> ->
- Args#mrargs{stable=true, update=false};
+ Args#mrargs{stable = true, update = false};
"stale" when Val == "update_after" orelse Val == <<"update_after">> ->
- Args#mrargs{stable=true, update=lazy};
+ Args#mrargs{stable = true, update = lazy};
"stale" ->
throw({query_parse_error, <<"Invalid value for `stale`.">>});
"stable" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
- Args#mrargs{stable=true};
+ Args#mrargs{stable = true};
"stable" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
- Args#mrargs{stable=false};
+ Args#mrargs{stable = false};
"stable" ->
throw({query_parse_error, <<"Invalid value for `stable`.">>});
"update" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
- Args#mrargs{update=true};
+ Args#mrargs{update = true};
"update" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
- Args#mrargs{update=false};
+ Args#mrargs{update = false};
"update" when Val == "lazy" orelse Val == <<"lazy">> ->
- Args#mrargs{update=lazy};
+ Args#mrargs{update = lazy};
"update" ->
throw({query_parse_error, <<"Invalid value for `update`.">>});
"descending" ->
case parse_boolean(Val) of
- true -> Args#mrargs{direction=rev};
- _ -> Args#mrargs{direction=fwd}
+ true -> Args#mrargs{direction = rev};
+ _ -> Args#mrargs{direction = fwd}
end;
"skip" ->
- Args#mrargs{skip=parse_pos_int(Val)};
+ Args#mrargs{skip = parse_pos_int(Val)};
"group" ->
- Args#mrargs{group=parse_boolean(Val)};
+ Args#mrargs{group = parse_boolean(Val)};
"group_level" ->
- Args#mrargs{group_level=parse_pos_int(Val)};
+ Args#mrargs{group_level = parse_pos_int(Val)};
"inclusive_end" ->
- Args#mrargs{inclusive_end=parse_boolean(Val)};
+ Args#mrargs{inclusive_end = parse_boolean(Val)};
"include_docs" ->
- Args#mrargs{include_docs=parse_boolean(Val)};
+ Args#mrargs{include_docs = parse_boolean(Val)};
"attachments" ->
case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options=[attachments|Opts]};
- false ->
- Args
+ true ->
+ Opts = Args#mrargs.doc_options,
+ Args#mrargs{doc_options = [attachments | Opts]};
+ false ->
+ Args
end;
"att_encoding_info" ->
case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options=[att_encoding_info|Opts]};
- false ->
- Args
+ true ->
+ Opts = Args#mrargs.doc_options,
+ Args#mrargs{doc_options = [att_encoding_info | Opts]};
+ false ->
+ Args
end;
"update_seq" ->
- Args#mrargs{update_seq=parse_boolean(Val)};
+ Args#mrargs{update_seq = parse_boolean(Val)};
"conflicts" ->
- Args#mrargs{conflicts=parse_boolean(Val)};
+ Args#mrargs{conflicts = parse_boolean(Val)};
"callback" ->
- Args#mrargs{callback=couch_util:to_binary(Val)};
+ Args#mrargs{callback = couch_util:to_binary(Val)};
"sorted" ->
- Args#mrargs{sorted=parse_boolean(Val)};
+ Args#mrargs{sorted = parse_boolean(Val)};
"partition" ->
Partition = couch_util:to_binary(Val),
couch_partition:validate_partition(Partition),
@@ -315,10 +326,9 @@ parse_param(Key, Val, Args, IsDecoded) ->
_ ->
BKey = couch_util:to_binary(Key),
BVal = couch_util:to_binary(Val),
- Args#mrargs{extra=[{BKey, BVal} | Args#mrargs.extra]}
+ Args#mrargs{extra = [{BKey, BVal} | Args#mrargs.extra]}
end.
-
parse_boolean(Val) ->
case couch_lib_parse:parse_boolean(Val) of
{error, Reason} ->
@@ -327,7 +337,6 @@ parse_boolean(Val) ->
Boolean
end.
-
parse_pos_int(Val) ->
case couch_lib_parse:parse_non_neg_integer(Val) of
{error, Reason} ->
diff --git a/src/couch_views/src/couch_views_indexer.erl b/src/couch_views/src/couch_views_indexer.erl
index 9a1295ee7..0141d6bfd 100644
--- a/src/couch_views/src/couch_views_indexer.erl
+++ b/src/couch_views/src/couch_views_indexer.erl
@@ -16,7 +16,6 @@
spawn_link/0
]).
-
-export([
init/0,
map_docs/2,
@@ -33,27 +32,23 @@
-include_lib("fabric/include/fabric2.hrl").
-include_lib("kernel/include/logger.hrl").
-
-define(KEY_SIZE_LIMIT, 8000).
-define(VALUE_SIZE_LIMIT, 64000).
-define(DEFAULT_TX_RETRY_LIMIT, 5).
-
% These are all of the errors that we can fix by using
% a smaller batch size.
--define(IS_RECOVERABLE_ERROR(Code), (
- (Code == ?ERLFDB_TIMED_OUT) orelse
- (Code == ?ERLFDB_TRANSACTION_TOO_OLD) orelse
- (Code == ?ERLFDB_TRANSACTION_TIMED_OUT) orelse
- (Code == ?ERLFDB_TRANSACTION_TOO_LARGE)
-)).
-
+-define(IS_RECOVERABLE_ERROR(Code),
+ ((Code == ?ERLFDB_TIMED_OUT) orelse
+ (Code == ?ERLFDB_TRANSACTION_TOO_OLD) orelse
+ (Code == ?ERLFDB_TRANSACTION_TIMED_OUT) orelse
+ (Code == ?ERLFDB_TRANSACTION_TOO_LARGE))
+).
spawn_link() ->
proc_lib:spawn_link(?MODULE, init, []).
-
init() ->
Opts = #{no_schedule => true},
{ok, Job, Data0} = couch_jobs:accept(?INDEX_JOB_TYPE, Opts),
@@ -69,24 +64,28 @@ init() ->
<<"retries">> := Retries
} = Data,
- {ok, Db} = try
- fabric2_db:open(DbName, [?ADMIN_CTX, {uuid, DbUUID}])
- catch error:database_does_not_exist ->
- fail_job(Job, Data, db_deleted, "Database was deleted")
- end,
+ {ok, Db} =
+ try
+ fabric2_db:open(DbName, [?ADMIN_CTX, {uuid, DbUUID}])
+ catch
+ error:database_does_not_exist ->
+ fail_job(Job, Data, db_deleted, "Database was deleted")
+ end,
- {ok, DDoc} = case fabric2_db:open_doc(Db, DDocId) of
- {ok, DDoc0} ->
- {ok, DDoc0};
- {not_found, _} ->
- fail_job(Job, Data, ddoc_deleted, "Design document was deleted")
- end,
+ {ok, DDoc} =
+ case fabric2_db:open_doc(Db, DDocId) of
+ {ok, DDoc0} ->
+ {ok, DDoc0};
+ {not_found, _} ->
+ fail_job(Job, Data, ddoc_deleted, "Design document was deleted")
+ end,
{ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
HexSig = fabric2_util:to_hex(Mrst#mrst.sig),
- if HexSig == JobSig -> ok; true ->
- fail_job(Job, Data, sig_changed, "Design document was modified")
+ if
+ HexSig == JobSig -> ok;
+ true -> fail_job(Job, Data, sig_changed, "Design document was modified")
end,
DbSeq = fabric2_fdb:transactional(Db, fun(TxDb) ->
@@ -151,33 +150,32 @@ init() ->
end
end.
-
upgrade_data(Data) ->
Defaults = [
{<<"retries">>, 0},
{<<"db_uuid">>, undefined}
],
- lists:foldl(fun({Key, Default}, Acc) ->
- case maps:is_key(Key, Acc) of
- true -> Acc;
- false -> maps:put(Key, Default, Acc)
- end
- end, Data, Defaults),
+ lists:foldl(
+ fun({Key, Default}, Acc) ->
+ case maps:is_key(Key, Acc) of
+ true -> Acc;
+ false -> maps:put(Key, Default, Acc)
+ end
+ end,
+ Data,
+ Defaults
+ ),
% initialize active task
fabric2_active_tasks:update_active_task_info(Data, #{}).
-
% Transaction limit exceeded; don't retry
should_retry(_, _, {erlfdb_error, ?ERLFDB_TRANSACTION_TOO_LARGE}) ->
false;
-
should_retry(Retries, RetryLimit, _) when Retries < RetryLimit ->
true;
-
should_retry(_, _, _) ->
false.
-
add_error(error, {erlfdb_error, Code}, Data) ->
CodeBin = couch_util:to_binary(Code),
CodeString = erlfdb:get_error_string(Code),
@@ -185,26 +183,26 @@ add_error(error, {erlfdb_error, Code}, Data) ->
error => foundationdb_error,
reason => list_to_binary([CodeBin, <<"-">>, CodeString])
};
-
add_error(Error, Reason, Data) ->
Data#{
error => couch_util:to_binary(Error),
reason => couch_util:to_binary(Reason)
}.
-
update(#{} = Db, Mrst0, State0) ->
Limit = couch_views_batch:start(Mrst0),
- Result = try
- do_update(Db, Mrst0, State0#{limit => Limit})
- catch
- error:{erlfdb_error, Error} when ?IS_RECOVERABLE_ERROR(Error) ->
- couch_views_batch:failure(Mrst0),
- update(Db, Mrst0, State0)
- end,
+ Result =
+ try
+ do_update(Db, Mrst0, State0#{limit => Limit})
+ catch
+ error:{erlfdb_error, Error} when ?IS_RECOVERABLE_ERROR(Error) ->
+ couch_views_batch:failure(Mrst0),
+ update(Db, Mrst0, State0)
+ end,
case Result of
ok ->
- ok; % Already finished and released map context
+ % Already finished and released map context
+ ok;
{Mrst1, finished} ->
couch_eval:release_map_context(Mrst1#mrst.qserver);
{Mrst1, State1} ->
@@ -215,7 +213,6 @@ update(#{} = Db, Mrst0, State0) ->
update(Db, Mrst1, State1)
end.
-
do_update(Db, Mrst0, State0) ->
TxOpts = #{retry_limit => maps:get(tx_retry_limit, State0)},
TxResult = fabric2_fdb:transactional(Db, TxOpts, fun(TxDb) ->
@@ -223,7 +220,7 @@ do_update(Db, Mrst0, State0) ->
tx := Tx
} = TxDb,
- Snapshot = TxDb#{ tx := erlfdb:snapshot(Tx) },
+ Snapshot = TxDb#{tx := erlfdb:snapshot(Tx)},
State1 = get_update_start_state(TxDb, Mrst0, State0),
Mrst1 = couch_views_trees:open(TxDb, Mrst0),
@@ -281,7 +278,6 @@ do_update(Db, Mrst0, State0) ->
{Mrst, finished}
end.
-
do_finalize(Mrst, State) ->
#{tx_db := OldDb} = State,
ViewReadVsn = erlfdb:get_committed_version(maps:get(tx, OldDb)),
@@ -305,7 +301,6 @@ do_finalize(Mrst, State) ->
report_progress(State1, finished)
end).
-
is_update_finished(State) ->
#{
db_seq := DbSeq,
@@ -313,20 +308,18 @@ is_update_finished(State) ->
view_vs := ViewVs
} = State,
AtDbSeq = LastSeq == DbSeq,
- AtViewVs = case ViewVs of
- not_found -> false;
- _ -> LastSeq == fabric2_fdb:vs_to_seq(ViewVs)
- end,
+ AtViewVs =
+ case ViewVs of
+ not_found -> false;
+ _ -> LastSeq == fabric2_fdb:vs_to_seq(ViewVs)
+ end,
AtDbSeq orelse AtViewVs.
-
maybe_set_build_status(_TxDb, _Mrst1, not_found, _State) ->
ok;
-
maybe_set_build_status(TxDb, Mrst1, _ViewVS, State) ->
couch_views_fdb:set_build_status(TxDb, Mrst1, State).
-
% In the first iteration of update we need
% to populate our db and view sequences
get_update_start_state(TxDb, Mrst, #{view_seq := undefined} = State) ->
@@ -341,13 +334,11 @@ get_update_start_state(TxDb, Mrst, #{view_seq := undefined} = State) ->
view_seq := ViewSeq,
last_seq := ViewSeq
};
-
get_update_start_state(TxDb, _Idx, State) ->
State#{
tx_db := TxDb
}.
-
fold_changes(Snapshot, State) ->
#{
view_seq := SinceSeq,
@@ -376,7 +367,6 @@ fold_changes(Snapshot, State) ->
Result
end.
-
process_changes(Change, Acc) ->
#{
doc_acc := DocAcc,
@@ -393,64 +383,74 @@ process_changes(Change, Acc) ->
IncludeDesign = lists:keymember(<<"include_design">>, 1, DesignOpts),
- Acc1 = case {Id, IncludeDesign} of
- {<<?DESIGN_DOC_PREFIX, _/binary>>, false} ->
- maps:merge(Acc, #{
- rows_processed => RowsProcessed + 1,
- count => Count + 1,
- last_seq => LastSeq
- });
- _ ->
- Acc#{
- doc_acc := DocAcc ++ [Change],
- rows_processed := RowsProcessed + 1,
- count := Count + 1,
- last_seq := LastSeq
- }
- end,
+ Acc1 =
+ case {Id, IncludeDesign} of
+ {<<?DESIGN_DOC_PREFIX, _/binary>>, false} ->
+ maps:merge(Acc, #{
+ rows_processed => RowsProcessed + 1,
+ count => Count + 1,
+ last_seq => LastSeq
+ });
+ _ ->
+ Acc#{
+ doc_acc := DocAcc ++ [Change],
+ rows_processed := RowsProcessed + 1,
+ count := Count + 1,
+ last_seq := LastSeq
+ }
+ end,
DocVS = fabric2_fdb:seq_to_vs(LastSeq),
Go = maybe_stop_at_vs(ViewVS, DocVS),
{Go, Acc1}.
-
maybe_stop_at_vs({versionstamp, _} = ViewVS, DocVS) when DocVS >= ViewVS ->
stop;
-
maybe_stop_at_vs(_, _) ->
ok.
-
map_docs(Mrst, []) ->
{Mrst, []};
-
map_docs(Mrst, Docs) ->
    % Run all the non-deleted docs through the view engine
Mrst1 = start_query_server(Mrst),
QServer = Mrst1#mrst.qserver,
- {Deleted0, NotDeleted0} = lists:partition(fun(Doc) ->
- #{deleted := Deleted} = Doc,
- Deleted
- end, Docs),
+ {Deleted0, NotDeleted0} = lists:partition(
+ fun(Doc) ->
+ #{deleted := Deleted} = Doc,
+ Deleted
+ end,
+ Docs
+ ),
- Deleted1 = lists:map(fun(Doc) ->
- Doc#{results => [[] || _ <- Mrst1#mrst.views]}
- end, Deleted0),
+ Deleted1 = lists:map(
+ fun(Doc) ->
+ Doc#{results => [[] || _ <- Mrst1#mrst.views]}
+ end,
+ Deleted0
+ ),
- DocsToMap = lists:map(fun(Doc) ->
- #{doc := DocRec} = Doc,
- DocRec
- end, NotDeleted0),
+ DocsToMap = lists:map(
+ fun(Doc) ->
+ #{doc := DocRec} = Doc,
+ DocRec
+ end,
+ NotDeleted0
+ ),
{ok, AllResults} = couch_eval:map_docs(QServer, DocsToMap),
% The expanded function head here is making an assertion
% that the results match the given doc
- NotDeleted1 = lists:zipwith(fun(#{id := DocId} = Doc, {DocId, Results}) ->
- Doc#{results => Results}
- end, NotDeleted0, AllResults),
+ NotDeleted1 = lists:zipwith(
+ fun(#{id := DocId} = Doc, {DocId, Results}) ->
+ Doc#{results => Results}
+ end,
+ NotDeleted0,
+ AllResults
+ ),
    % I'm being a bit careful here re-sorting the docs
% in order of the changes feed. Theoretically this is
@@ -458,15 +458,17 @@ map_docs(Mrst, Docs) ->
    % However, I'm concerned that if we ever split this up
    % into multiple transactions, this detail might be
    % important but forgotten.
- MappedDocs = lists:sort(fun(A, B) ->
- #{sequence := ASeq} = A,
- #{sequence := BSeq} = B,
- ASeq =< BSeq
- end, Deleted1 ++ NotDeleted1),
+ MappedDocs = lists:sort(
+ fun(A, B) ->
+ #{sequence := ASeq} = A,
+ #{sequence := BSeq} = B,
+ ASeq =< BSeq
+ end,
+ Deleted1 ++ NotDeleted1
+ ),
{Mrst1, MappedDocs}.
-
write_docs(TxDb, Mrst, Docs0, State) ->
#mrst{
sig = Sig
@@ -479,79 +481,107 @@ write_docs(TxDb, Mrst, Docs0, State) ->
KeyLimit = key_size_limit(),
ValLimit = value_size_limit(),
- {Docs1, TotalKVCount} = lists:mapfoldl(fun(Doc0, KVCount) ->
- Doc1 = check_kv_size_limit(Mrst, Doc0, KeyLimit, ValLimit),
- {Doc1, KVCount + count_kvs(Doc1)}
- end, 0, Docs0),
+ {Docs1, TotalKVCount} = lists:mapfoldl(
+ fun(Doc0, KVCount) ->
+ Doc1 = check_kv_size_limit(Mrst, Doc0, KeyLimit, ValLimit),
+ {Doc1, KVCount + count_kvs(Doc1)}
+ end,
+ 0,
+ Docs0
+ ),
couch_views_trees:update_views(TxDb, Mrst, Docs1),
- if LastSeq == false -> ok; true ->
- couch_views_fdb:set_update_seq(TxDb, Sig, LastSeq)
+ if
+ LastSeq == false -> ok;
+ true -> couch_views_fdb:set_update_seq(TxDb, Sig, LastSeq)
end,
TotalKVCount.
-
fetch_docs(Db, DesignOpts, Changes) ->
- {Deleted, NotDeleted} = lists:partition(fun(Doc) ->
- #{deleted := Deleted} = Doc,
- Deleted
- end, Changes),
-
- RevState = lists:foldl(fun(Change, Acc) ->
- #{id := Id} = Change,
- RevFuture = fabric2_fdb:get_winning_revs_future(Db, Id, 1),
- Acc#{
- RevFuture => {Id, Change}
- }
- end, #{}, NotDeleted),
+ {Deleted, NotDeleted} = lists:partition(
+ fun(Doc) ->
+ #{deleted := Deleted} = Doc,
+ Deleted
+ end,
+ Changes
+ ),
+
+ RevState = lists:foldl(
+ fun(Change, Acc) ->
+ #{id := Id} = Change,
+ RevFuture = fabric2_fdb:get_winning_revs_future(Db, Id, 1),
+ Acc#{
+ RevFuture => {Id, Change}
+ }
+ end,
+ #{},
+ NotDeleted
+ ),
RevFutures = maps:keys(RevState),
- BodyState = lists:foldl(fun(RevFuture, Acc) ->
- {Id, Change} = maps:get(RevFuture, RevState),
- Revs = fabric2_fdb:get_revs_wait(Db, RevFuture),
-
-        % I'm assuming that in this changes transaction the winning
- % doc body exists since it is listed in the changes feed as not deleted
- #{winner := true} = RevInfo = lists:last(Revs),
- BodyFuture = fabric2_fdb:get_doc_body_future(Db, Id, RevInfo),
- Acc#{
- BodyFuture => {Id, RevInfo, Change}
- }
- end, #{}, erlfdb:wait_for_all(RevFutures)),
+ BodyState = lists:foldl(
+ fun(RevFuture, Acc) ->
+ {Id, Change} = maps:get(RevFuture, RevState),
+ Revs = fabric2_fdb:get_revs_wait(Db, RevFuture),
+
+            % I'm assuming that in this changes transaction the winning
+ % doc body exists since it is listed in the changes feed as not deleted
+ #{winner := true} = RevInfo = lists:last(Revs),
+ BodyFuture = fabric2_fdb:get_doc_body_future(Db, Id, RevInfo),
+ Acc#{
+ BodyFuture => {Id, RevInfo, Change}
+ }
+ end,
+ #{},
+ erlfdb:wait_for_all(RevFutures)
+ ),
AddLocalSeq = fabric2_util:get_value(<<"local_seq">>, DesignOpts, false),
BodyFutures = maps:keys(BodyState),
- ChangesWithDocs = lists:map(fun (BodyFuture) ->
- {Id, RevInfo, Change} = maps:get(BodyFuture, BodyState),
- Doc = fabric2_fdb:get_doc_body_wait(Db, Id, RevInfo, BodyFuture),
-
- Doc1 = case maps:get(branch_count, RevInfo, 1) of
- 1 when AddLocalSeq ->
- {ok, DocWithLocalSeq} = fabric2_db:apply_open_doc_opts(Doc,
- [RevInfo], [local_seq]),
- DocWithLocalSeq;
- 1 ->
- Doc;
- _ ->
- RevConflicts = fabric2_fdb:get_all_revs(Db, Id),
- DocOpts = if not AddLocalSeq -> []; true -> [local_seq] end,
-
- {ok, DocWithConflicts} = fabric2_db:apply_open_doc_opts(Doc,
- RevConflicts, [conflicts | DocOpts]),
- DocWithConflicts
+ ChangesWithDocs = lists:map(
+ fun(BodyFuture) ->
+ {Id, RevInfo, Change} = maps:get(BodyFuture, BodyState),
+ Doc = fabric2_fdb:get_doc_body_wait(Db, Id, RevInfo, BodyFuture),
+
+ Doc1 =
+ case maps:get(branch_count, RevInfo, 1) of
+ 1 when AddLocalSeq ->
+ {ok, DocWithLocalSeq} = fabric2_db:apply_open_doc_opts(
+ Doc,
+ [RevInfo],
+ [local_seq]
+ ),
+ DocWithLocalSeq;
+ 1 ->
+ Doc;
+ _ ->
+ RevConflicts = fabric2_fdb:get_all_revs(Db, Id),
+ DocOpts =
+ if
+ not AddLocalSeq -> [];
+ true -> [local_seq]
+ end,
+
+ {ok, DocWithConflicts} = fabric2_db:apply_open_doc_opts(
+ Doc,
+ RevConflicts,
+ [conflicts | DocOpts]
+ ),
+ DocWithConflicts
+ end,
+ Change#{doc => Doc1}
end,
- Change#{doc => Doc1}
- end, erlfdb:wait_for_all(BodyFutures)),
+ erlfdb:wait_for_all(BodyFutures)
+ ),
    % This combines the deleted changes with the changes that contain docs.
    % Note that the result is now unsorted, which is fine for now but could
    % become an issue later if we split this across transactions.
Deleted ++ ChangesWithDocs.
-
start_query_server(#mrst{qserver = nil} = Mrst) ->
#mrst{
db_name = DbName,
@@ -561,24 +591,24 @@ start_query_server(#mrst{qserver = nil} = Mrst) ->
lib = Lib,
views = Views
} = Mrst,
- case couch_eval:acquire_map_context(
+ case
+ couch_eval:acquire_map_context(
DbName,
DDocId,
Language,
Sig,
Lib,
[View#mrview.def || View <- Views]
- ) of
+ )
+ of
{ok, QServer} ->
Mrst#mrst{qserver = QServer};
{error, Error} ->
error(Error)
end;
-
start_query_server(#mrst{} = Mrst) ->
Mrst.
-
check_kv_size_limit(Mrst, Doc, KeyLimit, ValLimit) ->
#mrst{
db_name = DbName,
@@ -588,48 +618,60 @@ check_kv_size_limit(Mrst, Doc, KeyLimit, ValLimit) ->
results := Results
} = Doc,
try
- lists:foreach(fun(ViewRows) ->
- lists:foreach(fun({K, V}) ->
- KeySize = couch_ejson_size:encoded_size(K),
- ValSize = couch_ejson_size:encoded_size(V),
-
- if KeySize =< KeyLimit -> ok; true ->
- throw({size_error, key})
- end,
-
- if ValSize =< ValLimit -> ok; true ->
- throw({size_error, value})
- end
- end, ViewRows)
- end, Results),
+ lists:foreach(
+ fun(ViewRows) ->
+ lists:foreach(
+ fun({K, V}) ->
+ KeySize = couch_ejson_size:encoded_size(K),
+ ValSize = couch_ejson_size:encoded_size(V),
+
+ if
+ KeySize =< KeyLimit -> ok;
+ true -> throw({size_error, key})
+ end,
+
+ if
+ ValSize =< ValLimit -> ok;
+ true -> throw({size_error, value})
+ end
+ end,
+ ViewRows
+ )
+ end,
+ Results
+ ),
Doc
- catch throw:{size_error, Type} ->
- #{id := DocId} = Doc,
- ?LOG_ERROR(#{
- what => lists:concat(["oversized_", Type]),
- db => DbName,
- docid => DocId,
- index => IdxName
- }),
- Fmt = "View ~s size error for docid `~s`, excluded from indexing "
- "in db `~s` for design doc `~s`",
- couch_log:error(Fmt, [Type, DocId, DbName, IdxName]),
- Doc#{
- deleted := true,
- results := [[] || _ <- Mrst#mrst.views],
- kv_sizes => []
- }
+ catch
+ throw:{size_error, Type} ->
+ #{id := DocId} = Doc,
+ ?LOG_ERROR(#{
+ what => lists:concat(["oversized_", Type]),
+ db => DbName,
+ docid => DocId,
+ index => IdxName
+ }),
+ Fmt =
+ "View ~s size error for docid `~s`, excluded from indexing "
+ "in db `~s` for design doc `~s`",
+ couch_log:error(Fmt, [Type, DocId, DbName, IdxName]),
+ Doc#{
+ deleted := true,
+ results := [[] || _ <- Mrst#mrst.views],
+ kv_sizes => []
+ }
end.
-
count_kvs(Doc) ->
#{
results := Results
} = Doc,
- lists:foldl(fun(ViewRows, Count) ->
- Count + length(ViewRows)
- end, 0, Results).
-
+ lists:foldl(
+ fun(ViewRows, Count) ->
+ Count + length(ViewRows)
+ end,
+ 0,
+ Results
+ ).
report_progress(State, UpdateType) ->
#{
@@ -652,13 +694,19 @@ report_progress(State, UpdateType) ->
} = JobData,
ActiveTasks = fabric2_active_tasks:get_active_task_info(JobData),
- TotalDone = case maps:get(<<"changes_done">>, ActiveTasks, 0) of
- 0 -> ChangesDone;
- N -> N + ChangesDone
- end,
+ TotalDone =
+ case maps:get(<<"changes_done">>, ActiveTasks, 0) of
+ 0 -> ChangesDone;
+ N -> N + ChangesDone
+ end,
- NewActiveTasks = couch_views_util:active_tasks_info(TotalDone,
- DbName, DDocId, LastSeq, DBSeq),
+ NewActiveTasks = couch_views_util:active_tasks_info(
+ TotalDone,
+ DbName,
+ DDocId,
+ LastSeq,
+ DBSeq
+ ),
% Reconstruct from scratch to remove any
% possible existing error state.
@@ -672,8 +720,10 @@ report_progress(State, UpdateType) ->
<<"db_read_vsn">> => DbReadVsn,
<<"view_read_vsn">> => ViewReadVsn
},
- NewData = fabric2_active_tasks:update_active_task_info(NewData0,
- NewActiveTasks),
+ NewData = fabric2_active_tasks:update_active_task_info(
+ NewData0,
+ NewActiveTasks
+ ),
case UpdateType of
update ->
@@ -696,25 +746,23 @@ report_progress(State, UpdateType) ->
end
end.
-
fail_job(Job, Data, Error, Reason) ->
NewData = add_error(Error, Reason, Data),
couch_jobs:finish(undefined, Job, NewData),
exit(normal).
-
retry_limit() ->
config:get_integer("couch_views", "retry_limit", 3).
-
key_size_limit() ->
config:get_integer("couch_views", "key_size_limit", ?KEY_SIZE_LIMIT).
-
value_size_limit() ->
config:get_integer("couch_views", "value_size_limit", ?VALUE_SIZE_LIMIT).
-
tx_retry_limit() ->
- config:get_integer("couch_views", "indexer_tx_retry_limit",
- ?DEFAULT_TX_RETRY_LIMIT).
+ config:get_integer(
+ "couch_views",
+ "indexer_tx_retry_limit",
+ ?DEFAULT_TX_RETRY_LIMIT
+ ).
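
check_kv_size_limit/4 above prefers to drop a single oversized document from the index, logging it, rather than failing the whole batch; the doc is turned into a tombstone with empty per-view results so any rows it previously emitted get cleared. A reduced sketch of that decision (the helper name and shape are assumptions for illustration):

    % Sketch: keep the doc when every emitted KV fits, otherwise return
    % a tombstone so stale rows are removed on the next tree update.
    check_doc(#{results := Results} = Doc, NViews, KeyLimit, ValLimit) ->
        Fits = fun({K, V}) ->
            couch_ejson_size:encoded_size(K) =< KeyLimit andalso
                couch_ejson_size:encoded_size(V) =< ValLimit
        end,
        case lists:all(fun(Rows) -> lists:all(Fits, Rows) end, Results) of
            true ->
                Doc;
            false ->
                Doc#{deleted := true, results := [[] || _ <- lists:seq(1, NViews)]}
        end.
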
diff --git a/src/couch_views/src/couch_views_jobs.erl b/src/couch_views/src/couch_views_jobs.erl
index 17f0118b4..debdc3585 100644
--- a/src/couch_views/src/couch_views_jobs.erl
+++ b/src/couch_views/src/couch_views_jobs.erl
@@ -25,14 +25,11 @@
-compile(nowarn_export_all).
-endif.
-
-include("couch_views.hrl").
-
set_timeout() ->
couch_jobs:set_type_timeout(?INDEX_JOB_TYPE, 26).
-
build_view(TxDb, Mrst, UpdateSeq) ->
{ok, JobId} = build_view_async(TxDb, Mrst),
case wait_for_job(JobId, Mrst#mrst.idx_name, UpdateSeq) of
@@ -40,7 +37,6 @@ build_view(TxDb, Mrst, UpdateSeq) ->
retry -> build_view(TxDb, Mrst, UpdateSeq)
end.
-
build_view_async(TxDb0, Mrst) ->
JobId = job_id(TxDb0, Mrst),
JobData = job_data(TxDb0, Mrst),
@@ -60,28 +56,23 @@ build_view_async(TxDb0, Mrst) ->
end),
{ok, JobId}.
-
remove(TxDb, Sig) ->
DbName = fabric2_db:name(TxDb),
JobId = job_id(DbName, Sig),
couch_jobs:remove(TxDb, ?INDEX_JOB_TYPE, JobId).
-
job_state(#{} = TxDb, #mrst{} = Mrst) ->
JobId = job_id(TxDb, Mrst),
couch_jobs:get_job_state(TxDb, ?INDEX_JOB_TYPE, JobId).
-
ensure_correct_tx(#{tx := undefined} = TxDb) ->
TxDb;
-
ensure_correct_tx(#{tx := Tx} = TxDb) ->
case erlfdb:is_read_only(Tx) of
true -> TxDb#{tx := undefined};
false -> TxDb
end.
-
wait_for_job(JobId, DDocId, UpdateSeq) ->
case couch_jobs:subscribe(?INDEX_JOB_TYPE, JobId) of
{ok, Subscription, _State, _Data} ->
@@ -95,7 +86,6 @@ wait_for_job(JobId, DDocId, UpdateSeq) ->
end
end.
-
wait_for_job(JobId, Subscription, DDocId, UpdateSeq) ->
case wait(Subscription) of
{not_found, not_found} ->
@@ -116,8 +106,9 @@ wait_for_job(JobId, Subscription, DDocId, UpdateSeq) ->
{finished, #{<<"error">> := Error, <<"reason">> := Reason}} ->
couch_jobs:remove(undefined, ?INDEX_JOB_TYPE, JobId),
erlang:error({binary_to_existing_atom(Error, latin1), Reason});
- {finished, #{<<"view_seq">> := ViewSeq} = JobData}
- when ViewSeq >= UpdateSeq ->
+ {finished, #{<<"view_seq">> := ViewSeq} = JobData} when
+ ViewSeq >= UpdateSeq
+ ->
{ok, idx_vstamps(JobData)};
{finished, _} ->
wait_for_job(JobId, DDocId, UpdateSeq);
@@ -125,7 +116,6 @@ wait_for_job(JobId, Subscription, DDocId, UpdateSeq) ->
wait_for_job(JobId, Subscription, DDocId, UpdateSeq)
end.
-
idx_vstamps(#{} = JobData) ->
#{
<<"db_read_vsn">> := DbReadVsn,
@@ -133,17 +123,14 @@ idx_vstamps(#{} = JobData) ->
} = JobData,
{DbReadVsn, ViewReadVsn}.
-
job_id(#{name := DbName}, #mrst{sig = Sig}) ->
job_id(DbName, Sig);
-
job_id(DbName, Sig) ->
HexSig = fabric2_util:to_hex(Sig),
% Put signature first in order to be able to use the no_schedule
% couch_jobs:accept/2 option
<<HexSig/binary, "-", DbName/binary>>.
-
job_data(Db, Mrst) ->
#mrst{
idx_name = DDocId,
@@ -158,7 +145,6 @@ job_data(Db, Mrst) ->
retries => 0
}.
-
wait(Subscription) ->
case couch_jobs:wait(Subscription, infinity) of
{?INDEX_JOB_TYPE, _JobId, JobState, JobData} ->
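
As the comment above notes, job_id/2 puts the hex-encoded signature
before the database name so that couch_jobs:accept/2 with the
no_schedule option can match on a signature prefix. A standalone sketch
of that layout; to_hex/1 is a hypothetical stand-in for
fabric2_util:to_hex/1:

    -module(job_id_example).
    -export([job_id/2]).

    %% Signature-first layout: <<HexSig, "-", DbName>>.
    job_id(DbName, Sig) when is_binary(DbName), is_binary(Sig) ->
        HexSig = to_hex(Sig),
        <<HexSig/binary, "-", DbName/binary>>.

    %% Lowercase hex, one character per nibble.
    to_hex(Bin) ->
        << <<(nibble(N))>> || <<N:4>> <= Bin >>.

    nibble(N) when N < 10 -> $0 + N;
    nibble(N) -> $a + N - 10.

For example, job_id_example:job_id(<<"db1">>, <<16#AB, 16#CD>>)
evaluates to <<"abcd-db1">>.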
diff --git a/src/couch_views/src/couch_views_plugin.erl b/src/couch_views/src/couch_views_plugin.erl
index f8169179a..dfdc08ffe 100644
--- a/src/couch_views/src/couch_views_plugin.erl
+++ b/src/couch_views/src/couch_views_plugin.erl
@@ -10,23 +10,18 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_views_plugin).
-
-export([
after_interactive_write/4
]).
-
-define(SERVICE_ID, couch_views).
-
after_interactive_write(Db, Mrst, Result, DocNumber) ->
with_pipe(after_interactive_write, [Db, Mrst, Result, DocNumber]),
ok.
-
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
@@ -34,7 +29,6 @@ after_interactive_write(Db, Mrst, Result, DocNumber) ->
with_pipe(Func, Args) ->
do_apply(Func, Args, [pipe]).
-
do_apply(Func, Args, Opts) ->
Handle = couch_epi:get_handle(?SERVICE_ID),
- couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
\ No newline at end of file
+ couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
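
For readers unfamiliar with couch_epi: the [pipe] option used by
do_apply/3 above threads the argument list through every subscribed
handler, each receiving what the previous one returned. A minimal
sketch of those semantics with plain funs (an illustration of the
dispatch behavior, not the couch_epi implementation):

    -module(pipe_example).
    -export([pipe/2]).

    %% Each handler must return the (possibly rewritten) argument list
    %% that is handed to the next handler in the chain.
    pipe(Handlers, Args0) when is_list(Handlers), is_list(Args0) ->
        lists:foldl(fun(Handler, Args) -> apply(Handler, Args) end,
            Args0, Handlers).

For example, pipe_example:pipe([fun(N, Acc) -> [N + 1, Acc] end],
[1, ok]) returns [2, ok].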
diff --git a/src/couch_views/src/couch_views_reader.erl b/src/couch_views/src/couch_views_reader.erl
index ae7a3c393..816a6a871 100644
--- a/src/couch_views/src/couch_views_reader.erl
+++ b/src/couch_views/src/couch_views_reader.erl
@@ -16,23 +16,20 @@
read/7
]).
-
-include("couch_views.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("fabric/include/fabric2.hrl").
-
-define(LOAD_DOC_TIMEOUT_MSEC, 10000).
-
read(Db, Mrst, ViewName, UserCallback, UserAcc, Args, DbReadVsn) ->
- ReadFun = case Args of
- #mrargs{view_type = map} -> fun read_map_view/7;
- #mrargs{view_type = red} -> fun read_red_view/7
- end,
+ ReadFun =
+ case Args of
+ #mrargs{view_type = map} -> fun read_map_view/7;
+ #mrargs{view_type = red} -> fun read_red_view/7
+ end,
ReadFun(Db, Mrst, ViewName, UserCallback, UserAcc, Args, DbReadVsn).
-
read_map_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, DbReadVsn) ->
DocLoader = maybe_start_doc_loader(Db, DbReadVsn),
try
@@ -58,13 +55,17 @@ read_map_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, DbReadVsn) ->
doc_loader => DocLoader
},
- Acc1 = lists:foldl(fun(KeyArgs, KeyAcc0) ->
- Opts = mrargs_to_fdb_options(KeyArgs),
- KeyAcc1 = KeyAcc0#{
- mrargs := KeyArgs
- },
- couch_views_trees:fold_map_idx(TxDb, View, Opts, Fun, KeyAcc1)
- end, Acc0, expand_keys_args(Args)),
+ Acc1 = lists:foldl(
+ fun(KeyArgs, KeyAcc0) ->
+ Opts = mrargs_to_fdb_options(KeyArgs),
+ KeyAcc1 = KeyAcc0#{
+ mrargs := KeyArgs
+ },
+ couch_views_trees:fold_map_idx(TxDb, View, Opts, Fun, KeyAcc1)
+ end,
+ Acc0,
+ expand_keys_args(Args)
+ ),
#{
acc := UserAcc2
@@ -81,7 +82,6 @@ read_map_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, DbReadVsn) ->
stop_doc_loader(DocLoader)
end.
-
read_red_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, _DbReadVsn) ->
#mrst{
language = Lang,
@@ -106,13 +106,14 @@ read_red_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, _DbReadVsn) ->
Meta = get_red_meta(TxDb, Mrst, View1, Args),
UserAcc1 = maybe_stop(UserCallback(Meta, UserAcc0)),
- Finalizer = case couch_util:get_value(finalizer, Extra) of
- undefined ->
- {_, FunSrc} = lists:nth(Idx, View1#mrview.reduce_funs),
- FunSrc;
- CustomFun->
- CustomFun
- end,
+ Finalizer =
+ case couch_util:get_value(finalizer, Extra) of
+ undefined ->
+ {_, FunSrc} = lists:nth(Idx, View1#mrview.reduce_funs),
+ FunSrc;
+ CustomFun ->
+ CustomFun
+ end,
Acc0 = #{
db => TxDb,
@@ -126,12 +127,13 @@ read_red_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, _DbReadVsn) ->
acc => UserAcc1
},
- Acc1 = lists:foldl(fun(KeyArgs, KeyAcc0) ->
- Opts = mrargs_to_fdb_options(KeyArgs),
- KeyAcc1 = KeyAcc0#{
- mrargs := KeyArgs
- },
- couch_views_trees:fold_red_idx(
+ Acc1 = lists:foldl(
+ fun(KeyArgs, KeyAcc0) ->
+ Opts = mrargs_to_fdb_options(KeyArgs),
+ KeyAcc1 = KeyAcc0#{
+ mrargs := KeyArgs
+ },
+ couch_views_trees:fold_red_idx(
TxDb,
View1,
Idx,
@@ -139,7 +141,10 @@ read_red_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, _DbReadVsn) ->
Fun,
KeyAcc1
)
- end, Acc0, expand_keys_args(Args)),
+ end,
+ Acc0,
+ expand_keys_args(Args)
+ ),
#{
acc := UserAcc2
@@ -154,31 +159,24 @@ read_red_view(Db, Mrst0, ViewName, UserCallback, UserAcc0, Args, _DbReadVsn) ->
{ok, Out}
end.
-
get_map_meta(TxDb, Mrst, View, #mrargs{update_seq = true}) ->
TotalRows = couch_views_trees:get_row_count(TxDb, View),
ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst),
- {meta, [{update_seq, ViewSeq}, {total, TotalRows}, {offset, null}]};
-
+ {meta, [{update_seq, ViewSeq}, {total, TotalRows}, {offset, null}]};
get_map_meta(TxDb, _Mrst, View, #mrargs{}) ->
TotalRows = couch_views_trees:get_row_count(TxDb, View),
{meta, [{total, TotalRows}, {offset, null}]}.
-
get_red_meta(TxDb, Mrst, _View, #mrargs{update_seq = true}) ->
ViewSeq = couch_views_fdb:get_update_seq(TxDb, Mrst),
- {meta, [{update_seq, ViewSeq}]};
-
+ {meta, [{update_seq, ViewSeq}]};
get_red_meta(_TxDb, _Mrst, _View, #mrargs{}) ->
{meta, []}.
-
handle_map_row(_DocId, _Key, _Value, #{skip := Skip} = Acc) when Skip > 0 ->
Acc#{skip := Skip - 1};
-
handle_map_row(_DocID, _Key, _Value, #{limit := 0, acc := UserAcc}) ->
throw({complete, UserAcc});
-
handle_map_row(DocId, Key, Value, Acc) ->
#{
db := TxDb,
@@ -195,28 +193,32 @@ handle_map_row(DocId, Key, Value, Acc) ->
{value, Value}
],
- Row = BaseRow ++ if not Args#mrargs.include_docs -> []; true ->
- DocOpts0 = Args#mrargs.doc_options,
- DocOpts1 = DocOpts0 ++ case Args#mrargs.conflicts of
- true -> [conflicts];
- _ -> []
- end,
-
- {TargetDocId, Rev} = get_doc_id(DocId, Value),
- DocObj = load_doc(TxDb, TargetDocId, Rev, DocOpts1, DocLoader),
- [{doc, DocObj}]
- end,
+ Row =
+ BaseRow ++
+ if
+ not Args#mrargs.include_docs ->
+ [];
+ true ->
+ DocOpts0 = Args#mrargs.doc_options,
+ DocOpts1 =
+ DocOpts0 ++
+ case Args#mrargs.conflicts of
+ true -> [conflicts];
+ _ -> []
+ end,
+
+ {TargetDocId, Rev} = get_doc_id(DocId, Value),
+ DocObj = load_doc(TxDb, TargetDocId, Rev, DocOpts1, DocLoader),
+ [{doc, DocObj}]
+ end,
UserAcc1 = maybe_stop(UserCallback({row, Row}, UserAcc0)),
Acc#{limit := Limit - 1, acc := UserAcc1}.
-
handle_red_row(_Key, _Red, #{skip := Skip} = Acc) when Skip > 0 ->
Acc#{skip := Skip - 1};
-
handle_red_row(_Key, _Value, #{limit := 0, acc := UserAcc}) ->
throw({complete, UserAcc});
-
handle_red_row(Key0, Value0, Acc) ->
#{
limit := Limit,
@@ -225,38 +227,35 @@ handle_red_row(Key0, Value0, Acc) ->
acc := UserAcc0
} = Acc,
- Key1 = case Key0 of
- undefined -> null;
- _ -> Key0
- end,
+ Key1 =
+ case Key0 of
+ undefined -> null;
+ _ -> Key0
+ end,
Value1 = maybe_finalize(Finalizer, Value0),
Row = [{key, Key1}, {value, Value1}],
UserAcc1 = maybe_stop(UserCallback({row, Row}, UserAcc0)),
Acc#{limit := Limit - 1, acc := UserAcc1}.
-
maybe_finalize(null, Red) ->
Red;
maybe_finalize(Finalizer, Red) ->
{ok, Finalized} = couch_query_servers:finalize(Finalizer, Red),
Finalized.
-
get_map_view(Lang, Args, ViewName, Views) ->
case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
{map, View, _Args} -> View;
{red, {_Idx, _Lang, View}, _} -> View
end.
-
get_red_view(Lang, Args, ViewName, Views) ->
case couch_views_util:extract_view(Lang, Args, ViewName, Views) of
{red, {Idx, Lang, View}, _} -> check_red_enabled({Idx, Lang, View});
_ -> throw({not_found, missing_named_view})
end.
-
check_red_enabled({Idx, _Lang, View} = Resp) ->
case lists:nth(Idx, View#mrview.reduce_funs) of
{_, disabled} ->
@@ -267,15 +266,16 @@ check_red_enabled({Idx, _Lang, View} = Resp) ->
expand_keys_args(#mrargs{keys = undefined} = Args) ->
[Args];
-
expand_keys_args(#mrargs{keys = Keys} = Args) ->
- lists:map(fun(Key) ->
- Args#mrargs{
- start_key = Key,
- end_key = Key
- }
- end, Keys).
-
+ lists:map(
+ fun(Key) ->
+ Args#mrargs{
+ start_key = Key,
+ end_key = Key
+ }
+ end,
+ Keys
+ ).
mrargs_to_fdb_options(Args) ->
#mrargs{
@@ -289,21 +289,26 @@ mrargs_to_fdb_options(Args) ->
group_level = GroupLevel
} = Args,
- StartKeyOpts = if StartKey == undefined -> []; true ->
- [{start_key, {StartKey, StartKeyDocId}}]
- end,
+ StartKeyOpts =
+ if
+ StartKey == undefined -> [];
+ true -> [{start_key, {StartKey, StartKeyDocId}}]
+ end,
- EndKeyDocId = case {Direction, EndKeyDocId0} of
- {fwd, <<255>>} when InclusiveEnd -> <<255>>;
- {fwd, <<255>>} when not InclusiveEnd -> <<>>;
- {rev, <<>>} when InclusiveEnd -> <<>>;
- {rev, <<>>} when not InclusiveEnd -> <<255>>;
- _ -> EndKeyDocId0
- end,
+ EndKeyDocId =
+ case {Direction, EndKeyDocId0} of
+ {fwd, <<255>>} when InclusiveEnd -> <<255>>;
+ {fwd, <<255>>} when not InclusiveEnd -> <<>>;
+ {rev, <<>>} when InclusiveEnd -> <<>>;
+ {rev, <<>>} when not InclusiveEnd -> <<255>>;
+ _ -> EndKeyDocId0
+ end,
- EndKeyOpts = if EndKey == undefined -> []; true ->
- [{end_key, {EndKey, EndKeyDocId}}]
- end,
+ EndKeyOpts =
+ if
+ EndKey == undefined -> [];
+ true -> [{end_key, {EndKey, EndKeyDocId}}]
+ end,
GroupFunOpt = make_group_key_fun(ViewType, GroupLevel),
@@ -312,20 +317,16 @@ mrargs_to_fdb_options(Args) ->
{inclusive_end, InclusiveEnd}
] ++ StartKeyOpts ++ EndKeyOpts ++ GroupFunOpt.
-
make_group_key_fun(map, _) ->
[];
-
make_group_key_fun(red, exact) ->
[
{group_key_fun, fun({Key, _DocId}) -> Key end}
];
-
make_group_key_fun(red, 0) ->
[
{group_key_fun, group_all}
];
-
make_group_key_fun(red, N) when is_integer(N), N > 0 ->
GKFun = fun
({Key, _DocId}) when is_list(Key) -> lists:sublist(Key, N);
@@ -333,39 +334,31 @@ make_group_key_fun(red, N) when is_integer(N), N > 0 ->
end,
[{group_key_fun, GKFun}].
-
maybe_stop({ok, Acc}) -> Acc;
maybe_stop({stop, Acc}) -> throw({done, Acc}).
-
get_doc_id(Id, {Props}) ->
DocId = couch_util:get_value(<<"_id">>, Props, Id),
Rev = couch_util:get_value(<<"_rev">>, Props, null),
{DocId, Rev};
-
get_doc_id(Id, _Value) ->
{Id, null}.
-
load_doc(TxDb, Id, Rev, DocOpts, undefined) ->
load_doc(TxDb, Id, Rev, DocOpts);
-
load_doc(_TxDb, Id, Rev, DocOpts, DocLoader) when is_pid(DocLoader) ->
DocLoader ! {load_doc, Id, Rev, DocOpts},
receive
{load_doc_res, Result} -> Result
- after
- ?LOAD_DOC_TIMEOUT_MSEC ->
- error(load_doc_timeout)
+ after ?LOAD_DOC_TIMEOUT_MSEC ->
+ error(load_doc_timeout)
end.
-
load_doc(TxDb, Id, null, DocOpts) ->
case fabric2_db:open_doc(TxDb, Id, DocOpts) of
{ok, Doc} -> couch_doc:to_json_obj(Doc, DocOpts);
{not_found, _} -> null
end;
-
load_doc(TxDb, Id, Rev, DocOpts) ->
Rev1 = couch_doc:parse_rev(Rev),
case fabric2_db:open_doc_revs(TxDb, Id, [Rev1], DocOpts) of
@@ -373,15 +366,12 @@ load_doc(TxDb, Id, Rev, DocOpts) ->
{ok, [_Else]} -> null
end.
-
-
% When reading doc bodies at the db version at which the indexer
% observed them, we need to use a separate process since the process dict
% is used to hold some of the transaction metadata.
%
maybe_start_doc_loader(_Db, ?VIEW_CURRENT_VSN) ->
undefined;
-
maybe_start_doc_loader(Db0, DbReadVsn) ->
Parent = self(),
Db = Db0#{tx := undefined},
@@ -392,15 +382,12 @@ maybe_start_doc_loader(Db0, DbReadVsn) ->
end)
end).
-
stop_doc_loader(undefined) ->
ok;
-
stop_doc_loader(Pid) when is_pid(Pid) ->
unlink(Pid),
exit(Pid, kill).
-
doc_loader_loop(TxDb, Parent) ->
receive
{load_doc, Id, Rev, DocOpts} ->
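
The doc loader above exists because, as the earlier comment explains,
FDB transaction metadata lives in the process dictionary, so reads at
an older db version need a fresh process. A minimal sketch of the same
ask/reply protocol with a timeout; the message shapes mirror load_doc/5,
but this hypothetical loop replies to an explicit caller rather than to
the parent pid captured at spawn time:

    -module(loader_example).
    -export([start/0, call/2]).

    -define(TIMEOUT_MSEC, 10000).

    start() ->
        spawn_link(fun loop/0).

    %% Block until the loader replies, or fail loudly on timeout, as
    %% load_doc/5 does above.
    call(Pid, Req) when is_pid(Pid) ->
        Pid ! {load_doc, self(), Req},
        receive
            {load_doc_res, Result} -> Result
        after ?TIMEOUT_MSEC ->
            error(load_doc_timeout)
        end.

    loop() ->
        receive
            {load_doc, From, Req} ->
                From ! {load_doc_res, {ok, Req}},
                loop()
        end.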
diff --git a/src/couch_views/src/couch_views_server.erl b/src/couch_views/src/couch_views_server.erl
index e94eaf170..3e9284cfc 100644
--- a/src/couch_views/src/couch_views_server.erl
+++ b/src/couch_views/src/couch_views_server.erl
@@ -12,7 +12,6 @@
-module(couch_views_server).
-
-behaviour(gen_server).
-include_lib("kernel/include/logger.hrl").
@@ -38,15 +37,12 @@
-define(MAX_ACCEPTORS, 5).
-define(MAX_WORKERS, 100).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
accepted(Worker) when is_pid(Worker) ->
gen_server:call(?MODULE, {accepted, Worker}, infinity).
-
init(_) ->
process_flag(trap_exit, true),
couch_views_jobs:set_timeout(),
@@ -58,11 +54,9 @@ init(_) ->
},
{ok, spawn_acceptors(St)}.
-
terminate(_, _St) ->
ok.
-
handle_call({accepted, Pid}, _From, St) ->
#{
acceptors := Acceptors,
@@ -81,15 +75,12 @@ handle_call({accepted, Pid}, _From, St) ->
couch_log:error(LogMsg, [?MODULE, Pid]),
{stop, {unknown_acceptor_pid, Pid}, St}
end;
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info({'EXIT', Pid, Reason}, St) ->
#{
acceptors := Acceptors,
@@ -102,15 +93,12 @@ handle_info({'EXIT', Pid, Reason}, St) ->
{false, true} -> handle_worker_exit(St, Pid, Reason);
{false, false} -> handle_unknown_exit(St, Pid, Reason)
end;
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
format_status(_Opt, [_PDict, State]) ->
#{
workers := Workers,
@@ -120,10 +108,7 @@ format_status(_Opt, [_PDict, State]) ->
workers => {map_size, maps:size(Workers)},
acceptors => {map_size, maps:size(Acceptors)}
},
- [{data, [{"State",
- Scrubbed
- }]}].
-
+ [{data, [{"State", Scrubbed}]}].
% Worker process exit handlers
@@ -134,11 +119,9 @@ handle_acceptor_exit(#{acceptors := Acceptors} = St, Pid, Reason) ->
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{noreply, spawn_acceptors(St1)}.
-
handle_worker_exit(#{workers := Workers} = St, Pid, normal) ->
St1 = St#{workers := maps:remove(Pid, Workers)},
{noreply, spawn_acceptors(St1)};
-
handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
St1 = St#{workers := maps:remove(Pid, Workers)},
?LOG_ERROR(#{what => indexer_crash, pid => Pid, reason => Reason}),
@@ -146,14 +129,12 @@ handle_worker_exit(#{workers := Workers} = St, Pid, Reason) ->
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{noreply, spawn_acceptors(St1)}.
-
handle_unknown_exit(St, Pid, Reason) ->
?LOG_ERROR(#{what => unknown_process_crash, pid => Pid, reason => Reason}),
LogMsg = "~p : unknown process ~p exited with ~p",
couch_log:error(LogMsg, [?MODULE, Pid, Reason]),
{stop, {unknown_pid_exit, Pid}, St}.
-
spawn_acceptors(St) ->
#{
workers := Workers,
@@ -172,10 +153,8 @@ spawn_acceptors(St) ->
St
end.
-
max_acceptors() ->
config:get_integer("couch_views", "max_acceptors", ?MAX_ACCEPTORS).
-
max_workers() ->
config:get_integer("couch_views", "max_workers", ?MAX_WORKERS).
diff --git a/src/couch_views/src/couch_views_sup.erl b/src/couch_views/src/couch_views_sup.erl
index ee32d4e9f..86f3293e9 100644
--- a/src/couch_views/src/couch_views_sup.erl
+++ b/src/couch_views/src/couch_views_sup.erl
@@ -10,10 +10,8 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(couch_views_sup).
-
-behaviour(supervisor).
-include_lib("kernel/include/logger.hrl").
@@ -22,44 +20,40 @@
start_link/0
]).
-
-export([
init/1
]).
-
start_link() ->
ok = register_views_index(),
- Arg = case fabric2_node_types:is_type(view_indexing) of
- true -> normal;
- false -> builds_disabled
- end,
+ Arg =
+ case fabric2_node_types:is_type(view_indexing) of
+ true -> normal;
+ false -> builds_disabled
+ end,
supervisor:start_link({local, ?MODULE}, ?MODULE, Arg).
-
init(normal) ->
- Children = [
- #{
- id => couch_views_server,
- start => {couch_views_server, start_link, []}
- }
- ] ++ couch_epi:register_service(couch_views_epi, []),
+ Children =
+ [
+ #{
+ id => couch_views_server,
+ start => {couch_views_server, start_link, []}
+ }
+ ] ++ couch_epi:register_service(couch_views_epi, []),
{ok, {flags(), Children}};
-
init(builds_disabled) ->
?LOG_NOTICE(#{what => view_indexing_disabled}),
couch_log:notice("~p : view_indexing disabled", [?MODULE]),
couch_views_jobs:set_timeout(),
{ok, {flags(), []}}.
-
register_views_index() ->
case fabric2_node_types:is_type(api_frontend) of
true -> fabric2_index:register_index(couch_views);
false -> ok
end.
-
flags() ->
#{
strategy => one_for_one,
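
start_link/0 above derives the supervisor's init argument from the node
type, so non-indexing nodes come up with no children at all. A minimal
sketch of that shape under stated assumptions: mode_worker is a
placeholder child module, and is_indexing_node/0 stands in for
fabric2_node_types:is_type(view_indexing):

    -module(mode_sup_example).
    -behaviour(supervisor).
    -export([start_link/0, init/1]).

    start_link() ->
        Arg =
            case is_indexing_node() of
                true -> normal;
                false -> builds_disabled
            end,
        supervisor:start_link({local, ?MODULE}, ?MODULE, Arg).

    %% Full child list on indexing nodes ...
    init(normal) ->
        Children = [
            #{id => mode_worker, start => {mode_worker, start_link, []}}
        ],
        {ok, {#{strategy => one_for_one}, Children}};
    %% ... and an empty one when builds are disabled.
    init(builds_disabled) ->
        {ok, {#{strategy => one_for_one}, []}}.

    %% Hardcoded to the disabled branch so the sketch runs as-is; flip
    %% to true once a real mode_worker module exists.
    is_indexing_node() ->
        false.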
diff --git a/src/couch_views/src/couch_views_trees.erl b/src/couch_views/src/couch_views_trees.erl
index 9aafbb276..61b7e79a4 100644
--- a/src/couch_views/src/couch_views_trees.erl
+++ b/src/couch_views/src/couch_views_trees.erl
@@ -30,15 +30,12 @@
-compile(nowarn_export_all).
-endif.
-
-include("couch_views.hrl").
-include_lib("fabric/include/fabric2.hrl").
-
open(TxDb, Mrst) ->
open(TxDb, Mrst, []).
-
open(TxDb, Mrst, Options) ->
#mrst{
sig = Sig,
@@ -50,7 +47,6 @@ open(TxDb, Mrst, Options) ->
views = [open_view_tree(TxDb, Sig, Lang, V, Options) || V <- Views]
}.
-
get_row_count(TxDb, View) ->
#{
tx := Tx
@@ -58,7 +54,6 @@ get_row_count(TxDb, View) ->
{Count, _, _} = ebtree:full_reduce(Tx, View#mrview.btree),
Count.
-
get_kv_size(TxDb, View) ->
#{
tx := Tx
@@ -66,7 +61,6 @@ get_kv_size(TxDb, View) ->
{_, TotalSize, _} = ebtree:full_reduce(Tx, View#mrview.btree),
TotalSize.
-
fold_map_idx(TxDb, View, Options, Callback, Acc0) ->
#{
tx := Tx
@@ -82,40 +76,54 @@ fold_map_idx(TxDb, View, Options, Callback, Acc0) ->
Wrapper = fun(KVs0, WAcc) ->
% Remove any keys that match Start or End key
% depending on direction
- KVs1 = case InclusiveEnd of
- true ->
- KVs0;
- false when Dir == fwd ->
- lists:filter(fun({K, _V}) ->
- case CollateFun(K, EndKey) of
- lt -> true;
- eq -> false;
- gt -> false
- end
- end, KVs0);
- false when Dir == rev ->
- lists:filter(fun({K, _V}) ->
- case CollateFun(K, EndKey) of
- lt -> false;
- eq -> false;
- gt -> true
- end
- end, KVs0)
- end,
+ KVs1 =
+ case InclusiveEnd of
+ true ->
+ KVs0;
+ false when Dir == fwd ->
+ lists:filter(
+ fun({K, _V}) ->
+ case CollateFun(K, EndKey) of
+ lt -> true;
+ eq -> false;
+ gt -> false
+ end
+ end,
+ KVs0
+ );
+ false when Dir == rev ->
+ lists:filter(
+ fun({K, _V}) ->
+ case CollateFun(K, EndKey) of
+ lt -> false;
+ eq -> false;
+ gt -> true
+ end
+ end,
+ KVs0
+ )
+ end,
% Expand dups
- KVs2 = lists:flatmap(fun({K, V}) ->
- case V of
- {dups, Dups} when Dir == fwd ->
- [{K, D} || D <- Dups];
- {dups, Dups} when Dir == rev ->
- [{K, D} || D <- lists:reverse(Dups)];
- _ ->
- [{K, V}]
- end
- end, KVs1),
- lists:foldl(fun({{Key, DocId}, Value}, WAccInner) ->
- Callback(DocId, Key, Value, WAccInner)
- end, WAcc, KVs2)
+ KVs2 = lists:flatmap(
+ fun({K, V}) ->
+ case V of
+ {dups, Dups} when Dir == fwd ->
+ [{K, D} || D <- Dups];
+ {dups, Dups} when Dir == rev ->
+ [{K, D} || D <- lists:reverse(Dups)];
+ _ ->
+ [{K, V}]
+ end
+ end,
+ KVs1
+ ),
+ lists:foldl(
+ fun({{Key, DocId}, Value}, WAccInner) ->
+ Callback(DocId, Key, Value, WAccInner)
+ end,
+ WAcc,
+ KVs2
+ )
end,
case Dir of
@@ -126,7 +134,6 @@ fold_map_idx(TxDb, View, Options, Callback, Acc0) ->
ebtree:reverse_range(Tx, Btree, EndKey, StartKey, Wrapper, Acc0)
end.
-
fold_red_idx(TxDb, View, Idx, Options, Callback, Acc0) ->
#{
tx := Tx
@@ -150,15 +157,15 @@ fold_red_idx(TxDb, View, Idx, Options, Callback, Acc0) ->
{inclusive_end, InclusiveEnd}
],
ebtree:group_reduce(
- Tx,
- Btree,
- StartKey,
- EndKey,
- GroupKeyFun,
- Wrapper,
- Acc0,
- EBtreeOpts
- );
+ Tx,
+ Btree,
+ StartKey,
+ EndKey,
+ GroupKeyFun,
+ Wrapper,
+ Acc0,
+ EBtreeOpts
+ );
rev ->
% Start/End keys are swapped on purpose because of how ebtree
% iterates reverse ranges; inclusive_start is used for the same reason.
@@ -167,28 +174,31 @@ fold_red_idx(TxDb, View, Idx, Options, Callback, Acc0) ->
{inclusive_start, InclusiveEnd}
],
ebtree:group_reduce(
- Tx,
- Btree,
- EndKey,
- StartKey,
- GroupKeyFun,
- Wrapper,
- Acc0,
- EBtreeOpts
- )
+ Tx,
+ Btree,
+ EndKey,
+ StartKey,
+ GroupKeyFun,
+ Wrapper,
+ Acc0,
+ EBtreeOpts
+ )
end.
-
update_views(TxDb, Mrst, Docs) ->
#{
tx := Tx
} = TxDb,
% Get initial KV size
- OldKVSize = lists:foldl(fun(View, SizeAcc) ->
- {_, Size, _} = ebtree:full_reduce(Tx, View#mrview.btree),
- SizeAcc + Size
- end, 0, Mrst#mrst.views),
+ OldKVSize = lists:foldl(
+ fun(View, SizeAcc) ->
+ {_, Size, _} = ebtree:full_reduce(Tx, View#mrview.btree),
+ SizeAcc + Size
+ end,
+ 0,
+ Mrst#mrst.views
+ ),
% Collect update information
#{
@@ -201,25 +211,31 @@ update_views(TxDb, Mrst, Docs) ->
update_btree(Tx, Mrst#mrst.id_btree, IdMap, DeleteRef),
% Update each view's BTree
- lists:foreach(fun(View) ->
- #mrview{
- id_num = ViewId,
- btree = BTree
- } = View,
-
- ViewMap = maps:get(ViewId, ViewMaps, #{}),
- update_btree(Tx, BTree, ViewMap, DeleteRef)
- end, Mrst#mrst.views),
+ lists:foreach(
+ fun(View) ->
+ #mrview{
+ id_num = ViewId,
+ btree = BTree
+ } = View,
+
+ ViewMap = maps:get(ViewId, ViewMaps, #{}),
+ update_btree(Tx, BTree, ViewMap, DeleteRef)
+ end,
+ Mrst#mrst.views
+ ),
% Get new KV size after update
- NewKVSize = lists:foldl(fun(View, SizeAcc) ->
- {_, Size, _} = ebtree:full_reduce(Tx, View#mrview.btree),
- SizeAcc + Size
- end, 0, Mrst#mrst.views),
+ NewKVSize = lists:foldl(
+ fun(View, SizeAcc) ->
+ {_, Size, _} = ebtree:full_reduce(Tx, View#mrview.btree),
+ SizeAcc + Size
+ end,
+ 0,
+ Mrst#mrst.views
+ ),
couch_views_fdb:update_kv_size(TxDb, Mrst#mrst.sig, OldKVSize, NewKVSize).
-
open_id_tree(TxDb, Sig) ->
#{
tx := Tx,
@@ -232,7 +248,6 @@ open_id_tree(TxDb, Sig) ->
],
ebtree:open(Tx, Prefix, get_order(id_btree), TreeOpts).
-
open_view_tree(TxDb, Sig, Lang, View, Options) ->
#{
tx := Tx,
@@ -247,27 +262,26 @@ open_view_tree(TxDb, Sig, Lang, View, Options) ->
{persist_fun, fun couch_views_fdb:persist_chunks/3},
{encode_fun, create_encode_fun(TxDb)}
],
- ExtraOpts = case lists:keyfind(read_only, 1, Options) of
- {read_only, Idx} ->
- RedFun = make_read_only_reduce_fun(Lang, View, Idx),
- [{reduce_fun, RedFun}];
- false ->
- [
- {reduce_fun, make_reduce_fun(Lang, View)}
- ]
- end,
+ ExtraOpts =
+ case lists:keyfind(read_only, 1, Options) of
+ {read_only, Idx} ->
+ RedFun = make_read_only_reduce_fun(Lang, View, Idx),
+ [{reduce_fun, RedFun}];
+ false ->
+ [
+ {reduce_fun, make_reduce_fun(Lang, View)}
+ ]
+ end,
TreeOpts = BaseOpts ++ ExtraOpts,
View#mrview{
btree = ebtree:open(Tx, Prefix, get_order(view_btree), TreeOpts)
}.
-
get_order(id_btree) ->
min_order(config:get_integer("couch_views", "id_btree_node_size", 100));
get_order(view_btree) ->
min_order(config:get_integer("couch_views", "view_btree_node_size", 100)).
-
min_order(V) when is_integer(V), V < 2 ->
2;
min_order(V) when is_integer(V), V rem 2 == 0 ->
@@ -275,7 +289,6 @@ min_order(V) when is_integer(V), V rem 2 == 0 ->
min_order(V) ->
V + 1.
-
make_read_only_reduce_fun(Lang, View, NthRed) ->
RedFuns = [Src || {_, Src} <- View#mrview.reduce_funs],
LPad = lists:duplicate(NthRed - 1, []),
@@ -292,26 +305,30 @@ make_read_only_reduce_fun(Lang, View, NthRed) ->
[lists:nth(NthRed, UReds)]
end,
UReds = lists:map(ExtractFun, Reductions),
- {ok, Result} = case UReds of
- [RedVal] ->
- {ok, RedVal};
- _ ->
- couch_query_servers:rereduce(Lang, [FunSrc], UReds)
- end,
+ {ok, Result} =
+ case UReds of
+ [RedVal] ->
+ {ok, RedVal};
+ _ ->
+ couch_query_servers:rereduce(Lang, [FunSrc], UReds)
+ end,
{0, 0, LPad ++ Result ++ RPad}
end.
-
make_reduce_fun(Lang, #mrview{} = View) ->
RedFuns = [Src || {_, Src} <- View#mrview.reduce_funs, Src /= disabled],
fun
(KVs0, _ReReduce = false) ->
KVs1 = expand_dupes(KVs0),
- TotalSize = lists:foldl(fun({{K, _DocId}, V}, Acc) ->
- KSize = couch_ejson_size:encoded_size(K),
- VSize = couch_ejson_size:encoded_size(V),
- KSize + VSize + Acc
- end, 0, KVs1),
+ TotalSize = lists:foldl(
+ fun({{K, _DocId}, V}, Acc) ->
+ KSize = couch_ejson_size:encoded_size(K),
+ VSize = couch_ejson_size:encoded_size(V),
+ KSize + VSize + Acc
+ end,
+ 0,
+ KVs1
+ ),
KVs2 = detuple_kvs(KVs1),
{ok, UserReds} = couch_query_servers:reduce(Lang, RedFuns, KVs2),
{length(KVs1), TotalSize, UserReds};
@@ -329,7 +346,6 @@ make_reduce_fun(Lang, #mrview{} = View) ->
{FinalCount, FinalSize, Result}
end.
-
create_encode_fun(TxDb) ->
fun
(encode, Key, Term) ->
@@ -340,54 +356,60 @@ create_encode_fun(TxDb) ->
binary_to_term(Bin, [safe])
end.
-
to_map_opts(Options) ->
- Dir = case lists:keyfind(dir, 1, Options) of
- {dir, D} -> D;
- _ -> fwd
- end,
+ Dir =
+ case lists:keyfind(dir, 1, Options) of
+ {dir, D} -> D;
+ _ -> fwd
+ end,
- InclusiveEnd = case lists:keyfind(inclusive_end, 1, Options) of
- {inclusive_end, IE} -> IE;
- _ -> true
- end,
+ InclusiveEnd =
+ case lists:keyfind(inclusive_end, 1, Options) of
+ {inclusive_end, IE} -> IE;
+ _ -> true
+ end,
- StartKey = case lists:keyfind(start_key, 1, Options) of
- {start_key, SK} -> SK;
- false when Dir == fwd -> ebtree:min();
- false when Dir == rev -> ebtree:max()
- end,
+ StartKey =
+ case lists:keyfind(start_key, 1, Options) of
+ {start_key, SK} -> SK;
+ false when Dir == fwd -> ebtree:min();
+ false when Dir == rev -> ebtree:max()
+ end,
- EndKey = case lists:keyfind(end_key, 1, Options) of
- {end_key, EK} -> EK;
- false when Dir == fwd -> ebtree:max();
- false when Dir == rev -> ebtree:min()
- end,
+ EndKey =
+ case lists:keyfind(end_key, 1, Options) of
+ {end_key, EK} -> EK;
+ false when Dir == fwd -> ebtree:max();
+ false when Dir == rev -> ebtree:min()
+ end,
{Dir, StartKey, EndKey, InclusiveEnd}.
-
to_red_opts(Options) ->
{Dir, StartKey, EndKey, InclusiveEnd} = to_map_opts(Options),
- GroupKeyFun = case lists:keyfind(group_key_fun, 1, Options) of
- {group_key_fun, group_all} -> fun({_Key, _DocId}) -> null end;
- {group_key_fun, GKF} -> GKF;
- false -> fun({_Key, _DocId}) -> null end
- end,
+ GroupKeyFun =
+ case lists:keyfind(group_key_fun, 1, Options) of
+ {group_key_fun, group_all} -> fun({_Key, _DocId}) -> null end;
+ {group_key_fun, GKF} -> GKF;
+ false -> fun({_Key, _DocId}) -> null end
+ end,
{Dir, StartKey, EndKey, InclusiveEnd, GroupKeyFun}.
-
gather_update_info(Tx, Mrst, Docs) ->
% A special token used to indicate that the row should be deleted
DeleteRef = erlang:make_ref(),
AllDocIds = [DocId || #{id := DocId} <- Docs],
- BaseIdMap = lists:foldl(fun(DocId, Acc) ->
- maps:put(DocId, DeleteRef, Acc)
- end, #{}, AllDocIds),
+ BaseIdMap = lists:foldl(
+ fun(DocId, Acc) ->
+ maps:put(DocId, DeleteRef, Acc)
+ end,
+ #{},
+ AllDocIds
+ ),
% Build the initial set of rows to delete
% ExistingViewKeys is a list of {DocId, [{ViewId, [Key | _]} | _]}
@@ -397,15 +419,27 @@ gather_update_info(Tx, Mrst, Docs) ->
% list of keys to delete. The final result is a map of
% maps:
% #{ViewId => #{Key => DeleteRef}}
- BaseViewMaps = lists:foldl(fun({DocId, ViewIdKeys}, ViewIdAcc1) ->
- lists:foldl(fun({ViewId, Keys}, ViewIdAcc2) ->
- OldViewMap = maps:get(ViewId, ViewIdAcc2, #{}),
- NewViewMap = lists:foldl(fun(Key, ViewMapAcc) ->
- maps:put({Key, DocId}, DeleteRef, ViewMapAcc)
- end, OldViewMap, Keys),
- maps:put(ViewId, NewViewMap, ViewIdAcc2)
- end, ViewIdAcc1, ViewIdKeys)
- end, #{}, ExistingViewKeys),
+ BaseViewMaps = lists:foldl(
+ fun({DocId, ViewIdKeys}, ViewIdAcc1) ->
+ lists:foldl(
+ fun({ViewId, Keys}, ViewIdAcc2) ->
+ OldViewMap = maps:get(ViewId, ViewIdAcc2, #{}),
+ NewViewMap = lists:foldl(
+ fun(Key, ViewMapAcc) ->
+ maps:put({Key, DocId}, DeleteRef, ViewMapAcc)
+ end,
+ OldViewMap,
+ Keys
+ ),
+ maps:put(ViewId, NewViewMap, ViewIdAcc2)
+ end,
+ ViewIdAcc1,
+ ViewIdKeys
+ )
+ end,
+ #{},
+ ExistingViewKeys
+ ),
% Build our base accumulator
InfoAcc1 = #{
@@ -419,10 +453,13 @@ gather_update_info(Tx, Mrst, Docs) ->
% #{ViewId => #{Key => Value}}
% where Value may be a copy of `DeleteRef` which flags
% that the Key should be deleted from the view.
- lists:foldl(fun(Doc, InfoAcc2) ->
- insert_doc(Mrst, Doc, InfoAcc2)
- end, InfoAcc1, Docs).
-
+ lists:foldl(
+ fun(Doc, InfoAcc2) ->
+ insert_doc(Mrst, Doc, InfoAcc2)
+ end,
+ InfoAcc1,
+ Docs
+ ).
insert_doc(_Mrst, #{deleted := true} = _Doc, InfoAcc) ->
InfoAcc;
@@ -432,70 +469,84 @@ insert_doc(Mrst, Doc, InfoAcc0) ->
results := Results
} = Doc,
- FinalAcc = lists:foldl(fun({View, RawNewRows}, {IdKeyAcc, InfoAcc1}) ->
- #mrview{
- id_num = ViewId
- } = View,
- #{
- views := ViewMaps
- } = InfoAcc1,
-
- DedupedRows = dedupe_rows(View, RawNewRows),
- IdKeys = lists:usort([K || {K, _V} <- DedupedRows]),
-
- OldViewMap = maps:get(ViewId, ViewMaps, #{}),
- NewViewMap = lists:foldl(fun({K, V}, ViewMapAcc) ->
- maps:put({K, DocId}, V, ViewMapAcc)
- end, OldViewMap, DedupedRows),
-
- {[{ViewId, IdKeys} | IdKeyAcc], InfoAcc1#{
- views := maps:put(ViewId, NewViewMap, ViewMaps)
- }}
- end, {[], InfoAcc0}, lists:zip(Mrst#mrst.views, Results)),
+ FinalAcc = lists:foldl(
+ fun({View, RawNewRows}, {IdKeyAcc, InfoAcc1}) ->
+ #mrview{
+ id_num = ViewId
+ } = View,
+ #{
+ views := ViewMaps
+ } = InfoAcc1,
+
+ DedupedRows = dedupe_rows(View, RawNewRows),
+ IdKeys = lists:usort([K || {K, _V} <- DedupedRows]),
+
+ OldViewMap = maps:get(ViewId, ViewMaps, #{}),
+ NewViewMap = lists:foldl(
+ fun({K, V}, ViewMapAcc) ->
+ maps:put({K, DocId}, V, ViewMapAcc)
+ end,
+ OldViewMap,
+ DedupedRows
+ ),
+
+ {[{ViewId, IdKeys} | IdKeyAcc], InfoAcc1#{
+ views := maps:put(ViewId, NewViewMap, ViewMaps)
+ }}
+ end,
+ {[], InfoAcc0},
+ lists:zip(Mrst#mrst.views, Results)
+ ),
{IdRows, #{ids := IdMap} = InfoAcc2} = FinalAcc,
% Don't store a row in the id_btree if it hasn't got any
% keys that will need to be deleted.
NonEmptyRows = [1 || {_ViewId, Rows} <- IdRows, Rows /= []],
- if length(NonEmptyRows) == 0 -> InfoAcc2; true ->
- InfoAcc2#{ids := maps:put(DocId, IdRows, IdMap)}
+ if
+ length(NonEmptyRows) == 0 -> InfoAcc2;
+ true -> InfoAcc2#{ids := maps:put(DocId, IdRows, IdMap)}
end.
-
update_btree(Tx, BTree, Map, DeleteRef) ->
- {ToRemove, ToInsert} = maps:fold(fun(Key, Value, {Keys, Rows}) ->
- case Value of
- DeleteRef -> {[Key | Keys], Rows};
- _ -> {Keys, [{Key, Value} | Rows]}
- end
- end, {[], []}, Map),
+ {ToRemove, ToInsert} = maps:fold(
+ fun(Key, Value, {Keys, Rows}) ->
+ case Value of
+ DeleteRef -> {[Key | Keys], Rows};
+ _ -> {Keys, [{Key, Value} | Rows]}
+ end
+ end,
+ {[], []},
+ Map
+ ),
- lists:foreach(fun(Key) ->
- ebtree:delete(Tx, BTree, Key)
- end, ToRemove),
+ lists:foreach(
+ fun(Key) ->
+ ebtree:delete(Tx, BTree, Key)
+ end,
+ ToRemove
+ ),
ebtree:insert_multi(Tx, BTree, ToInsert).
-
dedupe_rows(View, KVs0) ->
CollateFun = couch_views_util:collate_fun(View),
- KVs1 = lists:sort(fun({KeyA, ValA}, {KeyB, ValB}) ->
- case CollateFun({KeyA, <<>>}, {KeyB, <<>>}) of
- lt -> true;
- eq -> ValA =< ValB;
- gt -> false
- end
- end, KVs0),
+ KVs1 = lists:sort(
+ fun({KeyA, ValA}, {KeyB, ValB}) ->
+ case CollateFun({KeyA, <<>>}, {KeyB, <<>>}) of
+ lt -> true;
+ eq -> ValA =< ValB;
+ gt -> false
+ end
+ end,
+ KVs0
+ ),
dedupe_rows_int(CollateFun, KVs1).
-
dedupe_rows_int(_CollateFun, []) ->
[];
-
dedupe_rows_int(_CollateFun, [KV]) ->
[KV];
-
dedupe_rows_int(CollateFun, [{K1, V1} | RestKVs]) ->
RestDeduped = dedupe_rows_int(CollateFun, RestKVs),
case RestDeduped of
@@ -508,13 +559,11 @@ dedupe_rows_int(CollateFun, [{K1, V1} | RestKVs]) ->
[{K1, V1}]
end.
-
combine_vals(V1, {dups, V2}) ->
{dups, [V1 | V2]};
combine_vals(V1, V2) ->
{dups, [V1, V2]}.
-
expand_dupes([]) ->
[];
expand_dupes([{K, {dups, Dups}} | Rest]) ->
@@ -523,24 +572,20 @@ expand_dupes([{K, {dups, Dups}} | Rest]) ->
expand_dupes([{K, V} | Rest]) ->
[{K, V} | expand_dupes(Rest)].
-
detuple_kvs([]) ->
[];
detuple_kvs([KV | Rest]) ->
{{Key, Id}, Value} = KV,
[[[Key, Id], Value] | detuple_kvs(Rest)].
-
id_tree_prefix(DbPrefix, Sig) ->
Key = {?DB_VIEWS, ?VIEW_TREES, Sig, ?VIEW_ID_TREE},
erlfdb_tuple:pack(Key, DbPrefix).
-
view_tree_prefix(DbPrefix, Sig, ViewId) ->
Key = {?DB_VIEWS, ?VIEW_TREES, Sig, ?VIEW_ROW_TREES, ViewId},
erlfdb_tuple:pack(Key, DbPrefix).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/couch_views/src/couch_views_updater.erl b/src/couch_views/src/couch_views_updater.erl
index f88c5a23d..0bb2f53fa 100644
--- a/src/couch_views/src/couch_views_updater.erl
+++ b/src/couch_views/src/couch_views_updater.erl
@@ -15,7 +15,6 @@
index/6
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-include_lib("kernel/include/logger.hrl").
@@ -23,13 +22,20 @@
% If the doc revision doesn't match the NewRevId passed here we can ignore
% the document since it is then a conflict document and it doesn't need
% to be indexed.
-index(Db, #doc{id = Id, revs = Revs} = Doc, _NewWinner, _OldWinner, NewRevId,
- Seq) ->
+index(
+ Db,
+ #doc{id = Id, revs = Revs} = Doc,
+ _NewWinner,
+ _OldWinner,
+ NewRevId,
+ Seq
+) ->
try
{Depth, [FirstRev | _]} = Revs,
DocRev = {Depth, FirstRev},
- if DocRev /= NewRevId -> ok; true ->
- index_int(Db, Doc, Seq)
+ if
+ DocRev /= NewRevId -> ok;
+ true -> index_int(Db, Doc, Seq)
end
catch
error:{erlfdb_error, ErrCode}:Stack when is_integer(ErrCode) ->
@@ -41,8 +47,10 @@ index(Db, #doc{id = Id, revs = Revs} = Doc, _NewWinner, _OldWinner, NewRevId,
db => DbName,
docid => Id
}),
- couch_log:error("Mango index erlfdb error Db ~s Doc ~p ~p",
- [DbName, Id, ErrCode]),
+ couch_log:error(
+ "Mango index erlfdb error Db ~s Doc ~p ~p",
+ [DbName, Id, ErrCode]
+ ),
erlang:raise(error, {erlfdb_error, ErrCode}, Stack);
Error:Reason ->
DbName = fabric2_db:name(Db),
@@ -53,15 +61,22 @@ index(Db, #doc{id = Id, revs = Revs} = Doc, _NewWinner, _OldWinner, NewRevId,
db => DbName,
docid => Id
}),
- couch_log:error("Mango index error for Db ~s Doc ~p ~p ~p",
- [DbName, Id, Error, Reason])
+ couch_log:error(
+ "Mango index error for Db ~s Doc ~p ~p ~p",
+ [DbName, Id, Error, Reason]
+ )
end.
-
% Check if design doc is an interactive index and kick off background worker
% to build the new index up to the creation_vs
-index_int(Db, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>,
- deleted = false} = DDoc, Seq) ->
+index_int(
+ Db,
+ #doc{
+ id = <<?DESIGN_DOC_PREFIX, _/binary>>,
+ deleted = false
+ } = DDoc,
+ Seq
+) ->
DbName = fabric2_db:name(Db),
case couch_views_ddoc:is_interactive(DDoc) of
@@ -78,48 +93,56 @@ index_int(Db, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>,
ok
end,
write_doc(Db, DDoc);
-
-
index_int(Db, #doc{} = Doc, _Seq) ->
write_doc(Db, Doc).
-
write_doc(Db, #doc{deleted = Deleted} = Doc) ->
DbName = fabric2_db:name(Db),
DDocs = couch_views_ddoc:get_interactive_list(Db),
- Result0 = [#{
- id => Doc#doc.id,
- results => [],
- deleted => Deleted,
- doc => Doc
- }],
+ Result0 = [
+ #{
+ id => Doc#doc.id,
+ results => [],
+ deleted => Deleted,
+ doc => Doc
+ }
+ ],
%% Interactive updates do not update the views update_seq
State = #{
last_seq => false
},
- lists:foreach(fun(DDoc) ->
- {ok, Mrst0} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
- Mrst1 = couch_views_trees:open(Db, Mrst0),
-
- case should_index_doc(Doc, Mrst1) of
- true ->
- {Mrst2, Result1} = couch_views_indexer:map_docs(Mrst1, Result0),
- DocNumber = couch_views_indexer:write_docs(Db, Mrst2,
- Result1, State),
- couch_views_plugin:after_interactive_write(Db, Mrst2,
- Result1, DocNumber),
- couch_eval:release_map_context(Mrst2#mrst.qserver);
- false ->
- ok
- end
- end, DDocs).
-
+ lists:foreach(
+ fun(DDoc) ->
+ {ok, Mrst0} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+ Mrst1 = couch_views_trees:open(Db, Mrst0),
+
+ case should_index_doc(Doc, Mrst1) of
+ true ->
+ {Mrst2, Result1} = couch_views_indexer:map_docs(Mrst1, Result0),
+ DocNumber = couch_views_indexer:write_docs(
+ Db,
+ Mrst2,
+ Result1,
+ State
+ ),
+ couch_views_plugin:after_interactive_write(
+ Db,
+ Mrst2,
+ Result1,
+ DocNumber
+ ),
+ couch_eval:release_map_context(Mrst2#mrst.qserver);
+ false ->
+ ok
+ end
+ end,
+ DDocs
+ ).
should_index_doc(<<?DESIGN_DOC_PREFIX, _/binary>>, Mrst) ->
lists:keymember(<<"include_design">>, 1, Mrst#mrst.design_opts);
-
-should_index_doc(_, _) ->
+should_index_doc(_, _) ->
true.
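
The index/6 clause above only indexes the winning revision: a document
is skipped when its {Depth, FirstRev} differs from the new winner,
since it then belongs to a conflict branch. The check restated on its
own:

    -module(rev_check_example).
    -export([should_index/2]).

    %% Revs is {Depth, [FirstRev | _]} as in #doc.revs; NewRevId is the
    %% {Pos, Rev} pair of the new winner.
    should_index({Depth, [FirstRev | _]}, NewRevId) ->
        {Depth, FirstRev} =:= NewRevId.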
diff --git a/src/couch_views/src/couch_views_util.erl b/src/couch_views/src/couch_views_util.erl
index 63f5daafc..63dd56641 100644
--- a/src/couch_views/src/couch_views_util.erl
+++ b/src/couch_views/src/couch_views_util.erl
@@ -12,7 +12,6 @@
-module(couch_views_util).
-
-export([
ddoc_to_mrst/2,
collate_fun/1,
@@ -27,35 +26,36 @@
extract_view/4
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("couch_views.hrl").
-include_lib("kernel/include/logger.hrl").
-
-ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
- MakeDict = fun({Name, {MRFuns}}, DictBySrcAcc) ->
- case couch_util:get_value(<<"map">>, MRFuns) of
- MapSrc when MapSrc /= undefined ->
- RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
- {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
- View = case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
- {ok, View0} -> View0;
- error -> #mrview{def=MapSrc, options=ViewOpts}
- end,
- {MapNames, RedSrcs} = case RedSrc of
- null ->
- MNames = [Name | View#mrview.map_names],
- {MNames, View#mrview.reduce_funs};
- _ ->
- RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
- {View#mrview.map_names, RedFuns}
- end,
- View2 = View#mrview{map_names=MapNames, reduce_funs=RedSrcs},
- dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
- undefined ->
- DictBySrcAcc
- end;
+ddoc_to_mrst(DbName, #doc{id = Id, body = {Fields}}) ->
+ MakeDict = fun
+ ({Name, {MRFuns}}, DictBySrcAcc) ->
+ case couch_util:get_value(<<"map">>, MRFuns) of
+ MapSrc when MapSrc /= undefined ->
+ RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
+ {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
+ View =
+ case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
+ {ok, View0} -> View0;
+ error -> #mrview{def = MapSrc, options = ViewOpts}
+ end,
+ {MapNames, RedSrcs} =
+ case RedSrc of
+ null ->
+ MNames = [Name | View#mrview.map_names],
+ {MNames, View#mrview.reduce_funs};
+ _ ->
+ RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
+ {View#mrview.map_names, RedFuns}
+ end,
+ View2 = View#mrview{map_names = MapNames, reduce_funs = RedSrcs},
+ dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
+ undefined ->
+ DictBySrcAcc
+ end;
({Name, Else}, DictBySrcAcc) ->
?LOG_ERROR(#{
what => invalid_view_definition,
@@ -63,8 +63,10 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
ddoc => Id,
view => Name
}),
- couch_log:error("design_doc_to_view_group ~s views ~p",
- [Name, Else]),
+ couch_log:error(
+ "design_doc_to_view_group ~s views ~p",
+ [Name, Else]
+ ),
DictBySrcAcc
end,
{DesignOpts} = proplists:get_value(<<"options">>, Fields, {[]}),
@@ -74,7 +76,7 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
BySrc = lists:foldl(MakeDict, dict:new(), RawViews),
NumViews = fun({_, View}, N) ->
- {View#mrview{id_num = N}, N+1}
+ {View#mrview{id_num = N}, N + 1}
end,
{Views0, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
Views1 = maybe_disable_custom_reduce_funs(Views0),
@@ -83,61 +85,54 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
IdxState = #mrst{
- db_name=DbName,
- idx_name=Id,
- lib=Lib,
- views=Views1,
- language=Language,
- design_opts=DesignOpts,
- partitioned=Partitioned
+ db_name = DbName,
+ idx_name = Id,
+ lib = Lib,
+ views = Views1,
+ language = Language,
+ design_opts = DesignOpts,
+ partitioned = Partitioned
},
SigInfo = {Views1, Language, DesignOpts, sort_lib(Lib)},
- {ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
-
+ {ok, IdxState#mrst{sig = couch_hash:md5_hash(term_to_binary(SigInfo))}}.
set_view_type(_Args, _ViewName, []) ->
throw({not_found, missing_named_view});
-
set_view_type(Args, ViewName, [View | Rest]) ->
RedNames = [N || {N, _} <- View#mrview.reduce_funs],
case lists:member(ViewName, RedNames) of
true ->
case Args#mrargs.reduce of
- false -> Args#mrargs{view_type=map};
- _ -> Args#mrargs{view_type=red}
+ false -> Args#mrargs{view_type = map};
+ _ -> Args#mrargs{view_type = red}
end;
false ->
case lists:member(ViewName, View#mrview.map_names) of
- true -> Args#mrargs{view_type=map};
+ true -> Args#mrargs{view_type = map};
false -> set_view_type(Args, ViewName, Rest)
end
end.
-
set_extra(#mrargs{} = Args, Key, Value) ->
Extra0 = Args#mrargs.extra,
Extra1 = lists:ukeysort(1, [{Key, Value} | Extra0]),
Args#mrargs{extra = Extra1}.
-
extract_view(_Lang, _Args, _ViewName, []) ->
throw({not_found, missing_named_view});
-
-extract_view(Lang, #mrargs{view_type=map}=Args, Name, [View | Rest]) ->
+extract_view(Lang, #mrargs{view_type = map} = Args, Name, [View | Rest]) ->
Names = View#mrview.map_names ++ [N || {N, _} <- View#mrview.reduce_funs],
case lists:member(Name, Names) of
true -> {map, View, Args};
_ -> extract_view(Lang, Args, Name, Rest)
end;
-
-extract_view(Lang, #mrargs{view_type=red}=Args, Name, [View | Rest]) ->
+extract_view(Lang, #mrargs{view_type = red} = Args, Name, [View | Rest]) ->
RedNames = [N || {N, _} <- View#mrview.reduce_funs],
case lists:member(Name, RedNames) of
true -> {red, {index_of(Name, RedNames), Lang, View}, Args};
false -> extract_view(Lang, Args, Name, Rest)
end.
-
collate_fun(View) ->
#mrview{
options = Options
@@ -147,21 +142,19 @@ collate_fun(View) ->
_ -> fun collate_rows/2
end.
-
collate_raw(A, A) -> eq;
collate_raw(A, B) when A < B -> lt;
collate_raw(A, B) when A > B -> gt.
-
collate_rows({KeyA, DocIdA}, {KeyB, DocIdB}) ->
case couch_ejson_compare:less(KeyA, KeyB) of
N when N < 0 -> lt;
0 when DocIdA < DocIdB -> lt;
0 when DocIdA == DocIdB -> eq;
- 0 -> gt; % when DocIdA > DocIdB
+ % when DocIdA > DocIdB
+ 0 -> gt;
N when N > 0 -> gt
end;
-
collate_rows(KeyA, KeyB) ->
% When collating reduce group keys they don't
% come with a docid.
@@ -171,11 +164,9 @@ collate_rows(KeyA, KeyB) ->
N when N > 0 -> gt
end.
-
validate_args(Args) ->
validate_args(Args, []).
-
% This is mostly a copy of couch_validate:validate_args/1 but it doesn't
% update start / end keys and also throws a not_implemented error for reduce
%
@@ -194,11 +185,13 @@ validate_args(#mrargs{} = Args, Opts) ->
end,
case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
- {red, exact, _} -> ok;
+ {red, exact, _} ->
+ ok;
{red, _, KeyList} when is_list(KeyList) ->
Msg = <<"Multi-key fetchs for reduce views must use `group=true`">>,
mrverror(Msg);
- _ -> ok
+ _ ->
+ ok
end,
case Args#mrargs.keys of
@@ -207,13 +200,18 @@ validate_args(#mrargs{} = Args, Opts) ->
_ -> mrverror(<<"`keys` must be an array of strings.">>)
end,
- case {Args#mrargs.keys, Args#mrargs.start_key,
- Args#mrargs.end_key} of
- {undefined, _, _} -> ok;
- {[], _, _} -> ok;
- {[_|_], undefined, undefined} -> ok;
- _ -> mrverror(<<"`keys` is incompatible with `key`"
- ", `start_key` and `end_key`">>)
+ case {Args#mrargs.keys, Args#mrargs.start_key, Args#mrargs.end_key} of
+ {undefined, _, _} ->
+ ok;
+ {[], _, _} ->
+ ok;
+ {[_ | _], undefined, undefined} ->
+ ok;
+ _ ->
+ mrverror(<<
+ "`keys` is incompatible with `key`"
+ ", `start_key` and `end_key`"
+ >>)
end,
case Args#mrargs.start_key_docid of
@@ -314,17 +312,14 @@ validate_args(#mrargs{} = Args, Opts) ->
_ -> ok
end,
- Args#mrargs{group_level=GroupLevel}.
+ Args#mrargs{group_level = GroupLevel}.
validate_limit(Name, Value, _Min, _Max) when not is_integer(Value) ->
mrverror(<<"`", Name/binary, "` should be an integer">>);
-
validate_limit(Name, Value, Min, Max) when Value > Max ->
range_error_msg(Name, Min, Max);
-
validate_limit(Name, Value, Min, Max) when Value < Min ->
range_error_msg(Name, Min, Max);
-
validate_limit(_Name, _Value, _Min, _Max) ->
ok.
@@ -341,34 +336,25 @@ range_error_msg(Name, Min, Max) ->
"]"
>>).
-
-determine_group_level(#mrargs{group=undefined, group_level=undefined}) ->
+determine_group_level(#mrargs{group = undefined, group_level = undefined}) ->
0;
-
-determine_group_level(#mrargs{group=false, group_level=undefined}) ->
+determine_group_level(#mrargs{group = false, group_level = undefined}) ->
0;
-
-determine_group_level(#mrargs{group=false, group_level=Level}) when Level > 0 ->
+determine_group_level(#mrargs{group = false, group_level = Level}) when Level > 0 ->
mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
-
-determine_group_level(#mrargs{group=true, group_level=undefined}) ->
+determine_group_level(#mrargs{group = true, group_level = undefined}) ->
exact;
-
-determine_group_level(#mrargs{group_level=GroupLevel}) ->
+determine_group_level(#mrargs{group_level = GroupLevel}) ->
GroupLevel.
-
mrverror(Mesg) ->
throw({query_parse_error, Mesg}).
-
is_paginated(#mrargs{page_size = PageSize}) when is_integer(PageSize) ->
true;
-
is_paginated(_) ->
false.
-
active_tasks_info(ChangesDone, DbName, DDocId, LastSeq, DBSeq) ->
#{
<<"type">> => <<"indexer">>,
@@ -381,7 +367,6 @@ active_tasks_info(ChangesDone, DbName, DDocId, LastSeq, DBSeq) ->
<<"pid">> => list_to_binary(pid_to_list(self()))
}.
-
maybe_disable_custom_reduce_funs(Views) ->
case config:get_boolean("couch_views", "custom_reduce_enabled", true) of
true ->
@@ -390,38 +375,40 @@ maybe_disable_custom_reduce_funs(Views) ->
disable_custom_reduce_funs(Views)
end.
-
disable_custom_reduce_funs(Views) ->
- lists:map(fun(View) ->
- #mrview{
- reduce_funs = ReduceFuns
- } = View,
- {Builtin, Custom} = lists:partition(fun({_Name, RedSrc}) ->
- case RedSrc of
- <<"_", _/binary>> -> true;
- <<_/binary>> -> false
- end
- end, ReduceFuns),
- DisabledCustom = [{Name, disabled} || {Name, _Src} <- Custom],
- View#mrview{
- reduce_funs = Builtin ++ DisabledCustom
- }
- end, Views).
-
+ lists:map(
+ fun(View) ->
+ #mrview{
+ reduce_funs = ReduceFuns
+ } = View,
+ {Builtin, Custom} = lists:partition(
+ fun({_Name, RedSrc}) ->
+ case RedSrc of
+ <<"_", _/binary>> -> true;
+ <<_/binary>> -> false
+ end
+ end,
+ ReduceFuns
+ ),
+ DisabledCustom = [{Name, disabled} || {Name, _Src} <- Custom],
+ View#mrview{
+ reduce_funs = Builtin ++ DisabledCustom
+ }
+ end,
+ Views
+ ).
convert_seq_to_stamp(<<"0">>) ->
<<"0-0-0">>;
-
convert_seq_to_stamp(undefined) ->
<<"0-0-0">>;
-
convert_seq_to_stamp(Seq) ->
{_, Stamp, Batch, DocNumber} = fabric2_fdb:seq_to_vs(Seq),
- VS = integer_to_list(Stamp) ++ "-" ++ integer_to_list(Batch) ++ "-"
- ++ integer_to_list(DocNumber),
+ VS =
+ integer_to_list(Stamp) ++ "-" ++ integer_to_list(Batch) ++ "-" ++
+ integer_to_list(DocNumber),
list_to_binary(VS).
-
get_view_queries({Props}) ->
case couch_util:get_value(<<"queries">>, Props) of
undefined ->
@@ -432,7 +419,6 @@ get_view_queries({Props}) ->
throw({bad_request, "`queries` member must be an array."})
end.
-
get_view_keys({Props}) ->
case couch_util:get_value(<<"keys">>, Props) of
undefined ->
@@ -443,30 +429,24 @@ get_view_keys({Props}) ->
throw({bad_request, "`keys` member must be an array."})
end.
-
sort_lib({Lib}) ->
sort_lib(Lib, []).
sort_lib([], LAcc) ->
lists:keysort(1, LAcc);
-
-sort_lib([{LName, {LObj}}|Rest], LAcc) ->
- LSorted = sort_lib(LObj, []), % descend into nested object
- sort_lib(Rest, [{LName, LSorted}|LAcc]);
-
-sort_lib([{LName, LCode}|Rest], LAcc) ->
- sort_lib(Rest, [{LName, LCode}|LAcc]).
-
+sort_lib([{LName, {LObj}} | Rest], LAcc) ->
+ % descend into nested object
+ LSorted = sort_lib(LObj, []),
+ sort_lib(Rest, [{LName, LSorted} | LAcc]);
+sort_lib([{LName, LCode} | Rest], LAcc) ->
+ sort_lib(Rest, [{LName, LCode} | LAcc]).
index_of(Key, List) ->
index_of(Key, List, 1).
-
index_of(_, [], _) ->
throw({error, missing_named_view});
-
index_of(Key, [Key | _], Idx) ->
Idx;
-
index_of(Key, [_ | Rest], Idx) ->
- index_of(Key, Rest, Idx+1).
+ index_of(Key, Rest, Idx + 1).
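
convert_seq_to_stamp/1 above renders a sequence's versionstamp as three
dash-separated integers. The formatting step in isolation, with the
fabric2_fdb:seq_to_vs/1 lookup replaced by plain integer arguments:

    -module(stamp_example).
    -export([format/3]).

    %% <<"Stamp-Batch-DocNumber">>, e.g. format(123, 0, 7) evaluates
    %% to <<"123-0-7">>.
    format(Stamp, Batch, DocNumber) ->
        VS =
            integer_to_list(Stamp) ++ "-" ++ integer_to_list(Batch) ++
                "-" ++ integer_to_list(DocNumber),
        list_to_binary(VS).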
diff --git a/src/couch_views/src/couch_views_validate.erl b/src/couch_views/src/couch_views_validate.erl
index 558f65d1b..ab1f8cf01 100644
--- a/src/couch_views/src/couch_views_validate.erl
+++ b/src/couch_views/src/couch_views_validate.erl
@@ -12,22 +12,18 @@
-module(couch_views_validate).
-
-export([
validate_args/1,
validate_args/3,
validate_ddoc/2
]).
-
-define(LOWEST_KEY, null).
-define(HIGHEST_KEY, {<<255, 255, 255, 255>>}).
-
-include_lib("couch/include/couch_db.hrl").
-include("couch_views.hrl").
-
% There is another almost identical validate_args in couch_views_util. They
% should probably be merged at some point in the future.
%
@@ -45,11 +41,13 @@ validate_args(Args) ->
end,
case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
- {red, exact, _} -> ok;
+ {red, exact, _} ->
+ ok;
{red, _, KeyList} when is_list(KeyList) ->
Msg = <<"Multi-key fetches for reduce views must use `group=true`">>,
mrverror(Msg);
- _ -> ok
+ _ ->
+ ok
end,
case Args#mrargs.keys of
@@ -58,13 +56,18 @@ validate_args(Args) ->
_ -> mrverror(<<"`keys` must be an array of strings.">>)
end,
- case {Args#mrargs.keys, Args#mrargs.start_key,
- Args#mrargs.end_key} of
- {undefined, _, _} -> ok;
- {[], _, _} -> ok;
- {[_|_], undefined, undefined} -> ok;
- _ -> mrverror(<<"`keys` is incompatible with `key`"
- ", `start_key` and `end_key`">>)
+ case {Args#mrargs.keys, Args#mrargs.start_key, Args#mrargs.end_key} of
+ {undefined, _, _} ->
+ ok;
+ {[], _, _} ->
+ ok;
+ {[_ | _], undefined, undefined} ->
+ ok;
+ _ ->
+ mrverror(<<
+ "`keys` is incompatible with `key`"
+ ", `start_key` and `end_key`"
+ >>)
end,
case Args#mrargs.start_key_docid of
@@ -136,17 +139,19 @@ validate_args(Args) ->
{red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
end,
- SKDocId = case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
- {fwd, undefined} -> <<>>;
- {rev, undefined} -> <<255>>;
- {_, SKDocId1} -> SKDocId1
- end,
+ SKDocId =
+ case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
+ {fwd, undefined} -> <<>>;
+ {rev, undefined} -> <<255>>;
+ {_, SKDocId1} -> SKDocId1
+ end,
- EKDocId = case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
- {fwd, undefined} -> <<255>>;
- {rev, undefined} -> <<>>;
- {_, EKDocId1} -> EKDocId1
- end,
+ EKDocId =
+ case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
+ {fwd, undefined} -> <<255>>;
+ {rev, undefined} -> <<>>;
+ {_, EKDocId1} -> EKDocId1
+ end,
case is_boolean(Args#mrargs.sorted) of
true -> ok;
@@ -154,62 +159,62 @@ validate_args(Args) ->
end,
Args#mrargs{
- start_key_docid=SKDocId,
- end_key_docid=EKDocId,
- group_level=GroupLevel
+ start_key_docid = SKDocId,
+ end_key_docid = EKDocId,
+ group_level = GroupLevel
}.
-
validate_args(Db, DDoc, Args0) ->
{ok, State} = couch_views_util:ddoc_to_mrst(fabric2_db:name(Db), DDoc),
Args1 = apply_limit(State#mrst.partitioned, Args0),
validate_args(State, Args1).
-
validate_ddoc(#{} = Db, DDoc) ->
DbName = fabric2_db:name(Db),
IsPartitioned = fabric2_db:is_partitioned(Db),
validate_ddoc(DbName, IsPartitioned, DDoc).
-
% Private functions
-validate_ddoc(DbName, _IsDbPartitioned, DDoc) ->
+validate_ddoc(DbName, _IsDbPartitioned, DDoc) ->
ok = validate_ddoc_fields(DDoc#doc.body),
GetName = fun
(#mrview{map_names = [Name | _]}) -> Name;
(#mrview{reduce_funs = [{Name, _} | _]}) -> Name;
(_) -> null
end,
- ValidateView = fun(Ctx, #mrview{def=MapSrc, reduce_funs=Reds}=View) ->
+ ValidateView = fun(Ctx, #mrview{def = MapSrc, reduce_funs = Reds} = View) ->
couch_eval:try_compile(Ctx, map, GetName(View), MapSrc),
- lists:foreach(fun
- ({_RedName, <<"_sum", _/binary>>}) ->
- ok;
- ({_RedName, <<"_count", _/binary>>}) ->
- ok;
- ({_RedName, <<"_stats", _/binary>>}) ->
- ok;
- ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
- ok;
- ({_RedName, <<"_", _/binary>> = Bad}) ->
- Msg = ["`", Bad, "` is not a supported reduce function."],
- throw({invalid_design_doc, Msg});
- ({RedName, RedSrc}) ->
- couch_eval:try_compile(Ctx, reduce, RedName, RedSrc)
- end, Reds)
+ lists:foreach(
+ fun
+ ({_RedName, <<"_sum", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_count", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_stats", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_", _/binary>> = Bad}) ->
+ Msg = ["`", Bad, "` is not a supported reduce function."],
+ throw({invalid_design_doc, Msg});
+ ({RedName, RedSrc}) ->
+ couch_eval:try_compile(Ctx, reduce, RedName, RedSrc)
+ end,
+ Reds
+ )
end,
{ok, #mrst{
language = Lang,
views = Views
}} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
- Views =/= [] andalso couch_eval:with_context(#{language => Lang}, fun (Ctx) ->
- lists:foreach(fun(V) -> ValidateView(Ctx, V) end, Views)
- end),
+ Views =/= [] andalso
+ couch_eval:with_context(#{language => Lang}, fun(Ctx) ->
+ lists:foreach(fun(V) -> ValidateView(Ctx, V) end, Views)
+ end),
ok.
-
validate_args(#mrst{} = State, Args0) ->
Args = validate_args(Args0),
@@ -218,135 +223,146 @@ validate_args(#mrst{} = State, Args0) ->
case {ViewPartitioned, Partition} of
{true, undefined} ->
- Msg1 = <<"`partition` parameter is mandatory "
- "for queries to this view.">>,
+ Msg1 = <<
+ "`partition` parameter is mandatory "
+ "for queries to this view."
+ >>,
mrverror(Msg1);
{true, _} ->
apply_partition(Args, Partition);
{false, undefined} ->
Args;
{false, Value} when is_binary(Value) ->
- Msg2 = <<"`partition` parameter is not "
- "supported in this design doc">>,
+ Msg2 = <<
+ "`partition` parameter is not "
+ "supported in this design doc"
+ >>,
mrverror(Msg2)
end.
-
validate_ddoc_fields(DDoc) ->
MapFuncType = map_function_type(DDoc),
- lists:foreach(fun(Path) ->
- validate_ddoc_fields(DDoc, Path)
- end, [
- [{<<"filters">>, object}, {any, [object, string]}],
- [{<<"language">>, string}],
- [{<<"lists">>, object}, {any, [object, string]}],
- [{<<"options">>, object}],
- [{<<"options">>, object}, {<<"include_design">>, boolean}],
- [{<<"options">>, object}, {<<"local_seq">>, boolean}],
- [{<<"options">>, object}, {<<"partitioned">>, boolean}],
- [{<<"rewrites">>, [string, array]}],
- [{<<"shows">>, object}, {any, [object, string]}],
- [{<<"updates">>, object}, {any, [object, string]}],
- [{<<"validate_doc_update">>, string}],
- [{<<"views">>, object}, {<<"lib">>, object}],
- [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
- [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
- ]),
+ lists:foreach(
+ fun(Path) ->
+ validate_ddoc_fields(DDoc, Path)
+ end,
+ [
+ [{<<"filters">>, object}, {any, [object, string]}],
+ [{<<"language">>, string}],
+ [{<<"lists">>, object}, {any, [object, string]}],
+ [{<<"options">>, object}],
+ [{<<"options">>, object}, {<<"include_design">>, boolean}],
+ [{<<"options">>, object}, {<<"local_seq">>, boolean}],
+ [{<<"options">>, object}, {<<"partitioned">>, boolean}],
+ [{<<"rewrites">>, [string, array]}],
+ [{<<"shows">>, object}, {any, [object, string]}],
+ [{<<"updates">>, object}, {any, [object, string]}],
+ [{<<"validate_doc_update">>, string}],
+ [{<<"views">>, object}, {<<"lib">>, object}],
+ [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
+ [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
+ ]
+ ),
require_map_function_for_views(DDoc),
ok.
-
require_map_function_for_views({Props}) ->
case couch_util:get_value(<<"views">>, Props) of
- undefined -> ok;
+ undefined ->
+ ok;
{Views} ->
- lists:foreach(fun
- ({<<"lib">>, _}) -> ok;
- ({Key, {Value}}) ->
- case couch_util:get_value(<<"map">>, Value) of
- undefined -> throw({invalid_design_doc,
- <<"View `", Key/binary, "` must contain map function">>});
- _ -> ok
- end
- end, Views),
+ lists:foreach(
+ fun
+ ({<<"lib">>, _}) ->
+ ok;
+ ({Key, {Value}}) ->
+ case couch_util:get_value(<<"map">>, Value) of
+ undefined ->
+ throw(
+ {invalid_design_doc,
+ <<"View `", Key/binary, "` must contain map function">>}
+ );
+ _ ->
+ ok
+ end
+ end,
+ Views
+ ),
ok
end.
-
validate_ddoc_fields(DDoc, Path) ->
case validate_ddoc_fields(DDoc, Path, []) of
- ok -> ok;
+ ok ->
+ ok;
{error, {FailedPath0, Type0}} ->
FailedPath = iolist_to_binary(join(FailedPath0, <<".">>)),
Type = format_type(Type0),
- throw({invalid_design_doc,
- <<"`", FailedPath/binary, "` field must have ",
- Type/binary, " type">>})
+ throw(
+ {invalid_design_doc,
+ <<"`", FailedPath/binary, "` field must have ", Type/binary, " type">>}
+ )
end.
validate_ddoc_fields(undefined, _, _) ->
ok;
-
validate_ddoc_fields(_, [], _) ->
ok;
-
-validate_ddoc_fields({KVS}=Props, [{any, Type} | Rest], Acc) ->
- lists:foldl(fun
- ({Key, _}, ok) ->
- validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
- ({_, _}, {error, _}=Error) ->
- Error
- end, ok, KVS);
-
-validate_ddoc_fields({KVS}=Props, [{Key, Type} | Rest], Acc) ->
+validate_ddoc_fields({KVS} = Props, [{any, Type} | Rest], Acc) ->
+ lists:foldl(
+ fun
+ ({Key, _}, ok) ->
+ validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
+ ({_, _}, {error, _} = Error) ->
+ Error
+ end,
+ ok,
+ KVS
+ );
+validate_ddoc_fields({KVS} = Props, [{Key, Type} | Rest], Acc) ->
case validate_ddoc_field(Props, {Key, Type}) of
ok ->
- validate_ddoc_fields(couch_util:get_value(Key, KVS),
- Rest,
- [Key | Acc]);
+ validate_ddoc_fields(
+ couch_util:get_value(Key, KVS),
+ Rest,
+ [Key | Acc]
+ );
error ->
{error, {[Key | Acc], Type}};
{error, Key1} ->
{error, {[Key1 | Acc], Type}}
end.
-
validate_ddoc_field(undefined, Type) when is_atom(Type) ->
ok;
-
validate_ddoc_field(_, any) ->
ok;
-
validate_ddoc_field(Value, Types) when is_list(Types) ->
- lists:foldl(fun
- (_, ok) -> ok;
- (Type, _) -> validate_ddoc_field(Value, Type)
- end, error, Types);
+ lists:foldl(
+ fun
+ (_, ok) -> ok;
+ (Type, _) -> validate_ddoc_field(Value, Type)
+ end,
+ error,
+ Types
+ );
validate_ddoc_field(Value, string) when is_binary(Value) ->
ok;
-
validate_ddoc_field(Value, array) when is_list(Value) ->
ok;
-
validate_ddoc_field({Value}, object) when is_list(Value) ->
ok;
-
validate_ddoc_field(Value, boolean) when is_boolean(Value) ->
ok;
-
validate_ddoc_field({Props}, {any, Type}) ->
validate_ddoc_field1(Props, Type);
-
validate_ddoc_field({Props}, {Key, Type}) ->
validate_ddoc_field(couch_util:get_value(Key, Props), Type);
-
validate_ddoc_field(_, _) ->
error.
-
validate_ddoc_field1([], _) ->
ok;
-
validate_ddoc_field1([{Key, Value} | Rest], Type) ->
case validate_ddoc_field(Value, Type) of
ok ->
@@ -355,61 +371,47 @@ validate_ddoc_field1([{Key, Value} | Rest], Type) ->
{error, Key}
end.
-
map_function_type({Props}) ->
case couch_util:get_value(<<"language">>, Props) of
<<"query">> -> object;
_ -> string
end.
-
format_type(Type) when is_atom(Type) ->
?l2b(atom_to_list(Type));
-
format_type(Types) when is_list(Types) ->
iolist_to_binary(join(lists:map(fun atom_to_list/1, Types), <<" or ">>)).
-
join(L, Sep) ->
join(L, Sep, []).
-
-join([H|[]], _, Acc) ->
+join([H | []], _, Acc) ->
[H | Acc];
-
-join([H|T], Sep, Acc) ->
+join([H | T], Sep, Acc) ->
join(T, Sep, [Sep, H | Acc]).
-
-determine_group_level(#mrargs{group=undefined, group_level=undefined}) ->
+determine_group_level(#mrargs{group = undefined, group_level = undefined}) ->
0;
-
-determine_group_level(#mrargs{group=false, group_level=undefined}) ->
+determine_group_level(#mrargs{group = false, group_level = undefined}) ->
0;
-
-determine_group_level(#mrargs{group=false, group_level=Level}) when Level > 0 ->
+determine_group_level(#mrargs{group = false, group_level = Level}) when Level > 0 ->
mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
-
-determine_group_level(#mrargs{group=true, group_level=undefined}) ->
+determine_group_level(#mrargs{group = true, group_level = undefined}) ->
exact;
-
-determine_group_level(#mrargs{group_level=GroupLevel}) ->
+determine_group_level(#mrargs{group_level = GroupLevel}) ->
GroupLevel.
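%% A sketch of the mapping, in query-string terms: no group parameters
%% yield level 0 (no grouping); group=true without group_level yields
%% 'exact' (group by the whole key); an explicit group_level=N yields N
%% (group by the first N elements of an array key).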
-
mrverror(Mesg) ->
throw({query_parse_error, Mesg}).
-
-apply_partition(#mrargs{keys=[{p, _, _} | _]} = Args, _Partition) ->
- Args; % already applied
-
-apply_partition(#mrargs{keys=Keys} = Args, Partition) when Keys /= undefined ->
- Args#mrargs{keys=[{p, Partition, K} || K <- Keys]};
-
-apply_partition(#mrargs{start_key={p, _, _}, end_key={p, _, _}} = Args, _Partition) ->
- Args; % already applied.
-
+apply_partition(#mrargs{keys = [{p, _, _} | _]} = Args, _Partition) ->
+ % already applied
+ Args;
+apply_partition(#mrargs{keys = Keys} = Args, Partition) when Keys /= undefined ->
+ Args#mrargs{keys = [{p, Partition, K} || K <- Keys]};
+apply_partition(#mrargs{start_key = {p, _, _}, end_key = {p, _, _}} = Args, _Partition) ->
+ % already applied.
+ Args;
apply_partition(Args, Partition) ->
#mrargs{
direction = Dir,
@@ -417,44 +419,59 @@ apply_partition(Args, Partition) ->
end_key = EndKey
} = Args,
- {DefSK, DefEK} = case Dir of
- fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
- rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
- end,
-
- SK0 = if StartKey /= undefined -> StartKey; true -> DefSK end,
- EK0 = if EndKey /= undefined -> EndKey; true -> DefEK end,
+ {DefSK, DefEK} =
+ case Dir of
+ fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
+ rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
+ end,
+
+ SK0 =
+ if
+ StartKey /= undefined -> StartKey;
+ true -> DefSK
+ end,
+ EK0 =
+ if
+ EndKey /= undefined -> EndKey;
+ true -> DefEK
+ end,
Args#mrargs{
start_key = {p, Partition, SK0},
end_key = {p, Partition, EK0}
}.
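%% A sketch of the effect, assuming Partition = <<"sensors">>: keys=[1, 2]
%% becomes [{p, <<"sensors">>, 1}, {p, <<"sensors">>, 2}], and an
%% unspecified start/end key range is clamped to
%% {p, <<"sensors">>, ?LOWEST_KEY} .. {p, <<"sensors">>, ?HIGHEST_KEY}
%% (swapped for dir=rev), so a partitioned query never leaves its
%% partition's key range.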
-
get_extra(#mrargs{} = Args, Key) ->
couch_util:get_value(Key, Args#mrargs.extra).
-
apply_limit(ViewPartitioned, Args) ->
Options = Args#mrargs.extra,
IgnorePQLimit = lists:keyfind(ignore_partition_query_limit, 1, Options),
- LimitType = case {ViewPartitioned, IgnorePQLimit} of
- {true, false} -> "partition_query_limit";
- {true, _} -> "query_limit";
- {false, _} -> "query_limit"
- end,
-
- MaxLimit = config:get_integer("query_server_config",
- LimitType, ?MAX_VIEW_LIMIT),
+ LimitType =
+ case {ViewPartitioned, IgnorePQLimit} of
+ {true, false} -> "partition_query_limit";
+ {true, _} -> "query_limit";
+ {false, _} -> "query_limit"
+ end,
+
+ MaxLimit = config:get_integer(
+ "query_server_config",
+ LimitType,
+ ?MAX_VIEW_LIMIT
+ ),
% Set the highest limit possible if a user has not
% specified a limit
- Args1 = case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
- true -> Args#mrargs{limit = MaxLimit};
- false -> Args
- end,
-
- if Args1#mrargs.limit =< MaxLimit -> Args1; true ->
- Fmt = "Limit is too large, must not exceed ~p",
- mrverror(io_lib:format(Fmt, [MaxLimit]))
+ Args1 =
+ case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
+ true -> Args#mrargs{limit = MaxLimit};
+ false -> Args
+ end,
+
+ if
+ Args1#mrargs.limit =< MaxLimit ->
+ Args1;
+ true ->
+ Fmt = "Limit is too large, must not exceed ~p",
+ mrverror(io_lib:format(Fmt, [MaxLimit]))
end.
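%% A behavioural sketch, assuming a configured partition_query_limit of
%% 1000: a partitioned request that sends no explicit limit is given
%% limit = 1000, while an explicit limit of 2000 is rejected with
%% "Limit is too large, must not exceed 1000".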
diff --git a/src/ctrace/src/ctrace.erl b/src/ctrace/src/ctrace.erl
index 2821352bf..36bfd2fed 100644
--- a/src/ctrace/src/ctrace.erl
+++ b/src/ctrace/src/ctrace.erl
@@ -40,36 +40,33 @@
match/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("passage/include/opentracing.hrl").
-include("ctrace.hrl").
-include_lib("kernel/include/logger.hrl").
-
--type operation()
- :: atom()
+-type operation() ::
+ atom()
| fun().
--type tags()
- :: #{atom() => term()}.
+-type tags() ::
+ #{atom() => term()}.
--type log_fields()
- :: #{atom() => term()}.
+-type log_fields() ::
+ #{atom() => term()}.
--type start_span_options()
- :: [start_span_option()].
+-type start_span_options() ::
+ [start_span_option()].
--type start_span_option()
- :: {time, erlang:timespan()}
+-type start_span_option() ::
+ {time, erlang:timespan()}
| {tags, tags()}.
--type finish_span_options()
- :: [finish_span_option()].
-
--type finish_span_option()
- :: {time, erlang:timespan()}.
+-type finish_span_options() ::
+ [finish_span_option()].
+-type finish_span_option() ::
+ {time, erlang:timespan()}.
-spec is_enabled() -> boolean().
@@ -83,55 +80,57 @@ is_enabled() ->
IsEnabled
end.
-
%% @equiv with_span(Operation, [], Fun)
-spec with_span(
- Operation :: operation(),
- Fun
- ) -> Result when
- Fun :: fun (() -> Result),
- Result :: term().
+ Operation :: operation(),
+ Fun
+) -> Result when
+ Fun :: fun(() -> Result),
+ Result :: term().
with_span(Operation, Fun) ->
with_span(Operation, #{}, Fun).
-spec with_span(
- Operation :: operation(),
- TagsOrOptions :: tags() | start_span_options(),
- Fun
- ) -> Result when
- Fun :: fun (() -> Result),
- Result :: term().
+ Operation :: operation(),
+ TagsOrOptions :: tags() | start_span_options(),
+ Fun
+) -> Result when
+ Fun :: fun(() -> Result),
+ Result :: term().
with_span(Operation, ExtraTags, Fun) when is_map(ExtraTags) ->
with_span(Operation, [{tags, ExtraTags}], Fun);
-
-with_span(Operation, Options, Fun) ->
+with_span(Operation, Options, Fun) ->
try
start_span(Operation, Options),
Fun()
- catch Type:Reason:Stack ->
- log(#{
- ?LOG_FIELD_ERROR_KIND => Type,
- ?LOG_FIELD_MESSAGE => Reason,
- ?LOG_FIELD_STACK => Stack
- }, [error]),
- erlang:raise(Type, Reason, Stack)
+ catch
+ Type:Reason:Stack ->
+ log(
+ #{
+ ?LOG_FIELD_ERROR_KIND => Type,
+ ?LOG_FIELD_MESSAGE => Reason,
+ ?LOG_FIELD_STACK => Stack
+ },
+ [error]
+ ),
+ erlang:raise(Type, Reason, Stack)
after
finish_span()
end.
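%% A minimal usage sketch (the operation name, tag, and do_read/1 are
%% illustrative):
%%   ctrace:with_span('db.read', #{'db.name' => DbName}, fun() ->
%%       do_read(DbName)
%%   end).
%% The span is closed in the after clause even when Fun() throws; the
%% error is first logged onto the span and then re-raised.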
-spec start_span(
- Operation :: operation()
- ) -> ok.
+ Operation :: operation()
+) -> ok.
start_span(Operation) ->
start_span(Operation, []).
-spec start_span(
- Operation :: operation(),
- Options :: start_span_options()
- ) -> ok.
+ Operation :: operation(),
+ Options :: start_span_options()
+) -> ok.
start_span(Operation, Options) ->
case is_enabled() of
@@ -143,18 +142,18 @@ start_span(Operation, Options) ->
do_start_span(Fun, Options) when is_function(Fun) ->
start_span(fun_to_op(Fun), Options);
-
do_start_span(OperationName, Options0) ->
Options1 = add_time(Options0),
case passage_pd:current_span() of
undefined ->
put(?ORIGIN_KEY, atom_to_binary(OperationName, utf8)),
- Tags = case lists:keyfind(tags, 1, Options0) of
- {tags, T} ->
- T;
- false ->
- #{}
- end,
+ Tags =
+ case lists:keyfind(tags, 1, Options0) of
+ {tags, T} ->
+ T;
+ false ->
+ #{}
+ end,
case match(OperationName, Tags) of
true ->
Options = [
@@ -178,23 +177,23 @@ finish_span() ->
finish_span([]).
-spec finish_span(
- Options :: finish_span_options()
- ) -> ok.
+ Options :: finish_span_options()
+) -> ok.
finish_span(Options0) ->
Options = add_time(Options0),
passage_pd:finish_span(Options).
-spec tag(
- Tags :: tags()
- ) -> ok.
+ Tags :: tags()
+) -> ok.
tag(Tags) ->
passage_pd:set_tags(Tags).
-spec log(
- Fields :: log_fields() | fun (() -> log_fields())
- ) -> ok.
+ Fields :: log_fields() | fun(() -> log_fields())
+) -> ok.
log(FieldsOrFun) ->
log(FieldsOrFun, []).
@@ -280,10 +279,10 @@ context() ->
end.
-spec external_span(
- TraceId :: passage:trace_id(),
- SpanId :: undefined | passage:span_id(),
- ParentSpanId :: undefined | passage:span_id()
- ) -> passage:maybe_span().
+ TraceId :: passage:trace_id(),
+ SpanId :: undefined | passage:span_id(),
+ ParentSpanId :: undefined | passage:span_id()
+) -> passage:maybe_span().
external_span(TraceId, undefined, ParentSpanId) ->
external_span(TraceId, rand:uniform(16#FFFFFFFFFFFFFFFF), ParentSpanId);
@@ -303,7 +302,6 @@ external_span(TraceId, SpanId, ParentSpanId) ->
State = {ok, <<"binary">>, Binary, error},
passage:extract_span(?MAIN_TRACER, binary, IterFun, State).
-
match(OperationId, Tags) ->
OpMod = ctrace_config:filter_module_name(OperationId),
case erlang:function_exported(OpMod, match, 1) of
@@ -317,7 +315,6 @@ match(OperationId, Tags) ->
end
end.
-
do_match(Mod, Tags) ->
case Mod:match(Tags) of
true ->
@@ -328,7 +325,6 @@ do_match(Mod, Tags) ->
rand:uniform() =< Rate
end.
-
add_tags(Options, ExtraTags) ->
case lists:keytake(tags, 1, Options) of
{value, {tags, T}, Opts} ->
@@ -354,8 +350,8 @@ maybe_start_root(Options) ->
end.
fun_to_op(Fun) ->
- {module, M} = erlang:fun_info(Fun, module),
- {name, F} = erlang:fun_info(Fun, name),
- {arity, A} = erlang:fun_info(Fun, arity),
- Str = io_lib:format("~s:~s/~b", [M, F, A]),
- list_to_atom(lists:flatten(Str)).
+ {module, M} = erlang:fun_info(Fun, module),
+ {name, F} = erlang:fun_info(Fun, name),
+ {arity, A} = erlang:fun_info(Fun, arity),
+ Str = io_lib:format("~s:~s/~b", [M, F, A]),
+ list_to_atom(lists:flatten(Str)).
diff --git a/src/ctrace/src/ctrace_config.erl b/src/ctrace/src/ctrace_config.erl
index e9141d21a..011d9504e 100644
--- a/src/ctrace/src/ctrace_config.erl
+++ b/src/ctrace/src/ctrace_config.erl
@@ -31,12 +31,10 @@
-include("ctrace.hrl").
-include_lib("kernel/include/logger.hrl").
-
-spec is_enabled() -> boolean().
is_enabled() ->
config:get_boolean("tracing", "enabled", false).
-
-spec update() -> ok.
update() ->
case is_enabled() of
@@ -45,37 +43,41 @@ update() ->
CompiledFilters = get_compiled_filters(),
- RemovedFilters = lists:foldl(fun({OperationId, FilterDef}, Acc) ->
- case compile_filter(OperationId, FilterDef) of
- true -> Acc -- [OperationId];
- false -> Acc
- end
- end, CompiledFilters, config:get("tracing.filters")),
-
- lists:foreach(fun(OperationId) ->
- ModName = filter_module_name(OperationId),
- code:delete(ModName),
- code:purge(ModName)
- end, RemovedFilters),
+ RemovedFilters = lists:foldl(
+ fun({OperationId, FilterDef}, Acc) ->
+ case compile_filter(OperationId, FilterDef) of
+ true -> Acc -- [OperationId];
+ false -> Acc
+ end
+ end,
+ CompiledFilters,
+ config:get("tracing.filters")
+ ),
+
+ lists:foreach(
+ fun(OperationId) ->
+ ModName = filter_module_name(OperationId),
+ code:delete(ModName),
+ code:purge(ModName)
+ end,
+ RemovedFilters
+ ),
case config:get("tracing.filters", "all") of
undefined -> compile_filter("all", "(#{}) -> false");
_ -> ok
end;
-
false ->
jaeger_passage:stop_tracer(?MAIN_TRACER)
end,
ok.
-
-spec filter_module_name(atom() | string()) -> atom().
filter_module_name(OperationId) when is_atom(OperationId) ->
filter_module_name(atom_to_list(OperationId));
filter_module_name(OperationId) ->
list_to_atom("ctrace_filter_" ++ OperationId).
-
handle_config_change("tracing", "enabled", _, _Persist, St) ->
update(),
{ok, St};
@@ -88,7 +90,6 @@ handle_config_change(_Sec, _Key, _Val, _Persist, St) ->
handle_config_terminate(_Server, _Reason, _State) ->
update().
-
maybe_start_main_tracer(TracerId) ->
case passage_tracer_registry:get_reporter(TracerId) of
error ->
@@ -97,35 +98,35 @@ maybe_start_main_tracer(TracerId) ->
true
end.
-
start_main_tracer(TracerId) ->
MaxQueueLen = config:get_integer("tracing", "max_queue_len", 1024),
Sampler = jaeger_passage_sampler_queue_limit:new(
- passage_sampler_all:new(), TracerId, MaxQueueLen),
+ passage_sampler_all:new(), TracerId, MaxQueueLen
+ ),
ServiceName = list_to_atom(config:get("tracing", "app_name", "couchdb")),
- ProtocolOptions = case config:get("tracing", "protocol", "udp") of
- "udp" ->
- [
- {thrift_format, list_to_atom(
- config:get("tracing", "thrift_format", "compact"))},
- {agent_host,
- config:get("tracing", "agent_host", "127.0.0.1")},
- {agent_port,
- config:get_integer("tracing", "agent_port", 6831)},
- {protocol, udp},
- {default_service_name, ServiceName}
- ];
- "http" ++ _ ->
- [
- {endpoint,
- config:get("tracing", "endpoint", "http://127.0.0.1:14268")},
- {protocol, http},
- {http_client, fun http_client/5},
- {default_service_name, ServiceName}
- ]
- end,
- Options = [{default_service_name, ServiceName}|ProtocolOptions],
+ ProtocolOptions =
+ case config:get("tracing", "protocol", "udp") of
+ "udp" ->
+ [
+ {thrift_format,
+ list_to_atom(
+ config:get("tracing", "thrift_format", "compact")
+ )},
+ {agent_host, config:get("tracing", "agent_host", "127.0.0.1")},
+ {agent_port, config:get_integer("tracing", "agent_port", 6831)},
+ {protocol, udp},
+ {default_service_name, ServiceName}
+ ];
+ "http" ++ _ ->
+ [
+ {endpoint, config:get("tracing", "endpoint", "http://127.0.0.1:14268")},
+ {protocol, http},
+ {http_client, fun http_client/5},
+ {default_service_name, ServiceName}
+ ]
+ end,
+ Options = [{default_service_name, ServiceName} | ProtocolOptions],
ok = jaeger_passage:start_tracer(TracerId, Sampler, Options).
http_client(Endpoint, Method, Headers, Body, _ReporterOptions) ->
@@ -137,20 +138,24 @@ compile_filter(OperationId, FilterDef) ->
couch_log:info("Compiling filter : ~s", [OperationId]),
ctrace_dsl:compile(OperationId, FilterDef),
true
- catch throw:{error, Reason} ->
- ?LOG_ERROR(#{what => compile_filter, id => OperationId, details => Reason}),
- couch_log:error("Cannot compile ~s :: ~s~n", [OperationId, Reason]),
- false
+ catch
+ throw:{error, Reason} ->
+ ?LOG_ERROR(#{what => compile_filter, id => OperationId, details => Reason}),
+ couch_log:error("Cannot compile ~s :: ~s~n", [OperationId, Reason]),
+ false
end.
-
get_compiled_filters() ->
- lists:foldl(fun({Mod, _Path}, Acc) ->
- ModStr = atom_to_list(Mod),
- case ModStr of
- "ctrace_filter_" ++ OpName ->
- [OpName | Acc];
- _ ->
- Acc
- end
- end, [], code:all_loaded()).
+ lists:foldl(
+ fun({Mod, _Path}, Acc) ->
+ ModStr = atom_to_list(Mod),
+ case ModStr of
+ "ctrace_filter_" ++ OpName ->
+ [OpName | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ [],
+ code:all_loaded()
+ ).
diff --git a/src/ctrace/src/ctrace_dsl.erl b/src/ctrace/src/ctrace_dsl.erl
index 5e0b0f252..a62985dbd 100644
--- a/src/ctrace/src/ctrace_dsl.erl
+++ b/src/ctrace/src/ctrace_dsl.erl
@@ -20,33 +20,30 @@
source/2
]).
-
-type ast() :: erl_syntax:syntaxTree().
-
-spec compile(OperationId :: string(), FilterDef :: string()) -> ok.
compile(OperationId, FilterDef) ->
AST = parse_filter(OperationId, FilterDef),
merl:compile_and_load(AST),
ok.
-
-spec source(OperationId :: string(), FilterDef :: string()) -> string().
source(OperationId, FilterDef) ->
AST = parse_filter(OperationId, FilterDef),
Options = [{paper, 160}, {ribbon, 80}],
erl_prettypr:format(erl_syntax:form_list(AST), Options).
-
-spec parse_filter(OperationId :: string(), FilterDef :: string()) -> [ast()].
parse_filter(OperationId, FilterDef) ->
AST = merl:quote("match" ++ FilterDef ++ "."),
case AST of
- ?Q("match(_@Args) when _@__@Guard -> _@Return.")
- when erl_syntax:type(Args) == map_expr ->
- validate_args(Args),
- validate_return(Return),
- generate(OperationId, Args, Guard, Return);
+ ?Q("match(_@Args) when _@__@Guard -> _@Return.") when
+ erl_syntax:type(Args) == map_expr
+ ->
+ validate_args(Args),
+ validate_return(Return),
+ generate(OperationId, Args, Guard, Return);
?Q("match(_@Args) when _@__@Guard -> _@@_.") ->
fail("The only argument of the filter should be map");
?Q("match(_@@Args) when _@__@Guard -> _@@_.") ->
@@ -55,27 +52,29 @@ parse_filter(OperationId, FilterDef) ->
fail("Unknown shape of a filter function")
end.
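%% A sketch of accepted filter definitions (field names illustrative;
%% parse_filter/2 prepends "match" and appends "."):
%%   "(#{}) -> true"                             trace everything
%%   "(#{method := M}) when M == 'GET' -> 0.9"   sample 90% of GETs
%% The single argument must be a map pattern binding atom field names to
%% fresh variables, and the body must be true, false, or a float
%% sampling rate.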
-
-spec validate_args(MapAST :: ast()) -> ok.
validate_args(MapAST) ->
    %% Unfortunately merl doesn't seem to support maps
%% so we had to do it manually
- lists:foldl(fun(AST, Bindings) ->
- erl_syntax:type(AST) == map_field_exact
- orelse fail("Only #{field := Var} syntax is supported in the header"),
- NameAST = erl_syntax:map_field_exact_name(AST),
- erl_syntax:type(NameAST) == atom
- orelse fail("Only atoms are supported as field names in the header"),
- Name = erl_syntax:atom_value(NameAST),
- VarAST = erl_syntax:map_field_exact_value(AST),
- erl_syntax:type(VarAST) == variable
- orelse fail("Only capitalized names are supported as matching variables in the header"),
- Var = erl_syntax:variable_name(VarAST),
- maps:is_key(Var, Bindings)
- andalso fail("'~s' variable is already in use", [Var]),
- Bindings#{Var => Name}
- end, #{}, erl_syntax:map_expr_fields(MapAST)).
-
+ lists:foldl(
+ fun(AST, Bindings) ->
+ erl_syntax:type(AST) == map_field_exact orelse
+ fail("Only #{field := Var} syntax is supported in the header"),
+ NameAST = erl_syntax:map_field_exact_name(AST),
+ erl_syntax:type(NameAST) == atom orelse
+ fail("Only atoms are supported as field names in the header"),
+ Name = erl_syntax:atom_value(NameAST),
+ VarAST = erl_syntax:map_field_exact_value(AST),
+ erl_syntax:type(VarAST) == variable orelse
+ fail("Only capitalized names are supported as matching variables in the header"),
+ Var = erl_syntax:variable_name(VarAST),
+ maps:is_key(Var, Bindings) andalso
+ fail("'~s' variable is already in use", [Var]),
+ Bindings#{Var => Name}
+ end,
+ #{},
+ erl_syntax:map_expr_fields(MapAST)
+ ).
-spec validate_return(Return :: [ast()]) -> ok.
validate_return(Return) ->
@@ -83,11 +82,9 @@ validate_return(Return) ->
?Q("true") -> ok;
?Q("false") -> ok;
?Q("_@AST") when erl_syntax:type(AST) == float -> ok;
- _ ->
- fail("Unsupported return value '~s'", [erl_prettypr:format(Return)])
+ _ -> fail("Unsupported return value '~s'", [erl_prettypr:format(Return)])
end.
-
generate(OperationId, Args, Guard, Return) ->
ModuleName = ctrace_config:filter_module_name(OperationId),
Module = ?Q("-module('@ModuleName@')."),
@@ -98,9 +95,8 @@ generate(OperationId, Args, Guard, Return) ->
]),
lists:flatten([Module, Export, Function]).
-
fail(Msg) ->
throw({error, Msg}).
fail(Msg, Args) ->
-    throw({error, lists:flatten(io_lib:format(Msg, Args))}).
\ No newline at end of file
+ throw({error, lists:flatten(io_lib:format(Msg, Args))}).
diff --git a/src/ctrace/src/ctrace_sup.erl b/src/ctrace/src/ctrace_sup.erl
index 70de3c586..013a1fba3 100644
--- a/src/ctrace/src/ctrace_sup.erl
+++ b/src/ctrace/src/ctrace_sup.erl
@@ -38,4 +38,4 @@ init([]) ->
start => {config_listener_mon, start_link, [ctrace_config, nil]}
}
],
-    {ok, {Flags, Children}}.
\ No newline at end of file
+ {ok, {Flags, Children}}.
diff --git a/src/ebtree/src/ebtree.erl b/src/ebtree/src/ebtree.erl
index a62074ca0..43d68d002 100644
--- a/src/ebtree/src/ebtree.erl
+++ b/src/ebtree/src/ebtree.erl
@@ -13,25 +13,25 @@
-module(ebtree).
-export([
- open/3,
- open/4,
- min/0,
- max/0,
- insert/4,
- insert_multi/3,
- delete/3,
- lookup/3,
- lookup_multi/3,
- range/6,
- reverse_range/6,
- fold/4,
- fold/5,
- reduce/4,
- reduce/5,
- full_reduce/2,
- group_reduce/7,
- group_reduce/8,
- validate_tree/2
+ open/3,
+ open/4,
+ min/0,
+ max/0,
+ insert/4,
+ insert_multi/3,
+ delete/3,
+ lookup/3,
+ lookup_multi/3,
+ range/6,
+ reverse_range/6,
+ fold/4,
+ fold/5,
+ reduce/4,
+ reduce/5,
+ full_reduce/2,
+ group_reduce/7,
+ group_reduce/8,
+ validate_tree/2
]).
-record(node, {
@@ -39,7 +39,8 @@
level = 0,
prev,
next,
- members = [] %% [{Key0, Value0} | {FirstKey0, LastKey0, Pointer0, Reduction0}, ...]
+ %% [{Key0, Value0} | {FirstKey0, LastKey0, Pointer0, Reduction0}, ...]
+ members = []
}).
-record(tree, {
@@ -72,13 +73,11 @@
-define(MIN, <<0:1>>).
-define(MAX, <<1:1>>).
-
%% @equiv open(Db, Prefix, Order, [])
-spec open(term(), binary(), pos_integer()) -> #tree{}.
open(Db, Prefix, Order) ->
open(Db, Prefix, Order, []).
-
%% @doc Open a new ebtree, initialising it if it doesn't already exist.
%% @param Db An erlfdb database or transaction.
%% @param Prefix The key prefix applied to all ebtree keys.
@@ -87,7 +86,9 @@ open(Db, Prefix, Order) ->
%% @param Options Supported options are {reduce_fun, Fun} and {collate_fun, Fun}.
%% @returns A data structure representing the ebtree, to be passed to all other functions.
-spec open(term(), binary(), pos_integer(), list()) -> #tree{}.
-open(Db, Prefix, Order, Options) when is_binary(Prefix), is_integer(Order), Order > 2, Order rem 2 == 0 ->
+open(Db, Prefix, Order, Options) when
+ is_binary(Prefix), is_integer(Order), Order > 2, Order rem 2 == 0
+->
ReduceFun = proplists:get_value(reduce_fun, Options, fun reduce_noop/2),
CollateFun = proplists:get_value(collate_fun, Options, fun collate_raw/2),
EncodeFun = proplists:get_value(encode_fun, Options, fun encode_erlang/3),
@@ -113,12 +114,10 @@ open(Db, Prefix, Order, Options) when is_binary(Prefix), is_integer(Order), Orde
end
end).
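%% A minimal usage sketch, assuming an erlfdb database handle Db; the
%% reduce fun mirrors the counting reduction used in the tests below:
%%   CountFun = fun
%%       (KVs, false) -> length(KVs);
%%       (Reds, true) -> lists:sum(Reds)
%%   end,
%%   Tree = ebtree:open(Db, <<"example">>, 4, [{reduce_fun, CountFun}]),
%%   ebtree:insert(Db, Tree, <<"a">>, 1).
%% The order persisted at first open wins over later values (see
%% order_is_preserved_test/0).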
-
%% @doc a special value guaranteed to be smaller than any value in an ebtree.
min() ->
?MIN.
-
%% @doc a special value guaranteed to be larger than any value in an ebtree.
max() ->
?MAX.
@@ -153,7 +152,6 @@ lookup(Db, #tree{} = Tree, Key) ->
end,
fold(Db, Tree, Fun, false, []).
-
%% @doc Lookup a list of keys in the ebtree.
%% @param Db An erlfdb database or transaction.
%% @param Tree the ebtree.
@@ -167,11 +165,9 @@ lookup_multi(Db, #tree{} = Tree, Keys) ->
{_, _, FoundKeys} = fold(Db, Tree, FoldFun, Acc, []),
FoundKeys.
-
lookup_multi_fold(_, {_, [], _} = Acc) ->
% No more keys to find
{stop, Acc};
-
lookup_multi_fold({visit, Key1, Value}, {Tree, [Key2 | Rest], Acc}) ->
case collate(Tree, Key1, Key2) of
lt ->
@@ -184,7 +180,6 @@ lookup_multi_fold({visit, Key1, Value}, {Tree, [Key2 | Rest], Acc}) ->
% The user key wasn't found so we drop it
lookup_multi_fold({visit, Key1, Value}, {Tree, Rest, Acc})
end;
-
lookup_multi_fold({traverse, FKey, LKey, R}, {Tree, [UKey | Rest], Acc}) ->
case collate(Tree, FKey, UKey, [gt]) of
true ->
@@ -201,12 +196,10 @@ lookup_multi_fold({traverse, FKey, LKey, R}, {Tree, [UKey | Rest], Acc}) ->
end
end.
-
%% @equiv fold(Db, Tree, Fun, Acc, [])
fold(Db, #tree{} = Tree, Fun, Acc) ->
fold(Db, Tree, Fun, Acc, []).
-
%% @doc Custom traversal of the ebtree.
%% @param Db An erlfdb database or transaction.
%% @param Tree the ebtree.
@@ -216,8 +209,8 @@ fold(Db, #tree{} = Tree, Fun, Acc) ->
%% @returns the final accumulator.
-type fold_args() ::
- {visit, Key :: term(), Value :: term()} |
- {traverse, First :: term(), Last :: term(), Reduction :: term()}.
+ {visit, Key :: term(), Value :: term()}
+ | {traverse, First :: term(), Last :: term(), Reduction :: term()}.
-type fold_option() :: [{dir, fwd | rev}].
@@ -235,19 +228,16 @@ fold(Db, #tree{} = Tree, Fun, Acc, Options) ->
end),
Reduce.
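%% A sketch of a custom fold, counting visited leaf entries:
%%   Count = ebtree:fold(Db, Tree,
%%       fun({visit, _K, _V}, N) -> {ok, N + 1};
%%          ({traverse, _F, _L, _R}, N) -> {ok, N}
%%       end, 0).
%% Returning {skip, Acc} from a traverse callback prunes that subtree,
%% and {stop, Acc} ends the fold early.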
-
fold(Db, #tree{} = Tree, #node{} = Node, Fun, Acc, Options) ->
Dir = proplists:get_value(dir, Options, fwd),
- Members = case Dir of
- fwd -> Node#node.members;
- rev -> lists:reverse(Node#node.members)
- end,
+ Members =
+ case Dir of
+ fwd -> Node#node.members;
+ rev -> lists:reverse(Node#node.members)
+ end,
fold(Db, #tree{} = Tree, Members, Fun, Acc, Options);
-
-
fold(_Db, #tree{} = _Tree, [], _Fun, Acc, _Options) ->
{ok, Acc};
-
fold(Db, #tree{} = Tree, [{K, V} | Rest], Fun, Acc0, Options) ->
case Fun({visit, K, V}, Acc0) of
{ok, Acc1} ->
@@ -255,7 +245,6 @@ fold(Db, #tree{} = Tree, [{K, V} | Rest], Fun, Acc0, Options) ->
{stop, Acc1} ->
{stop, Acc1}
end;
-
fold(Db, #tree{} = Tree, [{F, L, P, R} | Rest], Fun, Acc0, Options) ->
case Fun({traverse, F, L, R}, Acc0) of
{ok, Acc1} ->
@@ -272,7 +261,6 @@ fold(Db, #tree{} = Tree, [{F, L, P, R} | Rest], Fun, Acc0, Options) ->
{stop, Acc1}
end.
-
%% @doc Calculate the final reduce value for the whole ebtree.
%% @param Db An erlfdb database or transaction.
%% @param Tree the ebtree.
@@ -288,7 +276,6 @@ full_reduce(Db, #tree{} = Tree) ->
{MapValues, ReduceValues} = fold(Db, Tree, Fun, {[], []}, []),
do_reduce(Tree, MapValues, ReduceValues).
-
%% @equiv reduce(Db, Tree, StartKey, EndKey, [])
-spec reduce(Db :: term(), Tree :: #tree{}, StartKey :: term(), EndKey :: term()) -> term().
reduce(Db, #tree{} = Tree, StartKey, EndKey) ->
@@ -300,31 +287,102 @@ reduce(Db, #tree{} = Tree, StartKey, EndKey) ->
%% @param StartKey The beginning of the range
%% @param EndKey The end of the range
%% @returns the reduce value for the specified range
--spec reduce(Db :: term(), Tree :: #tree{}, StartKey :: term(),
- EndKey :: term(), Options :: [reduce_option()]) -> term().
+-spec reduce(
+ Db :: term(),
+ Tree :: #tree{},
+ StartKey :: term(),
+ EndKey :: term(),
+ Options :: [reduce_option()]
+) -> term().
reduce(Db, #tree{} = Tree, StartKey, EndKey, Options) ->
InclusiveStart = proplists:get_value(inclusive_start, Options, true),
InclusiveEnd = proplists:get_value(inclusive_end, Options, true),
Fun = fun
({visit, Key, Value}, {MapAcc, ReduceAcc}) ->
- BeforeStart = collate(Tree, Key, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
- AfterEnd = collate(Tree, Key, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
- InRange = collate(Tree, Key, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end)
- andalso collate(Tree, Key, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ BeforeStart = collate(
+ Tree,
+ Key,
+ StartKey,
+ if
+ InclusiveStart -> [lt];
+ true -> [lt, eq]
+ end
+ ),
+ AfterEnd = collate(
+ Tree,
+ Key,
+ EndKey,
+ if
+ InclusiveEnd -> [gt];
+ true -> [gt, eq]
+ end
+ ),
+ InRange =
+ collate(
+ Tree,
+ Key,
+ StartKey,
+ if
+ InclusiveStart -> [gt, eq];
+ true -> [gt]
+ end
+ ) andalso
+ collate(
+ Tree,
+ Key,
+ EndKey,
+ if
+ InclusiveEnd -> [lt, eq];
+ true -> [lt]
+ end
+ ),
if
BeforeStart ->
{ok, {MapAcc, ReduceAcc}};
AfterEnd ->
{stop, {MapAcc, ReduceAcc}};
InRange ->
- {ok, {[{Key, Value} | MapAcc], ReduceAcc}}
+ {ok, {[{Key, Value} | MapAcc], ReduceAcc}}
end;
({traverse, FirstKey, LastKey, Reduction}, {MapAcc, ReduceAcc}) ->
- BeforeStart = collate(Tree, LastKey, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
- AfterEnd = collate(Tree, FirstKey, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
- Whole = collate(Tree, FirstKey, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end)
- andalso collate(Tree, LastKey, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ BeforeStart = collate(
+ Tree,
+ LastKey,
+ StartKey,
+ if
+ InclusiveStart -> [lt];
+ true -> [lt, eq]
+ end
+ ),
+ AfterEnd = collate(
+ Tree,
+ FirstKey,
+ EndKey,
+ if
+ InclusiveEnd -> [gt];
+ true -> [gt, eq]
+ end
+ ),
+ Whole =
+ collate(
+ Tree,
+ FirstKey,
+ StartKey,
+ if
+ InclusiveStart -> [gt, eq];
+ true -> [gt]
+ end
+ ) andalso
+ collate(
+ Tree,
+ LastKey,
+ EndKey,
+ if
+ InclusiveEnd -> [lt, eq];
+ true -> [lt]
+ end
+ ),
if
BeforeStart ->
{skip, {MapAcc, ReduceAcc}};
@@ -339,17 +397,13 @@ reduce(Db, #tree{} = Tree, StartKey, EndKey, Options) ->
{MapValues, ReduceValues} = fold(Db, Tree, Fun, {[], []}, []),
do_reduce(Tree, MapValues, ReduceValues).
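%% A usage sketch, with the counting reduce_fun from the open/4 sketch:
%%   Count = ebtree:reduce(Db, Tree, <<"a">>, <<"m">>,
%%       [{inclusive_end, false}]).
%% This counts keys in ["a", "m"): subtrees wholly inside the range
%% reuse their stored reductions and only boundary leaves are re-reduced.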
-
do_reduce(#tree{} = Tree, [], []) ->
reduce_values(Tree, [], false);
-
do_reduce(#tree{} = Tree, [], ReduceValues) when is_list(ReduceValues) ->
reduce_values(Tree, ReduceValues, true);
-
do_reduce(#tree{} = Tree, MapValues, ReduceValues) when is_list(MapValues), is_list(ReduceValues) ->
do_reduce(Tree, [], [reduce_values(Tree, MapValues, false) | ReduceValues]).
-
%% @equiv group_reduce(Db, Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0, [])
-spec group_reduce(
Db :: term(),
@@ -358,11 +412,11 @@ do_reduce(#tree{} = Tree, MapValues, ReduceValues) when is_list(MapValues), is_l
EndKey :: term(),
GroupKeyFun :: fun((term()) -> group_key()),
UserAccFun :: fun(({group_key(), GroupValue :: term()}, Acc0 :: term()) -> Acc1 :: term()),
- UserAcc0 :: term()) -> Acc1 :: term().
+ UserAcc0 :: term()
+) -> Acc1 :: term().
group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0) ->
group_reduce(Db, Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0, []).
-
%% @doc Calculate the reduce value for all groups in the specified range.
%% @param Db An erlfdb database or transaction.
%% @param Tree The ebtree.
@@ -386,7 +440,8 @@ group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, User
GroupKeyFun :: fun((term()) -> group_key()),
UserAccFun :: fun(({group_key(), GroupValue :: term()}, Acc0 :: term()) -> Acc1 :: term()),
UserAcc0 :: term(),
- Options :: [fold_option() | reduce_option()]) -> Acc1 :: term().
+ Options :: [fold_option() | reduce_option()]
+) -> Acc1 :: term().
group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, UserAcc0, Options) ->
Dir = proplists:get_value(dir, Options, fwd),
InclusiveStart = proplists:get_value(inclusive_start, Options, true),
@@ -394,11 +449,43 @@ group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, User
NoGroupYet = ?MIN,
Fun = fun
({visit, Key, Value}, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}) ->
- BeforeStart = collate(Tree, Key, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
- AfterEnd = collate(Tree, Key, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
+ BeforeStart = collate(
+ Tree,
+ Key,
+ StartKey,
+ if
+ InclusiveStart -> [lt];
+ true -> [lt, eq]
+ end
+ ),
+ AfterEnd = collate(
+ Tree,
+ Key,
+ EndKey,
+ if
+ InclusiveEnd -> [gt];
+ true -> [gt, eq]
+ end
+ ),
InRange =
- collate(Tree, Key, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end) andalso
- collate(Tree, Key, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ collate(
+ Tree,
+ Key,
+ StartKey,
+ if
+ InclusiveStart -> [gt, eq];
+ true -> [gt]
+ end
+ ) andalso
+ collate(
+ Tree,
+ Key,
+ EndKey,
+ if
+ InclusiveEnd -> [lt, eq];
+ true -> [lt]
+ end
+ ),
KeyGroup = GroupKeyFun(Key),
SameGroup = collate(Tree, CurrentGroup, KeyGroup, [eq]),
if
@@ -417,20 +504,70 @@ group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, User
InRange ->
%% implicit end of current group and start of a new one
GroupValue = do_reduce(Tree, MapAcc, ReduceAcc),
- {ok, {KeyGroup, UserAccFun({CurrentGroup, GroupValue}, UserAcc), [{Key, Value}], []}}
+ {ok,
+ {KeyGroup, UserAccFun({CurrentGroup, GroupValue}, UserAcc), [{Key, Value}],
+ []}}
end;
({traverse, FirstKey, LastKey, Reduction}, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}) ->
- BeforeStart = collate(Tree, LastKey, StartKey, if InclusiveStart -> [lt]; true -> [lt, eq] end),
- AfterEnd = collate(Tree, FirstKey, EndKey, if InclusiveEnd -> [gt]; true -> [gt, eq] end),
+ BeforeStart = collate(
+ Tree,
+ LastKey,
+ StartKey,
+ if
+ InclusiveStart -> [lt];
+ true -> [lt, eq]
+ end
+ ),
+ AfterEnd = collate(
+ Tree,
+ FirstKey,
+ EndKey,
+ if
+ InclusiveEnd -> [gt];
+ true -> [gt, eq]
+ end
+ ),
Whole =
collate(Tree, CurrentGroup, GroupKeyFun(FirstKey), [eq]) andalso
- collate(Tree, CurrentGroup, GroupKeyFun(LastKey), [eq]),
+ collate(Tree, CurrentGroup, GroupKeyFun(LastKey), [eq]),
FirstInRange =
- collate(Tree, FirstKey, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end) andalso
- collate(Tree, FirstKey, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ collate(
+ Tree,
+ FirstKey,
+ StartKey,
+ if
+ InclusiveStart -> [gt, eq];
+ true -> [gt]
+ end
+ ) andalso
+ collate(
+ Tree,
+ FirstKey,
+ EndKey,
+ if
+ InclusiveEnd -> [lt, eq];
+ true -> [lt]
+ end
+ ),
LastInRange =
- collate(Tree, LastKey, StartKey, if InclusiveStart -> [gt, eq]; true -> [gt] end) andalso
- collate(Tree, LastKey, EndKey, if InclusiveEnd -> [lt, eq]; true -> [lt] end),
+ collate(
+ Tree,
+ LastKey,
+ StartKey,
+ if
+ InclusiveStart -> [gt, eq];
+ true -> [gt]
+ end
+ ) andalso
+ collate(
+ Tree,
+ LastKey,
+ EndKey,
+ if
+ InclusiveEnd -> [lt, eq];
+ true -> [lt]
+ end
+ ),
if
Dir == fwd andalso BeforeStart ->
{skip, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}};
@@ -446,7 +583,9 @@ group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, User
{ok, {CurrentGroup, UserAcc, MapAcc, ReduceAcc}}
end
end,
- {CurrentGroup, UserAcc1, MapValues, ReduceValues} = fold(Db, Tree, Fun, {NoGroupYet, UserAcc0, [], []}, Options),
+ {CurrentGroup, UserAcc1, MapValues, ReduceValues} = fold(
+ Db, Tree, Fun, {NoGroupYet, UserAcc0, [], []}, Options
+ ),
if
MapValues /= [] orelse ReduceValues /= [] ->
FinalGroup = do_reduce(Tree, MapValues, ReduceValues),
@@ -455,7 +594,6 @@ group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, User
UserAcc1
end.
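%% A usage sketch, assuming {Group, Item} tuple keys and the counting
%% reduce_fun:
%%   GroupKeyFun = fun({Group, _Item}) -> Group end,
%%   UserAccFun = fun({Group, Count}, Acc) -> [{Group, Count} | Acc] end,
%%   PerGroup = ebtree:group_reduce(Db, Tree, ebtree:min(), ebtree:max(),
%%       GroupKeyFun, UserAccFun, [], []).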
-
%% @doc Finds all key-value pairs for the specified range in forward order.
%% @param Db An erlfdb database or transaction.
%% @param Tree The ebtree.
@@ -464,20 +602,28 @@ group_reduce(Db, #tree{} = Tree, StartKey, EndKey, GroupKeyFun, UserAccFun, User
%% @param AccFun A function that is called when a key-value pair is found, returning an accumulator.
%% @param Acc0 The initial accumulator
%% @returns the final accumulator
--spec range(Db :: term(), Tree :: #tree{}, StartKey :: term(), EndKey :: term(),
- AccFun :: fun(), Acc0 :: term()) -> term().
+-spec range(
+ Db :: term(),
+ Tree :: #tree{},
+ StartKey :: term(),
+ EndKey :: term(),
+ AccFun :: fun(),
+ Acc0 :: term()
+) -> term().
range(Db, #tree{} = Tree, StartKey, EndKey, AccFun, Acc0) ->
erlfdb:transactional(Db, fun(Tx) ->
range(Tx, Tree, get_node(Tx, Tree, ?NODE_ROOT_ID), StartKey, EndKey, AccFun, Acc0)
end).
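%% A usage sketch, collecting every pair in the (inclusive) range:
%%   All = ebtree:range(Db, Tree, <<"a">>, <<"z">>,
%%       fun(KVs, Acc) -> Acc ++ KVs end, []).
%% AccFun is called once per leaf with that leaf's in-range {Key, Value}
%% pairs in key order; reverse_range/6 is the mirror image.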
-
range(_Tx, #tree{}, #node{id = ?NODE_ROOT_ID, members = []}, _StartKey, _EndKey, _AccFun, Acc0) ->
Acc0;
-
range(Tx, #tree{} = Tree, #node{level = 0} = Node, StartKey, EndKey, AccFun, Acc0) ->
- InRange = [{K, V} || {K, V} <- Node#node.members,
- collate(Tree, StartKey, K, [lt, eq]), collate(Tree, K, EndKey, [lt, eq])],
+ InRange = [
+ {K, V}
+ || {K, V} <- Node#node.members,
+ collate(Tree, StartKey, K, [lt, eq]),
+ collate(Tree, K, EndKey, [lt, eq])
+ ],
Acc1 = AccFun(InRange, Acc0),
LastKey = last_key(Node),
case Node#node.next /= undefined andalso collate(Tree, LastKey, EndKey, [lt, eq]) of
@@ -486,12 +632,10 @@ range(Tx, #tree{} = Tree, #node{level = 0} = Node, StartKey, EndKey, AccFun, Acc
false ->
Acc1
end;
-
range(Tx, #tree{} = Tree, #node{} = Node, StartKey, EndKey, AccFun, Acc) ->
ChildId = find_child_id(Tree, Node, StartKey),
range(Tx, Tree, get_node(Tx, Tree, ChildId), StartKey, EndKey, AccFun, Acc).
-
%% @doc Finds all key-value pairs for the specified range in reverse order.
%% @param Db An erlfdb database or transaction.
%% @param Tree The ebtree.
@@ -500,34 +644,44 @@ range(Tx, #tree{} = Tree, #node{} = Node, StartKey, EndKey, AccFun, Acc) ->
%% @param AccFun A function that is called when a key-value pair is found, returning an accumulator.
%% @param Acc0 The initial accumulator
%% @returns the final accumulator
--spec reverse_range(Db :: term(), Tree :: #tree{}, StartKey :: term(), EndKey :: term(),
- AccFun :: fun(), Acc0 :: term()) -> term().
+-spec reverse_range(
+ Db :: term(),
+ Tree :: #tree{},
+ StartKey :: term(),
+ EndKey :: term(),
+ AccFun :: fun(),
+ Acc0 :: term()
+) -> term().
reverse_range(Db, #tree{} = Tree, StartKey, EndKey, AccFun, Acc0) ->
erlfdb:transactional(Db, fun(Tx) ->
reverse_range(Tx, Tree, get_node(Tx, Tree, ?NODE_ROOT_ID), StartKey, EndKey, AccFun, Acc0)
end).
-
-reverse_range(_Tx, #tree{}, #node{id = ?NODE_ROOT_ID, members = []}, _StartKey, _EndKey, _AccFun, Acc0) ->
+reverse_range(
+ _Tx, #tree{}, #node{id = ?NODE_ROOT_ID, members = []}, _StartKey, _EndKey, _AccFun, Acc0
+) ->
Acc0;
-
reverse_range(Tx, #tree{} = Tree, #node{level = 0} = Node, StartKey, EndKey, AccFun, Acc0) ->
- InRange = [{K, V} || {K, V} <- Node#node.members,
- collate(Tree, StartKey, K, [lt, eq]), collate(Tree, K, EndKey, [lt, eq])],
+ InRange = [
+ {K, V}
+ || {K, V} <- Node#node.members,
+ collate(Tree, StartKey, K, [lt, eq]),
+ collate(Tree, K, EndKey, [lt, eq])
+ ],
Acc1 = AccFun(lists:reverse(InRange), Acc0),
FirstKey = first_key(Node),
case Node#node.prev /= undefined andalso collate(Tree, StartKey, FirstKey, [lt, eq]) of
true ->
- reverse_range(Tx, Tree, get_node(Tx, Tree, Node#node.prev), StartKey, EndKey, AccFun, Acc1);
+ reverse_range(
+ Tx, Tree, get_node(Tx, Tree, Node#node.prev), StartKey, EndKey, AccFun, Acc1
+ );
false ->
Acc1
end;
-
reverse_range(Tx, #tree{} = Tree, #node{} = Node, StartKey, EndKey, AccFun, Acc) ->
ChildId = find_child_id(Tree, Node, EndKey),
reverse_range(Tx, Tree, get_node(Tx, Tree, ChildId), StartKey, EndKey, AccFun, Acc).
-
%% @doc Inserts or updates a value in the ebtree
%% @param Db An erlfdb database or transaction.
%% @param Tree The ebtree.
@@ -537,10 +691,8 @@ reverse_range(Tx, #tree{} = Tree, #node{} = Node, StartKey, EndKey, AccFun, Acc)
-spec insert(Db :: term(), Tree :: #tree{}, Key :: term(), Value :: term()) -> #tree{}.
insert(_Db, #tree{} = _Tree, ?MIN, _Value) ->
erlang:error(min_not_allowed);
-
insert(_Db, #tree{} = _Tree, ?MAX, _Value) ->
erlang:error(max_not_allowed);
-
insert(Db, #tree{} = Tree, Key, Value) ->
erlfdb:transactional(Db, fun(Tx) ->
Root0 = get_node(Tx, Tree, ?NODE_ROOT_ID),
@@ -552,7 +704,8 @@ insert(Db, #tree{} = Tree, Key, Value) ->
Root1 = #node{
id = ?NODE_ROOT_ID,
level = Root0#node.level + 1,
- members = [{FirstKey, LastKey, OldRoot#node.id, []}]},
+ members = [{FirstKey, LastKey, OldRoot#node.id, []}]
+ },
{Root2, _, _} = split_child(Tx, Tree, Root1, OldRoot),
insert_nonfull(Tx, Tree, Root2, Key, Value);
false ->
@@ -561,7 +714,6 @@ insert(Db, #tree{} = Tree, Key, Value) ->
end),
Tree.
-
split_child(Tx, #tree{} = Tree, #node{} = Parent0, #node{} = Child) ->
{LeftMembers, RightMembers} = lists:split(Tree#tree.min, Child#node.members),
@@ -599,68 +751,77 @@ split_child(Tx, #tree{} = Tree, #node{} = Parent0, #node{} = Child) ->
Parent1 = Parent0#node{
members =
- umerge_members(Tree, Parent0#node.level, [{FirstLeftKey, LastLeftKey, LeftId, LeftReduction}],
- umerge_members(Tree, Parent0#node.level, [{FirstRightKey, LastRightKey, RightId, RightReduction}],
- lists:keydelete(Child#node.id, 3, Parent0#node.members)))
+ umerge_members(
+ Tree,
+ Parent0#node.level,
+ [{FirstLeftKey, LastLeftKey, LeftId, LeftReduction}],
+ umerge_members(
+ Tree,
+ Parent0#node.level,
+ [{FirstRightKey, LastRightKey, RightId, RightReduction}],
+ lists:keydelete(Child#node.id, 3, Parent0#node.members)
+ )
+ )
},
clear_node(Tx, Tree, Child),
set_nodes(Tx, Tree, [LeftChild, RightChild, Parent1]),
{Parent1, LeftChild, RightChild}.
-
update_prev_neighbour(_Tx, #tree{} = _Tree, #node{prev = undefined} = _Node) ->
ok;
-
update_prev_neighbour(Tx, #tree{} = Tree, #node{} = Node) ->
Left = get_node(Tx, Tree, Node#node.prev),
set_node(Tx, Tree, Left#node{next = Node#node.id}).
-
update_next_neighbour(_Tx, #tree{} = _Tree, #node{next = undefined} = _Node) ->
ok;
-
update_next_neighbour(Tx, #tree{} = Tree, #node{} = Node) ->
Left = get_node(Tx, Tree, Node#node.next),
set_node(Tx, Tree, Left#node{prev = Node#node.id}).
-
insert_nonfull(Tx, #tree{} = Tree, #node{level = 0} = Node0, Key, Value) ->
Node1 = Node0#node{
members = umerge_members(Tree, 0, [{Key, Value}], Node0#node.members)
},
set_node(Tx, Tree, Node0, Node1),
reduce_node(Tree, Node1);
-
insert_nonfull(Tx, #tree{} = Tree, #node{} = Node0, Key, Value) ->
ChildId0 = find_child_id(Tree, Node0, Key),
Child0 = get_node(Tx, Tree, ChildId0),
- {Node1, Child1} = case ?is_full(Tree, Child0) of
- true ->
- {Parent, LeftChild, RightChild} = split_child(Tx, Tree, Node0, Child0),
- ChildId = find_child_id(Tree, Parent, Key),
- Child = if
- ChildId =:= LeftChild#node.id ->
- LeftChild;
- ChildId =:= RightChild#node.id ->
- RightChild
- end,
- {Parent, Child};
- false ->
- {Node0, Child0}
- end,
+ {Node1, Child1} =
+ case ?is_full(Tree, Child0) of
+ true ->
+ {Parent, LeftChild, RightChild} = split_child(Tx, Tree, Node0, Child0),
+ ChildId = find_child_id(Tree, Parent, Key),
+ Child =
+ if
+ ChildId =:= LeftChild#node.id ->
+ LeftChild;
+ ChildId =:= RightChild#node.id ->
+ RightChild
+ end,
+ {Parent, Child};
+ false ->
+ {Node0, Child0}
+ end,
ChildId1 = Child1#node.id,
NewReduction = insert_nonfull(Tx, Tree, Child1, Key, Value),
- {CurrentFirstKey, CurrentLastKey, ChildId1, _OldReduction} = lists:keyfind(ChildId1, 3, Node1#node.members),
+ {CurrentFirstKey, CurrentLastKey, ChildId1, _OldReduction} = lists:keyfind(
+ ChildId1, 3, Node1#node.members
+ ),
[NewFirstKey, _] = sort_keys(Tree, [Key, CurrentFirstKey]),
[_, NewLastKey] = sort_keys(Tree, [Key, CurrentLastKey]),
Node2 = Node1#node{
- members = lists:keyreplace(ChildId1, 3, Node1#node.members,
- {NewFirstKey, NewLastKey, ChildId1, NewReduction})
+ members = lists:keyreplace(
+ ChildId1,
+ 3,
+ Node1#node.members,
+ {NewFirstKey, NewLastKey, ChildId1, NewReduction}
+ )
},
set_node(Tx, Tree, Node0, Node2),
reduce_node(Tree, Node2).
-
%% @doc Inserts or updates multiple values in the ebtree
%% @param Db An erlfdb database or transaction.
%% @param Tree The ebtree.
@@ -669,7 +830,6 @@ insert_nonfull(Tx, #tree{} = Tree, #node{} = Node0, Key, Value) ->
-spec insert_multi(Db :: term(), Tree :: #tree{}, KeyValues :: [{term(), term()}]) -> #tree{}.
insert_multi(_Db, #tree{} = Tree, []) ->
Tree;
-
insert_multi(Db, #tree{} = Tree, KeyValues) when is_list(KeyValues) ->
% Sort our KeyValues so that we can insert in order
SortedKeyValues = usort_members(Tree, 0, KeyValues),
@@ -681,35 +841,36 @@ insert_multi(Db, #tree{} = Tree, KeyValues) when is_list(KeyValues) ->
end),
Tree.
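%% A usage sketch: the pairs are sorted once and each affected subtree
%% is descended once, rather than once per key:
%%   Tree1 = ebtree:insert_multi(Db, Tree, [{K, K * 2} || K <- lists:seq(1, 100)]).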
-
insert_multi(Tx, #tree{} = Tree, #node{level = L} = Node, KeyValues) when L > 0 ->
ChildKVsPairs = assign_kvs(Tree, Node#node.members, KeyValues),
- NewMembers = lists:flatmap(fun({{_F, _L, P, _R} = Child, KVs}) ->
- case KVs of
- [] ->
- [Child];
- _ ->
- ChildNode = get_node(Tx, Tree, P),
- insert_multi(Tx, Tree, ChildNode, KVs)
- end
- end, ChildKVsPairs),
+ NewMembers = lists:flatmap(
+ fun({{_F, _L, P, _R} = Child, KVs}) ->
+ case KVs of
+ [] ->
+ [Child];
+ _ ->
+ ChildNode = get_node(Tx, Tree, P),
+ insert_multi(Tx, Tree, ChildNode, KVs)
+ end
+ end,
+ ChildKVsPairs
+ ),
split_node_multi(Tx, Tree, Node#node{members = NewMembers});
-
insert_multi(Tx, #tree{} = Tree, #node{level = 0} = Node, KeyValues) ->
NewMembers = umerge_members(Tree, 0, KeyValues, Node#node.members),
split_node_multi(Tx, Tree, Node#node{members = NewMembers}).
-
assign_kvs(_Tree, [Child], KeyValues) ->
[{Child, KeyValues}];
-
assign_kvs(Tree, [{_F, L, _P, _R} = Child | RestChildren], KeyValues) ->
- {KVsInChild, RestKVs} = lists:splitwith(fun({Key, _}) ->
- collate(Tree, Key, L, [lt, eq])
- end, KeyValues),
+ {KVsInChild, RestKVs} = lists:splitwith(
+ fun({Key, _}) ->
+ collate(Tree, Key, L, [lt, eq])
+ end,
+ KeyValues
+ ),
[{Child, KVsInChild} | assign_kvs(Tree, RestChildren, RestKVs)].
-
split_node_multi(Tx, Tree, Node) ->
NumMembers = length(Node#node.members),
% Not =< so that we don't leave full nodes
@@ -723,23 +884,24 @@ split_node_multi(Tx, Tree, Node) ->
false ->
clear_node(Tx, Tree, Node),
Nodes0 = create_nodes(Tx, Tree, Node),
- Nodes1 = if Node#node.level > 0 -> Nodes0; true ->
- Nodes2 = update_next_ptrs(Nodes0),
- Nodes3 = update_prev_ptrs(Nodes2),
- Nodes4 = set_first_prev_ptr(Tx, Tree, Node#node.prev, Nodes3),
- set_last_next_ptr(Tx, Tree, Node#node.next, Nodes4)
- end,
+ Nodes1 =
+ if
+ Node#node.level > 0 ->
+ Nodes0;
+ true ->
+ Nodes2 = update_next_ptrs(Nodes0),
+ Nodes3 = update_prev_ptrs(Nodes2),
+ Nodes4 = set_first_prev_ptr(Tx, Tree, Node#node.prev, Nodes3),
+ set_last_next_ptr(Tx, Tree, Node#node.next, Nodes4)
+ end,
set_nodes(Tx, Tree, Nodes1),
[to_member(Tree, N) || N <- Nodes1]
end.
-
grow_tree(_Tx, _Tree, #node{level = 0, members = [{_, _} | _]} = Root) ->
Root;
-
grow_tree(Tx, Tree, #node{level = 0, members = [{_, _, _, _} | _]} = Root) ->
grow_tree(Tx, Tree, Root#node{level = 1});
-
grow_tree(Tx, Tree, Root) ->
case length(Root#node.members) < Tree#tree.max of
true ->
@@ -753,14 +915,12 @@ grow_tree(Tx, Tree, Root) ->
grow_tree(Tx, Tree, NewRoot)
end.
-
to_member(Tree, Node) ->
FirstKey = first_key(Node#node.members),
LastKey = last_key(Node#node.members),
Reds = reduce_node(Tree, Node),
{FirstKey, LastKey, Node#node.id, Reds}.
-
create_nodes(Tx, #tree{} = Tree, Node) ->
case length(Node#node.members) >= Tree#tree.max of
true ->
@@ -780,36 +940,28 @@ create_nodes(Tx, #tree{} = Tree, Node) ->
[NewNode]
end.
-
update_next_ptrs([_] = Nodes) ->
Nodes;
-
update_next_ptrs([N1, N2 | Rest]) ->
[N1#node{next = N2#node.id} | update_next_ptrs([N2 | Rest])].
-
update_prev_ptrs([_] = Nodes) ->
Nodes;
-
update_prev_ptrs([N1, N2 | Rest]) ->
[N1 | update_prev_ptrs([N2#node{prev = N1#node.id} | Rest])].
-
set_first_prev_ptr(Tx, Tree, Prev, [Node | Rest]) ->
NewNode = Node#node{prev = Prev},
update_prev_neighbour(Tx, Tree, NewNode),
[NewNode | Rest].
-
set_last_next_ptr(Tx, Tree, Next, [Node0]) ->
Node1 = Node0#node{next = Next},
update_next_neighbour(Tx, Tree, Node1),
[Node1];
-
set_last_next_ptr(Tx, Tree, Next, [N | Rest]) ->
[N | set_last_next_ptr(Tx, Tree, Next, Rest)].
-
%% @doc Deletes an entry from the ebtree
%% @param Db An erlfdb database or transaction.
%% @param Tree The ebtree.
@@ -832,12 +984,10 @@ delete(Db, #tree{} = Tree, Key) ->
end),
Tree.
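%% A usage sketch:
%%   Tree1 = ebtree:delete(Db, Tree, <<"a">>),
%%   false = ebtree:lookup(Db, Tree1, <<"a">>).
%% Underfull children are merged with or rebalanced against a sibling
%% during the descent.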
-
delete(_Tx, #tree{} = _Tree, #node{level = 0} = Node, Key) ->
Node#node{
members = lists:keydelete(Key, 1, Node#node.members)
};
-
delete(Tx, #tree{} = Tree, #node{} = Parent0, Key) ->
ChildId0 = find_child_id(Tree, Parent0, Key),
Child0 = get_node(Tx, Tree, ChildId0),
@@ -846,27 +996,36 @@ delete(Tx, #tree{} = Tree, #node{} = Parent0, Key) ->
true ->
SiblingId = find_sibling_id(Tree, Parent0, ChildId0, Key),
Sibling = get_node(Tx, Tree, SiblingId),
- NewNodes = case ?at_min(Tree, Sibling) of
- true ->
- Merged = merge(Tree, Child1, Sibling),
- update_prev_neighbour(Tx, Tree, Merged),
- update_next_neighbour(Tx, Tree, Merged),
- [Merged];
- false ->
- {Left, Right} = rebalance(Tree, Child1, Sibling),
- update_prev_neighbour(Tx, Tree, Left),
- update_next_neighbour(Tx, Tree, Right),
- [Left, Right]
- end,
+ NewNodes =
+ case ?at_min(Tree, Sibling) of
+ true ->
+ Merged = merge(Tree, Child1, Sibling),
+ update_prev_neighbour(Tx, Tree, Merged),
+ update_next_neighbour(Tx, Tree, Merged),
+ [Merged];
+ false ->
+ {Left, Right} = rebalance(Tree, Child1, Sibling),
+ update_prev_neighbour(Tx, Tree, Left),
+ update_next_neighbour(Tx, Tree, Right),
+ [Left, Right]
+ end,
%% remove old members and insert new members
Members0 = Parent0#node.members,
Members1 = lists:keydelete(ChildId0, 3, Members0),
Members2 = lists:keydelete(Sibling#node.id, 3, Members1),
- Members3 = lists:foldl(fun(N, Acc) ->
- umerge_members(Tree, Parent0#node.level,
- [{first_key(N), last_key(N), N#node.id, reduce_node(Tree, N)}], Acc)
- end, Members2, NewNodes),
+ Members3 = lists:foldl(
+ fun(N, Acc) ->
+ umerge_members(
+ Tree,
+ Parent0#node.level,
+ [{first_key(N), last_key(N), N#node.id, reduce_node(Tree, N)}],
+ Acc
+ )
+ end,
+ Members2,
+ NewNodes
+ ),
Parent1 = Parent0#node{
members = Members3
@@ -876,14 +1035,19 @@ delete(Tx, #tree{} = Tree, #node{} = Parent0, Key) ->
Parent1;
false ->
set_node(Tx, Tree, Child0, Child1),
- {_OldFirstKey, _OldLastKey, ChildId0, _OldReduction} = lists:keyfind(ChildId0, 3, Parent0#node.members),
+ {_OldFirstKey, _OldLastKey, ChildId0, _OldReduction} = lists:keyfind(
+ ChildId0, 3, Parent0#node.members
+ ),
Parent0#node{
- members = lists:keyreplace(ChildId0, 3, Parent0#node.members,
- {first_key(Child1), last_key(Child1), Child1#node.id, reduce_node(Tree, Child1)})
+ members = lists:keyreplace(
+ ChildId0,
+ 3,
+ Parent0#node.members,
+ {first_key(Child1), last_key(Child1), Child1#node.id, reduce_node(Tree, Child1)}
+ )
}
end.
-
merge(#tree{} = Tree, #node{level = Level} = Node1, #node{level = Level} = Node2) ->
[Left, Right] = sort_nodes(Tree, [Node1, Node2]),
@@ -895,7 +1059,6 @@ merge(#tree{} = Tree, #node{level = Level} = Node1, #node{level = Level} = Node2
members = lists:append(Left#node.members, Right#node.members)
}.
-
rebalance(#tree{} = Tree, #node{level = Level} = Node1, #node{level = Level} = Node2) ->
[Left0, Right0] = sort_nodes(Tree, [Node1, Node2]),
@@ -917,26 +1080,21 @@ rebalance(#tree{} = Tree, #node{level = Level} = Node1, #node{level = Level} = N
}),
{Left1, Right1}.
-
%% lookup functions
find_child_id(#tree{} = Tree, #node{} = Node, Key) ->
element(3, find_child(Tree, Node, Key)).
-
find_sibling_id(#tree{} = Tree, #node{level = L} = Node0, Id, Key) when L > 0 ->
Node1 = Node0#node{members = lists:keydelete(Id, 3, Node0#node.members)},
find_child_id(Tree, Node1, Key).
-
find_child(#tree{} = Tree, #node{level = L} = Node, Key) when L > 0 ->
find_child_int(Tree, Node#node.members, Key).
-
find_child_int(#tree{} = _Tree, [Child], _Key) ->
Child;
-
-find_child_int(#tree{} = Tree, [{_F, L, _P, _R} = Child| Rest], Key) ->
+find_child_int(#tree{} = Tree, [{_F, L, _P, _R} = Child | Rest], Key) ->
case collate(Tree, Key, L, [lt, eq]) of
true ->
Child;
@@ -944,7 +1102,6 @@ find_child_int(#tree{} = Tree, [{_F, L, _P, _R} = Child| Rest], Key) ->
find_child_int(Tree, Rest, Key)
end.
-
%% metadata functions
get_meta(Tx, #tree{} = Tree, MetaKey) ->
@@ -958,7 +1115,6 @@ get_meta(Tx, #tree{} = Tree, MetaKey) ->
EncodeFun(decode, Key, Bin)
end.
-
set_meta(Tx, #tree{} = Tree, MetaKey, MetaValue) ->
#tree{prefix = Prefix, encode_fun = EncodeFun} = Tree,
Key = meta_key(Prefix, MetaKey),
@@ -968,7 +1124,6 @@ set_meta(Tx, #tree{} = Tree, MetaKey, MetaValue) ->
EncodeFun(encode, Key, MetaValue)
).
-
meta_key(Prefix, MetaKey) when is_binary(Prefix) ->
erlfdb_tuple:pack({?META, MetaKey}, Prefix).
@@ -979,42 +1134,40 @@ get_node(Tx, #tree{} = Tree, Id) ->
Value = persist(Tree, Tx, get, Key),
decode_node(Tree, Id, Key, Value).
-
clear_nodes(Tx, #tree{} = Tree, Nodes) ->
- lists:foreach(fun(Node) ->
- clear_node(Tx, Tree, Node)
- end, Nodes).
-
+ lists:foreach(
+ fun(Node) ->
+ clear_node(Tx, Tree, Node)
+ end,
+ Nodes
+ ).
clear_node(Tx, #tree{} = Tree, #node{} = Node) ->
- Key = node_key(Tree#tree.prefix, Node#node.id),
- persist(Tree, Tx, clear, Key).
-
+ Key = node_key(Tree#tree.prefix, Node#node.id),
+ persist(Tree, Tx, clear, Key).
set_nodes(Tx, #tree{} = Tree, Nodes) ->
- lists:foreach(fun(Node) ->
- set_node(Tx, Tree, Node)
- end, Nodes).
-
+ lists:foreach(
+ fun(Node) ->
+ set_node(Tx, Tree, Node)
+ end,
+ Nodes
+ ).
set_node(_Tx, #tree{} = _Tree, #node{} = Same, #node{} = Same) ->
ok;
-
set_node(Tx, #tree{} = Tree, #node{} = _From, #node{} = To) ->
set_node(Tx, Tree, To).
-
set_node(Tx, #tree{} = Tree, #node{} = Node) ->
?validate_node(Tree, Node),
Key = node_key(Tree#tree.prefix, Node#node.id),
Value = encode_node(Tree, Key, Node),
persist(Tree, Tx, set, [Key, Value]).
-
node_key(Prefix, Id) when is_binary(Prefix), is_binary(Id), bit_size(Id) =:= 128 ->
erlfdb_tuple:pack({?NODE, Id}, Prefix).
-
%% @doc Walks the whole tree and checks it for consistency.
%% It also prints it to the screen.
validate_tree(Db, #tree{} = Tree) ->
@@ -1023,25 +1176,20 @@ validate_tree(Db, #tree{} = Tree) ->
validate_tree(Tx, Tree, Root)
end).
-
validate_tree(_Tx, #tree{} = Tree, #node{level = 0} = Node) ->
print_node(Node),
validate_node(Tree, Node);
-
validate_tree(Tx, #tree{} = Tree, #node{} = Node) ->
print_node(Node),
validate_node(Tree, Node),
validate_tree(Tx, Tree, Node#node.members);
-
validate_tree(_Tx, #tree{} = _Tree, []) ->
ok;
-
validate_tree(Tx, #tree{} = Tree, [{_F, _L, P, _R} | Rest]) ->
Node = get_node(Tx, Tree, P),
validate_tree(Tx, Tree, Node),
validate_tree(Tx, Tree, Rest).
-
validate_node(#tree{} = Tree, #node{} = Node) ->
NumKeys = length(Node#node.members),
IsLeaf = Node#node.level =:= 0,
@@ -1067,32 +1215,25 @@ validate_node(#tree{} = Tree, #node{} = Node) ->
ok
end.
-
%% data marshalling functions (encodes unnecessary fields as a NIL_REF)
encode_node(#tree{} = Tree, Key, #node{prev = undefined} = Node) ->
encode_node(Tree, Key, Node#node{prev = []});
-
encode_node(#tree{} = Tree, Key, #node{next = undefined} = Node) ->
encode_node(Tree, Key, Node#node{next = []});
-
encode_node(#tree{} = Tree, Key, #node{} = Node) ->
#tree{encode_fun = EncodeFun} = Tree,
EncodeFun(encode, Key, Node#node{id = []}).
-
decode_node(#tree{} = Tree, Id, Key, Value) when is_binary(Value) ->
#tree{encode_fun = EncodeFun} = Tree,
Term = EncodeFun(decode, Key, Value),
decode_node(Id, Term).
-
decode_node(Id, #node{prev = []} = Node) ->
decode_node(Id, Node#node{prev = undefined});
-
decode_node(Id, #node{next = []} = Node) ->
decode_node(Id, Node#node{next = undefined});
-
decode_node(Id, #node{} = Node) ->
Node#node{id = Id}.
@@ -1101,35 +1242,26 @@ decode_node(Id, #node{} = Node) ->
reduce_noop(_KVs, _Rereduce) ->
[].
-
reduce_node(#tree{} = Tree, #node{level = 0} = Node) ->
reduce_values(Tree, Node#node.members, false);
-
reduce_node(#tree{} = Tree, #node{} = Node) ->
Rs = [R || {_F, _L, _P, R} <- Node#node.members],
reduce_values(Tree, Rs, true).
-
reduce_values(#tree{} = Tree, Values, Rereduce) when is_list(Values) ->
#tree{reduce_fun = ReduceFun} = Tree,
ReduceFun(Values, Rereduce).
-
%% collation functions
-
collate(#tree{} = _Tree, ?MIN, _B) ->
lt;
-
collate(#tree{} = _Tree, _A, ?MIN) ->
gt;
-
collate(#tree{} = _Tree, ?MAX, _B) ->
gt;
-
collate(#tree{} = _Tree, _A, ?MAX) ->
lt;
-
collate(#tree{} = Tree, A, B) ->
#tree{collate_fun = CollateFun} = Tree,
case CollateFun(A, B) of
@@ -1139,11 +1271,9 @@ collate(#tree{} = Tree, A, B) ->
_ -> error(invalid_collation_result)
end.
-
collate(#tree{} = Tree, A, B, Allowed) ->
lists:member(collate(Tree, A, B), Allowed).
-
umerge_members(#tree{} = Tree, Level, List1, List2) ->
Collate = fun
({K1, _V1}, {K2, _V2}) when Level == 0 ->
@@ -1153,7 +1283,6 @@ umerge_members(#tree{} = Tree, Level, List1, List2) ->
end,
umerge_members_int(Collate, List1, List2, []).
-
umerge_members_int(Collate, [], [H2 | T2], [HAcc | _] = Acc) ->
case Collate(H2, HAcc) of
lt -> erlang:error(unsorted_members);
@@ -1169,23 +1298,18 @@ umerge_members_int(Collate, [H1 | T1], [H2 | T2], Acc) ->
gt -> umerge_members_int(Collate, [H1 | T1], T2, [H2 | Acc])
end.
-
sort_keys(#tree{} = Tree, List) ->
- CollateWrapper = fun
- (K1, K2) ->
- collate(Tree, K1, K2, [lt, eq])
+ CollateWrapper = fun(K1, K2) ->
+ collate(Tree, K1, K2, [lt, eq])
end,
lists:sort(CollateWrapper, List).
-
sort_nodes(#tree{} = Tree, List) ->
- CollateWrapper = fun
- (#node{} = N1, #node{} = N2) ->
- collate(Tree, first_key(N1), first_key(N2), [lt, eq])
+ CollateWrapper = fun(#node{} = N1, #node{} = N2) ->
+ collate(Tree, first_key(N1), first_key(N2), [lt, eq])
end,
lists:sort(CollateWrapper, List).
-
sort_members(#tree{} = Tree, Level, List) ->
CollateWrapper = fun
({K1, _V1}, {K2, _V2}) when Level == 0 ->
@@ -1195,7 +1319,6 @@ sort_members(#tree{} = Tree, Level, List) ->
end,
lists:sort(CollateWrapper, List).
-
usort_members(#tree{} = Tree, Level, List) ->
CollateWrapper = fun
({K1, _V1}, {K2, _V2}) when Level == 0 ->
@@ -1205,23 +1328,17 @@ usort_members(#tree{} = Tree, Level, List) ->
end,
lists:usort(CollateWrapper, List).
-
collate_raw(A, B) when A < B ->
lt;
-
collate_raw(A, B) when A > B ->
gt;
-
collate_raw(A, A) ->
eq.
-
%% encoding function
encode_erlang(encode, _Key, Value) ->
term_to_binary(Value, [{minor_version, 2}]);
-
-
encode_erlang(decode, _Key, Value) ->
binary_to_term(Value, [safe]).
@@ -1231,36 +1348,30 @@ persist(#tree{} = Tree, Tx, Action, Args) ->
#tree{persist_fun = PersistFun} = Tree,
PersistFun(Tx, Action, Args).
-
simple_persist(Tx, set, [Key, Value]) ->
erlfdb:set(Tx, Key, Value);
-
simple_persist(Tx, get, Key) ->
erlfdb:wait(erlfdb:get(Tx, Key));
-
simple_persist(Tx, clear, Key) ->
erlfdb:clear(Tx, Key).
%% private functions
-init_order(#tree{} = Tree, Order)
- when is_integer(Order), Order > 2, Order rem 2 == 0 ->
+init_order(#tree{} = Tree, Order) when
+ is_integer(Order), Order > 2, Order rem 2 == 0
+->
Tree#tree{
min = Order div 2,
max = Order
}.
-
first_key(#node{} = Node) ->
first_key(Node#node.members);
-
first_key(Members) when is_list(Members) ->
element(1, hd(Members)).
-
last_key(#node{} = Node) ->
last_key(Node#node.members);
-
last_key(Members) when is_list(Members) ->
case lists:last(Members) of
{K, _V} ->
@@ -1269,34 +1380,43 @@ last_key(Members) when is_list(Members) ->
L
end.
-
new_node_id() ->
crypto:strong_rand_bytes(16).
-
%% remove prev/next pointers for nonleaf nodes
remove_pointers_if_not_leaf(#node{level = 0} = Node) ->
Node;
-
remove_pointers_if_not_leaf(#node{} = Node) ->
Node#node{prev = undefined, next = undefined}.
-
-
print_node(#node{level = 0} = Node) ->
- io:format("#node{id = ~s, level = ~w, prev = ~s, next = ~s, members = ~w}~n~n",
- [b64(Node#node.id), Node#node.level, b64(Node#node.prev), b64(Node#node.next),
- Node#node.members]);
-
+ io:format(
+ "#node{id = ~s, level = ~w, prev = ~s, next = ~s, members = ~w}~n~n",
+ [
+ b64(Node#node.id),
+ Node#node.level,
+ b64(Node#node.prev),
+ b64(Node#node.next),
+ Node#node.members
+ ]
+ );
print_node(#node{} = Node) ->
- io:format("#node{id = ~s, level = ~w, prev = ~s, next = ~s, members = ~s}~n~n",
- [base64:encode(Node#node.id), Node#node.level, b64(Node#node.prev), b64(Node#node.next),
- [io_lib:format("{~w, ~w, ~s, ~w}, ", [F, L, b64(P), R]) || {F, L, P, R} <- Node#node.members]]).
-
+ io:format(
+ "#node{id = ~s, level = ~w, prev = ~s, next = ~s, members = ~s}~n~n",
+ [
+ base64:encode(Node#node.id),
+ Node#node.level,
+ b64(Node#node.prev),
+ b64(Node#node.next),
+ [
+ io_lib:format("{~w, ~w, ~s, ~w}, ", [F, L, b64(P), R])
+ || {F, L, P, R} <- Node#node.members
+ ]
+ ]
+ ).
b64(undefined) ->
undefined;
-
b64(Bin) ->
base64:encode(Bin).
@@ -1308,18 +1428,14 @@ b64(Bin) ->
reduce_sum(KVs, false) ->
{_, Vs} = lists:unzip(KVs),
lists:sum(Vs);
-
reduce_sum(Rs, true) ->
lists:sum(Rs).
-
reduce_count(KVs, false) ->
length(KVs);
-
reduce_count(Rs, true) ->
lists:sum(Rs).
-
reduce_stats(KVs, false) ->
{_, Vs} = lists:unzip(KVs),
{
@@ -1329,19 +1445,23 @@ reduce_stats(KVs, false) ->
length(Vs),
lists:sum([V * V || V <- Vs])
};
-
reduce_stats(Rs, true) ->
lists:foldl(
- fun({Sum, Min, Max, Count, SumSqr},
- {SumAcc, MinAcc, MaxAcc, CountAcc, SumSqrAcc}) ->
- {
- Sum + SumAcc,
- erlang:min(Min, MinAcc),
- erlang:max(Max, MaxAcc),
- Count + CountAcc,
- SumSqr + SumSqrAcc
- } end, hd(Rs), tl(Rs)).
-
+ fun(
+ {Sum, Min, Max, Count, SumSqr},
+ {SumAcc, MinAcc, MaxAcc, CountAcc, SumSqrAcc}
+ ) ->
+ {
+ Sum + SumAcc,
+ erlang:min(Min, MinAcc),
+ erlang:max(Max, MaxAcc),
+ Count + CountAcc,
+ SumSqr + SumSqrAcc
+ }
+ end,
+ hd(Rs),
+ tl(Rs)
+ ).
collation_fun_test_() ->
Tree = #tree{collate_fun = fun collate_raw/2},
@@ -1351,44 +1471,38 @@ collation_fun_test_() ->
?_test(?assertEqual(eq, collate(Tree, 3, 3)))
].
-
collate_validation_test() ->
Tree = #tree{collate_fun = fun(_A, _B) -> foo end},
?assertError(invalid_collation_result, collate(Tree, 1, 2)).
-
order_is_preserved_test() ->
Db = erlfdb_util:get_test_db([empty]),
- open(Db, <<1,2,3>>, 4),
- Tree = open(Db, <<1,2,3>>, 8),
+ open(Db, <<1, 2, 3>>, 4),
+ Tree = open(Db, <<1, 2, 3>>, 8),
?assertEqual(4, Tree#tree.max).
-
min_not_allowed_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
+ Tree = open(Db, <<1, 2, 3>>, 4),
?assertError(min_not_allowed, ebtree:insert(Db, Tree, ebtree:min(), foo)).
-
max_not_allowed_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
+ Tree = open(Db, <<1, 2, 3>>, 4),
?assertError(max_not_allowed, ebtree:insert(Db, Tree, ebtree:max(), foo)).
-
lookup_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ Tree = open(Db, <<1, 2, 3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, 16)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
lists:foreach(fun(Key) -> ?assertEqual({Key, Key + 1}, lookup(Db, Tree, Key)) end, Keys),
?assertEqual(false, lookup(Db, Tree, 101)).
-
lookup_multi_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ Tree = open(Db, <<1, 2, 3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, 16)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
validate_tree(Db, Tree),
?assertEqual([{1, 2}], lookup_multi(Db, Tree, [1])),
@@ -1396,53 +1510,55 @@ lookup_multi_test() ->
?assertEqual([{15, 16}, {4, 5}, {2, 3}], lookup_multi(Db, Tree, [2, 101, 15, 4, -3])),
?assertEqual([{2, 3}], lookup_multi(Db, Tree, [1.5, 2])).
-
insert_multi_test() ->
Db = erlfdb_util:get_test_db([empty]),
Tree = open(Db, <<1, 2, 3>>, 4),
- AllKVs = lists:foldl(fun(_Seq, Acc) ->
- KVs = [{rand:uniform(), rand:uniform()} || _ <- lists:seq(1, 16)],
- insert_multi(Db, Tree, KVs),
- KVs ++ Acc
- end, [], lists:seq(1, 16)),
- lists:foreach(fun({K, V}) ->
- ?assertEqual({K, V}, lookup(Db, Tree, K))
- end, AllKVs),
+ AllKVs = lists:foldl(
+ fun(_Seq, Acc) ->
+ KVs = [{rand:uniform(), rand:uniform()} || _ <- lists:seq(1, 16)],
+ insert_multi(Db, Tree, KVs),
+ KVs ++ Acc
+ end,
+ [],
+ lists:seq(1, 16)
+ ),
+ lists:foreach(
+ fun({K, V}) ->
+ ?assertEqual({K, V}, lookup(Db, Tree, K))
+ end,
+ AllKVs
+ ),
validate_tree(Db, Tree).
-
delete_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ Tree = open(Db, <<1, 2, 3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, 16)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
lists:foreach(fun(Key) -> ?assertEqual({Key, Key + 1}, lookup(Db, Tree, Key)) end, Keys),
lists:foreach(fun(Key) -> delete(Db, Tree, Key) end, Keys),
lists:foreach(fun(Key) -> ?assertEqual(false, lookup(Db, Tree, Key)) end, Keys).
-
range_after_delete_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, 16)])],
+ Tree = open(Db, <<1, 2, 3>>, 4),
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, 16)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key + 1) end, Keys),
lists:foreach(fun(Key) -> ?assertEqual({Key, Key + 1}, lookup(Db, Tree, Key)) end, Keys),
lists:foreach(fun(Key) -> delete(Db, Tree, Key) end, lists:seq(1, 16, 2)),
?assertEqual(8, range(Db, Tree, 1, 16, fun(E, A) -> length(E) + A end, 0)),
?assertEqual(8, reverse_range(Db, Tree, 1, 16, fun(E, A) -> length(E) + A end, 0)).
-
full_reduce_empty_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
?assertEqual(0, full_reduce(Db, Tree)).
-
full_reduce_test_() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
TestFun = fun(Max) ->
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
?assertEqual(round(Max * ((1 + Max) / 2)), full_reduce(Db, Tree))
end,
@@ -1451,23 +1567,21 @@ full_reduce_test_() ->
?_test(TestFun(8))
].
-
full_reduce_after_delete_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
Max = 16,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
?assertEqual(round(Max * ((1 + Max) / 2)), full_reduce(Db, Tree)),
lists:foreach(fun(Key) -> delete(Db, Tree, Key) end, Keys),
?assertEqual(0, full_reduce(Db, Tree)).
-
count_reduce_test_() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_count/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_count/2}]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
Expected = fun(S, E) -> E - S + 1 end,
[
@@ -1477,19 +1591,49 @@ count_reduce_test_() ->
?_test(?assertEqual(Expected(1, 1), reduce(Db, Tree, 1, 1))),
?_test(?assertEqual(Expected(1, 100), reduce(Db, Tree, 0, 200))),
?_test(?assertEqual(Expected(5, 7), reduce(Db, Tree, 5, 7))),
- ?_test(?assertEqual(Expected(6, 7), reduce(Db, Tree, 5, 7,
- [{inclusive_start, false}]))),
- ?_test(?assertEqual(Expected(5, 6), reduce(Db, Tree, 5, 7,
- [{inclusive_end, false}]))),
- ?_test(?assertEqual(Expected(6, 6), reduce(Db, Tree, 5, 7,
- [{inclusive_start, false}, {inclusive_end, false}])))
+ ?_test(
+ ?assertEqual(
+ Expected(6, 7),
+ reduce(
+ Db,
+ Tree,
+ 5,
+ 7,
+ [{inclusive_start, false}]
+ )
+ )
+ ),
+ ?_test(
+ ?assertEqual(
+ Expected(5, 6),
+ reduce(
+ Db,
+ Tree,
+ 5,
+ 7,
+ [{inclusive_end, false}]
+ )
+ )
+ ),
+ ?_test(
+ ?assertEqual(
+ Expected(6, 6),
+ reduce(
+ Db,
+ Tree,
+ 5,
+ 7,
+ [{inclusive_start, false}, {inclusive_end, false}]
+ )
+ )
+ )
].
sum_reduce_test_() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
Expected = fun(S, E) -> lists:sum(lists:seq(S, E)) end,
[
@@ -1501,84 +1645,149 @@ sum_reduce_test_() ->
?_test(?assertEqual(Expected(5, 7), reduce(Db, Tree, 5, 7)))
].
-
stats_reduce_test_() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_stats/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_stats/2}]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
[
- ?_test(?assertEqual({15,1,5,5,55}, reduce(Db, Tree, 1, 5))),
- ?_test(?assertEqual({605,50,60,11,33385}, reduce(Db, Tree, 50, 60))),
- ?_test(?assertEqual({3276,21,83,63,191184}, reduce(Db, Tree, 21, 83))),
- ?_test(?assertEqual({1,1,1,1,1}, reduce(Db, Tree, 1, 1))),
- ?_test(?assertEqual({5050,1,100,100,338350}, reduce(Db, Tree, 0, 200))),
- ?_test(?assertEqual({18,5,7,3,110}, reduce(Db, Tree, 5, 7)))
+ ?_test(?assertEqual({15, 1, 5, 5, 55}, reduce(Db, Tree, 1, 5))),
+ ?_test(?assertEqual({605, 50, 60, 11, 33385}, reduce(Db, Tree, 50, 60))),
+ ?_test(?assertEqual({3276, 21, 83, 63, 191184}, reduce(Db, Tree, 21, 83))),
+ ?_test(?assertEqual({1, 1, 1, 1, 1}, reduce(Db, Tree, 1, 1))),
+ ?_test(?assertEqual({5050, 1, 100, 100, 338350}, reduce(Db, Tree, 0, 200))),
+ ?_test(?assertEqual({18, 5, 7, 3, 110}, reduce(Db, Tree, 5, 7)))
].
-
group_reduce_level_test_() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_sum/2}]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
GroupKeyFun = fun(Key) -> lists:sublist(Key, 2) end,
- UserAccFun = fun({K,V}, Acc) -> Acc ++ [{K, V}] end,
+ UserAccFun = fun({K, V}, Acc) -> Acc ++ [{K, V}] end,
lists:foreach(fun(Key) -> insert(Db, Tree, [Key rem 4, Key rem 3, Key], Key) end, Keys),
[
- ?_test(?assertEqual([{[1, 0], 408}, {[1, 1], 441}, {[1, 2], 376}],
- group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, []))),
-
- ?_test(?assertEqual([{[1, 0], 408}, {[1, 1], 441}, {[1, 2], 376}],
- group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, [], [{dir, fwd}]))),
-
- ?_test(?assertEqual([{[1, 2], 376}, {[1, 1], 441}, {[1, 0], 408}],
- group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, [], [{dir, rev}]))),
-
- ?_test(?assertEqual([{[0,0],432}, {[0,1],468}, {[0,2],400}, {[1,0],408}, {[1,1],441}, {[1,2],376},
- {[2,0],384}, {[2,1],416}, {[2,2],450}, {[3,0],459}, {[3,1],392}, {[3,2],424}],
- group_reduce(Db, Tree, ebtree:min(), ebtree:max(), GroupKeyFun, UserAccFun, [])))
+ ?_test(
+ ?assertEqual(
+ [{[1, 0], 408}, {[1, 1], 441}, {[1, 2], 376}],
+ group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, [])
+ )
+ ),
+
+ ?_test(
+ ?assertEqual(
+ [{[1, 0], 408}, {[1, 1], 441}, {[1, 2], 376}],
+ group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, [], [{dir, fwd}])
+ )
+ ),
+
+ ?_test(
+ ?assertEqual(
+ [{[1, 2], 376}, {[1, 1], 441}, {[1, 0], 408}],
+ group_reduce(Db, Tree, [1], [2], GroupKeyFun, UserAccFun, [], [{dir, rev}])
+ )
+ ),
+
+ ?_test(
+ ?assertEqual(
+ [
+ {[0, 0], 432},
+ {[0, 1], 468},
+ {[0, 2], 400},
+ {[1, 0], 408},
+ {[1, 1], 441},
+ {[1, 2], 376},
+ {[2, 0], 384},
+ {[2, 1], 416},
+ {[2, 2], 450},
+ {[3, 0], 459},
+ {[3, 1], 392},
+ {[3, 2], 424}
+ ],
+ group_reduce(Db, Tree, ebtree:min(), ebtree:max(), GroupKeyFun, UserAccFun, [])
+ )
+ )
].
-
group_reduce_int_test_() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4, [{reduce_fun, fun reduce_count/2}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{reduce_fun, fun reduce_count/2}]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
GroupKeyFun = fun(_Key) -> null end,
- UserAccFun = fun({K,V}, Acc) -> Acc ++ [{K, V}] end,
+ UserAccFun = fun({K, V}, Acc) -> Acc ++ [{K, V}] end,
lists:foreach(fun(Key) -> insert(Db, Tree, Key, Key) end, Keys),
[
- ?_test(?assertEqual([{null, 100}], group_reduce(Db, Tree,
- ebtree:min(), ebtree:max(), GroupKeyFun, UserAccFun, []))),
- ?_test(?assertEqual([{null, 99}], group_reduce(Db, Tree, 2, ebtree:max(), GroupKeyFun, UserAccFun, []))),
- ?_test(?assertEqual([{null, 96}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, []))),
- ?_test(?assertEqual([{null, 95}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [], [{inclusive_start, false}]))),
- ?_test(?assertEqual([{null, 95}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [], [{inclusive_end, false}]))),
- ?_test(?assertEqual([{null, 94}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [],
- [{inclusive_start, false}, {inclusive_end, false}])))
+ ?_test(
+ ?assertEqual(
+ [{null, 100}],
+ group_reduce(
+ Db,
+ Tree,
+ ebtree:min(),
+ ebtree:max(),
+ GroupKeyFun,
+ UserAccFun,
+ []
+ )
+ )
+ ),
+ ?_test(
+ ?assertEqual(
+ [{null, 99}], group_reduce(Db, Tree, 2, ebtree:max(), GroupKeyFun, UserAccFun, [])
+ )
+ ),
+ ?_test(
+ ?assertEqual([{null, 96}], group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, []))
+ ),
+ ?_test(
+ ?assertEqual(
+ [{null, 95}],
+ group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [], [
+ {inclusive_start, false}
+ ])
+ )
+ ),
+ ?_test(
+ ?assertEqual(
+ [{null, 95}],
+ group_reduce(Db, Tree, 3, 98, GroupKeyFun, UserAccFun, [], [{inclusive_end, false}])
+ )
+ ),
+ ?_test(
+ ?assertEqual(
+ [{null, 94}],
+ group_reduce(
+ Db,
+ Tree,
+ 3,
+ 98,
+ GroupKeyFun,
+ UserAccFun,
+ [],
+ [{inclusive_start, false}, {inclusive_end, false}]
+ )
+ )
+ )
].
-
raw_collation_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
+ Tree = open(Db, <<1, 2, 3>>, 4),
insert(Db, Tree, null, null),
insert(Db, Tree, 1, 1),
?assertEqual([{1, 1}, {null, null}], range(Db, Tree, 1, null, fun(E, A) -> A ++ E end, [])).
-
custom_collation_test() ->
Db = erlfdb_util:get_test_db([empty]),
CollateFun = fun(A, B) -> collate_raw(B, A) end,
- Tree = open(Db, <<1,2,3>>, 4, [{collate_fun, CollateFun}]),
+ Tree = open(Db, <<1, 2, 3>>, 4, [{collate_fun, CollateFun}]),
insert(Db, Tree, 1, 1),
insert(Db, Tree, 2, 2),
?assertEqual([{2, 2}, {1, 1}], range(Db, Tree, 3, 0, fun(E, A) -> A ++ E end, [])).
-
empty_range_test() ->
Db = erlfdb_util:get_test_db([empty]),
Tree = open(Db, <<1, 2, 3>>, 10),
@@ -1587,23 +1796,26 @@ empty_range_test() ->
range(Db, Tree, min(), max(), fun(_, A) -> A end, blah)
).
-
range_test_() ->
{timeout, 1000, fun() ->
Db = erlfdb_util:get_test_db([empty]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
- Tree = lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, open(Db, <<1,2,3>>, 10), Keys),
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Tree = lists:foldl(
+ fun(Key, T) -> insert(Db, T, Key, Key + 1) end, open(Db, <<1, 2, 3>>, 10), Keys
+ ),
lists:foreach(
fun(_) ->
[StartKey, EndKey] = lists:sort([rand:uniform(Max), rand:uniform(Max)]),
- ?assertEqual([{K, K + 1} || K <- lists:seq(StartKey, EndKey)],
+ ?assertEqual(
+ [{K, K + 1} || K <- lists:seq(StartKey, EndKey)],
range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
- ) end,
- lists:seq(1, 100))
+ )
+ end,
+ lists:seq(1, 100)
+ )
end}.
-
empty_reverse_range_test() ->
Db = erlfdb_util:get_test_db([empty]),
Tree = open(Db, <<1, 2, 3>>, 10),
@@ -1612,114 +1824,165 @@ empty_reverse_range_test() ->
reverse_range(Db, Tree, min(), max(), fun(_, A) -> A end, blah)
).
-
reverse_range_test_() ->
{timeout, 1000, fun() ->
Db = erlfdb_util:get_test_db([empty]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
- Tree = lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, open(Db, <<1,2,3>>, 8), Keys),
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Tree = lists:foldl(
+ fun(Key, T) -> insert(Db, T, Key, Key + 1) end, open(Db, <<1, 2, 3>>, 8), Keys
+ ),
lists:foreach(
fun(_) ->
[StartKey, EndKey] = lists:sort([rand:uniform(Max), rand:uniform(Max)]),
- ?assertEqual([{K, K + 1} || K <- lists:seq(EndKey, StartKey, -1)],
+ ?assertEqual(
+ [{K, K + 1} || K <- lists:seq(EndKey, StartKey, -1)],
reverse_range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
- ) end,
- lists:seq(1, 100))
+ )
+ end,
+ lists:seq(1, 100)
+ )
end}.
-
custom_collation_range_test_() ->
{timeout, 1000, fun() ->
Db = erlfdb_util:get_test_db([empty]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
CollateFun = fun(A, B) -> collate_raw(B, A) end,
- Tree = open(Db, <<1,2,3>>, 6, [{collate_fun, CollateFun}]),
+ Tree = open(Db, <<1, 2, 3>>, 6, [{collate_fun, CollateFun}]),
lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, Tree, Keys),
lists:foreach(
fun(_) ->
[StartKey, EndKey] = sort_keys(Tree, [rand:uniform(Max), rand:uniform(Max)]),
- Seq = if
- StartKey < EndKey ->
- lists:seq(StartKey, EndKey);
- true ->
- lists:seq(StartKey, EndKey, -1)
- end,
- ?assertEqual([{K, K + 1} || K <- Seq],
+ Seq =
+ if
+ StartKey < EndKey ->
+ lists:seq(StartKey, EndKey);
+ true ->
+ lists:seq(StartKey, EndKey, -1)
+ end,
+ ?assertEqual(
+ [{K, K + 1} || K <- Seq],
range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
- ) end,
- lists:seq(1, 100))
+ )
+ end,
+ lists:seq(1, 100)
+ )
end}.
-
custom_collation_reverse_range_test_() ->
{timeout, 1000, fun() ->
Db = erlfdb_util:get_test_db([empty]),
Max = 100,
- Keys = [X || {_, X} <- lists:sort([ {rand:uniform(), N} || N <- lists:seq(1, Max)])],
+ Keys = [X || {_, X} <- lists:sort([{rand:uniform(), N} || N <- lists:seq(1, Max)])],
CollateFun = fun(A, B) -> collate_raw(B, A) end,
- Tree = open(Db, <<1,2,3>>, 6, [{collate_fun, CollateFun}]),
+ Tree = open(Db, <<1, 2, 3>>, 6, [{collate_fun, CollateFun}]),
lists:foldl(fun(Key, T) -> insert(Db, T, Key, Key + 1) end, Tree, Keys),
lists:foreach(
fun(_) ->
[StartKey, EndKey] = sort_keys(Tree, [rand:uniform(Max), rand:uniform(Max)]),
- Seq = if
- StartKey < EndKey ->
- lists:seq(StartKey, EndKey);
- true ->
- lists:seq(StartKey, EndKey, -1)
- end,
- ?assertEqual([{K, K + 1} || K <- lists:reverse(Seq)],
+ Seq =
+ if
+ StartKey < EndKey ->
+ lists:seq(StartKey, EndKey);
+ true ->
+ lists:seq(StartKey, EndKey, -1)
+ end,
+ ?assertEqual(
+ [{K, K + 1} || K <- lists:reverse(Seq)],
reverse_range(Db, Tree, StartKey, EndKey, fun(E, A) -> A ++ E end, [])
- ) end,
- lists:seq(1, 100))
+ )
+ end,
+ lists:seq(1, 100)
+ )
end}.
-
validate_tree_test() ->
Db = erlfdb_util:get_test_db([empty]),
- Tree = open(Db, <<1,2,3>>, 4),
+ Tree = open(Db, <<1, 2, 3>>, 4),
[ebtree:insert(Db, Tree, I, I) || I <- lists:seq(1, 16)],
validate_tree(Db, Tree).
-
validate_node_test_() ->
[
- ?_test(?assertError({node_without_id, _}, validate_node(
- #tree{}, #node{id = undefined}))),
- ?_test(?assertError({too_few_keys, _}, validate_node(
- #tree{collate_fun = fun collate_raw/2, min = 2},
- #node{id = 1, members = [{1, 1}]}))),
- ?_test(?assertError({too_many_keys, _}, validate_node(
- #tree{collate_fun = fun collate_raw/2, min = 2, max = 2},
- #node{id = 1, members = [{1, 1}, {2, 2}, {3, 3}]}))),
- ?_test(?assertError({non_leaf_with_prev, _}, validate_node(
- #tree{min = 0}, #node{id = 1, level = 1, prev = 1}))),
- ?_test(?assertError({non_leaf_with_next, _}, validate_node(
- #tree{min = 0}, #node{id = 1, level = 1, next = 1}))),
- ?_test(?assertError({out_of_order, _}, validate_node(
- #tree{min = 0, collate_fun = fun collate_raw/2},
- #node{id = 1, members = [{2, 2}, {1, 1}]}))),
- ?_test(?assertError({duplicates, _}, validate_node(
- #tree{min = 0, collate_fun = fun collate_raw/2},
- #node{id = 1, members = [{1, 1}, {1, 1}]})))
+ ?_test(
+ ?assertError(
+ {node_without_id, _},
+ validate_node(
+ #tree{}, #node{id = undefined}
+ )
+ )
+ ),
+ ?_test(
+ ?assertError(
+ {too_few_keys, _},
+ validate_node(
+ #tree{collate_fun = fun collate_raw/2, min = 2},
+ #node{id = 1, members = [{1, 1}]}
+ )
+ )
+ ),
+ ?_test(
+ ?assertError(
+ {too_many_keys, _},
+ validate_node(
+ #tree{collate_fun = fun collate_raw/2, min = 2, max = 2},
+ #node{id = 1, members = [{1, 1}, {2, 2}, {3, 3}]}
+ )
+ )
+ ),
+ ?_test(
+ ?assertError(
+ {non_leaf_with_prev, _},
+ validate_node(
+ #tree{min = 0}, #node{id = 1, level = 1, prev = 1}
+ )
+ )
+ ),
+ ?_test(
+ ?assertError(
+ {non_leaf_with_next, _},
+ validate_node(
+ #tree{min = 0}, #node{id = 1, level = 1, next = 1}
+ )
+ )
+ ),
+ ?_test(
+ ?assertError(
+ {out_of_order, _},
+ validate_node(
+ #tree{min = 0, collate_fun = fun collate_raw/2},
+ #node{id = 1, members = [{2, 2}, {1, 1}]}
+ )
+ )
+ ),
+ ?_test(
+ ?assertError(
+ {duplicates, _},
+ validate_node(
+ #tree{min = 0, collate_fun = fun collate_raw/2},
+ #node{id = 1, members = [{1, 1}, {1, 1}]}
+ )
+ )
+ )
].
-
umerge_members_test() ->
Tree = #tree{collate_fun = fun collate_raw/2},
NewList = fun() ->
Raw = [{rand:uniform(100), rand:uniform()} || _ <- lists:seq(1, 100)],
lists:ukeysort(1, Raw)
end,
- lists:foreach(fun(_) ->
- A = NewList(),
- B = NewList(),
- Stdlib = lists:ukeymerge(1, A, B),
- Custom = umerge_members(Tree, 0, A, B),
- ?assertEqual(Stdlib, Custom)
- end, lists:seq(1, 100)).
-
+ lists:foreach(
+ fun(_) ->
+ A = NewList(),
+ B = NewList(),
+ Stdlib = lists:ukeymerge(1, A, B),
+ Custom = umerge_members(Tree, 0, A, B),
+ ?assertEqual(Stdlib, Custom)
+ end,
+ lists:seq(1, 100)
+ ).
-endif.
diff --git a/src/fabric/src/fabric2_active_tasks.erl b/src/fabric/src/fabric2_active_tasks.erl
index e706ebaa4..80f19832f 100644
--- a/src/fabric/src/fabric2_active_tasks.erl
+++ b/src/fabric/src/fabric2_active_tasks.erl
@@ -10,10 +10,8 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(fabric2_active_tasks).
-
-export([
get_active_tasks/0,
get_active_task_info/1,
@@ -21,32 +19,35 @@
update_active_task_info/2
]).
-
-define(ACTIVE_TASK_INFO, <<"active_task_info">>).
-
get_active_tasks() ->
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(undefined), fun(JTx) ->
Types = couch_jobs:get_types(JTx),
- lists:foldl(fun(Type, TaskAcc) ->
- JobIds = couch_jobs:get_active_jobs_ids(JTx, Type),
- Tasks = lists:filtermap(fun(JobId) ->
- {ok, Data} = couch_jobs:get_job_data(JTx, Type, JobId),
- case maps:get(?ACTIVE_TASK_INFO, Data, not_found) of
- not_found -> false;
- #{} = Map when map_size(Map) == 0 -> false;
- #{} = Info -> {true, Info}
- end
- end, JobIds),
- TaskAcc ++ Tasks
- end, [], Types)
+ lists:foldl(
+ fun(Type, TaskAcc) ->
+ JobIds = couch_jobs:get_active_jobs_ids(JTx, Type),
+ Tasks = lists:filtermap(
+ fun(JobId) ->
+ {ok, Data} = couch_jobs:get_job_data(JTx, Type, JobId),
+ case maps:get(?ACTIVE_TASK_INFO, Data, not_found) of
+ not_found -> false;
+ #{} = Map when map_size(Map) == 0 -> false;
+ #{} = Info -> {true, Info}
+ end
+ end,
+ JobIds
+ ),
+ TaskAcc ++ Tasks
+ end,
+ [],
+ Types
+ )
end).
-
get_active_task_info(JobData) ->
- #{?ACTIVE_TASK_INFO:= ActiveTaskInfo} = JobData,
+ #{?ACTIVE_TASK_INFO := ActiveTaskInfo} = JobData,
ActiveTaskInfo.
-
update_active_task_info(JobData, ActiveTaskInfo) ->
JobData#{?ACTIVE_TASK_INFO => ActiveTaskInfo}.
diff --git a/src/fabric/src/fabric2_app.erl b/src/fabric/src/fabric2_app.erl
index da95acb53..9d04bfd4d 100644
--- a/src/fabric/src/fabric2_app.erl
+++ b/src/fabric/src/fabric2_app.erl
@@ -13,17 +13,14 @@
-module(fabric2_app).
-behaviour(application).
-
-export([
start/2,
stop/1
]).
-
start(_Type, StartArgs) ->
fabric2_sup:start_link(StartArgs).
-
stop(_State) ->
case application:get_env(erlfdb, test_cluster_pid) of
{ok, Pid} -> Pid ! close;
diff --git a/src/fabric/src/fabric2_db.erl b/src/fabric/src/fabric2_db.erl
index d4f15c5b0..f7fb259e4 100644
--- a/src/fabric/src/fabric2_db.erl
+++ b/src/fabric/src/fabric2_db.erl
@@ -12,7 +12,6 @@
-module(fabric2_db).
-
-export([
create/2,
open/2,
@@ -136,19 +135,19 @@
apply_open_doc_opts/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("fabric2.hrl").
-include_lib("kernel/include/logger.hrl").
-
% Default max database name length is based on CouchDB < 4.x compatibility. See
% the default.ini entry for additional information.
-define(DEFAULT_MAX_DATABASE_NAME_LENGTH, 238).
-define(DBNAME_REGEX,
- "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
- "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
+ % use the stock CouchDB regex
+ "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*"
+ % but allow an optional shard timestamp at the end
+ "(\\.[0-9]{10,})?$"
).
-define(FIRST_DDOC_KEY, <<"_design/">>).
@@ -158,7 +157,6 @@
-define(DEFAULT_UPDATE_DOCS_BATCH_SIZE, 2500000).
-
-record(bacc, {
db,
docs,
@@ -169,7 +167,6 @@
results
}).
-
create(DbName, Options) ->
case validate_dbname(DbName) of
ok ->
@@ -196,7 +193,6 @@ create(DbName, Options) ->
Error
end.
-
open(DbName, Options) ->
UUID = fabric2_util:get_value(uuid, Options),
case fabric2_server:fetch(DbName, UUID) of
@@ -220,7 +216,6 @@ open(DbName, Options) ->
end
end.
-
delete(DbName, Options) ->
% Delete doesn't check user_ctx; that's done at the HTTP API level.
% Here we just care to get the `database_does_not_exist` error thrown
@@ -235,35 +230,39 @@ delete(DbName, Options) ->
Resp = fabric2_fdb:transactional(Db, fun(TxDb) ->
fabric2_fdb:delete(TxDb)
end),
- if Resp /= ok -> Resp; true ->
- fabric2_db_plugin:after_db_delete(DbName, get_uuid(Db)),
- fabric2_server:remove(DbName)
+ if
+ Resp /= ok ->
+ Resp;
+ true ->
+ fabric2_db_plugin:after_db_delete(DbName, get_uuid(Db)),
+ fabric2_server:remove(DbName)
end
end.
-
undelete(DbName, TgtDbName, TimeStamp, Options) ->
case validate_dbname(TgtDbName) of
ok ->
- Resp = fabric2_fdb:transactional(DbName,
+ Resp = fabric2_fdb:transactional(
+ DbName,
fun(TxDb) ->
fabric2_fdb:undelete(TxDb, TgtDbName, TimeStamp)
end
),
- if Resp /= ok -> ok; true ->
- {ok, Db} = open(TgtDbName, Options),
- fabric2_db_plugin:after_db_create(TgtDbName, get_uuid(Db))
+ if
+ Resp /= ok ->
+ ok;
+ true ->
+ {ok, Db} = open(TgtDbName, Options),
+ fabric2_db_plugin:after_db_create(TgtDbName, get_uuid(Db))
end,
Resp;
Error ->
Error
end.
-
list_dbs() ->
list_dbs([]).
-
list_dbs(Options) ->
Callback = fun(DbName, Acc) -> [DbName | Acc] end,
DbNames = fabric2_fdb:transactional(fun(Tx) ->
@@ -271,39 +270,35 @@ list_dbs(Options) ->
end),
lists:reverse(DbNames).
-
list_dbs(UserFun, UserAcc0, Options) ->
- FoldFun = fun
- (DbName, Acc) -> maybe_stop(UserFun({row, [{id, DbName}]}, Acc))
- end,
+ FoldFun = fun(DbName, Acc) -> maybe_stop(UserFun({row, [{id, DbName}]}, Acc)) end,
try
UserAcc1 = maybe_stop(UserFun({meta, []}, UserAcc0)),
UserAcc2 = fabric2_fdb:transactional(fun(Tx) ->
fabric2_fdb:list_dbs(Tx, FoldFun, UserAcc1, Options)
end),
{ok, maybe_stop(UserFun(complete, UserAcc2))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
+ catch
+ throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
end.
-
list_dbs_info() ->
list_dbs_info([]).
-
list_dbs_info(Options) ->
Callback = fun(Value, Acc) ->
- NewAcc = case Value of
- {meta, _} -> Acc;
- {row, DbInfo} -> [DbInfo | Acc];
- complete -> Acc
- end,
+ NewAcc =
+ case Value of
+ {meta, _} -> Acc;
+ {row, DbInfo} -> [DbInfo | Acc];
+ complete -> Acc
+ end,
{ok, NewAcc}
end,
{ok, DbInfos} = list_dbs_info(Callback, [], Options),
{ok, lists:reverse(DbInfos)}.
-
list_dbs_info(UserFun, UserAcc0, Options) ->
FoldFun = fun(DbName, InfoFuture, {FutureQ, Count, Acc}) ->
NewFutureQ = queue:in({DbName, InfoFuture}, FutureQ),
@@ -314,73 +309,76 @@ list_dbs_info(UserFun, UserAcc0, Options) ->
InitAcc = {queue:new(), 0, UserAcc1},
UserAcc3 = fabric2_fdb:transactional(fun(Tx) ->
{FinalFutureQ, _, UserAcc2} = fabric2_fdb:list_dbs_info(
- Tx,
- FoldFun,
- InitAcc,
- Options
- ),
+ Tx,
+ FoldFun,
+ InitAcc,
+ Options
+ ),
drain_all_info_futures(FinalFutureQ, UserFun, UserAcc2)
end),
{ok, maybe_stop(UserFun(complete, UserAcc3))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
+ catch
+ throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
end.
-
list_deleted_dbs_info() ->
list_deleted_dbs_info([]).
-
list_deleted_dbs_info(Options) ->
Callback = fun(Value, Acc) ->
- NewAcc = case Value of
- {meta, _} -> Acc;
- {row, DbInfo} -> [DbInfo | Acc];
- complete -> Acc
- end,
+ NewAcc =
+ case Value of
+ {meta, _} -> Acc;
+ {row, DbInfo} -> [DbInfo | Acc];
+ complete -> Acc
+ end,
{ok, NewAcc}
end,
{ok, DbInfos} = list_deleted_dbs_info(Callback, [], Options),
{ok, lists:reverse(DbInfos)}.
-
list_deleted_dbs_info(UserFun, UserAcc0, Options0) ->
Dir = fabric2_util:get_value(dir, Options0, fwd),
StartKey0 = fabric2_util:get_value(start_key, Options0),
EndKey0 = fabric2_util:get_value(end_key, Options0),
- {FirstBinary, LastBinary} = case Dir of
- fwd -> {<<>>, <<255>>};
- rev -> {<<255>>, <<>>}
- end,
+ {FirstBinary, LastBinary} =
+ case Dir of
+ fwd -> {<<>>, <<255>>};
+ rev -> {<<255>>, <<>>}
+ end,
- StartKey1 = case StartKey0 of
- undefined ->
- {FirstBinary};
- DbName0 when is_binary(DbName0) ->
- {DbName0, FirstBinary};
- [DbName0, TimeStamp0] when is_binary(DbName0), is_binary(TimeStamp0) ->
- {DbName0, TimeStamp0};
- BadStartKey ->
- erlang:error({invalid_start_key, BadStartKey})
- end,
- EndKey1 = case EndKey0 of
- undefined ->
- {LastBinary};
- DbName1 when is_binary(DbName1) ->
- {DbName1, LastBinary};
- [DbName1, TimeStamp1] when is_binary(DbName1), is_binary(TimeStamp1) ->
- {DbName1, TimeStamp1};
- BadEndKey ->
- erlang:error({invalid_end_key, BadEndKey})
- end,
+ StartKey1 =
+ case StartKey0 of
+ undefined ->
+ {FirstBinary};
+ DbName0 when is_binary(DbName0) ->
+ {DbName0, FirstBinary};
+ [DbName0, TimeStamp0] when is_binary(DbName0), is_binary(TimeStamp0) ->
+ {DbName0, TimeStamp0};
+ BadStartKey ->
+ erlang:error({invalid_start_key, BadStartKey})
+ end,
+ EndKey1 =
+ case EndKey0 of
+ undefined ->
+ {LastBinary};
+ DbName1 when is_binary(DbName1) ->
+ {DbName1, LastBinary};
+ [DbName1, TimeStamp1] when is_binary(DbName1), is_binary(TimeStamp1) ->
+ {DbName1, TimeStamp1};
+ BadEndKey ->
+ erlang:error({invalid_end_key, BadEndKey})
+ end,
Options1 = Options0 -- [{start_key, StartKey0}, {end_key, EndKey0}],
- Options2 = [
- {start_key, StartKey1},
- {end_key, EndKey1},
- {wrap_keys, false}
- ] ++ Options1,
+ Options2 =
+ [
+ {start_key, StartKey1},
+ {end_key, EndKey1},
+ {wrap_keys, false}
+ ] ++ Options1,
FoldFun = fun(DbName, TimeStamp, InfoFuture, {FutureQ, Count, Acc}) ->
NewFutureQ = queue:in({DbName, TimeStamp, InfoFuture}, FutureQ),
@@ -391,19 +389,19 @@ list_deleted_dbs_info(UserFun, UserAcc0, Options0) ->
InitAcc = {queue:new(), 0, UserAcc1},
UserAcc3 = fabric2_fdb:transactional(fun(Tx) ->
{FinalFutureQ, _, UserAcc2} = fabric2_fdb:list_deleted_dbs_info(
- Tx,
- FoldFun,
- InitAcc,
- Options2
- ),
+ Tx,
+ FoldFun,
+ InitAcc,
+ Options2
+ ),
drain_all_deleted_info_futures(FinalFutureQ, UserFun, UserAcc2)
end),
{ok, maybe_stop(UserFun(complete, UserAcc3))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
+ catch
+ throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
end.
-
is_admin(Db, {SecProps}) when is_list(SecProps) ->
case fabric2_db_plugin:check_is_admin(Db) of
true ->
@@ -414,11 +412,9 @@ is_admin(Db, {SecProps}) when is_list(SecProps) ->
is_authorized(Admins, UserCtx)
end.
-
check_is_admin(Db) ->
check_is_admin(Db, get_security(Db)).
-
check_is_admin(Db, SecDoc) ->
case is_admin(Db, SecDoc) of
true ->
@@ -429,11 +425,9 @@ check_is_admin(Db, SecDoc) ->
throw_security_error(UserCtx, Reason)
end.
-
check_is_member(Db) ->
check_is_member(Db, get_security(Db)).
-
check_is_member(Db, SecDoc) ->
case is_member(Db, SecDoc) of
true ->
@@ -443,165 +437,135 @@ check_is_member(Db, SecDoc) ->
throw_security_error(UserCtx)
end.
-
require_admin_check(#{} = Db) ->
Db#{security_fun := fun check_is_admin/2}.
-
require_member_check(#{} = Db) ->
Db#{security_fun := fun check_is_member/2}.
-
name(#{name := DbName}) ->
DbName.
-
get_after_doc_read_fun(#{after_doc_read := AfterDocRead}) ->
AfterDocRead.
-
get_before_doc_update_fun(#{before_doc_update := BeforeDocUpdate}) ->
BeforeDocUpdate.
get_committed_update_seq(#{} = Db) ->
get_update_seq(Db).
-
get_compacted_seq(#{} = Db) ->
get_update_seq(Db).
-
get_compactor_pid(#{} = _Db) ->
nil.
-
get_db_info(#{} = Db) ->
DbProps = fabric2_fdb:transactional(Db, fun(TxDb) ->
fabric2_fdb:get_info(TxDb)
end),
{ok, make_db_info(name(Db), DbProps)}.
-
get_del_doc_count(#{} = Db) ->
get_doc_count(Db, <<"doc_del_count">>).
-
get_doc_count(Db) ->
get_doc_count(Db, <<"doc_count">>).
-
get_doc_count(Db, undefined) ->
get_doc_count(Db, <<"doc_count">>);
-
get_doc_count(Db, <<"_all_docs">>) ->
get_doc_count(Db, <<"doc_count">>);
-
get_doc_count(DbName, <<"_design">>) ->
get_doc_count(DbName, <<"doc_design_count">>);
-
get_doc_count(DbName, <<"_local">>) ->
get_doc_count(DbName, <<"doc_local_count">>);
-
get_doc_count(Db, Key) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
fabric2_fdb:get_stat(TxDb, Key)
end).
-
get_instance_start_time(#{}) ->
0.
-
get_pid(#{}) ->
nil.
-
get_revs_limit(#{} = Db) ->
get_revs_limit(Db, []).
-
get_revs_limit(#{} = Db, Opts) ->
CurrentDb = get_cached_db(Db, Opts),
maps:get(revs_limit, CurrentDb).
-
get_security(#{} = Db) ->
get_security(Db, []).
-
get_security(#{} = Db, Opts) ->
CurrentDb = get_cached_db(Db, Opts),
maps:get(security_doc, CurrentDb).
-
get_update_seq(#{} = Db) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
fabric2_fdb:get_last_change(TxDb)
end).
-
get_user_ctx(#{user_ctx := UserCtx}) ->
UserCtx.
-
get_uuid(#{uuid := UUID}) ->
UUID.
-
is_clustered(#{}) ->
false.
-
is_db(#{name := _}) ->
true;
is_db(_) ->
false.
-
is_partitioned(#{}) ->
false.
-
is_system_db(#{name := DbName}) ->
is_system_db_name(DbName).
-
is_system_db_name(DbName) when is_list(DbName) ->
is_system_db_name(?l2b(DbName));
is_system_db_name(DbName) when is_binary(DbName) ->
Suffix = filename:basename(DbName),
case {filename:dirname(DbName), lists:member(Suffix, ?SYSTEM_DATABASES)} of
- {<<".">>, Result} -> Result;
- {_Prefix, false} -> false;
+ {<<".">>, Result} ->
+ Result;
+ {_Prefix, false} ->
+ false;
{Prefix, true} ->
- ReOpts = [{capture,none}, dollar_endonly],
+ ReOpts = [{capture, none}, dollar_endonly],
re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
end.
-
is_replicator_db(#{name := DbName}) ->
is_replicator_db(DbName);
-
is_replicator_db(DbName) when is_binary(DbName) ->
fabric2_util:dbname_ends_with(DbName, <<"_replicator">>).
-
is_users_db(#{name := DbName}) ->
is_users_db(DbName);
-
is_users_db(DbName) when is_binary(DbName) ->
AuthenticationDb = config:get("chttpd_auth", "authentication_db"),
CfgUsersSuffix = config:get("couchdb", "users_db_suffix", "_users"),
- IsAuthCache = if AuthenticationDb == undefined -> false; true ->
- DbName == ?l2b(AuthenticationDb)
- end,
+ IsAuthCache =
+ if
+ AuthenticationDb == undefined -> false;
+ true -> DbName == ?l2b(AuthenticationDb)
+ end,
IsCfgUsersDb = fabric2_util:dbname_ends_with(DbName, ?l2b(CfgUsersSuffix)),
IsGlobalUsersDb = fabric2_util:dbname_ends_with(DbName, <<"_users">>),
IsAuthCache orelse IsCfgUsersDb orelse IsGlobalUsersDb.
-
set_revs_limit(#{} = Db0, RevsLimit) when is_integer(RevsLimit) ->
Db1 = require_admin_check(Db0),
Resp = fabric2_fdb:transactional(Db1, fun(TxDb) ->
@@ -612,7 +576,6 @@ set_revs_limit(#{} = Db0, RevsLimit) when is_integer(RevsLimit) ->
Err -> Err
end.
-
set_security(#{} = Db0, Security) ->
Db1 = require_admin_check(Db0),
ok = fabric2_util:validate_security_object(Security),
@@ -624,23 +587,18 @@ set_security(#{} = Db0, Security) ->
Err -> Err
end.
-
set_user_ctx(#{} = Db, UserCtx) ->
Db#{user_ctx := UserCtx}.
-
ensure_full_commit(#{}) ->
{ok, 0}.
-
ensure_full_commit(#{}, _Timeout) ->
{ok, 0}.
-
open_doc(#{} = Db, DocId) ->
open_doc(Db, DocId, []).
-
open_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _Options) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
case fabric2_fdb:get_local_doc(TxDb, DocId) of
@@ -648,138 +606,170 @@ open_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _Options) ->
Else -> Else
end
end);
-
open_doc(#{} = Db, DocId, Options) ->
NeedsTreeOpts = [revs_info, conflicts, deleted_conflicts],
NeedsTree = (Options -- NeedsTreeOpts /= Options),
OpenDeleted = lists:member(deleted, Options),
fabric2_fdb:transactional(Db, fun(TxDb) ->
- Revs = case NeedsTree of
- true -> fabric2_fdb:get_all_revs(TxDb, DocId);
- false -> fabric2_fdb:get_winning_revs(TxDb, DocId, 1)
- end,
- if Revs == [] -> {not_found, missing}; true ->
- #{winner := true} = RI = lists:last(Revs),
- case fabric2_fdb:get_doc_body(TxDb, DocId, RI) of
- #doc{deleted = true} when not OpenDeleted ->
- {not_found, deleted};
- #doc{} = Doc ->
- apply_open_doc_opts(Doc, Revs, Options);
- Else ->
- Else
- end
+ Revs =
+ case NeedsTree of
+ true -> fabric2_fdb:get_all_revs(TxDb, DocId);
+ false -> fabric2_fdb:get_winning_revs(TxDb, DocId, 1)
+ end,
+ if
+ Revs == [] ->
+ {not_found, missing};
+ true ->
+ #{winner := true} = RI = lists:last(Revs),
+ case fabric2_fdb:get_doc_body(TxDb, DocId, RI) of
+ #doc{deleted = true} when not OpenDeleted ->
+ {not_found, deleted};
+ #doc{} = Doc ->
+ apply_open_doc_opts(Doc, Revs, Options);
+ Else ->
+ Else
+ end
end
end).
-
open_doc_revs(Db, DocId, Revs, Options) ->
Latest = lists:member(latest, Options),
fabric2_fdb:transactional(Db, fun(TxDb) ->
AllRevInfos = fabric2_fdb:get_all_revs(TxDb, DocId),
- RevTree = lists:foldl(fun(RI, TreeAcc) ->
- RIPath = fabric2_util:revinfo_to_path(RI),
- {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
- Merged
- end, [], AllRevInfos),
- {Found, Missing} = case Revs of
- all ->
- {couch_key_tree:get_all_leafs(RevTree), []};
- _ when Latest ->
- couch_key_tree:get_key_leafs(RevTree, Revs);
- _ ->
- couch_key_tree:get(RevTree, Revs)
- end,
- Docs = lists:map(fun({Value, {Pos, [Rev | RevPath]}}) ->
- case Value of
- ?REV_MISSING ->
- % We have the rev in our list but know nothing about it
- {{not_found, missing}, {Pos, Rev}};
+ RevTree = lists:foldl(
+ fun(RI, TreeAcc) ->
+ RIPath = fabric2_util:revinfo_to_path(RI),
+ {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+ Merged
+ end,
+ [],
+ AllRevInfos
+ ),
+ {Found, Missing} =
+ case Revs of
+ all ->
+ {couch_key_tree:get_all_leafs(RevTree), []};
+ _ when Latest ->
+ couch_key_tree:get_key_leafs(RevTree, Revs);
_ ->
- RevInfo = #{
- rev_id => {Pos, Rev},
- rev_path => RevPath
- },
- case fabric2_fdb:get_doc_body(TxDb, DocId, RevInfo) of
- #doc{} = Doc ->
- apply_open_doc_opts(Doc, AllRevInfos, Options);
- Else ->
- {Else, {Pos, Rev}}
- end
- end
- end, Found),
+ couch_key_tree:get(RevTree, Revs)
+ end,
+ Docs = lists:map(
+ fun({Value, {Pos, [Rev | RevPath]}}) ->
+ case Value of
+ ?REV_MISSING ->
+ % We have the rev in our list but know nothing about it
+ {{not_found, missing}, {Pos, Rev}};
+ _ ->
+ RevInfo = #{
+ rev_id => {Pos, Rev},
+ rev_path => RevPath
+ },
+ case fabric2_fdb:get_doc_body(TxDb, DocId, RevInfo) of
+ #doc{} = Doc ->
+ apply_open_doc_opts(Doc, AllRevInfos, Options);
+ Else ->
+ {Else, {Pos, Rev}}
+ end
+ end
+ end,
+ Found
+ ),
MissingDocs = [{{not_found, missing}, MRev} || MRev <- Missing],
{ok, Docs ++ MissingDocs}
end).
-
get_doc_info(Db, DocId) ->
case get_full_doc_info(Db, DocId) of
not_found -> not_found;
FDI -> couch_doc:to_doc_info(FDI)
end.
-
get_full_doc_info(Db, DocId) ->
RevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
fabric2_fdb:get_all_revs(TxDb, DocId)
end),
- if RevInfos == [] -> not_found; true ->
- #{winner := true} = Winner = lists:last(RevInfos),
- RevTree = lists:foldl(fun(RI, TreeAcc) ->
- RIPath = fabric2_util:revinfo_to_path(RI),
- {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
- Merged
- end, [], RevInfos),
- #full_doc_info{
- id = DocId,
- update_seq = fabric2_fdb:vs_to_seq(maps:get(sequence, Winner)),
- deleted = maps:get(deleted, Winner),
- rev_tree = RevTree
- }
+ if
+ RevInfos == [] ->
+ not_found;
+ true ->
+ #{winner := true} = Winner = lists:last(RevInfos),
+ RevTree = lists:foldl(
+ fun(RI, TreeAcc) ->
+ RIPath = fabric2_util:revinfo_to_path(RI),
+ {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+ Merged
+ end,
+ [],
+ RevInfos
+ ),
+ #full_doc_info{
+ id = DocId,
+ update_seq = fabric2_fdb:vs_to_seq(maps:get(sequence, Winner)),
+ deleted = maps:get(deleted, Winner),
+ rev_tree = RevTree
+ }
end.
-
get_full_doc_infos(Db, DocIds) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
- lists:map(fun(DocId) ->
- get_full_doc_info(TxDb, DocId)
- end, DocIds)
+ lists:map(
+ fun(DocId) ->
+ get_full_doc_info(TxDb, DocId)
+ end,
+ DocIds
+ )
end).
-
get_missing_revs(Db, JsonIdRevs) ->
IdRevs = [idrevs(IdR) || IdR <- JsonIdRevs],
AllRevInfos = fabric2_fdb:transactional(Db, fun(TxDb) ->
- lists:foldl(fun({Id, _Revs}, Acc) ->
- case maps:is_key(Id, Acc) of
- true ->
- Acc;
- false ->
- RevInfos = fabric2_fdb:get_all_revs(TxDb, Id),
- Acc#{Id => RevInfos}
- end
- end, #{}, IdRevs)
+ lists:foldl(
+ fun({Id, _Revs}, Acc) ->
+ case maps:is_key(Id, Acc) of
+ true ->
+ Acc;
+ false ->
+ RevInfos = fabric2_fdb:get_all_revs(TxDb, Id),
+ Acc#{Id => RevInfos}
+ end
+ end,
+ #{},
+ IdRevs
+ )
end),
- AllMissing = lists:flatmap(fun({Id, Revs}) ->
- #{Id := RevInfos} = AllRevInfos,
- Missing = try
- lists:foldl(fun(RevInfo, RevAcc) ->
- if RevAcc /= [] -> ok; true ->
- throw(all_found)
+ AllMissing = lists:flatmap(
+ fun({Id, Revs}) ->
+ #{Id := RevInfos} = AllRevInfos,
+ Missing =
+ try
+ lists:foldl(
+ fun(RevInfo, RevAcc) ->
+ if
+ RevAcc /= [] -> ok;
+ true -> throw(all_found)
+ end,
+ filter_found_revs(RevInfo, RevAcc)
+ end,
+ Revs,
+ RevInfos
+ )
+ catch
+ throw:all_found ->
+ []
end,
- filter_found_revs(RevInfo, RevAcc)
- end, Revs, RevInfos)
- catch throw:all_found ->
- []
+ if
+ Missing == [] ->
+ [];
+ true ->
+ PossibleAncestors = find_possible_ancestors(RevInfos, Missing),
+ [{Id, Missing, PossibleAncestors}]
+ end
end,
- if Missing == [] -> []; true ->
- PossibleAncestors = find_possible_ancestors(RevInfos, Missing),
- [{Id, Missing, PossibleAncestors}]
- end
- end, IdRevs),
+ IdRevs
+ ),
{ok, AllMissing}.
-
get_design_docs(Db) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
#{
@@ -802,12 +792,15 @@ get_design_docs(Db) ->
% Using foldl instead of map means that the design
% docs come out in sorted order.
- lists:foldl(fun({DocId, Rev, Future}, Acc) ->
- [fabric2_fdb:get_doc_body_wait(TxDb, DocId, Rev, Future) | Acc]
- end, [], Futures)
+ lists:foldl(
+ fun({DocId, Rev, Future}, Acc) ->
+ [fabric2_fdb:get_doc_body_wait(TxDb, DocId, Rev, Future) | Acc]
+ end,
+ [],
+ Futures
+ )
end).
-
validate_docid(<<"">>) ->
throw({illegal_docid, <<"Document id must not be empty">>});
validate_docid(<<"_design/">>) ->
@@ -815,10 +808,11 @@ validate_docid(<<"_design/">>) ->
validate_docid(<<"_local/">>) ->
throw({illegal_docid, <<"Illegal document id `_local/`">>});
validate_docid(Id) when is_binary(Id) ->
- MaxLen = case config:get("couchdb", "max_document_id_length", "infinity") of
- "infinity" -> infinity;
- IntegerVal -> list_to_integer(IntegerVal)
- end,
+ MaxLen =
+ case config:get("couchdb", "max_document_id_length", "infinity") of
+ "infinity" -> infinity;
+ IntegerVal -> list_to_integer(IntegerVal)
+ end,
case MaxLen > 0 andalso byte_size(Id) > MaxLen of
true -> throw({illegal_docid, <<"Document id is too long">>});
false -> ok
@@ -828,29 +822,30 @@ validate_docid(Id) when is_binary(Id) ->
true -> ok
end,
case Id of
- <<?DESIGN_DOC_PREFIX, _/binary>> -> ok;
- <<?LOCAL_DOC_PREFIX, _/binary>> -> ok;
- <<"_", _/binary>> ->
- case fabric2_db_plugin:validate_docid(Id) of
- true ->
- ok;
- false ->
- throw(
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>})
- end;
- _Else -> ok
+ <<?DESIGN_DOC_PREFIX, _/binary>> ->
+ ok;
+ <<?LOCAL_DOC_PREFIX, _/binary>> ->
+ ok;
+ <<"_", _/binary>> ->
+ case fabric2_db_plugin:validate_docid(Id) of
+ true ->
+ ok;
+ false ->
+ throw(
+ {illegal_docid, <<"Only reserved document ids may start with underscore.">>}
+ )
+ end;
+ _Else ->
+ ok
end;
validate_docid(Id) ->
?LOG_DEBUG(#{what => illegal_docid, docid => Id}),
couch_log:debug("Document id is not a string: ~p", [Id]),
throw({illegal_docid, <<"Document id must be a string">>}).
-
update_doc(Db, Doc) ->
update_doc(Db, Doc, []).
-
update_doc(Db, Doc, Options) ->
case update_docs(Db, [Doc], Options) of
{ok, [{ok, NewRev}]} ->
@@ -867,11 +862,9 @@ update_doc(Db, Doc, Options) ->
{ok, {Pos, RevId}}
end.
-
update_docs(Db, Docs) ->
update_docs(Db, Docs, []).
-
update_docs(Db, Docs0, Options) ->
Docs1 = apply_before_doc_update(Db, Docs0, Options),
try
@@ -883,57 +876,62 @@ update_docs(Db, Docs0, Options) ->
fabric2_index:db_updated(name(Db)),
% Convert errors
- Resps1 = lists:map(fun(Resp) ->
- case Resp of
- {#doc{} = Doc, Error} ->
- #doc{
- id = DocId,
- revs = Revs
- } = Doc,
- RevId = case Revs of
- {RevPos, [Rev | _]} -> {RevPos, Rev};
- {0, []} -> {0, <<>>};
- Else -> Else
- end,
- {{DocId, RevId}, Error};
- Else ->
- Else
- end
- end, Resps0),
+ Resps1 = lists:map(
+ fun(Resp) ->
+ case Resp of
+ {#doc{} = Doc, Error} ->
+ #doc{
+ id = DocId,
+ revs = Revs
+ } = Doc,
+ RevId =
+ case Revs of
+ {RevPos, [Rev | _]} -> {RevPos, Rev};
+ {0, []} -> {0, <<>>};
+ Else -> Else
+ end,
+ {{DocId, RevId}, Error};
+ Else ->
+ Else
+ end
+ end,
+ Resps0
+ ),
case is_replicated(Options) of
true ->
- {ok, lists:flatmap(fun(R) ->
- case R of
- {ok, []} -> [];
- {{_, _}, {ok, []}} -> [];
- Else -> [Else]
- end
- end, Resps1)};
+ {ok,
+ lists:flatmap(
+ fun(R) ->
+ case R of
+ {ok, []} -> [];
+ {{_, _}, {ok, []}} -> [];
+ Else -> [Else]
+ end
+ end,
+ Resps1
+ )};
false ->
{ok, Resps1}
end
- catch throw:{aborted, Errors} ->
- {aborted, Errors}
+ catch
+ throw:{aborted, Errors} ->
+ {aborted, Errors}
end.
-
read_attachment(Db, DocId, AttId) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
fabric2_fdb:read_attachment(TxDb, DocId, AttId)
end).
-
write_attachment(Db, DocId, Att) ->
Data = couch_att:fetch(data, Att),
Encoding = couch_att:fetch(encoding, Att),
{ok, AttId} = fabric2_fdb:write_attachment(Db, DocId, Data, Encoding),
couch_att:store(data, {loc, Db, DocId, AttId}, Att).
-
fold_docs(Db, UserFun, UserAcc) ->
fold_docs(Db, UserFun, UserAcc, []).
-
fold_docs(Db, UserFun, UserAcc0, Options) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
try
@@ -946,35 +944,42 @@ fold_docs(Db, UserFun, UserAcc0, Options) ->
UserAcc1 = maybe_stop(UserFun({meta, Meta}, UserAcc0)),
- UserAcc2 = fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
- {DocId} = erlfdb_tuple:unpack(K, Prefix),
- RevId = erlfdb_tuple:unpack(V),
- Row0 = [
- {id, DocId},
- {key, DocId},
- {value, {[{rev, couch_doc:rev_to_str(RevId)}]}}
- ],
-
- DocOpts = couch_util:get_value(doc_opts, Options, []),
- OpenOpts = [deleted | DocOpts],
-
- Row1 = case lists:keyfind(include_docs, 1, Options) of
- {include_docs, true} ->
- Row0 ++ open_json_doc(TxDb, DocId, OpenOpts, DocOpts);
- _ ->
- Row0
+ UserAcc2 = fabric2_fdb:fold_range(
+ TxDb,
+ Prefix,
+ fun({K, V}, Acc) ->
+ {DocId} = erlfdb_tuple:unpack(K, Prefix),
+ RevId = erlfdb_tuple:unpack(V),
+ Row0 = [
+ {id, DocId},
+ {key, DocId},
+ {value, {[{rev, couch_doc:rev_to_str(RevId)}]}}
+ ],
+
+ DocOpts = couch_util:get_value(doc_opts, Options, []),
+ OpenOpts = [deleted | DocOpts],
+
+ Row1 =
+ case lists:keyfind(include_docs, 1, Options) of
+ {include_docs, true} ->
+ Row0 ++ open_json_doc(TxDb, DocId, OpenOpts, DocOpts);
+ _ ->
+ Row0
+ end,
+
+ maybe_stop(UserFun({row, Row1}, Acc))
end,
-
- maybe_stop(UserFun({row, Row1}, Acc))
- end, UserAcc1, Options),
+ UserAcc1,
+ Options
+ ),
{ok, maybe_stop(UserFun(complete, UserAcc2))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
+ catch
+ throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
end
end).
-
fold_docs(Db, DocIds, UserFun, UserAcc0, Options) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
try
@@ -991,18 +996,22 @@ fold_docs(Db, DocIds, UserFun, UserAcc0, Options) ->
user_fun => UserFun
},
- FinalAcc1 = lists:foldl(fun(DocId, Acc) ->
- #{
- revs_q := RevsQ,
- revs_count := RevsCount
- } = Acc,
- Future = fold_docs_get_revs(TxDb, DocId, NeedsTree),
- NewAcc = Acc#{
- revs_q := queue:in({DocId, Future}, RevsQ),
- revs_count := RevsCount + 1
- },
- drain_fold_docs_revs_futures(TxDb, NewAcc)
- end, InitAcc, DocIds),
+ FinalAcc1 = lists:foldl(
+ fun(DocId, Acc) ->
+ #{
+ revs_q := RevsQ,
+ revs_count := RevsCount
+ } = Acc,
+ Future = fold_docs_get_revs(TxDb, DocId, NeedsTree),
+ NewAcc = Acc#{
+ revs_q := queue:in({DocId, Future}, RevsQ),
+ revs_count := RevsCount + 1
+ },
+ drain_fold_docs_revs_futures(TxDb, NewAcc)
+ end,
+ InitAcc,
+ DocIds
+ ),
FinalAcc2 = drain_all_fold_docs_revs_futures(TxDb, FinalAcc1),
FinalAcc3 = drain_all_fold_docs_body_futures(TxDb, FinalAcc2),
@@ -1011,29 +1020,26 @@ fold_docs(Db, DocIds, UserFun, UserAcc0, Options) ->
user_acc := FinalUserAcc
} = FinalAcc3,
{ok, FinalUserAcc}
-
- catch throw:{stop, StopUserAcc} ->
- {ok, StopUserAcc}
+ catch
+ throw:{stop, StopUserAcc} ->
+ {ok, StopUserAcc}
end
end).
-
-
-
fold_design_docs(Db, UserFun, UserAcc0, Options1) ->
Options2 = set_design_doc_keys(Options1),
fold_docs(Db, UserFun, UserAcc0, Options2).
-
fold_local_docs(Db, UserFun, UserAcc0, Options0) ->
- % This is mostly for testing and sanity checking. When called from a test,
- % the namespace will be set automatically. We also assert that the correct
- % namespace was set when called from the API.
- Options = case lists:keyfind(namespace, 1, Options0) of
- {namespace, <<"_local">>} -> Options0;
- false -> [{namespace, <<"_local">>} | Options0]
- end,
- fabric2_fdb:transactional(Db, fun(TxDb) ->
+ % This is mostly for testing and sanity checking. When called from a test,
+ % the namespace will be set automatically. We also assert that the correct
+ % namespace was set when called from the API.
+ Options =
+ case lists:keyfind(namespace, 1, Options0) of
+ {namespace, <<"_local">>} -> Options0;
+ false -> [{namespace, <<"_local">>} | Options0]
+ end,
+ fabric2_fdb:transactional(Db, fun(TxDb) ->
try
#{
db_prefix := DbPrefix
@@ -1044,27 +1050,37 @@ fold_local_docs(Db, UserFun, UserAcc0, Options0) ->
UserAcc1 = maybe_stop(UserFun({meta, Meta}, UserAcc0)),
- UserAcc2 = fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
- {DocId} = erlfdb_tuple:unpack(K, Prefix),
- Rev = fabric2_fdb:get_local_doc_rev(TxDb, DocId, V),
- maybe_stop(UserFun({row, [
- {id, DocId},
- {key, DocId},
- {value, {[{rev, couch_doc:rev_to_str({0, Rev})}]}}
- ]}, Acc))
- end, UserAcc1, Options),
+ UserAcc2 = fabric2_fdb:fold_range(
+ TxDb,
+ Prefix,
+ fun({K, V}, Acc) ->
+ {DocId} = erlfdb_tuple:unpack(K, Prefix),
+ Rev = fabric2_fdb:get_local_doc_rev(TxDb, DocId, V),
+ maybe_stop(
+ UserFun(
+ {row, [
+ {id, DocId},
+ {key, DocId},
+ {value, {[{rev, couch_doc:rev_to_str({0, Rev})}]}}
+ ]},
+ Acc
+ )
+ )
+ end,
+ UserAcc1,
+ Options
+ ),
{ok, maybe_stop(UserFun(complete, UserAcc2))}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
+ catch
+ throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
end
end).
-
fold_changes(Db, SinceSeq, UserFun, UserAcc) ->
fold_changes(Db, SinceSeq, UserFun, UserAcc, []).
-
fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
fabric2_fdb:transactional(Db, fun(TxDb) ->
try
@@ -1074,59 +1090,68 @@ fold_changes(Db, SinceSeq, UserFun, UserAcc, Options) ->
Prefix = erlfdb_tuple:pack({?DB_CHANGES}, DbPrefix),
- Dir = case fabric2_util:get_value(dir, Options, fwd) of
- rev -> rev;
- _ -> fwd
- end,
+ Dir =
+ case fabric2_util:get_value(dir, Options, fwd) of
+ rev -> rev;
+ _ -> fwd
+ end,
- RestartTx = case fabric2_util:get_value(restart_tx, Options) of
- undefined -> [{restart_tx, true}];
- _AlreadySet -> []
- end,
+ RestartTx =
+ case fabric2_util:get_value(restart_tx, Options) of
+ undefined -> [{restart_tx, true}];
+ _AlreadySet -> []
+ end,
StartKey = get_since_seq(TxDb, Dir, SinceSeq),
- EndKey = case fabric2_util:get_value(end_key, Options) of
- undefined when Dir == rev ->
- fabric2_util:seq_zero_vs();
- undefined ->
- fabric2_util:seq_max_vs();
- EK when is_binary(EK) ->
- fabric2_fdb:seq_to_vs(EK);
- EK when is_tuple(EK), element(1, EK) == versionstamp ->
- EK
- end,
+ EndKey =
+ case fabric2_util:get_value(end_key, Options) of
+ undefined when Dir == rev ->
+ fabric2_util:seq_zero_vs();
+ undefined ->
+ fabric2_util:seq_max_vs();
+ EK when is_binary(EK) ->
+ fabric2_fdb:seq_to_vs(EK);
+ EK when is_tuple(EK), element(1, EK) == versionstamp ->
+ EK
+ end,
BaseOpts = [{start_key, StartKey}] ++ RestartTx ++ Options,
FoldOpts = lists:keystore(end_key, 1, BaseOpts, {end_key, EndKey}),
- {ok, fabric2_fdb:fold_range(TxDb, Prefix, fun({K, V}, Acc) ->
- {SeqVS} = erlfdb_tuple:unpack(K, Prefix),
- {DocId, Deleted, RevId} = erlfdb_tuple:unpack(V),
-
- Change = #{
- id => DocId,
- sequence => fabric2_fdb:vs_to_seq(SeqVS),
- rev_id => RevId,
- deleted => Deleted
- },
-
- maybe_stop(UserFun(Change, Acc))
- end, UserAcc, FoldOpts)}
- catch throw:{stop, FinalUserAcc} ->
- {ok, FinalUserAcc}
+ {ok,
+ fabric2_fdb:fold_range(
+ TxDb,
+ Prefix,
+ fun({K, V}, Acc) ->
+ {SeqVS} = erlfdb_tuple:unpack(K, Prefix),
+ {DocId, Deleted, RevId} = erlfdb_tuple:unpack(V),
+
+ Change = #{
+ id => DocId,
+ sequence => fabric2_fdb:vs_to_seq(SeqVS),
+ rev_id => RevId,
+ deleted => Deleted
+ },
+
+ maybe_stop(UserFun(Change, Acc))
+ end,
+ UserAcc,
+ FoldOpts
+ )}
+ catch
+ throw:{stop, FinalUserAcc} ->
+ {ok, FinalUserAcc}
end
end).
-
dbname_suffix(DbName) ->
filename:basename(DbName).
-
validate_dbname(DbName) when is_list(DbName) ->
validate_dbname(?l2b(DbName));
-
validate_dbname(DbName) when is_binary(DbName) ->
fabric2_db_plugin:validate_dbname(
- DbName, DbName, fun validate_dbname_int/2).
+ DbName, DbName, fun validate_dbname_int/2
+ ).
validate_dbname_int(DbName, DbName) when is_binary(DbName) ->
case validate_dbname_length(DbName) of
@@ -1134,18 +1159,19 @@ validate_dbname_int(DbName, DbName) when is_binary(DbName) ->
{error, _} = Error -> Error
end.
-
validate_dbname_length(DbName) ->
- MaxLength = config:get_integer("couchdb", "max_database_name_length",
- ?DEFAULT_MAX_DATABASE_NAME_LENGTH),
+ MaxLength = config:get_integer(
+ "couchdb",
+ "max_database_name_length",
+ ?DEFAULT_MAX_DATABASE_NAME_LENGTH
+ ),
case byte_size(DbName) =< MaxLength of
true -> ok;
false -> {error, {database_name_too_long, DbName}}
end.
-
validate_dbname_pat(DbName) ->
- case re:run(DbName, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
+ case re:run(DbName, ?DBNAME_REGEX, [{capture, none}, dollar_endonly]) of
match ->
ok;
nomatch ->
@@ -1155,32 +1181,31 @@ validate_dbname_pat(DbName) ->
end
end.
-
maybe_add_sys_db_callbacks(Db) ->
IsReplicatorDb = is_replicator_db(Db),
IsUsersDb = is_users_db(Db),
- {BDU, ADR} = if
- IsReplicatorDb ->
- {
- fun couch_replicator_docs:before_doc_update/3,
- fun couch_replicator_docs:after_doc_read/2
- };
- IsUsersDb ->
- {
- fun fabric2_users_db:before_doc_update/3,
- fun fabric2_users_db:after_doc_read/2
- };
- true ->
- {undefined, undefined}
- end,
+ {BDU, ADR} =
+ if
+ IsReplicatorDb ->
+ {
+ fun couch_replicator_docs:before_doc_update/3,
+ fun couch_replicator_docs:after_doc_read/2
+ };
+ IsUsersDb ->
+ {
+ fun fabric2_users_db:before_doc_update/3,
+ fun fabric2_users_db:after_doc_read/2
+ };
+ true ->
+ {undefined, undefined}
+ end,
Db#{
before_doc_update := BDU,
after_doc_read := ADR
}.
-
make_db_info(DbName, Props) ->
BaseProps = [
{cluster, {[{n, 0}, {q, 0}, {r, 0}, {w, 0}]}},
@@ -1193,14 +1218,16 @@ make_db_info(DbName, Props) ->
{purge_seq, 0}
],
- lists:foldl(fun({Key, Val}, Acc) ->
- lists:keystore(Key, 1, Acc, {Key, Val})
- end, BaseProps, Props).
-
+ lists:foldl(
+ fun({Key, Val}, Acc) ->
+ lists:keystore(Key, 1, Acc, {Key, Val})
+ end,
+ BaseProps,
+ Props
+ ).
drain_info_futures(FutureQ, Count, _UserFun, Acc) when Count < 100 ->
{FutureQ, Count, Acc};
-
drain_info_futures(FutureQ, Count, UserFun, Acc) when Count >= 100 ->
{{value, {DbName, Future}}, RestQ} = queue:out(FutureQ),
InfoProps = fabric2_fdb:get_info_wait(Future),
@@ -1208,7 +1235,6 @@ drain_info_futures(FutureQ, Count, UserFun, Acc) when Count >= 100 ->
NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
{RestQ, Count - 1, NewAcc}.
-
drain_all_info_futures(FutureQ, UserFun, Acc) ->
case queue:out(FutureQ) of
{{value, {DbName, Future}}, RestQ} ->
@@ -1220,30 +1246,31 @@ drain_all_info_futures(FutureQ, UserFun, Acc) ->
Acc
end.
-
drain_deleted_info_futures(FutureQ, Count, _UserFun, Acc) when Count < 100 ->
{FutureQ, Count, Acc};
-
drain_deleted_info_futures(FutureQ, Count, UserFun, Acc) when Count >= 100 ->
{{value, {DbName, TimeStamp, Future}}, RestQ} = queue:out(FutureQ),
BaseProps = fabric2_fdb:get_info_wait(Future),
- DeletedProps = BaseProps ++ [
- {deleted, true},
- {timestamp, TimeStamp}
- ],
+ DeletedProps =
+ BaseProps ++
+ [
+ {deleted, true},
+ {timestamp, TimeStamp}
+ ],
DbInfo = make_db_info(DbName, DeletedProps),
NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
{RestQ, Count - 1, NewAcc}.
-
drain_all_deleted_info_futures(FutureQ, UserFun, Acc) ->
case queue:out(FutureQ) of
{{value, {DbName, TimeStamp, Future}}, RestQ} ->
BaseProps = fabric2_fdb:get_info_wait(Future),
- DeletedProps = BaseProps ++ [
- {deleted, true},
- {timestamp, TimeStamp}
- ],
+ DeletedProps =
+ BaseProps ++
+ [
+ {deleted, true},
+ {timestamp, TimeStamp}
+ ],
DbInfo = make_db_info(DbName, DeletedProps),
NewAcc = maybe_stop(UserFun({row, DbInfo}, Acc)),
drain_all_deleted_info_futures(RestQ, UserFun, NewAcc);
@@ -1251,61 +1278,61 @@ drain_all_deleted_info_futures(FutureQ, UserFun, Acc) ->
Acc
end.
-
fold_docs_get_revs(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, _) ->
fabric2_fdb:get_local_doc_rev_future(Db, DocId);
-
fold_docs_get_revs(Db, DocId, true) ->
fabric2_fdb:get_all_revs_future(Db, DocId);
-
fold_docs_get_revs(Db, DocId, false) ->
fabric2_fdb:get_winning_revs_future(Db, DocId, 1).
-
fold_docs_get_revs_wait(_Db, <<?LOCAL_DOC_PREFIX, _/binary>>, RevsFuture) ->
Rev = fabric2_fdb:get_local_doc_rev_wait(RevsFuture),
[Rev];
-
fold_docs_get_revs_wait(Db, _DocId, RevsFuture) ->
fabric2_fdb:get_revs_wait(Db, RevsFuture).
-
-fold_docs_get_doc_body_future(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId,
- [Rev]) ->
+fold_docs_get_doc_body_future(
+ Db,
+ <<?LOCAL_DOC_PREFIX, _/binary>> = DocId,
+ [Rev]
+) ->
fabric2_fdb:get_local_doc_body_future(Db, DocId, Rev);
-
fold_docs_get_doc_body_future(Db, DocId, Revs) ->
Winner = get_rev_winner(Revs),
fabric2_fdb:get_doc_body_future(Db, DocId, Winner).
-
-fold_docs_get_doc_body_wait(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, [Rev],
- _DocOpts, BodyFuture) ->
+fold_docs_get_doc_body_wait(
+ Db,
+ <<?LOCAL_DOC_PREFIX, _/binary>> = DocId,
+ [Rev],
+ _DocOpts,
+ BodyFuture
+) ->
case fabric2_fdb:get_local_doc_body_wait(Db, DocId, Rev, BodyFuture) of
{not_found, missing} -> {not_found, missing};
Doc -> {ok, Doc}
end;
-
fold_docs_get_doc_body_wait(Db, DocId, Revs, DocOpts, BodyFuture) ->
RevInfo = get_rev_winner(Revs),
- Base = fabric2_fdb:get_doc_body_wait(Db, DocId, RevInfo,
- BodyFuture),
+ Base = fabric2_fdb:get_doc_body_wait(
+ Db,
+ DocId,
+ RevInfo,
+ BodyFuture
+ ),
apply_open_doc_opts(Base, Revs, DocOpts).
-
drain_fold_docs_revs_futures(_TxDb, #{revs_count := C} = Acc) when C < 100 ->
Acc;
drain_fold_docs_revs_futures(TxDb, Acc) ->
drain_one_fold_docs_revs_future(TxDb, Acc).
-
drain_all_fold_docs_revs_futures(_TxDb, #{revs_count := C} = Acc) when C =< 0 ->
Acc;
drain_all_fold_docs_revs_futures(TxDb, #{revs_count := C} = Acc) when C > 0 ->
NewAcc = drain_one_fold_docs_revs_future(TxDb, Acc),
drain_all_fold_docs_revs_futures(TxDb, NewAcc).
-
drain_one_fold_docs_revs_future(TxDb, Acc) ->
#{
revs_q := RevsQ,
@@ -1316,13 +1343,14 @@ drain_one_fold_docs_revs_future(TxDb, Acc) ->
{{value, {DocId, RevsFuture}}, RestRevsQ} = queue:out(RevsQ),
Revs = fold_docs_get_revs_wait(TxDb, DocId, RevsFuture),
- DocFuture = case Revs of
- [] ->
- {DocId, [], not_found};
- [_ | _] ->
- BodyFuture = fold_docs_get_doc_body_future(TxDb, DocId, Revs),
- {DocId, Revs, BodyFuture}
- end,
+ DocFuture =
+ case Revs of
+ [] ->
+ {DocId, [], not_found};
+ [_ | _] ->
+ BodyFuture = fold_docs_get_doc_body_future(TxDb, DocId, Revs),
+ {DocId, Revs, BodyFuture}
+ end,
NewAcc = Acc#{
revs_q := RestRevsQ,
revs_count := RevsCount - 1,
@@ -1331,20 +1359,17 @@ drain_one_fold_docs_revs_future(TxDb, Acc) ->
},
drain_fold_docs_body_futures(TxDb, NewAcc).
-
drain_fold_docs_body_futures(_TxDb, #{body_count := C} = Acc) when C < 100 ->
Acc;
drain_fold_docs_body_futures(TxDb, Acc) ->
drain_one_fold_docs_body_future(TxDb, Acc).
-
drain_all_fold_docs_body_futures(_TxDb, #{body_count := C} = Acc) when C =< 0 ->
Acc;
drain_all_fold_docs_body_futures(TxDb, #{body_count := C} = Acc) when C > 0 ->
NewAcc = drain_one_fold_docs_body_future(TxDb, Acc),
drain_all_fold_docs_body_futures(TxDb, NewAcc).
-
drain_one_fold_docs_body_future(TxDb, Acc) ->
#{
body_q := BodyQ,
@@ -1354,12 +1379,13 @@ drain_one_fold_docs_body_future(TxDb, Acc) ->
user_acc := UserAcc
} = Acc,
{{value, {DocId, Revs, BodyFuture}}, RestBodyQ} = queue:out(BodyQ),
- Doc = case BodyFuture of
- not_found ->
- {not_found, missing};
- _ ->
- fold_docs_get_doc_body_wait(TxDb, DocId, Revs, DocOpts, BodyFuture)
- end,
+ Doc =
+ case BodyFuture of
+ not_found ->
+ {not_found, missing};
+ _ ->
+ fold_docs_get_doc_body_wait(TxDb, DocId, Revs, DocOpts, BodyFuture)
+ end,
NewUserAcc = maybe_stop(UserFun(DocId, Doc, UserAcc)),
Acc#{
body_q := RestBodyQ,
@@ -1367,14 +1393,15 @@ drain_one_fold_docs_body_future(TxDb, Acc) ->
user_acc := NewUserAcc
}.
-
get_rev_winner(Revs) ->
- [Winner] = lists:filter(fun(Rev) ->
- maps:get(winner, Rev)
- end, Revs),
+ [Winner] = lists:filter(
+ fun(Rev) ->
+ maps:get(winner, Rev)
+ end,
+ Revs
+ ),
Winner.
-
new_revid(Db, Doc) ->
#doc{
id = DocId,
@@ -1384,33 +1411,41 @@ new_revid(Db, Doc) ->
deleted = Deleted
} = Doc,
- {NewAtts, AttSigInfo} = lists:mapfoldl(fun(Att, Acc) ->
- [Name, Type, Data, Md5] = couch_att:fetch([name, type, data, md5], Att),
- case Data of
- {loc, _, _, _} ->
- {Att, [{Name, Type, Md5} | Acc]};
- _ ->
- Att1 = couch_att:flush(Db, DocId, Att),
- Att2 = couch_att:store(revpos, OldStart + 1, Att1),
- {Att2, [{Name, Type, couch_att:fetch(md5, Att2)} | Acc]}
- end
- end, [], Atts),
+ {NewAtts, AttSigInfo} = lists:mapfoldl(
+ fun(Att, Acc) ->
+ [Name, Type, Data, Md5] = couch_att:fetch([name, type, data, md5], Att),
+ case Data of
+ {loc, _, _, _} ->
+ {Att, [{Name, Type, Md5} | Acc]};
+ _ ->
+ Att1 = couch_att:flush(Db, DocId, Att),
+ Att2 = couch_att:store(revpos, OldStart + 1, Att1),
+ {Att2, [{Name, Type, couch_att:fetch(md5, Att2)} | Acc]}
+ end
+ end,
+ [],
+ Atts
+ ),
- Rev = case length(Atts) == length(AttSigInfo) of
- true ->
- OldRev = case OldRevs of [] -> 0; [OldRev0 | _] -> OldRev0 end,
- SigTerm = [Deleted, OldStart, OldRev, Body, AttSigInfo],
- couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}]));
- false ->
- erlang:error(missing_att_info)
- end,
+ Rev =
+ case length(Atts) == length(AttSigInfo) of
+ true ->
+ OldRev =
+ case OldRevs of
+ [] -> 0;
+ [OldRev0 | _] -> OldRev0
+ end,
+ SigTerm = [Deleted, OldStart, OldRev, Body, AttSigInfo],
+ couch_hash:md5_hash(term_to_binary(SigTerm, [{minor_version, 1}]));
+ false ->
+ erlang:error(missing_att_info)
+ end,
Doc#doc{
revs = {OldStart + 1, [Rev | OldRevs]},
atts = NewAtts
}.
-
get_all_docs_meta(TxDb, Options) ->
NS = couch_util:get_value(namespace, Options),
DocCount = get_doc_count(TxDb, NS),
@@ -1422,12 +1457,10 @@ get_all_docs_meta(TxDb, Options) ->
[]
end ++ [{total, DocCount}, {offset, null}].
-
maybe_set_interactive(#{} = Db, Options) ->
Interactive = fabric2_util:get_value(interactive, Options, false),
Db#{interactive := Interactive}.
-
maybe_set_user_ctx(Db, Options) ->
case fabric2_util:get_value(user_ctx, Options) of
#user_ctx{} = UserCtx ->
@@ -1436,7 +1469,6 @@ maybe_set_user_ctx(Db, Options) ->
Db
end.
-
is_member(Db, {SecProps}) when is_list(SecProps) ->
case is_admin(Db, {SecProps}) of
true ->
@@ -1452,7 +1484,6 @@ is_member(Db, {SecProps}) when is_list(SecProps) ->
end
end.
-
is_authorized(Group, UserCtx) ->
#user_ctx{
name = UserName,
@@ -1467,7 +1498,6 @@ is_authorized(Group, UserCtx) ->
check_security(names, UserName, Names)
end.
-
check_security(roles, [], _) ->
false;
check_security(roles, UserRoles, Roles) ->
@@ -1481,7 +1511,6 @@ check_security(names, null, _) ->
check_security(names, UserName, Names) ->
lists:member(UserName, Names).
-
throw_security_error(#user_ctx{name = null} = UserCtx) ->
Reason = <<"You are not authorized to access this db.">>,
throw_security_error(UserCtx, Reason);
@@ -1489,29 +1518,24 @@ throw_security_error(#user_ctx{name = _} = UserCtx) ->
Reason = <<"You are not allowed to access this db.">>,
throw_security_error(UserCtx, Reason).
-
throw_security_error(#user_ctx{} = UserCtx, Reason) ->
Error = security_error_type(UserCtx),
throw({Error, Reason}).
-
security_error_type(#user_ctx{name = null}) ->
unauthorized;
security_error_type(#user_ctx{name = _}) ->
forbidden.
-
is_public_db(SecProps) ->
{Members} = get_members(SecProps),
Names = fabric2_util:get_value(<<"names">>, Members, []),
Roles = fabric2_util:get_value(<<"roles">>, Members, []),
Names =:= [] andalso Roles =:= [].
-
get_admins(SecProps) ->
fabric2_util:get_value(<<"admins">>, SecProps, {[]}).
-
get_members(SecProps) ->
    % we fall back to readers here for backwards compatibility
case fabric2_util:get_value(<<"members">>, SecProps) of
@@ -1521,7 +1545,6 @@ get_members(SecProps) ->
Members
end.
-
apply_open_doc_opts(Doc0, Revs, Options) ->
IncludeRevsInfo = lists:member(revs_info, Options),
IncludeConflicts = lists:member(conflicts, Options),
@@ -1530,54 +1553,80 @@ apply_open_doc_opts(Doc0, Revs, Options) ->
% This revs_info becomes fairly useless now that we're
% not keeping old document bodies around...
- Meta1 = if not IncludeRevsInfo -> []; true ->
- {Pos, [Rev | RevPath]} = Doc0#doc.revs,
- RevPathMissing = lists:map(fun(R) -> {R, missing} end, RevPath),
- [{revs_info, Pos, [{Rev, available} | RevPathMissing]}]
- end,
+ Meta1 =
+ if
+ not IncludeRevsInfo ->
+ [];
+ true ->
+ {Pos, [Rev | RevPath]} = Doc0#doc.revs,
+ RevPathMissing = lists:map(fun(R) -> {R, missing} end, RevPath),
+ [{revs_info, Pos, [{Rev, available} | RevPathMissing]}]
+ end,
- Meta2 = if not IncludeConflicts -> []; true ->
- Conflicts = [RI || RI = #{winner := false, deleted := false} <- Revs],
- if Conflicts == [] -> []; true ->
- ConflictRevs = [maps:get(rev_id, RI) || RI <- Conflicts],
- [{conflicts, ConflictRevs}]
- end
- end,
+ Meta2 =
+ if
+ not IncludeConflicts ->
+ [];
+ true ->
+ Conflicts = [RI || RI = #{winner := false, deleted := false} <- Revs],
+ if
+ Conflicts == [] ->
+ [];
+ true ->
+ ConflictRevs = [maps:get(rev_id, RI) || RI <- Conflicts],
+ [{conflicts, ConflictRevs}]
+ end
+ end,
- Meta3 = if not IncludeDelConflicts -> []; true ->
- DelConflicts = [RI || RI = #{winner := false, deleted := true} <- Revs],
- if DelConflicts == [] -> []; true ->
- DelConflictRevs = [maps:get(rev_id, RI) || RI <- DelConflicts],
- [{deleted_conflicts, DelConflictRevs}]
- end
- end,
+ Meta3 =
+ if
+ not IncludeDelConflicts ->
+ [];
+ true ->
+ DelConflicts = [RI || RI = #{winner := false, deleted := true} <- Revs],
+ if
+ DelConflicts == [] ->
+ [];
+ true ->
+ DelConflictRevs = [maps:get(rev_id, RI) || RI <- DelConflicts],
+ [{deleted_conflicts, DelConflictRevs}]
+ end
+ end,
- Meta4 = if not IncludeLocalSeq -> []; true ->
- #{winner := true, sequence := SeqVS} = lists:last(Revs),
- [{local_seq, fabric2_fdb:vs_to_seq(SeqVS)}]
- end,
+ Meta4 =
+ if
+ not IncludeLocalSeq ->
+ [];
+ true ->
+ #{winner := true, sequence := SeqVS} = lists:last(Revs),
+ [{local_seq, fabric2_fdb:vs_to_seq(SeqVS)}]
+ end,
- Doc1 = case lists:keyfind(atts_since, 1, Options) of
- {_, PossibleAncestors} ->
- #doc{
- revs = DocRevs,
- atts = Atts0
- } = Doc0,
- RevPos = find_ancestor_rev_pos(DocRevs, PossibleAncestors),
- Atts1 = lists:map(fun(Att) ->
- [AttPos, Data] = couch_att:fetch([revpos, data], Att),
- if AttPos > RevPos -> couch_att:store(data, Data, Att);
- true -> couch_att:store(data, stub, Att)
- end
- end, Atts0),
- Doc0#doc{atts = Atts1};
- false ->
- Doc0
- end,
+ Doc1 =
+ case lists:keyfind(atts_since, 1, Options) of
+ {_, PossibleAncestors} ->
+ #doc{
+ revs = DocRevs,
+ atts = Atts0
+ } = Doc0,
+ RevPos = find_ancestor_rev_pos(DocRevs, PossibleAncestors),
+ Atts1 = lists:map(
+ fun(Att) ->
+ [AttPos, Data] = couch_att:fetch([revpos, data], Att),
+ if
+ AttPos > RevPos -> couch_att:store(data, Data, Att);
+ true -> couch_att:store(data, stub, Att)
+ end
+ end,
+ Atts0
+ ),
+ Doc0#doc{atts = Atts1};
+ false ->
+ Doc0
+ end,
{ok, Doc1#doc{meta = Meta1 ++ Meta2 ++ Meta3 ++ Meta4}}.
-
find_ancestor_rev_pos({_, []}, _PossibleAncestors) ->
0;
find_ancestor_rev_pos(_DocRevs, []) ->
@@ -1588,29 +1637,33 @@ find_ancestor_rev_pos({RevPos, [RevId | Rest]}, AttsSinceRevs) ->
false -> find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
end.
-
filter_found_revs(RevInfo, Revs) ->
#{
rev_id := {Pos, Rev},
rev_path := RevPath
} = RevInfo,
FullRevPath = [Rev | RevPath],
- lists:flatmap(fun({FindPos, FindRev} = RevIdToFind) ->
- if FindPos > Pos -> [RevIdToFind]; true ->
- % Add 1 because lists:nth is 1 based
- Idx = Pos - FindPos + 1,
- case Idx > length(FullRevPath) of
- true ->
+ lists:flatmap(
+ fun({FindPos, FindRev} = RevIdToFind) ->
+ if
+ FindPos > Pos ->
[RevIdToFind];
- false ->
- case lists:nth(Idx, FullRevPath) == FindRev of
- true -> [];
- false -> [RevIdToFind]
+ true ->
+ % Add 1 because lists:nth is 1 based
+ Idx = Pos - FindPos + 1,
+ case Idx > length(FullRevPath) of
+ true ->
+ [RevIdToFind];
+ false ->
+ case lists:nth(Idx, FullRevPath) == FindRev of
+ true -> [];
+ false -> [RevIdToFind]
+ end
end
end
- end
- end, Revs).
-
+ end,
+ Revs
+ ).
find_possible_ancestors(RevInfos, MissingRevs) ->
% Find any revinfos that are possible ancestors
@@ -1624,45 +1677,52 @@ find_possible_ancestors(RevInfos, MissingRevs) ->
% Since we are looking at any missing revision
% we can just compare against the maximum missing
% start position.
- MaxMissingPos = case MissingRevs of
- [] -> 0;
- [_ | _] -> lists:max([Start || {Start, _Rev} <- MissingRevs])
- end,
- lists:flatmap(fun(RevInfo) ->
- #{rev_id := {RevPos, _} = RevId} = RevInfo,
- case RevPos < MaxMissingPos of
- true -> [RevId];
- false -> []
- end
- end, RevInfos).
-
+ MaxMissingPos =
+ case MissingRevs of
+ [] -> 0;
+ [_ | _] -> lists:max([Start || {Start, _Rev} <- MissingRevs])
+ end,
+ lists:flatmap(
+ fun(RevInfo) ->
+ #{rev_id := {RevPos, _} = RevId} = RevInfo,
+ case RevPos < MaxMissingPos of
+ true -> [RevId];
+ false -> []
+ end
+ end,
+ RevInfos
+ ).
apply_before_doc_update(Db, Docs, Options) ->
- UpdateType = case lists:member(replicated_changes, Options) of
- true -> replicated_changes;
- false -> interactive_edit
- end,
- lists:map(fun(Doc) ->
- fabric2_db_plugin:before_doc_update(Db, Doc, UpdateType)
- end, Docs).
-
+ UpdateType =
+ case lists:member(replicated_changes, Options) of
+ true -> replicated_changes;
+ false -> interactive_edit
+ end,
+ lists:map(
+ fun(Doc) ->
+ fabric2_db_plugin:before_doc_update(Db, Doc, UpdateType)
+ end,
+ Docs
+ ).
update_doc_int(#{} = Db, #doc{} = Doc, Options) ->
- IsLocal = case Doc#doc.id of
- <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
- _ -> false
- end,
+ IsLocal =
+ case Doc#doc.id of
+ <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
try
case {IsLocal, is_replicated(Options)} of
{false, false} -> update_doc_interactive(Db, Doc, Options);
{false, true} -> update_doc_replicated(Db, Doc, Options);
{true, _} -> update_local_doc(Db, Doc, Options)
end
- catch throw:{?MODULE, Return} ->
- Return
+ catch
+ throw:{?MODULE, Return} ->
+ Return
end.
-
batch_update_docs(Db, Docs, Options) ->
BAcc = #bacc{
db = Db,
@@ -1676,10 +1736,8 @@ batch_update_docs(Db, Docs, Options) ->
#bacc{results = Res} = batch_update_docs(BAcc),
lists:reverse(Res).
-
batch_update_docs(#bacc{docs = []} = BAcc) ->
BAcc;
-
batch_update_docs(#bacc{db = Db} = BAcc) ->
#bacc{
db = Db,
@@ -1706,9 +1764,12 @@ batch_update_docs(#bacc{db = Db} = BAcc) ->
end),
% Clean up after the transaction ends so we can recurse with a clean state
- maps:map(fun(Tag, RangeFuture) when is_reference(Tag) ->
- ok = erlfdb:cancel(RangeFuture, [flush])
- end, BAccTx2#bacc.rev_futures),
+ maps:map(
+ fun(Tag, RangeFuture) when is_reference(Tag) ->
+ ok = erlfdb:cancel(RangeFuture, [flush])
+ end,
+ BAccTx2#bacc.rev_futures
+ ),
BAcc1 = BAccTx2#bacc{
db = Db,
@@ -1717,10 +1778,8 @@ batch_update_docs(#bacc{db = Db} = BAcc) ->
batch_update_docs(BAcc1).
-
batch_update_interactive_tx(#bacc{docs = []} = BAcc) ->
BAcc;
-
batch_update_interactive_tx(#bacc{} = BAcc) ->
#bacc{
db = TxDb,
@@ -1731,11 +1790,13 @@ batch_update_interactive_tx(#bacc{} = BAcc) ->
seen = Seen,
results = Results
} = BAcc,
- {Res, Seen1} = try
- update_docs_interactive(TxDb, Doc, Options, RevFutures, Seen)
- catch throw:{?MODULE, Return} ->
- {Return, Seen}
- end,
+ {Res, Seen1} =
+ try
+ update_docs_interactive(TxDb, Doc, Options, RevFutures, Seen)
+ catch
+ throw:{?MODULE, Return} ->
+ {Return, Seen}
+ end,
BAcc1 = BAcc#bacc{
docs = Docs,
results = [Res | Results],
@@ -1746,10 +1807,8 @@ batch_update_interactive_tx(#bacc{} = BAcc) ->
false -> batch_update_interactive_tx(BAcc1)
end.
-
batch_update_replicated_tx(#bacc{docs = []} = BAcc) ->
BAcc;
-
batch_update_replicated_tx(#bacc{} = BAcc) ->
#bacc{
db = TxDb,
@@ -1777,11 +1836,14 @@ batch_update_replicated_tx(#bacc{} = BAcc) ->
end
end.
-
-update_docs_interactive(Db, #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc,
- Options, _Futures, SeenIds) ->
+update_docs_interactive(
+ Db,
+ #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc,
+ Options,
+ _Futures,
+ SeenIds
+) ->
{update_local_doc(Db, Doc, Options), SeenIds};
-
update_docs_interactive(Db, Doc, Options, Futures, SeenIds) ->
case lists:member(Doc#doc.id, SeenIds) of
true ->
@@ -1796,40 +1858,45 @@ update_docs_interactive(Db, Doc, Options, Futures, SeenIds) ->
end
end.
-
update_doc_interactive(Db, Doc0, Options) ->
% Get the current winning revision. This is needed
% regardless of which branch we're updating. The extra
% revision we're grabbing is an optimization to
% save us a round trip if we end up deleting
% the winning revision branch.
- NumRevs = if Doc0#doc.deleted -> 2; true -> 1 end,
+ NumRevs =
+ if
+ Doc0#doc.deleted -> 2;
+ true -> 1
+ end,
Future = fabric2_fdb:get_winning_revs_future(Db, Doc0#doc.id, NumRevs),
update_doc_interactive(Db, Doc0, Future, Options).
-
update_doc_interactive(Db, Doc0, Future, _Options) ->
RevInfos = fabric2_fdb:get_revs_wait(Db, Future),
- {Winner, SecondPlace} = case RevInfos of
- [] -> {not_found, not_found};
- [WRI] -> {WRI, not_found};
- [WRI, SPRI] -> {WRI, SPRI}
- end,
- WinnerRevId = case Winner of
- not_found ->
- {0, <<>>};
- _ ->
- case maps:get(deleted, Winner) of
- true -> {0, <<>>};
- false -> maps:get(rev_id, Winner)
- end
- end,
+ {Winner, SecondPlace} =
+ case RevInfos of
+ [] -> {not_found, not_found};
+ [WRI] -> {WRI, not_found};
+ [WRI, SPRI] -> {WRI, SPRI}
+ end,
+ WinnerRevId =
+ case Winner of
+ not_found ->
+ {0, <<>>};
+ _ ->
+ case maps:get(deleted, Winner) of
+ true -> {0, <<>>};
+ false -> maps:get(rev_id, Winner)
+ end
+ end,
% Check that a revision was specified if required
Doc0RevId = doc_to_revid(Doc0),
HasRev = Doc0RevId =/= {0, <<>>},
- if HasRev orelse WinnerRevId == {0, <<>>} -> ok; true ->
- ?RETURN({Doc0, conflict})
+ if
+ HasRev orelse WinnerRevId == {0, <<>>} -> ok;
+ true -> ?RETURN({Doc0, conflict})
end,
% Allow inserting new deleted documents. Only works when the document has
@@ -1840,38 +1907,40 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
end,
% Get the target revision to update
- Target = case Doc0RevId == WinnerRevId of
- true ->
- Winner;
- false ->
- case fabric2_fdb:get_non_deleted_rev(Db, Doc0#doc.id, Doc0RevId) of
- #{deleted := false} = Target0 ->
- Target0;
- not_found ->
- % Either a missing revision or a deleted
- % revision. Either way a conflict. Note
- % that we get not_found for a deleted revision
- % because we only check for the non-deleted
- % key in fdb
- ?RETURN({Doc0, conflict})
- end
- end,
+ Target =
+ case Doc0RevId == WinnerRevId of
+ true ->
+ Winner;
+ false ->
+ case fabric2_fdb:get_non_deleted_rev(Db, Doc0#doc.id, Doc0RevId) of
+ #{deleted := false} = Target0 ->
+ Target0;
+ not_found ->
+ % Either a missing revision or a deleted
+ % revision. Either way a conflict. Note
+ % that we get not_found for a deleted revision
+ % because we only check for the non-deleted
+ % key in fdb
+ ?RETURN({Doc0, conflict})
+ end
+ end,
- Doc1 = case Winner of
- #{deleted := true} when not Doc0#doc.deleted ->
- % When recreating a deleted document we want to extend
- % the winning revision branch rather than create a
- % new branch. If we did not do this we could be
- % recreating into a state that previously existed.
- Doc0#doc{revs = fabric2_util:revinfo_to_revs(Winner)};
- #{} ->
- % Otherwise we're extending the target's revision
- % history with this update
- Doc0#doc{revs = fabric2_util:revinfo_to_revs(Target)};
- not_found ->
- % Creating a new doc means our revs start empty
- Doc0
- end,
+ Doc1 =
+ case Winner of
+ #{deleted := true} when not Doc0#doc.deleted ->
+ % When recreating a deleted document we want to extend
+ % the winning revision branch rather than create a
+ % new branch. If we did not do this we could be
+ % recreating into a state that previously existed.
+ Doc0#doc{revs = fabric2_util:revinfo_to_revs(Winner)};
+ #{} ->
+ % Otherwise we're extending the target's revision
+ % history with this update
+ Doc0#doc{revs = fabric2_util:revinfo_to_revs(Target)};
+ not_found ->
+ % Creating a new doc means our revs start empty
+ Doc0
+ end,
% Validate the doc update and create the
% new revinfo map
@@ -1898,44 +1967,54 @@ update_doc_interactive(Db, Doc0, Future, _Options) ->
},
    % Gather the list of possible winning revisions
- Possible = case Target == Winner of
- true when not Doc4#doc.deleted ->
- [NewRevInfo];
- true when Doc4#doc.deleted ->
- case SecondPlace of
- #{} -> [NewRevInfo, SecondPlace];
- not_found -> [NewRevInfo]
- end;
- false ->
- [NewRevInfo, Winner]
- end,
+ Possible =
+ case Target == Winner of
+ true when not Doc4#doc.deleted ->
+ [NewRevInfo];
+ true when Doc4#doc.deleted ->
+ case SecondPlace of
+ #{} -> [NewRevInfo, SecondPlace];
+ not_found -> [NewRevInfo]
+ end;
+ false ->
+ [NewRevInfo, Winner]
+ end,
% Sort the rev infos such that the winner is first
- {NewWinner0, NonWinner} = case fabric2_util:sort_revinfos(Possible) of
- [W] -> {W, not_found};
- [W, NW] -> {W, NW}
- end,
+ {NewWinner0, NonWinner} =
+ case fabric2_util:sort_revinfos(Possible) of
+ [W] -> {W, not_found};
+ [W, NW] -> {W, NW}
+ end,
- BranchCount = case Winner of
- not_found -> 1;
- #{branch_count := BC} -> BC
- end,
+ BranchCount =
+ case Winner of
+ not_found -> 1;
+ #{branch_count := BC} -> BC
+ end,
NewWinner = NewWinner0#{branch_count := BranchCount},
- ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
- ToRemove = if Target == not_found -> []; true -> [Target] end,
+ ToUpdate =
+ if
+ NonWinner == not_found -> [];
+ true -> [NonWinner]
+ end,
+ ToRemove =
+ if
+ Target == not_found -> [];
+ true -> [Target]
+ end,
ok = fabric2_fdb:write_doc(
- Db,
- Doc4,
- NewWinner,
- Winner,
- ToUpdate,
- ToRemove
- ),
+ Db,
+ Doc4,
+ NewWinner,
+ Winner,
+ ToUpdate,
+ ToRemove
+ ),
{ok, {NewRevPos, NewRev}}.
-
update_doc_replicated(Db, Doc0, _Options) ->
#doc{
id = DocId,
@@ -1957,27 +2036,34 @@ update_doc_replicated(Db, Doc0, _Options) ->
AllRevInfos = fabric2_fdb:get_all_revs(Db, DocId),
- RevTree = lists:foldl(fun(RI, TreeAcc) ->
- RIPath = fabric2_util:revinfo_to_path(RI),
- {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
- Merged
- end, [], AllRevInfos),
+ RevTree = lists:foldl(
+ fun(RI, TreeAcc) ->
+ RIPath = fabric2_util:revinfo_to_path(RI),
+ {Merged, _} = couch_key_tree:merge(TreeAcc, RIPath),
+ Merged
+ end,
+ [],
+ AllRevInfos
+ ),
DocRevPath = fabric2_util:revinfo_to_path(DocRevInfo0),
{NewTree, Status} = couch_key_tree:merge(RevTree, DocRevPath),
- if Status /= internal_node -> ok; true ->
- % We already know this revision so nothing
- % left to do.
- ?RETURN({Doc0, {ok, []}})
+ if
+ Status /= internal_node ->
+ ok;
+ true ->
+ % We already know this revision so nothing
+ % left to do.
+ ?RETURN({Doc0, {ok, []}})
end,
    % It's possible to have a replication with fewer than $revs_limit
% revisions which extends an existing branch. To avoid
% losing revision history we extract the new node from the
% tree and use the combined path after stemming.
- {[{_, {RevPos, UnstemmedRevs}}], []}
- = couch_key_tree:get(NewTree, [{RevPos, Rev}]),
+ {[{_, {RevPos, UnstemmedRevs}}], []} =
+ couch_key_tree:get(NewTree, [{RevPos, Rev}]),
Doc1 = stem_revisions(Db, Doc0#doc{revs = {RevPos, UnstemmedRevs}}),
@@ -1998,114 +2084,142 @@ update_doc_replicated(Db, Doc0, _Options) ->
% Possible winners are the previous winner and
% the new DocRevInfo
- Winner = case fabric2_util:sort_revinfos(AllRevInfos) of
- [#{winner := true} = WRI | _] -> WRI;
- [] -> not_found
- end,
- {NewWinner0, NonWinner} = case Winner == PrevRevInfo of
- true ->
- {DocRevInfo2, not_found};
- false ->
- [W, NW] = fabric2_util:sort_revinfos([Winner, DocRevInfo2]),
- {W, NW}
- end,
+ Winner =
+ case fabric2_util:sort_revinfos(AllRevInfos) of
+ [#{winner := true} = WRI | _] -> WRI;
+ [] -> not_found
+ end,
+ {NewWinner0, NonWinner} =
+ case Winner == PrevRevInfo of
+ true ->
+ {DocRevInfo2, not_found};
+ false ->
+ [W, NW] = fabric2_util:sort_revinfos([Winner, DocRevInfo2]),
+ {W, NW}
+ end,
NewWinner = NewWinner0#{branch_count := length(AllLeafsFull)},
- ToUpdate = if NonWinner == not_found -> []; true -> [NonWinner] end,
- ToRemove = if PrevRevInfo == not_found -> []; true -> [PrevRevInfo] end,
+ ToUpdate =
+ if
+ NonWinner == not_found -> [];
+ true -> [NonWinner]
+ end,
+ ToRemove =
+ if
+ PrevRevInfo == not_found -> [];
+ true -> [PrevRevInfo]
+ end,
ok = fabric2_fdb:write_doc(
- Db,
- Doc3,
- NewWinner,
- Winner,
- ToUpdate,
- ToRemove
- ),
+ Db,
+ Doc3,
+ NewWinner,
+ Winner,
+ ToUpdate,
+ ToRemove
+ ),
{ok, []}.
-
update_local_doc(Db, Doc0, _Options) ->
- Doc1 = case increment_local_doc_rev(Doc0) of
- {ok, Updated} -> Updated;
- {error, Error} -> ?RETURN({Doc0, Error})
- end,
+ Doc1 =
+ case increment_local_doc_rev(Doc0) of
+ {ok, Updated} -> Updated;
+ {error, Error} -> ?RETURN({Doc0, Error})
+ end,
ok = fabric2_fdb:write_local_doc(Db, Doc1),
#doc{revs = {0, [Rev]}} = Doc1,
{ok, {0, integer_to_binary(Rev)}}.
-
flush_doc_atts(Db, Doc) ->
#doc{
id = DocId,
atts = Atts
} = Doc,
- NewAtts = lists:map(fun(Att) ->
- case couch_att:fetch(data, Att) of
- {loc, _, _, _} ->
- Att;
- _ ->
- couch_att:flush(Db, DocId, Att)
- end
- end, Atts),
+ NewAtts = lists:map(
+ fun(Att) ->
+ case couch_att:fetch(data, Att) of
+ {loc, _, _, _} ->
+ Att;
+ _ ->
+ couch_att:flush(Db, DocId, Att)
+ end
+ end,
+ Atts
+ ),
Doc#doc{atts = NewAtts}.
-
get_winning_rev_futures(Db, Docs) ->
- lists:foldl(fun(Doc, Acc) ->
- #doc{
- id = DocId,
- deleted = Deleted
- } = Doc,
- IsLocal = case DocId of
- <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
- _ -> false
+ lists:foldl(
+ fun(Doc, Acc) ->
+ #doc{
+ id = DocId,
+ deleted = Deleted
+ } = Doc,
+ IsLocal =
+ case DocId of
+ <<?LOCAL_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
+ if
+ IsLocal ->
+ Acc;
+ true ->
+ NumRevs =
+ if
+ Deleted -> 2;
+ true -> 1
+ end,
+ Future = fabric2_fdb:get_winning_revs_future(Db, DocId, NumRevs),
+ DocTag = doc_tag(Doc),
+ Acc#{DocTag => Future}
+ end
end,
- if IsLocal -> Acc; true ->
- NumRevs = if Deleted -> 2; true -> 1 end,
- Future = fabric2_fdb:get_winning_revs_future(Db, DocId, NumRevs),
- DocTag = doc_tag(Doc),
- Acc#{DocTag => Future}
- end
- end, #{}, Docs).
-
+ #{},
+ Docs
+ ).
prep_and_validate(Db, NewDoc, PrevRevInfo) ->
HasStubs = couch_doc:has_stubs(NewDoc),
HasVDUs = [] /= maps:get(validate_doc_update_funs, Db),
- IsDDoc = case NewDoc#doc.id of
- <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
- _ -> false
- end,
+ IsDDoc =
+ case NewDoc#doc.id of
+ <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
- WasDeleted = case PrevRevInfo of
- not_found -> false;
- #{deleted := D} -> D
- end,
+ WasDeleted =
+ case PrevRevInfo of
+ not_found -> false;
+ #{deleted := D} -> D
+ end,
- PrevDoc = case HasStubs orelse (HasVDUs and not IsDDoc) of
- true when PrevRevInfo /= not_found, not WasDeleted ->
- case fabric2_fdb:get_doc_body(Db, NewDoc#doc.id, PrevRevInfo) of
- #doc{} = PDoc -> PDoc;
- {not_found, _} -> nil
- end;
- _ ->
- nil
- end,
+ PrevDoc =
+ case HasStubs orelse (HasVDUs and not IsDDoc) of
+ true when PrevRevInfo /= not_found, not WasDeleted ->
+ case fabric2_fdb:get_doc_body(Db, NewDoc#doc.id, PrevRevInfo) of
+ #doc{} = PDoc -> PDoc;
+ {not_found, _} -> nil
+ end;
+ _ ->
+ nil
+ end,
- MergedDoc = if not HasStubs -> NewDoc; true ->
- % This will throw an error if we have any
- % attachment stubs missing data
- couch_doc:merge_stubs(NewDoc, PrevDoc)
- end,
+ MergedDoc =
+ if
+ not HasStubs ->
+ NewDoc;
+ true ->
+ % This will throw an error if we have any
+ % attachment stubs missing data
+ couch_doc:merge_stubs(NewDoc, PrevDoc)
+ end,
check_duplicate_attachments(MergedDoc),
validate_doc_update(Db, MergedDoc, PrevDoc),
MergedDoc.
-
validate_doc_update(Db, #doc{id = <<"_design/", _/binary>>} = Doc, _) ->
case catch check_is_admin(Db) of
ok -> validate_ddoc(Db, Doc);
@@ -2118,23 +2232,27 @@ validate_doc_update(Db, Doc, PrevDoc) ->
} = Db,
Fun = fun() ->
JsonCtx = fabric2_util:user_ctx_to_json(Db),
- lists:map(fun(VDU) ->
- try
- case VDU(Doc, PrevDoc, JsonCtx, Security) of
- ok -> ok;
- Error1 -> throw(Error1)
+ lists:map(
+ fun(VDU) ->
+ try
+ case VDU(Doc, PrevDoc, JsonCtx, Security) of
+ ok -> ok;
+ Error1 -> throw(Error1)
+ end
+ catch
+ throw:Error2 ->
+ ?RETURN({Doc, Error2})
end
- catch throw:Error2 ->
- ?RETURN({Doc, Error2})
- end
- end, VDUs)
+ end,
+ VDUs
+ )
end,
Stat = [couchdb, query_server, vdu_process_time],
- if VDUs == [] -> ok; true ->
- couch_stats:update_histogram(Stat, Fun)
+ if
+ VDUs == [] -> ok;
+ true -> couch_stats:update_histogram(Stat, Fun)
end.
-
validate_ddoc(Db, DDoc) ->
try
ok = couch_views_validate:validate_ddoc(Db, DDoc)
@@ -2147,7 +2265,6 @@ validate_ddoc(Db, DDoc) ->
?RETURN({DDoc, Error})
end.
-
validate_atomic_update(_, false) ->
ok;
validate_atomic_update(AllDocs, true) ->
@@ -2155,49 +2272,50 @@ validate_atomic_update(AllDocs, true) ->
% to basically extract the prep_and_validate_updates function from couch_db
% and only run that, without actually writing in case of a success.
Error = {not_implemented, <<"all_or_nothing is not supported">>},
- PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
- case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
- {{Id, {Pos, RevId}}, Error}
- end, AllDocs),
+ PreCommitFailures = lists:map(
+ fun(#doc{id = Id, revs = {Pos, Revs}}) ->
+ case Revs of
+ [] -> RevId = <<>>;
+ [RevId | _] -> ok
+ end,
+ {{Id, {Pos, RevId}}, Error}
+ end,
+ AllDocs
+ ),
throw({aborted, PreCommitFailures}).
-
check_duplicate_attachments(#doc{atts = Atts}) ->
- lists:foldl(fun(Att, Names) ->
- Name = couch_att:fetch(name, Att),
- case ordsets:is_element(Name, Names) of
- true -> throw({bad_request, <<"Duplicate attachments">>});
- false -> ordsets:add_element(Name, Names)
- end
- end, ordsets:new(), Atts).
-
+ lists:foldl(
+ fun(Att, Names) ->
+ Name = couch_att:fetch(name, Att),
+ case ordsets:is_element(Name, Names) of
+ true -> throw({bad_request, <<"Duplicate attachments">>});
+ false -> ordsets:add_element(Name, Names)
+ end
+ end,
+ ordsets:new(),
+ Atts
+ ).
get_since_seq(Db, rev, <<>>) ->
get_since_seq(Db, rev, now);
-
-get_since_seq(_Db, _Dir, Seq) when Seq == <<>>; Seq == <<"0">>; Seq == 0->
+get_since_seq(_Db, _Dir, Seq) when Seq == <<>>; Seq == <<"0">>; Seq == 0 ->
fabric2_util:seq_zero_vs();
-
get_since_seq(Db, Dir, Seq) when Seq == now; Seq == <<"now">> ->
CurrSeq = fabric2_fdb:get_last_change(Db),
get_since_seq(Db, Dir, CurrSeq);
-
get_since_seq(_Db, _Dir, Seq) when is_binary(Seq), size(Seq) == 24 ->
fabric2_fdb:next_vs(fabric2_fdb:seq_to_vs(Seq));
-
get_since_seq(Db, Dir, List) when is_list(List) ->
get_since_seq(Db, Dir, list_to_binary(List));
-
get_since_seq(_Db, _Dir, Seq) ->
erlang:error({invalid_since_seq, Seq}).
-
get_leaf_path(Pos, Rev, [{Pos, [{Rev, _RevInfo} | LeafPath]} | _]) ->
LeafPath;
get_leaf_path(Pos, Rev, [_WrongLeaf | RestLeafs]) ->
get_leaf_path(Pos, Rev, RestLeafs).
-
find_prev_revinfo(_Pos, []) ->
not_found;
find_prev_revinfo(Pos, [{_Rev, ?REV_MISSING} | RestPath]) ->
@@ -2205,7 +2323,6 @@ find_prev_revinfo(Pos, [{_Rev, ?REV_MISSING} | RestPath]) ->
find_prev_revinfo(_Pos, [{_Rev, #{} = RevInfo} | _]) ->
RevInfo.
-
increment_local_doc_rev(#doc{deleted = true} = Doc) ->
{ok, Doc#doc{revs = {0, [0]}}};
increment_local_doc_rev(#doc{revs = {0, []}} = Doc) ->
@@ -2214,20 +2331,19 @@ increment_local_doc_rev(#doc{revs = {0, [RevStr | _]}} = Doc) ->
try
PrevRev = binary_to_integer(RevStr),
{ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
- catch error:badarg ->
- {error, <<"Invalid rev format">>}
+ catch
+ error:badarg ->
+ {error, <<"Invalid rev format">>}
end;
increment_local_doc_rev(#doc{}) ->
{error, <<"Invalid rev format">>}.
-
doc_to_revid(#doc{revs = Revs}) ->
case Revs of
{0, []} -> {0, <<>>};
{RevPos, [Rev | _]} -> {RevPos, Rev}
end.
-
tag_docs([]) ->
[];
tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
@@ -2235,50 +2351,41 @@ tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
NewDoc = Doc#doc{meta = Meta1},
[NewDoc | tag_docs(Rest)].
-
doc_tag(#doc{meta = Meta}) ->
fabric2_util:get_value(ref, Meta).
-
idrevs({Id, Revs}) when is_list(Revs) ->
{docid(Id), [rev(R) || R <- Revs]}.
-
docid(DocId) when is_list(DocId) ->
list_to_binary(DocId);
docid(DocId) ->
DocId.
-
rev(Rev) when is_list(Rev); is_binary(Rev) ->
couch_doc:parse_rev(Rev);
rev({Seq, Hash} = Rev) when is_integer(Seq), is_binary(Hash) ->
Rev.
-
maybe_stop({ok, Acc}) ->
Acc;
maybe_stop({stop, Acc}) ->
throw({stop, Acc}).
-
set_design_doc_keys(Options1) ->
Dir = couch_util:get_value(dir, Options1, fwd),
Options2 = set_design_doc_start_key(Options1, Dir),
set_design_doc_end_key(Options2, Dir).
-
set_design_doc_start_key(Options, fwd) ->
Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
Key2 = max(Key1, ?FIRST_DDOC_KEY),
lists:keystore(start_key, 1, Options, {start_key, Key2});
-
set_design_doc_start_key(Options, rev) ->
Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
Key2 = min(Key1, ?LAST_DDOC_KEY),
lists:keystore(start_key, 1, Options, {start_key, Key2}).
-
set_design_doc_end_key(Options, fwd) ->
case couch_util:get_value(end_key_gt, Options) of
undefined ->
@@ -2289,7 +2396,6 @@ set_design_doc_end_key(Options, fwd) ->
Key2 = min(EKeyGT, ?LAST_DDOC_KEY),
lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
end;
-
set_design_doc_end_key(Options, rev) ->
case couch_util:get_value(end_key_gt, Options) of
undefined ->
@@ -2301,7 +2407,6 @@ set_design_doc_end_key(Options, rev) ->
lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
end.
-
stem_revisions(#{} = Db, #doc{} = Doc) ->
#{revs_limit := RevsLimit} = Db,
#doc{revs = {RevPos, Revs}} = Doc,
@@ -2310,7 +2415,6 @@ stem_revisions(#{} = Db, #doc{} = Doc) ->
false -> Doc
end.
-
open_json_doc(Db, DocId, OpenOpts, DocOpts) ->
case fabric2_db:open_doc(Db, DocId, OpenOpts) of
{not_found, missing} ->
@@ -2321,7 +2425,6 @@ open_json_doc(Db, DocId, OpenOpts, DocOpts) ->
[{doc, couch_doc:to_json_obj(Doc, DocOpts)}]
end.
-
get_cached_db(#{} = Db, Opts) when is_list(Opts) ->
MaxAge = fabric2_util:get_value(max_age, Opts, 0),
Now = erlang:monotonic_time(millisecond),
@@ -2335,16 +2438,17 @@ get_cached_db(#{} = Db, Opts) when is_list(Opts) ->
end)
end.
-
is_replicated(Options) when is_list(Options) ->
lists:member(replicated_changes, Options).
-
get_batch_size(Options) ->
case fabric2_util:get_value(batch_size, Options) of
undefined ->
- config:get_integer("fabric", "update_docs_batch_size",
- ?DEFAULT_UPDATE_DOCS_BATCH_SIZE);
+ config:get_integer(
+ "fabric",
+ "update_docs_batch_size",
+ ?DEFAULT_UPDATE_DOCS_BATCH_SIZE
+ );
Val when is_integer(Val) ->
Val
end.
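
The fabric2_db.erl hunks above all reduce to two erlfmt conventions: a case, if, or try expression bound to a variable moves the keyword onto its own line with the clauses indented beneath it, and higher-order calls such as lists:foldl/3 that exceed the line width are broken with one argument per line. A minimal sketch of the post-format style (hypothetical module, not part of this commit):

-module(erlfmt_example).
-export([classify/1]).

%% Before erlfmt this body would have been written as:
%%   Kind = case Values of [] -> empty; _ -> nonempty end,
%%   Sum = lists:foldl(fun(V, Acc) -> Acc + V end, 0, Values),
classify(Values) when is_list(Values) ->
    %% The case keyword drops to its own line; clauses indent beneath it.
    Kind =
        case Values of
            [] -> empty;
            [_ | _] -> nonempty
        end,
    %% Fun, initial accumulator, and list each get their own line.
    Sum = lists:foldl(
        fun(V, Acc) ->
            Acc + V
        end,
        0,
        Values
    ),
    {Kind, Sum}.
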
diff --git a/src/fabric/src/fabric2_db_expiration.erl b/src/fabric/src/fabric2_db_expiration.erl
index b8a063c08..0344a2787 100644
--- a/src/fabric/src/fabric2_db_expiration.erl
+++ b/src/fabric/src/fabric2_db_expiration.erl
@@ -12,10 +12,8 @@
-module(fabric2_db_expiration).
-
-behaviour(gen_server).
-
-export([
start_link/0,
cleanup/1,
@@ -31,7 +29,6 @@
code_change/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("fabric/include/fabric2.hrl").
-include_lib("kernel/include/logger.hrl").
@@ -39,64 +36,56 @@
-define(JOB_TYPE, <<"db_expiration">>).
-define(JOB_ID, <<"db_expiration_job">>).
-define(DEFAULT_JOB_Version, 1).
--define(DEFAULT_RETENTION_SEC, 172800). % 48 hours
--define(DEFAULT_SCHEDULE_SEC, 3600). % 1 hour
+% 48 hours
+-define(DEFAULT_RETENTION_SEC, 172800).
+% 1 hour
+-define(DEFAULT_SCHEDULE_SEC, 3600).
-define(ERROR_RESCHEDULE_SEC, 5).
-define(CHECK_ENABLED_SEC, 2).
-define(JOB_TIMEOUT_SEC, 30).
-
-record(st, {
job
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init(_) ->
process_flag(trap_exit, true),
{ok, #st{job = undefined}, 0}.
-
terminate(_M, _St) ->
ok.
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info(timeout, #st{job = undefined} = St) ->
ok = wait_for_couch_jobs_app(),
ok = couch_jobs:set_type_timeout(?JOB_TYPE, ?JOB_TIMEOUT_SEC),
ok = maybe_add_job(),
Pid = spawn_link(?MODULE, cleanup, [is_enabled()]),
{noreply, St#st{job = Pid}};
-
handle_info({'EXIT', Pid, Exit}, #st{job = Pid} = St) ->
case Exit of
- normal -> ok;
+ normal ->
+ ok;
Error ->
?LOG_ERROR(#{what => job_error, details => Error}),
couch_log:error("~p : job error ~p", [?MODULE, Error])
end,
NewPid = spawn_link(?MODULE, cleanup, [is_enabled()]),
{noreply, St#st{job = NewPid}};
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
wait_for_couch_jobs_app() ->
% Because of a circular dependency between couch_jobs and fabric apps, wait
% for couch_jobs to initialize before continuing. If we refactor the
@@ -109,7 +98,6 @@ wait_for_couch_jobs_app() ->
wait_for_couch_jobs_app()
end.
-
maybe_add_job() ->
case couch_jobs:get_job_data(undefined, ?JOB_TYPE, job_id()) of
{error, not_found} ->
@@ -119,11 +107,9 @@ maybe_add_job() ->
ok
end.
-
cleanup(false) ->
timer:sleep(?CHECK_ENABLED_SEC * 1000),
exit(normal);
-
cleanup(true) ->
Now = erlang:system_time(second),
ScheduleSec = schedule_sec(),
@@ -140,8 +126,10 @@ cleanup(true) ->
job => Job,
details => Error
}),
- couch_log:error("~p : processing error ~p ~p ~p",
- [?MODULE, Job, Error, Stack]),
+ couch_log:error(
+ "~p : processing error ~p ~p ~p",
+ [?MODULE, Job, Error, Stack]
+ ),
ok = resubmit_job(Job, Data, ?ERROR_RESCHEDULE_SEC),
exit({job_error, Error, Stack})
end;
@@ -150,17 +138,15 @@ cleanup(true) ->
?MODULE:cleanup(is_enabled())
end.
-
resubmit_job(Job, Data, After) ->
Now = erlang:system_time(second),
SchedTime = Now + After,
couch_jobs_fdb:tx(couch_jobs_fdb:get_jtx(), fun(JTx) ->
- {ok, Job1} = couch_jobs:resubmit(JTx, Job, SchedTime),
- ok = couch_jobs:finish(JTx, Job1, Data)
+ {ok, Job1} = couch_jobs:resubmit(JTx, Job, SchedTime),
+ ok = couch_jobs:finish(JTx, Job1, Data)
end),
ok.
-
process_expirations(#{} = Job, #{} = Data) ->
Start = now_sec(),
Callback = fun(Value, LastUpdateAt) ->
@@ -178,34 +164,34 @@ process_expirations(#{} = Job, #{} = Data) ->
),
{ok, Job, Data}.
-
process_row(DbInfo) ->
DbName = proplists:get_value(db_name, DbInfo),
TimeStamp = proplists:get_value(timestamp, DbInfo),
Now = now_sec(),
Retention = retention_sec(),
Since = Now - Retention,
- case Since >= timestamp_to_sec(TimeStamp) of
+ case Since >= timestamp_to_sec(TimeStamp) of
true ->
?LOG_NOTICE(#{
what => expire_db,
db => DbName,
deleted_at => TimeStamp
}),
- couch_log:notice("Permanently deleting ~s database with"
- " timestamp ~s", [DbName, TimeStamp]),
+ couch_log:notice(
+ "Permanently deleting ~s database with"
+ " timestamp ~s",
+ [DbName, TimeStamp]
+ ),
ok = fabric2_db:delete(DbName, [{deleted_at, TimeStamp}]);
false ->
ok
end.
-
maybe_report_progress(Job, LastUpdateAt) ->
    % Update the job periodically so it doesn't expire
Now = now_sec(),
Progress = #{
<<"processed_at">> => Now
-
},
case (Now - LastUpdateAt) > (?JOB_TIMEOUT_SEC div 2) of
true ->
@@ -215,44 +201,47 @@ maybe_report_progress(Job, LastUpdateAt) ->
LastUpdateAt
end.
-
job_id() ->
JobVersion = job_version(),
<<?JOB_ID/binary, "-", JobVersion:16/integer>>.
-
now_sec() ->
Now = os:timestamp(),
Nowish = calendar:now_to_universal_time(Now),
calendar:datetime_to_gregorian_seconds(Nowish).
-
timestamp_to_sec(TimeStamp) ->
- <<Year:4/binary, "-", Month:2/binary, "-", Day:2/binary,
- "T",
- Hour:2/binary, ":", Minutes:2/binary, ":", Second:2/binary,
- "Z">> = TimeStamp,
+ <<Year:4/binary, "-", Month:2/binary, "-", Day:2/binary, "T", Hour:2/binary, ":",
+ Minutes:2/binary, ":", Second:2/binary, "Z">> = TimeStamp,
calendar:datetime_to_gregorian_seconds(
- {{?bin2int(Year), ?bin2int(Month), ?bin2int(Day)},
- {?bin2int(Hour), ?bin2int(Minutes), ?bin2int(Second)}}
+ {{?bin2int(Year), ?bin2int(Month), ?bin2int(Day)}, {
+ ?bin2int(Hour),
+ ?bin2int(Minutes),
+ ?bin2int(Second)
+ }}
).
-
is_enabled() ->
config:get_boolean("couchdb", "db_expiration_enabled", false).
-
job_version() ->
- config:get_integer("couchdb", "db_expiration_job_version",
- ?DEFAULT_JOB_Version).
-
+ config:get_integer(
+ "couchdb",
+ "db_expiration_job_version",
+ ?DEFAULT_JOB_Version
+ ).
retention_sec() ->
- config:get_integer("couchdb", "db_expiration_retention_sec",
- ?DEFAULT_RETENTION_SEC).
-
+ config:get_integer(
+ "couchdb",
+ "db_expiration_retention_sec",
+ ?DEFAULT_RETENTION_SEC
+ ).
schedule_sec() ->
- config:get_integer("couchdb", "db_expiration_schedule_sec",
- ?DEFAULT_SCHEDULE_SEC).
+ config:get_integer(
+ "couchdb",
+ "db_expiration_schedule_sec",
+ ?DEFAULT_SCHEDULE_SEC
+ ).
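
Beyond the argument-per-line call reflow, the fabric2_db_expiration.erl hunk shows that erlfmt has no trailing-comment style: annotations such as "% 48 hours" are hoisted onto the line above the define they describe. A hypothetical module (not part of this commit) in the resulting style:

-module(expiration_example).
-export([retention_sec/0]).

% 48 hours
-define(DEFAULT_RETENTION_SEC, 172800).

%% The comment now precedes the define instead of trailing it.
retention_sec() ->
    ?DEFAULT_RETENTION_SEC.
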
diff --git a/src/fabric/src/fabric2_db_plugin.erl b/src/fabric/src/fabric2_db_plugin.erl
index 095b94cf4..fd92f9284 100644
--- a/src/fabric/src/fabric2_db_plugin.erl
+++ b/src/fabric/src/fabric2_db_plugin.erl
@@ -24,13 +24,10 @@
is_valid_purge_client/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-define(SERVICE_ID, fabric2_db).
-
%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------
@@ -38,18 +35,14 @@
validate_dbname(DbName, Normalized, Default) ->
maybe_handle(validate_dbname, [DbName, Normalized], Default).
-
after_db_create(DbName, DbUUID) when is_binary(DbName), is_binary(DbUUID) ->
with_pipe(after_db_create, [DbName, DbUUID]).
-
after_db_delete(DbName, DbUUID) when is_binary(DbName), is_binary(DbUUID) ->
with_pipe(after_db_delete, [DbName, DbUUID]).
-
before_doc_update(_, #doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} = Doc, _) ->
Doc;
-
before_doc_update(Db, Doc0, UpdateType) ->
Fun = fabric2_db:get_before_doc_update_fun(Db),
case with_pipe(before_doc_update, [Doc0, Db, UpdateType]) of
@@ -59,11 +52,9 @@ before_doc_update(Db, Doc0, UpdateType) ->
Doc1
end.
-
-after_doc_write(Db, Doc, NewWinner, OldWinner, NewRevId, Seq)->
+after_doc_write(Db, Doc, NewWinner, OldWinner, NewRevId, Seq) ->
with_pipe(after_doc_write, [Db, Doc, NewWinner, OldWinner, NewRevId, Seq]).
-
after_doc_read(Db, Doc0) ->
Fun = fabric2_db:get_after_doc_read_fun(Db),
case with_pipe(after_doc_read, [Doc0, Db]) of
@@ -71,19 +62,16 @@ after_doc_read(Db, Doc0) ->
[Doc1, _Db] -> Doc1
end.
-
validate_docid(Id) ->
Handle = couch_epi:get_handle(?SERVICE_ID),
%% callbacks return true only if it specifically allow the given Id
couch_epi:any(Handle, ?SERVICE_ID, validate_docid, [Id], []).
-
check_is_admin(Db) ->
Handle = couch_epi:get_handle(?SERVICE_ID),
%% callbacks return true only if it specifically allow the given Id
couch_epi:any(Handle, ?SERVICE_ID, check_is_admin, [Db], []).
-
is_valid_purge_client(DbName, Props) ->
Handle = couch_epi:get_handle(?SERVICE_ID),
%% callbacks return true only if it specifically allow the given Id
@@ -103,10 +91,10 @@ do_apply(Func, Args, Opts) ->
maybe_handle(Func, Args, Default) ->
Handle = couch_epi:get_handle(?SERVICE_ID),
case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
- no_decision when is_function(Default) ->
- apply(Default, Args);
- no_decision ->
- Default;
- {decided, Result} ->
- Result
+ no_decision when is_function(Default) ->
+ apply(Default, Args);
+ no_decision ->
+ Default;
+ {decided, Result} ->
+ Result
end.
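
The fabric2_db_plugin.erl hunk is purely an indentation fix: erlfmt normalizes clause bodies to four-space steps, so the six-space case clauses in maybe_handle/3 are re-indented without any other change. A minimal sketch (invented names) of the normalized shape:

-module(indent_example).
-export([decide/1]).

decide(Result) ->
    %% Clauses sit four spaces in from the case keyword,
    %% their bodies a further four.
    case Result of
        no_decision ->
            default;
        {decided, Value} ->
            Value
    end.
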
diff --git a/src/fabric/src/fabric2_events.erl b/src/fabric/src/fabric2_events.erl
index e1198243a..0e4b05706 100644
--- a/src/fabric/src/fabric2_events.erl
+++ b/src/fabric/src/fabric2_events.erl
@@ -12,7 +12,6 @@
-module(fabric2_events).
-
-export([
link_listener/4,
stop_listener/1
@@ -23,10 +22,8 @@
poll/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-
link_listener(Mod, Fun, Acc, Options) ->
State = #{
dbname => fabric2_util:get_value(dbname, Options),
@@ -42,11 +39,9 @@ link_listener(Mod, Fun, Acc, Options) ->
end,
{ok, Pid}.
-
stop_listener(Pid) ->
Pid ! stop_listening.
-
init(Parent, #{dbname := DbName} = State) ->
{ok, Db} = fabric2_db:open(DbName, [?ADMIN_CTX]),
Since = fabric2_db:get_update_seq(Db),
@@ -54,7 +49,6 @@ init(Parent, #{dbname := DbName} = State) ->
Parent ! {self(), initialized},
poll(State#{since => Since}).
-
poll(#{} = State) ->
#{
dbname := DbName,
@@ -65,23 +59,25 @@ poll(#{} = State) ->
callback := Fun,
acc := Acc
} = State,
- {Resp, NewSince} = try
- Opts = [?ADMIN_CTX, {uuid, DbUUID}],
- case fabric2_db:open(DbName, Opts) of
- {ok, Db} ->
- case fabric2_db:get_update_seq(Db) of
- Since ->
- {{ok, Acc}, Since};
- Other ->
- {Mod:Fun(DbName, updated, Acc), Other}
- end;
- Error ->
- exit(Error)
- end
- catch error:database_does_not_exist ->
- Mod:Fun(DbName, deleted, Acc),
- {{stop, ok}, Since}
- end,
+ {Resp, NewSince} =
+ try
+ Opts = [?ADMIN_CTX, {uuid, DbUUID}],
+ case fabric2_db:open(DbName, Opts) of
+ {ok, Db} ->
+ case fabric2_db:get_update_seq(Db) of
+ Since ->
+ {{ok, Acc}, Since};
+ Other ->
+ {Mod:Fun(DbName, updated, Acc), Other}
+ end;
+ Error ->
+ exit(Error)
+ end
+ catch
+ error:database_does_not_exist ->
+ Mod:Fun(DbName, deleted, Acc),
+ {{stop, ok}, Since}
+ end,
receive
stop_listening ->
ok;
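
The fabric2_events.erl hunk shows the try/catch convention applied across this commit: the catch keyword sits on its own line with each Class:Reason clause indented beneath it, including when the whole expression is bound to a variable, as in poll/1 above. A hypothetical example in the same style:

-module(catch_example).
-export([safe_div/2]).

safe_div(A, B) ->
    %% The try expression is bound to Result; erlfmt keeps
    %% catch on its own line with the clause beneath it.
    Result =
        try
            {ok, A / B}
        catch
            error:badarith ->
                {error, division_by_zero}
        end,
    Result.
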
diff --git a/src/fabric/src/fabric2_fdb.erl b/src/fabric/src/fabric2_fdb.erl
index 8ccfeab91..ea0c80663 100644
--- a/src/fabric/src/fabric2_fdb.erl
+++ b/src/fabric/src/fabric2_fdb.erl
@@ -12,7 +12,6 @@
-module(fabric2_fdb).
-
-export([
transactional/1,
transactional/2,
@@ -86,11 +85,9 @@
debug_cluster/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("fabric2.hrl").
-
-record(fold_acc, {
db,
restart_tx,
@@ -113,62 +110,64 @@
retries = 0
}).
-
transactional(Fun) ->
do_transaction(Fun, undefined, #{}).
-
transactional(DbName, Fun) when is_binary(DbName), is_function(Fun) ->
transactional(DbName, #{}, Fun);
-
transactional(#{} = Db, Fun) when is_function(Fun) ->
transactional(Db, #{}, Fun).
-
-
transactional(DbName, #{} = TxOptions, Fun) when is_binary(DbName) ->
with_span(Fun, #{'db.name' => DbName}, fun() ->
- do_transaction(fun(Tx) ->
- Fun(init_db(Tx, DbName))
- end, undefined, TxOptions)
+ do_transaction(
+ fun(Tx) ->
+ Fun(init_db(Tx, DbName))
+ end,
+ undefined,
+ TxOptions
+ )
end);
-
transactional(#{tx := undefined} = Db, #{} = TxOptions, Fun) ->
DbName = maps:get(name, Db, undefined),
try
Db1 = refresh(Db),
Reopen = maps:get(reopen, Db1, false),
Db2 = maps:remove(reopen, Db1),
- LayerPrefix = case Reopen of
- true -> undefined;
- false -> maps:get(layer_prefix, Db2)
- end,
+ LayerPrefix =
+ case Reopen of
+ true -> undefined;
+ false -> maps:get(layer_prefix, Db2)
+ end,
with_span(Fun, #{'db.name' => DbName}, fun() ->
- do_transaction(fun(Tx) ->
- case Reopen of
- true -> Fun(reopen(Db2#{tx => Tx}));
- false -> Fun(Db2#{tx => Tx})
- end
- end, LayerPrefix, TxOptions)
- end)
- catch throw:{?MODULE, reopen} ->
- with_span('db.reopen', #{'db.name' => DbName}, fun() ->
- transactional(Db#{reopen => true}, Fun)
+ do_transaction(
+ fun(Tx) ->
+ case Reopen of
+ true -> Fun(reopen(Db2#{tx => Tx}));
+ false -> Fun(Db2#{tx => Tx})
+ end
+ end,
+ LayerPrefix,
+ TxOptions
+ )
end)
+ catch
+ throw:{?MODULE, reopen} ->
+ with_span('db.reopen', #{'db.name' => DbName}, fun() ->
+ transactional(Db#{reopen => true}, Fun)
+ end)
end;
transactional(#{tx := {erlfdb_snapshot, _}} = Db, #{} = _TxOptions, Fun) ->
DbName = maps:get(name, Db, undefined),
with_span(Fun, #{'db.name' => DbName}, fun() ->
Fun(Db)
end);
-
transactional(#{tx := {erlfdb_transaction, _}} = Db, #{} = _TxOptions, Fun) ->
DbName = maps:get(name, Db, undefined),
with_span(Fun, #{'db.name' => DbName}, fun() ->
Fun(Db)
end).
-
do_transaction(Fun, LayerPrefix, #{} = TxOptions) when is_function(Fun, 1) ->
Db = get_db_handle(),
try
@@ -194,21 +193,20 @@ do_transaction(Fun, LayerPrefix, #{} = TxOptions) when is_function(Fun, 1) ->
clear_transaction()
end.
-
apply_tx_options(Tx, #{} = TxOptions) ->
- maps:map(fun(K, V) ->
- erlfdb:set_option(Tx, K, V)
- end, TxOptions).
-
+ maps:map(
+ fun(K, V) ->
+ erlfdb:set_option(Tx, K, V)
+ end,
+ TxOptions
+ ).
with_snapshot(#{tx := {erlfdb_transaction, _} = Tx} = TxDb, Fun) ->
SSDb = TxDb#{tx := erlfdb:snapshot(Tx)},
Fun(SSDb);
-
with_snapshot(#{tx := {erlfdb_snapshot, _}} = SSDb, Fun) ->
Fun(SSDb).
-
create(#{} = Db0, Options) ->
#{
name := DbName,
@@ -245,14 +243,17 @@ create(#{} = Db0, Options) ->
{?DB_STATS, <<"sizes">>, <<"external">>, ?uint2bin(2)},
{?DB_STATS, <<"sizes">>, <<"views">>, ?uint2bin(0)}
],
- lists:foreach(fun
- ({P, K, V}) ->
- Key = erlfdb_tuple:pack({P, K}, DbPrefix),
- erlfdb:set(Tx, Key, V);
- ({P, S, K, V}) ->
- Key = erlfdb_tuple:pack({P, S, K}, DbPrefix),
- erlfdb:set(Tx, Key, V)
- end, Defaults),
+ lists:foreach(
+ fun
+ ({P, K, V}) ->
+ Key = erlfdb_tuple:pack({P, K}, DbPrefix),
+ erlfdb:set(Tx, Key, V);
+ ({P, S, K, V}) ->
+ Key = erlfdb_tuple:pack({P, S, K}, DbPrefix),
+ erlfdb:set(Tx, Key, V)
+ end,
+ Defaults
+ ),
UserCtx = fabric2_util:get_value(user_ctx, Options, #user_ctx{}),
Options1 = lists:keydelete(user_ctx, 1, Options),
@@ -277,7 +278,6 @@ create(#{} = Db0, Options) ->
},
aegis:init_db(Db2, Options).
-
open(#{} = Db0, Options) ->
#{
name := DbName,
@@ -286,10 +286,11 @@ open(#{} = Db0, Options) ->
} = Db1 = ensure_current(Db0, false),
DbKey = erlfdb_tuple:pack({?ALL_DBS, DbName}, LayerPrefix),
- DbPrefix = case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
- Bin when is_binary(Bin) -> Bin;
- not_found -> erlang:error(database_does_not_exist)
- end,
+ DbPrefix =
+ case erlfdb:wait(erlfdb:get(Tx, DbKey)) of
+ Bin when is_binary(Bin) -> Bin;
+ not_found -> erlang:error(database_does_not_exist)
+ end,
DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
DbVersion = erlfdb:wait(erlfdb:get(Tx, DbVersionKey)),
@@ -335,7 +336,6 @@ open(#{} = Db0, Options) ->
load_validate_doc_funs(Db4).
-
% Match on `name` in the function head since some non-fabric2 db
% objects might not have names and so they don't get cached
refresh(#{tx := undefined, name := DbName} = Db) ->
@@ -358,12 +358,9 @@ refresh(#{tx := undefined, name := DbName} = Db) ->
_ ->
Db
end;
-
refresh(#{} = Db) ->
Db.
-
-
reopen(#{} = OldDb) ->
require_transaction(OldDb),
#{
@@ -387,7 +384,6 @@ reopen(#{} = OldDb) ->
NewDb#{security_fun := SecurityFun, interactive := Interactive}.
-
delete(#{} = Db) ->
DoRecovery = fabric2_util:do_recovery(),
case DoRecovery of
@@ -395,7 +391,6 @@ delete(#{} = Db) ->
false -> hard_delete_db(Db)
end.
-
undelete(#{} = Db0, TgtDbName, TimeStamp) ->
#{
name := DbName,
@@ -427,7 +422,6 @@ undelete(#{} = Db0, TgtDbName, TimeStamp) ->
end
end.
-
remove_deleted_db(#{} = Db0, TimeStamp) ->
#{
name := DbName,
@@ -454,7 +448,6 @@ remove_deleted_db(#{} = Db0, TimeStamp) ->
ok
end.
-
exists(#{name := DbName} = Db) when is_binary(DbName) ->
#{
tx := Tx,
@@ -467,54 +460,70 @@ exists(#{name := DbName} = Db) when is_binary(DbName) ->
not_found -> false
end.
-
get_dir(Tx) ->
Root = erlfdb_directory:root(),
Dir = fabric2_server:fdb_directory(),
CouchDB = erlfdb_directory:create_or_open(Tx, Root, Dir),
erlfdb_directory:get_name(CouchDB).
-
list_dbs(Tx, Callback, AccIn, Options0) ->
- Options = case fabric2_util:get_value(restart_tx, Options0) of
- undefined -> [{restart_tx, true} | Options0];
- _AlreadySet -> Options0
- end,
+ Options =
+ case fabric2_util:get_value(restart_tx, Options0) of
+ undefined -> [{restart_tx, true} | Options0];
+ _AlreadySet -> Options0
+ end,
LayerPrefix = get_dir(Tx),
Prefix = erlfdb_tuple:pack({?ALL_DBS}, LayerPrefix),
- fold_range({tx, Tx}, Prefix, fun({K, _V}, Acc) ->
- {DbName} = erlfdb_tuple:unpack(K, Prefix),
- Callback(DbName, Acc)
- end, AccIn, Options).
-
+ fold_range(
+ {tx, Tx},
+ Prefix,
+ fun({K, _V}, Acc) ->
+ {DbName} = erlfdb_tuple:unpack(K, Prefix),
+ Callback(DbName, Acc)
+ end,
+ AccIn,
+ Options
+ ).
list_dbs_info(Tx, Callback, AccIn, Options0) ->
- Options = case fabric2_util:get_value(restart_tx, Options0) of
- undefined -> [{restart_tx, true} | Options0];
- _AlreadySet -> Options0
- end,
+ Options =
+ case fabric2_util:get_value(restart_tx, Options0) of
+ undefined -> [{restart_tx, true} | Options0];
+ _AlreadySet -> Options0
+ end,
LayerPrefix = get_dir(Tx),
Prefix = erlfdb_tuple:pack({?ALL_DBS}, LayerPrefix),
- fold_range({tx, Tx}, Prefix, fun({DbNameKey, DbPrefix}, Acc) ->
- {DbName} = erlfdb_tuple:unpack(DbNameKey, Prefix),
- InfoFuture = get_info_future(Tx, DbPrefix),
- Callback(DbName, InfoFuture, Acc)
- end, AccIn, Options).
-
+ fold_range(
+ {tx, Tx},
+ Prefix,
+ fun({DbNameKey, DbPrefix}, Acc) ->
+ {DbName} = erlfdb_tuple:unpack(DbNameKey, Prefix),
+ InfoFuture = get_info_future(Tx, DbPrefix),
+ Callback(DbName, InfoFuture, Acc)
+ end,
+ AccIn,
+ Options
+ ).
list_deleted_dbs_info(Tx, Callback, AccIn, Options0) ->
- Options = case fabric2_util:get_value(restart_tx, Options0) of
- undefined -> [{restart_tx, true} | Options0];
- _AlreadySet -> Options0
- end,
+ Options =
+ case fabric2_util:get_value(restart_tx, Options0) of
+ undefined -> [{restart_tx, true} | Options0];
+ _AlreadySet -> Options0
+ end,
LayerPrefix = get_dir(Tx),
Prefix = erlfdb_tuple:pack({?DELETED_DBS}, LayerPrefix),
- fold_range({tx, Tx}, Prefix, fun({DbKey, DbPrefix}, Acc) ->
- {DbName, TimeStamp} = erlfdb_tuple:unpack(DbKey, Prefix),
- InfoFuture = get_info_future(Tx, DbPrefix),
- Callback(DbName, TimeStamp, InfoFuture, Acc)
- end, AccIn, Options).
-
+ fold_range(
+ {tx, Tx},
+ Prefix,
+ fun({DbKey, DbPrefix}, Acc) ->
+ {DbName, TimeStamp} = erlfdb_tuple:unpack(DbKey, Prefix),
+ InfoFuture = get_info_future(Tx, DbPrefix),
+ Callback(DbName, TimeStamp, InfoFuture, Acc)
+ end,
+ AccIn,
+ Options
+ ).
get_info(#{} = Db) ->
#{
@@ -525,7 +534,6 @@ get_info(#{} = Db) ->
AegisProps = aegis:get_db_info(Db),
[{encryption, {AegisProps}} | DbInfo].
-
get_info_future(Tx, DbPrefix) ->
{CStart, CEnd} = erlfdb_tuple:range({?DB_CHANGES}, DbPrefix),
ChangesFuture = erlfdb:get_range(Tx, CStart, CEnd, [
@@ -542,10 +550,11 @@ get_info_future(Tx, DbPrefix) ->
% Save the tx object only if it's read-only as we might retry to get the
% future again after the tx was reset
- SaveTx = case erlfdb:get_writes_allowed(Tx) of
- true -> undefined;
- false -> Tx
- end,
+ SaveTx =
+ case erlfdb:get_writes_allowed(Tx) of
+ true -> undefined;
+ false -> Tx
+ end,
#info_future{
tx = SaveTx,
@@ -555,11 +564,10 @@ get_info_future(Tx, DbPrefix) ->
uuid_future = UUIDFuture
}.
-
-get_info_wait(#info_future{tx = Tx, retries = Retries} = Future)
- when Tx =:= undefined orelse Retries >= 2 ->
+get_info_wait(#info_future{tx = Tx, retries = Retries} = Future) when
+ Tx =:= undefined orelse Retries >= 2
+->
get_info_wait_int(Future);
-
get_info_wait(#info_future{tx = Tx, retries = Retries} = Future) ->
try
get_info_wait_int(Future)
@@ -573,7 +581,6 @@ get_info_wait(#info_future{tx = Tx, retries = Retries} = Future) ->
get_info_wait(Future1#info_future{retries = Retries + 1})
end.
-
load_config(#{} = Db) ->
#{
tx := Tx,
@@ -583,32 +590,35 @@ load_config(#{} = Db) ->
{Start, End} = erlfdb_tuple:range({?DB_CONFIG}, DbPrefix),
Future = erlfdb:get_range(Tx, Start, End),
- lists:foldl(fun({K, V}, DbAcc) ->
- {?DB_CONFIG, Key} = erlfdb_tuple:unpack(K, DbPrefix),
- case Key of
- <<"uuid">> -> DbAcc#{uuid := V};
- <<"revs_limit">> -> DbAcc#{revs_limit := ?bin2uint(V)};
- <<"security_doc">> -> DbAcc#{security_doc := ?JSON_DECODE(V)}
- end
- end, Db, erlfdb:wait(Future)).
-
+ lists:foldl(
+ fun({K, V}, DbAcc) ->
+ {?DB_CONFIG, Key} = erlfdb_tuple:unpack(K, DbPrefix),
+ case Key of
+ <<"uuid">> -> DbAcc#{uuid := V};
+ <<"revs_limit">> -> DbAcc#{revs_limit := ?bin2uint(V)};
+ <<"security_doc">> -> DbAcc#{security_doc := ?JSON_DECODE(V)}
+ end
+ end,
+ Db,
+ erlfdb:wait(Future)
+ ).
set_config(#{} = Db0, Key, Val) when is_atom(Key) ->
#{
tx := Tx,
db_prefix := DbPrefix
} = Db = ensure_current(Db0),
- {BinKey, BinVal} = case Key of
- uuid -> {<<"uuid">>, Val};
- revs_limit -> {<<"revs_limit">>, ?uint2bin(max(1, Val))};
- security_doc -> {<<"security_doc">>, ?JSON_ENCODE(Val)}
- end,
+ {BinKey, BinVal} =
+ case Key of
+ uuid -> {<<"uuid">>, Val};
+ revs_limit -> {<<"revs_limit">>, ?uint2bin(max(1, Val))};
+ security_doc -> {<<"security_doc">>, ?JSON_ENCODE(Val)}
+ end,
DbKey = erlfdb_tuple:pack({?DB_CONFIG, BinKey}, DbPrefix),
erlfdb:set(Tx, DbKey, BinVal),
{ok, DbVersion} = bump_db_version(Db),
{ok, Db#{db_version := DbVersion, Key := Val}}.
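
A sketch, not part of this patch, of how set_config/3 and load_config/1 act
as inverses over the ?DB_CONFIG subspace; it assumes both calls run inside
one transaction so the write is visible to the read (set_then_load_example/1
is invented for illustration):

    set_then_load_example(Db0) ->
        %% revs_limit is stored as ?uint2bin(1000) under
        %% {?DB_CONFIG, <<"revs_limit">>} and decoded back with ?bin2uint.
        {ok, Db1} = set_config(Db0, revs_limit, 1000),
        #{revs_limit := 1000} = load_config(Db1),
        ok.
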
-
get_stat(#{} = Db, StatKey) ->
#{
tx := Tx,
@@ -622,10 +632,8 @@ get_stat(#{} = Db, StatKey) ->
% atomic op adds for the moment.
?bin2uint(erlfdb:wait(erlfdb:get(Tx, Key))).
-
incr_stat(_Db, _StatKey, 0) ->
ok;
-
incr_stat(#{} = Db, StatKey, Increment) when is_integer(Increment) ->
#{
tx := Tx,
@@ -635,10 +643,8 @@ incr_stat(#{} = Db, StatKey, Increment) when is_integer(Increment) ->
Key = erlfdb_tuple:pack({?DB_STATS, StatKey}, DbPrefix),
erlfdb:add(Tx, Key, Increment).
-
incr_stat(_Db, _Section, _Key, 0) ->
ok;
-
incr_stat(#{} = Db, Section, Key, Increment) when is_integer(Increment) ->
#{
tx := Tx,
@@ -648,7 +654,6 @@ incr_stat(#{} = Db, Section, Key, Increment) when is_integer(Increment) ->
BinKey = erlfdb_tuple:pack({?DB_STATS, Section, Key}, DbPrefix),
erlfdb:add(Tx, BinKey, Increment).
-
get_all_revs(#{} = Db, DocId) ->
DbName = maps:get(name, Db, undefined),
with_span('db.get_all_revs', #{'db.name' => DbName, 'doc.id' => DocId}, fun() ->
@@ -656,12 +661,10 @@ get_all_revs(#{} = Db, DocId) ->
get_revs_wait(Db, Future)
end).
-
get_all_revs_future(#{} = Db, DocId) ->
Options = [{streaming_mode, want_all}],
get_revs_future(Db, DocId, Options).
-
get_winning_revs(Db, DocId, NumRevs) ->
DbName = maps:get(name, Db, undefined),
with_span('db.get_winning_revs', #{'db.name' => DbName, 'doc.id' => DocId}, fun() ->
@@ -669,12 +672,10 @@ get_winning_revs(Db, DocId, NumRevs) ->
get_revs_wait(Db, Future)
end).
-
get_winning_revs_future(#{} = Db, DocId, NumRevs) ->
Options = [{reverse, true}, {limit, NumRevs}],
get_revs_future(Db, DocId, Options).
-
get_revs_future(#{} = Db, DocId, Options) ->
#{
tx := Tx,
@@ -684,21 +685,24 @@ get_revs_future(#{} = Db, DocId, Options) ->
{StartKey, EndKey} = erlfdb_tuple:range({?DB_REVS, DocId}, DbPrefix),
erlfdb:fold_range_future(Tx, StartKey, EndKey, Options).
-
get_revs_wait(#{} = Db, RangeFuture) ->
#{
tx := Tx,
db_prefix := DbPrefix
} = ensure_current(Db),
- RevRows = erlfdb:fold_range_wait(Tx, RangeFuture, fun({K, V}, Acc) ->
- Key = erlfdb_tuple:unpack(K, DbPrefix),
- Val = erlfdb_tuple:unpack(V),
- [fdb_to_revinfo(Key, Val) | Acc]
- end, []),
+ RevRows = erlfdb:fold_range_wait(
+ Tx,
+ RangeFuture,
+ fun({K, V}, Acc) ->
+ Key = erlfdb_tuple:unpack(K, DbPrefix),
+ Val = erlfdb_tuple:unpack(V),
+ [fdb_to_revinfo(Key, Val) | Acc]
+ end,
+ []
+ ),
lists:reverse(RevRows).
-
get_non_deleted_rev(#{} = Db, DocId, RevId) ->
#{
tx := Tx,
@@ -716,7 +720,6 @@ get_non_deleted_rev(#{} = Db, DocId, RevId) ->
fdb_to_revinfo(BaseKey, erlfdb_tuple:unpack(Val))
end.
-
get_doc_body(Db, DocId, RevInfo) ->
DbName = maps:get(name, Db, undefined),
with_span('db.get_doc_body', #{'db.name' => DbName, 'doc.id' => DocId}, fun() ->
@@ -724,7 +727,6 @@ get_doc_body(Db, DocId, RevInfo) ->
get_doc_body_wait(Db, DocId, RevInfo, Future)
end).
-
get_doc_body_future(#{} = Db, DocId, RevInfo) ->
#{
tx := Tx,
@@ -739,7 +741,6 @@ get_doc_body_future(#{} = Db, DocId, RevInfo) ->
{StartKey, EndKey} = erlfdb_tuple:range(Key, DbPrefix),
erlfdb:fold_range_future(Tx, StartKey, EndKey, []).
-
get_doc_body_wait(#{} = Db0, DocId, RevInfo, Future) ->
#{
tx := Tx
@@ -758,7 +759,6 @@ get_doc_body_wait(#{} = Db0, DocId, RevInfo, Future) ->
fdb_to_doc(Db, DocId, RevPos, [Rev | RevPath], BodyRows).
-
get_local_doc_rev_future(Db, DocId) ->
#{
tx := Tx,
@@ -768,11 +768,9 @@ get_local_doc_rev_future(Db, DocId) ->
Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, DocId}, DbPrefix),
erlfdb:get(Tx, Key).
-
get_local_doc_rev_wait(Future) ->
erlfdb:wait(Future).
-
get_local_doc_body_future(#{} = Db, DocId, _Rev) ->
#{
tx := Tx,
@@ -782,14 +780,12 @@ get_local_doc_body_future(#{} = Db, DocId, _Rev) ->
Prefix = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, DocId}, DbPrefix),
erlfdb:get_range_startswith(Tx, Prefix).
-
get_local_doc_body_wait(#{} = Db0, DocId, Rev, Future) ->
Db = ensure_current(Db0),
{_, Chunks} = lists:unzip(aegis:decrypt(Db, erlfdb:wait(Future))),
fdb_to_local_doc(Db, DocId, Rev, Chunks).
-
get_local_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId) ->
RevFuture = get_local_doc_rev_future(Db, DocId),
Rev = get_local_doc_rev_wait(RevFuture),
@@ -797,7 +793,6 @@ get_local_doc(#{} = Db, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId) ->
BodyFuture = get_local_doc_body_future(Db, DocId, Rev),
get_local_doc_body_wait(Db, DocId, Rev, BodyFuture).
-
get_local_doc_rev(_Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, Val) ->
case Val of
<<255, RevBin/binary>> ->
@@ -806,8 +801,9 @@ get_local_doc_rev(_Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, Val) ->
case erlfdb_tuple:unpack(RevBin) of
{?CURR_LDOC_FORMAT, Rev, _Size} -> Rev
end
- catch _:_ ->
- erlang:error({invalid_local_doc_rev, DocId, Val})
+ catch
+ _:_ ->
+ erlang:error({invalid_local_doc_rev, DocId, Val})
end;
<<131, _/binary>> ->
% Compatibility clause for an older encoding format
@@ -830,7 +826,6 @@ get_local_doc_rev(_Db0, <<?LOCAL_DOC_PREFIX, _/binary>> = DocId, Val) ->
end
end.
-
write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
#{
tx := Tx,
@@ -857,13 +852,17 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
% the attachments have not changed.
AttHash = fabric2_util:hash_atts(Atts),
RevsToCheck = [NewWinner0] ++ ToUpdate ++ ToRemove,
- AttHashCount = lists:foldl(fun(Att, Count) ->
- #{att_hash := RevAttHash} = Att,
- case RevAttHash == AttHash of
- true -> Count + 1;
- false -> Count
- end
- end, 0, RevsToCheck),
+ AttHashCount = lists:foldl(
+ fun(Att, Count) ->
+ #{att_hash := RevAttHash} = Att,
+ case RevAttHash == AttHash of
+ true -> Count + 1;
+ false -> Count
+ end
+ end,
+ 0,
+ RevsToCheck
+ ),
if
AttHashCount == length(RevsToCheck) ->
ok;
@@ -883,35 +882,42 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
{WKey, WVal, WinnerVS} = revinfo_to_fdb(Tx, DbPrefix, DocId, NewWinner),
ok = erlfdb:set_versionstamped_value(Tx, WKey, WVal),
- lists:foreach(fun(RI0) ->
- RI = RI0#{winner := false},
- {K, V, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
- ok = erlfdb:set(Tx, K, V)
- end, ToUpdate),
-
- lists:foreach(fun(RI0) ->
- RI = RI0#{winner := false},
- {K, _, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
- ok = erlfdb:clear(Tx, K),
- ok = clear_doc_body(Db, DocId, RI0)
- end, ToRemove),
+ lists:foreach(
+ fun(RI0) ->
+ RI = RI0#{winner := false},
+ {K, V, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+ ok = erlfdb:set(Tx, K, V)
+ end,
+ ToUpdate
+ ),
+
+ lists:foreach(
+ fun(RI0) ->
+ RI = RI0#{winner := false},
+ {K, _, undefined} = revinfo_to_fdb(Tx, DbPrefix, DocId, RI),
+ ok = erlfdb:clear(Tx, K),
+ ok = clear_doc_body(Db, DocId, RI0)
+ end,
+ ToRemove
+ ),
% _all_docs
- UpdateStatus = case {OldWinner, NewWinner} of
- {not_found, #{deleted := false}} ->
- created;
- {not_found, #{deleted := true}} ->
- replicate_deleted;
- {#{deleted := true}, #{deleted := false}} ->
- recreated;
- {#{deleted := false}, #{deleted := false}} ->
- updated;
- {#{deleted := false}, #{deleted := true}} ->
- deleted;
- {#{deleted := true}, #{deleted := true}} ->
- ignore
- end,
+ UpdateStatus =
+ case {OldWinner, NewWinner} of
+ {not_found, #{deleted := false}} ->
+ created;
+ {not_found, #{deleted := true}} ->
+ replicate_deleted;
+ {#{deleted := true}, #{deleted := false}} ->
+ recreated;
+ {#{deleted := false}, #{deleted := false}} ->
+ updated;
+ {#{deleted := false}, #{deleted := true}} ->
+ deleted;
+ {#{deleted := true}, #{deleted := true}} ->
+ ignore
+ end,
case UpdateStatus of
replicate_deleted ->
@@ -929,10 +935,13 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
% _changes
- if OldWinner == not_found -> ok; true ->
- OldSeq = maps:get(sequence, OldWinner),
- OldSeqKey = erlfdb_tuple:pack({?DB_CHANGES, OldSeq}, DbPrefix),
- erlfdb:clear(Tx, OldSeqKey)
+ if
+ OldWinner == not_found ->
+ ok;
+ true ->
+ OldSeq = maps:get(sequence, OldWinner),
+ OldSeqKey = erlfdb_tuple:pack({?DB_CHANGES, OldSeq}, DbPrefix),
+ erlfdb:clear(Tx, OldSeqKey)
end,
NewSeqKey = erlfdb_tuple:pack_vs({?DB_CHANGES, WinnerVS}, DbPrefix),
@@ -941,26 +950,30 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
% Bump db version on design doc changes
- IsDDoc = case Doc#doc.id of
- <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
- _ -> false
- end,
+ IsDDoc =
+ case Doc#doc.id of
+ <<?DESIGN_DOC_PREFIX, _/binary>> -> true;
+ _ -> false
+ end,
- if not IsDDoc -> ok; true ->
- bump_db_version(Db)
+ if
+ not IsDDoc -> ok;
+ true -> bump_db_version(Db)
end,
% Update our document counts
case UpdateStatus of
created ->
- if not IsDDoc -> ok; true ->
- incr_stat(Db, <<"doc_design_count">>, 1)
+ if
+ not IsDDoc -> ok;
+ true -> incr_stat(Db, <<"doc_design_count">>, 1)
end,
incr_stat(Db, <<"doc_count">>, 1);
recreated ->
- if not IsDDoc -> ok; true ->
- incr_stat(Db, <<"doc_design_count">>, 1)
+ if
+ not IsDDoc -> ok;
+ true -> incr_stat(Db, <<"doc_design_count">>, 1)
end,
incr_stat(Db, <<"doc_count">>, 1),
incr_stat(Db, <<"doc_del_count">>, -1);
@@ -969,8 +982,9 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
ignore ->
ok;
deleted ->
- if not IsDDoc -> ok; true ->
- incr_stat(Db, <<"doc_design_count">>, -1)
+ if
+ not IsDDoc -> ok;
+ true -> incr_stat(Db, <<"doc_design_count">>, -1)
end,
incr_stat(Db, <<"doc_count">>, -1),
incr_stat(Db, <<"doc_del_count">>, 1);
@@ -978,8 +992,14 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
ok
end,
- fabric2_db_plugin:after_doc_write(Db, Doc, NewWinner, OldWinner,
- NewRevId, WinnerVS),
+ fabric2_db_plugin:after_doc_write(
+ Db,
+ Doc,
+ NewWinner,
+ OldWinner,
+ NewRevId,
+ WinnerVS
+ ),
% Update database size
AddSize = sum_add_rev_sizes([NewWinner | ToUpdate]),
@@ -988,7 +1008,6 @@ write_doc(#{} = Db0, Doc, NewWinner0, OldWinner, ToUpdate, ToRemove) ->
ok.
-
write_local_doc(#{} = Db0, Doc) ->
#{
tx := Tx,
@@ -999,17 +1018,18 @@ write_local_doc(#{} = Db0, Doc) ->
{LDocKey, LDocVal, NewSize, Rows} = local_doc_to_fdb(Db, Doc),
- {WasDeleted, PrevSize} = case erlfdb:wait(erlfdb:get(Tx, LDocKey)) of
- <<255, RevBin/binary>> ->
- case erlfdb_tuple:unpack(RevBin) of
- {?CURR_LDOC_FORMAT, _Rev, Size} ->
- {false, Size}
- end;
- <<_/binary>> ->
- {false, 0};
- not_found ->
- {true, 0}
- end,
+ {WasDeleted, PrevSize} =
+ case erlfdb:wait(erlfdb:get(Tx, LDocKey)) of
+ <<255, RevBin/binary>> ->
+ case erlfdb_tuple:unpack(RevBin) of
+ {?CURR_LDOC_FORMAT, _Rev, Size} ->
+ {false, Size}
+ end;
+ <<_/binary>> ->
+ {false, 0};
+ not_found ->
+ {true, 0}
+ end,
BPrefix = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, Id}, DbPrefix),
@@ -1022,9 +1042,12 @@ write_local_doc(#{} = Db0, Doc) ->
% Make sure to clear the whole range, in case there was a larger
% document body there before.
erlfdb:clear_range_startswith(Tx, BPrefix),
- lists:foreach(fun({K, V}) ->
- erlfdb:set(Tx, K, aegis:encrypt(Db, K, V))
- end, Rows)
+ lists:foreach(
+ fun({K, V}) ->
+ erlfdb:set(Tx, K, aegis:encrypt(Db, K, V))
+ end,
+ Rows
+ )
end,
case {WasDeleted, Doc#doc.deleted} of
@@ -1040,7 +1063,6 @@ write_local_doc(#{} = Db0, Doc) ->
ok.
-
read_attachment(#{} = Db, DocId, AttId) ->
#{
tx := Tx,
@@ -1048,18 +1070,20 @@ read_attachment(#{} = Db, DocId, AttId) ->
} = ensure_current(Db),
AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
- Data = case erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)) of
- not_found ->
- throw({not_found, missing});
- KVs ->
- {_, Chunks} = lists:unzip(aegis:decrypt(Db, KVs)),
- iolist_to_binary(Chunks)
- end,
+ Data =
+ case erlfdb:wait(erlfdb:get_range_startswith(Tx, AttKey)) of
+ not_found ->
+ throw({not_found, missing});
+ KVs ->
+ {_, Chunks} = lists:unzip(aegis:decrypt(Db, KVs)),
+ iolist_to_binary(Chunks)
+ end,
IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
case erlfdb:wait(erlfdb:get(Tx, IdKey)) of
<<>> ->
- Data; % Old format, before CURR_ATT_STORAGE_VER = 0
+ % Old format, before CURR_ATT_STORAGE_VER = 0
+ Data;
<<_/binary>> = InfoBin ->
{?CURR_ATT_STORAGE_VER, Compressed} = erlfdb_tuple:unpack(InfoBin),
case Compressed of
@@ -1068,9 +1092,9 @@ read_attachment(#{} = Db, DocId, AttId) ->
end
end.
-
-write_attachment(#{} = Db, DocId, Data, Encoding)
- when is_binary(Data), is_atom(Encoding) ->
+write_attachment(#{} = Db, DocId, Data, Encoding) when
+ is_binary(Data), is_atom(Encoding)
+->
#{
tx := Tx,
db_prefix := DbPrefix
@@ -1078,17 +1102,18 @@ write_attachment(#{} = Db, DocId, Data, Encoding)
AttId = fabric2_util:uuid(),
- {Data1, Compressed} = case Encoding of
- gzip ->
- {Data, false};
- _ ->
- Opts = [{minor_version, 1}, {compressed, 6}],
- CompressedData = term_to_binary(Data, Opts),
- case size(CompressedData) < Data of
- true -> {CompressedData, true};
- false -> {Data, false}
- end
- end,
+ {Data1, Compressed} =
+ case Encoding of
+ gzip ->
+ {Data, false};
+ _ ->
+ Opts = [{minor_version, 1}, {compressed, 6}],
+ CompressedData = term_to_binary(Data, Opts),
+ case size(CompressedData) < size(Data) of
+ true -> {CompressedData, true};
+ false -> {Data, false}
+ end
+ end,
IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
InfoVal = erlfdb_tuple:pack({?CURR_ATT_STORAGE_VER, Compressed}),
@@ -1096,14 +1121,17 @@ write_attachment(#{} = Db, DocId, Data, Encoding)
Chunks = chunkify_binary(Data1),
- lists:foldl(fun(Chunk, ChunkId) ->
- AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId, ChunkId}, DbPrefix),
- ok = erlfdb:set(Tx, AttKey, aegis:encrypt(Db, AttKey, Chunk)),
- ChunkId + 1
- end, 0, Chunks),
+ lists:foldl(
+ fun(Chunk, ChunkId) ->
+ AttKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId, ChunkId}, DbPrefix),
+ ok = erlfdb:set(Tx, AttKey, aegis:encrypt(Db, AttKey, Chunk)),
+ ChunkId + 1
+ end,
+ 0,
+ Chunks
+ ),
{ok, AttId}.
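
For illustration only: attachment chunks are written under sequential
{?DB_ATTS, DocId, AttId, ChunkId} keys starting at 0, which is what lets
read_attachment/3 above fetch a whole attachment with a single
get_range_startswith on the {?DB_ATTS, DocId, AttId} prefix. A hypothetical
helper showing the key shape (att_chunk_keys/4 is invented here):

    att_chunk_keys(DbPrefix, DocId, AttId, NumChunks) ->
        [
            erlfdb_tuple:pack({?DB_ATTS, DocId, AttId, ChunkId}, DbPrefix)
         || ChunkId <- lists:seq(0, NumChunks - 1)
        ].
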
-
get_last_change(#{} = Db) ->
#{
tx := Tx,
@@ -1120,26 +1148,28 @@ get_last_change(#{} = Db) ->
vs_to_seq(SeqVS)
end.
-
fold_range(TxOrDb, RangePrefix, UserFun, UserAcc, Options) ->
- {Db, Tx} = case TxOrDb of
- {tx, TxObj} ->
- {undefined, TxObj};
- #{} = DbObj ->
- DbObj1 = #{tx := TxObj} = ensure_current(DbObj),
- {DbObj1, TxObj}
- end,
+ {Db, Tx} =
+ case TxOrDb of
+ {tx, TxObj} ->
+ {undefined, TxObj};
+ #{} = DbObj ->
+ DbObj1 = #{tx := TxObj} = ensure_current(DbObj),
+ {DbObj1, TxObj}
+ end,
% FoundationDB treats a limit of 0 as unlimited so we guard against it
- case fabric2_util:get_value(limit, Options) of 0 -> UserAcc; _ ->
- FAcc = get_fold_acc(Db, RangePrefix, UserFun, UserAcc, Options),
- try
- fold_range(Tx, FAcc)
- after
- erase(?PDICT_FOLD_ACC_STATE)
- end
+ case fabric2_util:get_value(limit, Options) of
+ 0 ->
+ UserAcc;
+ _ ->
+ FAcc = get_fold_acc(Db, RangePrefix, UserFun, UserAcc, Options),
+ try
+ fold_range(Tx, FAcc)
+ after
+ erase(?PDICT_FOLD_ACC_STATE)
+ end
end.
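
A usage note, not part of the patch: the zero-limit guard above exists
because FoundationDB treats a limit of 0 as unlimited, so the two calls
below would otherwise behave very differently:

    %% fold_range(Db, Prefix, Fun, Acc, [{limit, 0}])  -> Acc, untouched
    %% fold_range(Db, Prefix, Fun, Acc, [{limit, 10}]) -> folds up to 10 rows
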
-
fold_range(Tx, FAcc) ->
#fold_acc{
start_key = Start,
@@ -1148,8 +1178,9 @@ fold_range(Tx, FAcc) ->
base_opts = BaseOpts,
restart_tx = DoRestart
} = FAcc,
- case DoRestart of false -> ok; true ->
- ok = erlfdb:set_option(Tx, disallow_writes)
+ case DoRestart of
+ false -> ok;
+ true -> ok = erlfdb:set_option(Tx, disallow_writes)
end,
Opts = [{limit, Limit} | BaseOpts],
Callback = fun fold_range_cb/2,
@@ -1158,22 +1189,22 @@ fold_range(Tx, FAcc) ->
user_acc = FinalUserAcc
} = erlfdb:fold_range(Tx, Start, End, Callback, FAcc, Opts),
FinalUserAcc
- catch error:{erlfdb_error, Error} when
- ?ERLFDB_IS_RETRYABLE(Error) andalso DoRestart ->
- % Possibly handle cluster_version_changed and future_version as well to
- % continue iteration instead fallback to transactional and retrying
- % from the beginning which is bound to fail when streaming data out to a
- % socket.
- fold_range(Tx, restart_fold(Tx, FAcc))
+ catch
+ error:{erlfdb_error, Error} when
+ ?ERLFDB_IS_RETRYABLE(Error) andalso DoRestart
+ ->
+ % Possibly handle cluster_version_changed and future_version as well to
+ % continue iteration, instead of falling back to transactional mode and
+ % retrying from the beginning, which is bound to fail when streaming
+ % data out to a socket.
+ fold_range(Tx, restart_fold(Tx, FAcc))
end.
-
vs_to_seq(VS) when is_tuple(VS) ->
% 51 is the versionstamp type tag
<<51:8, SeqBin:12/binary>> = erlfdb_tuple:pack({VS}),
fabric2_util:to_hex(SeqBin).
-
seq_to_vs(Seq) when is_binary(Seq) ->
Seq1 = fabric2_util:from_hex(Seq),
% 51 is the versionstamp type tag
@@ -1181,46 +1212,42 @@ seq_to_vs(Seq) when is_binary(Seq) ->
{VS} = erlfdb_tuple:unpack(Seq2),
VS.
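
A sketch of the encoding these two functions rely on (assuming erlfdb's
tuple-layer versionstamp format): a complete versionstamp packs to the type
tag 51 followed by 12 bytes (8-byte commit version, 2-byte batch, 2-byte tx
id), and the update sequence is those 12 bytes rendered as hex.
seq_roundtrip_example/0 is invented for illustration:

    seq_roundtrip_example() ->
        VS = {versionstamp, 10873034897377, 0, 0},
        %% 24 hex characters for the 12-byte stamp
        Seq = vs_to_seq(VS),
        VS = seq_to_vs(Seq).
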
-
next_vs({versionstamp, VS, Batch, TxId}) ->
- {V, B, T} = case TxId < 16#FFFF of
- true ->
- {VS, Batch, TxId + 1};
- false ->
- case Batch < 16#FFFF of
- true ->
- {VS, Batch + 1, 0};
- false ->
- {VS + 1, 0, 0}
- end
- end,
+ {V, B, T} =
+ case TxId < 16#FFFF of
+ true ->
+ {VS, Batch, TxId + 1};
+ false ->
+ case Batch < 16#FFFF of
+ true ->
+ {VS, Batch + 1, 0};
+ false ->
+ {VS + 1, 0, 0}
+ end
+ end,
{versionstamp, V, B, T};
-
next_vs({versionstamp, VS, Batch}) ->
- {V, B} = case Batch < 16#FFFF of
- true ->
- {VS, Batch + 1};
- false ->
- {VS + 1, 0}
- end,
+ {V, B} =
+ case Batch < 16#FFFF of
+ true ->
+ {VS, Batch + 1};
+ false ->
+ {VS + 1, 0}
+ end,
{versionstamp, V, B}.
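
For illustration only, the carry behaviour of next_vs/1 above: the 16-bit
tx id increments first, overflowing into the batch, which in turn overflows
into the version itself (next_vs_examples/0 is invented here):

    next_vs_examples() ->
        {versionstamp, 5, 0, 1} = next_vs({versionstamp, 5, 0, 0}),
        {versionstamp, 5, 1, 0} = next_vs({versionstamp, 5, 0, 16#FFFF}),
        {versionstamp, 6, 0, 0} = next_vs({versionstamp, 5, 16#FFFF, 16#FFFF}).
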
-
new_versionstamp(Tx) ->
TxId = erlfdb:get_next_tx_id(Tx),
{versionstamp, 16#FFFFFFFFFFFFFFFF, 16#FFFF, TxId}.
-
get_approximate_tx_size(#{} = TxDb) ->
require_transaction(TxDb),
#{tx := Tx} = TxDb,
erlfdb:wait(erlfdb:get_approximate_size(Tx)).
-
chunkify_binary(Data) ->
chunkify_binary(Data, binary_chunk_size()).
-
chunkify_binary(Data, Size) ->
case Data of
<<>> ->
@@ -1231,22 +1258,22 @@ chunkify_binary(Data, Size) ->
[Data]
end.
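
For illustration only (chunkify_example/0 is invented; the middle clause of
chunkify_binary/2 is outside this hunk and assumed to split Size bytes at a
time): binaries are cut into fixed-size chunks with any remainder in a
shorter final chunk, e.g.

    chunkify_example() ->
        [<<"ab">>, <<"cd">>, <<"e">>] = chunkify_binary(<<"abcde">>, 2),
        [<<"abc">>] = chunkify_binary(<<"abc">>, 5),
        [] = chunkify_binary(<<>>, 4).
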
-
debug_cluster() ->
debug_cluster(<<>>, <<16#FE, 16#FF, 16#FF>>).
-
debug_cluster(Start, End) ->
transactional(fun(Tx) ->
- lists:foreach(fun({Key, Val}) ->
- io:format(standard_error, "~s => ~s~n", [
+ lists:foreach(
+ fun({Key, Val}) ->
+ io:format(standard_error, "~s => ~s~n", [
string:pad(erlfdb_util:repr(Key), 60),
erlfdb_util:repr(Val)
])
- end, erlfdb:get_range(Tx, Start, End))
+ end,
+ erlfdb:get_range(Tx, Start, End)
+ )
end).
-
init_db(Tx, DbName) ->
Prefix = get_dir(Tx),
Version = erlfdb:wait(erlfdb:get(Tx, ?METADATA_VERSION_KEY)),
@@ -1260,7 +1287,6 @@ init_db(Tx, DbName) ->
db_options => []
}.
-
load_validate_doc_funs(#{} = Db) ->
FoldFun = fun
({row, Row}, Acc) ->
@@ -1277,55 +1303,67 @@ load_validate_doc_funs(#{} = Db) ->
{ok, Infos1} = fabric2_db:fold_docs(Db, FoldFun, [], Options),
- Infos2 = lists:map(fun(Info) ->
- #{
- id := DDocId = <<"_design/", _/binary>>
- } = Info,
- Info#{
- rev_info => get_winning_revs_future(Db, DDocId, 1)
- }
- end, Infos1),
-
- Infos3 = lists:flatmap(fun(Info) ->
- #{
- id := DDocId,
- rev_info := RevInfoFuture
- } = Info,
- [RevInfo] = get_revs_wait(Db, RevInfoFuture),
- #{deleted := Deleted} = RevInfo,
- if Deleted -> []; true ->
- [Info#{
+ Infos2 = lists:map(
+ fun(Info) ->
+ #{
+ id := DDocId = <<"_design/", _/binary>>
+ } = Info,
+ Info#{
+ rev_info => get_winning_revs_future(Db, DDocId, 1)
+ }
+ end,
+ Infos1
+ ),
+
+ Infos3 = lists:flatmap(
+ fun(Info) ->
+ #{
+ id := DDocId,
+ rev_info := RevInfoFuture
+ } = Info,
+ [RevInfo] = get_revs_wait(Db, RevInfoFuture),
+ #{deleted := Deleted} = RevInfo,
+ if
+ Deleted ->
+ [];
+ true ->
+ [
+ Info#{
+ rev_info := RevInfo,
+ body => get_doc_body_future(Db, DDocId, RevInfo)
+ }
+ ]
+ end
+ end,
+ Infos2
+ ),
+
+ VDUs = lists:flatmap(
+ fun(Info) ->
+ #{
+ id := DDocId,
rev_info := RevInfo,
- body => get_doc_body_future(Db, DDocId, RevInfo)
- }]
- end
- end, Infos2),
-
- VDUs = lists:flatmap(fun(Info) ->
- #{
- id := DDocId,
- rev_info := RevInfo,
- body := BodyFuture
- } = Info,
- #doc{} = Doc = get_doc_body_wait(Db, DDocId, RevInfo, BodyFuture),
- case couch_doc:get_validate_doc_fun(Doc) of
- nil -> [];
- Fun -> [Fun]
- end
- end, Infos3),
+ body := BodyFuture
+ } = Info,
+ #doc{} = Doc = get_doc_body_wait(Db, DDocId, RevInfo, BodyFuture),
+ case couch_doc:get_validate_doc_fun(Doc) of
+ nil -> [];
+ Fun -> [Fun]
+ end
+ end,
+ Infos3
+ ),
Db#{
validate_doc_update_funs := VDUs
}.
-
bump_metadata_version(Tx) ->
% The 14 zero bytes are pulled from the PR that added the
% metadata version key. Not sure why 14 bytes when version
% stamps are only 80 bits, but whatever for now.
erlfdb:set_versionstamped_value(Tx, ?METADATA_VERSION_KEY, <<0:112>>).
-
check_metadata_version(#{} = Db) ->
#{
tx := Tx,
@@ -1333,28 +1371,30 @@ check_metadata_version(#{} = Db) ->
} = Db,
AlreadyChecked = get(?PDICT_CHECKED_MD_IS_CURRENT),
- if AlreadyChecked == true -> {current, Db}; true ->
- case erlfdb:wait(erlfdb:get_ss(Tx, ?METADATA_VERSION_KEY)) of
- Version ->
- put(?PDICT_CHECKED_MD_IS_CURRENT, true),
- % We want to set a read conflict on the db version as we'd want
- % to conflict with any writes to this particular db. However
- % during db creation db prefix might not exist yet so we don't
- % add a read-conflict on it then.
- case maps:get(db_prefix, Db, not_found) of
- not_found ->
- ok;
- <<_/binary>> = DbPrefix ->
- DbVerKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
- erlfdb:add_read_conflict_key(Tx, DbVerKey)
- end,
- {current, Db};
- NewVersion ->
- {stale, Db#{md_version := NewVersion}}
- end
+ if
+ AlreadyChecked == true ->
+ {current, Db};
+ true ->
+ case erlfdb:wait(erlfdb:get_ss(Tx, ?METADATA_VERSION_KEY)) of
+ Version ->
+ put(?PDICT_CHECKED_MD_IS_CURRENT, true),
+ % We want to set a read conflict on the db version as we'd want
+ % to conflict with any writes to this particular db. However,
+ % during db creation the db prefix might not exist yet, so we
+ % don't add a read conflict on it then.
+ case maps:get(db_prefix, Db, not_found) of
+ not_found ->
+ ok;
+ <<_/binary>> = DbPrefix ->
+ DbVerKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ erlfdb:add_read_conflict_key(Tx, DbVerKey)
+ end,
+ {current, Db};
+ NewVersion ->
+ {stale, Db#{md_version := NewVersion}}
+ end
end.
-
bump_db_version(#{} = Db) ->
#{
tx := Tx,
@@ -1367,7 +1407,6 @@ bump_db_version(#{} = Db) ->
ok = bump_metadata_version(Tx),
{ok, DbVersion}.
-
check_db_version(#{} = Db, CheckDbVersion) ->
#{
tx := Tx,
@@ -1376,18 +1415,20 @@ check_db_version(#{} = Db, CheckDbVersion) ->
} = Db,
AlreadyChecked = get(?PDICT_CHECKED_DB_IS_CURRENT),
- if not CheckDbVersion orelse AlreadyChecked == true -> current; true ->
- DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
- case erlfdb:wait(erlfdb:get(Tx, DbVersionKey)) of
- DbVersion ->
- put(?PDICT_CHECKED_DB_IS_CURRENT, true),
- current;
- _NewDBVersion ->
- stale
- end
+ if
+ not CheckDbVersion orelse AlreadyChecked == true ->
+ current;
+ true ->
+ DbVersionKey = erlfdb_tuple:pack({?DB_VERSION}, DbPrefix),
+ case erlfdb:wait(erlfdb:get(Tx, DbVersionKey)) of
+ DbVersion ->
+ put(?PDICT_CHECKED_DB_IS_CURRENT, true),
+ current;
+ _NewDBVersion ->
+ stale
+ end
end.
-
soft_delete_db(Db) ->
#{
name := DbName,
@@ -1410,7 +1451,6 @@ soft_delete_db(Db) ->
{deletion_frequency_exceeded, DbName}
end.
-
hard_delete_db(Db) ->
#{
name := DbName,
@@ -1426,22 +1466,22 @@ hard_delete_db(Db) ->
bump_metadata_version(Tx),
ok.
-
write_doc_body(#{} = Db0, #doc{} = Doc) ->
#{
tx := Tx
} = Db = ensure_current(Db0),
Rows = doc_to_fdb(Db, Doc),
- lists:foreach(fun({Key, Value}) ->
- ok = erlfdb:set(Tx, Key, aegis:encrypt(Db, Key, Value))
- end, Rows).
-
+ lists:foreach(
+ fun({Key, Value}) ->
+ ok = erlfdb:set(Tx, Key, aegis:encrypt(Db, Key, Value))
+ end,
+ Rows
+ ).
clear_doc_body(_Db, _DocId, not_found) ->
% No old body to clear
ok;
-
clear_doc_body(#{} = Db, DocId, #{} = RevInfo) ->
#{
tx := Tx,
@@ -1456,7 +1496,6 @@ clear_doc_body(#{} = Db, DocId, #{} = RevInfo) ->
{StartKey, EndKey} = erlfdb_tuple:range(BaseKey, DbPrefix),
ok = erlfdb:clear_range(Tx, StartKey, EndKey).
-
cleanup_attachments(Db, DocId, NewDoc, ToRemove) ->
#{
tx := Tx,
@@ -1470,40 +1509,54 @@ cleanup_attachments(Db, DocId, NewDoc, ToRemove) ->
AllDocs = [{ok, NewDoc} | DiskDocs],
% Get referenced attachment ids
- ActiveIdSet = lists:foldl(fun({ok, Doc}, Acc) ->
- #doc{
- revs = {Pos, [Rev | _]}
- } = Doc,
- case lists:member({Pos, Rev}, RemoveRevs) of
- true ->
- Acc;
- false ->
- lists:foldl(fun(Att, InnerAcc) ->
- {loc, _Db, _DocId, AttId} = couch_att:fetch(data, Att),
- sets:add_element(AttId, InnerAcc)
- end, Acc, Doc#doc.atts)
- end
- end, sets:new(), AllDocs),
+ ActiveIdSet = lists:foldl(
+ fun({ok, Doc}, Acc) ->
+ #doc{
+ revs = {Pos, [Rev | _]}
+ } = Doc,
+ case lists:member({Pos, Rev}, RemoveRevs) of
+ true ->
+ Acc;
+ false ->
+ lists:foldl(
+ fun(Att, InnerAcc) ->
+ {loc, _Db, _DocId, AttId} = couch_att:fetch(data, Att),
+ sets:add_element(AttId, InnerAcc)
+ end,
+ Acc,
+ Doc#doc.atts
+ )
+ end
+ end,
+ sets:new(),
+ AllDocs
+ ),
AttPrefix = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId}, DbPrefix),
Options = [{streaming_mode, want_all}],
Future = erlfdb:get_range_startswith(Tx, AttPrefix, Options),
- ExistingIdSet = lists:foldl(fun({K, _}, Acc) ->
- {?DB_ATT_NAMES, DocId, AttId} = erlfdb_tuple:unpack(K, DbPrefix),
- sets:add_element(AttId, Acc)
- end, sets:new(), erlfdb:wait(Future)),
+ ExistingIdSet = lists:foldl(
+ fun({K, _}, Acc) ->
+ {?DB_ATT_NAMES, DocId, AttId} = erlfdb_tuple:unpack(K, DbPrefix),
+ sets:add_element(AttId, Acc)
+ end,
+ sets:new(),
+ erlfdb:wait(Future)
+ ),
AttsToRemove = sets:subtract(ExistingIdSet, ActiveIdSet),
- lists:foreach(fun(AttId) ->
- IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
- erlfdb:clear(Tx, IdKey),
-
- ChunkKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
- erlfdb:clear_range_startswith(Tx, ChunkKey)
- end, sets:to_list(AttsToRemove)).
+ lists:foreach(
+ fun(AttId) ->
+ IdKey = erlfdb_tuple:pack({?DB_ATT_NAMES, DocId, AttId}, DbPrefix),
+ erlfdb:clear(Tx, IdKey),
+ ChunkKey = erlfdb_tuple:pack({?DB_ATTS, DocId, AttId}, DbPrefix),
+ erlfdb:clear_range_startswith(Tx, ChunkKey)
+ end,
+ sets:to_list(AttsToRemove)
+ ).
revinfo_to_fdb(Tx, DbPrefix, DocId, #{winner := true} = RevId) ->
#{
@@ -1527,7 +1580,6 @@ revinfo_to_fdb(Tx, DbPrefix, DocId, #{winner := true} = RevId) ->
KBin = erlfdb_tuple:pack(Key, DbPrefix),
VBin = erlfdb_tuple:pack_vs(Val),
{KBin, VBin, VS};
-
revinfo_to_fdb(_Tx, DbPrefix, DocId, #{} = RevId) ->
#{
deleted := Deleted,
@@ -1542,7 +1594,6 @@ revinfo_to_fdb(_Tx, DbPrefix, DocId, #{} = RevId) ->
VBin = erlfdb_tuple:pack(Val),
{KBin, VBin, undefined}.
-
fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _, _, _} = Val) ->
{?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
{_RevFormat, Sequence, BranchCount, RevPath, AttHash, RevSize} = Val,
@@ -1557,8 +1608,7 @@ fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _, _, _} = Val) ->
att_hash => AttHash,
rev_size => RevSize
};
-
-fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _} = Val) ->
+fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _} = Val) ->
{?DB_REVS, _DocId, NotDeleted, RevPos, Rev} = Key,
{_RevFormat, RevPath, AttHash, RevSize} = Val,
#{
@@ -1572,26 +1622,21 @@ fdb_to_revinfo(Key, {?CURR_REV_FORMAT, _, _, _} = Val) ->
att_hash => AttHash,
rev_size => RevSize
};
-
fdb_to_revinfo(Key, {0, Seq, BCount, RPath}) ->
Val = {1, Seq, BCount, RPath, <<>>},
fdb_to_revinfo(Key, Val);
-
fdb_to_revinfo(Key, {0, RPath}) ->
Val = {1, RPath, <<>>},
fdb_to_revinfo(Key, Val);
-
fdb_to_revinfo(Key, {1, Seq, BCount, RPath, AttHash}) ->
% Don't forget to change ?CURR_REV_FORMAT to 2 here when it increments
Val = {?CURR_REV_FORMAT, Seq, BCount, RPath, AttHash, 0},
fdb_to_revinfo(Key, Val);
-
fdb_to_revinfo(Key, {1, RPath, AttHash}) ->
% Don't forget to change ?CURR_REV_FORMAT to 2 here when it increments
Val = {?CURR_REV_FORMAT, RPath, AttHash, 0},
fdb_to_revinfo(Key, Val).
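
A sketch of the upgrade chain above, not part of the patch: version-0 and
version-1 values are upgraded one step at a time (adding an empty att hash,
then a zero rev size) until they hit the ?CURR_REV_FORMAT head clauses, so
the old encodings converge on the same map (upgrade_chain_example/0 is
invented here):

    upgrade_chain_example() ->
        Key = {?DB_REVS, <<"doc">>, true, 1, <<0:128>>},
        SameInfo = fdb_to_revinfo(Key, {0, {}}),
        SameInfo = fdb_to_revinfo(Key, {1, {}, <<>>}).
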
-
doc_to_fdb(Db, #doc{} = Doc) ->
#{
db_prefix := DbPrefix
@@ -1611,23 +1656,28 @@ doc_to_fdb(Db, #doc{} = Doc) ->
Value = term_to_binary({Body, DiskAtts, Deleted}, Opts),
Chunks = chunkify_binary(Value),
- {Rows, _} = lists:mapfoldl(fun(Chunk, ChunkId) ->
- Key = erlfdb_tuple:pack({?DB_DOCS, Id, Start, Rev, ChunkId}, DbPrefix),
- {{Key, Chunk}, ChunkId + 1}
- end, 0, Chunks),
+ {Rows, _} = lists:mapfoldl(
+ fun(Chunk, ChunkId) ->
+ Key = erlfdb_tuple:pack({?DB_DOCS, Id, Start, Rev, ChunkId}, DbPrefix),
+ {{Key, Chunk}, ChunkId + 1}
+ end,
+ 0,
+ Chunks
+ ),
Rows.
-
fdb_to_doc(_Db, _DocId, _Pos, _Path, []) ->
{not_found, missing};
-
fdb_to_doc(Db, DocId, Pos, Path, BinRows) when is_list(BinRows) ->
Bin = iolist_to_binary(BinRows),
{Body, DiskAtts, Deleted} = binary_to_term(Bin, [safe]),
- Atts = lists:map(fun(Att) ->
- couch_att:from_disk_term(Db, DocId, Att)
- end, DiskAtts),
+ Atts = lists:map(
+ fun(Att) ->
+ couch_att:from_disk_term(Db, DocId, Att)
+ end,
+ DiskAtts
+ ),
Doc0 = #doc{
id = DocId,
revs = {Pos, Path},
@@ -1641,7 +1691,6 @@ fdb_to_doc(Db, DocId, Pos, Path, BinRows) when is_list(BinRows) ->
#{after_doc_read := ADR} -> ADR(Doc0, Db)
end.
-
local_doc_to_fdb(Db, #doc{} = Doc) ->
#{
db_prefix := DbPrefix
@@ -1655,16 +1704,21 @@ local_doc_to_fdb(Db, #doc{} = Doc) ->
Key = erlfdb_tuple:pack({?DB_LOCAL_DOCS, Id}, DbPrefix),
- StoreRev = case Rev of
- _ when is_integer(Rev) -> integer_to_binary(Rev);
- _ when is_binary(Rev) -> Rev
- end,
+ StoreRev =
+ case Rev of
+ _ when is_integer(Rev) -> integer_to_binary(Rev);
+ _ when is_binary(Rev) -> Rev
+ end,
BVal = term_to_binary(Body, [{minor_version, 1}, {compressed, 6}]),
- {Rows, _} = lists:mapfoldl(fun(Chunk, ChunkId) ->
- K = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, Id, ChunkId}, DbPrefix),
- {{K, Chunk}, ChunkId + 1}
- end, 0, chunkify_binary(BVal)),
+ {Rows, _} = lists:mapfoldl(
+ fun(Chunk, ChunkId) ->
+ K = erlfdb_tuple:pack({?DB_LOCAL_DOC_BODIES, Id, ChunkId}, DbPrefix),
+ {{K, Chunk}, ChunkId + 1}
+ end,
+ 0,
+ chunkify_binary(BVal)
+ ),
NewSize = fabric2_util:ldoc_size(Doc),
RawValue = erlfdb_tuple:pack({?CURR_LDOC_FORMAT, StoreRev, NewSize}),
@@ -1674,10 +1728,8 @@ local_doc_to_fdb(Db, #doc{} = Doc) ->
{Key, Value, NewSize, Rows}.
-
fdb_to_local_doc(_Db, _DocId, not_found, []) ->
{not_found, missing};
-
fdb_to_local_doc(_Db, DocId, <<131, _/binary>> = Val, []) ->
% This is an upgrade clause for the old encoding. We allow reading the old
% value and will perform an upgrade of the storage format on an update.
@@ -1688,11 +1740,11 @@ fdb_to_local_doc(_Db, DocId, <<131, _/binary>> = Val, []) ->
deleted = false,
body = Body
};
-
fdb_to_local_doc(_Db, DocId, <<255, RevBin/binary>>, Rows) when is_list(Rows) ->
- Rev = case erlfdb_tuple:unpack(RevBin) of
- {?CURR_LDOC_FORMAT, Rev0, _Size} -> Rev0
- end,
+ Rev =
+ case erlfdb_tuple:unpack(RevBin) of
+ {?CURR_LDOC_FORMAT, Rev0, _Size} -> Rev0
+ end,
BodyBin = iolist_to_binary(Rows),
Body = binary_to_term(BodyBin, [safe]),
@@ -1703,43 +1755,48 @@ fdb_to_local_doc(_Db, DocId, <<255, RevBin/binary>>, Rows) when is_list(Rows) ->
deleted = false,
body = Body
};
-
fdb_to_local_doc(Db, DocId, RawRev, Rows) ->
BaseRev = erlfdb_tuple:pack({?CURR_LDOC_FORMAT, RawRev, 0}),
Rev = <<255, BaseRev/binary>>,
fdb_to_local_doc(Db, DocId, Rev, Rows).
-
sum_add_rev_sizes(RevInfos) ->
- lists:foldl(fun(RI, Acc) ->
- #{
- exists := Exists,
- rev_size := Size
- } = RI,
- case Exists of
- true -> Acc;
- false -> Size + Acc
- end
- end, 0, RevInfos).
-
+ lists:foldl(
+ fun(RI, Acc) ->
+ #{
+ exists := Exists,
+ rev_size := Size
+ } = RI,
+ case Exists of
+ true -> Acc;
+ false -> Size + Acc
+ end
+ end,
+ 0,
+ RevInfos
+ ).
sum_rem_rev_sizes(RevInfos) ->
- lists:foldl(fun(RI, Acc) ->
- #{
- exists := true,
- rev_size := Size
- } = RI,
- Size + Acc
- end, 0, RevInfos).
-
-
-get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
- when is_map(Db) orelse Db =:= undefined ->
-
- Reverse = case fabric2_util:get_value(dir, Options) of
- rev -> true;
- _ -> false
- end,
+ lists:foldl(
+ fun(RI, Acc) ->
+ #{
+ exists := true,
+ rev_size := Size
+ } = RI,
+ Size + Acc
+ end,
+ 0,
+ RevInfos
+ ).
+
+get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options) when
+ is_map(Db) orelse Db =:= undefined
+->
+ Reverse =
+ case fabric2_util:get_value(dir, Options) of
+ rev -> true;
+ _ -> false
+ end,
StartKey0 = fabric2_util:get_value(start_key, Options),
EndKeyGt = fabric2_util:get_value(end_key_gt, Options),
@@ -1750,35 +1807,38 @@ get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
% CouchDB swaps the key meanings based on the direction
% of the fold. FoundationDB does not, so we have to
% swap back here.
- {StartKey1, EndKey1} = case Reverse of
- false -> {StartKey0, EndKey0};
- true -> {EndKey0, StartKey0}
- end,
+ {StartKey1, EndKey1} =
+ case Reverse of
+ false -> {StartKey0, EndKey0};
+ true -> {EndKey0, StartKey0}
+ end,
% Set the maximum bounds for the start and endkey
- StartKey2 = case StartKey1 of
- undefined ->
- <<RangePrefix/binary, 16#00>>;
- SK2 when not WrapKeys ->
- erlfdb_tuple:pack(SK2, RangePrefix);
- SK2 ->
- erlfdb_tuple:pack({SK2}, RangePrefix)
- end,
+ StartKey2 =
+ case StartKey1 of
+ undefined ->
+ <<RangePrefix/binary, 16#00>>;
+ SK2 when not WrapKeys ->
+ erlfdb_tuple:pack(SK2, RangePrefix);
+ SK2 ->
+ erlfdb_tuple:pack({SK2}, RangePrefix)
+ end,
- EndKey2 = case EndKey1 of
- undefined ->
- <<RangePrefix/binary, 16#FF>>;
- EK2 when Reverse andalso not WrapKeys ->
- PackedEK = erlfdb_tuple:pack(EK2, RangePrefix),
- <<PackedEK/binary, 16#FF>>;
- EK2 when Reverse ->
- PackedEK = erlfdb_tuple:pack({EK2}, RangePrefix),
- <<PackedEK/binary, 16#FF>>;
- EK2 when not WrapKeys ->
- erlfdb_tuple:pack(EK2, RangePrefix);
- EK2 ->
- erlfdb_tuple:pack({EK2}, RangePrefix)
- end,
+ EndKey2 =
+ case EndKey1 of
+ undefined ->
+ <<RangePrefix/binary, 16#FF>>;
+ EK2 when Reverse andalso not WrapKeys ->
+ PackedEK = erlfdb_tuple:pack(EK2, RangePrefix),
+ <<PackedEK/binary, 16#FF>>;
+ EK2 when Reverse ->
+ PackedEK = erlfdb_tuple:pack({EK2}, RangePrefix),
+ <<PackedEK/binary, 16#FF>>;
+ EK2 when not WrapKeys ->
+ erlfdb_tuple:pack(EK2, RangePrefix);
+ EK2 ->
+ erlfdb_tuple:pack({EK2}, RangePrefix)
+ end,
% FoundationDB ranges are applied as SK <= key < EK
% By default, CouchDB is SK <= key <= EK with the
@@ -1789,51 +1849,59 @@ get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
% Thus we have this wonderful bit of logic to account
% for all of those combinations.
- StartKey3 = case {Reverse, InclusiveEnd} of
- {true, false} ->
- erlfdb_key:first_greater_than(StartKey2);
- _ ->
- StartKey2
- end,
+ StartKey3 =
+ case {Reverse, InclusiveEnd} of
+ {true, false} ->
+ erlfdb_key:first_greater_than(StartKey2);
+ _ ->
+ StartKey2
+ end,
- EndKey3 = case {Reverse, InclusiveEnd} of
- {false, true} when EndKey0 /= undefined ->
- erlfdb_key:first_greater_than(EndKey2);
- {true, _} ->
- erlfdb_key:first_greater_than(EndKey2);
- _ ->
- EndKey2
- end,
+ EndKey3 =
+ case {Reverse, InclusiveEnd} of
+ {false, true} when EndKey0 /= undefined ->
+ erlfdb_key:first_greater_than(EndKey2);
+ {true, _} ->
+ erlfdb_key:first_greater_than(EndKey2);
+ _ ->
+ EndKey2
+ end,
- Skip = case fabric2_util:get_value(skip, Options) of
- S when is_integer(S), S >= 0 -> S;
- _ -> 0
- end,
+ Skip =
+ case fabric2_util:get_value(skip, Options) of
+ S when is_integer(S), S >= 0 -> S;
+ _ -> 0
+ end,
- Limit = case fabric2_util:get_value(limit, Options) of
- L when is_integer(L), L >= 0 -> L + Skip;
- undefined -> 0
- end,
+ Limit =
+ case fabric2_util:get_value(limit, Options) of
+ L when is_integer(L), L >= 0 -> L + Skip;
+ undefined -> 0
+ end,
- TargetBytes = case fabric2_util:get_value(target_bytes, Options) of
- T when is_integer(T), T >= 0 -> [{target_bytes, T}];
- undefined -> []
- end,
+ TargetBytes =
+ case fabric2_util:get_value(target_bytes, Options) of
+ T when is_integer(T), T >= 0 -> [{target_bytes, T}];
+ undefined -> []
+ end,
- StreamingMode = case fabric2_util:get_value(streaming_mode, Options) of
- undefined -> [];
- Name when is_atom(Name) -> [{streaming_mode, Name}]
- end,
+ StreamingMode =
+ case fabric2_util:get_value(streaming_mode, Options) of
+ undefined -> [];
+ Name when is_atom(Name) -> [{streaming_mode, Name}]
+ end,
- Snapshot = case fabric2_util:get_value(snapshot, Options) of
- undefined -> [];
- B when is_boolean(B) -> [{snapshot, B}]
- end,
+ Snapshot =
+ case fabric2_util:get_value(snapshot, Options) of
+ undefined -> [];
+ B when is_boolean(B) -> [{snapshot, B}]
+ end,
- BaseOpts = [{reverse, Reverse}]
- ++ TargetBytes
- ++ StreamingMode
- ++ Snapshot,
+ BaseOpts =
+ [{reverse, Reverse}] ++
+ TargetBytes ++
+ StreamingMode ++
+ Snapshot,
RestartTx = fabric2_util:get_value(restart_tx, Options, false),
@@ -1850,7 +1918,6 @@ get_fold_acc(Db, RangePrefix, UserCallback, UserAcc, Options)
user_acc = UserAcc
}.
-
fold_range_cb({K, V}, #fold_acc{} = Acc) ->
#fold_acc{
skip = Skip,
@@ -1859,21 +1926,22 @@ fold_range_cb({K, V}, #fold_acc{} = Acc) ->
user_acc = UserAcc,
base_opts = Opts
} = Acc,
- Acc1 = case Skip =:= 0 of
- true ->
- UserAcc1 = UserFun({K, V}, UserAcc),
- Acc#fold_acc{limit = max(0, Limit - 1), user_acc = UserAcc1};
- false ->
- Acc#fold_acc{skip = Skip - 1, limit = Limit - 1}
- end,
- Acc2 = case fabric2_util:get_value(reverse, Opts, false) of
- true -> Acc1#fold_acc{end_key = erlfdb_key:last_less_or_equal(K)};
- false -> Acc1#fold_acc{start_key = erlfdb_key:first_greater_than(K)}
- end,
+ Acc1 =
+ case Skip =:= 0 of
+ true ->
+ UserAcc1 = UserFun({K, V}, UserAcc),
+ Acc#fold_acc{limit = max(0, Limit - 1), user_acc = UserAcc1};
+ false ->
+ Acc#fold_acc{skip = Skip - 1, limit = Limit - 1}
+ end,
+ Acc2 =
+ case fabric2_util:get_value(reverse, Opts, false) of
+ true -> Acc1#fold_acc{end_key = erlfdb_key:last_less_or_equal(K)};
+ false -> Acc1#fold_acc{start_key = erlfdb_key:first_greater_than(K)}
+ end,
put(?PDICT_FOLD_ACC_STATE, Acc2),
Acc2.
-
restart_fold(Tx, #fold_acc{} = Acc) ->
erase(?PDICT_CHECKED_MD_IS_CURRENT),
@@ -1884,15 +1952,16 @@ restart_fold(Tx, #fold_acc{} = Acc) ->
case {erase(?PDICT_FOLD_ACC_STATE), Acc#fold_acc.retries} of
{#fold_acc{db = Db} = Acc1, _} ->
Acc1#fold_acc{db = check_db_instance(Db), retries = 0};
- {undefined, Retries} when Retries < MaxRetries orelse
- MaxRetries =:= -1 ->
+ {undefined, Retries} when
+ Retries < MaxRetries orelse
+ MaxRetries =:= -1
+ ->
Db = check_db_instance(Acc#fold_acc.db),
Acc#fold_acc{db = Db, retries = Retries + 1};
{undefined, _} ->
error(fold_range_not_progressing)
end.
-
get_db_handle() ->
case get(?PDICT_DB_KEY) of
undefined ->
@@ -1903,7 +1972,6 @@ get_db_handle() ->
Db
end.
-
require_transaction(#{tx := {erlfdb_snapshot, _}} = _Db) ->
ok;
require_transaction(#{tx := {erlfdb_transaction, _}} = _Db) ->
@@ -1911,31 +1979,30 @@ require_transaction(#{tx := {erlfdb_transaction, _}} = _Db) ->
require_transaction(#{} = _Db) ->
erlang:error(transaction_required).
-
ensure_current(Db) ->
ensure_current(Db, true).
-
ensure_current(#{} = Db0, CheckDbVersion) ->
require_transaction(Db0),
- Db3 = case check_metadata_version(Db0) of
- {current, Db1} ->
- Db1;
- {stale, Db1} ->
- case check_db_version(Db1, CheckDbVersion) of
- current ->
- % If db version is current, update cache with the latest
- % metadata so other requests can immediately see the
- % refreshed db handle.
- Now = erlang:monotonic_time(millisecond),
- Db2 = Db1#{check_current_ts := Now},
- fabric2_server:maybe_update(Db2),
- Db2;
- stale ->
- fabric2_server:maybe_remove(Db1),
- throw({?MODULE, reopen})
- end
- end,
+ Db3 =
+ case check_metadata_version(Db0) of
+ {current, Db1} ->
+ Db1;
+ {stale, Db1} ->
+ case check_db_version(Db1, CheckDbVersion) of
+ current ->
+ % If db version is current, update cache with the latest
+ % metadata so other requests can immediately see the
+ % refreshed db handle.
+ Now = erlang:monotonic_time(millisecond),
+ Db2 = Db1#{check_current_ts := Now},
+ fabric2_server:maybe_update(Db2),
+ Db2;
+ stale ->
+ fabric2_server:maybe_remove(Db1),
+ throw({?MODULE, reopen})
+ end
+ end,
case maps:get(security_fun, Db3) of
SecurityFun when is_function(SecurityFun, 2) ->
#{security_doc := SecDoc} = Db3,
@@ -1945,10 +2012,8 @@ ensure_current(#{} = Db0, CheckDbVersion) ->
Db3
end.
-
check_db_instance(undefined) ->
undefined;
-
check_db_instance(#{} = Db) ->
require_transaction(Db),
case check_metadata_version(Db) of
@@ -1967,17 +2032,14 @@ check_db_instance(#{} = Db) ->
end
end.
-
is_transaction_applied(Tx) ->
- was_commit_unknown_result()
- andalso has_transaction_id()
- andalso transaction_id_exists(Tx).
-
+ was_commit_unknown_result() andalso
+ has_transaction_id() andalso
+ transaction_id_exists(Tx).
get_previous_transaction_result() ->
get(?PDICT_TX_RES_KEY).
-
execute_transaction(Tx, Fun, LayerPrefix) ->
put(?PDICT_CHECKED_MD_IS_CURRENT, false),
put(?PDICT_CHECKED_DB_IS_CURRENT, false),
@@ -1991,7 +2053,6 @@ execute_transaction(Tx, Fun, LayerPrefix) ->
end,
Result.
-
clear_transaction() ->
fabric2_txids:remove(get(?PDICT_TX_ID_KEY)),
erase(?PDICT_CHECKED_DB_IS_CURRENT),
@@ -2000,7 +2061,6 @@ clear_transaction() ->
erase(?PDICT_TX_RES_KEY),
erase(?PDICT_TX_RES_WAS_UNKNOWN).
-
was_commit_unknown_result() ->
case get(?PDICT_TX_RES_WAS_UNKNOWN) of
true ->
@@ -2015,15 +2075,12 @@ was_commit_unknown_result() ->
end
end.
-
has_transaction_id() ->
is_binary(get(?PDICT_TX_ID_KEY)).
-
transaction_id_exists(Tx) ->
erlfdb:wait(erlfdb:get(Tx, get(?PDICT_TX_ID_KEY))) == <<>>.
-
get_transaction_id(Tx, LayerPrefix) ->
case get(?PDICT_TX_ID_KEY) of
undefined ->
@@ -2034,25 +2091,26 @@ get_transaction_id(Tx, LayerPrefix) ->
TxId
end.
-
with_span(Operation, ExtraTags, Fun) ->
case ctrace:has_span() of
true ->
- Tags = maps:merge(#{
- 'span.kind' => <<"client">>,
- component => <<"couchdb.fabric">>,
- 'db.instance' => fabric2_server:fdb_cluster(),
- 'db.namespace' => fabric2_server:fdb_directory(),
- 'db.type' => <<"fdb">>,
- nonce => get(nonce),
- pid => self()
- }, ExtraTags),
+ Tags = maps:merge(
+ #{
+ 'span.kind' => <<"client">>,
+ component => <<"couchdb.fabric">>,
+ 'db.instance' => fabric2_server:fdb_cluster(),
+ 'db.namespace' => fabric2_server:fdb_directory(),
+ 'db.type' => <<"fdb">>,
+ nonce => get(nonce),
+ pid => self()
+ },
+ ExtraTags
+ ),
ctrace:with_span(Operation, Tags, Fun);
false ->
Fun()
end.
-
get_info_wait_int(#info_future{} = InfoFuture) ->
#info_future{
db_prefix = DbPrefix,
@@ -2061,40 +2119,44 @@ get_info_wait_int(#info_future{} = InfoFuture) ->
meta_future = MetaFuture
} = InfoFuture,
- RawSeq = case erlfdb:wait(ChangesFuture) of
- [] ->
- vs_to_seq(fabric2_util:seq_zero_vs());
- [{SeqKey, _}] ->
- {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(SeqKey, DbPrefix),
- vs_to_seq(SeqVS)
- end,
+ RawSeq =
+ case erlfdb:wait(ChangesFuture) of
+ [] ->
+ vs_to_seq(fabric2_util:seq_zero_vs());
+ [{SeqKey, _}] ->
+ {?DB_CHANGES, SeqVS} = erlfdb_tuple:unpack(SeqKey, DbPrefix),
+ vs_to_seq(SeqVS)
+ end,
CProp = {update_seq, RawSeq},
UUIDProp = {uuid, erlfdb:wait(UUIDFuture)},
- MProps = lists:foldl(fun({K, V}, Acc) ->
- case erlfdb_tuple:unpack(K, DbPrefix) of
- {?DB_STATS, <<"doc_count">>} ->
- [{doc_count, ?bin2uint(V)} | Acc];
- {?DB_STATS, <<"doc_del_count">>} ->
- [{doc_del_count, ?bin2uint(V)} | Acc];
- {?DB_STATS, <<"sizes">>, Name} ->
- Val = ?bin2uint(V),
- {_, {Sizes}} = lists:keyfind(sizes, 1, Acc),
- NewSizes = lists:keystore(Name, 1, Sizes, {Name, Val}),
- lists:keystore(sizes, 1, Acc, {sizes, {NewSizes}});
- {?DB_STATS, _} ->
- Acc
- end
- end, [{sizes, {[]}}], erlfdb:wait(MetaFuture)),
+ MProps = lists:foldl(
+ fun({K, V}, Acc) ->
+ case erlfdb_tuple:unpack(K, DbPrefix) of
+ {?DB_STATS, <<"doc_count">>} ->
+ [{doc_count, ?bin2uint(V)} | Acc];
+ {?DB_STATS, <<"doc_del_count">>} ->
+ [{doc_del_count, ?bin2uint(V)} | Acc];
+ {?DB_STATS, <<"sizes">>, Name} ->
+ Val = ?bin2uint(V),
+ {_, {Sizes}} = lists:keyfind(sizes, 1, Acc),
+ NewSizes = lists:keystore(Name, 1, Sizes, {Name, Val}),
+ lists:keystore(sizes, 1, Acc, {sizes, {NewSizes}});
+ {?DB_STATS, _} ->
+ Acc
+ end
+ end,
+ [{sizes, {[]}}],
+ erlfdb:wait(MetaFuture)
+ ),
[CProp, UUIDProp | MProps].
-
binary_chunk_size() ->
config:get_integer(
- "fabric", "binary_chunk_size", ?DEFAULT_BINARY_CHUNK_SIZE).
-
+ "fabric", "binary_chunk_size", ?DEFAULT_BINARY_CHUNK_SIZE
+ ).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -2103,7 +2165,7 @@ fdb_to_revinfo_version_compatibility_test() ->
DocId = <<"doc_id">>,
FirstRevFormat = 0,
RevPos = 1,
- Rev = <<60,84,174,140,210,120,192,18,100,148,9,181,129,165,248,92>>,
+ Rev = <<60, 84, 174, 140, 210, 120, 192, 18, 100, 148, 9, 181, 129, 165, 248, 92>>,
RevPath = {},
NotDeleted = true,
Sequence = {versionstamp, 10873034897377, 0, 0},
@@ -2112,17 +2174,18 @@ fdb_to_revinfo_version_compatibility_test() ->
KeyWinner = {?DB_REVS, DocId, NotDeleted, RevPos, Rev},
ValWinner = {FirstRevFormat, Sequence, BranchCount, RevPath},
ExpectedWinner = expected(
- true, BranchCount, NotDeleted, RevPos, Rev, RevPath, Sequence),
+ true, BranchCount, NotDeleted, RevPos, Rev, RevPath, Sequence
+ ),
?assertEqual(ExpectedWinner, fdb_to_revinfo(KeyWinner, ValWinner)),
KeyLoser = {?DB_REVS, DocId, NotDeleted, RevPos, Rev},
ValLoser = {FirstRevFormat, RevPath},
ExpectedLoser = expected(
- false, undefined, NotDeleted, RevPos, Rev, RevPath, undefined),
+ false, undefined, NotDeleted, RevPos, Rev, RevPath, undefined
+ ),
?assertEqual(ExpectedLoser, fdb_to_revinfo(KeyLoser, ValLoser)),
ok.
-
expected(Winner, BranchCount, NotDeleted, RevPos, Rev, RevPath, Sequence) ->
#{
att_hash => <<>>,
@@ -2136,5 +2199,4 @@ expected(Winner, BranchCount, NotDeleted, RevPos, Rev, RevPath, Sequence) ->
winner => Winner
}.
-
-endif.
diff --git a/src/fabric/src/fabric2_index.erl b/src/fabric/src/fabric2_index.erl
index 8d52e8b19..78675a03f 100644
--- a/src/fabric/src/fabric2_index.erl
+++ b/src/fabric/src/fabric2_index.erl
@@ -12,10 +12,8 @@
-module(fabric2_index).
-
-behaviour(gen_server).
-
-export([
register_index/1,
db_updated/1,
@@ -32,33 +30,27 @@
code_change/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("kernel/include/logger.hrl").
-
-callback build_indices(Db :: map(), DDocs :: list(#doc{})) ->
- [{ok, JobId::binary()} | {error, any()}].
+ [{ok, JobId :: binary()} | {error, any()}].
-callback cleanup_indices(Db :: map(), DDocs :: list(#doc{})) ->
[ok | {error, any()}].
-
-define(SHARDS, 32).
-define(DEFAULT_DELAY_MSEC, 60000).
-define(DEFAULT_RESOLUTION_MSEC, 10000).
-
register_index(Mod) when is_atom(Mod) ->
Indices = lists:usort([Mod | registrations()]),
application:set_env(fabric, indices, Indices).
-
db_updated(DbName) when is_binary(DbName) ->
Table = table(erlang:phash2(DbName) rem ?SHARDS),
ets:insert_new(Table, {DbName, now_msec()}).
-
cleanup(Db) ->
try
fabric2_fdb:transactional(Db, fun(TxDb) ->
@@ -81,38 +73,33 @@ cleanup(Db) ->
couch_log:error(LogMsg, [?MODULE, DbName, Tag, Reason, Stack])
end.
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init(_) ->
- lists:foreach(fun(T) ->
- spawn_link(fun() -> process_loop(T) end)
- end, create_tables()),
+ lists:foreach(
+ fun(T) ->
+ spawn_link(fun() -> process_loop(T) end)
+ end,
+ create_tables()
+ ),
{ok, nil}.
-
terminate(_M, _St) ->
ok.
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
create_tables() ->
Opts = [
named_table,
@@ -123,11 +110,9 @@ create_tables() ->
Tables = [table(N) || N <- lists:seq(0, ?SHARDS - 1)],
[ets:new(T, Opts) || T <- Tables].
-
table(Id) when is_integer(Id), Id >= 0 andalso Id < ?SHARDS ->
list_to_atom("fabric2_index_" ++ integer_to_list(Id)).
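
For illustration only: db names are spread over the ?SHARDS ets tables by a
stable hash, so db_updated/1 always lands a given name in the same table,
and ets:insert_new/2 keeps the earliest pending timestamp for it
(which_table/1 is invented here):

    which_table(DbName) when is_binary(DbName) ->
        table(erlang:phash2(DbName) rem ?SHARDS).
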
-
process_loop(Table) ->
Now = now_msec(),
Delay = delay_msec(),
@@ -144,7 +129,6 @@ process_loop(Table) ->
timer:sleep(Resolution + Jitter),
process_loop(Table).
-
clean_stale(Table, Since) ->
Head = {'_', '$1'},
Guard = {'<', '$1', Since},
@@ -153,7 +137,6 @@ clean_stale(Table, Since) ->
% we started processing already at that timestamp.
ets:select_delete(Table, [{Head, [Guard], [true]}]).
-
process_updates(Table, Since) ->
Head = {'$1', '$2'},
Guard = {'=<', '$2', Since},
@@ -162,13 +145,11 @@ process_updates(Table, Since) ->
{Match, Cont} -> process_updates_iter(Match, Cont)
end.
-
process_updates_iter([], Cont) ->
case ets:select(Cont) of
'$end_of_table' -> ok;
{Match, Cont1} -> process_updates_iter(Match, Cont1)
end;
-
process_updates_iter([Db | Rest], Cont) ->
try
process_db(Db)
@@ -188,7 +169,6 @@ process_updates_iter([Db | Rest], Cont) ->
end,
process_updates_iter(Rest, Cont).
-
process_db(DbName) when is_binary(DbName) ->
{ok, Db} = fabric2_db:open(DbName, [?ADMIN_CTX]),
fabric2_fdb:transactional(Db, fun(TxDb) ->
@@ -202,53 +182,54 @@ process_db(DbName) when is_binary(DbName) ->
end
end).
-
build_indices(_TxDb, []) ->
[];
-
build_indices(TxDb, DDocs) ->
- lists:flatmap(fun(Mod) ->
- Mod:build_indices(TxDb, DDocs)
- end, registrations()).
-
+ lists:flatmap(
+ fun(Mod) ->
+ Mod:build_indices(TxDb, DDocs)
+ end,
+ registrations()
+ ).
cleanup_indices(TxDb, DDocs) ->
- lists:foreach(fun(Mod) ->
- Mod:cleanup_indices(TxDb, DDocs)
- end, registrations()).
-
+ lists:foreach(
+ fun(Mod) ->
+ Mod:cleanup_indices(TxDb, DDocs)
+ end,
+ registrations()
+ ).
registrations() ->
application:get_env(fabric, indices, []).
-
should_update(#doc{body = {Props}}) ->
couch_util:get_value(<<"autoupdate">>, Props, true).
-
shuffle(Items) ->
Tagged = [{rand:uniform(), I} || I <- Items],
Sorted = lists:sort(Tagged),
[I || {_T, I} <- Sorted].
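
For illustration only: shuffle/1 above is a decorate-sort-undecorate
shuffle, so the result is always a permutation of the input
(shuffle_is_permutation/0 is invented here):

    shuffle_is_permutation() ->
        L = lists:seq(1, 5),
        true = lists:sort(shuffle(L)) =:= L.
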
-
now_msec() ->
erlang:monotonic_time(millisecond).
-
is_enabled() ->
config:get_boolean("fabric", "index_updater_enabled", true).
-
delay_msec() ->
- config:get_integer("fabric", "index_updater_delay_msec",
- ?DEFAULT_DELAY_MSEC).
-
+ config:get_integer(
+ "fabric",
+ "index_updater_delay_msec",
+ ?DEFAULT_DELAY_MSEC
+ ).
resolution_msec() ->
- config:get_integer("fabric", "index_updater_resolution_msec",
- ?DEFAULT_RESOLUTION_MSEC).
-
+ config:get_integer(
+ "fabric",
+ "index_updater_resolution_msec",
+ ?DEFAULT_RESOLUTION_MSEC
+ ).
auto_cleanup() ->
config:get_boolean("fabric", "index_updater_remove_old_indices", false).
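
For reference, a sketch of the config surface read by the four functions
above, with this module's defaults (section and key names are taken from
the code; the ini snippet itself is illustrative):

    %% [fabric]
    %% index_updater_enabled = true
    %% index_updater_delay_msec = 60000
    %% index_updater_resolution_msec = 10000
    %% index_updater_remove_old_indices = false
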
diff --git a/src/fabric/src/fabric2_node_types.erl b/src/fabric/src/fabric2_node_types.erl
index 110f04d15..f70d75ad8 100644
--- a/src/fabric/src/fabric2_node_types.erl
+++ b/src/fabric/src/fabric2_node_types.erl
@@ -12,12 +12,10 @@
-module(fabric2_node_types).
-
-export([
is_type/1
]).
-
is_type(Type) when is_atom(Type) ->
case {from_os_env(Type), from_app_env(Type)} of
{V, _} when is_boolean(V) ->
@@ -30,7 +28,6 @@ is_type(Type) when is_atom(Type) ->
true
end.
-
from_os_env(Type) when is_atom(Type) ->
StrType = erlang:atom_to_list(Type),
StrTypeUpper = string:to_upper(StrType),
@@ -44,9 +41,8 @@ from_os_env(Type) when is_atom(Type) ->
end
end.
-
from_app_env(Type) when is_atom(Type) ->
case application:get_env(fabric, node_types) of
- undefined -> undefined;
+ undefined -> undefined;
{ok, Props} when is_list(Props) -> proplists:get_value(Type, Props)
end.
diff --git a/src/fabric/src/fabric2_server.erl b/src/fabric/src/fabric2_server.erl
index 0da2b79e9..1704c2f37 100644
--- a/src/fabric/src/fabric2_server.erl
+++ b/src/fabric/src/fabric2_server.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0,
@@ -31,7 +30,6 @@
get_retry_limit/0
]).
-
-export([
init/1,
terminate/2,
@@ -41,7 +39,6 @@
code_change/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("kernel/include/file.hrl").
-include_lib("kernel/include/logger.hrl").
@@ -59,20 +56,18 @@
-define(DEFAULT_RETRY_LIMIT, "100").
-define(TX_OPTIONS, #{
- machine_id => {binary, undefined},
- datacenter_id => {binary, undefined},
+ machine_id => {binary, undefined},
+ datacenter_id => {binary, undefined},
transaction_logging_max_field_length => {integer, undefined},
- timeout => {integer, ?DEFAULT_TIMEOUT_MSEC},
- retry_limit => {integer, ?DEFAULT_RETRY_LIMIT},
- max_retry_delay => {integer, undefined},
- size_limit => {integer, undefined}
+ timeout => {integer, ?DEFAULT_TIMEOUT_MSEC},
+ retry_limit => {integer, ?DEFAULT_RETRY_LIMIT},
+ max_retry_delay => {integer, undefined},
+ size_limit => {integer, undefined}
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
fetch(DbName, UUID) when is_binary(DbName) ->
case {UUID, ets:lookup(?MODULE, DbName)} of
{_, []} -> undefined;
@@ -81,7 +76,6 @@ fetch(DbName, UUID) when is_binary(DbName) ->
{<<_/binary>>, [{DbName, _UUID, _, #{} = _Db}]} -> undefined
end.
-
store(#{name := DbName} = Db0) when is_binary(DbName) ->
#{
uuid := UUID,
@@ -94,14 +88,13 @@ store(#{name := DbName} = Db0) when is_binary(DbName) ->
end,
ok.
-
maybe_update(#{name := DbName} = Db0) when is_binary(DbName) ->
#{
uuid := UUID,
md_version := MDVer
} = Db0,
Db1 = sanitize(Db0),
- Head = {DbName, UUID, '$1', '_'},
+ Head = {DbName, UUID, '$1', '_'},
Guard = {'=<', '$1', MDVer},
Body = {DbName, UUID, MDVer, {const, Db1}},
try
@@ -111,12 +104,10 @@ maybe_update(#{name := DbName} = Db0) when is_binary(DbName) ->
false
end.
-
remove(DbName) when is_binary(DbName) ->
true = ets:delete(?MODULE, DbName),
ok.
-
maybe_remove(#{name := DbName} = Db) when is_binary(DbName) ->
#{
uuid := UUID,
@@ -126,41 +117,37 @@ maybe_remove(#{name := DbName} = Db) when is_binary(DbName) ->
Guard = {'=<', '$1', MDVer},
1 =:= ets:select_delete(?MODULE, [{Head, [Guard], [true]}]).
-
init(_) ->
ets:new(?MODULE, [
- public,
- named_table,
- {read_concurrency, true},
- {write_concurrency, true}
- ]),
+ public,
+ named_table,
+ {read_concurrency, true},
+ {write_concurrency, true}
+ ]),
{Cluster, Db} = get_db_and_cluster([empty]),
application:set_env(fabric, ?FDB_CLUSTER, Cluster),
application:set_env(fabric, db, Db),
- Dir = case config:get("fabric", "fdb_directory") of
- Val when is_list(Val), length(Val) > 0 ->
- [?l2b(Val)];
- _ ->
- [?DEFAULT_FDB_DIRECTORY]
- end,
+ Dir =
+ case config:get("fabric", "fdb_directory") of
+ Val when is_list(Val), length(Val) > 0 ->
+ [?l2b(Val)];
+ _ ->
+ [?DEFAULT_FDB_DIRECTORY]
+ end,
application:set_env(fabric, ?FDB_DIRECTORY, Dir),
config:subscribe_for_changes([?TX_OPTIONS_SECTION]),
{ok, nil}.
-
terminate(_, _St) ->
ok.
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info({config_change, ?TX_OPTIONS_SECTION, _K, deleted, _}, St) ->
% Since we don't know the exact default values to reset the options
% to, we recreate the db handle instead, which will start with a default
@@ -168,40 +155,32 @@ handle_info({config_change, ?TX_OPTIONS_SECTION, _K, deleted, _}, St) ->
{_Cluster, NewDb} = get_db_and_cluster([]),
application:set_env(fabric, db, NewDb),
{noreply, St};
-
handle_info({config_change, ?TX_OPTIONS_SECTION, K, V, _}, St) ->
{ok, Db} = application:get_env(fabric, db),
apply_tx_options(Db, [{K, V}]),
{noreply, St};
-
handle_info({gen_event_EXIT, _Handler, _Reason}, St) ->
erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
{noreply, St};
-
handle_info(restart_config_listener, St) ->
config:subscribe_for_changes([?TX_OPTIONS_SECTION]),
{noreply, St};
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
fdb_directory() ->
get_env(?FDB_DIRECTORY).
fdb_cluster() ->
get_env(?FDB_CLUSTER).
-
get_retry_limit() ->
Default = list_to_integer(?DEFAULT_RETRY_LIMIT),
config:get_integer(?TX_OPTIONS_SECTION, "retry_limit", Default).
-
get_env(Key) ->
case get(Key) of
undefined ->
@@ -216,25 +195,26 @@ get_env(Key) ->
Value
end.
-
get_db_and_cluster(EunitDbOpts) ->
- {Cluster, Db} = case application:get_env(fabric, eunit_run) of
- {ok, true} ->
- {<<"eunit_test">>, erlfdb_util:get_test_db(EunitDbOpts)};
- undefined ->
- ClusterFileStr = get_cluster_file_path(),
- {ok, ConnectionStr} = file:read_file(ClusterFileStr),
- DbHandle = erlfdb:open(iolist_to_binary(ClusterFileStr)),
- {string:trim(ConnectionStr), DbHandle}
- end,
+ {Cluster, Db} =
+ case application:get_env(fabric, eunit_run) of
+ {ok, true} ->
+ {<<"eunit_test">>, erlfdb_util:get_test_db(EunitDbOpts)};
+ undefined ->
+ ClusterFileStr = get_cluster_file_path(),
+ {ok, ConnectionStr} = file:read_file(ClusterFileStr),
+ DbHandle = erlfdb:open(iolist_to_binary(ClusterFileStr)),
+ {string:trim(ConnectionStr), DbHandle}
+ end,
apply_tx_options(Db, config:get(?TX_OPTIONS_SECTION)),
{Cluster, Db}.
get_cluster_file_path() ->
- Locations = [
- {custom, config:get("erlfdb", "cluster_file")},
- {custom, os:getenv("FDB_CLUSTER_FILE", undefined)}
- ] ++ default_locations(os:type()),
+ Locations =
+ [
+ {custom, config:get("erlfdb", "cluster_file")},
+ {custom, os:getenv("FDB_CLUSTER_FILE", undefined)}
+ ] ++ default_locations(os:type()),
case find_cluster_file(Locations) of
{ok, Location} ->
Location;
@@ -242,25 +222,20 @@ get_cluster_file_path() ->
erlang:error(Reason)
end.
-
default_locations({unix, _}) ->
[
{default, ?CLUSTER_FILE_MACOS},
{default, ?CLUSTER_FILE_LINUX}
];
-
default_locations({win32, _}) ->
[
{default, ?CLUSTER_FILE_WIN32}
].
-
find_cluster_file([]) ->
{error, cluster_file_missing};
-
find_cluster_file([{custom, undefined} | Rest]) ->
find_cluster_file(Rest);
-
find_cluster_file([{Type, Location} | Rest]) ->
Msg = #{
what => fdb_connection_setup,
@@ -278,7 +253,8 @@ find_cluster_file([{Type, Location} | Rest]) ->
{ok, #file_info{access = read}} ->
?LOG_WARNING(Msg#{
status => read_only_file,
- details => "If coordinators are changed without updating this "
+ details =>
+ "If coordinators are changed without updating this "
"file CouchDB may be unable to connect to the FDB cluster!"
}),
couch_log:warning(
@@ -330,20 +306,21 @@ find_cluster_file([{Type, Location} | Rest]) ->
find_cluster_file(Rest)
end.
-
apply_tx_options(Db, Cfg) ->
- maps:map(fun(Option, {Type, Default}) ->
- case lists:keyfind(atom_to_list(Option), 1, Cfg) of
- false ->
- case Default of
- undefined -> ok;
- _Defined -> apply_tx_option(Db, Option, Default, Type)
- end;
- {_K, Val} ->
- apply_tx_option(Db, Option, Val, Type)
- end
- end, ?TX_OPTIONS).
-
+ maps:map(
+ fun(Option, {Type, Default}) ->
+ case lists:keyfind(atom_to_list(Option), 1, Cfg) of
+ false ->
+ case Default of
+ undefined -> ok;
+ _Defined -> apply_tx_option(Db, Option, Default, Type)
+ end;
+ {_K, Val} ->
+ apply_tx_option(Db, Option, Val, Type)
+ end
+ end,
+ ?TX_OPTIONS
+ ).
apply_tx_option(Db, Option, Val, integer) ->
try
@@ -358,7 +335,6 @@ apply_tx_option(Db, Option, Val, integer) ->
Msg = "~p : Invalid integer tx option ~p = ~p",
couch_log:error(Msg, [?MODULE, Option, Val])
end;
-
apply_tx_option(Db, Option, Val, binary) ->
BinVal = list_to_binary(Val),
case size(BinVal) < 16 of
@@ -375,7 +351,6 @@ apply_tx_option(Db, Option, Val, binary) ->
couch_log:error(Msg, [?MODULE, Option])
end.
-
set_option(Db, Option, Val) ->
try
erlfdb:set_option(Db, Option, Val)
@@ -392,7 +367,6 @@ set_option(Db, Option, Val) ->
couch_log:error(Msg, [?MODULE, Option, Val])
end.
-
sanitize(#{} = Db) ->
Db#{
tx := undefined,
@@ -401,7 +375,6 @@ sanitize(#{} = Db) ->
interactive := false
}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -424,11 +397,9 @@ teardown(_) ->
meck:unload().
find_cluster_file_test_() ->
- {setup,
- fun setup/0,
- fun teardown/1,
- [
- {"ignore unspecified config", ?_assertEqual(
+ {setup, fun setup/0, fun teardown/1, [
+ {"ignore unspecified config",
+ ?_assertEqual(
{ok, "ok.cluster"},
find_cluster_file([
{custom, undefined},
@@ -436,21 +407,24 @@ find_cluster_file_test_() ->
])
)},
- {"allow read-only file", ?_assertEqual(
+ {"allow read-only file",
+ ?_assertEqual(
{ok, "readonly.cluster"},
find_cluster_file([
{custom, "readonly.cluster"}
])
)},
- {"fail if no access to configured cluster file", ?_assertEqual(
+ {"fail if no access to configured cluster file",
+ ?_assertEqual(
{error, cluster_file_permissions},
find_cluster_file([
{custom, "noaccess.cluster"}
])
)},
- {"fail if configured cluster file is missing", ?_assertEqual(
+ {"fail if configured cluster file is missing",
+ ?_assertEqual(
{error, enoent},
find_cluster_file([
{custom, "missing.cluster"},
@@ -458,14 +432,14 @@ find_cluster_file_test_() ->
])
)},
- {"check multiple default locations", ?_assertEqual(
+ {"check multiple default locations",
+ ?_assertEqual(
{ok, "ok.cluster"},
find_cluster_file([
{default, "missing.cluster"},
{default, "ok.cluster"}
])
)}
- ]
- }.
+ ]}.
-endif.
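
The fabric2_server hunks above all exhibit erlfmt's central layout rule: when the right-hand side of a binding is a multi-line case expression, it moves to its own indented block under the `Var =` line rather than hanging off the equals sign. A minimal sketch of the resulting shape (the function name is hypothetical, and the literal default directory stands in for the ?DEFAULT_FDB_DIRECTORY macro):

fdb_directory_example() ->
    % erlfmt: the `case` starts on the line after `Dir =`,
    % indented one level, with its clauses one level deeper.
    Dir =
        case config:get("fabric", "fdb_directory") of
            Val when is_list(Val), length(Val) > 0 ->
                [list_to_binary(Val)];
            _ ->
                [<<"couchdb">>]
        end,
    Dir.
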
diff --git a/src/fabric/src/fabric2_sup.erl b/src/fabric/src/fabric2_sup.erl
index 874a8c240..e8636fcec 100644
--- a/src/fabric/src/fabric2_sup.erl
+++ b/src/fabric/src/fabric2_sup.erl
@@ -14,7 +14,6 @@
-behaviour(supervisor).
-vsn(1).
-
-export([
start_link/1
]).
@@ -23,11 +22,9 @@
init/1
]).
-
start_link(Args) ->
supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
-
init([]) ->
config:enable_feature(fdb),
Flags = {rest_for_one, 1, 5},
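
The fabric2_sup hunk is typical of the single most common change in this commit: erlfmt deletes the blank line that previously separated clauses of the same function, so a clause group reads as one contiguous block. For example, the `remove/1` clauses of fabric2_txids (reformatted below) now sit flush against each other:

% Before: a blank line separated the two clauses.
% After erlfmt: clauses of one function stay contiguous.
remove(TxId) when is_binary(TxId) ->
    gen_server:cast(?MODULE, {remove, TxId});
remove(undefined) ->
    ok.
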
diff --git a/src/fabric/src/fabric2_txids.erl b/src/fabric/src/fabric2_txids.erl
index 285e342ed..58bbef1b1 100644
--- a/src/fabric/src/fabric2_txids.erl
+++ b/src/fabric/src/fabric2_txids.erl
@@ -14,14 +14,12 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0,
create/2,
remove/1
]).
-
-export([
init/1,
terminate/2,
@@ -32,58 +30,52 @@
format_status/2
]).
-
-include("fabric2.hrl").
-
-define(ONE_HOUR, 3600000000).
-define(MAX_TX_IDS, 1000).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
create(Tx, undefined) ->
Prefix = fabric2_fdb:get_dir(Tx),
create(Tx, Prefix);
-
create(_Tx, LayerPrefix) ->
{Mega, Secs, Micro} = os:timestamp(),
Key = {?TX_IDS, Mega, Secs, Micro, fabric2_util:uuid()},
erlfdb_tuple:pack(Key, LayerPrefix).
-
remove(TxId) when is_binary(TxId) ->
gen_server:cast(?MODULE, {remove, TxId});
-
remove(undefined) ->
ok.
-
-
init(_) ->
{ok, #{
last_sweep => os:timestamp(),
txids => []
}}.
-
terminate(_, #{txids := TxIds}) ->
- if TxIds == [] -> ok; true ->
- fabric2_fdb:transactional(fun(Tx) ->
- lists:foreach(fun(TxId) ->
- erlfdb:clear(Tx, TxId)
- end, TxIds)
- end)
+ if
+ TxIds == [] ->
+ ok;
+ true ->
+ fabric2_fdb:transactional(fun(Tx) ->
+ lists:foreach(
+ fun(TxId) ->
+ erlfdb:clear(Tx, TxId)
+ end,
+ TxIds
+ )
+ end)
end,
ok.
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast({remove, TxId}, St) ->
#{
last_sweep := LastSweep,
@@ -102,15 +94,12 @@ handle_cast({remove, TxId}, St) ->
{noreply, NewSt}
end.
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
format_status(_Opt, [_PDict, State]) ->
#{
txids := TxIds
@@ -118,10 +107,7 @@ format_status(_Opt, [_PDict, State]) ->
Scrubbed = State#{
txids => {length, length(TxIds)}
},
- [{data, [{"State",
- Scrubbed
- }]}].
-
+ [{data, [{"State", Scrubbed}]}].
clean(St, NeedsSweep) ->
#{
@@ -129,9 +115,12 @@ clean(St, NeedsSweep) ->
txids := TxIds
} = St,
fabric2_fdb:transactional(fun(Tx) ->
- lists:foreach(fun(TxId) ->
- erlfdb:clear(Tx, TxId)
- end, TxIds),
+ lists:foreach(
+ fun(TxId) ->
+ erlfdb:clear(Tx, TxId)
+ end,
+ TxIds
+ ),
case NeedsSweep of
true ->
sweep(Tx, LastSweep),
@@ -144,7 +133,6 @@ clean(St, NeedsSweep) ->
end
end).
-
sweep(Tx, {Mega, Secs, Micro}) ->
Prefix = fabric2_fdb:get_dir(Tx),
StartKey = erlfdb_tuple:pack({?TX_IDS}, Prefix),
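
fabric2_txids also demonstrates erlfmt's layout for higher-order calls: once a fun literal spans multiple lines, each argument of lists:foreach/2 (likewise map and foldl) is placed on its own line, and the closing parenthesis aligns with the call. A condensed sketch of the cleanup loop above, wrapped in a hypothetical helper:

clear_tx_ids(Tx, TxIds) ->
    % The fun and the list argument each get their own line;
    % the closing paren drops back to the call's indent level.
    lists:foreach(
        fun(TxId) ->
            erlfdb:clear(Tx, TxId)
        end,
        TxIds
    ).
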
diff --git a/src/fabric/src/fabric2_users_db.erl b/src/fabric/src/fabric2_users_db.erl
index e1c8c3fcb..f20861326 100644
--- a/src/fabric/src/fabric2_users_db.erl
+++ b/src/fabric/src/fabric2_users_db.erl
@@ -34,8 +34,7 @@
-define(REQUIREMENT_ERROR, "Password does not conform to requirements.").
-define(PASSWORD_SERVER_ERROR, "Server cannot hash passwords at this time.").
--define(
- DDOCS_ADMIN_ONLY,
+-define(DDOCS_ADMIN_ONLY,
<<"Only administrators can view design docs in the users database.">>
).
@@ -55,12 +54,12 @@ before_doc_update(Doc, Db, _UpdateType) ->
#user_ctx{name = Name} = fabric2_db:get_user_ctx(Db),
DocName = get_doc_name(Doc),
case (catch fabric2_db:check_is_admin(Db)) of
- ok ->
- save_doc(Doc);
- _ when Name =:= DocName orelse Name =:= null ->
- save_doc(Doc);
- _ ->
- throw(not_found)
+ ok ->
+ save_doc(Doc);
+ _ when Name =:= DocName orelse Name =:= null ->
+ save_doc(Doc);
+ _ ->
+ throw(not_found)
end.
% If newDoc.password == null || newDoc.password == undefined:
@@ -70,45 +69,48 @@ before_doc_update(Doc, Db, _UpdateType) ->
% newDoc.password_sha = hash_pw(newDoc.password + salt)
% newDoc.salt = salt
% newDoc.password = null
-save_doc(#doc{body={Body}} = Doc) ->
+save_doc(#doc{body = {Body}} = Doc) ->
%% Support both schemes to smooth migration from legacy scheme
Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"),
case {fabric2_util:get_value(?PASSWORD, Body), Scheme} of
- {null, _} -> % server admins don't have a user-db password entry
- Doc;
- {undefined, _} ->
- Doc;
- {ClearPassword, "simple"} -> % deprecated
- ok = validate_password(ClearPassword),
- Salt = couch_uuids:random(),
- PasswordSha = couch_passwords:simple(ClearPassword, Salt),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
- Body1 = ?replace(Body0, ?SALT, Salt),
- Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
- Body3 = proplists:delete(?PASSWORD, Body2),
- Doc#doc{body={Body3}};
- {ClearPassword, "pbkdf2"} ->
- ok = validate_password(ClearPassword),
- Iterations = chttpd_util:get_chttpd_auth_config_integer(
- "iterations", 10),
- Salt = couch_uuids:random(),
- DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
- Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
- Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
- Body3 = ?replace(Body2, ?SALT, Salt),
- Body4 = proplists:delete(?PASSWORD, Body3),
- Doc#doc{body={Body4}};
- {_ClearPassword, Scheme} ->
- ?LOG_ERROR(#{
- what => invalid_config_setting,
- section => couch_httpd_auth,
- key => password_scheme,
- value => Scheme,
- details => "password_scheme must one of (simple, pbkdf2)"
- }),
- couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
- throw({forbidden, ?PASSWORD_SERVER_ERROR})
+ % server admins don't have a user-db password entry
+ {null, _} ->
+ Doc;
+ {undefined, _} ->
+ Doc;
+ % deprecated
+ {ClearPassword, "simple"} ->
+ ok = validate_password(ClearPassword),
+ Salt = couch_uuids:random(),
+ PasswordSha = couch_passwords:simple(ClearPassword, Salt),
+ Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
+ Body1 = ?replace(Body0, ?SALT, Salt),
+ Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
+ Body3 = proplists:delete(?PASSWORD, Body2),
+ Doc#doc{body = {Body3}};
+ {ClearPassword, "pbkdf2"} ->
+ ok = validate_password(ClearPassword),
+ Iterations = chttpd_util:get_chttpd_auth_config_integer(
+ "iterations", 10
+ ),
+ Salt = couch_uuids:random(),
+ DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
+ Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
+ Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
+ Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
+ Body3 = ?replace(Body2, ?SALT, Salt),
+ Body4 = proplists:delete(?PASSWORD, Body3),
+ Doc#doc{body = {Body4}};
+ {_ClearPassword, Scheme} ->
+ ?LOG_ERROR(#{
+ what => invalid_config_setting,
+ section => couch_httpd_auth,
+ key => password_scheme,
+ value => Scheme,
+                details => "password_scheme must be one of (simple, pbkdf2)"
+ }),
+ couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR})
end.
% Validate if a new password matches all RegExp in the password_regexp setting.
@@ -121,47 +123,52 @@ validate_password(ClearPassword) ->
"[]" ->
ok;
ValidateConfig ->
- RequirementList = case couch_util:parse_term(ValidateConfig) of
- {ok, RegExpList} when is_list(RegExpList) ->
- RegExpList;
- {ok, NonListValue} ->
- couch_log:error(
- "[couch_httpd_auth] password_regexp value of '~p'"
- " is not a list.",
- [NonListValue]
- ),
- throw({forbidden, ?PASSWORD_SERVER_ERROR});
- {error, ErrorInfo} ->
- couch_log:error(
- "[couch_httpd_auth] password_regexp value of '~p'"
- " could not get parsed. ~p",
- [ValidateConfig, ErrorInfo]
- ),
- throw({forbidden, ?PASSWORD_SERVER_ERROR})
- end,
- % Check the password on every RegExp.
- lists:foreach(fun(RegExpTuple) ->
- case get_password_regexp_and_error_msg(RegExpTuple) of
- {ok, RegExp, PasswordErrorMsg} ->
- check_password(ClearPassword, RegExp, PasswordErrorMsg);
- {error} ->
+ RequirementList =
+ case couch_util:parse_term(ValidateConfig) of
+ {ok, RegExpList} when is_list(RegExpList) ->
+ RegExpList;
+ {ok, NonListValue} ->
couch_log:error(
- "[couch_httpd_auth] password_regexp part of '~p' "
- "is not a RegExp string or "
- "a RegExp and Reason tuple.",
- [RegExpTuple]
+ "[couch_httpd_auth] password_regexp value of '~p'"
+ " is not a list.",
+ [NonListValue]
+ ),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR});
+ {error, ErrorInfo} ->
+ couch_log:error(
+ "[couch_httpd_auth] password_regexp value of '~p'"
+                            " could not be parsed. ~p",
+ [ValidateConfig, ErrorInfo]
),
throw({forbidden, ?PASSWORD_SERVER_ERROR})
- end
- end, RequirementList),
+ end,
+ % Check the password on every RegExp.
+ lists:foreach(
+ fun(RegExpTuple) ->
+ case get_password_regexp_and_error_msg(RegExpTuple) of
+ {ok, RegExp, PasswordErrorMsg} ->
+ check_password(ClearPassword, RegExp, PasswordErrorMsg);
+ {error} ->
+ couch_log:error(
+ "[couch_httpd_auth] password_regexp part of '~p' "
+ "is not a RegExp string or "
+ "a RegExp and Reason tuple.",
+ [RegExpTuple]
+ ),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR})
+ end
+ end,
+ RequirementList
+ ),
ok
end.
 % Get the RegExp out of the tuple and build the combined error message.
 % The first clause handles a proper Reason string.
-get_password_regexp_and_error_msg({RegExp, Reason})
- when is_list(RegExp) andalso is_list(Reason)
- andalso length(Reason) > 0 ->
+get_password_regexp_and_error_msg({RegExp, Reason}) when
+ is_list(RegExp) andalso is_list(Reason) andalso
+ length(Reason) > 0
+->
{ok, RegExp, lists:concat([?REQUIREMENT_ERROR, " ", Reason])};
 % With a Reason that is not a proper string.
get_password_regexp_and_error_msg({RegExp, _Reason}) when is_list(RegExp) ->
@@ -185,7 +192,6 @@ check_password(Password, RegExp, ErrorMsg) ->
throw({bad_request, ErrorMsg})
end.
-
% If the doc is a design doc
% If the request's userCtx identifies an admin
% -> return doc
@@ -213,19 +219,17 @@ after_doc_read(Doc, Db) ->
_ ->
Doc1 = strip_non_public_fields(Doc),
case Doc1 of
- #doc{body={[]}} -> throw(not_found);
+ #doc{body = {[]}} -> throw(not_found);
_ -> Doc1
end
end.
-
-get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
+get_doc_name(#doc{id = <<"org.couchdb.user:", Name/binary>>}) ->
Name;
get_doc_name(_) ->
undefined.
-
-strip_non_public_fields(#doc{body={Props}}=Doc) ->
+strip_non_public_fields(#doc{body = {Props}} = Doc) ->
PublicFields = chttpd_util:get_chttpd_auth_config("public_fields", ""),
Public = re:split(PublicFields, "\\s*,\\s*", [{return, binary}]),
- Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
+ Doc#doc{body = {[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
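
Two further conventions show up in fabric2_users_db: record patterns gain spaces around `=` (`#doc{body = {Body}}` instead of `#doc{body={Body}}`), and a guard that overflows the head line wraps under `when`, with the `->` dropped to its own line. Both shapes, condensed from the hunks above (the second function name is shortened for illustration):

% Record-pattern spacing, verbatim from the diff:
get_doc_name(#doc{id = <<"org.couchdb.user:", Name/binary>>}) ->
    Name;
get_doc_name(_) ->
    undefined.

% A long guard wraps below `when`, and `->` gets its own line:
regexp_and_msg({RegExp, Reason}) when
    is_list(RegExp) andalso is_list(Reason) andalso
    length(Reason) > 0
->
    {ok, RegExp, Reason}.
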
diff --git a/src/fabric/src/fabric2_util.erl b/src/fabric/src/fabric2_util.erl
index cd22778ef..f1f524371 100644
--- a/src/fabric/src/fabric2_util.erl
+++ b/src/fabric/src/fabric2_util.erl
@@ -12,7 +12,6 @@
-module(fabric2_util).
-
-export([
revinfo_to_revs/1,
revinfo_to_path/1,
@@ -48,11 +47,9 @@
pmap/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-
revinfo_to_revs(RevInfo) ->
#{
rev_id := {RevPos, Rev},
@@ -60,7 +57,6 @@ revinfo_to_revs(RevInfo) ->
} = RevInfo,
{RevPos, [Rev | RevPath]}.
-
revinfo_to_path(RevInfo) ->
#{
rev_id := {RevPos, Rev},
@@ -70,19 +66,15 @@ revinfo_to_path(RevInfo) ->
Path = revinfo_to_path(RevInfo, Revs),
{RevPos - length(Revs) + 1, Path}.
-
revinfo_to_path(RevInfo, [Rev]) ->
{Rev, RevInfo, []};
-
revinfo_to_path(RevInfo, [Rev | Rest]) ->
{Rev, ?REV_MISSING, [revinfo_to_path(RevInfo, Rest)]}.
-
sort_revinfos(RevInfos) ->
CmpFun = fun(A, B) -> rev_sort_key(A) > rev_sort_key(B) end,
lists:sort(CmpFun, RevInfos).
-
rev_sort_key(#{} = RevInfo) ->
#{
deleted := Deleted,
@@ -90,7 +82,6 @@ rev_sort_key(#{} = RevInfo) ->
} = RevInfo,
{not Deleted, RevPos, Rev}.
-
rev_size(#doc{} = Doc) ->
#doc{
id = Id,
@@ -99,23 +90,28 @@ rev_size(#doc{} = Doc) ->
atts = Atts
} = Doc,
- {Start, Rev} = case Revs of
- {0, []} -> {0, <<>>};
- {N, [RevId | _]} -> {N, RevId}
- end,
+ {Start, Rev} =
+ case Revs of
+ {0, []} -> {0, <<>>};
+ {N, [RevId | _]} -> {N, RevId}
+ end,
lists:sum([
size(Id),
size(erlfdb_tuple:pack({Start})),
size(Rev),
- 1, % FDB tuple encoding of booleans for deleted flag is 1 byte
+ % FDB tuple encoding of booleans for deleted flag is 1 byte
+ 1,
couch_ejson_size:encoded_size(Body),
- lists:foldl(fun(Att, Acc) ->
- couch_att:external_size(Att) + Acc
- end, 0, Atts)
+ lists:foldl(
+ fun(Att, Acc) ->
+ couch_att:external_size(Att) + Acc
+ end,
+ 0,
+ Atts
+ )
]).
-
ldoc_size(#doc{id = <<"_local/", _/binary>>} = Doc) ->
#doc{
id = Id,
@@ -124,10 +120,11 @@ ldoc_size(#doc{id = <<"_local/", _/binary>>} = Doc) ->
body = Body
} = Doc,
- StoreRev = case Rev of
- _ when is_integer(Rev) -> integer_to_binary(Rev);
- _ when is_binary(Rev) -> Rev
- end,
+ StoreRev =
+ case Rev of
+ _ when is_integer(Rev) -> integer_to_binary(Rev);
+ _ when is_binary(Rev) -> Rev
+ end,
case Deleted of
true ->
@@ -140,15 +137,12 @@ ldoc_size(#doc{id = <<"_local/", _/binary>>} = Doc) ->
])
end.
-
seq_zero_vs() ->
{versionstamp, 0, 0, 0}.
-
seq_max_vs() ->
{versionstamp, 18446744073709551615, 65535, 65535}.
-
user_ctx_to_json(Db) ->
UserCtx = fabric2_db:get_user_ctx(Db),
{[
@@ -157,7 +151,6 @@ user_ctx_to_json(Db) ->
{<<"roles">>, UserCtx#user_ctx.roles}
]}.
-
validate_security_object({SecProps}) ->
Admins = get_value(<<"admins">>, SecProps, {[]}),
ok = validate_names_and_roles(Admins),
@@ -167,72 +160,71 @@ validate_security_object({SecProps}) ->
Members = get_value(<<"members">>, SecProps, Readers),
ok = validate_names_and_roles(Members).
-
validate_names_and_roles({Props}) when is_list(Props) ->
validate_json_list_of_strings(<<"names">>, Props),
validate_json_list_of_strings(<<"roles">>, Props);
validate_names_and_roles(_) ->
throw("admins or members must be a JSON list of strings").
-
validate_json_list_of_strings(Member, Props) ->
case get_value(Member, Props, []) of
Values when is_list(Values) ->
NonBinary = lists:filter(fun(V) -> not is_binary(V) end, Values),
- if NonBinary == [] -> ok; true ->
- MemberStr = binary_to_list(Member),
- throw(MemberStr ++ " must be a JSON list of strings")
+ if
+ NonBinary == [] ->
+ ok;
+ true ->
+ MemberStr = binary_to_list(Member),
+ throw(MemberStr ++ " must be a JSON list of strings")
end;
_ ->
MemberStr = binary_to_list(Member),
throw(MemberStr ++ " must be a JSON list of strings")
end.
-
hash_atts([]) ->
<<>>;
-
hash_atts(Atts) ->
- SortedAtts = lists:sort(fun(A, B) ->
- couch_att:fetch(name, A) =< couch_att:fetch(name, B)
- end, Atts),
- Md5St = lists:foldl(fun(Att, Acc) ->
- {loc, _Db, _DocId, AttId} = couch_att:fetch(data, Att),
- couch_hash:md5_hash_update(Acc, AttId)
- end, couch_hash:md5_hash_init(), SortedAtts),
+ SortedAtts = lists:sort(
+ fun(A, B) ->
+ couch_att:fetch(name, A) =< couch_att:fetch(name, B)
+ end,
+ Atts
+ ),
+ Md5St = lists:foldl(
+ fun(Att, Acc) ->
+ {loc, _Db, _DocId, AttId} = couch_att:fetch(data, Att),
+ couch_hash:md5_hash_update(Acc, AttId)
+ end,
+ couch_hash:md5_hash_init(),
+ SortedAtts
+ ),
couch_hash:md5_hash_final(Md5St).
-
dbname_ends_with(#{} = Db, Suffix) ->
dbname_ends_with(fabric2_db:name(Db), Suffix);
-
dbname_ends_with(DbName, Suffix) when is_binary(DbName), is_binary(Suffix) ->
Suffix == filename:basename(DbName).
-
get_value(Key, List) ->
get_value(Key, List, undefined).
-
get_value(Key, List, Default) ->
case lists:keysearch(Key, 1, List) of
- {value, {Key,Value}} ->
+ {value, {Key, Value}} ->
Value;
false ->
Default
end.
-
to_hex(Bin) ->
list_to_binary(to_hex_int(Bin)).
-
to_hex_int(<<>>) ->
[];
to_hex_int(<<Hi:4, Lo:4, Rest/binary>>) ->
[nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex(Rest)].
-
nibble_to_hex(I) ->
case I of
0 -> $0;
@@ -253,11 +245,9 @@ nibble_to_hex(I) ->
15 -> $f
end.
-
from_hex(Bin) ->
iolist_to_binary(from_hex_int(Bin)).
-
from_hex_int(<<>>) ->
[];
from_hex_int(<<Hi:8, Lo:8, RestBinary/binary>>) ->
@@ -267,7 +257,6 @@ from_hex_int(<<Hi:8, Lo:8, RestBinary/binary>>) ->
from_hex_int(<<BadHex/binary>>) ->
erlang:error({invalid_hex, BadHex}).
-
hex_to_nibble(N) ->
case N of
$0 -> 0;
@@ -295,40 +284,42 @@ hex_to_nibble(N) ->
_ -> erlang:error({invalid_hex, N})
end.
-
uuid() ->
to_hex(crypto:strong_rand_bytes(16)).
-
encode_all_doc_key(B) when is_binary(B) -> B;
encode_all_doc_key(Term) when Term < <<>> -> <<>>;
encode_all_doc_key(_) -> <<255>>.
-
all_docs_view_opts(#mrargs{} = Args) ->
NS = couch_util:get_value(namespace, Args#mrargs.extra),
- StartKey = case Args#mrargs.start_key of
- undefined -> Args#mrargs.start_key_docid;
- SKey -> SKey
- end,
- EndKey = case Args#mrargs.end_key of
- undefined -> Args#mrargs.end_key_docid;
- EKey -> EKey
- end,
- StartKeyOpts = case StartKey of
- undefined -> [];
- _ -> [{start_key, encode_all_doc_key(StartKey)}]
- end,
- EndKeyOpts = case {EndKey, Args#mrargs.inclusive_end} of
- {undefined, _} -> [];
- {_, false} -> [{end_key_gt, encode_all_doc_key(EndKey)}];
- {_, true} -> [{end_key, encode_all_doc_key(EndKey)}]
- end,
-
- DocOpts = case Args#mrargs.conflicts of
- true -> [conflicts | Args#mrargs.doc_options];
- _ -> Args#mrargs.doc_options
- end,
+ StartKey =
+ case Args#mrargs.start_key of
+ undefined -> Args#mrargs.start_key_docid;
+ SKey -> SKey
+ end,
+ EndKey =
+ case Args#mrargs.end_key of
+ undefined -> Args#mrargs.end_key_docid;
+ EKey -> EKey
+ end,
+ StartKeyOpts =
+ case StartKey of
+ undefined -> [];
+ _ -> [{start_key, encode_all_doc_key(StartKey)}]
+ end,
+ EndKeyOpts =
+ case {EndKey, Args#mrargs.inclusive_end} of
+ {undefined, _} -> [];
+ {_, false} -> [{end_key_gt, encode_all_doc_key(EndKey)}];
+ {_, true} -> [{end_key, encode_all_doc_key(EndKey)}]
+ end,
+
+ DocOpts =
+ case Args#mrargs.conflicts of
+ true -> [conflicts | Args#mrargs.doc_options];
+ _ -> Args#mrargs.doc_options
+ end,
[
{dir, Args#mrargs.direction},
@@ -340,7 +331,6 @@ all_docs_view_opts(#mrargs{} = Args) ->
{doc_opts, DocOpts}
] ++ StartKeyOpts ++ EndKeyOpts.
-
iso8601_timestamp() ->
Now = os:timestamp(),
{{Year, Month, Date}, {Hour, Minute, Second}} =
@@ -348,44 +338,49 @@ iso8601_timestamp() ->
Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0BZ",
io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second]).
-
now(ms) ->
{Mega, Sec, Micro} = os:timestamp(),
(Mega * 1000000 + Sec) * 1000 + round(Micro / 1000);
now(sec) ->
now(ms) div 1000.
-
do_recovery() ->
- config:get_boolean("couchdb",
- "enable_database_recovery", false).
-
+ config:get_boolean(
+ "couchdb",
+ "enable_database_recovery",
+ false
+ ).
pmap(Fun, Args) ->
pmap(Fun, Args, []).
-
pmap(Fun, Args, Opts) ->
- Refs = lists:map(fun(Arg) ->
- {_, Ref} = spawn_monitor(fun() -> exit(pmap_exec(Fun, Arg)) end),
- Ref
- end, Args),
+ Refs = lists:map(
+ fun(Arg) ->
+ {_, Ref} = spawn_monitor(fun() -> exit(pmap_exec(Fun, Arg)) end),
+ Ref
+ end,
+ Args
+ ),
Timeout = fabric2_util:get_value(timeout, Opts, 5000),
- lists:map(fun(Ref) ->
- receive
- {'DOWN', Ref, _, _, {'$res', Res}} ->
- Res;
- {'DOWN', Ref, _, _, {'$err', Tag, Reason, Stack}} ->
- erlang:raise(Tag, Reason, Stack)
- after Timeout ->
- error({pmap_timeout, Timeout})
- end
- end, Refs).
-
+ lists:map(
+ fun(Ref) ->
+ receive
+ {'DOWN', Ref, _, _, {'$res', Res}} ->
+ Res;
+ {'DOWN', Ref, _, _, {'$err', Tag, Reason, Stack}} ->
+ erlang:raise(Tag, Reason, Stack)
+ after Timeout ->
+ error({pmap_timeout, Timeout})
+ end
+ end,
+ Refs
+ ).
pmap_exec(Fun, Arg) ->
try
{'$res', Fun(Arg)}
- catch Tag:Reason:Stack ->
- {'$err', Tag, Reason, Stack}
+ catch
+ Tag:Reason:Stack ->
+ {'$err', Tag, Reason, Stack}
end.
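
fabric2_util's pmap_exec/2 shows the try/catch layout: the compact `catch Tag:Reason:Stack ->` one-liner becomes a bare `catch` keyword with the clause indented beneath it. The reformatted function, reproduced as a standalone sketch:

pmap_exec(Fun, Arg) ->
    try
        {'$res', Fun(Arg)}
    catch
        % The clause pattern now sits under `catch`,
        % one indent level in (OTP 21+ stacktrace syntax).
        Tag:Reason:Stack ->
            {'$err', Tag, Reason, Stack}
    end.
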
diff --git a/src/jwtf/src/jwtf.erl b/src/jwtf/src/jwtf.erl
index 4c4f80c70..d62789b0a 100644
--- a/src/jwtf/src/jwtf.erl
+++ b/src/jwtf/src/jwtf.erl
@@ -25,7 +25,8 @@
]).
-define(ALGS, [
- {<<"RS256">>, {public_key, sha256}}, % RSA PKCS#1 signature with SHA-256
+ % RSA PKCS#1 signature with SHA-256
+ {<<"RS256">>, {public_key, sha256}},
{<<"RS384">>, {public_key, sha384}},
{<<"RS512">>, {public_key, sha512}},
{<<"ES256">>, {public_key, sha256}},
@@ -33,7 +34,8 @@
{<<"ES512">>, {public_key, sha512}},
{<<"HS256">>, {hmac, sha256}},
{<<"HS384">>, {hmac, sha384}},
- {<<"HS512">>, {hmac, sha512}}]).
+ {<<"HS512">>, {hmac, sha512}}
+]).
-define(CHECKS, [
alg,
@@ -43,8 +45,8 @@
kid,
nbf,
sig,
- typ]).
-
+ typ
+]).
% @doc encode
% Encode the JSON Header and Claims using Key and Alg obtained from Header
@@ -52,21 +54,23 @@
{ok, binary()} | no_return().
encode(Header = {HeaderProps}, Claims, Key) ->
try
- Alg = case prop(<<"alg">>, HeaderProps) of
- undefined ->
- throw({bad_request, <<"Missing alg header parameter">>});
- Val ->
- Val
- end,
+ Alg =
+ case prop(<<"alg">>, HeaderProps) of
+ undefined ->
+ throw({bad_request, <<"Missing alg header parameter">>});
+ Val ->
+ Val
+ end,
EncodedHeader = b64url:encode(jiffy:encode(Header)),
EncodedClaims = b64url:encode(jiffy:encode(Claims)),
Message = <<EncodedHeader/binary, $., EncodedClaims/binary>>,
- SignatureOrMac = case verification_algorithm(Alg) of
- {public_key, Algorithm} ->
- public_key:sign(Message, Algorithm, Key);
- {hmac, Algorithm} ->
- hmac(Algorithm, Key, Message)
- end,
+ SignatureOrMac =
+ case verification_algorithm(Alg) of
+ {public_key, Algorithm} ->
+ public_key:sign(Message, Algorithm, Key);
+ {hmac, Algorithm} ->
+ hmac(Algorithm, Key, Message)
+ end,
EncodedSignatureOrMac = b64url:encode(SignatureOrMac),
{ok, <<Message/binary, $., EncodedSignatureOrMac/binary>>}
catch
@@ -74,7 +78,6 @@ encode(Header = {HeaderProps}, Claims, Key) ->
{error, Error}
end.
-
% @doc decode
% Decodes the supplied encoded token, checking
% for the attributes defined in Checks and calling
@@ -90,14 +93,12 @@ decode(EncodedToken, Checks, KS) ->
{error, Error}
end.
-
% @doc valid_algorithms
% Return a list of supported algorithms
-spec valid_algorithms() -> [binary()].
valid_algorithms() ->
proplists:get_keys(?ALGS).
-
% @doc verification_algorithm
% Return {VerificationMethod, Algorithm} tuple for the specified Alg
-spec verification_algorithm(binary()) ->
@@ -110,7 +111,6 @@ verification_algorithm(Alg) ->
throw({bad_request, <<"Invalid alg header parameter">>})
end.
-
validate(Header0, Payload0, Signature, Checks, KS) ->
validate_checks(Checks),
Header1 = props(decode_b64url_json(Header0)),
@@ -123,7 +123,6 @@ validate(Header0, Payload0, Signature, Checks, KS) ->
Key = key(Header1, Checks, KS),
verify(Alg, Header0, Payload0, Signature, Key).
-
validate_checks(Checks) when is_list(Checks) ->
case {lists:usort(Checks), lists:sort(Checks)} of
{L, L} ->
@@ -139,22 +138,17 @@ validate_checks(Checks) when is_list(Checks) ->
error({unknown_checks, UnknownChecks})
end.
-
valid_check(Check) when is_atom(Check) ->
lists:member(Check, ?CHECKS);
-
valid_check({Check, _}) when is_atom(Check) ->
lists:member(Check, ?CHECKS);
-
valid_check(_) ->
false.
-
validate_header(Props, Checks) ->
validate_typ(Props, Checks),
validate_alg(Props, Checks).
-
validate_typ(Props, Checks) ->
Required = prop(typ, Checks),
TYP = prop(<<"typ">>, Props),
@@ -169,7 +163,6 @@ validate_typ(Props, Checks) ->
throw({bad_request, <<"Invalid typ header parameter">>})
end.
-
validate_alg(Props, Checks) ->
Required = prop(alg, Checks),
Alg = prop(<<"alg">>, Props),
@@ -187,7 +180,6 @@ validate_alg(Props, Checks) ->
end
end.
-
%% Only validate required checks.
validate_payload(Props, Checks) ->
validate_iss(Props, Checks),
@@ -195,13 +187,13 @@ validate_payload(Props, Checks) ->
validate_nbf(Props, Checks),
validate_exp(Props, Checks).
-
validate_iss(Props, Checks) ->
ExpectedISS = prop(iss, Checks),
ActualISS = prop(<<"iss">>, Props),
case {ExpectedISS, ActualISS} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{ISS, undefined} when ISS /= undefined ->
throw({bad_request, <<"Missing iss claim">>});
@@ -211,13 +203,13 @@ validate_iss(Props, Checks) ->
throw({bad_request, <<"Invalid iss claim">>})
end.
-
validate_iat(Props, Checks) ->
Required = prop(iat, Checks),
IAT = prop(<<"iat">>, Props),
case {Required, IAT} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{true, undefined} ->
throw({bad_request, <<"Missing iat claim">>});
@@ -227,13 +219,13 @@ validate_iat(Props, Checks) ->
throw({bad_request, <<"Invalid iat claim">>})
end.
-
validate_nbf(Props, Checks) ->
Required = prop(nbf, Checks),
NBF = prop(<<"nbf">>, Props),
case {Required, NBF} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{true, undefined} ->
throw({bad_request, <<"Missing nbf claim">>});
@@ -241,13 +233,13 @@ validate_nbf(Props, Checks) ->
assert_past(<<"nbf">>, NBF)
end.
-
validate_exp(Props, Checks) ->
Required = prop(exp, Checks),
EXP = prop(<<"exp">>, Props),
case {Required, EXP} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{true, undefined} ->
throw({bad_request, <<"Missing exp claim">>});
@@ -255,7 +247,6 @@ validate_exp(Props, Checks) ->
assert_future(<<"exp">>, EXP)
end.
-
key(Props, Checks, KS) ->
Alg = prop(<<"alg">>, Props),
Required = prop(kid, Checks),
@@ -267,7 +258,6 @@ key(Props, Checks, KS) ->
KS(Alg, KID)
end.
-
verify(Alg, Header, Payload, SignatureOrMac0, Key) ->
Message = <<Header/binary, $., Payload/binary>>,
SignatureOrMac1 = b64url:decode(SignatureOrMac0),
@@ -279,7 +269,6 @@ verify(Alg, Header, Payload, SignatureOrMac0, Key) ->
hmac_verify(Algorithm, Message, SignatureOrMac1, Key)
end.
-
public_key_verify(Algorithm, Message, Signature, PublicKey) ->
case public_key:verify(Message, Algorithm, Signature, PublicKey) of
true ->
@@ -288,7 +277,6 @@ public_key_verify(Algorithm, Message, Signature, PublicKey) ->
throw({bad_request, <<"Bad signature">>})
end.
-
hmac_verify(Algorithm, Message, HMAC, SecretKey) ->
case hmac(Algorithm, SecretKey, Message) of
HMAC ->
@@ -297,14 +285,12 @@ hmac_verify(Algorithm, Message, HMAC, SecretKey) ->
throw({bad_request, <<"Bad HMAC">>})
end.
-
split(EncodedToken) ->
case binary:split(EncodedToken, <<$.>>, [global]) of
[_, _, _] = Split -> Split;
_ -> throw({bad_request, <<"Malformed token">>})
end.
-
decode_b64url_json(B64UrlEncoded) ->
try
case b64url:decode(B64UrlEncoded) of
@@ -318,14 +304,11 @@ decode_b64url_json(B64UrlEncoded) ->
throw({bad_request, Error})
end.
-
props({Props}) ->
Props;
-
props(_) ->
throw({bad_request, <<"Not an object">>}).
-
assert_past(Name, Time) ->
case Time < now_seconds() of
true ->
@@ -342,16 +325,13 @@ assert_future(Name, Time) ->
throw({unauthorized, <<Name/binary, " not in future">>})
end.
-
now_seconds() ->
{MegaSecs, Secs, _MicroSecs} = os:timestamp(),
MegaSecs * 1000000 + Secs.
-
prop(Prop, Props) ->
proplists:get_value(Prop, Props).
-
-ifdef(OTP_RELEASE).
-if(?OTP_RELEASE >= 22).
@@ -366,7 +346,8 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -if(?OTP_RELEASE >= 22)
+% -if(?OTP_RELEASE >= 22)
+-endif.
-else.
@@ -374,8 +355,8 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -ifdef(OTP_RELEASE)
-
+% -ifdef(OTP_RELEASE)
+-endif.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
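
The jwtf hunks illustrate comment hoisting: a trailing comment that shared a line with code (`{<<"RS256">>, {public_key, sha256}}, % RSA PKCS#1 signature with SHA-256`) moves onto its own line above the code it annotates. Condensed from the ?ALGS definition above (the macro name here is hypothetical):

-define(ALGS_EXAMPLE, [
    % RSA PKCS#1 signature with SHA-256
    {<<"RS256">>, {public_key, sha256}},
    {<<"HS256">>, {hmac, sha256}}
]).
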
diff --git a/src/jwtf/src/jwtf_keystore.erl b/src/jwtf/src/jwtf_keystore.erl
index 5c2b47985..f362a09f0 100644
--- a/src/jwtf/src/jwtf_keystore.erl
+++ b/src/jwtf/src/jwtf_keystore.erl
@@ -23,8 +23,14 @@
]).
% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ terminate/2
+]).
% config_listener api
-export([handle_config_change/5, handle_config_terminate/3]).
@@ -33,7 +39,6 @@
get(Alg, undefined) when is_binary(Alg) ->
get(Alg, <<"_default">>);
-
get(Alg, KID0) when is_binary(Alg), is_binary(KID0) ->
Kty = kty(Alg),
KID = binary_to_list(KID0),
@@ -43,10 +48,9 @@ get(Alg, KID0) when is_binary(Alg), is_binary(KID0) ->
ok = gen_server:call(?MODULE, {set, Kty, KID, Key}),
Key;
[{{Kty, KID}, Key}] ->
- Key
+ Key
end.
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
@@ -57,36 +61,28 @@ init(_) ->
ets:new(?MODULE, [public, named_table]),
{ok, nil}.
-
handle_call({set, Kty, KID, Key}, _From, State) ->
true = ets:insert(?MODULE, {{Kty, KID}, Key}),
{reply, ok, State}.
-
handle_cast({delete, Kty, KID}, State) ->
true = ets:delete(?MODULE, {Kty, KID}),
{noreply, State};
-
handle_cast(_Msg, State) ->
{noreply, State}.
-
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State};
-
handle_info(_Msg, State) ->
{noreply, State}.
-
terminate(_Reason, _State) ->
ok.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
% config listener callback
handle_config_change("jwt_keys", ConfigKey, _ConfigValue, _, _) ->
@@ -97,13 +93,11 @@ handle_config_change("jwt_keys", ConfigKey, _ConfigValue, _, _) ->
ignored
end,
{ok, nil};
-
handle_config_change(_, _, _, _, _) ->
{ok, nil}.
handle_config_terminate(_Server, stop, _State) ->
ok;
-
handle_config_terminate(_Server, _Reason, _State) ->
erlang:send_after(100, whereis(?MODULE), restart_config_listener).
@@ -148,19 +142,16 @@ pem_decode(PEM) ->
_ ->
throw({bad_request, <<"Not a valid key">>})
end
- catch
- error:_ ->
- throw({bad_request, <<"Not a valid key">>})
- end.
+ catch
+ error:_ ->
+ throw({bad_request, <<"Not a valid key">>})
+ end.
kty(<<"HS", _/binary>>) ->
"hmac";
-
kty(<<"RS", _/binary>>) ->
"rsa";
-
kty(<<"ES", _/binary>>) ->
"ec";
-
kty(_) ->
throw({bad_request, <<"Unknown kty">>}).
diff --git a/src/jwtf/src/jwtf_sup.erl b/src/jwtf/src/jwtf_sup.erl
index 6f44808de..98d354c96 100644
--- a/src/jwtf/src/jwtf_sup.erl
+++ b/src/jwtf/src/jwtf_sup.erl
@@ -35,4 +35,4 @@ start_link() ->
%% ===================================================================
init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(jwtf_keystore, worker)]} }.
+ {ok, {{one_for_one, 5, 10}, [?CHILD(jwtf_keystore, worker)]}}.
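
jwtf_sup's one-line change shows whitespace normalization inside nested terms: erlfmt strips the padding spaces from `{ok, { {one_for_one, 5, 10}, [...]} }` so that nested braces sit flush. The supervisor spec as reformatted:

init([]) ->
    % Nested tuples lose their inner padding spaces.
    {ok, {{one_for_one, 5, 10}, [?CHILD(jwtf_keystore, worker)]}}.
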
diff --git a/src/mango/src/mango_crud.erl b/src/mango/src/mango_crud.erl
index 66cef65b3..f24a50a92 100644
--- a/src/mango/src/mango_crud.erl
+++ b/src/mango/src/mango_crud.erl
@@ -24,14 +24,12 @@
collect_cb/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
-insert(Db, #doc{}=Doc, Opts) ->
+insert(Db, #doc{} = Doc, Opts) ->
insert(Db, [Doc], Opts);
-insert(Db, {_}=Doc, Opts) ->
+insert(Db, {_} = Doc, Opts) ->
insert(Db, [Doc], Opts);
insert(Db, Docs, Opts) when is_list(Docs) ->
case fabric2_db:update_docs(Db, Docs, Opts) of
@@ -43,12 +41,10 @@ insert(Db, Docs, Opts) when is_list(Docs) ->
{error, lists:map(fun result_to_json/1, Errors)}
end.
-
find(Db, Selector, Callback, UserAcc, Opts) ->
{ok, Cursor} = mango_cursor:create(Db, Selector, Opts),
mango_cursor:execute(Cursor, Callback, UserAcc).
-
update(Db, Selector, Update, Options) ->
Upsert = proplists:get_value(upsert, Options),
case collect_docs(Db, Selector, Options) of
@@ -61,46 +57,50 @@ update(Db, Selector, Update, Options) ->
% Probably need to catch and rethrow errors from
% this function.
Doc = couch_doc:from_json_obj(InitDoc),
- NewDoc = case Doc#doc.id of
- <<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
- _ ->
- Doc
- end,
+ NewDoc =
+ case Doc#doc.id of
+ <<"">> ->
+ Doc#doc{id = couch_uuids:new(), revs = {0, []}};
+ _ ->
+ Doc
+ end,
insert(Db, NewDoc, Options)
end;
{ok, Docs} ->
- NewDocs = lists:map(fun(Doc) ->
- mango_doc:apply_update(Doc, Update)
- end, Docs),
+ NewDocs = lists:map(
+ fun(Doc) ->
+ mango_doc:apply_update(Doc, Update)
+ end,
+ Docs
+ ),
insert(Db, NewDocs, Options);
Else ->
Else
end.
-
delete(Db, Selector, Options) ->
case collect_docs(Db, Selector, Options) of
{ok, Docs} ->
- NewDocs = lists:map(fun({Props}) ->
- {[
- {<<"_id">>, proplists:get_value(<<"_id">>, Props)},
- {<<"_rev">>, proplists:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}
- end, Docs),
+ NewDocs = lists:map(
+ fun({Props}) ->
+ {[
+ {<<"_id">>, proplists:get_value(<<"_id">>, Props)},
+ {<<"_rev">>, proplists:get_value(<<"_rev">>, Props)},
+ {<<"_deleted">>, true}
+ ]}
+ end,
+ Docs
+ ),
insert(Db, NewDocs, Options);
Else ->
Else
end.
-
explain(Db, Selector, Opts) ->
{ok, Cursor} = mango_cursor:create(Db, Selector, Opts),
mango_cursor:explain(Cursor).
-
-result_to_json(#doc{id=Id}, Result) ->
+result_to_json(#doc{id = Id}, Result) ->
result_to_json(Id, Result);
result_to_json({Props}, Result) ->
Id = couch_util:get_value(<<"_id">>, Props),
@@ -126,7 +126,6 @@ result_to_json(DocId, Error) ->
{reason, Reason}
]}.
-
% This is for errors because for some reason we
% need a different return value for errors? Blargh.
result_to_json({{Id, Rev}, Error}) ->
@@ -138,7 +137,6 @@ result_to_json({{Id, Rev}, Error}) ->
{reason, Reason}
]}.
-
collect_docs(Db, Selector, Options) ->
Cb = fun ?MODULE:collect_cb/2,
case find(Db, Selector, Cb, [], Options) of
@@ -148,7 +146,5 @@ collect_docs(Db, Selector, Options) ->
Else
end.
-
collect_cb({row, Doc}, Acc) ->
{ok, [Doc | Acc]}.
-
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index ed35817e3..da43c43fc 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -12,7 +12,6 @@
-module(mango_cursor).
-
-export([
create/3,
explain/1,
@@ -23,13 +22,11 @@
maybe_noop_range/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_cursor.hrl").
-include("mango_idx.hrl").
-
-define(CURSOR_MODULES, [
mango_cursor_view,
mango_cursor_special
@@ -37,10 +34,9 @@
-define(SUPERVISOR, mango_cursor_sup).
-
create(Db, Selector0, Opts) ->
Selector = mango_selector:normalize(Selector0),
- UsableIndexes = fabric2_fdb:transactional(Db, fun (TxDb) ->
+ UsableIndexes = fabric2_fdb:transactional(Db, fun(TxDb) ->
mango_idx:get_usable_indexes(TxDb, Selector, Opts)
end),
case mango_cursor:maybe_filter_indexes_by_ddoc(UsableIndexes, Opts) of
@@ -51,8 +47,7 @@ create(Db, Selector0, Opts) ->
create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
end.
-
-explain(#cursor{}=Cursor) ->
+explain(#cursor{} = Cursor) ->
#cursor{
index = Idx,
selector = Selector,
@@ -63,22 +58,22 @@ explain(#cursor{}=Cursor) ->
} = Cursor,
Mod = mango_idx:cursor_mod(Idx),
Opts = lists:keydelete(user_ctx, 1, Opts0),
- {[
- {dbname, mango_idx:dbname(Idx)},
- {index, mango_idx:to_json(Idx)},
- {selector, Selector},
- {opts, {Opts}},
- {limit, Limit},
- {skip, Skip},
- {fields, Fields}
- ] ++ Mod:explain(Cursor)}.
-
-
-execute(#cursor{index=Idx}=Cursor, UserFun, UserAcc) ->
+ {
+ [
+ {dbname, mango_idx:dbname(Idx)},
+ {index, mango_idx:to_json(Idx)},
+ {selector, Selector},
+ {opts, {Opts}},
+ {limit, Limit},
+ {skip, Skip},
+ {fields, Fields}
+ ] ++ Mod:explain(Cursor)
+ }.
+
+execute(#cursor{index = Idx} = Cursor, UserFun, UserAcc) ->
Mod = mango_idx:cursor_mod(Idx),
Mod:execute(Cursor, UserFun, UserAcc).
-
maybe_filter_indexes_by_ddoc(Indexes, Opts) ->
case lists:keyfind(use_index, 1, Opts) of
{use_index, []} ->
@@ -89,24 +84,22 @@ maybe_filter_indexes_by_ddoc(Indexes, Opts) ->
filter_indexes(Indexes, DesignId, ViewName)
end.
-
filter_indexes(Indexes, DesignId0) ->
- DesignId = case DesignId0 of
- <<"_design/", _/binary>> ->
- DesignId0;
- Else ->
- <<"_design/", Else/binary>>
- end,
+ DesignId =
+ case DesignId0 of
+ <<"_design/", _/binary>> ->
+ DesignId0;
+ Else ->
+ <<"_design/", Else/binary>>
+ end,
FiltFun = fun(I) -> mango_idx:ddoc(I) == DesignId end,
lists:filter(FiltFun, Indexes).
-
filter_indexes(Indexes0, DesignId, ViewName) ->
Indexes = filter_indexes(Indexes0, DesignId),
FiltFun = fun(I) -> mango_idx:name(I) == ViewName end,
lists:filter(FiltFun, Indexes).
-
remove_indexes_with_partial_filter_selector(Indexes) ->
FiltFun = fun(Idx) ->
case mango_idx:get_partial_filter_selector(Idx) of
@@ -116,7 +109,6 @@ remove_indexes_with_partial_filter_selector(Indexes) ->
end,
lists:filter(FiltFun, Indexes).
-
maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, Stats, UserAcc) ->
W0 = invalid_index_warning(Index, Opts),
W1 = no_index_warning(Index),
@@ -132,39 +124,41 @@ maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, Stats, UserAcc)
UserAcc1
end.
-
create_cursor(Db, Indexes, Selector, Opts) ->
[{CursorMod, CursorModIndexes} | _] = group_indexes_by_type(Indexes),
CursorMod:create(Db, CursorModIndexes, Selector, Opts).
-
group_indexes_by_type(Indexes) ->
- IdxDict = lists:foldl(fun(I, D) ->
- dict:append(mango_idx:cursor_mod(I), I, D)
- end, dict:new(), Indexes),
+ IdxDict = lists:foldl(
+ fun(I, D) ->
+ dict:append(mango_idx:cursor_mod(I), I, D)
+ end,
+ dict:new(),
+ Indexes
+ ),
% The first cursor module that has indexes will be
% used to service this query. This is so that we
% don't suddenly switch indexes for existing client
% queries.
- lists:flatmap(fun(CMod) ->
- case dict:find(CMod, IdxDict) of
- {ok, CModIndexes} ->
- [{CMod, CModIndexes}];
- error ->
- []
- end
- end, ?CURSOR_MODULES).
-
+ lists:flatmap(
+ fun(CMod) ->
+ case dict:find(CMod, IdxDict) of
+ {ok, CModIndexes} ->
+ [{CMod, CModIndexes}];
+ error ->
+ []
+ end
+ end,
+ ?CURSOR_MODULES
+ ).
% warn if the _all_docs index was used to fulfil a query
no_index_warning(#idx{type = Type}) when Type =:= <<"special">> ->
couch_stats:increment_counter([mango, unindexed_queries]),
[<<"No matching index found, create an index to optimize query time.">>];
-
no_index_warning(_) ->
[].
-
% warn if user specified an index which doesn't exist or isn't valid
% for the selector.
% In this scenario, Mango will ignore the index hint and auto-select an index.
@@ -172,43 +166,51 @@ invalid_index_warning(Index, Opts) ->
UseIndex = lists:keyfind(use_index, 1, Opts),
invalid_index_warning_int(Index, UseIndex).
-
invalid_index_warning_int(Index, {use_index, [DesignId]}) ->
Filtered = filter_indexes([Index], DesignId),
- if Filtered /= [] -> []; true ->
- couch_stats:increment_counter([mango, query_invalid_index]),
- Reason = fmt("_design/~s was not used because it does not contain a valid index for this query.",
- [ddoc_name(DesignId)]),
- [Reason]
+ if
+ Filtered /= [] ->
+ [];
+ true ->
+ couch_stats:increment_counter([mango, query_invalid_index]),
+ Reason = fmt(
+ "_design/~s was not used because it does not contain a valid index for this query.",
+ [ddoc_name(DesignId)]
+ ),
+ [Reason]
end;
-
invalid_index_warning_int(Index, {use_index, [DesignId, ViewName]}) ->
Filtered = filter_indexes([Index], DesignId, ViewName),
- if Filtered /= [] -> []; true ->
- couch_stats:increment_counter([mango, query_invalid_index]),
- Reason = fmt("_design/~s, ~s was not used because it is not a valid index for this query.",
- [ddoc_name(DesignId), ViewName]),
- [Reason]
+ if
+ Filtered /= [] ->
+ [];
+ true ->
+ couch_stats:increment_counter([mango, query_invalid_index]),
+ Reason = fmt(
+ "_design/~s, ~s was not used because it is not a valid index for this query.",
+ [ddoc_name(DesignId), ViewName]
+ ),
+ [Reason]
end;
-
invalid_index_warning_int(_, _) ->
[].
-
% warn if a large number of documents needed to be scanned per result
% returned, implying a lot of in-memory filtering
-index_scan_warning(#execution_stats {
- totalDocsExamined = Docs,
- resultsReturned = ResultCount
- }) ->
+index_scan_warning(#execution_stats{
+ totalDocsExamined = Docs,
+ resultsReturned = ResultCount
+}) ->
Ratio = calculate_index_scan_ratio(Docs, ResultCount),
Threshold = config:get_integer("mango", "index_scan_warning_threshold", 10),
case Threshold > 0 andalso Ratio > Threshold of
true ->
couch_stats:increment_counter([mango, too_many_docs_scanned]),
- Reason = <<"The number of documents examined is high in proportion to the number of results returned. Consider adding a more specific index to improve this.">>,
+ Reason =
+ <<"The number of documents examined is high in proportion to the number of results returned. Consider adding a more specific index to improve this.">>,
[Reason];
- false -> []
+ false ->
+ []
end.
% When there is an empty array for certain operators, we don't actually
@@ -227,20 +229,15 @@ maybe_noop_range({[{Op, []}]}, IndexRanges) ->
maybe_noop_range(_, IndexRanges) ->
IndexRanges.
-
calculate_index_scan_ratio(DocsScanned, 0) ->
DocsScanned;
-
calculate_index_scan_ratio(DocsScanned, ResultCount) ->
DocsScanned / ResultCount.
-
fmt(Format, Args) ->
iolist_to_binary(io_lib:format(Format, Args)).
-
ddoc_name(<<"_design/", Name/binary>>) ->
Name;
-
ddoc_name(Name) ->
Name.
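
mango_cursor shows how erlfmt rewrites compact `if` expressions: `if Filtered /= [] -> []; true -> ... end` becomes fully vertical, with `if` alone on its line and each condition and body pair indented beneath it. A sketch of the shape, using a hypothetical predicate:

warn_if_empty(Filtered) ->
    if
        Filtered /= [] ->
            [];
        true ->
            [<<"no matching index">>]
    end.
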
diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl
index 33a1f8c46..19b24c535 100644
--- a/src/mango/src/mango_cursor_special.erl
+++ b/src/mango/src/mango_cursor_special.erl
@@ -22,12 +22,10 @@
handle_message/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-include("mango_cursor.hrl").
-
create(Db, Indexes, Selector, Opts) ->
InitialRange = mango_idx_view:field_ranges(Selector),
CatchAll = [{<<"_id">>, {'$gt', null, '$lt', mango_json_max}}],
@@ -37,7 +35,7 @@ create(Db, Indexes, Selector, Opts) ->
FieldRanges = InitialRange ++ CatchAll,
Composited = mango_cursor_view:composite_indexes(Indexes, FieldRanges),
{Index, IndexRanges} = mango_cursor_view:choose_best_index(Db, Composited),
-
+
Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()),
Skip = couch_util:get_value(skip, Opts, 0),
Fields = couch_util:get_value(fields, Opts, all_fields),
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 4a22e15fa..713e32ea2 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -25,14 +25,12 @@
choose_best_index/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-include("mango_cursor.hrl").
-include("mango_idx_view.hrl").
-include_lib("kernel/include/logger.hrl").
-
create(Db, Indexes, Selector, Opts) ->
FieldRanges = mango_idx_view:field_ranges(Selector),
Composited = composite_indexes(Indexes, FieldRanges),
@@ -57,7 +55,6 @@ create(Db, Indexes, Selector, Opts) ->
bookmark = Bookmark
}}.
-
explain(Cursor) ->
#cursor{
opts = Opts
@@ -66,46 +63,46 @@ explain(Cursor) ->
BaseArgs = base_args(Cursor),
Args = apply_opts(Opts, BaseArgs),
- [{mrargs, {[
- {include_docs, Args#mrargs.include_docs},
- {view_type, Args#mrargs.view_type},
- {reduce, Args#mrargs.reduce},
- {start_key, maybe_replace_max_json(Args#mrargs.start_key)},
- {end_key, maybe_replace_max_json(Args#mrargs.end_key)},
- {direction, Args#mrargs.direction},
- {stable, Args#mrargs.stable},
- {update, Args#mrargs.update},
- {conflicts, Args#mrargs.conflicts}
- ]}}].
-
+ [
+ {mrargs,
+ {[
+ {include_docs, Args#mrargs.include_docs},
+ {view_type, Args#mrargs.view_type},
+ {reduce, Args#mrargs.reduce},
+ {start_key, maybe_replace_max_json(Args#mrargs.start_key)},
+ {end_key, maybe_replace_max_json(Args#mrargs.end_key)},
+ {direction, Args#mrargs.direction},
+ {stable, Args#mrargs.stable},
+ {update, Args#mrargs.update},
+ {conflicts, Args#mrargs.conflicts}
+ ]}}
+ ].
% replace internal values that cannot
% be represented as a valid UTF-8 string
% with a token for JSON serialization
maybe_replace_max_json([]) ->
[];
-
maybe_replace_max_json([?MAX_JSON_OBJ | T]) ->
[<<"<MAX>">> | maybe_replace_max_json(T)];
-
maybe_replace_max_json([H | T]) ->
[H | maybe_replace_max_json(T)];
-
maybe_replace_max_json(?MAX_STR) ->
<<"<MAX>">>;
-
maybe_replace_max_json(EndKey) ->
EndKey.
-
base_args(#cursor{index = Idx} = Cursor) ->
- {StartKey, EndKey} = case Cursor#cursor.ranges of
- [empty] ->
- {null, null};
- _ ->
- {mango_idx:start_key(Idx, Cursor#cursor.ranges),
- mango_idx:end_key(Idx, Cursor#cursor.ranges)}
- end,
+ {StartKey, EndKey} =
+ case Cursor#cursor.ranges of
+ [empty] ->
+ {null, null};
+ _ ->
+ {
+ mango_idx:start_key(Idx, Cursor#cursor.ranges),
+ mango_idx:end_key(Idx, Cursor#cursor.ranges)
+ }
+ end,
#mrargs{
view_type = map,
reduce = false,
@@ -117,7 +114,6 @@ base_args(#cursor{index = Idx} = Cursor) ->
]
}.
-
execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFun, UserAcc) ->
Cursor = Cursor0#cursor{
user_fun = UserFun,
@@ -133,50 +129,57 @@ execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFu
#cursor{opts = Opts, bookmark = Bookmark} = Cursor,
Args0 = apply_opts(Opts, BaseArgs),
Args = mango_json_bookmark:update_args(Bookmark, Args0),
- Result = case mango_idx:def(Idx) of
- all_docs ->
- CB = fun ?MODULE:handle_all_docs_message/2,
- AllDocOpts = fabric2_util:all_docs_view_opts(Args),
- fabric2_db:fold_docs(Db, CB, Cursor, AllDocOpts);
- _ ->
- CB = fun ?MODULE:handle_message/2,
- % Normal view
- DDocId = mango_idx:ddoc(Idx),
- {ok, DDoc} = fabric2_db:open_doc(Db, DDocId),
- Name = mango_idx:name(Idx),
- couch_views:query(Db, DDoc, Name, CB, Cursor, Args)
- end,
+ Result =
+ case mango_idx:def(Idx) of
+ all_docs ->
+ CB = fun ?MODULE:handle_all_docs_message/2,
+ AllDocOpts = fabric2_util:all_docs_view_opts(Args),
+ fabric2_db:fold_docs(Db, CB, Cursor, AllDocOpts);
+ _ ->
+ CB = fun ?MODULE:handle_message/2,
+ % Normal view
+ DDocId = mango_idx:ddoc(Idx),
+ {ok, DDoc} = fabric2_db:open_doc(Db, DDocId),
+ Name = mango_idx:name(Idx),
+ couch_views:query(Db, DDoc, Name, CB, Cursor, Args)
+ end,
case Result of
{ok, LastCursor} ->
NewBookmark = mango_json_bookmark:create(LastCursor),
Arg = {add_key, bookmark, NewBookmark},
{_Go, FinalUserAcc} = UserFun(Arg, LastCursor#cursor.user_acc),
Stats0 = LastCursor#cursor.execution_stats,
- FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc),
- FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, Stats0, FinalUserAcc0),
+ FinalUserAcc0 = mango_execution_stats:maybe_add_stats(
+ Opts, UserFun, Stats0, FinalUserAcc
+ ),
+ FinalUserAcc1 = mango_cursor:maybe_add_warning(
+ UserFun, Cursor, Stats0, FinalUserAcc0
+ ),
{ok, FinalUserAcc1};
{error, Reason} ->
{error, Reason}
end
end.
-
% Any of these indexes may be a composite index. For each
% index find the most specific set of fields for each
% index. Ie, if an index has columns a, b, c, d, then
% check FieldRanges for a, b, c, and d and return
% the longest prefix of columns found.
composite_indexes(Indexes, FieldRanges) ->
- lists:foldl(fun(Idx, Acc) ->
- Cols = mango_idx:columns(Idx),
- Prefix = composite_prefix(Cols, FieldRanges),
- % Calcuate the difference between the FieldRanges/Selector
- % and the Prefix. We want to select the index with a prefix
- % that is as close to the FieldRanges as possible
- PrefixDifference = length(FieldRanges) - length(Prefix),
- [{Idx, Prefix, PrefixDifference} | Acc]
- end, [], Indexes).
-
+ lists:foldl(
+ fun(Idx, Acc) ->
+ Cols = mango_idx:columns(Idx),
+ Prefix = composite_prefix(Cols, FieldRanges),
+            % Calculate the difference between the FieldRanges/Selector
+ % and the Prefix. We want to select the index with a prefix
+ % that is as close to the FieldRanges as possible
+ PrefixDifference = length(FieldRanges) - length(Prefix),
+ [{Idx, Prefix, PrefixDifference} | Acc]
+ end,
+ [],
+ Indexes
+ ).
composite_prefix([], _) ->
[];
@@ -188,7 +191,6 @@ composite_prefix([Col | Rest], Ranges) ->
[]
end.
-
% The query planner
% First choose the index with the lowest difference between its
% Prefix and the FieldRanges. If that is equal, then
@@ -224,20 +226,19 @@ choose_best_index(_DbName, IndexRanges) ->
{SelectedIndex, SelectedIndexRanges, _} = hd(lists:sort(Cmp, IndexRanges)),
{SelectedIndex, SelectedIndexRanges}.
-
handle_message({meta, _}, Cursor) ->
{ok, Cursor};
handle_message({row, Props}, Cursor) ->
case match_doc(Cursor, Props) of
{ok, Doc, {execution_stats, Stats}} ->
- Cursor1 = Cursor#cursor {
+ Cursor1 = Cursor#cursor{
execution_stats = Stats
},
Cursor2 = update_bookmark_keys(Cursor1, Props),
FinalDoc = mango_fields:extract(Doc, Cursor2#cursor.fields),
handle_doc(Cursor2, FinalDoc);
{no_match, _, {execution_stats, Stats}} ->
- Cursor1 = Cursor#cursor {
+ Cursor1 = Cursor#cursor{
execution_stats = Stats
},
{ok, Cursor1};
@@ -257,7 +258,6 @@ handle_message(complete, Cursor) ->
handle_message({error, Reason}, _Cursor) ->
{error, Reason}.
-
handle_all_docs_message({row, Props}, Cursor) ->
case is_design_doc(Props) of
true -> {ok, Cursor};
@@ -266,7 +266,6 @@ handle_all_docs_message({row, Props}, Cursor) ->
handle_all_docs_message(Message, Cursor) ->
handle_message(Message, Cursor).
-
handle_doc(#cursor{skip = S} = C, _) when S > 0 ->
{ok, C#cursor{skip = S - 1}};
handle_doc(#cursor{limit = L, execution_stats = Stats} = C, Doc) when L > 0 ->
@@ -281,7 +280,6 @@ handle_doc(#cursor{limit = L, execution_stats = Stats} = C, Doc) when L > 0 ->
handle_doc(C, _Doc) ->
{stop, C}.
-
apply_opts([], Args) ->
Args;
apply_opts([{conflicts, true} | Rest], Args) ->
@@ -332,7 +330,6 @@ apply_opts([{_, _} | Rest], Args) ->
% Ignore unknown options
apply_opts(Rest, Args).
-
match_doc(Cursor, RowProps) ->
#cursor{
execution_stats = Stats0,
@@ -347,18 +344,16 @@ match_doc(Cursor, RowProps) ->
{no_match, Doc, {execution_stats, Stats1}}
end.
-
is_design_doc(RowProps) ->
case couch_util:get_value(id, RowProps) of
<<"_design/", _/binary>> -> true;
_ -> false
end.
-
update_bookmark_keys(#cursor{limit = Limit} = Cursor, Props) when Limit > 0 ->
Id = couch_util:get_value(id, Props),
Key = couch_util:get_value(key, Props),
- Cursor#cursor {
+ Cursor#cursor{
bookmark_docid = Id,
bookmark_key = Key
};
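
mango_cursor_view rounds out the set with line-length handling: a binary literal too long for its line is pushed below the `Var =`, just like any other over-long right-hand side, and long argument lists (see the maybe_add_stats/4 call above) wrap with the arguments grouped on their own line. Sketch:

scan_warning() ->
    % An over-long binary moves below the `=`.
    Reason =
        <<"The number of documents examined is high in proportion to the number of results returned.">>,
    [Reason].
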
diff --git a/src/mango/src/mango_doc.erl b/src/mango/src/mango_doc.erl
index c22b15544..f8cb4c63b 100644
--- a/src/mango/src/mango_doc.erl
+++ b/src/mango/src/mango_doc.erl
@@ -12,7 +12,6 @@
-module(mango_doc).
-
-export([
from_bson/1,
@@ -26,36 +25,35 @@
set_field/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
from_bson({Props}) ->
- DocProps = case lists:keytake(<<"_id">>, 1, Props) of
- {value, {<<"_id">>, DocId0}, RestProps} ->
- DocId = case DocId0 of
- {[{<<"$id">>, Id}]} ->
- Id;
- Else ->
- Else
- end,
- [{<<"_id">>, DocId} | RestProps];
- false ->
- Props
- end,
+ DocProps =
+ case lists:keytake(<<"_id">>, 1, Props) of
+ {value, {<<"_id">>, DocId0}, RestProps} ->
+ DocId =
+ case DocId0 of
+ {[{<<"$id">>, Id}]} ->
+ Id;
+ Else ->
+ Else
+ end,
+ [{<<"_id">>, DocId} | RestProps];
+ false ->
+ Props
+ end,
Doc = couch_doc:from_json_obj({DocProps}),
case Doc#doc.id of
<<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
+ Doc#doc{id = couch_uuids:new(), revs = {0, []}};
_ ->
Doc
end.
-
-apply_update(#doc{body={Props}}=Doc, Update) ->
+apply_update(#doc{body = {Props}} = Doc, Update) ->
NewProps = apply_update(Props, Update),
- Doc#doc{body={NewProps}};
+ Doc#doc{body = {NewProps}};
apply_update({Props}, {Update}) ->
Result = do_update({Props}, Update),
case has_operators(Result) of
@@ -66,13 +64,11 @@ apply_update({Props}, {Update}) ->
end,
Result.
-
update_as_insert({Update}) ->
NewProps = do_update_to_insert(Update, {[]}),
apply_update(NewProps, {Update}).
-
-has_operators(#doc{body=Body}) ->
+has_operators(#doc{body = Body}) ->
has_operators(Body);
has_operators({Props}) when is_list(Props) ->
has_operators_obj(Props);
@@ -85,7 +81,6 @@ has_operators(Val) when is_number(Val) ->
has_operators(Val) when is_binary(Val) ->
false.
-
has_operators_obj([]) ->
false;
has_operators_obj([{K, V} | Rest]) ->
@@ -101,7 +96,6 @@ has_operators_obj([{K, V} | Rest]) ->
end
end.
-
has_operators_arr([]) ->
false;
has_operators_arr([V | Rest]) ->
@@ -112,25 +106,24 @@ has_operators_arr([V | Rest]) ->
has_operators_arr(Rest)
end.
-
do_update(Props, []) ->
Props;
do_update(Props, [{Op, Value} | Rest]) ->
UpdateFun = update_operator_fun(Op),
- NewProps = case UpdateFun of
- undefined ->
- lists:keystore(Op, 1, Props, {Op, Value});
- Fun when is_function(Fun, 2) ->
- case Value of
- {ValueProps} ->
- Fun(Props, ValueProps);
- _ ->
- ?MANGO_ERROR({invalid_operand, Op, Value})
- end
- end,
+ NewProps =
+ case UpdateFun of
+ undefined ->
+ lists:keystore(Op, 1, Props, {Op, Value});
+ Fun when is_function(Fun, 2) ->
+ case Value of
+ {ValueProps} ->
+ Fun(Props, ValueProps);
+ _ ->
+ ?MANGO_ERROR({invalid_operand, Op, Value})
+ end
+ end,
do_update(NewProps, Rest).
-
update_operator_fun(<<"$", _/binary>> = Op) ->
OperatorFuns = [
% Object operators
@@ -160,217 +153,230 @@ update_operator_fun(<<"$", _/binary>> = Op) ->
update_operator_fun(_) ->
undefined.
-
do_update_inc(Props, []) ->
Props;
do_update_inc(Props, [{Field, Incr} | Rest]) ->
- if is_number(Incr) -> ok; true ->
- ?MANGO_ERROR({invalid_increment, Incr})
- end,
- NewProps = case get_field(Props, Field, fun is_number/1) of
- Value when is_number(Value) ->
- set_field(Props, Field, Value + Incr);
- not_found ->
- set_field(Props, Field, Incr);
- _ ->
- Props
+ if
+ is_number(Incr) -> ok;
+ true -> ?MANGO_ERROR({invalid_increment, Incr})
end,
+ NewProps =
+ case get_field(Props, Field, fun is_number/1) of
+ Value when is_number(Value) ->
+ set_field(Props, Field, Value + Incr);
+ not_found ->
+ set_field(Props, Field, Incr);
+ _ ->
+ Props
+ end,
do_update_inc(NewProps, Rest).
-
do_update_rename(Props, []) ->
Props;
do_update_rename(Props, [{OldField, NewField} | Rest]) ->
- NewProps = case rem_field(Props, OldField) of
- {RemProps, OldValue} ->
- set_field(RemProps, NewField, OldValue);
- _ ->
- Props
- end,
+ NewProps =
+ case rem_field(Props, OldField) of
+ {RemProps, OldValue} ->
+ set_field(RemProps, NewField, OldValue);
+ _ ->
+ Props
+ end,
do_update_rename(NewProps, Rest).
-
do_update_set_on_insert(Props, _) ->
% This is only called during calls to apply_update/2
% which means this isn't an insert, so drop it on
% the floor.
Props.
-
do_update_set(Props, []) ->
Props;
do_update_set(Props, [{Field, Value} | Rest]) ->
NewProps = set_field(Props, Field, Value),
do_update_set(NewProps, Rest).
-
do_update_unset(Props, []) ->
Props;
do_update_unset(Props, [{Field, _} | Rest]) ->
- NewProps = case rem_field(Props, Field) of
- {RemProps, _} ->
- RemProps;
- _ ->
- Props
- end,
+ NewProps =
+ case rem_field(Props, Field) of
+ {RemProps, _} ->
+ RemProps;
+ _ ->
+ Props
+ end,
do_update_unset(NewProps, Rest).
-
do_update_add_to_set(Props, []) ->
Props;
do_update_add_to_set(Props, [{Field, NewValue} | Rest]) ->
- ToAdd = case NewValue of
- {[{<<"$each">>, NewValues}]} when is_list(NewValues) ->
- NewValues;
- {[{<<"$each">>, NewValue}]} ->
- [NewValue];
- Else ->
- [Else]
- end,
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- FinalValues = lists:foldl(fun(V, Acc) ->
- lists:append(Acc, [V])
- end, OldValues, ToAdd),
- set_field(Props, Field, FinalValues);
- _ ->
- Props
- end,
+ ToAdd =
+ case NewValue of
+ {[{<<"$each">>, NewValues}]} when is_list(NewValues) ->
+ NewValues;
+ {[{<<"$each">>, NewValue}]} ->
+ [NewValue];
+ Else ->
+ [Else]
+ end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ FinalValues = lists:foldl(
+ fun(V, Acc) ->
+ lists:append(Acc, [V])
+ end,
+ OldValues,
+ ToAdd
+ ),
+ set_field(Props, Field, FinalValues);
+ _ ->
+ Props
+ end,
do_update_add_to_set(NewProps, Rest).
-
do_update_pop(Props, []) ->
Props;
do_update_pop(Props, [{Field, Pos} | Rest]) ->
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = case Pos > 0 of
- true ->
- lists:sublist(OldValues, 1, length(OldValues) - 1);
- false ->
- lists:sublist(OldValues, 2, length(OldValues) - 1)
- end,
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ NewValues =
+ case Pos > 0 of
+ true ->
+ lists:sublist(OldValues, 1, length(OldValues) - 1);
+ false ->
+ lists:sublist(OldValues, 2, length(OldValues) - 1)
+ end,
+ set_field(Props, Field, NewValues);
+ _ ->
+ Props
+ end,
do_update_pop(NewProps, Rest).
-
do_update_pull_all(Props, []) ->
Props;
do_update_pull_all(Props, [{Field, Values} | Rest]) ->
- ToRem = case is_list(Values) of
- true -> Values;
- false -> [Values]
- end,
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = lists:foldl(fun(ValToRem, Acc) ->
- % The logic in these filter functions is a bit
- % subtle. The way to think of this is that we
- % return true for all elements we want to keep.
- FilterFun = case has_operators(ValToRem) of
- true ->
- fun(A) ->
- Sel = mango_selector:normalize(ValToRem),
- not mango_selector:match(A, Sel)
- end;
- false ->
- fun(A) -> A /= ValToRem end
- end,
- lists:filter(FilterFun, Acc)
- end, OldValues, ToRem),
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
+ ToRem =
+ case is_list(Values) of
+ true -> Values;
+ false -> [Values]
+ end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ NewValues = lists:foldl(
+ fun(ValToRem, Acc) ->
+ % The logic in these filter functions is a bit
+ % subtle. The way to think of this is that we
+ % return true for all elements we want to keep.
+ FilterFun =
+ case has_operators(ValToRem) of
+ true ->
+ fun(A) ->
+ Sel = mango_selector:normalize(ValToRem),
+ not mango_selector:match(A, Sel)
+ end;
+ false ->
+ fun(A) -> A /= ValToRem end
+ end,
+ lists:filter(FilterFun, Acc)
+ end,
+ OldValues,
+ ToRem
+ ),
+ set_field(Props, Field, NewValues);
+ _ ->
+ Props
+ end,
do_update_add_to_set(NewProps, Rest).
-
do_update_pull(Props, []) ->
Props;
do_update_pull(Props, [{Field, Value} | Rest]) ->
- ToRem = case Value of
- {[{<<"$each">>, Values}]} when is_list(Values) ->
- Values;
- {[{<<"$each">>, Value}]} ->
- [Value];
- Else ->
- [Else]
- end,
+ ToRem =
+ case Value of
+ {[{<<"$each">>, Values}]} when is_list(Values) ->
+ Values;
+ {[{<<"$each">>, Value}]} ->
+ [Value];
+ Else ->
+ [Else]
+ end,
NewProps = do_update_pull_all(Props, [{Field, ToRem}]),
do_update_pull(NewProps, Rest).
-
do_update_push_all(_, []) ->
[];
do_update_push_all(Props, [{Field, Values} | Rest]) ->
- ToAdd = case is_list(Values) of
- true -> Values;
- false -> [Values]
- end,
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = OldValues ++ ToAdd,
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
+ ToAdd =
+ case is_list(Values) of
+ true -> Values;
+ false -> [Values]
+ end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ NewValues = OldValues ++ ToAdd,
+ set_field(Props, Field, NewValues);
+ _ ->
+ Props
+ end,
do_update_push_all(NewProps, Rest).
-
do_update_push(Props, []) ->
Props;
do_update_push(Props, [{Field, Value} | Rest]) ->
- ToAdd = case Value of
- {[{<<"$each">>, Values}]} when is_list(Values) ->
- Values;
- {[{<<"$each">>, Value}]} ->
- [Value];
- Else ->
- [Else]
- end,
+ ToAdd =
+ case Value of
+ {[{<<"$each">>, Values}]} when is_list(Values) ->
+ Values;
+ {[{<<"$each">>, Value}]} ->
+ [Value];
+ Else ->
+ [Else]
+ end,
NewProps = do_update_push_all(Props, [{Field, ToAdd}]),
do_update_push(NewProps, Rest).
-
-
do_update_bitwise(Props, []) ->
Props;
do_update_bitwise(Props, [{Field, Value} | Rest]) ->
- DoOp = case Value of
- {[{<<"and">>, Val}]} when is_integer(Val) ->
- fun(V) -> V band Val end;
- {[{<<"or">>, Val}]} when is_integer(Val) ->
- fun(V) -> V bor Val end;
- _ ->
- fun(V) -> V end
- end,
- NewProps = case get_field(Props, Field, fun is_number/1) of
- Value when is_number(Value) ->
- NewValue = DoOp(Value),
- set_field(Props, Field, NewValue);
- _ ->
- Props
- end,
+ DoOp =
+ case Value of
+ {[{<<"and">>, Val}]} when is_integer(Val) ->
+ fun(V) -> V band Val end;
+ {[{<<"or">>, Val}]} when is_integer(Val) ->
+ fun(V) -> V bor Val end;
+ _ ->
+ fun(V) -> V end
+ end,
+ NewProps =
+ case get_field(Props, Field, fun is_number/1) of
+ Value when is_number(Value) ->
+ NewValue = DoOp(Value),
+ set_field(Props, Field, NewValue);
+ _ ->
+ Props
+ end,
do_update_bitwise(NewProps, Rest).
-
do_update_to_insert([], Doc) ->
Doc;
do_update_to_insert([{<<"$setOnInsert">>, {FieldProps}}], Doc) ->
- lists:foldl(fun({Field, Value}, DocAcc) ->
- set_field(DocAcc, Field, Value)
- end, Doc, FieldProps);
+ lists:foldl(
+ fun({Field, Value}, DocAcc) ->
+ set_field(DocAcc, Field, Value)
+ end,
+ Doc,
+ FieldProps
+ );
do_update_to_insert([{_, _} | Rest], Doc) ->
do_update_to_insert(Rest, Doc).
-
get_field(Props, Field) ->
get_field(Props, Field, no_validation).
-
get_field(Props, Field, Validator) when is_binary(Field) ->
{ok, Path} = mango_util:parse_field(Field),
get_field(Props, Path, Validator);
@@ -402,13 +408,13 @@ get_field(Values, [Name | Rest], Validator) when is_list(Values) ->
false ->
bad_path
end
- catch error:badarg ->
- bad_path
+ catch
+ error:badarg ->
+ bad_path
end;
-get_field(_, [_|_], _) ->
+get_field(_, [_ | _], _) ->
bad_path.
-
rem_field(Props, Field) when is_binary(Field) ->
{ok, Path} = mango_util:parse_field(Field),
rem_field(Props, Path);
@@ -443,8 +449,9 @@ rem_field(Values, [Name]) when is_list(Values) ->
false ->
bad_path
end
- catch error:badarg ->
- bad_path
+ catch
+ error:badarg ->
+ bad_path
end;
rem_field(Values, [Name | Rest]) when is_list(Values) ->
% Name might be an integer index into an array
@@ -463,13 +470,13 @@ rem_field(Values, [Name | Rest]) when is_list(Values) ->
false ->
bad_path
end
- catch error:badarg ->
- bad_path
+ catch
+ error:badarg ->
+ bad_path
end;
-rem_field(_, [_|_]) ->
+rem_field(_, [_ | _]) ->
bad_path.
-
set_field(Props, Field, Value) when is_binary(Field) ->
{ok, Path} = mango_util:parse_field(Field),
set_field(Props, Path, Value);
@@ -495,8 +502,9 @@ set_field(Values, [Name], Value) when is_list(Values) ->
false ->
Values
end
- catch error:badarg ->
- Values
+ catch
+ error:badarg ->
+ Values
end;
set_field(Values, [Name | Rest], Value) when is_list(Values) ->
% Name might be an integer index into an array
@@ -511,27 +519,25 @@ set_field(Values, [Name | Rest], Value) when is_list(Values) ->
false ->
Values
end
- catch error:badarg ->
- Values
+ catch
+ error:badarg ->
+ Values
end;
-set_field(Value, [_|_], _) ->
+set_field(Value, [_ | _], _) ->
Value.
-
make_nested([], Value) ->
Value;
make_nested([Name | Rest], Value) ->
{[{Name, make_nested(Rest, Value)}]}.
-
rem_elem(1, [Value | Rest]) ->
{Rest, Value};
rem_elem(I, [Item | Rest]) when I > 1 ->
- {Tail, Value} = rem_elem(I+1, Rest),
+ {Tail, Value} = rem_elem(I + 1, Rest),
{[Item | Tail], Value}.
-
set_elem(1, [_ | Rest], Value) ->
[Value | Rest];
set_elem(I, [Item | Rest], Value) when I > 1 ->
- [Item | set_elem(I-1, Rest, Value)].
+ [Item | set_elem(I - 1, Rest, Value)].
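For orientation on the hunks above: a hedged usage sketch of the update pipeline in mango_doc, assuming the mango application is on the code path. Document and update values are illustrative; shapes follow the EJSON {Props} convention used throughout this file.

    %% apply_update/2 dispatches each $-operator through
    %% update_operator_fun/1 and asserts no operators remain.
    Doc0 = {[{<<"count">>, 1}, {<<"tags">>, [<<"a">>]}]},
    Update = {[
        {<<"$inc">>, {[{<<"count">>, 2}]}},       %% count -> 3
        {<<"$push">>, {[{<<"tags">>, <<"b">>}]}}  %% tags -> [<<"a">>, <<"b">>]
    ]},
    Doc1 = mango_doc:apply_update(Doc0, Update).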
diff --git a/src/mango/src/mango_epi.erl b/src/mango/src/mango_epi.erl
index d593d6371..da9521249 100644
--- a/src/mango/src/mango_epi.erl
+++ b/src/mango/src/mango_epi.erl
@@ -29,7 +29,7 @@ app() ->
providers() ->
[
- {chttpd_handlers, mango_httpd_handlers}
+ {chttpd_handlers, mango_httpd_handlers}
].
services() ->
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index 9ac8f6368..bcce93cac 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -12,21 +12,20 @@
-module(mango_error).
-
-include_lib("couch/include/couch_db.hrl").
-
-export([
info/2
]).
-
info(mango_idx, {no_usable_index, missing_sort_index}) ->
{
400,
<<"no_usable_index">>,
- <<"No index exists for this sort, "
- "try indexing by the sort fields.">>
+ <<
+ "No index exists for this sort, "
+ "try indexing by the sort fields."
+ >>
};
info(mango_idx, {no_usable_index, missing_sort_index_global}) ->
{
@@ -40,7 +39,6 @@ info(mango_json_bookmark, {invalid_bookmark, BadBookmark}) ->
<<"invalid_bookmark">>,
fmt("Invalid bookmark value: ~s", [?JSON_ENCODE(BadBookmark)])
};
-
info(mango_cursor_text, {invalid_bookmark, BadBookmark}) ->
{
400,
@@ -53,8 +51,9 @@ info(mango_cursor_text, multiple_text_indexes) ->
<<"multiple_text_indexes">>,
<<"You must specify an index with the `use_index` parameter.">>
};
-info(mango_cursor_text, {text_search_error, {error, {bad_request, Msg}}})
- when is_binary(Msg) ->
+info(mango_cursor_text, {text_search_error, {error, {bad_request, Msg}}}) when
+ is_binary(Msg)
+->
{
400,
<<"text_search_error">>,
@@ -66,7 +65,6 @@ info(mango_cursor_text, {text_search_error, {error, Error}}) ->
<<"text_search_error">>,
fmt("~p", [Error])
};
-
info(mango_fields, {invalid_fields_json, BadFields}) ->
{
400,
@@ -79,7 +77,6 @@ info(mango_fields, {invalid_field_json, BadField}) ->
<<"invalid_field">>,
fmt("Invalid JSON for field spec: ~w", [BadField])
};
-
info(mango_httpd, error_saving_ddoc) ->
{
500,
@@ -102,9 +99,8 @@ info(mango_httpd, invalid_list_index_params) ->
{
500,
<<"invalid_list_index_params">>,
- <<"Index parameter ranges: limit > 1, skip > 0" >>
+ <<"Index parameter ranges: limit > 1, skip > 0">>
};
-
info(mango_idx, {invalid_index_type, BadType}) ->
{
400,
@@ -135,7 +131,6 @@ info(mango_idx, {index_service_unavailable, IndexName}) ->
<<"required index service unavailable">>,
fmt("~s", [IndexName])
};
-
info(mango_idx_view, {invalid_index_json, BadIdx}) ->
{
400,
@@ -146,9 +141,12 @@ info(mango_idx_text, {invalid_index_fields_definition, Def}) ->
{
400,
<<"invalid_index_fields_definition">>,
- fmt("Text Index field definitions must be of the form
- {\"name\": \"non-empty fieldname\", \"type\":
- \"boolean,number, or string\"}. Def: ~p", [Def])
+ fmt(
+ "Text Index field definitions must be of the form\n"
+ " {\"name\": \"non-empty fieldname\", \"type\":\n"
+ " \"boolean,number, or string\"}. Def: ~p",
+ [Def]
+ )
};
info(mango_idx_view, {index_not_found, BadIdx}) ->
{
@@ -156,7 +154,6 @@ info(mango_idx_view, {index_not_found, BadIdx}) ->
<<"invalid_index">>,
fmt("JSON index ~s not found in this design doc.", [BadIdx])
};
-
info(mango_idx_text, {invalid_index_text, BadIdx}) ->
{
400,
@@ -175,13 +172,14 @@ info(mango_idx_text, index_all_disabled) ->
<<"index_all_disabled">>,
<<"New text indexes are forbidden to index all fields.">>
};
-
info(mango_opts, {invalid_bulk_docs, Val}) ->
{
400,
<<"invalid_bulk_docs">>,
- fmt("Bulk Delete requires an array of non-null docids. Docids: ~w",
- [Val])
+ fmt(
+ "Bulk Delete requires an array of non-null docids. Docids: ~w",
+ [Val]
+ )
};
info(mango_opts, {invalid_ejson, Val}) ->
{
@@ -255,15 +253,15 @@ info(mango_opts, {invalid_index_name, BadName}) ->
<<"invalid_index_name">>,
fmt("Invalid index name: ~w", [BadName])
};
-
info(mango_opts, {multiple_text_operator, {invalid_selector, BadSel}}) ->
{
400,
<<"multiple_text_selector">>,
- fmt("Selector cannot contain more than one $text operator: ~w",
- [BadSel])
+ fmt(
+ "Selector cannot contain more than one $text operator: ~w",
+ [BadSel]
+ )
};
-
info(mango_selector, {invalid_selector, missing_field_name}) ->
{
400,
@@ -294,7 +292,6 @@ info(mango_selector, {bad_field, BadSel}) ->
<<"bad_field">>,
fmt("Invalid field normalization on selector: ~w", [BadSel])
};
-
info(mango_selector_text, {invalid_operator, Op}) ->
{
400,
@@ -303,14 +300,14 @@ info(mango_selector_text, {invalid_operator, Op}) ->
};
info(mango_selector_text, {text_sort_error, Field}) ->
S = binary_to_list(Field),
- Msg = "Unspecified or ambiguous sort type. Try appending :number or"
+ Msg =
+ "Unspecified or ambiguous sort type. Try appending :number or"
" :string to the sort field. ~s",
{
400,
<<"text_sort_error">>,
fmt(Msg, [S])
};
-
info(mango_sort, {invalid_sort_json, BadSort}) ->
{
400,
@@ -335,7 +332,6 @@ info(mango_sort, {unsupported, mixed_sort}) ->
<<"unsupported_mixed_sort">>,
<<"Sorts currently only support a single direction for all fields.">>
};
-
info(mango_util, {error_loading_doc, DocId}) ->
{
500,
@@ -354,7 +350,6 @@ info(mango_util, {invalid_ddoc_lang, Lang}) ->
<<"invalid_ddoc_lang">>,
fmt("Existing design doc has an invalid language: ~w", [Lang])
};
-
info(Module, Reason) ->
{
500,
@@ -362,6 +357,5 @@ info(Module, Reason) ->
fmt("Unknown Error: ~s :: ~w", [Module, Reason])
}.
-
fmt(Format, Args) ->
iolist_to_binary(io_lib:format(Format, Args)).
diff --git a/src/mango/src/mango_eval.erl b/src/mango/src/mango_eval.erl
index 7fd81df77..fc7725c62 100644
--- a/src/mango/src/mango_eval.erl
+++ b/src/mango/src/mango_eval.erl
@@ -10,11 +10,9 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(mango_eval).
-behavior(couch_eval).
-
-export([
acquire_map_context/1,
release_map_context/1,
@@ -24,69 +22,69 @@
try_compile/4
]).
-
-export([
index_doc/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango_idx.hrl").
-
acquire_map_context(Opts) ->
#{
db_name := DbName,
ddoc_id := DDocId,
map_funs := MapFuns
} = Opts,
- Indexes = lists:map(fun (Def) ->
- #idx{
- type = <<"json">>,
- dbname = DbName,
- ddoc = DDocId,
- def = Def
- }
- end, MapFuns),
+ Indexes = lists:map(
+ fun(Def) ->
+ #idx{
+ type = <<"json">>,
+ dbname = DbName,
+ ddoc = DDocId,
+ def = Def
+ }
+ end,
+ MapFuns
+ ),
{ok, Indexes}.
-
release_map_context(_) ->
ok.
-
map_docs(Indexes, Docs) ->
- {ok, lists:map(fun(Doc) ->
- Json = couch_doc:to_json_obj(Doc, []),
- Results = index_doc(Indexes, Json),
- {Doc#doc.id, Results}
- end, Docs)}.
-
+ {ok,
+ lists:map(
+ fun(Doc) ->
+ Json = couch_doc:to_json_obj(Doc, []),
+ Results = index_doc(Indexes, Json),
+ {Doc#doc.id, Results}
+ end,
+ Docs
+ )}.
acquire_context() ->
{ok, no_ctx}.
-
release_context(_) ->
ok.
-
try_compile(_Ctx, _FunType, _IndexName, IndexInfo) ->
mango_idx_view:validate_index_def(IndexInfo).
-
index_doc(Indexes, Doc) ->
- lists:map(fun(Idx) ->
- {IdxDef} = mango_idx:def(Idx),
- Results = get_index_entries(IdxDef, Doc),
- case lists:member(not_found, Results) of
- true ->
- [];
- false ->
- [{Results, null}]
- end
- end, Indexes).
-
+ lists:map(
+ fun(Idx) ->
+ {IdxDef} = mango_idx:def(Idx),
+ Results = get_index_entries(IdxDef, Doc),
+ case lists:member(not_found, Results) of
+ true ->
+ [];
+ false ->
+ [{Results, null}]
+ end
+ end,
+ Indexes
+ ).
get_index_entries(IdxDef, Doc) ->
{Fields} = couch_util:get_value(<<"fields">>, IdxDef),
@@ -98,16 +96,17 @@ get_index_entries(IdxDef, Doc) ->
get_index_values(Fields, Doc)
end.
-
get_index_values(Fields, Doc) ->
- lists:map(fun({Field, _Dir}) ->
- case mango_doc:get_field(Doc, Field) of
- not_found -> not_found;
- bad_path -> not_found;
- Value -> Value
- end
- end, Fields).
-
+ lists:map(
+ fun({Field, _Dir}) ->
+ case mango_doc:get_field(Doc, Field) of
+ not_found -> not_found;
+ bad_path -> not_found;
+ Value -> Value
+ end
+ end,
+ Fields
+ ).
get_index_partial_filter_selector(IdxDef) ->
case couch_util:get_value(<<"partial_filter_selector">>, IdxDef, {[]}) of
@@ -119,12 +118,12 @@ get_index_partial_filter_selector(IdxDef) ->
Else
end.
-
should_index(Selector, Doc) ->
NormSelector = mango_selector:normalize(Selector),
Matches = mango_selector:match(NormSelector, Doc),
- IsDesign = case mango_doc:get_field(Doc, <<"_id">>) of
- <<"_design/", _/binary>> -> true;
- _ -> false
- end,
+ IsDesign =
+ case mango_doc:get_field(Doc, <<"_id">>) of
+ <<"_design/", _/binary>> -> true;
+ _ -> false
+ end,
Matches and not IsDesign.
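should_index/2 above admits a document only when it matches the partial filter selector and is not a design document. A brief illustration, with a made-up selector and doc:

    Selector = {[{<<"type">>, <<"user">>}]},
    Norm = mango_selector:normalize(Selector),
    Doc = {[{<<"_id">>, <<"u1">>}, {<<"type">>, <<"user">>}]},
    %% Matches, and the _id is outside the _design/ namespace,
    %% so should_index/2 would return true for this pair.
    true = mango_selector:match(Norm, Doc).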
diff --git a/src/mango/src/mango_execution_stats.erl b/src/mango/src/mango_execution_stats.erl
index fe9d27b90..79d74fcc9 100644
--- a/src/mango/src/mango_execution_stats.erl
+++ b/src/mango/src/mango_execution_stats.erl
@@ -12,7 +12,6 @@
-module(mango_execution_stats).
-
-export([
to_json/1,
incr_keys_examined/1,
@@ -24,10 +23,8 @@
maybe_add_stats/4
]).
-
-include("mango_cursor.hrl").
-
to_json(Stats) ->
{[
{total_keys_examined, Stats#execution_stats.totalKeysExamined},
@@ -36,44 +33,37 @@ to_json(Stats) ->
{execution_time_ms, Stats#execution_stats.executionTimeMs}
]}.
-
incr_keys_examined(Stats) ->
- Stats#execution_stats {
+ Stats#execution_stats{
totalKeysExamined = Stats#execution_stats.totalKeysExamined + 1
}.
-
incr_docs_examined(Stats) ->
incr_docs_examined(Stats, 1).
-
incr_docs_examined(Stats, N) ->
- Stats#execution_stats {
+ Stats#execution_stats{
totalDocsExamined = Stats#execution_stats.totalDocsExamined + N
}.
-
incr_results_returned(Stats) ->
couch_stats:increment_counter([mango, results_returned]),
- Stats#execution_stats {
+ Stats#execution_stats{
resultsReturned = Stats#execution_stats.resultsReturned + 1
}.
-
log_start(Stats) ->
- Stats#execution_stats {
+ Stats#execution_stats{
executionStartTime = os:timestamp()
}.
-
log_end(Stats) ->
End = os:timestamp(),
Diff = timer:now_diff(End, Stats#execution_stats.executionStartTime) / 1000,
- Stats#execution_stats {
+ Stats#execution_stats{
executionTimeMs = Diff
}.
-
maybe_add_stats(Opts, UserFun, Stats0, UserAcc) ->
Stats1 = log_end(Stats0),
couch_stats:update_histogram([mango, query_time], Stats1#execution_stats.executionTimeMs),
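log_start/1 and log_end/1 above time a query with os:timestamp/0; timer:now_diff/2 returns microseconds, hence the division by 1000 to report milliseconds. A standalone sketch:

    Start = os:timestamp(),
    timer:sleep(25),
    %% now_diff/2 yields microseconds; /1000 converts to ms.
    ElapsedMs = timer:now_diff(os:timestamp(), Start) / 1000.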
diff --git a/src/mango/src/mango_fields.erl b/src/mango/src/mango_fields.erl
index 273256025..1745cf9dd 100644
--- a/src/mango/src/mango_fields.erl
+++ b/src/mango/src/mango_fields.erl
@@ -17,10 +17,8 @@
extract/2
]).
-
-include("mango.hrl").
-
new([]) ->
{ok, all_fields};
new(Fields) when is_list(Fields) ->
@@ -28,24 +26,26 @@ new(Fields) when is_list(Fields) ->
new(Else) ->
?MANGO_ERROR({invalid_fields_json, Else}).
-
extract(Doc, undefined) ->
Doc;
extract(Doc, all_fields) ->
Doc;
extract(Doc, Fields) ->
- lists:foldl(fun(F, NewDoc) ->
- {ok, Path} = mango_util:parse_field(F),
- case mango_doc:get_field(Doc, Path) of
- not_found ->
- NewDoc;
- bad_path ->
- NewDoc;
- Value ->
- mango_doc:set_field(NewDoc, Path, Value)
- end
- end, {[]}, Fields).
-
+ lists:foldl(
+ fun(F, NewDoc) ->
+ {ok, Path} = mango_util:parse_field(F),
+ case mango_doc:get_field(Doc, Path) of
+ not_found ->
+ NewDoc;
+ bad_path ->
+ NewDoc;
+ Value ->
+ mango_doc:set_field(NewDoc, Path, Value)
+ end
+ end,
+ {[]},
+ Fields
+ ).
field(Val) when is_binary(Val) ->
Val;
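A sketch of the projection extract/2 performs; the doc and field list are illustrative, and dotted paths are parsed by mango_util:parse_field/1:

    Doc = {[{<<"a">>, 1}, {<<"b">>, {[{<<"c">>, 2}]}}]},
    %% Only the requested paths are copied into a fresh {[]} doc;
    %% missing or invalid paths are skipped silently.
    {[{<<"b">>, {[{<<"c">>, 2}]}}]} = mango_fields:extract(Doc, [<<"b.c">>]).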
diff --git a/src/mango/src/mango_httpd.erl b/src/mango/src/mango_httpd.erl
index 0d035dd99..6b1fe1b3d 100644
--- a/src/mango/src/mango_httpd.erl
+++ b/src/mango/src/mango_httpd.erl
@@ -12,12 +12,10 @@
-module(mango_httpd).
-
-export([
handle_req/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
@@ -38,50 +36,51 @@ handle_req(#httpd{} = Req, Db) ->
catch
throw:{mango_error, Module, Reason}:Stack ->
case mango_error:info(Module, Reason) of
- {500, ErrorStr, ReasonStr} ->
- chttpd:send_error(Req, {ErrorStr, ReasonStr, Stack});
- {Code, ErrorStr, ReasonStr} ->
- chttpd:send_error(Req, Code, ErrorStr, ReasonStr)
+ {500, ErrorStr, ReasonStr} ->
+ chttpd:send_error(Req, {ErrorStr, ReasonStr, Stack});
+ {Code, ErrorStr, ReasonStr} ->
+ chttpd:send_error(Req, Code, ErrorStr, ReasonStr)
end
end.
-
-handle_req_int(#httpd{path_parts=[_, <<"_index">> | _]} = Req, Db) ->
+handle_req_int(#httpd{path_parts = [_, <<"_index">> | _]} = Req, Db) ->
handle_index_req(Req, Db);
-handle_req_int(#httpd{path_parts=[_, <<"_explain">> | _]} = Req, Db) ->
+handle_req_int(#httpd{path_parts = [_, <<"_explain">> | _]} = Req, Db) ->
handle_explain_req(Req, Db);
-handle_req_int(#httpd{path_parts=[_, <<"_find">> | _]} = Req, Db) ->
+handle_req_int(#httpd{path_parts = [_, <<"_find">> | _]} = Req, Db) ->
handle_find_req(Req, Db);
handle_req_int(_, _) ->
throw({not_found, missing}).
-
-handle_index_req(#httpd{method='GET', path_parts=[_, _]}=Req, Db) ->
- Params = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end,
- chttpd:qs(Req)),
+handle_index_req(#httpd{method = 'GET', path_parts = [_, _]} = Req, Db) ->
+ Params = lists:flatmap(
+ fun({K, V}) -> parse_index_param(K, V) end,
+ chttpd:qs(Req)
+ ),
Idxs = fabric2_fdb:transactional(Db, fun(TxDb) ->
lists:sort(mango_idx:list(TxDb))
end),
JsonIdxs0 = lists:map(fun mango_idx:to_json/1, Idxs),
TotalRows = length(JsonIdxs0),
- Limit = case couch_util:get_value(limit, Params, TotalRows) of
- Limit0 when Limit0 < 1 ->
- ?MANGO_ERROR(invalid_list_index_params);
- Limit0 ->
- Limit0
- end,
- Skip = case couch_util:get_value(skip, Params, 0) of
- Skip0 when Skip0 < 0 ->
- ?MANGO_ERROR(invalid_list_index_params);
- Skip0 when Skip0 > TotalRows ->
- TotalRows;
- Skip0 ->
- Skip0
- end,
- JsonIdxs = lists:sublist(JsonIdxs0, Skip+1, Limit),
- chttpd:send_json(Req, {[{total_rows, TotalRows}, {indexes, JsonIdxs}]});
-
-handle_index_req(#httpd{method='POST', path_parts=[_, _]}=Req, Db) ->
+ Limit =
+ case couch_util:get_value(limit, Params, TotalRows) of
+ Limit0 when Limit0 < 1 ->
+ ?MANGO_ERROR(invalid_list_index_params);
+ Limit0 ->
+ Limit0
+ end,
+ Skip =
+ case couch_util:get_value(skip, Params, 0) of
+ Skip0 when Skip0 < 0 ->
+ ?MANGO_ERROR(invalid_list_index_params);
+ Skip0 when Skip0 > TotalRows ->
+ TotalRows;
+ Skip0 ->
+ Skip0
+ end,
+ JsonIdxs = lists:sublist(JsonIdxs0, Skip + 1, Limit),
+ chttpd:send_json(Req, {[{total_rows, TotalRows}, {indexes, JsonIdxs}]});
+handle_index_req(#httpd{method = 'POST', path_parts = [_, _]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{ok, Opts} = mango_opts:validate_idx_create(chttpd:json_body_obj(Req)),
{ok, Idx0} = mango_idx:new(Db, Opts),
@@ -108,46 +107,72 @@ handle_index_req(#httpd{method='POST', path_parts=[_, _]}=Req, Db) ->
end
end
end),
- chttpd:send_json(Req, {[{result, Status}, {id, Id}, {name, Name}]});
-
-handle_index_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
+ chttpd:send_json(Req, {[{result, Status}, {id, Id}, {name, Name}]});
+handle_index_req(#httpd{path_parts = [_, _]} = Req, _Db) ->
chttpd:send_method_not_allowed(Req, "GET,POST");
-
%% Essentially we just iterate through the list of ddoc ids passed in and
%% delete one by one. If an error occurs, all previous documents will be
%% deleted, but an error will be thrown for the current ddoc id.
-handle_index_req(#httpd{method='POST', path_parts=[_, <<"_index">>,
- <<"_bulk_delete">>]}=Req, Db) ->
+handle_index_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [
+ _,
+ <<"_index">>,
+ <<"_bulk_delete">>
+ ]
+ } = Req,
+ Db
+) ->
chttpd:validate_ctype(Req, "application/json"),
{ok, Opts} = mango_opts:validate_bulk_delete(chttpd:json_body_obj(Req)),
- {Success, Fail} = fabric2_fdb:transactional(Db, fun (TxDb) ->
+ {Success, Fail} = fabric2_fdb:transactional(Db, fun(TxDb) ->
Idxs = mango_idx:list(TxDb),
DDocs = get_bulk_delete_ddocs(Opts),
- lists:foldl(fun(DDocId0, {Success0, Fail0}) ->
- DDocId = convert_to_design_id(DDocId0),
- Filt = fun(Idx) -> mango_idx:ddoc(Idx) == DDocId end,
- Id = {<<"id">>, DDocId},
- case mango_idx:delete(Filt, TxDb, Idxs, Opts) of
- {ok, true} ->
- {[{[Id, {<<"ok">>, true}]} | Success0], Fail0};
- {error, Error} ->
- {Success0, [{[Id, {<<"error">>, Error}]} | Fail0]}
- end
- end, {[], []}, DDocs)
+ lists:foldl(
+ fun(DDocId0, {Success0, Fail0}) ->
+ DDocId = convert_to_design_id(DDocId0),
+ Filt = fun(Idx) -> mango_idx:ddoc(Idx) == DDocId end,
+ Id = {<<"id">>, DDocId},
+ case mango_idx:delete(Filt, TxDb, Idxs, Opts) of
+ {ok, true} ->
+ {[{[Id, {<<"ok">>, true}]} | Success0], Fail0};
+ {error, Error} ->
+ {Success0, [{[Id, {<<"error">>, Error}]} | Fail0]}
+ end
+ end,
+ {[], []},
+ DDocs
+ )
end),
chttpd:send_json(Req, {[{<<"success">>, Success}, {<<"fail">>, Fail}]});
-
-handle_index_req(#httpd{path_parts=[_, <<"_index">>,
- <<"_bulk_delete">>]}=Req, _Db) ->
+handle_index_req(
+ #httpd{
+ path_parts = [
+ _,
+ <<"_index">>,
+ <<"_bulk_delete">>
+ ]
+ } = Req,
+ _Db
+) ->
chttpd:send_method_not_allowed(Req, "POST");
-
-handle_index_req(#httpd{method='DELETE',
- path_parts=[A, B, <<"_design">>, DDocId0, Type, Name]}=Req, Db) ->
+handle_index_req(
+ #httpd{
+ method = 'DELETE',
+ path_parts = [A, B, <<"_design">>, DDocId0, Type, Name]
+ } = Req,
+ Db
+) ->
PathParts = [A, B, <<"_design/", DDocId0/binary>>, Type, Name],
- handle_index_req(Req#httpd{path_parts=PathParts}, Db);
-
-handle_index_req(#httpd{method='DELETE',
- path_parts=[_, _, DDocId0, Type, Name]}=Req, Db) ->
+ handle_index_req(Req#httpd{path_parts = PathParts}, Db);
+handle_index_req(
+ #httpd{
+ method = 'DELETE',
+ path_parts = [_, _, DDocId0, Type, Name]
+ } = Req,
+ Db
+) ->
Result = fabric2_fdb:transactional(Db, fun(TxDb) ->
Idxs = mango_idx:list(TxDb),
DDocId = convert_to_design_id(DDocId0),
@@ -167,12 +192,10 @@ handle_index_req(#httpd{method='DELETE',
{error, Error} ->
?MANGO_ERROR({error_saving_ddoc, Error})
end;
-
-handle_index_req(#httpd{path_parts=[_, _, _DDocId0, _Type, _Name]}=Req, _Db) ->
+handle_index_req(#httpd{path_parts = [_, _, _DDocId0, _Type, _Name]} = Req, _Db) ->
chttpd:send_method_not_allowed(Req, "DELETE").
-
-handle_explain_req(#httpd{method='POST'}=Req, Db) ->
+handle_explain_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
Body = chttpd:json_body_obj(Req),
{ok, Opts0} = mango_opts:validate_find(Body),
@@ -181,12 +204,10 @@ handle_explain_req(#httpd{method='POST'}=Req, Db) ->
mango_crud:explain(TxDb, Sel, Opts)
end),
chttpd:send_json(Req, Resp);
-
handle_explain_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-
-handle_find_req(#httpd{method='POST'}=Req0, Db) ->
+handle_find_req(#httpd{method = 'POST'} = Req0, Db) ->
{ok, Req1} = mango_plugin:before_find(Req0),
chttpd:validate_ctype(Req1, "application/json"),
Body = chttpd:json_body_obj(Req1),
@@ -199,12 +220,9 @@ handle_find_req(#httpd{method='POST'}=Req0, Db) ->
{error, Error} ->
chttpd:send_error(Req1, Error)
end;
-
-
handle_find_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-
get_bulk_delete_ddocs(Opts) ->
case lists:keyfind(docids, 1, Opts) of
{docids, DDocs} when is_list(DDocs) ->
@@ -213,32 +231,32 @@ get_bulk_delete_ddocs(Opts) ->
[]
end.
-
convert_to_design_id(DDocId) ->
case DDocId of
<<"_design/", _/binary>> -> DDocId;
_ -> <<"_design/", DDocId/binary>>
end.
-
start_find_resp(Req) ->
chttpd:start_delayed_json_response(Req, 200, [], "{\"docs\":[").
-
end_find_resp(Req, Acc0) ->
- #vacc{resp=Resp00, buffer=Buf, kvs=KVs0, threshold=Max} = Acc0,
+ #vacc{resp = Resp00, buffer = Buf, kvs = KVs0, threshold = Max} = Acc0,
{ok, Resp0} = chttpd:close_delayed_json_object(Resp00, Buf, "\r\n]", Max),
{ok, KVs1} = mango_plugin:after_find(Req, Resp0, KVs0),
- FinalAcc = lists:foldl(fun({K, V}, Acc) ->
- JK = ?JSON_ENCODE(K),
- JV = ?JSON_ENCODE(V),
- [JV, ": ", JK, ",\r\n" | Acc]
- end, [], KVs1),
+ FinalAcc = lists:foldl(
+ fun({K, V}, Acc) ->
+ JK = ?JSON_ENCODE(K),
+ JV = ?JSON_ENCODE(V),
+ [JV, ": ", JK, ",\r\n" | Acc]
+ end,
+ [],
+ KVs1
+ ),
Chunk = lists:reverse(FinalAcc, ["}\r\n"]),
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
chttpd:end_delayed_json_response(Resp1).
-
run_find(Resp, Db, Sel, Opts) ->
Acc0 = #vacc{
resp = Resp,
@@ -248,18 +266,18 @@ run_find(Resp, Db, Sel, Opts) ->
},
mango_crud:find(Db, Sel, fun handle_doc/2, Acc0, Opts).
-
handle_doc({add_key, Key, Value}, Acc0) ->
- #vacc{kvs=KVs} = Acc0,
+ #vacc{kvs = KVs} = Acc0,
NewKVs = lists:keystore(Key, 1, KVs, {Key, Value}),
{ok, Acc0#vacc{kvs = NewKVs}};
handle_doc({row, Doc}, Acc0) ->
- #vacc{prepend=Prepend} = Acc0,
+ #vacc{prepend = Prepend} = Acc0,
Chunk = [Prepend, ?JSON_ENCODE(Doc)],
maybe_flush_response(Acc0, Chunk, iolist_size(Chunk)).
-maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
+maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
+ Size > 0 andalso (Size + Len) > Max
+->
#vacc{buffer = Buffer, resp = Resp} = Acc,
{ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
{ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
@@ -272,18 +290,17 @@ maybe_flush_response(Acc0, Data, Len) ->
},
{ok, Acc}.
-
parse_index_param("limit", Value) ->
[{limit, parse_val(Value)}];
parse_index_param("skip", Value) ->
[{skip, parse_val(Value)}];
parse_index_param(_Key, _Value) ->
- [].
+ [].
parse_val(Value) ->
case (catch list_to_integer(Value)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- ?MANGO_ERROR(invalid_list_index_params)
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ ?MANGO_ERROR(invalid_list_index_params)
end.
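The GET index handler above rejects limit < 1, clamps skip to the row count, and windows with lists:sublist/3. The windowing logic in isolation, as a sketch with a hypothetical function name:

    %% Page a list of index blobs the way handle_index_req/2 does.
    window(Items, Limit, Skip) when is_integer(Limit), Limit >= 1,
                                    is_integer(Skip), Skip >= 0 ->
        Total = length(Items),
        ClampedSkip = erlang:min(Skip, Total),
        lists:sublist(Items, ClampedSkip + 1, Limit).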
diff --git a/src/mango/src/mango_httpd_handlers.erl b/src/mango/src/mango_httpd_handlers.erl
index c1ddd6c4e..26a19bc0f 100644
--- a/src/mango/src/mango_httpd_handlers.erl
+++ b/src/mango/src/mango_httpd_handlers.erl
@@ -16,22 +16,19 @@
url_handler(_) -> no_match.
-db_handler(<<"_index">>) -> fun mango_httpd:handle_req/2;
-db_handler(<<"_explain">>) -> fun mango_httpd:handle_req/2;
-db_handler(<<"_find">>) -> fun mango_httpd:handle_req/2;
+db_handler(<<"_index">>) -> fun mango_httpd:handle_req/2;
+db_handler(<<"_explain">>) -> fun mango_httpd:handle_req/2;
+db_handler(<<"_find">>) -> fun mango_httpd:handle_req/2;
db_handler(_) -> no_match.
design_handler(_) -> no_match.
handler_info('GET', [Db, <<"_index">>], _) ->
{'db.mango.index.read', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_index">>], _) ->
{'db.mango.index.create', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_index">>, <<"_bulk_delete">>], _) ->
{'db.mango.index.delete', #{'db.name' => Db, multi => true}};
-
handler_info('DELETE', [Db, <<"_index">>, <<"_design">>, Name, Type, Idx], _) ->
{'db.mango.index.delete', #{
'db.name' => Db,
@@ -39,15 +36,11 @@ handler_info('DELETE', [Db, <<"_index">>, <<"_design">>, Name, Type, Idx], _) ->
'index.type' => Type,
'index.name' => Idx
}};
-
handler_info(M, [Db, <<"_index">>, <<"_design/", N/binary>>, T, I], R) ->
handler_info(M, [Db, <<"_index">>, <<"_design">>, N, T, I], R);
-
handler_info('POST', [Db, <<"_explain">>], _) ->
{'db.mango.explain.execute', #{'db.name' => Db}};
-
handler_info('POST', [Db, <<"_find">>], _) ->
{'db.mango.find.execute', #{'db.name' => Db}};
-
handler_info(_, _, _) ->
- no_match.
\ No newline at end of file
+ no_match.
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index e27f327edf..bb8247472 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -16,7 +16,6 @@
-module(mango_idx).
-
-export([
list/1,
@@ -45,46 +44,49 @@
get_partial_filter_selector/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-
list(Db) ->
DDocs = couch_views_ddoc:get_mango_list(Db),
DbName = fabric2_db:name(Db),
- Indexes = lists:foldl(fun(DDoc, Acc) ->
- {Props} = couch_doc:to_json_obj(DDoc, []),
-
- case proplists:get_value(<<"language">>, Props) == <<"query">> of
- true ->
- {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
-
- IsInteractive = couch_views_ddoc:is_interactive(DDoc),
- BuildState = couch_views_fdb:get_build_status(Db, Mrst),
-
- Idxs = lists:map(fun(Idx) ->
- Idx#idx{
- build_status = BuildState,
- interactive = IsInteractive
- }
- end, from_ddoc(Db, DDoc)),
- Acc ++ Idxs;
- false ->
- Acc
- end
-
- end, [], DDocs),
+ Indexes = lists:foldl(
+ fun(DDoc, Acc) ->
+ {Props} = couch_doc:to_json_obj(DDoc, []),
+
+ case proplists:get_value(<<"language">>, Props) == <<"query">> of
+ true ->
+ {ok, Mrst} = couch_views_util:ddoc_to_mrst(DbName, DDoc),
+
+ IsInteractive = couch_views_ddoc:is_interactive(DDoc),
+ BuildState = couch_views_fdb:get_build_status(Db, Mrst),
+
+ Idxs = lists:map(
+ fun(Idx) ->
+ Idx#idx{
+ build_status = BuildState,
+ interactive = IsInteractive
+ }
+ end,
+ from_ddoc(Db, DDoc)
+ ),
+ Acc ++ Idxs;
+ false ->
+ Acc
+ end
+ end,
+ [],
+ DDocs
+ ),
Indexes ++ special(Db).
-
get_usable_indexes(Db, Selector, Opts) ->
ExistingIndexes = mango_idx:list(Db),
GlobalIndexes = mango_cursor:remove_indexes_with_partial_filter_selector(
- ExistingIndexes
- ),
+ ExistingIndexes
+ ),
BuiltIndexes = remove_unbuilt_indexes(GlobalIndexes),
UserSpecifiedIndex = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts),
UsableIndexes0 = lists:usort(BuiltIndexes ++ UserSpecifiedIndex),
@@ -99,11 +101,9 @@ get_usable_indexes(Db, Selector, Opts) ->
UsableIndexes
end.
-
mango_sort_error(_Db, _Opts) ->
?MANGO_ERROR({no_usable_index, missing_sort_index}).
-
get_sort_fields(Opts) ->
case lists:keyfind(sort, 1, Opts) of
{sort, Sort} ->
@@ -112,7 +112,6 @@ get_sort_fields(Opts) ->
[]
end.
-
new(Db, Opts) ->
Def = get_idx_def(Opts),
Type = get_idx_type(Opts),
@@ -127,12 +126,10 @@ new(Db, Opts) ->
opts = filter_opts(Opts)
}}.
-
validate_new(Idx, Db) ->
Mod = idx_mod(Idx),
Mod:validate_new(Idx, Db).
-
add(DDoc, Idx) ->
Mod = idx_mod(Idx),
{ok, NewDDoc} = Mod:add(DDoc, Idx),
@@ -140,7 +137,6 @@ add(DDoc, Idx) ->
Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc#doc.body)),
{ok, NewDDoc#doc{body = Body}}.
-
remove(DDoc, Idx) ->
Mod = idx_mod(Idx),
{ok, NewDDoc} = Mod:remove(DDoc, Idx),
@@ -148,18 +144,18 @@ remove(DDoc, Idx) ->
Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc#doc.body)),
{ok, NewDDoc#doc{body = Body}}.
-
delete(Filt, Db, Indexes, DelOpts) ->
case lists:filter(Filt, Indexes) of
[Idx] ->
{ok, DDoc} = mango_util:load_ddoc(Db, mango_idx:ddoc(Idx)),
{ok, NewDDoc} = mango_idx:remove(DDoc, Idx),
- FinalDDoc = case NewDDoc#doc.body of
- {[{<<"language">>, <<"query">>}]} ->
- NewDDoc#doc{deleted = true, body = {[]}};
- _ ->
- NewDDoc
- end,
+ FinalDDoc =
+ case NewDDoc#doc.body of
+ {[{<<"language">>, <<"query">>}]} ->
+ NewDDoc#doc{deleted = true, body = {[]}};
+ _ ->
+ NewDDoc
+ end,
case mango_crud:insert(Db, FinalDDoc, DelOpts) of
{ok, _} ->
{ok, true};
@@ -170,7 +166,6 @@ delete(Filt, Db, Indexes, DelOpts) ->
{error, not_found}
end.
-
from_ddoc(Db, #doc{id = DDocId} = DDoc) ->
{Props} = couch_doc:to_json_obj(DDoc, []),
DbName = db_to_name(Db),
@@ -178,23 +173,25 @@ from_ddoc(Db, #doc{id = DDocId} = DDoc) ->
case proplists:get_value(<<"language">>, Props) of
<<"query">> -> ok;
- _ ->
- ?MANGO_ERROR(invalid_query_ddoc_language)
- end,
- IdxMods = case is_text_service_available() of
- true ->
- [mango_idx_view, mango_idx_text];
- false ->
- [mango_idx_view]
+ _ -> ?MANGO_ERROR(invalid_query_ddoc_language)
end,
+ IdxMods =
+ case is_text_service_available() of
+ true ->
+ [mango_idx_view, mango_idx_text];
+ false ->
+ [mango_idx_view]
+ end,
Idxs = lists:flatmap(fun(Mod) -> Mod:from_ddoc({Props}) end, IdxMods),
- lists:map(fun(Idx) ->
- Idx#idx{
- dbname = DbName,
- ddoc = DDocId
- }
- end, Idxs).
-
+ lists:map(
+ fun(Idx) ->
+ Idx#idx{
+ dbname = DbName,
+ ddoc = DDocId
+ }
+ end,
+ Idxs
+ ).
special(Db) ->
AllDocs = #idx{
@@ -208,64 +205,51 @@ special(Db) ->
% Add one for _update_seq
[AllDocs].
-
-dbname(#idx{dbname=DbName}) ->
+dbname(#idx{dbname = DbName}) ->
DbName.
-
-ddoc(#idx{ddoc=DDoc}) ->
+ddoc(#idx{ddoc = DDoc}) ->
DDoc.
-
-name(#idx{name=Name}) ->
+name(#idx{name = Name}) ->
Name.
-
-type(#idx{type=Type}) ->
+type(#idx{type = Type}) ->
Type.
-
-def(#idx{def=Def}) ->
+def(#idx{def = Def}) ->
Def.
-
-opts(#idx{opts=Opts}) ->
+opts(#idx{opts = Opts}) ->
Opts.
-
-to_json(#idx{}=Idx) ->
+to_json(#idx{} = Idx) ->
Mod = idx_mod(Idx),
Mod:to_json(Idx).
-
-columns(#idx{}=Idx) ->
+columns(#idx{} = Idx) ->
Mod = idx_mod(Idx),
Mod:columns(Idx).
-
-is_usable(#idx{}=Idx, Selector, SortFields) ->
+is_usable(#idx{} = Idx, Selector, SortFields) ->
Mod = idx_mod(Idx),
Mod:is_usable(Idx, Selector, SortFields).
-
-start_key(#idx{}=Idx, Ranges) ->
+start_key(#idx{} = Idx, Ranges) ->
Mod = idx_mod(Idx),
Mod:start_key(Ranges).
-
-end_key(#idx{}=Idx, Ranges) ->
+end_key(#idx{} = Idx, Ranges) ->
Mod = idx_mod(Idx),
Mod:end_key(Ranges).
-
cursor_mod(#idx{type = <<"json">>}) ->
mango_cursor_view;
-cursor_mod(#idx{def = all_docs, type= <<"special">>}) ->
+cursor_mod(#idx{def = all_docs, type = <<"special">>}) ->
mango_cursor_special;
cursor_mod(#idx{type = <<"text">>}) ->
?MANGO_ERROR({index_service_unavailable, <<"text">>}).
-
idx_mod(#idx{type = <<"json">>}) ->
mango_idx_view;
idx_mod(#idx{type = <<"special">>}) ->
@@ -273,7 +257,6 @@ idx_mod(#idx{type = <<"special">>}) ->
idx_mod(#idx{type = <<"text">>}) ->
?MANGO_ERROR({index_service_unavailable, <<"text">>}).
-
db_to_name(Name) when is_binary(Name) ->
Name;
db_to_name(Name) when is_list(Name) ->
@@ -281,7 +264,6 @@ db_to_name(Name) when is_list(Name) ->
db_to_name(Db) ->
fabric2_db:name(Db).
-
get_idx_def(Opts) ->
case proplists:get_value(def, Opts) of
undefined ->
@@ -290,27 +272,27 @@ get_idx_def(Opts) ->
Def
end.
-
get_idx_type(Opts) ->
case proplists:get_value(type, Opts) of
- <<"json">> -> <<"json">>;
- <<"text">> -> case is_text_service_available() of
- true ->
- <<"text">>;
- false ->
- ?MANGO_ERROR({index_service_unavailable, <<"text">>})
+ <<"json">> ->
+ <<"json">>;
+ <<"text">> ->
+ case is_text_service_available() of
+ true ->
+ <<"text">>;
+ false ->
+ ?MANGO_ERROR({index_service_unavailable, <<"text">>})
end;
%<<"geo">> -> <<"geo">>;
- undefined -> <<"json">>;
+ undefined ->
+ <<"json">>;
BadType ->
?MANGO_ERROR({invalid_index_type, BadType})
end.
-
is_text_service_available() ->
false.
-
get_idx_ddoc(Idx, Opts) ->
case proplists:get_value(ddoc, Opts) of
<<"_design/", _Rest/binary>> = Name ->
@@ -322,7 +304,6 @@ get_idx_ddoc(Idx, Opts) ->
<<"_design/", Bin/binary>>
end.
-
get_idx_name(Idx, Opts) ->
case proplists:get_value(name, Opts) of
Name when is_binary(Name) ->
@@ -331,14 +312,12 @@ get_idx_name(Idx, Opts) ->
gen_name(Idx, Opts)
end.
-
gen_name(Idx, Opts0) ->
Opts = lists:usort(Opts0),
TermBin = term_to_binary({Idx, Opts}),
Sha = crypto:hash(sha, TermBin),
mango_util:enc_hex(Sha).
-
filter_opts([]) ->
[];
filter_opts([{user_ctx, _} | Rest]) ->
@@ -352,7 +331,6 @@ filter_opts([{type, _} | Rest]) ->
filter_opts([Opt | Rest]) ->
[Opt | filter_opts(Rest)].
-
get_partial_filter_selector(#idx{def = Def}) when Def =:= all_docs; Def =:= undefined ->
undefined;
get_partial_filter_selector(#idx{def = {Def}}) ->
@@ -362,7 +340,6 @@ get_partial_filter_selector(#idx{def = {Def}}) ->
Selector -> Selector
end.
-
% Partial filter selectors are supported in text indexes via the selector field.
% This adds backwards support for existing indexes that might have a selector in them.
get_legacy_selector(Def) ->
@@ -375,27 +352,33 @@ get_legacy_selector(Def) ->
% remove any interactive indexes that are not built. If an index is not
% interactive then we do not remove it, as it will be built when queried
remove_unbuilt_indexes(Indexes) ->
- lists:filter(fun(Idx) ->
- case Idx#idx.interactive of
- true -> Idx#idx.build_status == ?INDEX_READY;
- _ -> true
- end
- end, Indexes).
-
+ lists:filter(
+ fun(Idx) ->
+ case Idx#idx.interactive of
+ true -> Idx#idx.build_status == ?INDEX_READY;
+ _ -> true
+ end
+ end,
+ Indexes
+ ).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
index(SelectorName, Selector) ->
{
- idx,<<"mango_test_46418cd02081470d93290dc12306ebcb">>,
- <<"_design/57e860dee471f40a2c74ea5b72997b81dda36a24">>,
- <<"Selected">>,<<"json">>,
- {[{<<"fields">>,{[{<<"location">>,<<"asc">>}]}},
- {SelectorName,{Selector}}]},
- [{<<"def">>,{[{<<"fields">>,[<<"location">>]}]}}],
- <<"ready">>,
- false
+ idx,
+ <<"mango_test_46418cd02081470d93290dc12306ebcb">>,
+ <<"_design/57e860dee471f40a2c74ea5b72997b81dda36a24">>,
+ <<"Selected">>,
+ <<"json">>,
+ {[
+ {<<"fields">>, {[{<<"location">>, <<"asc">>}]}},
+ {SelectorName, {Selector}}
+ ]},
+ [{<<"def">>, {[{<<"fields">>, [<<"location">>]}]}}],
+ <<"ready">>,
+ false
}.
get_partial_filter_all_docs_test() ->
@@ -415,12 +398,12 @@ get_partial_filter_selector_missing_test() ->
?assertEqual(undefined, get_partial_filter_selector(Idx)).
get_partial_filter_selector_with_selector_test() ->
- Selector = [{<<"location">>,{[{<<"$gt">>,<<"FRA">>}]}}],
+ Selector = [{<<"location">>, {[{<<"$gt">>, <<"FRA">>}]}}],
Idx = index(<<"partial_filter_selector">>, Selector),
?assertEqual({Selector}, get_partial_filter_selector(Idx)).
get_partial_filter_selector_with_legacy_selector_test() ->
- Selector = [{<<"location">>,{[{<<"$gt">>,<<"FRA">>}]}}],
+ Selector = [{<<"location">>, {[{<<"$gt">>, <<"FRA">>}]}}],
Idx = index(<<"selector">>, Selector),
?assertEqual({Selector}, get_partial_filter_selector(Idx)).
@@ -428,7 +411,6 @@ get_partial_filter_selector_with_legacy_default_selector_test() ->
Idx = index(<<"selector">>, []),
?assertEqual(undefined, get_partial_filter_selector(Idx)).
-
get_idx_ddoc_name_only_test() ->
Opts = [{ddoc, <<"foo">>}],
?assertEqual(<<"_design/foo">>, get_idx_ddoc({}, Opts)).
diff --git a/src/mango/src/mango_idx_special.erl b/src/mango/src/mango_idx_special.erl
index 3548372b6..4f7cbf285 100644
--- a/src/mango/src/mango_idx_special.erl
+++ b/src/mango/src/mango_idx_special.erl
@@ -12,7 +12,6 @@
-module(mango_idx_special).
-
-export([
validate/1,
add/2,
@@ -25,53 +24,47 @@
end_key/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango_idx.hrl").
-include_lib("couch_views/include/couch_views.hrl").
-
validate(_) ->
erlang:exit(invalid_call).
-
add(_, _) ->
erlang:exit(invalid_call).
-
remove(_, _) ->
erlang:exit(invalid_call).
-
from_ddoc(_) ->
erlang:exit(invalid_call).
-
-to_json(#idx{def=all_docs}) ->
+to_json(#idx{def = all_docs}) ->
{[
{ddoc, null},
{name, <<"_all_docs">>},
{type, <<"special">>},
- {def, {[
- {<<"fields">>, [{[
- {<<"_id">>, <<"asc">>}
- ]}]}
- ]}},
+ {def,
+ {[
+ {<<"fields">>, [
+ {[
+ {<<"_id">>, <<"asc">>}
+ ]}
+ ]}
+ ]}},
{build_status, ?INDEX_READY}
]}.
-
-columns(#idx{def=all_docs}) ->
+columns(#idx{def = all_docs}) ->
[<<"_id">>].
-
-is_usable(#idx{def=all_docs}, _Selector, []) ->
+is_usable(#idx{def = all_docs}, _Selector, []) ->
true;
-is_usable(#idx{def=all_docs} = Idx, Selector, SortFields) ->
+is_usable(#idx{def = all_docs} = Idx, Selector, SortFields) ->
Fields = mango_idx_view:indexable_fields(Selector),
lists:member(<<"_id">>, Fields) and can_use_sort(Idx, SortFields, Selector).
-
start_key([{'$gt', Key, _, _}]) ->
case mango_json:special(Key) of
true ->
@@ -86,7 +79,6 @@ start_key([{'$eq', Key, '$eq', Key}]) ->
false = mango_json:special(Key),
Key.
-
end_key([{_, _, '$lt', Key}]) ->
case mango_json:special(Key) of
true ->
@@ -101,7 +93,6 @@ end_key([{'$eq', Key, '$eq', Key}]) ->
false = mango_json:special(Key),
Key.
-
can_use_sort(_Idx, [], _Selector) ->
true;
can_use_sort(Idx, SortFields, _Selector) ->
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
index 35b741a49..1ddb94410 100644
--- a/src/mango/src/mango_idx_view.erl
+++ b/src/mango/src/mango_idx_view.erl
@@ -12,7 +12,6 @@
-module(mango_idx_view).
-
-export([
validate_new/2,
validate_index_def/1,
@@ -30,87 +29,88 @@
field_ranges/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
-include("mango_idx_view.hrl").
-include_lib("kernel/include/logger.hrl").
-
-validate_new(#idx{}=Idx, _Db) ->
+validate_new(#idx{} = Idx, _Db) ->
{ok, Def} = do_validate(Idx#idx.def),
- {ok, Idx#idx{def=Def}}.
-
+ {ok, Idx#idx{def = Def}}.
validate_index_def(Def) ->
def_to_json(Def).
-
-add(#doc{body={Props0}}=DDoc, Idx) ->
- Views1 = case proplists:get_value(<<"views">>, Props0) of
- {Views0} -> Views0;
- _ -> []
- end,
+add(#doc{body = {Props0}} = DDoc, Idx) ->
+ Views1 =
+ case proplists:get_value(<<"views">>, Props0) of
+ {Views0} -> Views0;
+ _ -> []
+ end,
NewView = make_view(Idx),
Views2 = lists:keystore(element(1, NewView), 1, Views1, NewView),
Props1 = lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}}),
{Opts0} = proplists:get_value(<<"options">>, Props1, {[]}),
- Opts1 = case lists:keymember(<<"interactive">>, 1, Opts0) of
- true -> Opts0;
- false -> Opts0 ++ [{<<"interactive">>, true}]
- end,
+ Opts1 =
+ case lists:keymember(<<"interactive">>, 1, Opts0) of
+ true -> Opts0;
+ false -> Opts0 ++ [{<<"interactive">>, true}]
+ end,
Props2 = lists:keystore(<<"options">>, 1, Props1, {<<"options">>, {Opts1}}),
Props3 = [{<<"autoupdate">>, false}],
- {ok, DDoc#doc{body={Props2 ++ Props3}}}.
-
-
-remove(#doc{body={Props0}}=DDoc, Idx) ->
- Views1 = case proplists:get_value(<<"views">>, Props0) of
- {Views0} ->
- Views0;
- _ ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
+ {ok, DDoc#doc{body = {Props2 ++ Props3}}}.
+
+remove(#doc{body = {Props0}} = DDoc, Idx) ->
+ Views1 =
+ case proplists:get_value(<<"views">>, Props0) of
+ {Views0} ->
+ Views0;
+ _ ->
+ ?MANGO_ERROR({index_not_found, Idx#idx.name})
+ end,
Views2 = lists:keydelete(Idx#idx.name, 1, Views1),
- if Views2 /= Views1 -> ok; true ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Props3 = case Views2 of
- [] ->
- Props1 = lists:keydelete(<<"views">>, 1, Props0),
- Props2 = lists:keydelete(<<"options">>, 1, Props1),
- lists:keydelete(<<"autoupdate">>, 1, Props2);
- _ ->
- lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}})
+ if
+ Views2 /= Views1 -> ok;
+ true -> ?MANGO_ERROR({index_not_found, Idx#idx.name})
end,
- {ok, DDoc#doc{body={Props3}}}.
-
+ Props3 =
+ case Views2 of
+ [] ->
+ Props1 = lists:keydelete(<<"views">>, 1, Props0),
+ Props2 = lists:keydelete(<<"options">>, 1, Props1),
+ lists:keydelete(<<"autoupdate">>, 1, Props2);
+ _ ->
+ lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}})
+ end,
+ {ok, DDoc#doc{body = {Props3}}}.
from_ddoc({Props}) ->
case lists:keyfind(<<"views">>, 1, Props) of
{<<"views">>, {Views}} when is_list(Views) ->
- lists:flatmap(fun({Name, {VProps}}) ->
- case validate_ddoc(VProps) of
- invalid_view ->
- [];
- {Def, Opts} ->
- I = #idx{
- type = <<"json">>,
- name = Name,
- def = Def,
- opts = Opts
- },
- [I]
- end
- end, Views);
+ lists:flatmap(
+ fun({Name, {VProps}}) ->
+ case validate_ddoc(VProps) of
+ invalid_view ->
+ [];
+ {Def, Opts} ->
+ I = #idx{
+ type = <<"json">>,
+ name = Name,
+ def = Def,
+ opts = Opts
+ },
+ [I]
+ end
+ end,
+ Views
+ );
_ ->
[]
end.
-
to_json(Idx) ->
{[
{ddoc, Idx#idx.ddoc},
@@ -120,13 +120,11 @@ to_json(Idx) ->
{build_status, Idx#idx.build_status}
]}.
-
columns(Idx) ->
{Props} = Idx#idx.def,
{<<"fields">>, {Fields}} = lists:keyfind(<<"fields">>, 1, Props),
[Key || {Key, _} <- Fields].
-
is_usable(Idx, Selector, SortFields) ->
% This index is usable if all of the columns are
% restricted by the selector such that they are required to exist
@@ -141,31 +139,35 @@ is_usable(Idx, Selector, SortFields) ->
% we don't need to check the selector for these either
RequiredFields2 = ordsets:subtract(
RequiredFields1,
- [<<"_id">>, <<"_rev">>]),
-
- mango_selector:has_required_fields(Selector, RequiredFields2)
- andalso not is_text_search(Selector)
- andalso can_use_sort(RequiredFields, SortFields, Selector).
+ [<<"_id">>, <<"_rev">>]
+ ),
+ mango_selector:has_required_fields(Selector, RequiredFields2) andalso
+ not is_text_search(Selector) andalso
+ can_use_sort(RequiredFields, SortFields, Selector).
is_text_search({[]}) ->
false;
is_text_search({[{<<"$default">>, _}]}) ->
true;
is_text_search({[{_Field, Cond}]}) when is_list(Cond) ->
- lists:foldl(fun(C, Exists) ->
- Exists orelse is_text_search(C)
- end, false, Cond);
+ lists:foldl(
+ fun(C, Exists) ->
+ Exists orelse is_text_search(C)
+ end,
+ false,
+ Cond
+ );
is_text_search({[{_Field, Cond}]}) when is_tuple(Cond) ->
is_text_search(Cond);
is_text_search({[{_Field, _Cond}]}) ->
false;
%% we reached values, which should always be false
-is_text_search(Val)
- when is_number(Val); is_boolean(Val); is_binary(Val)->
+is_text_search(Val) when
+ is_number(Val); is_boolean(Val); is_binary(Val)
+->
false.
-
start_key([]) ->
[];
start_key([{'$gt', Key, _, _} | Rest]) ->
@@ -182,7 +184,6 @@ start_key([{'$eq', Key, '$eq', Key} | Rest]) ->
false = mango_json:special(Key),
[Key | start_key(Rest)].
-
end_key([]) ->
[?MAX_JSON_OBJ];
end_key([{_, _, '$lt', Key} | Rest]) ->
@@ -199,14 +200,12 @@ end_key([{'$eq', Key, '$eq', Key} | Rest]) ->
false = mango_json:special(Key),
[Key | end_key(Rest)].
-
do_validate({Props}) ->
{ok, Opts} = mango_opts:validate(Props, opts()),
{ok, {Opts}};
do_validate(Else) ->
?MANGO_ERROR({invalid_index_json, Else}).
-
def_to_json({Props}) ->
def_to_json(Props);
def_to_json([]) ->
@@ -222,7 +221,6 @@ def_to_json([{<<"partial_filter_selector">>, {[]}} | Rest]) ->
def_to_json([{Key, Value} | Rest]) ->
[{Key, Value} | def_to_json(Rest)].
-
opts() ->
[
{<<"fields">>, [
@@ -237,16 +235,15 @@ opts() ->
]}
].
-
make_view(Idx) ->
- View = {[
- {<<"map">>, Idx#idx.def},
- {<<"reduce">>, <<"_count">>},
- {<<"options">>, {Idx#idx.opts}}
- ]},
+ View =
+ {[
+ {<<"map">>, Idx#idx.def},
+ {<<"reduce">>, <<"_count">>},
+ {<<"options">>, {Idx#idx.opts}}
+ ]},
{Idx#idx.name, View}.
-
validate_ddoc(VProps) ->
try
Def = proplists:get_value(<<"map">>, VProps),
@@ -254,19 +251,21 @@ validate_ddoc(VProps) ->
{Opts0} = proplists:get_value(<<"options">>, VProps),
Opts = lists:keydelete(<<"sort">>, 1, Opts0),
{Def, Opts}
- catch Error:Reason ->
- ?LOG_ERROR(#{
- what => invalid_index_definition,
- tag => Error,
- details => Reason,
- index => VProps
- }),
- couch_log:error("Invalid Index Def ~p. Error: ~p, Reason: ~p",
- [VProps, Error, Reason]),
- invalid_view
+ catch
+ Error:Reason ->
+ ?LOG_ERROR(#{
+ what => invalid_index_definition,
+ tag => Error,
+ details => Reason,
+ index => VProps
+ }),
+ couch_log:error(
+ "Invalid Index Def ~p. Error: ~p, Reason: ~p",
+ [VProps, Error, Reason]
+ ),
+ invalid_view
end.
-
% This function returns a list of fields that
% can be used to restrict this query. This works by
% searching the selector looking for field names that
@@ -286,11 +285,9 @@ validate_ddoc(VProps) ->
% We can see through '$and' trivially
indexable_fields({[{<<"$and">>, Args}]}) ->
lists:usort(lists:flatten([indexable_fields(A) || A <- Args]));
-
% So far we can't see through any other operator
indexable_fields({[{<<"$", _/binary>>, _}]}) ->
[];
-
% If we have a field with a terminator that is locatable
% using an index then the field is a possible index
indexable_fields({[{Field, Cond}]}) ->
@@ -300,12 +297,10 @@ indexable_fields({[{Field, Cond}]}) ->
false ->
[]
end;
-
% An empty selector
indexable_fields({[]}) ->
[].
-
% Check if a condition is indexable. The logical
% comparisons are mostly straightforward. We
% currently don't understand '$in' which is
@@ -322,24 +317,20 @@ indexable({[{<<"$gt">>, _}]}) ->
true;
indexable({[{<<"$gte">>, _}]}) ->
true;
-
% All other operators are currently not indexable.
% This is also a subtle assertion that we don't
% call indexable/1 on a field name.
indexable({[{<<"$", _/binary>>, _}]}) ->
false.
-
% For each field, return {Field, Range}
field_ranges(Selector) ->
Fields = indexable_fields(Selector),
field_ranges(Selector, Fields).
-
field_ranges(Selector, Fields) ->
field_ranges(Selector, Fields, []).
-
field_ranges(_Selector, [], Acc) ->
lists:reverse(Acc);
field_ranges(Selector, [Field | Rest], Acc) ->
@@ -350,7 +341,6 @@ field_ranges(Selector, [Field | Rest], Acc) ->
field_ranges(Selector, Rest, [{Field, Range} | Acc])
end.
-
% Find the complete range for a given index in this
% selector. This works by AND'ing logical comparisons
% together so that we can define the start and end
@@ -361,32 +351,31 @@ field_ranges(Selector, [Field | Rest], Acc) ->
range(Selector, Index) ->
range(Selector, Index, '$gt', mango_json:min(), '$lt', mango_json:max()).
-
% Adjust Low and High based on values found for the
% given Index in Selector.
range({[{<<"$and">>, Args}]}, Index, LCmp, Low, HCmp, High) ->
- lists:foldl(fun
- (Arg, {LC, L, HC, H}) ->
- range(Arg, Index, LC, L, HC, H);
- (_Arg, empty) ->
- empty
- end, {LCmp, Low, HCmp, High}, Args);
-
+ lists:foldl(
+ fun
+ (Arg, {LC, L, HC, H}) ->
+ range(Arg, Index, LC, L, HC, H);
+ (_Arg, empty) ->
+ empty
+ end,
+ {LCmp, Low, HCmp, High},
+ Args
+ );
% We can currently only traverse '$and' operators
range({[{<<"$", _/binary>>}]}, _Index, LCmp, Low, HCmp, High) ->
{LCmp, Low, HCmp, High};
-
% If the field name matches the index see if we can narrow
% the acceptable range.
range({[{Index, Cond}]}, Index, LCmp, Low, HCmp, High) ->
range(Cond, LCmp, Low, HCmp, High);
-
% Else we have a field unrelated to this index so just
% return the current values.
range(_, _, LCmp, Low, HCmp, High) ->
{LCmp, Low, HCmp, High}.
-
% The comments below are a bit cryptic at first but they show
% where the Arg can land in the current range.
%
@@ -443,7 +432,6 @@ range({[{<<"$lt">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
{LCmp, Low, HCmp, High}
end;
-
range({[{<<"$lte">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -459,7 +447,6 @@ range({[{<<"$lte">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
{LCmp, Low, HCmp, High}
end;
-
range({[{<<"$eq">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -477,7 +464,6 @@ range({[{<<"$eq">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
empty
end;
-
range({[{<<"$gte">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -493,7 +479,6 @@ range({[{<<"$gte">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
empty
end;
-
range({[{<<"$gt">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -507,14 +492,12 @@ range({[{<<"$gt">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
empty
end;
-
% There's some other un-indexable restriction on the index
% that will be applied as a post-filter. Ignore it and
% carry on our merry way.
range({[{<<"$", _/binary>>, _}]}, LCmp, Low, HCmp, High) ->
{LCmp, Low, HCmp, High}.
-
% Returns the value min | low | mid | high | max depending
% on how Arg compares to Low and High.
range_pos(Low, Arg, High) ->
@@ -532,7 +515,6 @@ range_pos(Low, Arg, High) ->
end
end.
-
% Can_use_sort works as follows:
%
% * no sort fields then we can use this
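The range machinery above narrows a per-field window by AND'ing comparisons; contradictory bounds collapse to empty. A hedged walk-through via the exported field_ranges/1, with an illustrative selector in mango's normalized EJSON shape:

    Selector = {[{<<"$and">>, [
        {[{<<"age">>, {[{<<"$gt">>, 5}]}}]},
        {[{<<"age">>, {[{<<"$lt">>, 10}]}}]}
    ]}]},
    %% Each indexable field pairs with its narrowed range.
    [{<<"age">>, {'$gt', 5, '$lt', 10}}] =
        mango_idx_view:field_ranges(Selector).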
diff --git a/src/mango/src/mango_json.erl b/src/mango/src/mango_json.erl
index 9584c2d7e..ca18d8898 100644
--- a/src/mango/src/mango_json.erl
+++ b/src/mango/src/mango_json.erl
@@ -12,7 +12,6 @@
-module(mango_json).
-
-export([
min/0,
max/0,
@@ -23,19 +22,15 @@
to_binary/1
]).
-
-define(MIN_VAL, mango_json_min).
-define(MAX_VAL, mango_json_max).
-
min() ->
?MIN_VAL.
-
max() ->
?MAX_VAL.
-
cmp(?MIN_VAL, ?MIN_VAL) ->
0;
cmp(?MIN_VAL, _) ->
@@ -51,7 +46,6 @@ cmp(_, ?MAX_VAL) ->
cmp(A, B) ->
couch_ejson_compare:less(A, B).
-
cmp_raw(?MIN_VAL, ?MIN_VAL) ->
0;
cmp_raw(?MIN_VAL, _) ->
@@ -77,7 +71,6 @@ cmp_raw(A, B) ->
end
end.
-
type(null) ->
<<"null">>;
type(Bool) when is_boolean(Bool) ->
@@ -91,7 +84,6 @@ type({Props}) when is_list(Props) ->
type(Vals) when is_list(Vals) ->
<<"array">>.
-
special(?MIN_VAL) ->
true;
special(?MAX_VAL) ->
@@ -99,7 +91,6 @@ special(?MAX_VAL) ->
special(_) ->
false.
-
to_binary({Props}) ->
Pred = fun({Key, Value}) ->
{to_binary(Key), to_binary(Value)}
@@ -118,4 +109,4 @@ to_binary(Data) when is_atom(Data) ->
to_binary(Data) when is_number(Data) ->
Data;
to_binary(Data) when is_binary(Data) ->
- Data. \ No newline at end of file
+ Data.
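
In short, mango_json provides sentinel values that compare below and above every JSON value, which is what lets range/6 above start from an unbounded interval. A small sketch of the expected behavior (ordinary values defer to couch_ejson_compare:less/2):

    true = mango_json:cmp(mango_json:min(), 42) < 0,
    true = mango_json:cmp(42, mango_json:max()) < 0,
    true = mango_json:special(mango_json:min()),
    <<"number">> = mango_json:type(42).
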
diff --git a/src/mango/src/mango_json_bookmark.erl b/src/mango/src/mango_json_bookmark.erl
index b60ecdb18..668a128d4 100644
--- a/src/mango/src/mango_json_bookmark.erl
+++ b/src/mango/src/mango_json_bookmark.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(mango_json_bookmark).
-export([
@@ -18,15 +17,14 @@
create/1
]).
-
-include_lib("couch_views/include/couch_views.hrl").
-include("mango_cursor.hrl").
-include("mango.hrl").
-update_args(EncodedBookmark, #mrargs{skip = Skip} = Args) ->
+update_args(EncodedBookmark, #mrargs{skip = Skip} = Args) ->
Bookmark = unpack(EncodedBookmark),
case is_list(Bookmark) of
- true ->
+ true ->
{startkey, Startkey} = lists:keyfind(startkey, 1, Bookmark),
{startkey_docid, StartkeyDocId} = lists:keyfind(startkey_docid, 1, Bookmark),
Args#mrargs{
@@ -37,35 +35,36 @@ update_args(EncodedBookmark, #mrargs{skip = Skip} = Args) ->
false ->
Args
end.
-
-create(#cursor{bookmark_docid = BookmarkDocId, bookmark_key = BookmarkKey}) when BookmarkKey =/= undefined ->
+create(#cursor{bookmark_docid = BookmarkDocId, bookmark_key = BookmarkKey}) when
+ BookmarkKey =/= undefined
+->
QueryArgs = [
{startkey_docid, BookmarkDocId},
{startkey, BookmarkKey}
],
- Bin = term_to_binary(QueryArgs, [compressed, {minor_version,1}]),
+ Bin = term_to_binary(QueryArgs, [compressed, {minor_version, 1}]),
couch_util:encodeBase64Url(Bin);
create(#cursor{bookmark = Bookmark}) ->
Bookmark.
-
unpack(nil) ->
nil;
unpack(Packed) ->
try
Bookmark = binary_to_term(couch_util:decodeBase64Url(Packed), [safe]),
verify(Bookmark)
- catch _:_ ->
- ?MANGO_ERROR({invalid_bookmark, Packed})
+ catch
+ _:_ ->
+ ?MANGO_ERROR({invalid_bookmark, Packed})
end.
verify(Bookmark) when is_list(Bookmark) ->
- case lists:keymember(startkey, 1, Bookmark) andalso lists:keymember(startkey_docid, 1, Bookmark) of
+ case
+ lists:keymember(startkey, 1, Bookmark) andalso lists:keymember(startkey_docid, 1, Bookmark)
+ of
true -> Bookmark;
_ -> throw(invalid_bookmark)
end;
verify(_Bookmark) ->
throw(invalid_bookmark).
-
- \ No newline at end of file
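
The bookmark round trip above in a nutshell: create/1 packs the paging keys and unpack/1 reverses it defensively. A sketch with illustrative values:

    QueryArgs = [
        {startkey_docid, <<"doc001">>},
        {startkey, <<"some-key">>}
    ],
    Packed = couch_util:encodeBase64Url(
        term_to_binary(QueryArgs, [compressed, {minor_version, 1}])
    ),
    %% unpack/1 decodes with binary_to_term(..., [safe]) and verify/1
    %% rejects anything missing startkey or startkey_docid, raising
    %% ?MANGO_ERROR({invalid_bookmark, Packed}) on garbage input.
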
diff --git a/src/mango/src/mango_opts.erl b/src/mango/src/mango_opts.erl
index 7bae9c90d..a27dc648e 100644
--- a/src/mango/src/mango_opts.erl
+++ b/src/mango/src/mango_opts.erl
@@ -38,10 +38,8 @@
default_limit/0
]).
-
-include("mango.hrl").
-
validate_idx_create({Props}) ->
Opts = [
{<<"index">>, [
@@ -74,7 +72,6 @@ validate_idx_create({Props}) ->
],
validate(Props, Opts).
-
validate_find({Props}) ->
Opts = [
{<<"selector">>, [
@@ -156,7 +153,6 @@ validate_find({Props}) ->
],
validate(Props, Opts).
-
validate_bulk_delete({Props}) ->
Opts = [
{<<"docids">>, [
@@ -172,7 +168,6 @@ validate_bulk_delete({Props}) ->
],
validate(Props, Opts).
-
validate(Props, Opts) ->
case mango_util:assert_ejson({Props}) of
true ->
@@ -189,13 +184,11 @@ validate(Props, Opts) ->
end,
{ok, Acc}.
-
is_string(Val) when is_binary(Val) ->
{ok, Val};
is_string(Else) ->
?MANGO_ERROR({invalid_string, Else}).
-
is_boolean(true) ->
{ok, true};
is_boolean(false) ->
@@ -203,19 +196,16 @@ is_boolean(false) ->
is_boolean(Else) ->
?MANGO_ERROR({invalid_boolean, Else}).
-
is_pos_integer(V) when is_integer(V), V > 0 ->
{ok, V};
is_pos_integer(Else) ->
?MANGO_ERROR({invalid_pos_integer, Else}).
-
is_non_neg_integer(V) when is_integer(V), V >= 0 ->
{ok, V};
is_non_neg_integer(Else) ->
?MANGO_ERROR({invalid_non_neg_integer, Else}).
-
is_object({Props}) ->
true = mango_util:assert_ejson({Props}),
{ok, {Props}};
@@ -223,27 +213,26 @@ is_object(Else) ->
?MANGO_ERROR({invalid_object, Else}).
is_ok_or_false(<<"ok">>) ->
- {ok, ok};
-is_ok_or_false(<<"false">>) -> % convenience
- {ok, false};
+ {ok, ok};
+% convenience
+is_ok_or_false(<<"false">>) ->
+ {ok, false};
is_ok_or_false(false) ->
- {ok, false};
+ {ok, false};
is_ok_or_false(Else) ->
- ?MANGO_ERROR({invalid_ok_or_false_value, Else}).
+ ?MANGO_ERROR({invalid_ok_or_false_value, Else}).
validate_idx_name(auto_name) ->
{ok, auto_name};
validate_idx_name(Else) ->
is_string(Else).
-
validate_selector({Props}) ->
Norm = mango_selector:normalize({Props}),
{ok, Norm};
validate_selector(Else) ->
?MANGO_ERROR({invalid_selector_json, Else}).
-
%% We re-use validate_use_index to make sure the index names are valid
validate_bulk_docs(Docs) when is_list(Docs) ->
lists:foreach(fun validate_use_index/1, Docs),
@@ -251,7 +240,6 @@ validate_bulk_docs(Docs) when is_list(Docs) ->
validate_bulk_docs(Else) ->
?MANGO_ERROR({invalid_bulk_docs, Else}).
-
validate_use_index(IndexName) when is_binary(IndexName) ->
case binary:split(IndexName, <<"/">>) of
[DesignId] ->
@@ -271,13 +259,13 @@ validate_use_index([]) ->
{ok, []};
validate_use_index([DesignId]) when is_binary(DesignId) ->
{ok, [DesignId]};
-validate_use_index([DesignId, ViewName])
- when is_binary(DesignId), is_binary(ViewName) ->
+validate_use_index([DesignId, ViewName]) when
+ is_binary(DesignId), is_binary(ViewName)
+->
{ok, [DesignId, ViewName]};
validate_use_index(Else) ->
?MANGO_ERROR({invalid_index_name, Else}).
-
validate_bookmark(null) ->
{ok, nil};
validate_bookmark(<<>>) ->
@@ -287,15 +275,12 @@ validate_bookmark(Bin) when is_binary(Bin) ->
validate_bookmark(Else) ->
?MANGO_ERROR({invalid_bookmark, Else}).
-
validate_sort(Value) ->
mango_sort:new(Value).
-
validate_fields(Value) ->
mango_fields:new(Value).
-
validate_opts([], Props, Acc) ->
{Props, lists:reverse(Acc)};
validate_opts([{Name, Desc} | Rest], Props, Acc) ->
@@ -309,7 +294,6 @@ validate_opts([{Name, Desc} | Rest], Props, Acc) ->
validate_opts(Rest, Props, NewAcc)
end.
-
validate_opt(_Name, [], Value) ->
Value;
validate_opt(Name, Desc0, undefined) ->
@@ -342,6 +326,5 @@ validate_opt(Name, [{validator, Fun} | Rest], Value) ->
?MANGO_ERROR({invalid_value, Name, Value})
end.
-
default_limit() ->
config:get_integer("mango", "default_limit", 25).
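
For readers new to this module: validate/2 is driven by {Name, DescriptorList} pairs like the ones in validate_find/1. A minimal sketch of the shape (the <<"limit">> entry here is illustrative, not from this patch):

    Opts = [
        {<<"limit">>, [
            {optional, true},
            {default, default_limit()},
            {validator, fun is_pos_integer/1}
        ]}
    ],
    %% validate(Props, Opts) checks each descriptor in order,
    %% substituting the default when the key is absent and funnelling
    %% bad values into ?MANGO_ERROR({invalid_value, Name, Value}).
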
diff --git a/src/mango/src/mango_plugin.erl b/src/mango/src/mango_plugin.erl
index de23f8e7c..d48914cf3 100644
--- a/src/mango/src/mango_plugin.erl
+++ b/src/mango/src/mango_plugin.erl
@@ -27,12 +27,10 @@ before_find(HttpReq0) ->
[HttpReq1] = with_pipe(before_find, [HttpReq0]),
{ok, HttpReq1}.
-
after_find(HttpReq, HttpResp, Arg0) ->
[_HttpReq, _HttpResp, Arg1] = with_pipe(after_find, [HttpReq, HttpResp, Arg0]),
{ok, Arg1}.
-
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
@@ -40,7 +38,6 @@ after_find(HttpReq, HttpResp, Arg0) ->
with_pipe(Func, Args) ->
do_apply(Func, Args, [pipe]).
-
do_apply(Func, Args, Opts) ->
Handle = couch_epi:get_handle(?SERVICE_ID),
couch_epi:apply(Handle, ?SERVICE_ID, Func, Args, Opts).
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
index fc6a6d1a7..be2616ff5 100644
--- a/src/mango/src/mango_selector.erl
+++ b/src/mango/src/mango_selector.erl
@@ -12,7 +12,6 @@
-module(mango_selector).
-
-export([
normalize/1,
match/2,
@@ -20,11 +19,9 @@
is_constant_field/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
% Validate and normalize each operator. This translates
% every selector operator into a consistent version that
% we can then rely on for all other selector functions.
@@ -48,7 +45,6 @@ normalize(Selector) ->
end,
{NProps}.
-
% Match a selector against a #doc{} or EJSON value.
% This assumes that the Selector has been normalized.
% Returns true or false.
@@ -56,14 +52,11 @@ match(Selector, D) ->
couch_stats:increment_counter([mango, evaluate_selector]),
match_int(Selector, D).
-
% An empty selector matches any value.
match_int({[]}, _) ->
true;
-
-match_int(Selector, #doc{body=Body}) ->
+match_int(Selector, #doc{body = Body}) ->
match(Selector, Body, fun mango_json:cmp/2);
-
match_int(Selector, {Props}) ->
match(Selector, {Props}, fun mango_json:cmp/2).
@@ -74,47 +67,38 @@ norm_ops({[{<<"$and">>, Args}]}) when is_list(Args) ->
{[{<<"$and">>, [norm_ops(A) || A <- Args]}]};
norm_ops({[{<<"$and">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$and', Arg});
-
norm_ops({[{<<"$or">>, Args}]}) when is_list(Args) ->
{[{<<"$or">>, [norm_ops(A) || A <- Args]}]};
norm_ops({[{<<"$or">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$or', Arg});
-
-norm_ops({[{<<"$not">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$not">>, {_} = Arg}]}) ->
{[{<<"$not">>, norm_ops(Arg)}]};
norm_ops({[{<<"$not">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$not', Arg});
-
norm_ops({[{<<"$nor">>, Args}]}) when is_list(Args) ->
{[{<<"$nor">>, [norm_ops(A) || A <- Args]}]};
norm_ops({[{<<"$nor">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$nor', Arg});
-
norm_ops({[{<<"$in">>, Args}]} = Cond) when is_list(Args) ->
Cond;
norm_ops({[{<<"$in">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$in', Arg});
-
norm_ops({[{<<"$nin">>, Args}]} = Cond) when is_list(Args) ->
Cond;
norm_ops({[{<<"$nin">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$nin', Arg});
-
norm_ops({[{<<"$exists">>, Arg}]} = Cond) when is_boolean(Arg) ->
Cond;
norm_ops({[{<<"$exists">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$exists', Arg});
-
norm_ops({[{<<"$type">>, Arg}]} = Cond) when is_binary(Arg) ->
Cond;
norm_ops({[{<<"$type">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$type', Arg});
-
norm_ops({[{<<"$mod">>, [D, R]}]} = Cond) when is_integer(D), is_integer(R) ->
Cond;
norm_ops({[{<<"$mod">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$mod', Arg});
-
norm_ops({[{<<"$regex">>, Regex}]} = Cond) when is_binary(Regex) ->
case re:compile(Regex) of
{ok, _} ->
@@ -122,45 +106,40 @@ norm_ops({[{<<"$regex">>, Regex}]} = Cond) when is_binary(Regex) ->
_ ->
?MANGO_ERROR({bad_arg, '$regex', Regex})
end;
-
norm_ops({[{<<"$all">>, Args}]}) when is_list(Args) ->
{[{<<"$all">>, Args}]};
norm_ops({[{<<"$all">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$all', Arg});
-
-norm_ops({[{<<"$elemMatch">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$elemMatch">>, {_} = Arg}]}) ->
{[{<<"$elemMatch">>, norm_ops(Arg)}]};
norm_ops({[{<<"$elemMatch">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$elemMatch', Arg});
-
-norm_ops({[{<<"$allMatch">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$allMatch">>, {_} = Arg}]}) ->
{[{<<"$allMatch">>, norm_ops(Arg)}]};
norm_ops({[{<<"$allMatch">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$allMatch', Arg});
-
-norm_ops({[{<<"$keyMapMatch">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$keyMapMatch">>, {_} = Arg}]}) ->
{[{<<"$keyMapMatch">>, norm_ops(Arg)}]};
norm_ops({[{<<"$keyMapMatch">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$keyMapMatch', Arg});
-
norm_ops({[{<<"$size">>, Arg}]}) when is_integer(Arg), Arg >= 0 ->
{[{<<"$size">>, Arg}]};
norm_ops({[{<<"$size">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$size', Arg});
-
-norm_ops({[{<<"$text">>, Arg}]}) when is_binary(Arg); is_number(Arg);
- is_boolean(Arg) ->
+norm_ops({[{<<"$text">>, Arg}]}) when
+ is_binary(Arg);
+ is_number(Arg);
+ is_boolean(Arg)
+->
{[{<<"$default">>, {[{<<"$text">>, Arg}]}}]};
norm_ops({[{<<"$text">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$text', Arg});
-
% Not technically an operator but we pass it through here
% so that this function accepts its own output. This exists
% so that $text can have a field name value which simplifies
% logic elsewhere.
norm_ops({[{<<"$default">>, _}]} = Selector) ->
Selector;
-
% Terminals where we can't perform any validation
% on the value because any value is acceptable.
norm_ops({[{<<"$lt">>, _}]} = Cond) ->
@@ -175,7 +154,6 @@ norm_ops({[{<<"$gte">>, _}]} = Cond) ->
Cond;
norm_ops({[{<<"$gt">>, _}]} = Cond) ->
Cond;
-
% Known but unsupported operators
norm_ops({[{<<"$where">>, _}]}) ->
?MANGO_ERROR({not_supported, '$where'});
@@ -187,24 +165,19 @@ norm_ops({[{<<"$near">>, _}]}) ->
?MANGO_ERROR({not_supported, '$near'});
norm_ops({[{<<"$nearSphere">>, _}]}) ->
?MANGO_ERROR({not_supported, '$nearSphere'});
-
% Unknown operator
-norm_ops({[{<<"$", _/binary>>=Op, _}]}) ->
+norm_ops({[{<<"$", _/binary>> = Op, _}]}) ->
?MANGO_ERROR({invalid_operator, Op});
-
% A {Field: Cond} pair
norm_ops({[{Field, Cond}]}) ->
{[{Field, norm_ops(Cond)}]};
-
% An implicit $and
norm_ops({[_, _ | _] = Props}) ->
{[{<<"$and">>, [norm_ops({[P]}) || P <- Props]}]};
-
% A bare value condition means equality
norm_ops(Value) ->
{[{<<"$eq">>, Value}]}.
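
What norm_ops/1 buys us, sketched once: the implicit $and is made explicit and bare values become $eq conditions, so later passes only ever see one shape. With illustrative values:

    Before = {[{<<"name">>, <<"bob">>}, {<<"age">>, {[{<<"$gt">>, 21}]}}]},
    After = {[{<<"$and">>, [
        {[{<<"name">>, {[{<<"$eq">>, <<"bob">>}]}}]},
        {[{<<"age">>, {[{<<"$gt">>, 21}]}}]}
    ]}]},
    After = norm_ops(Before).
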
-
% This takes a selector and normalizes all of the
% field names as far as possible. For instance:
%
@@ -233,52 +206,40 @@ norm_fields({[]}) ->
norm_fields(Selector) ->
norm_fields(Selector, <<>>).
-
% Operators where we can push the field names further
% down the operator tree
norm_fields({[{<<"$and">>, Args}]}, Path) ->
{[{<<"$and">>, [norm_fields(A, Path) || A <- Args]}]};
-
norm_fields({[{<<"$or">>, Args}]}, Path) ->
{[{<<"$or">>, [norm_fields(A, Path) || A <- Args]}]};
-
norm_fields({[{<<"$not">>, Arg}]}, Path) ->
{[{<<"$not">>, norm_fields(Arg, Path)}]};
-
norm_fields({[{<<"$nor">>, Args}]}, Path) ->
{[{<<"$nor">>, [norm_fields(A, Path) || A <- Args]}]};
-
% Fields where we can normalize fields in the
% operator arguments independently.
norm_fields({[{<<"$elemMatch">>, Arg}]}, Path) ->
Cond = {[{<<"$elemMatch">>, norm_fields(Arg)}]},
{[{Path, Cond}]};
-
norm_fields({[{<<"$allMatch">>, Arg}]}, Path) ->
Cond = {[{<<"$allMatch">>, norm_fields(Arg)}]},
{[{Path, Cond}]};
-
norm_fields({[{<<"$keyMapMatch">>, Arg}]}, Path) ->
Cond = {[{<<"$keyMapMatch">>, norm_fields(Arg)}]},
{[{Path, Cond}]};
-
-
% The text operator operates against the internal
% $default field. This also asserts that the $default
% field is at the root as well as that it only has
% a $text operator applied.
-norm_fields({[{<<"$default">>, {[{<<"$text">>, _Arg}]}}]}=Sel, <<>>) ->
+norm_fields({[{<<"$default">>, {[{<<"$text">>, _Arg}]}}]} = Sel, <<>>) ->
Sel;
norm_fields({[{<<"$default">>, _}]} = Selector, _) ->
?MANGO_ERROR({bad_field, Selector});
-
-
% Any other operator is a terminal below which no
% field names should exist. Set the path to this
% terminal and return it.
norm_fields({[{<<"$", _/binary>>, _}]} = Cond, Path) ->
{[{Path, Cond}]};
-
% We've found a field name. Append it to the path
% and skip this node as we unroll the stack as
% the full path will be further down the branch.
@@ -288,16 +249,13 @@ norm_fields({[{Field, Cond}]}, <<>>) ->
norm_fields(Cond, Field);
norm_fields({[{Field, Cond}]}, Path) ->
norm_fields(Cond, <<Path/binary, ".", Field/binary>>);
-
% An empty selector
norm_fields({[]}, Path) ->
{Path, {[]}};
-
% Else we have an invalid selector
norm_fields(BadSelector, _) ->
?MANGO_ERROR({bad_field, BadSelector}).
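
norm_fields/2 in one worked step: nested field names collapse into a single dotted path, so every operator terminal ends up keyed by its full path. Illustratively:

    In  = {[{<<"a">>, {[{<<"b">>, {[{<<"$eq">>, 1}]}}]}}]},
    Out = {[{<<"a.b">>, {[{<<"$eq">>, 1}]}}]},
    Out = norm_fields(In).
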
-
% Take all the negation operators and move the logic
% as far down the branch as possible. This does things
% like:
@@ -325,33 +283,25 @@ norm_fields(BadSelector, _) ->
% Operators that cause a negation
norm_negations({[{<<"$not">>, Arg}]}) ->
negate(Arg);
-
norm_negations({[{<<"$nor">>, Args}]}) ->
{[{<<"$and">>, [negate(A) || A <- Args]}]};
-
% Operators that we merely seek through as we look for
% negations.
norm_negations({[{<<"$and">>, Args}]}) ->
{[{<<"$and">>, [norm_negations(A) || A <- Args]}]};
-
norm_negations({[{<<"$or">>, Args}]}) ->
{[{<<"$or">>, [norm_negations(A) || A <- Args]}]};
-
norm_negations({[{<<"$elemMatch">>, Arg}]}) ->
{[{<<"$elemMatch">>, norm_negations(Arg)}]};
-
norm_negations({[{<<"$allMatch">>, Arg}]}) ->
{[{<<"$allMatch">>, norm_negations(Arg)}]};
-
norm_negations({[{<<"$keyMapMatch">>, Arg}]}) ->
{[{<<"$keyMapMatch">>, norm_negations(Arg)}]};
-
% All other conditions can't introduce negations anywhere
% further down the operator tree.
norm_negations(Cond) ->
Cond.
-
% Actually negate an expression. Make sure to read up
% on De Morgan's laws if you're trying to read this, but
% in a nutshell:
@@ -368,20 +318,15 @@ norm_negations(Cond) ->
% norm_negations/1
negate({[{<<"$not">>, Arg}]}) ->
norm_negations(Arg);
-
negate({[{<<"$nor">>, Args}]}) ->
{[{<<"$or">>, [norm_negations(A) || A <- Args]}]};
-
% De Morgan negations
negate({[{<<"$and">>, Args}]}) ->
{[{<<"$or">>, [negate(A) || A <- Args]}]};
-
negate({[{<<"$or">>, Args}]}) ->
{[{<<"$and">>, [negate(A) || A <- Args]}]};
-
negate({[{<<"$default">>, _}]} = Arg) ->
?MANGO_ERROR({bad_arg, '$not', Arg});
-
% Negating comparison operators is straightforward
negate({[{<<"$lt">>, Arg}]}) ->
{[{<<"$gte">>, Arg}]};
@@ -399,22 +344,18 @@ negate({[{<<"$in">>, Args}]}) ->
{[{<<"$nin">>, Args}]};
negate({[{<<"$nin">>, Args}]}) ->
{[{<<"$in">>, Args}]};
-
% We can also trivially negate the exists operator
negate({[{<<"$exists">>, Arg}]}) ->
{[{<<"$exists">>, not Arg}]};
-
% Anything else we have to just terminate the
% negation by reinserting the negation operator
negate({[{<<"$", _/binary>>, _}]} = Cond) ->
{[{<<"$not">>, Cond}]};
-
% Finally, negating a field just means we negate its
% condition.
negate({[{Field, Cond}]}) ->
{[{Field, negate(Cond)}]}.
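
One worked negation, to make the De Morgan step concrete (illustrative selector; this assumes the $gt/$lte clauses mirror the $lt/$gte ones shown above):

    %% $not over an $and flips to an $or of negated children, and each
    %% comparison flips to its complement:
    In  = {[{<<"$and">>, [
        {[{<<"age">>, {[{<<"$lt">>, 21}]}}]},
        {[{<<"age">>, {[{<<"$gt">>, 65}]}}]}
    ]}]},
    Out = {[{<<"$or">>, [
        {[{<<"age">>, {[{<<"$gte">>, 21}]}}]},
        {[{<<"age">>, {[{<<"$lte">>, 65}]}}]}
    ]}]},
    Out = negate(In).
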
-
% We need to treat an empty array as always true. This will be applied
% for $or, $in, $all, $nin as well.
match({[{<<"$and">>, []}]}, _, _) ->
@@ -422,16 +363,13 @@ match({[{<<"$and">>, []}]}, _, _) ->
match({[{<<"$and">>, Args}]}, Value, Cmp) ->
Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end,
lists:all(Pred, Args);
-
match({[{<<"$or">>, []}]}, _, _) ->
true;
match({[{<<"$or">>, Args}]}, Value, Cmp) ->
Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end,
lists:any(Pred, Args);
-
match({[{<<"$not">>, Arg}]}, Value, Cmp) ->
not match(Arg, Value, Cmp);
-
match({[{<<"$all">>, []}]}, _, _) ->
false;
% All of the values in Args must exist in Values or
@@ -440,16 +378,16 @@ match({[{<<"$all">>, []}]}, _, _) ->
match({[{<<"$all">>, Args}]}, Values, _Cmp) when is_list(Values) ->
Pred = fun(A) -> lists:member(A, Values) end,
HasArgs = lists:all(Pred, Args),
- IsArgs = case Args of
- [A] when is_list(A) ->
- A == Values;
- _ ->
- false
- end,
+ IsArgs =
+ case Args of
+ [A] when is_list(A) ->
+ A == Values;
+ _ ->
+ false
+ end,
HasArgs orelse IsArgs;
match({[{<<"$all">>, _Args}]}, _Values, _Cmp) ->
false;
-
%% This is for $elemMatch, $allMatch, and possibly $in because of our normalizer.
%% A selector such as {"field_name": {"$elemMatch": {"$gte": 80, "$lt": 85}}}
%% gets normalized to:
@@ -464,17 +402,19 @@ match({[{<<"$all">>, _Args}]}, _Values, _Cmp) ->
%% So we filter out the <<>>.
match({[{<<>>, Arg}]}, Values, Cmp) ->
match(Arg, Values, Cmp);
-
% Matches when any element in values matches the
% sub-selector Arg.
match({[{<<"$elemMatch">>, Arg}]}, Values, Cmp) when is_list(Values) ->
try
- lists:foreach(fun(V) ->
- case match(Arg, V, Cmp) of
- true -> throw(matched);
- _ -> ok
- end
- end, Values),
+ lists:foreach(
+ fun(V) ->
+ case match(Arg, V, Cmp) of
+ true -> throw(matched);
+ _ -> ok
+ end
+ end,
+ Values
+ ),
false
catch
throw:matched ->
@@ -484,17 +424,19 @@ match({[{<<"$elemMatch">>, Arg}]}, Values, Cmp) when is_list(Values) ->
end;
match({[{<<"$elemMatch">>, _Arg}]}, _Value, _Cmp) ->
false;
-
% Matches when all elements in values match the
% sub-selector Arg.
match({[{<<"$allMatch">>, Arg}]}, [_ | _] = Values, Cmp) ->
try
- lists:foreach(fun(V) ->
- case match(Arg, V, Cmp) of
- false -> throw(unmatched);
- _ -> ok
- end
- end, Values),
+ lists:foreach(
+ fun(V) ->
+ case match(Arg, V, Cmp) of
+ false -> throw(unmatched);
+ _ -> ok
+ end
+ end,
+ Values
+ ),
true
catch
_:_ ->
@@ -502,17 +444,19 @@ match({[{<<"$allMatch">>, Arg}]}, [_ | _] = Values, Cmp) ->
end;
match({[{<<"$allMatch">>, _Arg}]}, _Value, _Cmp) ->
false;
-
% Matches when any key in the map value matches the
% sub-selector Arg.
match({[{<<"$keyMapMatch">>, Arg}]}, Value, Cmp) when is_tuple(Value) ->
try
- lists:foreach(fun(V) ->
- case match(Arg, V, Cmp) of
- true -> throw(matched);
- _ -> ok
- end
- end, [Key || {Key, _} <- element(1, Value)]),
+ lists:foreach(
+ fun(V) ->
+ case match(Arg, V, Cmp) of
+ true -> throw(matched);
+ _ -> ok
+ end
+ end,
+ [Key || {Key, _} <- element(1, Value)]
+ ),
false
catch
throw:matched ->
@@ -522,7 +466,6 @@ match({[{<<"$keyMapMatch">>, Arg}]}, Value, Cmp) when is_tuple(Value) ->
end;
match({[{<<"$keyMapMatch">>, _Arg}]}, _Value, _Cmp) ->
false;
-
% Our comparison operators are fairly straightforward
match({[{<<"$lt">>, Arg}]}, Value, Cmp) ->
Cmp(Value, Arg) < 0;
@@ -536,67 +479,62 @@ match({[{<<"$gte">>, Arg}]}, Value, Cmp) ->
Cmp(Value, Arg) >= 0;
match({[{<<"$gt">>, Arg}]}, Value, Cmp) ->
Cmp(Value, Arg) > 0;
-
match({[{<<"$in">>, []}]}, _, _) ->
false;
-match({[{<<"$in">>, Args}]}, Values, Cmp) when is_list(Values)->
+match({[{<<"$in">>, Args}]}, Values, Cmp) when is_list(Values) ->
Pred = fun(Arg) ->
- lists:foldl(fun(Value,Match) ->
- (Cmp(Value, Arg) == 0) or Match
- end, false, Values)
+ lists:foldl(
+ fun(Value, Match) ->
+ (Cmp(Value, Arg) == 0) or Match
+ end,
+ false,
+ Values
+ )
end,
lists:any(Pred, Args);
match({[{<<"$in">>, Args}]}, Value, Cmp) ->
Pred = fun(Arg) -> Cmp(Value, Arg) == 0 end,
lists:any(Pred, Args);
-
match({[{<<"$nin">>, []}]}, _, _) ->
true;
-match({[{<<"$nin">>, Args}]}, Values, Cmp) when is_list(Values)->
+match({[{<<"$nin">>, Args}]}, Values, Cmp) when is_list(Values) ->
not match({[{<<"$in">>, Args}]}, Values, Cmp);
match({[{<<"$nin">>, Args}]}, Value, Cmp) ->
Pred = fun(Arg) -> Cmp(Value, Arg) /= 0 end,
lists:all(Pred, Args);
-
% This logic is a bit subtle. Basically, if value is
% not undefined, then it exists.
match({[{<<"$exists">>, ShouldExist}]}, Value, _Cmp) ->
Exists = Value /= undefined,
ShouldExist andalso Exists;
-
match({[{<<"$type">>, Arg}]}, Value, _Cmp) when is_binary(Arg) ->
Arg == mango_json:type(Value);
-
match({[{<<"$mod">>, [D, R]}]}, Value, _Cmp) when is_integer(Value) ->
Value rem D == R;
match({[{<<"$mod">>, _}]}, _Value, _Cmp) ->
false;
-
match({[{<<"$regex">>, Regex}]}, Value, _Cmp) when is_binary(Value) ->
try
match == re:run(Value, Regex, [{capture, none}])
- catch _:_ ->
- false
+ catch
+ _:_ ->
+ false
end;
match({[{<<"$regex">>, _}]}, _Value, _Cmp) ->
false;
-
match({[{<<"$size">>, Arg}]}, Values, _Cmp) when is_list(Values) ->
length(Values) == Arg;
match({[{<<"$size">>, _}]}, _Value, _Cmp) ->
false;
-
% We don't have any choice but to believe that the text
% index returned valid matches
match({[{<<"$default">>, _}]}, _Value, _Cmp) ->
true;
-
% All other operators are internal assertion errors for
% matching because we either should've removed them during
% normalization or something else broke.
-match({[{<<"$", _/binary>>=Op, _}]}, _, _) ->
+match({[{<<"$", _/binary>> = Op, _}]}, _, _) ->
?MANGO_ERROR({invalid_operator, Op});
-
% We need to traverse value to find field. The call to
% mango_doc:get_field/2 may return either not_found or
% bad_path in which case matching fails.
@@ -613,11 +551,9 @@ match({[{Field, Cond}]}, Value, Cmp) ->
SubValue ->
match(Cond, SubValue, Cmp)
end;
-
match({[_, _ | _] = _Props} = Sel, _Value, _Cmp) ->
erlang:error({unnormalized_selector, Sel}).
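
End to end, match/2 is as simple to drive as it looks. A sketch with illustrative values (the selector must be normalized first):

    Selector = mango_selector:normalize({[{<<"age">>, {[{<<"$gt">>, 21}]}}]}),
    Doc = {[{<<"age">>, 30}, {<<"name">>, <<"bob">>}]},
    true = mango_selector:match(Selector, Doc),
    false = mango_selector:match(Selector, {[{<<"age">>, 18}]}).
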
-
% Returns true if Selector requires all
% fields in RequiredFields to exist in any matching documents.
@@ -634,48 +570,48 @@ has_required_fields(Selector, RequiredFields) ->
% Empty selector
has_required_fields_int({[]}, Remainder) ->
Remainder;
-
% No more required fields
has_required_fields_int(_, []) ->
[];
-
% No more selector
has_required_fields_int([], Remainder) ->
Remainder;
-
has_required_fields_int(Selector, RequiredFields) when not is_list(Selector) ->
has_required_fields_int([Selector], RequiredFields);
-
% We can "see" through $and operator. Iterate
% through the list of child operators.
-has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields)
- when is_list(Args) ->
+has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) when
+ is_list(Args)
+->
has_required_fields_int(Args, RequiredFields);
-
% We can "see" through $or operator. Required fields
% must be covered by all children.
-has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields)
- when is_list(Args) ->
- Remainder0 = lists:foldl(fun(Arg, Acc) ->
- % for each child test coverage against the full
- % set of required fields
- Remainder = has_required_fields_int(Arg, RequiredFields),
-
- % collect the remaining fields across all children
- Acc ++ Remainder
- end, [], Args),
+has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields) when
+ is_list(Args)
+->
+ Remainder0 = lists:foldl(
+ fun(Arg, Acc) ->
+ % for each child test coverage against the full
+ % set of required fields
+ Remainder = has_required_fields_int(Arg, RequiredFields),
+
+ % collect the remaining fields across all children
+ Acc ++ Remainder
+ end,
+ [],
+ Args
+ ),
% remove duplicate fields
Remainder1 = lists:usort(Remainder0),
has_required_fields_int(Rest, Remainder1);
-
% Handle $and operator where it has peers. Required fields
% can be covered by any child.
-has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields)
- when is_list(Args) ->
+has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields) when
+ is_list(Args)
+->
Remainder = has_required_fields_int(Args, RequiredFields),
has_required_fields_int(Rest, Remainder);
-
has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) ->
case Cond of
% $exists:false is a special case - this is the only operator
@@ -686,30 +622,22 @@ has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) ->
has_required_fields_int(Rest, lists:delete(Field, RequiredFields))
end.
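
has_required_fields/2 at a glance: an $or only covers a field if every branch constrains it, while an $and needs any single branch to do so, and $exists:false never counts as coverage. Hypothetical examples, mirroring the tests below:

    %% {"$or":  [{"A": "x"}, {"B": "y"}]}  does not cover [<<"A">>]
    %% {"$and": [{"A": "x"}, {"B": "y"}]}  covers [<<"A">>, <<"B">>]
    %% {"A": {"$exists": false}}           never covers <<"A">>
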
-
% Returns true if a field in the selector is a constant value e.g. {a: {$eq: 1}}
is_constant_field({[]}, _Field) ->
false;
-
is_constant_field(Selector, Field) when not is_list(Selector) ->
is_constant_field([Selector], Field);
-
is_constant_field([], _Field) ->
false;
-
is_constant_field([{[{<<"$and">>, Args}]}], Field) when is_list(Args) ->
lists:any(fun(Arg) -> is_constant_field(Arg, Field) end, Args);
-
is_constant_field([{[{<<"$and">>, Args}]}], Field) ->
is_constant_field(Args, Field);
-
is_constant_field([{[{Field, {[{Cond, _Val}]}}]} | _Rest], Field) ->
Cond =:= <<"$eq">>;
-
is_constant_field([{[{_UnMatched, _}]} | Rest], Field) ->
is_constant_field(Rest, Field).
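
is_constant_field/2 answers "is this field pinned to a single value?", which only an $eq condition can guarantee. On a normalized selector (as exercised by the tests below):

    %% {"cars": {"$eq": "2"}} -> true  for <<"cars">>
    %% {"age":  {"$gt": 10}}  -> false for <<"age">>
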
-
%%%%%%%% module tests below %%%%%%%%
-ifdef(TEST).
@@ -721,42 +649,49 @@ is_constant_field_basic_test() ->
?assertEqual(true, is_constant_field(Selector, Field)).
is_constant_field_basic_two_test() ->
- Selector = normalize({[{<<"$and">>,
- [
- {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
- {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
- ]
- }]}),
+ Selector = normalize(
+ {[
+ {<<"$and">>, [
+ {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
+ {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]}
+ ),
Field = <<"cars">>,
?assertEqual(true, is_constant_field(Selector, Field)).
is_constant_field_not_eq_test() ->
- Selector = normalize({[{<<"$and">>,
- [
- {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
- {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
- ]
- }]}),
+ Selector = normalize(
+ {[
+ {<<"$and">>, [
+ {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
+ {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]}
+ ),
Field = <<"age">>,
?assertEqual(false, is_constant_field(Selector, Field)).
is_constant_field_missing_field_test() ->
- Selector = normalize({[{<<"$and">>,
- [
- {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
- {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
- ]
- }]}),
+ Selector = normalize(
+ {[
+ {<<"$and">>, [
+ {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
+ {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]}
+ ),
Field = <<"wrong">>,
?assertEqual(false, is_constant_field(Selector, Field)).
is_constant_field_or_field_test() ->
- Selector = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
Field = <<"A">>,
?assertEqual(false, is_constant_field(Normalized, Field)).
@@ -767,37 +702,40 @@ is_constant_field_empty_selector_test() ->
?assertEqual(false, is_constant_field(Selector, Field)).
is_constant_nested_and_test() ->
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$and">>,
- [
- {[{<<"B">>, {[{<<"$gt">>,10}]}}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$and">>, [
+ {[{<<"B">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, is_constant_field(Normalized, <<"A">>)),
?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
is_constant_combined_or_and_equals_test() ->
- Selector = {[{<<"A">>, "foo"},
- {<<"$or">>,
- [
- {[{<<"B">>, <<"bar">>}]},
- {[{<<"B">>, <<"baz">>}]}
- ]
- },
- {<<"C">>, "qux"}
- ]},
+ Selector =
+ {[
+ {<<"A">>, "foo"},
+ {<<"$or">>, [
+ {[{<<"B">>, <<"bar">>}]},
+ {[{<<"B">>, <<"baz">>}]}
+ ]},
+ {<<"C">>, "qux"}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, is_constant_field(Normalized, <<"C">>)),
?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
@@ -822,202 +760,225 @@ has_required_fields_empty_selector_test() ->
has_required_fields_exists_false_test() ->
RequiredFields = [<<"A">>],
- Selector = {[{<<"A">>,{[{<<"$exists">>, false}]}}]},
+ Selector = {[{<<"A">>, {[{<<"$exists">>, false}]}}]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
has_required_fields_and_true_test() ->
RequiredFields = [<<"A">>],
- Selector = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_nested_and_true_test() ->
RequiredFields = [<<"A">>, <<"B">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$and">>,
- [
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$and">>, [
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_and_false_test() ->
RequiredFields = [<<"A">>, <<"C">>],
- Selector = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_false_test() ->
RequiredFields = [<<"A">>],
- Selector = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_true_test() ->
RequiredFields = [<<"A">>, <<"B">>, <<"C">>],
- Selector = {[{<<"A">>, "foo"},
- {<<"$or">>,
- [
- {[{<<"B">>, <<"bar">>}]},
- {[{<<"B">>, <<"baz">>}]}
- ]
- },
- {<<"C">>, "qux"}
- ]},
+ Selector =
+ {[
+ {<<"A">>, "foo"},
+ {<<"$or">>, [
+ {[{<<"B">>, <<"bar">>}]},
+ {[{<<"B">>, <<"baz">>}]}
+ ]},
+ {<<"C">>, "qux"}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_and_nested_or_true_test() ->
RequiredFields = [<<"A">>, <<"B">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"B">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"B">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)),
- SelectorReverse = {[{<<"$and">>,
- [
- Selector2,
- Selector1
- ]
- }]},
+ SelectorReverse =
+ {[
+ {<<"$and">>, [
+ Selector2,
+ Selector1
+ ]}
+ ]},
NormalizedReverse = normalize(SelectorReverse),
?assertEqual(true, has_required_fields(NormalizedReverse, RequiredFields)).
has_required_fields_and_nested_or_false_test() ->
RequiredFields = [<<"A">>, <<"B">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)),
- SelectorReverse = {[{<<"$and">>,
- [
- Selector2,
- Selector1
- ]
- }]},
+ SelectorReverse =
+ {[
+ {<<"$and">>, [
+ Selector2,
+ Selector1
+ ]}
+ ]},
NormalizedReverse = normalize(SelectorReverse),
?assertEqual(false, has_required_fields(NormalizedReverse, RequiredFields)).
has_required_fields_or_nested_and_true_test() ->
RequiredFields = [<<"A">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$or">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_nested_or_true_test() ->
RequiredFields = [<<"A">>],
- Selector1 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"bar">>}]}
- ]
- }]},
- Selector = {[{<<"$or">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"bar">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_nested_or_false_test() ->
RequiredFields = [<<"A">>],
- Selector1 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"B">>, <<"bar">>}]}
- ]
- }]},
- Selector = {[{<<"$or">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"B">>, <<"bar">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
diff --git a/src/mango/src/mango_selector_text.erl b/src/mango/src/mango_selector_text.erl
index b3b61ff26..aaa1e3329 100644
--- a/src/mango/src/mango_selector_text.erl
+++ b/src/mango/src/mango_selector_text.erl
@@ -12,7 +12,6 @@
-module(mango_selector_text).
-
-export([
convert/1,
convert/2,
@@ -20,20 +19,16 @@
append_sort_type/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
%% Regex for <<"\\.">>
-define(PERIOD, "\\.").
-
convert(Object) ->
TupleTree = convert([], Object),
iolist_to_binary(to_query(TupleTree)).
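
So convert/1 runs in two phases: convert/2 folds the normalized selector into a tuple tree of op_and / op_or / op_not / op_field nodes, and to_query/1 then renders that tree as the final Lucene query iolist. Roughly, an op_and node's children come out joined as

    ["(", Child1, <<" AND ">>, Child2, ")"]

with field names passed through mango_util:lucene_escape_user/1 on the way out.
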
-
convert(Path, {[{<<"$and">>, Args}]}) ->
Parts = [convert(Path, Arg) || Arg <- Args],
{op_and, Parts};
@@ -45,12 +40,10 @@ convert(Path, {[{<<"$not">>, Arg}]}) ->
convert(Path, {[{<<"$default">>, Arg}]}) ->
{op_field, {_, Query}} = convert(Path, Arg),
{op_default, Query};
-
% The $text operator specifies a Lucene syntax query
% so we just pull it in directly.
convert(Path, {[{<<"$text">>, Query}]}) when is_binary(Query) ->
{op_field, {make_field(Path, Query), value_str(Query)}};
-
% The MongoDB docs for $all are super confusing and read more
% like they screwed up the implementation of this operator
% and then just documented it as a feature.
@@ -68,15 +61,14 @@ convert(Path, {[{<<"$all">>, Args}]}) ->
% that means we just need to search for each value in
% Path.[] and Path.[].[] and rely on our filtering to limit
% the results properly.
- Fields1 = convert(Path, {[{<<"$eq">> , Values}]}),
- Fields2 = convert([<<"[]">>| Path], {[{<<"$eq">> , Values}]}),
+ Fields1 = convert(Path, {[{<<"$eq">>, Values}]}),
+ Fields2 = convert([<<"[]">> | Path], {[{<<"$eq">>, Values}]}),
{op_or, [Fields1, Fields2]};
_ ->
% Otherwise the $all operator is equivalent to an $and
% operator so we treat it as such.
convert([<<"[]">> | Path], {[{<<"$and">>, Args}]})
end;
-
% The $elemMatch Lucene query is not an exact translation
% as we can't enforce that the matches are all for the same
% item in an array. We just rely on the final selector match
@@ -85,18 +77,22 @@ convert(Path, {[{<<"$all">>, Args}]}) ->
% say this has to match against an array.
convert(Path, {[{<<"$elemMatch">>, Arg}]}) ->
convert([<<"[]">> | Path], Arg);
-
convert(Path, {[{<<"$allMatch">>, Arg}]}) ->
convert([<<"[]">> | Path], Arg);
-
% Our comparison operators are fairly straightforward
-convert(Path, {[{<<"$lt">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null ->
+convert(Path, {[{<<"$lt">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$lt">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(lt, Arg)}};
-convert(Path, {[{<<"$lte">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null->
+convert(Path, {[{<<"$lte">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$lte">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(lte, Arg)}};
@@ -115,66 +111,64 @@ convert(Path, {[{<<"$eq">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), value_str(Arg)}};
convert(Path, {[{<<"$ne">>, Arg}]}) ->
{op_not, {field_exists_query(Path), convert(Path, {[{<<"$eq">>, Arg}]})}};
-convert(Path, {[{<<"$gte">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null ->
+convert(Path, {[{<<"$gte">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$gte">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(gte, Arg)}};
-convert(Path, {[{<<"$gt">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null->
+convert(Path, {[{<<"$gt">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$gt">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(gt, Arg)}};
-
convert(Path, {[{<<"$in">>, Args}]}) ->
{op_or, convert_in(Path, Args)};
-
convert(Path, {[{<<"$nin">>, Args}]}) ->
{op_not, {field_exists_query(Path), convert(Path, {[{<<"$in">>, Args}]})}};
-
convert(Path, {[{<<"$exists">>, ShouldExist}]}) ->
FieldExists = field_exists_query(Path),
case ShouldExist of
true -> FieldExists;
false -> {op_not, {FieldExists, false}}
end;
-
% We're not checking the actual type here, just looking for
% anything that has a possibility of matching by checking
% for the field name. We use the same logic for $exists on
% the actual query.
convert(Path, {[{<<"$type">>, _}]}) ->
field_exists_query(Path);
-
convert(Path, {[{<<"$mod">>, _}]}) ->
field_exists_query(Path, "number");
-
% The Lucene regular expression engine does not use Java's regex engine but
% instead a custom implementation. The syntax is therefore different, so we
% would get different behavior than our view indexes. To be consistent, we will
% simply return docs for fields that exist and then run our match filter.
convert(Path, {[{<<"$regex">>, _}]}) ->
field_exists_query(Path, "string");
-
convert(Path, {[{<<"$size">>, Arg}]}) ->
{op_field, {make_field([<<"[]">> | Path], length), value_str(Arg)}};
-
% All other operators are internal assertion errors for
% matching because we either should've removed them during
% normalization or something else broke.
-convert(_Path, {[{<<"$", _/binary>>=Op, _}]}) ->
+convert(_Path, {[{<<"$", _/binary>> = Op, _}]}) ->
?MANGO_ERROR({invalid_operator, Op});
-
% We've hit a field name specifier. Check if the field name is accessing
% arrays. Convert occurrences of element position references to .[]. Then we
% need to break the name into path parts and continue our conversion.
convert(Path, {[{Field0, Cond}]}) ->
- {ok, PP0} = case Field0 of
- <<>> ->
- {ok, []};
- _ ->
- mango_util:parse_field(Field0)
- end,
+ {ok, PP0} =
+ case Field0 of
+ <<>> ->
+ {ok, []};
+ _ ->
+ mango_util:parse_field(Field0)
+ end,
% Later on, we perform a lucene_escape_user call on the
% final Path, which calls parse_field again. Calling the function
% twice converts <<"a\\.b">> to [<<"a">>,<<"b">>]. This leads to
@@ -182,8 +176,15 @@ convert(Path, {[{Field0, Cond}]}) ->
% our escaping mechanism, we simply revert this first parse_field
% effect and replace instances of "." to "\\.".
MP = mango_util:cached_re(mango_period, ?PERIOD),
- PP1 = [re:replace(P, MP, <<"\\\\.">>,
- [global,{return,binary}]) || P <- PP0],
+ PP1 = [
+ re:replace(
+ P,
+ MP,
+ <<"\\\\.">>,
+ [global, {return, binary}]
+ )
+ || P <- PP0
+ ],
{PP2, HasInteger} = replace_array_indexes(PP1, [], false),
NewPath = PP2 ++ Path,
case HasInteger of
@@ -195,101 +196,88 @@ convert(Path, {[{Field0, Cond}]}) ->
false ->
convert(NewPath, Cond)
end;
-
%% For $in
convert(Path, Val) when is_binary(Val); is_number(Val); is_boolean(Val) ->
{op_field, {make_field(Path, Val), value_str(Val)}};
-
% Anything else is a bad selector.
convert(_Path, {Props} = Sel) when length(Props) > 1 ->
erlang:error({unnormalized_selector, Sel}).
-
to_query_nested(Args) ->
QueryArgs = lists:map(fun to_query/1, Args),
% removes empty queries that result from selectors with empty arrays
FilterFun = fun(A) -> A =/= [] andalso A =/= "()" end,
lists:filter(FilterFun, QueryArgs).
-
to_query({op_and, []}) ->
[];
-
to_query({op_and, Args}) when is_list(Args) ->
case to_query_nested(Args) of
[] -> [];
- QueryArgs -> ["(", mango_util:join(<<" AND ">>, QueryArgs), ")"]
+ QueryArgs -> ["(", mango_util:join(<<" AND ">>, QueryArgs), ")"]
end;
-
to_query({op_or, []}) ->
[];
-
to_query({op_or, Args}) when is_list(Args) ->
case to_query_nested(Args) of
[] -> [];
QueryArgs -> ["(", mango_util:join(" OR ", QueryArgs), ")"]
end;
-
to_query({op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
case to_query(Arg) of
[] -> ["(", to_query(ExistsQuery), ")"];
Query -> ["(", to_query(ExistsQuery), " AND NOT (", Query, "))"]
end;
-
%% For $exists:false
to_query({op_not, {ExistsQuery, false}}) ->
["($fieldnames:/.*/ ", " AND NOT (", to_query(ExistsQuery), "))"];
-
to_query({op_insert, Arg}) when is_binary(Arg) ->
["(", Arg, ")"];
-
%% For now we escape : and / in values, and all Lucene special chars in fieldnames.
%% This needs to be resolved.
to_query({op_field, {Name, Value}}) ->
NameBin = iolist_to_binary(Name),
["(", mango_util:lucene_escape_user(NameBin), ":", Value, ")"];
-
%% This is for indexable_fields
to_query({op_null, {Name, Value}}) ->
NameBin = iolist_to_binary(Name),
["(", mango_util:lucene_escape_user(NameBin), ":", Value, ")"];
-
to_query({op_fieldname, {Name, Wildcard}}) ->
NameBin = iolist_to_binary(Name),
["($fieldnames:", mango_util:lucene_escape_user(NameBin), Wildcard, ")"];
-
to_query({op_default, Value}) ->
["($default:", Value, ")"].
-
%% We match on fieldname and fieldname.[]
convert_in(Path, Args) ->
Path0 = [<<"[]">> | Path],
- lists:map(fun(Arg) ->
- case Arg of
- {Object} ->
- Parts = lists:map(fun (SubObject) ->
- Fields1 = convert(Path, {[SubObject]}),
- Fields2 = convert(Path0, {[SubObject]}),
+ lists:map(
+ fun(Arg) ->
+ case Arg of
+ {Object} ->
+ Parts = lists:map(
+ fun(SubObject) ->
+ Fields1 = convert(Path, {[SubObject]}),
+ Fields2 = convert(Path0, {[SubObject]}),
+ {op_or, [Fields1, Fields2]}
+ end,
+ Object
+ ),
+ {op_or, Parts};
+ SingleVal ->
+ Fields1 = {op_field, {make_field(Path, SingleVal), value_str(SingleVal)}},
+ Fields2 = {op_field, {make_field(Path0, SingleVal), value_str(SingleVal)}},
{op_or, [Fields1, Fields2]}
- end, Object),
- {op_or, Parts};
- SingleVal ->
- Fields1 = {op_field, {make_field(Path, SingleVal),
- value_str(SingleVal)}},
- Fields2 = {op_field, {make_field(Path0, SingleVal),
- value_str(SingleVal)}},
- {op_or, [Fields1, Fields2]}
- end
- end, Args).
-
+ end
+ end,
+ Args
+ ).
make_field(Path, length) ->
[path_str(Path), <<":length">>];
make_field(Path, Arg) ->
[path_str(Path), <<":">>, type_str(Arg)].
-
range(lt, Arg) ->
Min = get_range(min, Arg),
[<<"[", Min/binary, " TO ">>, value_str(Arg), <<"}">>];
@@ -312,7 +300,6 @@ get_range(max, Arg) when is_number(Arg) ->
get_range(max, _Arg) ->
<<"\u0x10FFFF">>.
-
field_exists_query(Path) ->
% We specify two here for :* and .* so that we don't incorrectly
% match a path foo.name against foo.name_first (if we were to just
@@ -326,15 +313,12 @@ field_exists_query(Path) ->
],
{op_or, Parts}.
-
field_exists_query(Path, Type) ->
{op_fieldname, {[path_str(Path), ":"], Type}}.
-
path_str(Path) ->
path_str(Path, []).
-
path_str([], Acc) ->
Acc;
path_str([Part], Acc) ->
@@ -350,7 +334,6 @@ path_str([Part | Rest], Acc) ->
path_str(Rest, [<<".">>, Part | Acc])
end.
-
type_str(Value) when is_number(Value) ->
<<"number">>;
type_str(Value) when is_boolean(Value) ->
@@ -360,7 +343,6 @@ type_str(Value) when is_binary(Value) ->
type_str(null) ->
<<"null">>.
-
value_str(Value) when is_binary(Value) ->
case mango_util:is_number_string(Value) of
true ->
@@ -380,7 +362,6 @@ value_str(false) ->
value_str(null) ->
<<"true">>.
-
append_sort_type(RawSortField, Selector) ->
EncodeField = mango_util:lucene_escape_user(RawSortField),
String = mango_util:has_suffix(EncodeField, <<"_3astring">>),
@@ -395,7 +376,6 @@ append_sort_type(RawSortField, Selector) ->
<<EncodeField/binary, Type/binary>>
end.
-
get_sort_type(Field, Selector) ->
Types = get_sort_types(Field, Selector, []),
case lists:usort(Types) of
@@ -404,35 +384,40 @@ get_sort_type(Field, Selector) ->
_ -> ?MANGO_ERROR({text_sort_error, Field})
end.
-
-get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc)
- when is_binary(Cond) ->
+get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc) when
+ is_binary(Cond)
+->
[str | Acc];
-
-get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc)
- when is_number(Cond) ->
+get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc) when
+ is_number(Cond)
+->
[num | Acc];
-
get_sort_types(Field, {[{_, Cond}]}, Acc) when is_list(Cond) ->
- lists:foldl(fun(Arg, InnerAcc) ->
- get_sort_types(Field, Arg, InnerAcc)
- end, Acc, Cond);
-
-get_sort_types(Field, {[{_, Cond}]}, Acc) when is_tuple(Cond)->
+ lists:foldl(
+ fun(Arg, InnerAcc) ->
+ get_sort_types(Field, Arg, InnerAcc)
+ end,
+ Acc,
+ Cond
+ );
+get_sort_types(Field, {[{_, Cond}]}, Acc) when is_tuple(Cond) ->
get_sort_types(Field, Cond, Acc);
-
-get_sort_types(_Field, _, Acc) ->
+get_sort_types(_Field, _, Acc) ->
Acc.
-
replace_array_indexes([], NewPartsAcc, HasIntAcc) ->
{NewPartsAcc, HasIntAcc};
replace_array_indexes([Part | Rest], NewPartsAcc, HasIntAcc) ->
- {NewPart, HasInt} = try
- _ = list_to_integer(binary_to_list(Part)),
- {<<"[]">>, true}
- catch _:_ ->
- {Part, false}
- end,
- replace_array_indexes(Rest, [NewPart | NewPartsAcc],
- HasInt or HasIntAcc).
+ {NewPart, HasInt} =
+ try
+ _ = list_to_integer(binary_to_list(Part)),
+ {<<"[]">>, true}
+ catch
+ _:_ ->
+ {Part, false}
+ end,
+ replace_array_indexes(
+ Rest,
+ [NewPart | NewPartsAcc],
+ HasInt or HasIntAcc
+ ).
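
replace_array_indexes/3 above, worked once: any path part that parses as an integer is rewritten to <<"[]">>, and the boolean reports whether that happened. Note the accumulator reverses the parts, matching the reversed-path convention used throughout this module (the helper is internal; shown here as if called directly):

    {[<<"title">>, <<"[]">>, <<"books">>], true} =
        replace_array_indexes([<<"books">>, <<"1">>, <<"title">>], [], false).
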
diff --git a/src/mango/src/mango_sort.erl b/src/mango/src/mango_sort.erl
index 17249c297..808b6e7f2 100644
--- a/src/mango/src/mango_sort.erl
+++ b/src/mango/src/mango_sort.erl
@@ -19,10 +19,8 @@
directions/1
]).
-
-include("mango.hrl").
-
new(Fields) when is_list(Fields) ->
Sort = {[sort_field(Field) || Field <- Fields]},
validate(Sort),
@@ -30,7 +28,6 @@ new(Fields) when is_list(Fields) ->
new(Else) ->
?MANGO_ERROR({invalid_sort_json, Else}).
-
to_json({Fields}) ->
to_json(Fields);
to_json([]) ->
@@ -38,15 +35,12 @@ to_json([]) ->
to_json([{Name, Dir} | Rest]) ->
[{[{Name, Dir}]} | to_json(Rest)].
-
fields({Props}) ->
[Name || {Name, _Dir} <- Props].
-
directions({Props}) ->
[Dir || {_Name, Dir} <- Props].
-
sort_field(<<"">>) ->
?MANGO_ERROR({invalid_sort_field, <<"">>});
sort_field(Field) when is_binary(Field) ->
@@ -60,7 +54,6 @@ sort_field({Name, BadDir}) when is_binary(Name) ->
sort_field(Else) ->
?MANGO_ERROR({invalid_sort_field, Else}).
-
validate({Props}) ->
% Assert each field is in the same direction
% until we support mixed direction sorts.
diff --git a/src/mango/src/mango_sup.erl b/src/mango/src/mango_sup.erl
index b0dedf125..c0b04d9c9 100644
--- a/src/mango/src/mango_sup.erl
+++ b/src/mango/src/mango_sup.erl
@@ -16,9 +16,8 @@
-export([start_link/1]).
-
start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
+ supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
init([]) ->
{ok, {{one_for_one, 3, 10}, couch_epi:register_service(mango_epi, [])}}.
diff --git a/src/mango/src/mango_util.erl b/src/mango/src/mango_util.erl
index d649f95f1..00480a65d 100644
--- a/src/mango/src/mango_util.erl
+++ b/src/mango/src/mango_util.erl
@@ -12,7 +12,6 @@
-module(mango_util).
-
-export([
open_doc/2,
load_ddoc/2,
@@ -42,48 +41,44 @@
cached_re/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
-define(DIGITS, "(\\p{N}+)").
-define(HEXDIGITS, "([0-9a-fA-F]+)").
-define(EXP, "[eE][+-]?" ++ ?DIGITS).
-define(NUMSTRING,
-"[\\x00-\\x20]*" ++ "[+-]?(" ++ "NaN|"
- ++ "Infinity|" ++ "((("
- ++ ?DIGITS
- ++ "(\\.)?("
- ++ ?DIGITS
- ++ "?)("
- ++ ?EXP
- ++ ")?)|"
- ++ "(\\.("
- ++ ?DIGITS
- ++ ")("
- ++ ?EXP
- ++ ")?)|"
- ++ "(("
- ++ "(0[xX]"
- ++ ?HEXDIGITS
- ++ "(\\.)?)|"
- ++ "(0[xX]"
- ++ ?HEXDIGITS
- ++ "?(\\.)"
- ++ ?HEXDIGITS
- ++ ")"
- ++ ")[pP][+-]?" ++ ?DIGITS ++ "))" ++ "[fFdD]?))" ++ "[\\x00-\\x20]*").
-
+ "[\\x00-\\x20]*" ++ "[+-]?(" ++ "NaN|" ++
+ "Infinity|" ++ "(((" ++
+ ?DIGITS ++
+ "(\\.)?(" ++
+ ?DIGITS ++
+ "?)(" ++
+ ?EXP ++
+ ")?)|" ++
+ "(\\.(" ++
+ ?DIGITS ++
+ ")(" ++
+ ?EXP ++
+ ")?)|" ++
+ "((" ++
+ "(0[xX]" ++
+ ?HEXDIGITS ++
+ "(\\.)?)|" ++
+ "(0[xX]" ++
+ ?HEXDIGITS ++
+ "?(\\.)" ++
+ ?HEXDIGITS ++
+ ")" ++
+ ")[pP][+-]?" ++ ?DIGITS ++ "))" ++ "[fFdD]?))" ++ "[\\x00-\\x20]*"
+).
open_doc(Db, DocId) ->
open_doc(Db, DocId, [deleted, ejson_body]).
-
open_doc(Db, DocId, Options) ->
fabric2_db:open_doc(Db, DocId, Options).
-
load_ddoc(Db, DDocId) ->
load_ddoc(Db, DDocId, [deleted, ejson_body]).
@@ -92,13 +87,13 @@ load_ddoc(Db, DDocId, DbOpts) ->
{ok, Doc} ->
{ok, check_lang(Doc)};
{not_found, missing} ->
- Body = {[
- {<<"language">>, <<"query">>}
- ]},
+ Body =
+ {[
+ {<<"language">>, <<"query">>}
+ ]},
{ok, #doc{id = DDocId, body = Body}}
end.
-
assert_ejson({Props}) ->
assert_ejson_obj(Props);
assert_ejson(Vals) when is_list(Vals) ->
@@ -116,7 +111,6 @@ assert_ejson(Number) when is_number(Number) ->
assert_ejson(_Else) ->
false.
-
assert_ejson_obj([]) ->
true;
assert_ejson_obj([{Key, Val} | Rest]) when is_binary(Key) ->
@@ -129,7 +123,6 @@ assert_ejson_obj([{Key, Val} | Rest]) when is_binary(Key) ->
assert_ejson_obj(_Else) ->
false.
-
assert_ejson_arr([]) ->
true;
assert_ejson_arr([Val | Rest]) ->
@@ -140,11 +133,11 @@ assert_ejson_arr([Val | Rest]) ->
false
end.
-
check_lang(#doc{id = Id, deleted = true}) ->
- Body = {[
- {<<"language">>, <<"query">>}
- ]},
+ Body =
+ {[
+ {<<"language">>, <<"query">>}
+ ]},
#doc{id = Id, body = Body};
check_lang(#doc{body = {Props}} = Doc) ->
case lists:keyfind(<<"language">>, 1, Props) of
@@ -154,13 +147,11 @@ check_lang(#doc{body = {Props}} = Doc) ->
?MANGO_ERROR({invalid_ddoc_lang, Else})
end.
-
to_lower(Key) when is_binary(Key) ->
KStr = binary_to_list(Key),
KLower = string:to_lower(KStr),
list_to_binary(KLower).
-
enc_dbname(<<>>) ->
<<>>;
enc_dbname(<<A:8/integer, Rest/binary>>) ->
@@ -168,7 +159,6 @@ enc_dbname(<<A:8/integer, Rest/binary>>) ->
Tail = enc_dbname(Rest),
<<Bytes/binary, Tail/binary>>.
-
enc_db_byte(N) when N >= $a, N =< $z -> <<N>>;
enc_db_byte(N) when N >= $0, N =< $9 -> <<N>>;
enc_db_byte(N) when N == $/; N == $_; N == $- -> <<N>>;
@@ -177,7 +167,6 @@ enc_db_byte(N) ->
L = enc_hex_byte(N rem 16),
<<$$, H:8/integer, L:8/integer>>.
-
dec_dbname(<<>>) ->
<<>>;
dec_dbname(<<$$, _:8/integer>>) ->
@@ -190,7 +179,6 @@ dec_dbname(<<N:8/integer, Rest/binary>>) ->
Tail = dec_dbname(Rest),
<<N:8/integer, Tail/binary>>.
-
enc_hex(<<>>) ->
<<>>;
enc_hex(<<V:8/integer, Rest/binary>>) ->
@@ -199,12 +187,10 @@ enc_hex(<<V:8/integer, Rest/binary>>) ->
Tail = enc_hex(Rest),
<<H:8/integer, L:8/integer, Tail/binary>>.
-
enc_hex_byte(N) when N >= 0, N < 10 -> $0 + N;
enc_hex_byte(N) when N >= 10, N < 16 -> $a + (N - 10);
enc_hex_byte(N) -> throw({invalid_hex_value, N}).
-
dec_hex(<<>>) ->
<<>>;
dec_hex(<<_:8/integer>>) ->
@@ -214,14 +200,11 @@ dec_hex(<<H:8/integer, L:8/integer, Rest/binary>>) ->
Tail = dec_hex(Rest),
<<Byte:8/integer, Tail/binary>>.
-
dec_hex_byte(N) when N >= $0, N =< $9 -> (N - $0);
dec_hex_byte(N) when N >= $a, N =< $f -> (N - $a) + 10;
dec_hex_byte(N) when N >= $A, N =< $F -> (N - $A) + 10;
dec_hex_byte(N) -> throw({invalid_hex_character, N}).
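
The database-name encoding above, worked once: lowercase letters, digits, and / _ - pass through untouched, while any other byte becomes $ followed by two hex digits, and dec_dbname/1 inverts it exactly. For instance ($N is byte 78 = 16#4e):

    <<"db/$4eame">> = enc_dbname(<<"db/Name">>).
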
-
-
lucene_escape_field(Bin) when is_binary(Bin) ->
Str = binary_to_list(Bin),
Enc = lucene_escape_field(Str),
@@ -238,58 +221,58 @@ lucene_escape_field([H | T]) when is_number(H), H >= 0, H =< 255 ->
Hi = enc_hex_byte(H div 16),
Lo = enc_hex_byte(H rem 16),
[$_, Hi, Lo | lucene_escape_field(T)]
- end;
+ end;
lucene_escape_field([]) ->
[].
-
lucene_escape_query_value(IoList) when is_list(IoList) ->
lucene_escape_query_value(iolist_to_binary(IoList));
lucene_escape_query_value(Bin) when is_binary(Bin) ->
IoList = lucene_escape_qv(Bin),
iolist_to_binary(IoList).
-
% This escapes the special Lucene query characters
% listed below as well as any whitespace.
%
% + - && || ! ( ) { } [ ] ^ ~ * ? : \ " /
%
-lucene_escape_qv(<<>>) -> [];
+lucene_escape_qv(<<>>) ->
+ [];
lucene_escape_qv(<<"&&", Rest/binary>>) ->
["\\&&" | lucene_escape_qv(Rest)];
lucene_escape_qv(<<"||", Rest/binary>>) ->
["\\||" | lucene_escape_qv(Rest)];
lucene_escape_qv(<<C, Rest/binary>>) ->
NeedsEscape = "+-(){}[]!^~*?:/\\\" \t\r\n",
- Out = case lists:member(C, NeedsEscape) of
- true -> ["\\", C];
- false -> [C]
- end,
+ Out =
+ case lists:member(C, NeedsEscape) of
+ true -> ["\\", C];
+ false -> [C]
+ end,
Out ++ lucene_escape_qv(Rest).
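
lucene_escape_qv/1 worked once: every character in the NeedsEscape list (which includes whitespace) gets a leading backslash, while && and || are escaped as two-character units. Illustratively:

    <<"a\\:b\\ c\\?">> = lucene_escape_query_value(<<"a:b c?">>).
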
-
lucene_escape_user(Field) ->
{ok, Path} = parse_field(Field),
Escaped = [mango_util:lucene_escape_field(P) || P <- Path],
iolist_to_binary(join(".", Escaped)).
-
has_suffix(Bin, Suffix) when is_binary(Bin), is_binary(Suffix) ->
SBin = size(Bin),
SSuffix = size(Suffix),
- if SBin < SSuffix -> false; true ->
- PSize = SBin - SSuffix,
- case Bin of
- <<_:PSize/binary, Suffix/binary>> ->
- true;
- _ ->
- false
- end
+ if
+ SBin < SSuffix ->
+ false;
+ true ->
+ PSize = SBin - SSuffix,
+ case Bin of
+ <<_:PSize/binary, Suffix/binary>> ->
+ true;
+ _ ->
+ false
+ end
end.
-
join(_Sep, []) ->
[];
join(_Sep, [Item]) ->
@@ -297,10 +280,9 @@ join(_Sep, [Item]) ->
join(Sep, [Item | Rest]) ->
[Item, Sep | join(Sep, Rest)].
-
is_number_string(Value) when is_binary(Value) ->
is_number_string(binary_to_list(Value));
-is_number_string(Value) when is_list(Value)->
+is_number_string(Value) when is_list(Value) ->
MP = cached_re(mango_numstring_re, ?NUMSTRING),
case re:run(Value, MP) of
nomatch ->
@@ -309,7 +291,6 @@ is_number_string(Value) when is_list(Value)->
true
end.
-
cached_re(Name, RE) ->
case mochiglobal:get(Name) of
undefined ->
@@ -320,7 +301,6 @@ cached_re(Name, RE) ->
MP
end.
-
parse_field(Field) ->
case binary:match(Field, <<"\\">>, []) of
nomatch ->
@@ -331,12 +311,15 @@ parse_field(Field) ->
end.
parse_field_slow(Field) ->
- Path = lists:map(fun
- (P) when P =:= <<>> ->
- ?MANGO_ERROR({invalid_field_name, Field});
- (P) ->
- re:replace(P, <<"\\\\">>, <<>>, [global, {return, binary}])
- end, re:split(Field, <<"(?<!\\\\)\\.">>)),
+ Path = lists:map(
+ fun
+ (P) when P =:= <<>> ->
+ ?MANGO_ERROR({invalid_field_name, Field});
+ (P) ->
+ re:replace(P, <<"\\\\">>, <<>>, [global, {return, binary}])
+ end,
+ re:split(Field, <<"(?<!\\\\)\\.">>)
+ ),
{ok, Path}.
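
parse_field/1 worked once: a dot splits path components unless it is escaped with a backslash, and empty components are rejected. A sketch:

    {ok, [<<"a">>, <<"b.c">>]} = parse_field(<<"a.b\\.c">>),
    %% parse_field(<<"a..b">>) should raise
    %% ?MANGO_ERROR({invalid_field_name, Field}) via check_non_empty/2.
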
check_non_empty(Field, Parts) ->
@@ -347,7 +330,6 @@ check_non_empty(Field, Parts) ->
Parts
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").