author    Nick Vatamaniuc <vatamane@gmail.com>    2021-11-20 01:00:08 -0500
committer Nick Vatamaniuc <vatamane@gmail.com>    2021-11-20 02:09:48 -0500
commit    97f47c59e1ec8154ac6b4870f73e4821acb22ce2 (patch)
tree      82158f0b6c7e97e6955bf0c558aac6eb0329b410
parent    29640aa456897da539287f8bf6f66b7b66e48207 (diff)
download  couchdb-erlfmt-3.x.tar.gz
Apply erlfmt formatting to source tree (erlfmt-3.x)
These exceptions from main were ported over to 3.x:

```
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -491,6 +491,7 @@ extract_cookie(#httpd{mochi_req = MochiReq}) ->
     end.
 %%% end hack
+%% erlfmt-ignore
 set_auth_handlers() ->
     AuthenticationDefault = "{chttpd_auth, cookie_authentication_handler},
```

```
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -49,6 +49,7 @@ help() ->
     ].
 -spec help(Function :: atom()) -> ok.
+%% erlfmt-ignore
 help(opened_files) ->
```
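For context, a minimal illustrative sketch (hypothetical module and function names, not part of this commit) of the `%% erlfmt-ignore` annotation the ported hunks rely on: placed directly before a form, it tells erlfmt to leave that form's hand-made layout alone while the rest of the file is reformatted, which is what the hunks above do for set_auth_handlers/0 in chttpd.erl and help/1 in couch_debug.erl.

```
%% example_ignore.erl -- illustrative only, not part of this commit.
%% Placing `%% erlfmt-ignore` directly before a form keeps erlfmt from
%% reflowing that form, so the manual alignment below survives formatting.
-module(example_ignore).
-export([defaults/0]).

%% erlfmt-ignore
defaults() ->
    [
        {cookie_timeout,     600},
        {require_valid_user, false},
        {auth_cache_size,    50}
    ].
```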
-rw-r--r--dev/monitor_parent.erl2
-rw-r--r--rel/plugins/eunit_plugin.erl24
-rw-r--r--src/chttpd/src/chttpd.erl1032
-rw-r--r--src/chttpd/src/chttpd_auth.erl20
-rw-r--r--src/chttpd/src/chttpd_auth_cache.erl140
-rw-r--r--src/chttpd/src/chttpd_auth_request.erl119
-rw-r--r--src/chttpd/src/chttpd_cors.erl279
-rw-r--r--src/chttpd/src/chttpd_db.erl2678
-rw-r--r--src/chttpd/src/chttpd_epi.erl2
-rw-r--r--src/chttpd/src/chttpd_external.erl169
-rw-r--r--src/chttpd/src/chttpd_handlers.erl12
-rw-r--r--src/chttpd/src/chttpd_httpd_handlers.erl44
-rw-r--r--src/chttpd/src/chttpd_misc.erl239
-rw-r--r--src/chttpd/src/chttpd_node.erl229
-rw-r--r--src/chttpd/src/chttpd_plugin.erl3
-rw-r--r--src/chttpd/src/chttpd_prefer_header.erl34
-rw-r--r--src/chttpd/src/chttpd_rewrite.erl411
-rw-r--r--src/chttpd/src/chttpd_show.erl248
-rw-r--r--src/chttpd/src/chttpd_stats.erl13
-rw-r--r--src/chttpd/src/chttpd_sup.erl54
-rw-r--r--src/chttpd/src/chttpd_test_util.erl1
-rw-r--r--src/chttpd/src/chttpd_util.erl45
-rw-r--r--src/chttpd/src/chttpd_view.erl115
-rw-r--r--src/chttpd/src/chttpd_xframe_options.erl48
-rw-r--r--src/chttpd/test/eunit/chttpd_auth_tests.erl148
-rw-r--r--src/chttpd/test/eunit/chttpd_cors_test.erl376
-rw-r--r--src/chttpd/test/eunit/chttpd_csp_tests.erl57
-rw-r--r--src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl97
-rw-r--r--src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl186
-rw-r--r--src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl159
-rw-r--r--src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl146
-rw-r--r--src/chttpd/test/eunit/chttpd_db_test.erl818
-rw-r--r--src/chttpd/test/eunit/chttpd_dbs_info_test.erl125
-rw-r--r--src/chttpd/test/eunit/chttpd_delayed_test.erl36
-rw-r--r--src/chttpd/test/eunit/chttpd_error_info_tests.erl43
-rw-r--r--src/chttpd/test/eunit/chttpd_external_test.erl21
-rw-r--r--src/chttpd/test/eunit/chttpd_handlers_tests.erl27
-rw-r--r--src/chttpd/test/eunit/chttpd_open_revs_error_test.erl46
-rw-r--r--src/chttpd/test/eunit/chttpd_plugin_tests.erl59
-rw-r--r--src/chttpd/test/eunit/chttpd_prefer_header_test.erl37
-rw-r--r--src/chttpd/test/eunit/chttpd_purge_tests.erl382
-rw-r--r--src/chttpd/test/eunit/chttpd_security_tests.erl405
-rw-r--r--src/chttpd/test/eunit/chttpd_session_tests.erl25
-rw-r--r--src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl57
-rw-r--r--src/chttpd/test/eunit/chttpd_util_test.erl37
-rw-r--r--src/chttpd/test/eunit/chttpd_view_test.erl144
-rw-r--r--src/chttpd/test/eunit/chttpd_welcome_test.erl15
-rw-r--r--src/chttpd/test/eunit/chttpd_xframe_test.erl2
-rw-r--r--src/couch/src/couch.erl25
-rw-r--r--src/couch/src/couch_att.erl399
-rw-r--r--src/couch/src/couch_auth_cache.erl148
-rw-r--r--src/couch/src/couch_base32.erl167
-rw-r--r--src/couch/src/couch_bt_engine.erl566
-rw-r--r--src/couch/src/couch_bt_engine_compactor.erl642
-rw-r--r--src/couch/src/couch_bt_engine_header.erl206
-rw-r--r--src/couch/src/couch_bt_engine_stream.erl10
-rw-r--r--src/couch/src/couch_btree.erl1112
-rw-r--r--src/couch/src/couch_changes.erl594
-rw-r--r--src/couch/src/couch_compress.erl28
-rw-r--r--src/couch/src/couch_db.erl1771
-rw-r--r--src/couch/src/couch_db_engine.erl511
-rw-r--r--src/couch/src/couch_db_epi.erl1
-rw-r--r--src/couch/src/couch_db_header.erl193
-rw-r--r--src/couch/src/couch_db_plugin.erl12
-rw-r--r--src/couch/src/couch_db_split.erl379
-rw-r--r--src/couch/src/couch_db_updater.erl685
-rw-r--r--src/couch/src/couch_debug.erl161
-rw-r--r--src/couch/src/couch_doc.erl495
-rw-r--r--src/couch/src/couch_ejson_compare.erl91
-rw-r--r--src/couch/src/couch_ejson_size.erl57
-rw-r--r--src/couch/src/couch_emsort.erl115
-rw-r--r--src/couch/src/couch_event_sup.erl12
-rw-r--r--src/couch/src/couch_file.erl659
-rw-r--r--src/couch/src/couch_flags.erl31
-rw-r--r--src/couch/src/couch_flags_config.erl139
-rw-r--r--src/couch/src/couch_hotp.erl11
-rw-r--r--src/couch/src/couch_httpd.erl955
-rw-r--r--src/couch/src/couch_httpd_auth.erl635
-rw-r--r--src/couch/src/couch_httpd_db.erl1912
-rw-r--r--src/couch/src/couch_httpd_misc_handlers.erl288
-rw-r--r--src/couch/src/couch_httpd_multipart.erl333
-rw-r--r--src/couch/src/couch_httpd_rewrite.erl365
-rw-r--r--src/couch/src/couch_httpd_vhost.erl238
-rw-r--r--src/couch/src/couch_io_logger.erl12
-rw-r--r--src/couch/src/couch_key_tree.erl294
-rw-r--r--src/couch/src/couch_lru.erl51
-rw-r--r--src/couch/src/couch_multidb_changes.erl332
-rw-r--r--src/couch/src/couch_native_process.erl294
-rw-r--r--src/couch/src/couch_os_process.erl171
-rw-r--r--src/couch/src/couch_partition.erl43
-rw-r--r--src/couch/src/couch_passwords.erl151
-rw-r--r--src/couch/src/couch_primary_sup.erl24
-rw-r--r--src/couch/src/couch_proc_manager.erl301
-rw-r--r--src/couch/src/couch_query_servers.erl626
-rw-r--r--src/couch/src/couch_rand.erl3
-rw-r--r--src/couch/src/couch_secondary_sup.erl57
-rw-r--r--src/couch/src/couch_server.erl678
-rw-r--r--src/couch/src/couch_stream.erl192
-rw-r--r--src/couch/src/couch_sup.erl92
-rw-r--r--src/couch/src/couch_task_status.erl61
-rw-r--r--src/couch/src/couch_totp.erl13
-rw-r--r--src/couch/src/couch_users_db.erl196
-rw-r--r--src/couch/src/couch_util.erl341
-rw-r--r--src/couch/src/couch_uuids.erl21
-rw-r--r--src/couch/src/couch_work_queue.erl106
-rw-r--r--src/couch/src/test_request.erl1
-rw-r--r--src/couch/src/test_util.erl151
-rw-r--r--src/couch/test/eunit/chttpd_endpoints_tests.erl43
-rw-r--r--src/couch/test/eunit/couch_auth_cache_tests.erl167
-rw-r--r--src/couch/test/eunit/couch_bt_engine_compactor_ev.erl71
-rw-r--r--src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl88
-rw-r--r--src/couch/test/eunit/couch_bt_engine_compactor_tests.erl53
-rw-r--r--src/couch/test/eunit/couch_bt_engine_tests.erl4
-rw-r--r--src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl357
-rw-r--r--src/couch/test/eunit/couch_btree_tests.erl381
-rw-r--r--src/couch/test/eunit/couch_changes_tests.erl549
-rw-r--r--src/couch/test/eunit/couch_db_doc_tests.erl46
-rw-r--r--src/couch/test/eunit/couch_db_mpr_tests.erl76
-rw-r--r--src/couch/test/eunit/couch_db_plugin_tests.erl88
-rw-r--r--src/couch/test/eunit/couch_db_props_upgrade_tests.erl10
-rw-r--r--src/couch/test/eunit/couch_db_split_tests.erl289
-rw-r--r--src/couch/test/eunit/couch_db_tests.erl67
-rw-r--r--src/couch/test/eunit/couch_doc_json_tests.erl349
-rw-r--r--src/couch/test/eunit/couch_doc_tests.erl127
-rw-r--r--src/couch/test/eunit/couch_ejson_compare_tests.erl62
-rw-r--r--src/couch/test/eunit/couch_ejson_size_tests.erl87
-rw-r--r--src/couch/test/eunit/couch_etag_tests.erl9
-rw-r--r--src/couch/test/eunit/couch_file_tests.erl100
-rw-r--r--src/couch/test/eunit/couch_flags_config_tests.erl139
-rw-r--r--src/couch/test/eunit/couch_flags_tests.erl106
-rw-r--r--src/couch/test/eunit/couch_index_tests.erl197
-rw-r--r--src/couch/test/eunit/couch_js_tests.erl143
-rw-r--r--src/couch/test/eunit/couch_key_tree_prop_tests.erl257
-rw-r--r--src/couch/test/eunit/couch_key_tree_tests.erl583
-rw-r--r--src/couch/test/eunit/couch_passwords_tests.erl79
-rw-r--r--src/couch/test/eunit/couch_query_servers_tests.erl86
-rw-r--r--src/couch/test/eunit/couch_server_tests.erl52
-rw-r--r--src/couch/test/eunit/couch_stream_tests.erl24
-rw-r--r--src/couch/test/eunit/couch_task_status_tests.erl64
-rw-r--r--src/couch/test/eunit/couch_util_tests.erl44
-rw-r--r--src/couch/test/eunit/couch_uuids_tests.erl18
-rw-r--r--src/couch/test/eunit/couch_work_queue_tests.erl82
-rw-r--r--src/couch/test/eunit/couchdb_attachments_tests.erl446
-rw-r--r--src/couch/test/eunit/couchdb_auth_tests.erl65
-rwxr-xr-xsrc/couch/test/eunit/couchdb_cookie_domain_tests.erl20
-rw-r--r--src/couch/test/eunit/couchdb_cors_tests.erl319
-rw-r--r--src/couch/test/eunit/couchdb_db_tests.erl13
-rw-r--r--src/couch/test/eunit/couchdb_design_doc_tests.erl58
-rw-r--r--src/couch/test/eunit/couchdb_file_compression_tests.erl71
-rw-r--r--src/couch/test/eunit/couchdb_location_header_tests.erl21
-rw-r--r--src/couch/test/eunit/couchdb_mrview_cors_tests.erl44
-rw-r--r--src/couch/test/eunit/couchdb_mrview_tests.erl131
-rw-r--r--src/couch/test/eunit/couchdb_os_proc_pool.erl116
-rw-r--r--src/couch/test/eunit/couchdb_update_conflicts_tests.erl242
-rw-r--r--src/couch/test/eunit/couchdb_vhosts_tests.erl252
-rw-r--r--src/couch/test/eunit/couchdb_views_tests.erl410
-rw-r--r--src/couch/test/eunit/global_changes_tests.erl25
-rw-r--r--src/couch/test/eunit/json_stream_parse_tests.erl128
-rw-r--r--src/couch/test/eunit/test_web.erl22
-rw-r--r--src/couch_dist/src/couch_dist.erl46
-rw-r--r--src/couch_dist/test/eunit/couch_dist_tests.erl26
-rw-r--r--src/couch_epi/src/couch_epi.erl88
-rw-r--r--src/couch_epi/src/couch_epi_codechange_monitor.erl10
-rw-r--r--src/couch_epi/src/couch_epi_codegen.erl45
-rw-r--r--src/couch_epi/src/couch_epi_data.erl11
-rw-r--r--src/couch_epi/src/couch_epi_data_gen.erl173
-rw-r--r--src/couch_epi/src/couch_epi_functions.erl10
-rw-r--r--src/couch_epi/src/couch_epi_functions_gen.erl330
-rw-r--r--src/couch_epi/src/couch_epi_module_keeper.erl31
-rw-r--r--src/couch_epi/src/couch_epi_plugin.erl190
-rw-r--r--src/couch_epi/src/couch_epi_sup.erl59
-rw-r--r--src/couch_epi/src/couch_epi_util.erl2
-rw-r--r--src/couch_epi/test/eunit/couch_epi_basic_test.erl110
-rw-r--r--src/couch_epi/test/eunit/couch_epi_tests.erl378
-rw-r--r--src/couch_event/src/couch_event.erl9
-rw-r--r--src/couch_event/src/couch_event_app.erl2
-rw-r--r--src/couch_event/src/couch_event_listener.erl76
-rw-r--r--src/couch_event/src/couch_event_listener_mfa.erl43
-rw-r--r--src/couch_event/src/couch_event_os_listener.erl19
-rw-r--r--src/couch_event/src/couch_event_server.erl70
-rw-r--r--src/couch_event/src/couch_event_sup2.erl14
-rw-r--r--src/couch_index/src/couch_index.erl194
-rw-r--r--src/couch_index/src/couch_index_compactor.erl37
-rw-r--r--src/couch_index/src/couch_index_plugin_couch_db.erl2
-rw-r--r--src/couch_index/src/couch_index_server.erl162
-rw-r--r--src/couch_index/src/couch_index_sup.erl3
-rw-r--r--src/couch_index/src/couch_index_updater.erl130
-rw-r--r--src/couch_index/src/couch_index_util.erl34
-rw-r--r--src/couch_index/test/eunit/couch_index_compaction_tests.erl28
-rw-r--r--src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl86
-rw-r--r--src/couch_log/src/couch_log.erl11
-rw-r--r--src/couch_log/src/couch_log_app.erl1
-rw-r--r--src/couch_log/src/couch_log_config.erl49
-rw-r--r--src/couch_log/src/couch_log_config_dyn.erl2
-rw-r--r--src/couch_log/src/couch_log_error_logger_h.erl9
-rw-r--r--src/couch_log/src/couch_log_formatter.erl287
-rw-r--r--src/couch_log/src/couch_log_monitor.erl11
-rw-r--r--src/couch_log/src/couch_log_server.erl27
-rw-r--r--src/couch_log/src/couch_log_sup.erl12
-rw-r--r--src/couch_log/src/couch_log_trunc_io.erl873
-rw-r--r--src/couch_log/src/couch_log_trunc_io_fmt.erl433
-rw-r--r--src/couch_log/src/couch_log_util.erl170
-rw-r--r--src/couch_log/src/couch_log_writer.erl24
-rw-r--r--src/couch_log/src/couch_log_writer_file.erl11
-rw-r--r--src/couch_log/src/couch_log_writer_journald.erl24
-rw-r--r--src/couch_log/src/couch_log_writer_stderr.erl5
-rw-r--r--src/couch_log/src/couch_log_writer_syslog.erl169
-rw-r--r--src/couch_log/test/eunit/couch_log_config_listener_test.erl35
-rw-r--r--src/couch_log/test/eunit/couch_log_config_test.erl31
-rw-r--r--src/couch_log/test/eunit/couch_log_error_logger_h_test.erl17
-rw-r--r--src/couch_log/test/eunit/couch_log_formatter_test.erl123
-rw-r--r--src/couch_log/test/eunit/couch_log_monitor_test.erl21
-rw-r--r--src/couch_log/test/eunit/couch_log_server_test.erl32
-rw-r--r--src/couch_log/test/eunit/couch_log_test.erl26
-rw-r--r--src/couch_log/test/eunit/couch_log_test_util.erl108
-rw-r--r--src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl27
-rw-r--r--src/couch_log/test/eunit/couch_log_util_test.erl68
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_ets.erl5
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_file_test.erl74
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_stderr_test.erl27
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_syslog_test.erl108
-rw-r--r--src/couch_log/test/eunit/couch_log_writer_test.erl14
-rw-r--r--src/couch_mrview/src/couch_mrview.erl609
-rw-r--r--src/couch_mrview/src/couch_mrview_cleanup.erl63
-rw-r--r--src/couch_mrview/src/couch_mrview_compactor.erl173
-rw-r--r--src/couch_mrview/src/couch_mrview_http.erl517
-rw-r--r--src/couch_mrview/src/couch_mrview_index.erl174
-rw-r--r--src/couch_mrview/src/couch_mrview_show.erl357
-rw-r--r--src/couch_mrview/src/couch_mrview_test_util.erl141
-rw-r--r--src/couch_mrview/src/couch_mrview_update_notifier.erl4
-rw-r--r--src/couch_mrview/src/couch_mrview_updater.erl264
-rw-r--r--src/couch_mrview/src/couch_mrview_util.erl778
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl109
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl121
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl57
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl84
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl655
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl106
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_http_tests.erl23
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl20
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl112
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl114
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl352
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl468
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl66
-rw-r--r--src/couch_mrview/test/eunit/couch_mrview_util_tests.erl28
-rw-r--r--src/couch_peruser/src/couch_peruser.erl439
-rw-r--r--src/couch_peruser/src/couch_peruser_app.erl3
-rw-r--r--src/couch_peruser/src/couch_peruser_sup.erl5
-rw-r--r--src/couch_peruser/test/eunit/couch_peruser_test.erl207
-rw-r--r--src/couch_plugins/src/couch_plugins.erl286
-rw-r--r--src/couch_plugins/src/couch_plugins_httpd.erl30
-rw-r--r--src/couch_prometheus/src/couch_prometheus_http.erl54
-rw-r--r--src/couch_prometheus/src/couch_prometheus_server.erl73
-rw-r--r--src/couch_prometheus/src/couch_prometheus_sup.erl3
-rw-r--r--src/couch_prometheus/src/couch_prometheus_util.erl71
-rw-r--r--src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl72
-rw-r--r--src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl74
-rw-r--r--src/couch_pse_tests/src/cpse_gather.erl46
-rw-r--r--src/couch_pse_tests/src/cpse_test_attachments.erl31
-rw-r--r--src/couch_pse_tests/src/cpse_test_compaction.erl125
-rw-r--r--src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl52
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_changes.erl79
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_docs.erl252
-rw-r--r--src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl86
-rw-r--r--src/couch_pse_tests/src/cpse_test_get_set_props.erl29
-rw-r--r--src/couch_pse_tests/src/cpse_test_open_close_delete.erl19
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl39
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_docs.erl89
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_replication.erl29
-rw-r--r--src/couch_pse_tests/src/cpse_test_purge_seqs.erl17
-rw-r--r--src/couch_pse_tests/src/cpse_test_read_write_docs.erl44
-rw-r--r--src/couch_pse_tests/src/cpse_test_ref_counting.erl20
-rw-r--r--src/couch_pse_tests/src/cpse_util.erl327
-rw-r--r--src/couch_replicator/src/couch_replicator.erl259
-rw-r--r--src/couch_replicator/src/couch_replicator_api_wrap.erl925
-rw-r--r--src/couch_replicator/src/couch_replicator_auth.erl11
-rw-r--r--src/couch_replicator/src/couch_replicator_auth_noop.erl8
-rw-r--r--src/couch_replicator/src/couch_replicator_auth_session.erl217
-rw-r--r--src/couch_replicator/src/couch_replicator_changes_reader.erl118
-rw-r--r--src/couch_replicator/src/couch_replicator_clustering.erl82
-rw-r--r--src/couch_replicator/src/couch_replicator_connection.erl171
-rw-r--r--src/couch_replicator/src/couch_replicator_db_changes.erl47
-rw-r--r--src/couch_replicator/src/couch_replicator_doc_processor.erl524
-rw-r--r--src/couch_replicator/src/couch_replicator_doc_processor_worker.erl245
-rw-r--r--src/couch_replicator/src/couch_replicator_docs.erl721
-rw-r--r--src/couch_replicator/src/couch_replicator_fabric.erl75
-rw-r--r--src/couch_replicator/src/couch_replicator_fabric_rpc.erl26
-rw-r--r--src/couch_replicator/src/couch_replicator_filters.erl129
-rw-r--r--src/couch_replicator/src/couch_replicator_httpc.erl365
-rw-r--r--src/couch_replicator/src/couch_replicator_httpc_pool.erl109
-rw-r--r--src/couch_replicator/src/couch_replicator_httpd.erl123
-rw-r--r--src/couch_replicator/src/couch_replicator_httpd_util.erl154
-rw-r--r--src/couch_replicator/src/couch_replicator_ids.erl297
-rw-r--r--src/couch_replicator/src/couch_replicator_job_sup.erl2
-rw-r--r--src/couch_replicator/src/couch_replicator_notifier.erl8
-rw-r--r--src/couch_replicator/src/couch_replicator_rate_limiter.erl69
-rw-r--r--src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl12
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler.erl700
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler_job.erl920
-rw-r--r--src/couch_replicator/src/couch_replicator_scheduler_sup.erl9
-rw-r--r--src/couch_replicator/src/couch_replicator_share.erl157
-rw-r--r--src/couch_replicator/src/couch_replicator_stats.erl34
-rw-r--r--src/couch_replicator/src/couch_replicator_sup.erl66
-rw-r--r--src/couch_replicator/src/couch_replicator_utils.erl445
-rw-r--r--src/couch_replicator/src/couch_replicator_worker.erl454
-rw-r--r--src/couch_replicator/src/json_stream_parse.erl391
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl57
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl404
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl148
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl77
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl69
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl196
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl28
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl38
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl28
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl126
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl41
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl97
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl10
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl103
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl39
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl151
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_test_helper.erl62
-rw-r--r--src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl93
-rw-r--r--src/couch_stats/src/couch_stats.erl14
-rw-r--r--src/couch_stats/src/couch_stats_aggregator.erl22
-rw-r--r--src/couch_stats/src/couch_stats_httpd.erl60
-rw-r--r--src/couch_stats/src/couch_stats_process_tracker.erl6
-rw-r--r--src/couch_stats/src/couch_stats_sup.erl15
-rw-r--r--src/couch_tests/setups/couch_epi_dispatch.erl9
-rw-r--r--src/couch_tests/src/couch_tests.erl69
-rw-r--r--src/couch_tests/src/couch_tests_combinatorics.erl5
-rw-r--r--src/couch_tests/test/couch_tests_app_tests.erl65
-rw-r--r--src/custodian/src/custodian.erl2
-rw-r--r--src/custodian/src/custodian_db_checker.erl60
-rw-r--r--src/custodian/src/custodian_monitor.erl6
-rw-r--r--src/custodian/src/custodian_noop_monitor.erl5
-rw-r--r--src/custodian/src/custodian_server.erl84
-rw-r--r--src/custodian/src/custodian_sup.erl16
-rw-r--r--src/custodian/src/custodian_util.erl213
-rw-r--r--src/ddoc_cache/src/ddoc_cache.erl9
-rw-r--r--src/ddoc_cache/src/ddoc_cache_app.erl3
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry.erl73
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_custom.erl5
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl7
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl8
-rw-r--r--src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl20
-rw-r--r--src/ddoc_cache/src/ddoc_cache_lru.erl146
-rw-r--r--src/ddoc_cache/src/ddoc_cache_opener.erl9
-rw-r--r--src/ddoc_cache/src/ddoc_cache_sup.erl3
-rw-r--r--src/ddoc_cache/src/ddoc_cache_value.erl3
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl16
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl6
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl6
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl69
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_ev.erl1
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl9
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl148
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl14
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl5
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl61
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl2
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl15
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl71
-rw-r--r--src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl64
-rw-r--r--src/dreyfus/src/clouseau_rpc.erl15
-rw-r--r--src/dreyfus/src/dreyfus_app.erl1
-rw-r--r--src/dreyfus/src/dreyfus_bookmark.erl52
-rw-r--r--src/dreyfus/src/dreyfus_config.erl11
-rw-r--r--src/dreyfus/src/dreyfus_epi.erl13
-rw-r--r--src/dreyfus/src/dreyfus_fabric.erl195
-rw-r--r--src/dreyfus/src/dreyfus_fabric_cleanup.erl97
-rw-r--r--src/dreyfus/src/dreyfus_fabric_group1.erl98
-rw-r--r--src/dreyfus/src/dreyfus_fabric_group2.erl129
-rw-r--r--src/dreyfus/src/dreyfus_fabric_info.erl102
-rw-r--r--src/dreyfus/src/dreyfus_fabric_search.erl288
-rw-r--r--src/dreyfus/src/dreyfus_httpd.erl563
-rw-r--r--src/dreyfus/src/dreyfus_httpd_handlers.erl5
-rw-r--r--src/dreyfus/src/dreyfus_index.erl369
-rw-r--r--src/dreyfus/src/dreyfus_index_manager.erl92
-rw-r--r--src/dreyfus/src/dreyfus_index_updater.erl61
-rw-r--r--src/dreyfus/src/dreyfus_plugin_couch_db.erl2
-rw-r--r--src/dreyfus/src/dreyfus_rpc.erl2
-rw-r--r--src/dreyfus/src/dreyfus_sup.erl4
-rw-r--r--src/dreyfus/src/dreyfus_util.erl385
-rw-r--r--src/dreyfus/test/dreyfus_blacklist_await_test.erl24
-rw-r--r--src/dreyfus/test/dreyfus_blacklist_request_test.erl114
-rw-r--r--src/dreyfus/test/dreyfus_config_test.erl17
-rw-r--r--src/dreyfus/test/dreyfus_purge_test.erl681
-rw-r--r--src/fabric/src/fabric.erl587
-rw-r--r--src/fabric/src/fabric_db_create.erl227
-rw-r--r--src/fabric/src/fabric_db_delete.erl75
-rw-r--r--src/fabric/src/fabric_db_doc_count.erl17
-rw-r--r--src/fabric/src/fabric_db_info.erl120
-rw-r--r--src/fabric/src/fabric_db_meta.erl112
-rw-r--r--src/fabric/src/fabric_db_partition_info.erl49
-rw-r--r--src/fabric/src/fabric_db_update_listener.erl68
-rw-r--r--src/fabric/src/fabric_db_uuids.erl19
-rw-r--r--src/fabric/src/fabric_design_doc_count.erl5
-rw-r--r--src/fabric/src/fabric_dict.erl3
-rw-r--r--src/fabric/src/fabric_doc_atts.erl133
-rw-r--r--src/fabric/src/fabric_doc_missing_revs.erl81
-rw-r--r--src/fabric/src/fabric_doc_open.erl383
-rw-r--r--src/fabric/src/fabric_doc_open_revs.erl361
-rw-r--r--src/fabric/src/fabric_doc_purge.erl218
-rw-r--r--src/fabric/src/fabric_doc_update.erl408
-rw-r--r--src/fabric/src/fabric_group_info.erl142
-rw-r--r--src/fabric/src/fabric_ring.erl153
-rw-r--r--src/fabric/src/fabric_rpc.erl418
-rw-r--r--src/fabric/src/fabric_streams.erl132
-rw-r--r--src/fabric/src/fabric_util.erl250
-rw-r--r--src/fabric/src/fabric_view.erl521
-rw-r--r--src/fabric/src/fabric_view_all_docs.erl337
-rw-r--r--src/fabric/src/fabric_view_changes.erl784
-rw-r--r--src/fabric/src/fabric_view_map.erl182
-rw-r--r--src/fabric/src/fabric_view_reduce.erl116
-rw-r--r--src/fabric/test/eunit/fabric_db_create_tests.erl6
-rw-r--r--src/fabric/test/eunit/fabric_db_info_tests.erl6
-rw-r--r--src/fabric/test/eunit/fabric_db_uuids_tests.erl16
-rw-r--r--src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl25
-rw-r--r--src/fabric/test/eunit/fabric_rpc_purge_tests.erl85
-rw-r--r--src/fabric/test/eunit/fabric_rpc_tests.erl96
-rw-r--r--src/fabric/test/eunit/fabric_tests.erl31
-rw-r--r--src/global_changes/src/global_changes_app.erl3
-rw-r--r--src/global_changes/src/global_changes_epi.erl1
-rw-r--r--src/global_changes/src/global_changes_httpd.erl217
-rw-r--r--src/global_changes/src/global_changes_listener.erl178
-rw-r--r--src/global_changes/src/global_changes_plugin.erl1
-rw-r--r--src/global_changes/src/global_changes_server.erl165
-rw-r--r--src/global_changes/src/global_changes_sup.erl22
-rw-r--r--src/global_changes/src/global_changes_util.erl2
-rw-r--r--src/global_changes/test/eunit/global_changes_hooks_tests.erl22
-rw-r--r--src/ioq/src/ioq.erl43
-rw-r--r--src/ioq/src/ioq_sup.erl2
-rw-r--r--src/jwtf/src/jwtf.erl83
-rw-r--r--src/jwtf/src/jwtf_keystore.erl27
-rw-r--r--src/jwtf/src/jwtf_sup.erl2
-rw-r--r--src/jwtf/test/jwtf_keystore_tests.erl40
-rw-r--r--src/jwtf/test/jwtf_tests.erl296
-rw-r--r--src/ken/src/ken_event_handler.erl1
-rw-r--r--src/ken/src/ken_server.erl464
-rw-r--r--src/ken/src/ken_sup.erl3
-rw-r--r--src/ken/test/ken_server_test.erl31
-rw-r--r--src/mango/src/mango_crud.erl58
-rw-r--r--src/mango/src/mango_cursor.erl141
-rw-r--r--src/mango/src/mango_cursor_special.erl4
-rw-r--r--src/mango/src/mango_cursor_text.erl122
-rw-r--r--src/mango/src/mango_cursor_view.erl181
-rw-r--r--src/mango/src/mango_doc.erl394
-rw-r--r--src/mango/src/mango_epi.erl2
-rw-r--r--src/mango/src/mango_error.erl67
-rw-r--r--src/mango/src/mango_execution_stats.erl23
-rw-r--r--src/mango/src/mango_fields.erl30
-rw-r--r--src/mango/src/mango_httpd.erl235
-rw-r--r--src/mango/src/mango_httpd_handlers.erl6
-rw-r--r--src/mango/src/mango_idx.erl237
-rw-r--r--src/mango/src/mango_idx_special.erl33
-rw-r--r--src/mango/src/mango_idx_text.erl259
-rw-r--r--src/mango/src/mango_idx_view.erl189
-rw-r--r--src/mango/src/mango_json.erl11
-rw-r--r--src/mango/src/mango_json_bookmark.erl25
-rw-r--r--src/mango/src/mango_native_proc.erl186
-rw-r--r--src/mango/src/mango_opts.erl37
-rw-r--r--src/mango/src/mango_selector.erl639
-rw-r--r--src/mango/src/mango_selector_text.erl191
-rw-r--r--src/mango/src/mango_sort.erl7
-rw-r--r--src/mango/src/mango_sup.erl3
-rw-r--r--src/mango/src/mango_util.erl143
-rw-r--r--src/mem3/src/mem3.erl287
-rw-r--r--src/mem3/src/mem3_bdu.erl59
-rw-r--r--src/mem3/src/mem3_cluster.erl48
-rw-r--r--src/mem3/src/mem3_epi.erl2
-rw-r--r--src/mem3/src/mem3_hash.erl17
-rw-r--r--src/mem3/src/mem3_httpd.erl100
-rw-r--r--src/mem3/src/mem3_httpd_handlers.erl2
-rw-r--r--src/mem3/src/mem3_nodes.erl104
-rw-r--r--src/mem3/src/mem3_plugin_couch_db.erl1
-rw-r--r--src/mem3/src/mem3_rep.erl687
-rw-r--r--src/mem3/src/mem3_reshard.erl207
-rw-r--r--src/mem3/src/mem3_reshard_api.erl130
-rw-r--r--src/mem3/src/mem3_reshard_dbdoc.erl73
-rw-r--r--src/mem3/src/mem3_reshard_httpd.erl182
-rw-r--r--src/mem3/src/mem3_reshard_index.erl39
-rw-r--r--src/mem3/src/mem3_reshard_job.erl263
-rw-r--r--src/mem3/src/mem3_reshard_job_sup.erl15
-rw-r--r--src/mem3/src/mem3_reshard_store.erl81
-rw-r--r--src/mem3/src/mem3_reshard_sup.erl27
-rw-r--r--src/mem3/src/mem3_reshard_validate.erl41
-rw-r--r--src/mem3/src/mem3_rpc.erl688
-rw-r--r--src/mem3/src/mem3_seeds.erl78
-rw-r--r--src/mem3/src/mem3_shards.erl355
-rw-r--r--src/mem3/src/mem3_sup.erl5
-rw-r--r--src/mem3/src/mem3_sync.erl287
-rw-r--r--src/mem3/src/mem3_sync_event.erl21
-rw-r--r--src/mem3/src/mem3_sync_event_listener.erl61
-rw-r--r--src/mem3/src/mem3_sync_nodes.erl37
-rw-r--r--src/mem3/src/mem3_sync_security.erl74
-rw-r--r--src/mem3/src/mem3_util.erl599
-rw-r--r--src/mem3/test/eunit/mem3_bdu_test.erl35
-rw-r--r--src/mem3/test/eunit/mem3_cluster_test.erl42
-rw-r--r--src/mem3/test/eunit/mem3_hash_test.erl10
-rw-r--r--src/mem3/test/eunit/mem3_rep_test.erl289
-rw-r--r--src/mem3/test/eunit/mem3_reshard_api_test.erl1307
-rw-r--r--src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl461
-rw-r--r--src/mem3/test/eunit/mem3_reshard_test.erl1172
-rw-r--r--src/mem3/test/eunit/mem3_ring_prop_tests.erl127
-rw-r--r--src/mem3/test/eunit/mem3_seeds_test.erl6
-rw-r--r--src/mem3/test/eunit/mem3_shards_test.erl73
-rw-r--r--src/mem3/test/eunit/mem3_sync_security_test.erl9
-rw-r--r--src/mem3/test/eunit/mem3_util_test.erl162
-rw-r--r--src/rexi/src/rexi.erl79
-rw-r--r--src/rexi/src/rexi_app.erl1
-rw-r--r--src/rexi/src/rexi_buffer.erl29
-rw-r--r--src/rexi/src/rexi_monitor.erl24
-rw-r--r--src/rexi/src/rexi_server.erl132
-rw-r--r--src/rexi/src/rexi_server_mon.erl54
-rw-r--r--src/rexi/src/rexi_server_sup.erl3
-rw-r--r--src/rexi/src/rexi_sup.erl87
-rw-r--r--src/rexi/src/rexi_utils.erl102
-rw-r--r--src/setup/src/setup.erl127
-rw-r--r--src/setup/src/setup_epi.erl3
-rw-r--r--src/setup/src/setup_httpd.erl122
-rw-r--r--src/setup/src/setup_sup.erl4
-rw-r--r--src/smoosh/src/smoosh.erl45
-rw-r--r--src/smoosh/src/smoosh_channel.erl336
-rw-r--r--src/smoosh/src/smoosh_priority_queue.erl67
-rw-r--r--src/smoosh/src/smoosh_server.erl339
-rw-r--r--src/smoosh/src/smoosh_sup.erl2
-rw-r--r--src/smoosh/src/smoosh_utils.erl45
-rw-r--r--src/weatherreport/src/weatherreport.erl128
-rw-r--r--src/weatherreport/src/weatherreport_check.erl34
-rw-r--r--src/weatherreport/src/weatherreport_check_custodian.erl10
-rw-r--r--src/weatherreport/src/weatherreport_check_disk.erl81
-rw-r--r--src/weatherreport/src/weatherreport_check_internal_replication.erl16
-rw-r--r--src/weatherreport/src/weatherreport_check_ioq.erl20
-rw-r--r--src/weatherreport/src/weatherreport_check_mem3_sync.erl10
-rw-r--r--src/weatherreport/src/weatherreport_check_membership.erl13
-rw-r--r--src/weatherreport/src/weatherreport_check_memory_use.erl12
-rw-r--r--src/weatherreport/src/weatherreport_check_message_queues.erl13
-rw-r--r--src/weatherreport/src/weatherreport_check_node_stats.erl16
-rw-r--r--src/weatherreport/src/weatherreport_check_nodes_connected.erl19
-rw-r--r--src/weatherreport/src/weatherreport_check_process_calls.erl129
-rw-r--r--src/weatherreport/src/weatherreport_check_process_memory.erl13
-rw-r--r--src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl29
-rw-r--r--src/weatherreport/src/weatherreport_check_search.erl10
-rw-r--r--src/weatherreport/src/weatherreport_check_tcp_queues.erl27
-rw-r--r--src/weatherreport/src/weatherreport_config.erl70
-rw-r--r--src/weatherreport/src/weatherreport_getopt.erl248
-rw-r--r--src/weatherreport/src/weatherreport_log.erl45
-rw-r--r--src/weatherreport/src/weatherreport_node.erl49
-rw-r--r--src/weatherreport/src/weatherreport_runner.erl65
-rw-r--r--src/weatherreport/src/weatherreport_util.erl50
553 files changed, 45794 insertions, 38647 deletions
diff --git a/dev/monitor_parent.erl b/dev/monitor_parent.erl
index 382f37e9c..0e9e6c5b7 100644
--- a/dev/monitor_parent.erl
+++ b/dev/monitor_parent.erl
@@ -14,12 +14,10 @@
-export([start/0]).
-
start() ->
{ok, [[PPid]]} = init:get_argument(parent_pid),
spawn(fun() -> monitor_parent(PPid) end).
-
monitor_parent(PPid) ->
timer:sleep(1000),
case os:type() of
diff --git a/rel/plugins/eunit_plugin.erl b/rel/plugins/eunit_plugin.erl
index 1de20b394..69003aba6 100644
--- a/rel/plugins/eunit_plugin.erl
+++ b/rel/plugins/eunit_plugin.erl
@@ -12,21 +12,18 @@
-module(eunit_plugin).
-
-export([setup_eunit/2]).
-
setup_eunit(Config, AppFile) ->
case is_base_dir(Config) of
false -> ok;
true -> build_eunit_config(Config, AppFile)
end.
-
%% from https://github.com/ChicagoBoss/ChicagoBoss/blob/master/skel/priv/rebar/boss_plugin.erl
is_base_dir(RebarConf) ->
- filename:absname(rebar_utils:get_cwd()) =:= rebar_config:get_xconf(RebarConf, base_dir, undefined).
-
+ filename:absname(rebar_utils:get_cwd()) =:=
+ rebar_config:get_xconf(RebarConf, base_dir, undefined).
build_eunit_config(Config0, AppFile) ->
Cwd = filename:absname(rebar_utils:get_cwd()),
@@ -40,15 +37,16 @@ build_eunit_config(Config0, AppFile) ->
Config = rebar_config:set_global(Config3, view_index_dir, ViewIndexDir),
rebar_templater:create(Config, AppFile).
-
cleanup_dirs(Dirs) ->
- lists:foreach(fun(Dir) ->
- case filelib:is_dir(Dir) of
- true -> del_dir(Dir);
- false -> ok
- end
- end, Dirs).
-
+ lists:foreach(
+ fun(Dir) ->
+ case filelib:is_dir(Dir) of
+ true -> del_dir(Dir);
+ false -> ok
+ end
+ end,
+ Dirs
+ ).
del_dir(Dir) ->
All = filelib:wildcard(Dir ++ "/**"),
diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl
index aea1ee407..3cfdb5584 100644
--- a/src/chttpd/src/chttpd.erl
+++ b/src/chttpd/src/chttpd.erl
@@ -17,30 +17,65 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("chttpd/include/chttpd.hrl").
--export([start_link/0, start_link/1, start_link/2,
- stop/0, handle_request/1, handle_request_int/1,
- primary_header_value/2, header_value/2, header_value/3, qs_value/2,
- qs_value/3, qs/1, qs_json_value/3, path/1, absolute_uri/2, body_length/1,
- verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4,
- error_info/1, parse_form/1, json_body/1, json_body_obj/1, body/1,
- doc_etag/1, make_etag/1, etag_respond/3, etag_match/2,
- partition/1, serve_file/3, serve_file/4,
- server_header/0, start_chunked_response/3,send_chunk/2,
- start_response_length/4, send/2, start_json_response/2,
- start_json_response/3, end_json_response/1, send_response/4,
+-export([
+ start_link/0, start_link/1, start_link/2,
+ stop/0,
+ handle_request/1,
+ handle_request_int/1,
+ primary_header_value/2,
+ header_value/2, header_value/3,
+ qs_value/2,
+ qs_value/3,
+ qs/1,
+ qs_json_value/3,
+ path/1,
+ absolute_uri/2,
+ body_length/1,
+ verify_is_server_admin/1,
+ unquote/1,
+ quote/1,
+ recv/2,
+ recv_chunked/4,
+ error_info/1,
+ parse_form/1,
+ json_body/1,
+ json_body_obj/1,
+ body/1,
+ doc_etag/1,
+ make_etag/1,
+ etag_respond/3,
+ etag_match/2,
+ partition/1,
+ serve_file/3, serve_file/4,
+ server_header/0,
+ start_chunked_response/3,
+ send_chunk/2,
+ start_response_length/4,
+ send/2,
+ start_json_response/2,
+ start_json_response/3,
+ end_json_response/1,
+ send_response/4,
send_response_no_cors/4,
- send_method_not_allowed/2, send_error/2, send_error/4, send_redirect/2,
- send_chunked_error/2, send_json/2,send_json/3,send_json/4,
- validate_ctype/2]).
+ send_method_not_allowed/2,
+ send_error/2, send_error/4,
+ send_redirect/2,
+ send_chunked_error/2,
+ send_json/2, send_json/3, send_json/4,
+ validate_ctype/2
+]).
-export([authenticate_request/3]).
--export([start_delayed_json_response/2, start_delayed_json_response/3,
- start_delayed_json_response/4,
+-export([
+ start_delayed_json_response/2, start_delayed_json_response/3, start_delayed_json_response/4,
start_delayed_chunked_response/3, start_delayed_chunked_response/4,
- send_delayed_chunk/2, send_delayed_last_chunk/1,
- send_delayed_error/2, end_delayed_json_response/1,
- get_delayed_req/1]).
+ send_delayed_chunk/2,
+ send_delayed_last_chunk/1,
+ send_delayed_error/2,
+ end_delayed_json_response/1,
+ get_delayed_req/1
+]).
-export([
chunked_response_buffer_size/0,
@@ -53,8 +88,8 @@
code,
headers,
chunks,
- resp=nil,
- buffer_response=false
+ resp = nil,
+ buffer_response = false
}).
-define(DEFAULT_SERVER_OPTIONS, "[{recbuf, undefined}]").
@@ -65,23 +100,28 @@ start_link() ->
start_link(http) ->
Port = config:get("chttpd", "port", "5984"),
start_link(?MODULE, [{port, Port}]);
-
start_link(https) ->
Port = config:get("ssl", "port", "6984"),
{ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", "undefined")),
{ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", "undefined")),
- {ok, SecureRenegotiate} = couch_util:parse_term(config:get("ssl", "secure_renegotiate", "undefined")),
+ {ok, SecureRenegotiate} = couch_util:parse_term(
+ config:get("ssl", "secure_renegotiate", "undefined")
+ ),
ServerOpts0 =
- [{cacertfile, config:get("ssl", "cacert_file", undefined)},
- {keyfile, config:get("ssl", "key_file", undefined)},
- {certfile, config:get("ssl", "cert_file", undefined)},
- {password, config:get("ssl", "password", undefined)},
- {secure_renegotiate, SecureRenegotiate},
- {versions, Versions},
- {ciphers, Ciphers}],
-
- case (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
- couch_util:get_value(certfile, ServerOpts0) == undefined) of
+ [
+ {cacertfile, config:get("ssl", "cacert_file", undefined)},
+ {keyfile, config:get("ssl", "key_file", undefined)},
+ {certfile, config:get("ssl", "cert_file", undefined)},
+ {password, config:get("ssl", "password", undefined)},
+ {secure_renegotiate, SecureRenegotiate},
+ {versions, Versions},
+ {ciphers, Ciphers}
+ ],
+
+ case
+ (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
+ couch_util:get_value(certfile, ServerOpts0) == undefined)
+ of
true ->
io:format("SSL enabled but PEM certificates are missing.", []),
throw({error, missing_certs});
@@ -89,66 +129,85 @@ start_link(https) ->
ok
end,
- ServerOpts = [Opt || {_, V}=Opt <- ServerOpts0, V /= undefined],
-
- ClientOpts = case config:get("ssl", "verify_ssl_certificates", "false") of
- "false" ->
- [];
- "true" ->
- FailIfNoPeerCert = case config:get("ssl", "fail_if_no_peer_cert", "false") of
- "false" -> false;
- "true" -> true
- end,
- [{depth, list_to_integer(config:get("ssl",
- "ssl_certificate_max_depth", "1"))},
- {fail_if_no_peer_cert, FailIfNoPeerCert},
- {verify, verify_peer}] ++
- case config:get("ssl", "verify_fun", undefined) of
- undefined -> [];
- SpecStr ->
- [{verify_fun, couch_httpd:make_arity_3_fun(SpecStr)}]
- end
- end,
+ ServerOpts = [Opt || {_, V} = Opt <- ServerOpts0, V /= undefined],
+
+ ClientOpts =
+ case config:get("ssl", "verify_ssl_certificates", "false") of
+ "false" ->
+ [];
+ "true" ->
+ FailIfNoPeerCert =
+ case config:get("ssl", "fail_if_no_peer_cert", "false") of
+ "false" -> false;
+ "true" -> true
+ end,
+ [
+ {depth,
+ list_to_integer(
+ config:get(
+ "ssl",
+ "ssl_certificate_max_depth",
+ "1"
+ )
+ )},
+ {fail_if_no_peer_cert, FailIfNoPeerCert},
+ {verify, verify_peer}
+ ] ++
+ case config:get("ssl", "verify_fun", undefined) of
+ undefined -> [];
+ SpecStr -> [{verify_fun, couch_httpd:make_arity_3_fun(SpecStr)}]
+ end
+ end,
SslOpts = ServerOpts ++ ClientOpts,
Options0 =
- [{port, Port},
- {ssl, true},
- {ssl_opts, SslOpts}],
+ [
+ {port, Port},
+ {ssl, true},
+ {ssl_opts, SslOpts}
+ ],
CustomServerOpts = get_server_options("httpsd"),
Options = merge_server_options(Options0, CustomServerOpts),
start_link(https, Options).
start_link(Name, Options) ->
- IP = case config:get("chttpd", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
+ IP =
+ case config:get("chttpd", "bind_address", "any") of
+ "any" -> any;
+ Else -> Else
+ end,
ok = couch_httpd:validate_bind_address(IP),
set_auth_handlers(),
- Options1 = Options ++ [
- {loop, fun ?MODULE:handle_request/1},
- {name, Name},
- {ip, IP}
- ],
+ Options1 =
+ Options ++
+ [
+ {loop, fun ?MODULE:handle_request/1},
+ {name, Name},
+ {ip, IP}
+ ],
ServerOpts = get_server_options("chttpd"),
Options2 = merge_server_options(Options1, ServerOpts),
case mochiweb_http:start(Options2) of
- {ok, Pid} ->
- {ok, Pid};
- {error, Reason} ->
- io:format("Failure to start Mochiweb: ~s~n", [Reason]),
- {error, Reason}
+ {ok, Pid} ->
+ {ok, Pid};
+ {error, Reason} ->
+ io:format("Failure to start Mochiweb: ~s~n", [Reason]),
+ {error, Reason}
end.
get_server_options(Module) ->
ServerOptsCfg =
case Module of
- "chttpd" -> config:get(Module,
- "server_options", ?DEFAULT_SERVER_OPTIONS);
- _ -> config:get(Module, "server_options", "[]")
+ "chttpd" ->
+ config:get(
+ Module,
+ "server_options",
+ ?DEFAULT_SERVER_OPTIONS
+ );
+ _ ->
+ config:get(Module, "server_options", "[]")
end,
{ok, ServerOpts} = couch_util:parse_term(ServerOptsCfg),
ServerOpts.
@@ -168,7 +227,8 @@ handle_request(MochiReq0) ->
handle_request_int(MochiReq) ->
Begin = os:timestamp(),
SocketOptsCfg = config:get(
- "chttpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS),
+ "chttpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS
+ ),
{ok, SocketOpts} = couch_util:parse_term(SocketOptsCfg),
ok = mochiweb_socket:setopts(MochiReq:get(socket), SocketOpts),
@@ -178,49 +238,60 @@ handle_request_int(MochiReq) ->
{"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
% get requested path
- RequestedPath = case MochiReq:get_header_value("x-couchdb-vhost-path") of
- undefined ->
- case MochiReq:get_header_value("x-couchdb-requested-path") of
- undefined -> RawUri;
- R -> R
- end;
- P -> P
- end,
+ RequestedPath =
+ case MochiReq:get_header_value("x-couchdb-vhost-path") of
+ undefined ->
+ case MochiReq:get_header_value("x-couchdb-requested-path") of
+ undefined -> RawUri;
+ R -> R
+ end;
+ P ->
+ P
+ end,
Peer = MochiReq:get(peer),
Method1 =
- case MochiReq:get(method) of
- % already an atom
- Meth when is_atom(Meth) -> Meth;
-
- % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
- % possible (if any module references the atom, then it's existing).
- Meth -> couch_util:to_existing_atom(Meth)
- end,
+ case MochiReq:get(method) of
+ % already an atom
+ Meth when is_atom(Meth) -> Meth;
+ % Non standard HTTP verbs aren't atoms (COPY, MOVE etc) so convert when
+ % possible (if any module references the atom, then it's existing).
+ Meth -> couch_util:to_existing_atom(Meth)
+ end,
increment_method_stats(Method1),
% allow broken HTTP clients to fake a full method vocabulary with an X-HTTP-METHOD-OVERRIDE header
MethodOverride = MochiReq:get_primary_header_value("X-HTTP-Method-Override"),
- Method2 = case lists:member(MethodOverride, ["GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "COPY"]) of
- true ->
- couch_log:notice("MethodOverride: ~s (real method was ~s)", [MethodOverride, Method1]),
- case Method1 of
- 'POST' -> couch_util:to_existing_atom(MethodOverride);
- _ ->
- % Ignore X-HTTP-Method-Override when the original verb isn't POST.
- % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
- % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
- Method1
- end;
- _ -> Method1
- end,
+ Method2 =
+ case
+ lists:member(MethodOverride, [
+ "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "COPY"
+ ])
+ of
+ true ->
+ couch_log:notice("MethodOverride: ~s (real method was ~s)", [
+ MethodOverride, Method1
+ ]),
+ case Method1 of
+ 'POST' ->
+ couch_util:to_existing_atom(MethodOverride);
+ _ ->
+ % Ignore X-HTTP-Method-Override when the original verb isn't POST.
+ % I'd like to send a 406 error to the client, but that'd require a nasty refactor.
+ % throw({not_acceptable, <<"X-HTTP-Method-Override may only be used with POST requests.">>})
+ Method1
+ end;
+ _ ->
+ Method1
+ end,
% alias HEAD to GET as mochiweb takes care of stripping the body
- Method = case Method2 of
- 'HEAD' -> 'GET';
- Other -> Other
- end,
+ Method =
+ case Method2 of
+ 'HEAD' -> 'GET';
+ Other -> Other
+ end,
Nonce = couch_util:to_hex(crypto:strong_rand_bytes(5)),
@@ -231,10 +302,14 @@ handle_request_int(MochiReq) ->
original_method = Method1,
nonce = Nonce,
method = Method,
- path_parts = [list_to_binary(unquote(Part))
- || Part <- string:tokens(Path, "/")],
- requested_path_parts = [?l2b(unquote(Part))
- || Part <- string:tokens(RequestedPath, "/")]
+ path_parts = [
+ list_to_binary(unquote(Part))
+ || Part <- string:tokens(Path, "/")
+ ],
+ requested_path_parts = [
+ ?l2b(unquote(Part))
+ || Part <- string:tokens(RequestedPath, "/")
+ ]
},
% put small token on heap to keep requests synced to backend calls
@@ -244,12 +319,13 @@ handle_request_int(MochiReq) ->
erlang:put(dont_log_request, true),
erlang:put(dont_log_response, true),
- {HttpReq2, Response} = case before_request(HttpReq0) of
- {ok, HttpReq1} ->
- process_request(HttpReq1);
- {error, Response0} ->
- {HttpReq0, Response0}
- end,
+ {HttpReq2, Response} =
+ case before_request(HttpReq0) of
+ {ok, HttpReq1} ->
+ process_request(HttpReq1);
+ {error, Response0} ->
+ {HttpReq0, Response0}
+ end,
{Status, Code, Reason, Resp} = split_response(Response),
@@ -345,7 +421,8 @@ catch_error(HttpReq, exit, {mochiweb_recv_error, E}, _Stack) ->
Peer,
Method,
MochiReq:get(raw_path),
- E]),
+ E
+ ]),
exit(normal);
catch_error(HttpReq, exit, {uri_too_long, _}, _Stack) ->
send_error(HttpReq, request_uri_too_long);
@@ -359,12 +436,13 @@ catch_error(HttpReq, Tag, Error, Stack) ->
% TODO improve logging and metrics collection for client disconnects
case {Tag, Error, Stack} of
{exit, normal, [{mochiweb_request, send, _, _} | _]} ->
- exit(normal); % Client disconnect (R15+)
+ % Client disconnect (R15+)
+ exit(normal);
_Else ->
send_error(HttpReq, {Error, nil, Stack})
end.
-split_response({ok, #delayed_resp{resp=Resp}}) ->
+split_response({ok, #delayed_resp{resp = Resp}}) ->
{ok, Resp:get(code), undefined, Resp};
split_response({ok, Resp}) ->
{ok, Resp:get(code), undefined, Resp};
@@ -401,28 +479,37 @@ maybe_log(#httpd{} = HttpReq, #httpd_resp{should_log = true} = HttpResp) ->
Host = MochiReq:get_header_value("Host"),
RawUri = MochiReq:get(raw_path),
RequestTime = timer:now_diff(EndTime, BeginTime) / 1000,
- couch_log:notice("~s ~s ~s ~s ~s ~B ~p ~B", [Host, Peer, User,
- Method, RawUri, Code, Status, round(RequestTime)]);
+ couch_log:notice("~s ~s ~s ~s ~s ~B ~p ~B", [
+ Host,
+ Peer,
+ User,
+ Method,
+ RawUri,
+ Code,
+ Status,
+ round(RequestTime)
+ ]);
maybe_log(_HttpReq, #httpd_resp{should_log = false}) ->
ok.
-
%% HACK: replication currently handles two forms of input, #db{} style
%% and #http_db style. We need a third that makes use of fabric. #db{}
%% works fine for replicating the dbs and nodes database because they
%% aren't sharded. So for now when a local db is specified as the source or
%% the target, it's hacked to make it a full url and treated as a remote.
-possibly_hack(#httpd{path_parts=[<<"_replicate">>]}=Req) ->
+possibly_hack(#httpd{path_parts = [<<"_replicate">>]} = Req) ->
{Props0} = chttpd:json_body_obj(Req),
Props1 = fix_uri(Req, Props0, <<"source">>),
Props2 = fix_uri(Req, Props1, <<"target">>),
- Req#httpd{req_body={Props2}};
+ Req#httpd{req_body = {Props2}};
possibly_hack(Req) ->
Req.
check_request_uri_length(Uri) ->
- check_request_uri_length(Uri,
- chttpd_util:get_chttpd_config("max_uri_length")).
+ check_request_uri_length(
+ Uri,
+ chttpd_util:get_chttpd_config("max_uri_length")
+ ).
check_request_uri_length(_Uri, undefined) ->
ok;
@@ -445,24 +532,24 @@ check_url_encoding([_ | Rest]) ->
fix_uri(Req, Props, Type) ->
case replication_uri(Type, Props) of
- undefined ->
- Props;
- Uri0 ->
- case is_http(Uri0) of
- true ->
+ undefined ->
Props;
- false ->
- Uri = make_uri(Req, quote(Uri0)),
- [{Type,Uri}|proplists:delete(Type,Props)]
- end
+ Uri0 ->
+ case is_http(Uri0) of
+ true ->
+ Props;
+ false ->
+ Uri = make_uri(Req, quote(Uri0)),
+ [{Type, Uri} | proplists:delete(Type, Props)]
+ end
end.
replication_uri(Type, PostProps) ->
case couch_util:get_value(Type, PostProps) of
- {Props} ->
- couch_util:get_value(<<"url">>, Props);
- Else ->
- Else
+ {Props} ->
+ couch_util:get_value(<<"url">>, Props);
+ Else ->
+ Else
end.
is_http(<<"http://", _/binary>>) ->
@@ -474,13 +561,19 @@ is_http(_) ->
make_uri(Req, Raw) ->
Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
- Url = list_to_binary(["http://", config:get("httpd", "bind_address"),
- ":", Port, "/", Raw]),
+ Url = list_to_binary([
+ "http://",
+ config:get("httpd", "bind_address"),
+ ":",
+ Port,
+ "/",
+ Raw
+ ]),
Headers = [
- {<<"authorization">>, ?l2b(header_value(Req,"authorization",""))},
+ {<<"authorization">>, ?l2b(header_value(Req, "authorization", ""))},
{<<"cookie">>, ?l2b(extract_cookie(Req))}
],
- {[{<<"url">>,Url}, {<<"headers">>,{Headers}}]}.
+ {[{<<"url">>, Url}, {<<"headers">>, {Headers}}]}.
extract_cookie(#httpd{mochi_req = MochiReq}) ->
case MochiReq:get_cookie_value("AuthSession") of
@@ -491,6 +584,7 @@ extract_cookie(#httpd{mochi_req = MochiReq}) ->
end.
%%% end hack
+%% erlfmt-ignore
set_auth_handlers() ->
AuthenticationDefault = "{chttpd_auth, cookie_authentication_handler},
{chttpd_auth, default_authentication_handler}",
@@ -525,20 +619,21 @@ authenticate_request(Req) ->
authenticate_request(#httpd{} = Req0, AuthModule, AuthFuns) ->
Req = Req0#httpd{
auth_module = AuthModule,
- authentication_handlers = AuthFuns},
+ authentication_handlers = AuthFuns
+ },
authenticate_request(Req, AuthFuns).
% Try authentication handlers in order until one returns a result
-authenticate_request(#httpd{user_ctx=#user_ctx{}} = Req, _AuthFuns) ->
+authenticate_request(#httpd{user_ctx = #user_ctx{}} = Req, _AuthFuns) ->
Req;
-authenticate_request(#httpd{} = Req, [{Name, AuthFun}|Rest]) ->
+authenticate_request(#httpd{} = Req, [{Name, AuthFun} | Rest]) ->
authenticate_request(maybe_set_handler(AuthFun(Req), Name), Rest);
-authenticate_request(#httpd{} = Req, [AuthFun|Rest]) ->
+authenticate_request(#httpd{} = Req, [AuthFun | Rest]) ->
authenticate_request(AuthFun(Req), Rest);
authenticate_request(Response, _AuthFuns) ->
Response.
-maybe_set_handler(#httpd{user_ctx=#user_ctx{} = UserCtx} = Req, Name) ->
+maybe_set_handler(#httpd{user_ctx = #user_ctx{} = UserCtx} = Req, Name) ->
Req#httpd{user_ctx = UserCtx#user_ctx{handler = Name}};
maybe_set_handler(Else, _) ->
Else.
@@ -551,16 +646,16 @@ increment_method_stats(Method) ->
partition(Path) ->
mochiweb_util:partition(Path, "/").
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
+header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_header_value(Key).
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+header_value(#httpd{mochi_req = MochiReq}, Key, Default) ->
case MochiReq:get_header_value(Key) of
- undefined -> Default;
- Value -> Value
+ undefined -> Default;
+ Value -> Value
end.
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+primary_header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_primary_header_value(Key).
serve_file(Req, RelativePath, DocumentRoot) ->
@@ -588,44 +683,52 @@ qs(#httpd{mochi_req = MochiReq, qs = undefined}) ->
qs(#httpd{qs = QS}) ->
QS.
-path(#httpd{mochi_req=MochiReq}) ->
+path(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(path).
-absolute_uri(#httpd{mochi_req=MochiReq, absolute_uri = undefined}, Path) ->
+absolute_uri(#httpd{mochi_req = MochiReq, absolute_uri = undefined}, Path) ->
XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"),
- Host = case MochiReq:get_header_value(XHost) of
- undefined ->
- case MochiReq:get_header_value("Host") of
- undefined ->
- {ok, {Address, Port}} = case MochiReq:get(socket) of
- {ssl, SslSocket} -> ssl:sockname(SslSocket);
- Socket -> inet:sockname(Socket)
- end,
- inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
- Value1 ->
- Value1
- end;
- Value -> Value
- end,
+ "x_forwarded_host", "X-Forwarded-Host"
+ ),
+ Host =
+ case MochiReq:get_header_value(XHost) of
+ undefined ->
+ case MochiReq:get_header_value("Host") of
+ undefined ->
+ {ok, {Address, Port}} =
+ case MochiReq:get(socket) of
+ {ssl, SslSocket} -> ssl:sockname(SslSocket);
+ Socket -> inet:sockname(Socket)
+ end,
+ inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
+ Value1 ->
+ Value1
+ end;
+ Value ->
+ Value
+ end,
XSsl = chttpd_util:get_chttpd_config("x_forwarded_ssl", "X-Forwarded-Ssl"),
- Scheme = case MochiReq:get_header_value(XSsl) of
- "on" -> "https";
- _ ->
- XProto = chttpd_util:get_chttpd_config(
- "x_forwarded_proto", "X-Forwarded-Proto"),
- case MochiReq:get_header_value(XProto) of
- % Restrict to "https" and "http" schemes only
- "https" -> "https";
- _ ->
- case MochiReq:get(scheme) of
- https ->
- "https";
- http ->
- "http"
- end
- end
- end,
+ Scheme =
+ case MochiReq:get_header_value(XSsl) of
+ "on" ->
+ "https";
+ _ ->
+ XProto = chttpd_util:get_chttpd_config(
+ "x_forwarded_proto", "X-Forwarded-Proto"
+ ),
+ case MochiReq:get_header_value(XProto) of
+ % Restrict to "https" and "http" schemes only
+ "https" ->
+ "https";
+ _ ->
+ case MochiReq:get(scheme) of
+ https ->
+ "https";
+ http ->
+ "http"
+ end
+ end
+ end,
Scheme ++ "://" ++ Host ++ Path;
absolute_uri(#httpd{absolute_uri = URI}, Path) ->
URI ++ Path.
@@ -639,27 +742,28 @@ unquote(UrlEncodedString) ->
quote(UrlDecodedString) ->
mochiweb_util:quote_plus(UrlDecodedString).
-parse_form(#httpd{mochi_req=MochiReq}) ->
+parse_form(#httpd{mochi_req = MochiReq}) ->
mochiweb_multipart:parse_form(MochiReq).
-recv(#httpd{mochi_req=MochiReq}, Len) ->
+recv(#httpd{mochi_req = MochiReq}, Len) ->
MochiReq:recv(Len).
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
+recv_chunked(#httpd{mochi_req = MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
% Fun is called once with each chunk
% Fun({Length, Binary}, State)
% called with Length == 0 on the last time.
MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState).
-body_length(#httpd{mochi_req=MochiReq}) ->
+body_length(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(body_length).
-body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
+body(#httpd{mochi_req = MochiReq, req_body = ReqBody}) ->
case ReqBody of
undefined ->
% Maximum size of document PUT request body (4GB)
MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296),
+ "max_http_request_size", 4294967296
+ ),
Begin = os:timestamp(),
try
MochiReq:recv_body(MaxSize)
@@ -674,38 +778,35 @@ body(#httpd{mochi_req=MochiReq, req_body=ReqBody}) ->
validate_ctype(Req, Ctype) ->
couch_httpd:validate_ctype(Req, Ctype).
-json_body(#httpd{req_body=undefined} = Httpd) ->
+json_body(#httpd{req_body = undefined} = Httpd) ->
case body(Httpd) of
undefined ->
throw({bad_request, "Missing request body"});
Body ->
?JSON_DECODE(maybe_decompress(Httpd, Body))
end;
-
-json_body(#httpd{req_body=ReqBody}) ->
+json_body(#httpd{req_body = ReqBody}) ->
ReqBody.
json_body_obj(Httpd) ->
case json_body(Httpd) of
{Props} -> {Props};
- _Else ->
- throw({bad_request, "Request body must be a JSON object"})
+ _Else -> throw({bad_request, "Request body must be a JSON object"})
end.
-
-doc_etag(#doc{id=Id, body=Body, revs={Start, [DiskRev|_]}}) ->
+doc_etag(#doc{id = Id, body = Body, revs = {Start, [DiskRev | _]}}) ->
couch_httpd:doc_etag(Id, Body, {Start, DiskRev}).
make_etag(Term) ->
<<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
- list_to_binary(io_lib:format("\"~.36B\"",[SigInt])).
+ list_to_binary(io_lib:format("\"~.36B\"", [SigInt])).
etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
etag_match(Req, binary_to_list(CurrentEtag));
-
etag_match(Req, CurrentEtag) ->
EtagsToMatch0 = string:tokens(
- chttpd:header_value(Req, "If-None-Match", ""), ", "),
+ chttpd:header_value(Req, "If-None-Match", ""), ", "
+ ),
EtagsToMatch = lists:map(fun strip_weak_prefix/1, EtagsToMatch0),
lists:member(CurrentEtag, EtagsToMatch).
@@ -716,27 +817,27 @@ strip_weak_prefix(Etag) ->
etag_respond(Req, CurrentEtag, RespFun) ->
case etag_match(Req, CurrentEtag) of
- true ->
- % the client has this in their cache.
- Headers = [{"ETag", CurrentEtag}],
- chttpd:send_response(Req, 304, Headers, <<>>);
- false ->
- % Run the function.
- RespFun()
+ true ->
+ % the client has this in their cache.
+ Headers = [{"ETag", CurrentEtag}],
+ chttpd:send_response(Req, 304, Headers, <<>>);
+ false ->
+ % Run the function.
+ RespFun()
end.
-verify_is_server_admin(#httpd{user_ctx=#user_ctx{roles=Roles}}) ->
+verify_is_server_admin(#httpd{user_ctx = #user_ctx{roles = Roles}}) ->
case lists:member(<<"_admin">>, Roles) of
- true -> ok;
- false -> throw({unauthorized, <<"You are not a server admin.">>})
+ true -> ok;
+ false -> throw({unauthorized, <<"You are not a server admin.">>})
end.
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers0, Length) ->
+start_response_length(#httpd{mochi_req = MochiReq} = Req, Code, Headers0, Length) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, Length, start_response_length),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
@@ -744,12 +845,12 @@ send(Resp, Data) ->
Resp:send(Data),
{ok, Resp}.
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
+start_chunked_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, chunked, respond),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
@@ -768,8 +869,14 @@ send_response_no_cors(Req, Code, Headers0, Body) ->
couch_httpd:send_response_no_cors(Req, Code, Headers1, Body).
send_method_not_allowed(Req, Methods) ->
- send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>,
- ?l2b("Only " ++ Methods ++ " allowed"), []).
+ send_error(
+ Req,
+ 405,
+ [{"Allow", Methods}],
+ <<"method_not_allowed">>,
+ ?l2b("Only " ++ Methods ++ " allowed"),
+ []
+ ).
send_json(Req, Value) ->
send_json(Req, 200, Value).
@@ -791,15 +898,12 @@ start_json_response(Req, Code, Headers0) ->
end_json_response(Resp) ->
couch_httpd:end_json_response(Resp).
-
start_delayed_json_response(Req, Code) ->
start_delayed_json_response(Req, Code, []).
-
start_delayed_json_response(Req, Code, Headers) ->
start_delayed_json_response(Req, Code, Headers, "").
-
start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
{ok, #delayed_resp{
start_fun = fun start_json_response/3,
@@ -807,13 +911,12 @@ start_delayed_json_response(Req, Code, Headers, FirstChunk) ->
code = Code,
headers = Headers,
chunks = [FirstChunk],
- buffer_response = buffer_response(Req)}}.
-
+ buffer_response = buffer_response(Req)
+ }}.
start_delayed_chunked_response(Req, Code, Headers) ->
start_delayed_chunked_response(Req, Code, Headers, "").
-
start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
{ok, #delayed_resp{
start_fun = fun start_chunked_response/3,
@@ -821,34 +924,30 @@ start_delayed_chunked_response(Req, Code, Headers, FirstChunk) ->
code = Code,
headers = Headers,
chunks = [FirstChunk],
- buffer_response = buffer_response(Req)}}.
-
+ buffer_response = buffer_response(Req)
+ }}.
-send_delayed_chunk(#delayed_resp{buffer_response=false}=DelayedResp, Chunk) ->
- {ok, #delayed_resp{resp=Resp}=DelayedResp1} =
+send_delayed_chunk(#delayed_resp{buffer_response = false} = DelayedResp, Chunk) ->
+ {ok, #delayed_resp{resp = Resp} = DelayedResp1} =
start_delayed_response(DelayedResp),
{ok, Resp} = send_chunk(Resp, Chunk),
{ok, DelayedResp1};
-
-send_delayed_chunk(#delayed_resp{buffer_response=true}=DelayedResp, Chunk) ->
+send_delayed_chunk(#delayed_resp{buffer_response = true} = DelayedResp, Chunk) ->
#delayed_resp{chunks = Chunks} = DelayedResp,
{ok, DelayedResp#delayed_resp{chunks = [Chunk | Chunks]}}.
-
send_delayed_last_chunk(Req) ->
send_delayed_chunk(Req, []).
-
-send_delayed_error(#delayed_resp{req=Req,resp=nil}=DelayedResp, Reason) ->
+send_delayed_error(#delayed_resp{req = Req, resp = nil} = DelayedResp, Reason) ->
{Code, ErrorStr, ReasonStr} = error_info(Reason),
{ok, Resp} = send_error(Req, Code, ErrorStr, ReasonStr),
- {ok, DelayedResp#delayed_resp{resp=Resp}};
-send_delayed_error(#delayed_resp{resp=Resp, req=Req}, Reason) ->
+ {ok, DelayedResp#delayed_resp{resp = Resp}};
+send_delayed_error(#delayed_resp{resp = Resp, req = Req}, Reason) ->
update_timeout_stats(Reason, Req),
log_error_with_stack_trace(Reason),
throw({http_abort, Resp, Reason}).
-
close_delayed_json_object(Resp, Buffer, Terminator, 0) ->
% Use a separate chunk to close the streamed array to maintain strict
% compatibility with earlier versions. See COUCHDB-2724
@@ -857,13 +956,11 @@ close_delayed_json_object(Resp, Buffer, Terminator, 0) ->
close_delayed_json_object(Resp, Buffer, Terminator, _Threshold) ->
send_delayed_chunk(Resp, [Buffer | Terminator]).
-
-end_delayed_json_response(#delayed_resp{buffer_response=false}=DelayedResp) ->
- {ok, #delayed_resp{resp=Resp}} =
+end_delayed_json_response(#delayed_resp{buffer_response = false} = DelayedResp) ->
+ {ok, #delayed_resp{resp = Resp}} =
start_delayed_response(DelayedResp),
end_json_response(Resp);
-
-end_delayed_json_response(#delayed_resp{buffer_response=true}=DelayedResp) ->
+end_delayed_json_response(#delayed_resp{buffer_response = true} = DelayedResp) ->
#delayed_resp{
start_fun = StartFun,
req = Req,
@@ -872,36 +969,37 @@ end_delayed_json_response(#delayed_resp{buffer_response=true}=DelayedResp) ->
chunks = Chunks
} = DelayedResp,
{ok, Resp} = StartFun(Req, Code, Headers),
- lists:foreach(fun
- ([]) -> ok;
- (Chunk) -> send_chunk(Resp, Chunk)
- end, lists:reverse(Chunks)),
+ lists:foreach(
+ fun
+ ([]) -> ok;
+ (Chunk) -> send_chunk(Resp, Chunk)
+ end,
+ lists:reverse(Chunks)
+ ),
end_json_response(Resp).
-
-get_delayed_req(#delayed_resp{req=#httpd{mochi_req=MochiReq}}) ->
+get_delayed_req(#delayed_resp{req = #httpd{mochi_req = MochiReq}}) ->
MochiReq;
get_delayed_req(Resp) ->
Resp:get(request).
-start_delayed_response(#delayed_resp{resp=nil}=DelayedResp) ->
+start_delayed_response(#delayed_resp{resp = nil} = DelayedResp) ->
#delayed_resp{
- start_fun=StartFun,
- req=Req,
- code=Code,
- headers=Headers,
- chunks=[FirstChunk]
- }=DelayedResp,
+ start_fun = StartFun,
+ req = Req,
+ code = Code,
+ headers = Headers,
+ chunks = [FirstChunk]
+ } = DelayedResp,
{ok, Resp} = StartFun(Req, Code, Headers),
case FirstChunk of
"" -> ok;
_ -> {ok, Resp} = send_chunk(Resp, FirstChunk)
end,
- {ok, DelayedResp#delayed_resp{resp=Resp}};
-start_delayed_response(#delayed_resp{}=DelayedResp) ->
+ {ok, DelayedResp#delayed_resp{resp = Resp}};
+start_delayed_response(#delayed_resp{} = DelayedResp) ->
{ok, DelayedResp}.
-
buffer_response(Req) ->
case chttpd:qs_value(Req, "buffer_response") of
"false" ->
@@ -912,7 +1010,6 @@ buffer_response(Req) ->
config:get_boolean("chttpd", "buffer_response", false)
end.
-
error_info({Error, Reason}) when is_list(Reason) ->
error_info({Error, couch_util:to_binary(Reason)});
error_info(bad_request) ->
@@ -941,8 +1038,8 @@ error_info({conflict, _}) ->
{409, <<"conflict">>, <<"Document update conflict.">>};
error_info({partition_overflow, DocId}) ->
Descr = <<
- "Partition limit exceeded due to update on '", DocId/binary, "'"
- >>,
+ "Partition limit exceeded due to update on '", DocId/binary, "'"
+ >>,
{403, <<"partition_overflow">>, Descr};
error_info({{not_found, missing}, {_, _}}) ->
{409, <<"not_found">>, <<"missing_rev">>};
@@ -953,8 +1050,10 @@ error_info({forbidden, Msg}) ->
error_info({unauthorized, Msg}) ->
{401, <<"unauthorized">>, Msg};
error_info(file_exists) ->
- {412, <<"file_exists">>, <<"The database could not be "
- "created, the file already exists.">>};
+ {412, <<"file_exists">>, <<
+ "The database could not be "
+ "created, the file already exists."
+ >>};
error_info({error, {nodedown, Reason}}) ->
{412, <<"nodedown">>, Reason};
error_info({maintenance_mode, Node}) ->
@@ -970,16 +1069,17 @@ error_info({bad_ctype, Reason}) ->
error_info(requested_range_not_satisfiable) ->
{416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
error_info({error, {illegal_database_name, Name}}) ->
- Message = <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
- "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
- "are allowed. Must begin with a letter.">>,
+ Message =
+ <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
+ "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
+ "are allowed. Must begin with a letter.">>,
{400, <<"illegal_database_name">>, Message};
error_info({illegal_docid, Reason}) ->
{400, <<"illegal_docid">>, Reason};
error_info({illegal_partition, Reason}) ->
{400, <<"illegal_partition">>, Reason};
-error_info({_DocID,{illegal_docid,DocID}}) ->
- {400, <<"illegal_docid">>,DocID};
+error_info({_DocID, {illegal_docid, DocID}}) ->
+ {400, <<"illegal_docid">>, DocID};
error_info({error, {database_name_too_long, DbName}}) ->
{400, <<"database_name_too_long">>,
<<"At least one path segment of `", DbName/binary, "` is too long.">>};
@@ -994,16 +1094,22 @@ error_info({request_entity_too_large, {attachment, AttName}}) ->
error_info({request_entity_too_large, DocID}) ->
{413, <<"document_too_large">>, DocID};
error_info({error, security_migration_updates_disabled}) ->
- {503, <<"security_migration">>, <<"Updates to security docs are disabled during "
- "security migration.">>};
+ {503, <<"security_migration">>, <<
+ "Updates to security docs are disabled during "
+ "security migration."
+ >>};
error_info(all_workers_died) ->
- {503, <<"service unvailable">>, <<"Nodes are unable to service this "
- "request due to overloading or maintenance mode.">>};
+ {503, <<"service unvailable">>, <<
+ "Nodes are unable to service this "
+ "request due to overloading or maintenance mode."
+ >>};
error_info(not_implemented) ->
{501, <<"not_implemented">>, <<"this feature is not yet implemented">>};
error_info(timeout) ->
- {500, <<"timeout">>, <<"The request could not be processed in a reasonable"
- " amount of time.">>};
+ {500, <<"timeout">>, <<
+ "The request could not be processed in a reasonable"
+ " amount of time."
+ >>};
error_info({service_unavailable, Reason}) ->
{503, <<"service unavailable">>, Reason};
error_info({timeout, _Reason}) ->
@@ -1033,61 +1139,83 @@ maybe_handle_error(Error) ->
{500, <<"unknown_error">>, couch_util:to_binary(Error)}
end.
-
-error_headers(#httpd{mochi_req=MochiReq}=Req, 401=Code, ErrorStr, ReasonStr) ->
+error_headers(#httpd{mochi_req = MochiReq} = Req, 401 = Code, ErrorStr, ReasonStr) ->
% this is where the basic auth popup is triggered
case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
- undefined ->
- case chttpd_util:get_chttpd_config("WWW-Authenticate") of
undefined ->
- % If the client is a browser and the basic auth popup isn't turned on
- % redirect to the session page.
- case ErrorStr of
- <<"unauthorized">> ->
- case chttpd_util:get_chttpd_auth_config(
- "authentication_redirect", "/_utils/session.html") of
- undefined -> {Code, []};
- AuthRedirect ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true ->
- % send the browser popup header no matter what if we are require_valid_user
- {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
- false ->
- case MochiReq:accepts_content_type("application/json") of
- true ->
- {Code, []};
- false ->
- case MochiReq:accepts_content_type("text/html") of
- true ->
- % Redirect to the path the user requested, not
- % the one that is used internally.
- UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
+ case chttpd_util:get_chttpd_config("WWW-Authenticate") of
+ undefined ->
+ % If the client is a browser and the basic auth popup isn't turned on
+ % redirect to the session page.
+ case ErrorStr of
+ <<"unauthorized">> ->
+ case
+ chttpd_util:get_chttpd_auth_config(
+ "authentication_redirect", "/_utils/session.html"
+ )
+ of
undefined ->
- MochiReq:get(path);
- VHostPath ->
- VHostPath
- end,
- RedirectLocation = lists:flatten([
- AuthRedirect,
- "?return=", couch_util:url_encode(UrlReturnRaw),
- "&reason=", couch_util:url_encode(ReasonStr)
- ]),
- {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
- false ->
- {Code, []}
- end
- end
- end
- end;
- _Else ->
- {Code, []}
+ {Code, []};
+ AuthRedirect ->
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true ->
+ % send the browser popup header no matter what if we are require_valid_user
+ {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
+ false ->
+ case
+ MochiReq:accepts_content_type("application/json")
+ of
+ true ->
+ {Code, []};
+ false ->
+ case
+ MochiReq:accepts_content_type("text/html")
+ of
+ true ->
+ % Redirect to the path the user requested, not
+ % the one that is used internally.
+ UrlReturnRaw =
+ case
+ MochiReq:get_header_value(
+ "x-couchdb-vhost-path"
+ )
+ of
+ undefined ->
+ MochiReq:get(path);
+ VHostPath ->
+ VHostPath
+ end,
+ RedirectLocation = lists:flatten([
+ AuthRedirect,
+ "?return=",
+ couch_util:url_encode(UrlReturnRaw),
+ "&reason=",
+ couch_util:url_encode(ReasonStr)
+ ]),
+ {302, [
+ {"Location",
+ absolute_uri(
+ Req, RedirectLocation
+ )}
+ ]};
+ false ->
+ {Code, []}
+ end
+ end
+ end
+ end;
+ _Else ->
+ {Code, []}
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
end;
Type ->
{Code, [{"WWW-Authenticate", Type}]}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
end;
error_headers(_, Code, _, _) ->
{Code, []}.
@@ -1104,16 +1232,30 @@ send_error(#httpd{} = Req, Code, ErrorStr, ReasonStr) ->
send_error(Req, Code, [], ErrorStr, ReasonStr, []).
send_error(Req, Code, Headers, ErrorStr, ReasonStr, []) ->
- send_json(Req, Code, Headers,
- {[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]});
+ send_json(
+ Req,
+ Code,
+ Headers,
+ {[
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ ]}
+ );
send_error(Req, Code, Headers, ErrorStr, ReasonStr, Stack) ->
log_error_with_stack_trace({ErrorStr, ReasonStr, Stack}),
- send_json(Req, Code, [stack_trace_id(Stack) | Headers],
- {[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr} |
- case Stack of [] -> []; _ -> [{<<"ref">>, stack_hash(Stack)}] end
- ]}).
+ send_json(
+ Req,
+ Code,
+ [stack_trace_id(Stack) | Headers],
+ {[
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ | case Stack of
+ [] -> [];
+ _ -> [{<<"ref">>, stack_hash(Stack)}]
+ end
+ ]}
+ ).
update_timeout_stats(<<"timeout">>, #httpd{requested_path_parts = PathParts}) ->
update_timeout_stats(PathParts);
@@ -1122,20 +1264,27 @@ update_timeout_stats(timeout, #httpd{requested_path_parts = PathParts}) ->
update_timeout_stats(_, _) ->
ok.
-update_timeout_stats([_, <<"_partition">>, _, <<"_design">>, _,
- <<"_view">> | _]) ->
+update_timeout_stats([
+ _,
+ <<"_partition">>,
+ _,
+ <<"_design">>,
+ _,
+ <<"_view">>
+ | _
+]) ->
couch_stats:increment_counter([couchdb, httpd, partition_view_timeouts]);
-update_timeout_stats([_, <<"_partition">>, _, <<"_find">>| _]) ->
+update_timeout_stats([_, <<"_partition">>, _, <<"_find">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, partition_find_timeouts]);
-update_timeout_stats([_, <<"_partition">>, _, <<"_explain">>| _]) ->
+update_timeout_stats([_, <<"_partition">>, _, <<"_explain">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, partition_explain_timeouts]);
update_timeout_stats([_, <<"_partition">>, _, <<"_all_docs">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, partition_all_docs_timeouts]);
update_timeout_stats([_, <<"_design">>, _, <<"_view">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, view_timeouts]);
-update_timeout_stats([_, <<"_find">>| _]) ->
+update_timeout_stats([_, <<"_find">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, find_timeouts]);
-update_timeout_stats([_, <<"_explain">>| _]) ->
+update_timeout_stats([_, <<"_explain">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, explain_timeouts]);
update_timeout_stats([_, <<"_all_docs">> | _]) ->
couch_stats:increment_counter([couchdb, httpd, all_docs_timeouts]);
@@ -1146,17 +1295,21 @@ update_timeout_stats(_) ->
send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
send_chunk(Resp, Reason),
send_chunk(Resp, []);
-
send_chunked_error(Resp, Error) ->
Stack = json_stack(Error),
log_error_with_stack_trace(Error),
{Code, ErrorStr, ReasonStr} = error_info(Error),
- JsonError = {[{<<"code">>, Code},
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr} |
- case Stack of [] -> []; _ -> [{<<"ref">>, stack_hash(Stack)}] end
- ]},
- send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
+ JsonError =
+ {[
+ {<<"code">>, Code},
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ | case Stack of
+ [] -> [];
+ _ -> [{<<"ref">>, stack_hash(Stack)}]
+ end
+ ]},
+ send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(JsonError), $\n])),
send_chunk(Resp, []).
send_redirect(Req, Path) ->
@@ -1184,39 +1337,58 @@ json_stack({_Error, _Reason, Stack}) when is_list(Stack) ->
json_stack(_) ->
[].
-json_stack_item({M,F,A}) ->
+json_stack_item({M, F, A}) ->
list_to_binary(io_lib:format("~s:~s/~B", [M, F, json_stack_arity(A)]));
-json_stack_item({M,F,A,L}) ->
+json_stack_item({M, F, A, L}) ->
case proplists:get_value(line, L) of
- undefined -> json_stack_item({M,F,A});
- Line -> list_to_binary(io_lib:format("~s:~s/~B L~B",
- [M, F, json_stack_arity(A), Line]))
+ undefined ->
+ json_stack_item({M, F, A});
+ Line ->
+ list_to_binary(
+ io_lib:format(
+ "~s:~s/~B L~B",
+ [M, F, json_stack_arity(A), Line]
+ )
+ )
end;
json_stack_item(_) ->
<<"bad entry in stacktrace">>.
json_stack_arity(A) ->
- if is_integer(A) -> A; is_list(A) -> length(A); true -> 0 end.
+ if
+ is_integer(A) -> A;
+ is_list(A) -> length(A);
+ true -> 0
+ end.
maybe_decompress(Httpd, Body) ->
case header_value(Httpd, "Content-Encoding", "identity") of
- "gzip" ->
- try
- zlib:gunzip(Body)
- catch error:data_error ->
- throw({bad_request, "Request body is not properly gzipped."})
- end;
- "identity" ->
- Body;
- Else ->
- throw({bad_ctype, [Else, " is not a supported content encoding."]})
+ "gzip" ->
+ try
+ zlib:gunzip(Body)
+ catch
+ error:data_error ->
+ throw({bad_request, "Request body is not properly gzipped."})
+ end;
+ "identity" ->
+ Body;
+ Else ->
+ throw({bad_ctype, [Else, " is not a supported content encoding."]})
end.
log_error_with_stack_trace({bad_request, _, _}) ->
ok;
log_error_with_stack_trace({Error, Reason, Stack}) ->
- EFmt = if is_binary(Error) -> "~s"; true -> "~w" end,
- RFmt = if is_binary(Reason) -> "~s"; true -> "~w" end,
+ EFmt =
+ if
+ is_binary(Error) -> "~s";
+ true -> "~w"
+ end,
+ RFmt =
+ if
+ is_binary(Reason) -> "~s";
+ true -> "~w"
+ end,
Fmt = "req_err(~w) " ++ EFmt ++ " : " ++ RFmt ++ "~n ~p",
couch_log:error(Fmt, [stack_hash(Stack), Error, Reason, Stack]);
log_error_with_stack_trace(_) ->
@@ -1241,9 +1413,10 @@ chunked_response_buffer_size() ->
chttpd_util:get_chttpd_config_integer("chunked_response_buffer", 1490).
basic_headers(Req, Headers0) ->
- Headers = Headers0
- ++ server_header()
- ++ couch_httpd_auth:cookie_auth_header(Req, Headers0),
+ Headers =
+ Headers0 ++
+ server_header() ++
+ couch_httpd_auth:cookie_auth_header(Req, Headers0),
Headers1 = chttpd_cors:headers(Req, Headers),
Headers2 = chttpd_xframe_options:header(Req, Headers1),
Headers3 = [reqid(), timing() | Headers2],
@@ -1292,52 +1465,83 @@ check_url_encoding_pass_test_() ->
check_url_encoding_fail_test_() ->
[
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%?rev=1-abcdefgh")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname/doc_id%2")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/user%2Fdbname%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/user%2Fdbname/doc_id%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/%")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/%2")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2%3A")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%%3Ae")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%2g")),
- ?_assertThrow({bad_request, invalid_url_encoding},
- check_url_encoding("/dbname%g2"))
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname/doc_id%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname/doc_id%?rev=1-abcdefgh")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%2")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname/doc_id%2")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/user%2Fdbname%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/user%2Fdbname/doc_id%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/%")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/%2")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%2%3A")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%%3Ae")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%2g")
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_url_encoding},
+ check_url_encoding("/dbname%g2")
+ )
].
log_format_test() ->
?assertEqual(
"127.0.0.1:15984 127.0.0.1 undefined "
"GET /_cluster_setup 201 ok 10000",
- test_log_request("/_cluster_setup", undefined)),
+ test_log_request("/_cluster_setup", undefined)
+ ),
?assertEqual(
"127.0.0.1:15984 127.0.0.1 user_foo "
"GET /_all_dbs 201 ok 10000",
- test_log_request("/_all_dbs", #user_ctx{name = <<"user_foo">>})),
+ test_log_request("/_all_dbs", #user_ctx{name = <<"user_foo">>})
+ ),
%% Utf8Name = unicode:characters_to_binary(Something),
- Utf8User = <<227,130,136,227,129,134,227,129,147,227,129,157>>,
+ Utf8User = <<227, 130, 136, 227, 129, 134, 227, 129, 147, 227, 129, 157>>,
?assertEqual(
"127.0.0.1:15984 127.0.0.1 %E3%82%88%E3%81%86%E3%81%93%E3%81%9D "
"GET /_all_dbs 201 ok 10000",
- test_log_request("/_all_dbs", #user_ctx{name = Utf8User})),
+ test_log_request("/_all_dbs", #user_ctx{name = Utf8User})
+ ),
ok.
test_log_request(RawPath, UserCtx) ->
@@ -1345,14 +1549,14 @@ test_log_request(RawPath, UserCtx) ->
MochiReq = mochiweb_request:new(socket, [], 'POST', RawPath, version, Headers),
Req = #httpd{
mochi_req = MochiReq,
- begin_ts = {1458,588713,124003},
+ begin_ts = {1458, 588713, 124003},
original_method = 'GET',
peer = "127.0.0.1",
nonce = "nonce",
user_ctx = UserCtx
},
Resp = #httpd_resp{
- end_ts = {1458,588723,124303},
+ end_ts = {1458, 588723, 124303},
code = 201,
status = ok
},
@@ -1366,14 +1570,20 @@ test_log_request(RawPath, UserCtx) ->
handle_req_after_auth_test() ->
Headers = mochiweb_headers:make([{"HOST", "127.0.0.1:15984"}]),
- MochiReq = mochiweb_request:new(socket, [], 'PUT', "/newdb", version,
- Headers),
+ MochiReq = mochiweb_request:new(
+ socket,
+ [],
+ 'PUT',
+ "/newdb",
+ version,
+ Headers
+ ),
UserCtx = #user_ctx{name = <<"retain_user">>},
Roles = [<<"_reader">>],
AuthorizedCtx = #user_ctx{name = <<"retain_user">>, roles = Roles},
Req = #httpd{
mochi_req = MochiReq,
- begin_ts = {1458,588713,124003},
+ begin_ts = {1458, 588713, 124003},
original_method = 'PUT',
peer = "127.0.0.1",
nonce = "nonce",
@@ -1383,18 +1593,22 @@ handle_req_after_auth_test() ->
ok = meck:new(chttpd_handlers, [passthrough]),
ok = meck:new(chttpd_auth, [passthrough]),
ok = meck:expect(chttpd_handlers, url_handler, fun(_Key, _Fun) ->
- fun(_Req) -> handled_authorized_req end
+ fun(_Req) -> handled_authorized_req end
end),
ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
AuthorizedReq
end),
- ?assertEqual({AuthorizedReq, handled_authorized_req},
- handle_req_after_auth(foo_key, Req)),
+ ?assertEqual(
+ {AuthorizedReq, handled_authorized_req},
+ handle_req_after_auth(foo_key, Req)
+ ),
ok = meck:expect(chttpd_auth, authorize, fun(_Req, _Fun) ->
meck:exception(throw, {http_abort, resp, some_reason})
end),
- ?assertEqual({Req, {aborted, resp, some_reason}},
- handle_req_after_auth(foo_key, Req)),
+ ?assertEqual(
+ {Req, {aborted, resp, some_reason}},
+ handle_req_after_auth(foo_key, Req)
+ ),
ok = meck:unload(chttpd_handlers),
ok = meck:unload(chttpd_auth).
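Not part of the patch: the chttpd.erl hunks above are mechanical erlfmt rewrites — spaces added around `=` inside record patterns, case/try clauses pushed one indent level deeper, and oversized calls split one argument per line with the closing parenthesis on its own line. The sketch below restates those conventions in isolation; the module, record, and function names are invented for illustration and do not exist in CouchDB.

```
%% Illustration only; #req{} stands in for #httpd{}.
-module(erlfmt_record_example).
-export([body_length/1, pick/1]).

-record(req, {mochi_req, req_body}).

%% erlfmt adds spaces around `=` inside record patterns:
%%   #req{mochi_req=MochiReq}  becomes  #req{mochi_req = MochiReq}
body_length(#req{mochi_req = MochiReq}) ->
    {body_length, MochiReq}.

%% Each case clause gets its own line, with the body indented one
%% level below the pattern rather than sharing its line.
pick(Value) ->
    case Value of
        undefined ->
            default;
        Other ->
            Other
    end.
```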
diff --git a/src/chttpd/src/chttpd_auth.erl b/src/chttpd/src/chttpd_auth.erl
index ffae78171..20b5a05f1 100644
--- a/src/chttpd/src/chttpd_auth.erl
+++ b/src/chttpd/src/chttpd_auth.erl
@@ -27,7 +27,6 @@
-define(SERVICE_ID, chttpd_auth).
-
%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------
@@ -38,7 +37,6 @@ authenticate(HttpReq, Default) ->
authorize(HttpReq, Default) ->
maybe_handle(authorize, [HttpReq], Default).
-
%% ------------------------------------------------------------------
%% Default callbacks
%% ------------------------------------------------------------------
@@ -55,17 +53,20 @@ proxy_authentication_handler(Req) ->
jwt_authentication_handler(Req) ->
couch_httpd_auth:jwt_authentication_handler(Req).
-party_mode_handler(#httpd{method='POST', path_parts=[<<"_session">>]} = Req) ->
+party_mode_handler(#httpd{method = 'POST', path_parts = [<<"_session">>]} = Req) ->
% See #1947 - users should always be able to attempt a login
- Req#httpd{user_ctx=#user_ctx{}};
-party_mode_handler(#httpd{path_parts=[<<"_up">>]} = Req) ->
+ Req#httpd{user_ctx = #user_ctx{}};
+party_mode_handler(#httpd{path_parts = [<<"_up">>]} = Req) ->
RequireValidUser = config:get_boolean("chttpd", "require_valid_user", false),
- RequireValidUserExceptUp = config:get_boolean("chttpd", "require_valid_user_except_for_up", false),
+ RequireValidUserExceptUp = config:get_boolean(
+ "chttpd", "require_valid_user_except_for_up", false
+ ),
require_valid_user(Req, RequireValidUser andalso not RequireValidUserExceptUp);
-
party_mode_handler(Req) ->
RequireValidUser = config:get_boolean("chttpd", "require_valid_user", false),
- RequireValidUserExceptUp = config:get_boolean("chttpd", "require_valid_user_except_for_up", false),
+ RequireValidUserExceptUp = config:get_boolean(
+ "chttpd", "require_valid_user_except_for_up", false
+ ),
require_valid_user(Req, RequireValidUser orelse RequireValidUserExceptUp).
require_valid_user(_Req, true) ->
@@ -75,13 +76,12 @@ require_valid_user(Req, false) ->
[] ->
Req#httpd{user_ctx = ?ADMIN_USER};
_ ->
- Req#httpd{user_ctx=#user_ctx{}}
+ Req#httpd{user_ctx = #user_ctx{}}
end.
handle_session_req(Req) ->
couch_httpd_auth:handle_session_req(Req, chttpd_auth_cache).
-
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
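Not part of the patch: in chttpd_auth.erl the formatter's only substantive change is line-length driven — the long `config:get_boolean/3` calls are wrapped after the opening parenthesis, with the arguments on a continuation line and the closing parenthesis dropped to its own line. A minimal sketch of that wrapping, using an invented module and a stand-in `lookup/3` in place of the real config API:

```
%% Illustration only; lookup/3 stands in for a long three-argument call.
-module(erlfmt_wrap_example).
-export([require_valid_user_flags/0]).

lookup(_Section, _Key, Default) ->
    Default.

require_valid_user_flags() ->
    %% Short calls stay on one line; calls that would overflow the
    %% line limit break after the opening parenthesis, and the closing
    %% parenthesis moves to its own line.
    RequireValidUser = lookup("chttpd", "require_valid_user", false),
    RequireValidUserExceptUp = lookup(
        "chttpd", "require_valid_user_except_for_up", false
    ),
    {RequireValidUser, RequireValidUserExceptUp}.
```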
diff --git a/src/chttpd/src/chttpd_auth_cache.erl b/src/chttpd/src/chttpd_auth_cache.erl
index 17a31bf55..2173eca95 100644
--- a/src/chttpd/src/chttpd_auth_cache.erl
+++ b/src/chttpd/src/chttpd_auth_cache.erl
@@ -14,8 +14,14 @@
-behaviour(gen_server).
-export([start_link/0, get_user_creds/2, update_user_creds/3, dbname/0]).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-export([listen_for_changes/1, changes_callback/2]).
-include_lib("couch/include/couch_db.hrl").
@@ -25,7 +31,7 @@
-record(state, {
changes_pid,
- last_seq="0"
+ last_seq = "0"
}).
%% public functions
@@ -36,18 +42,21 @@ start_link() ->
get_user_creds(Req, UserName) when is_list(UserName) ->
get_user_creds(Req, ?l2b(UserName));
get_user_creds(_Req, UserName) when is_binary(UserName) ->
- Resp = case couch_auth_cache:get_admin(UserName) of
- nil ->
- get_from_cache(UserName);
- Props ->
- case get_from_cache(UserName) of
- nil ->
- Props;
- UserProps when is_list(UserProps) ->
- couch_auth_cache:add_roles(Props,
- couch_util:get_value(<<"roles">>, UserProps))
- end
- end,
+ Resp =
+ case couch_auth_cache:get_admin(UserName) of
+ nil ->
+ get_from_cache(UserName);
+ Props ->
+ case get_from_cache(UserName) of
+ nil ->
+ Props;
+ UserProps when is_list(UserProps) ->
+ couch_auth_cache:add_roles(
+ Props,
+ couch_util:get_value(<<"roles">>, UserProps)
+ )
+ end
+ end,
maybe_validate_user_creds(Resp).
update_user_creds(_Req, UserDoc, _Ctx) ->
@@ -109,19 +118,23 @@ handle_call(_Call, _From, State) ->
handle_cast(_Msg, State) ->
{noreply, State}.
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
- Seq = case Reason of
- {seq, EndSeq} ->
- EndSeq;
- {database_does_not_exist, _} ->
- couch_log:notice("~p changes listener died because the _users database does not exist. Create the database to silence this notice.", [?MODULE]),
- 0;
- _ ->
- couch_log:notice("~p changes listener died ~r", [?MODULE, Reason]),
- 0
- end,
+handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid = Pid} = State) ->
+ Seq =
+ case Reason of
+ {seq, EndSeq} ->
+ EndSeq;
+ {database_does_not_exist, _} ->
+ couch_log:notice(
+ "~p changes listener died because the _users database does not exist. Create the database to silence this notice.",
+ [?MODULE]
+ ),
+ 0;
+ _ ->
+ couch_log:notice("~p changes listener died ~r", [?MODULE, Reason]),
+ 0
+ end,
erlang:send_after(5000, self(), {start_listener, Seq}),
- {noreply, State#state{last_seq=Seq}};
+ {noreply, State#state{last_seq = Seq}};
handle_info({start_listener, Seq}, State) ->
{noreply, State#state{changes_pid = spawn_changes(Seq)}};
handle_info(_Msg, State) ->
@@ -132,7 +145,7 @@ terminate(_Reason, #state{changes_pid = Pid}) when is_pid(Pid) ->
terminate(_Reason, _State) ->
ok.
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
%% private functions
@@ -175,14 +188,15 @@ changes_callback({error, _}, EndSeq) ->
load_user_from_db(UserName) ->
try fabric:open_doc(dbname(), docid(UserName), [?ADMIN_CTX, ejson_body, conflicts]) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- Props;
- _Else ->
- couch_log:debug("no record of user ~s", [UserName]),
- nil
- catch error:database_does_not_exist ->
- nil
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ Props;
+ _Else ->
+ couch_log:debug("no record of user ~s", [UserName]),
+ nil
+ catch
+ error:database_does_not_exist ->
+ nil
end.
dbname() ->
@@ -196,23 +210,30 @@ username(<<"org.couchdb.user:", UserName/binary>>) ->
ensure_auth_ddoc_exists(DbName, DDocId) ->
case fabric:open_doc(DbName, DDocId, [?ADMIN_CTX, ejson_body]) of
- {not_found, _Reason} ->
- {ok, AuthDesign} = couch_auth_cache:auth_design_doc(DDocId),
- update_doc_ignoring_conflict(DbName, AuthDesign, [?ADMIN_CTX]);
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
- ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
- ok;
- _ ->
- Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
- {<<"validate_doc_update">>,
- ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
- update_doc_ignoring_conflict(DbName, couch_doc:from_json_obj({Props1}), [?ADMIN_CTX])
- end;
- {error, Reason} ->
- couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [DbName, DDocId, Reason]),
- ok
+ {not_found, _Reason} ->
+ {ok, AuthDesign} = couch_auth_cache:auth_design_doc(DDocId),
+ update_doc_ignoring_conflict(DbName, AuthDesign, [?ADMIN_CTX]);
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
+ ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
+ ok;
+ _ ->
+ Props1 = lists:keyreplace(
+ <<"validate_doc_update">>,
+ 1,
+ Props,
+ {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
+ ),
+ update_doc_ignoring_conflict(DbName, couch_doc:from_json_obj({Props1}), [
+ ?ADMIN_CTX
+ ])
+ end;
+ {error, Reason} ->
+ couch_log:notice("Failed to ensure auth ddoc ~s/~s exists for reason: ~p", [
+ DbName, DDocId, Reason
+ ]),
+ ok
end,
ok.
@@ -229,15 +250,18 @@ maybe_validate_user_creds(nil) ->
% throws if UserCreds includes a _conflicts member
% returns UserCreds otherwise
maybe_validate_user_creds(UserCreds) ->
- AllowConflictedUserDocs = config:get_boolean("chttpd_auth", "allow_conflicted_user_docs", false),
+ AllowConflictedUserDocs = config:get_boolean(
+ "chttpd_auth", "allow_conflicted_user_docs", false
+ ),
case {couch_util:get_value(<<"_conflicts">>, UserCreds), AllowConflictedUserDocs} of
{undefined, _} ->
{ok, UserCreds, nil};
{_, true} ->
{ok, UserCreds, nil};
{_ConflictList, false} ->
- throw({unauthorized,
- <<"User document conflicts must be resolved before the document",
- " is used for authentication purposes.">>
- })
+ throw(
+ {unauthorized,
+ <<"User document conflicts must be resolved before the document",
+ " is used for authentication purposes.">>}
+ )
end.
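Not part of the patch: the chttpd_auth_cache.erl hunk shows two further conventions — multi-function `-export` attributes are rewritten one entry per line, and a `case` whose result is bound to a variable keeps the `Var =` on its own line with the `case` indented beneath it. A small sketch under those assumptions (all names invented):

```
-module(erlfmt_case_example).
%% One exported function per line, closing bracket on its own line.
-export([
    classify/1
]).

classify(Reason) ->
    %% The bound variable keeps its own line; the case sits one level
    %% below it, and each clause body one level further.
    Label =
        case Reason of
            {seq, EndSeq} ->
                {resume_at, EndSeq};
            _ ->
                restart_from_zero
        end,
    Label.
```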
diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl
index f36203efe..301cf8e7d 100644
--- a/src/chttpd/src/chttpd_auth_request.erl
+++ b/src/chttpd/src/chttpd_auth_request.erl
@@ -14,106 +14,105 @@
-export([authorize_request/1]).
-include_lib("couch/include/couch_db.hrl").
-authorize_request(#httpd{auth=Auth, user_ctx=Ctx} = Req) ->
+authorize_request(#httpd{auth = Auth, user_ctx = Ctx} = Req) ->
try
- authorize_request_int(Req)
+ authorize_request_int(Req)
catch
- throw:{forbidden, Msg} ->
- case {Auth, Ctx} of
- {{cookie_auth_failed, {Error, Reason}}, _} ->
- throw({forbidden, {Error, Reason}});
- {_, #user_ctx{name=null}} ->
- throw({unauthorized, Msg});
- {_, _} ->
- throw({forbidden, Msg})
- end
+ throw:{forbidden, Msg} ->
+ case {Auth, Ctx} of
+ {{cookie_auth_failed, {Error, Reason}}, _} ->
+ throw({forbidden, {Error, Reason}});
+ {_, #user_ctx{name = null}} ->
+ throw({unauthorized, Msg});
+ {_, _} ->
+ throw({forbidden, Msg})
+ end
end.
-authorize_request_int(#httpd{path_parts=[]}=Req) ->
+authorize_request_int(#httpd{path_parts = []} = Req) ->
Req;
-authorize_request_int(#httpd{path_parts=[<<"favicon.ico">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"favicon.ico">> | _]} = Req) ->
Req;
-authorize_request_int(#httpd{path_parts=[<<"_all_dbs">>|_]}=Req) ->
- case config:get_boolean("chttpd", "admin_only_all_dbs", true) of
- true -> require_admin(Req);
- false -> Req
- end;
-authorize_request_int(#httpd{path_parts=[<<"_dbs_info">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_all_dbs">> | _]} = Req) ->
+ case config:get_boolean("chttpd", "admin_only_all_dbs", true) of
+ true -> require_admin(Req);
+ false -> Req
+ end;
+authorize_request_int(#httpd{path_parts = [<<"_dbs_info">> | _]} = Req) ->
Req;
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='PUT'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>], method = 'PUT'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>], method='DELETE'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>], method = 'DELETE'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>,<<"_all_docs">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>, <<"_all_docs">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>,<<"_changes">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">>, <<"_changes">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_replicator">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_replicator">> | _]} = Req) ->
db_authorization_check(Req);
-authorize_request_int(#httpd{path_parts=[<<"_reshard">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_reshard">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>], method='PUT'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>], method = 'PUT'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>], method='DELETE'}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>], method = 'DELETE'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>,<<"_all_docs">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>, <<"_all_docs">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>,<<"_changes">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">>, <<"_changes">> | _]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[<<"_users">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_users">> | _]} = Req) ->
db_authorization_check(Req);
-authorize_request_int(#httpd{path_parts=[<<"_", _/binary>>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [<<"_", _/binary>> | _]} = Req) ->
server_authorization_check(Req);
-authorize_request_int(#httpd{path_parts=[_DbName], method='PUT'}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName], method = 'PUT'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName], method='DELETE'}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName], method = 'DELETE'} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_compact">>|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_compact">> | _]} = Req) ->
require_db_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_view_cleanup">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_view_cleanup">>]} = Req) ->
require_db_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_sync_shards">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_sync_shards">>]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_purge">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_purge">>]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName, <<"_purged_infos_limit">>]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName, <<"_purged_infos_limit">>]} = Req) ->
require_admin(Req);
-authorize_request_int(#httpd{path_parts=[_DbName|_]}=Req) ->
+authorize_request_int(#httpd{path_parts = [_DbName | _]} = Req) ->
db_authorization_check(Req).
-
-server_authorization_check(#httpd{path_parts=[<<"_up">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_up">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_uuids">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_uuids">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_session">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_session">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_replicate">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_replicate">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_stats">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_stats">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_active_tasks">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_active_tasks">>]} = Req) ->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_dbs_info">>]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_dbs_info">>]} = Req) ->
Req;
-server_authorization_check(#httpd{method=Method, path_parts=[<<"_utils">>|_]}=Req)
- when Method =:= 'HEAD' orelse Method =:= 'GET' ->
+server_authorization_check(#httpd{method = Method, path_parts = [<<"_utils">> | _]} = Req) when
+ Method =:= 'HEAD' orelse Method =:= 'GET'
+->
Req;
-server_authorization_check(#httpd{path_parts=[<<"_node">>,_ , <<"_stats">>|_]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_stats">> | _]} = Req) ->
require_metrics(Req);
-server_authorization_check(#httpd{path_parts=[<<"_node">>,_ , <<"_system">>|_]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_system">> | _]} = Req) ->
require_metrics(Req);
-server_authorization_check(#httpd{path_parts=[<<"_node">>,_ , <<"_prometheus">>|_]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_node">>, _, <<"_prometheus">> | _]} = Req) ->
require_metrics(Req);
-server_authorization_check(#httpd{path_parts=[<<"_", _/binary>>|_]}=Req) ->
+server_authorization_check(#httpd{path_parts = [<<"_", _/binary>> | _]} = Req) ->
require_admin(Req).
-db_authorization_check(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
+db_authorization_check(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req) ->
{_} = fabric:get_security(DbName, [{user_ctx, Ctx}]),
Req.
-
-require_metrics(#httpd{user_ctx=#user_ctx{roles=UserRoles}}=Req) ->
+require_metrics(#httpd{user_ctx = #user_ctx{roles = UserRoles}} = Req) ->
IsAdmin = lists:member(<<"_admin">>, UserRoles),
IsMetrics = lists:member(<<"_metrics">>, UserRoles),
case {IsAdmin, IsMetrics} of
@@ -126,15 +125,15 @@ require_admin(Req) ->
ok = couch_httpd:verify_is_server_admin(Req),
Req.
-require_db_admin(#httpd{path_parts=[DbName|_],user_ctx=Ctx}=Req) ->
+require_db_admin(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req) ->
Sec = fabric:get_security(DbName, [{user_ctx, Ctx}]),
- case is_db_admin(Ctx,Sec) of
+ case is_db_admin(Ctx, Sec) of
true -> Req;
- false -> throw({unauthorized, <<"You are not a server or db admin.">>})
+ false -> throw({unauthorized, <<"You are not a server or db admin.">>})
end.
-is_db_admin(#user_ctx{name=UserName,roles=UserRoles}, {Security}) ->
+is_db_admin(#user_ctx{name = UserName, roles = UserRoles}, {Security}) ->
{Admins} = couch_util:get_value(<<"admins">>, Security, {[]}),
Names = couch_util:get_value(<<"names">>, Admins, []),
Roles = couch_util:get_value(<<"roles">>, Admins, []),
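Not part of the patch: chttpd_auth_request.erl illustrates how erlfmt lays out clause heads whose guards overflow the line — `when` stays at the end of the head line, the guard expression moves to its own indented line, and `->` closes the head on a line of its own. A hypothetical clause with that shape:

```
-module(erlfmt_guard_example).
-export([check/2]).

%% Guard too long for the head line: `when` ends the head, the guard
%% gets its own line, and `->` stands alone before the body.
check(Method, #{path := [<<"_utils">> | _]} = Req) when
    Method =:= 'HEAD' orelse Method =:= 'GET'
->
    {allow, Req};
check(_Method, Req) ->
    {require_admin, Req}.
```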
diff --git a/src/chttpd/src/chttpd_cors.erl b/src/chttpd/src/chttpd_cors.erl
index 62b19a0d2..70d3163ec 100644
--- a/src/chttpd/src/chttpd_cors.erl
+++ b/src/chttpd/src/chttpd_cors.erl
@@ -12,7 +12,6 @@
-module(chttpd_cors).
-
-export([
maybe_handle_preflight_request/1,
maybe_handle_preflight_request/2,
@@ -24,14 +23,12 @@
get_cors_config/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("chttpd/include/chttpd_cors.hrl").
-
%% http://www.w3.org/TR/cors/#resource-preflight-requests
-maybe_handle_preflight_request(#httpd{method=Method}) when Method /= 'OPTIONS' ->
+maybe_handle_preflight_request(#httpd{method = Method}) when Method /= 'OPTIONS' ->
not_preflight;
maybe_handle_preflight_request(Req) ->
case maybe_handle_preflight_request(Req, get_cors_config(Req)) of
@@ -41,8 +38,7 @@ maybe_handle_preflight_request(Req) ->
chttpd:send_response_no_cors(Req, 204, PreflightHeaders, <<>>)
end.
-
-maybe_handle_preflight_request(#httpd{}=Req, Config) ->
+maybe_handle_preflight_request(#httpd{} = Req, Config) ->
case is_cors_enabled(Config) of
true ->
case preflight_request(Req, Config) of
@@ -61,7 +57,6 @@ maybe_handle_preflight_request(#httpd{}=Req, Config) ->
not_preflight
end.
-
preflight_request(Req, Config) ->
case get_origin(Req) of
undefined ->
@@ -96,70 +91,85 @@ preflight_request(Req, Config) ->
end
end.
-
handle_preflight_request(Req, Config, Origin) ->
case chttpd:header_value(Req, "Access-Control-Request-Method") of
- undefined ->
- %% If there is no Access-Control-Request-Method header
- %% or if parsing failed, do not set any additional headers
- %% and terminate this set of steps. The request is outside
- %% the scope of this specification.
- %% http://www.w3.org/TR/cors/#resource-preflight-requests
- not_preflight;
- Method ->
- SupportedMethods = get_origin_config(Config, Origin,
- <<"allow_methods">>, ?SUPPORTED_METHODS),
-
- SupportedHeaders = get_origin_config(Config, Origin,
- <<"allow_headers">>, ?SUPPORTED_HEADERS),
-
-
- %% get max age
- MaxAge = couch_util:get_value(<<"max_age">>, Config,
- ?CORS_DEFAULT_MAX_AGE),
-
- PreflightHeaders0 = maybe_add_credentials(Config, Origin, [
- {"Access-Control-Allow-Origin", binary_to_list(Origin)},
- {"Access-Control-Max-Age", MaxAge},
- {"Access-Control-Allow-Methods",
- string:join(SupportedMethods, ", ")}]),
-
- case lists:member(Method, SupportedMethods) of
- true ->
- %% method ok , check headers
- AccessHeaders = chttpd:header_value(Req,
- "Access-Control-Request-Headers"),
- {FinalReqHeaders, ReqHeaders} = case AccessHeaders of
- undefined -> {"", []};
- "" -> {"", []};
- Headers ->
- %% transform header list in something we
- %% could check. make sure everything is a
- %% list
- RH = [to_lower(H)
- || H <- split_headers(Headers)],
- {Headers, RH}
- end,
- %% check if headers are supported
- case ReqHeaders -- SupportedHeaders of
- [] ->
- PreflightHeaders = PreflightHeaders0 ++
- [{"Access-Control-Allow-Headers",
- FinalReqHeaders}],
- {ok, PreflightHeaders};
- _ ->
- not_preflight
- end;
- false ->
- %% If method is not a case-sensitive match for any of
- %% the values in list of methods do not set any additional
- %% headers and terminate this set of steps.
+ undefined ->
+ %% If there is no Access-Control-Request-Method header
+ %% or if parsing failed, do not set any additional headers
+ %% and terminate this set of steps. The request is outside
+ %% the scope of this specification.
%% http://www.w3.org/TR/cors/#resource-preflight-requests
- not_preflight
- end
+ not_preflight;
+ Method ->
+ SupportedMethods = get_origin_config(
+ Config,
+ Origin,
+ <<"allow_methods">>,
+ ?SUPPORTED_METHODS
+ ),
+
+ SupportedHeaders = get_origin_config(
+ Config,
+ Origin,
+ <<"allow_headers">>,
+ ?SUPPORTED_HEADERS
+ ),
+
+ %% get max age
+ MaxAge = couch_util:get_value(
+ <<"max_age">>,
+ Config,
+ ?CORS_DEFAULT_MAX_AGE
+ ),
+
+ PreflightHeaders0 = maybe_add_credentials(Config, Origin, [
+ {"Access-Control-Allow-Origin", binary_to_list(Origin)},
+ {"Access-Control-Max-Age", MaxAge},
+ {"Access-Control-Allow-Methods", string:join(SupportedMethods, ", ")}
+ ]),
+
+ case lists:member(Method, SupportedMethods) of
+ true ->
+ %% method ok , check headers
+ AccessHeaders = chttpd:header_value(
+ Req,
+ "Access-Control-Request-Headers"
+ ),
+ {FinalReqHeaders, ReqHeaders} =
+ case AccessHeaders of
+ undefined ->
+ {"", []};
+ "" ->
+ {"", []};
+ Headers ->
+ %% transform header list in something we
+ %% could check. make sure everything is a
+ %% list
+ RH = [
+ to_lower(H)
+ || H <- split_headers(Headers)
+ ],
+ {Headers, RH}
+ end,
+ %% check if headers are supported
+ case ReqHeaders -- SupportedHeaders of
+ [] ->
+ PreflightHeaders =
+ PreflightHeaders0 ++
+ [{"Access-Control-Allow-Headers", FinalReqHeaders}],
+ {ok, PreflightHeaders};
+ _ ->
+ not_preflight
+ end;
+ false ->
+ %% If method is not a case-sensitive match for any of
+ %% the values in list of methods do not set any additional
+ %% headers and terminate this set of steps.
+ %% http://www.w3.org/TR/cors/#resource-preflight-requests
+ not_preflight
+ end
end.
-
headers(Req, RequestHeaders) ->
case get_origin(Req) of
undefined ->
@@ -172,7 +182,6 @@ headers(Req, RequestHeaders) ->
headers(Req, RequestHeaders, Origin, get_cors_config(Req))
end.
-
headers(_Req, RequestHeaders, undefined, _Config) ->
RequestHeaders;
headers(Req, RequestHeaders, Origin, Config) when is_list(Origin) ->
@@ -183,13 +192,13 @@ headers(Req, RequestHeaders, Origin, Config) ->
AcceptedOrigins = get_accepted_origins(Req, Config),
CorsHeaders = handle_headers(Config, Origin, AcceptedOrigins),
ExposedCouchHeaders = couch_util:get_value(
- <<"exposed_headers">>, Config, ?COUCH_HEADERS),
+ <<"exposed_headers">>, Config, ?COUCH_HEADERS
+ ),
maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders);
false ->
RequestHeaders
end.
-
maybe_apply_headers([], RequestHeaders, _ExposedCouchHeaders) ->
RequestHeaders;
maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders) ->
@@ -200,67 +209,64 @@ maybe_apply_headers(CorsHeaders, RequestHeaders, ExposedCouchHeaders) ->
%% need to be exposed.
%% return: RequestHeaders ++ CorsHeaders ++ ACEH
- ExposedHeaders0 = simple_headers([K || {K,_V} <- RequestHeaders]),
+ ExposedHeaders0 = simple_headers([K || {K, _V} <- RequestHeaders]),
%% If Content-Type is not in ExposedHeaders, and the Content-Type
%% is not a member of ?SIMPLE_CONTENT_TYPE_VALUES, then add it
%% into the list of ExposedHeaders
ContentType = proplists:get_value("content-type", ExposedHeaders0),
- IncludeContentType = case ContentType of
- undefined ->
- false;
- _ ->
- lists:member(string:to_lower(ContentType), ?SIMPLE_CONTENT_TYPE_VALUES)
+ IncludeContentType =
+ case ContentType of
+ undefined ->
+ false;
+ _ ->
+ lists:member(string:to_lower(ContentType), ?SIMPLE_CONTENT_TYPE_VALUES)
end,
- ExposedHeaders = case IncludeContentType of
- false ->
- ["content-type" | lists:delete("content-type", ExposedHeaders0)];
- true ->
- ExposedHeaders0
+ ExposedHeaders =
+ case IncludeContentType of
+ false ->
+ ["content-type" | lists:delete("content-type", ExposedHeaders0)];
+ true ->
+ ExposedHeaders0
end,
%% ExposedCouchHeaders may get added later, so expose them by default
- ACEH = [{"Access-Control-Expose-Headers",
- string:join(ExposedHeaders ++ ExposedCouchHeaders, ", ")}],
+ ACEH = [
+ {"Access-Control-Expose-Headers", string:join(ExposedHeaders ++ ExposedCouchHeaders, ", ")}
+ ],
CorsHeaders ++ RequestHeaders ++ ACEH.
-
simple_headers(Headers) ->
LCHeaders = [to_lower(H) || H <- Headers],
lists:filter(fun(H) -> lists:member(H, ?SIMPLE_HEADERS) end, LCHeaders).
-
to_lower(String) when is_binary(String) ->
to_lower(?b2l(String));
to_lower(String) ->
string:to_lower(String).
-
handle_headers(_Config, _Origin, []) ->
[];
handle_headers(Config, Origin, AcceptedOrigins) ->
AcceptAll = lists:member(<<"*">>, AcceptedOrigins),
case AcceptAll orelse lists:member(Origin, AcceptedOrigins) of
- true ->
- make_cors_header(Config, Origin);
- false ->
- %% If the value of the Origin header is not a
- %% case-sensitive match for any of the values
- %% in list of origins, do not set any additional
- %% headers and terminate this set of steps.
- %% http://www.w3.org/TR/cors/#resource-requests
- []
+ true ->
+ make_cors_header(Config, Origin);
+ false ->
+ %% If the value of the Origin header is not a
+ %% case-sensitive match for any of the values
+ %% in list of origins, do not set any additional
+ %% headers and terminate this set of steps.
+ %% http://www.w3.org/TR/cors/#resource-requests
+ []
end.
-
make_cors_header(Config, Origin) ->
Headers = [{"Access-Control-Allow-Origin", binary_to_list(Origin)}],
maybe_add_credentials(Config, Origin, Headers).
-
%% util
-
maybe_add_credentials(Config, Origin, Headers) ->
case allow_credentials(Config, Origin) of
false ->
@@ -269,13 +275,15 @@ maybe_add_credentials(Config, Origin, Headers) ->
Headers ++ [{"Access-Control-Allow-Credentials", "true"}]
end.
-
allow_credentials(_Config, <<"*">>) ->
false;
allow_credentials(Config, Origin) ->
- get_origin_config(Config, Origin, <<"allow_credentials">>,
- ?CORS_DEFAULT_ALLOW_CREDENTIALS).
-
+ get_origin_config(
+ Config,
+ Origin,
+ <<"allow_credentials">>,
+ ?CORS_DEFAULT_ALLOW_CREDENTIALS
+ ).
get_cors_config(#httpd{cors_config = undefined, mochi_req = MochiReq}) ->
Host = couch_httpd_vhost:host(MochiReq),
@@ -283,24 +291,27 @@ get_cors_config(#httpd{cors_config = undefined, mochi_req = MochiReq}) ->
EnableCors = chttpd_util:get_chttpd_config_boolean("enable_cors", false),
AllowCredentials = cors_config(Host, "credentials", "false") =:= "true",
- AllowHeaders = case cors_config(Host, "headers", undefined) of
- undefined ->
- ?SUPPORTED_HEADERS;
- AllowHeaders0 ->
- [to_lower(H) || H <- split_list(AllowHeaders0)]
- end,
- AllowMethods = case cors_config(Host, "methods", undefined) of
- undefined ->
- ?SUPPORTED_METHODS;
- AllowMethods0 ->
- split_list(AllowMethods0)
- end,
- ExposedHeaders = case cors_config(Host, "exposed_headers", undefined) of
- undefined ->
- ?COUCH_HEADERS;
- ExposedHeaders0 ->
- [to_lower(H) || H <- split_list(ExposedHeaders0)]
- end,
+ AllowHeaders =
+ case cors_config(Host, "headers", undefined) of
+ undefined ->
+ ?SUPPORTED_HEADERS;
+ AllowHeaders0 ->
+ [to_lower(H) || H <- split_list(AllowHeaders0)]
+ end,
+ AllowMethods =
+ case cors_config(Host, "methods", undefined) of
+ undefined ->
+ ?SUPPORTED_METHODS;
+ AllowMethods0 ->
+ split_list(AllowMethods0)
+ end,
+ ExposedHeaders =
+ case cors_config(Host, "exposed_headers", undefined) of
+ undefined ->
+ ?COUCH_HEADERS;
+ ExposedHeaders0 ->
+ [to_lower(H) || H <- split_list(ExposedHeaders0)]
+ end,
MaxAge = cors_config(Host, "max_age", ?CORS_DEFAULT_MAX_AGE),
Origins0 = binary_split_list(cors_config(Host, "origins", [])),
Origins = [{O, {[]}} || O <- Origins0],
@@ -316,25 +327,24 @@ get_cors_config(#httpd{cors_config = undefined, mochi_req = MochiReq}) ->
get_cors_config(#httpd{cors_config = Config}) ->
Config.
-
cors_config(Host, Key, Default) ->
- config:get(cors_section(Host), Key,
- config:get("cors", Key, Default)).
-
+ config:get(
+ cors_section(Host),
+ Key,
+ config:get("cors", Key, Default)
+ ).
cors_section(HostValue) ->
HostPort = maybe_strip_scheme(HostValue),
Host = hd(string:tokens(HostPort, ":")),
"cors:" ++ Host.
-
maybe_strip_scheme(Host) ->
case string:str(Host, "://") of
0 -> Host;
N -> string:substr(Host, N + 3)
end.
-
is_cors_enabled(Config) ->
case get(disable_couch_httpd_cors) of
undefined ->
@@ -344,7 +354,6 @@ is_cors_enabled(Config) ->
end,
couch_util:get_value(<<"enable_cors">>, Config, false).
-
%% Get a list of {Origin, OriginConfig} tuples
%% ie: get_origin_configs(Config) ->
%% [
@@ -362,7 +371,6 @@ get_origin_configs(Config) ->
{Origins} = couch_util:get_value(<<"origins">>, Config, {[]}),
Origins.
-
%% Get config for an individual Origin
%% ie: get_origin_config(Config, <<"http://foo.com">>) ->
%% [
@@ -374,15 +382,16 @@ get_origin_config(Config, Origin) ->
{OriginConfig} = couch_util:get_value(Origin, OriginConfigs, {[]}),
OriginConfig.
-
%% Get config of a single key for an individual Origin
%% ie: get_origin_config(Config, <<"http://foo.com">>, <<"allow_methods">>, [])
%% [<<"POST">>]
get_origin_config(Config, Origin, Key, Default) ->
OriginConfig = get_origin_config(Config, Origin),
- couch_util:get_value(Key, OriginConfig,
- couch_util:get_value(Key, Config, Default)).
-
+ couch_util:get_value(
+ Key,
+ OriginConfig,
+ couch_util:get_value(Key, Config, Default)
+ ).
get_origin(Req) ->
case chttpd:header_value(Req, "Origin") of
@@ -392,18 +401,14 @@ get_origin(Req) ->
?l2b(Origin)
end.
-
get_accepted_origins(_Req, Config) ->
- lists:map(fun({K,_V}) -> K end, get_origin_configs(Config)).
-
+ lists:map(fun({K, _V}) -> K end, get_origin_configs(Config)).
split_list(S) ->
re:split(S, "\\s*,\\s*", [trim, {return, list}]).
-
binary_split_list(S) ->
[list_to_binary(E) || E <- split_list(S)].
-
split_headers(H) ->
- re:split(H, ",\\s*", [{return,list}, trim]).
+ re:split(H, ",\\s*", [{return, list}, trim]).
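Not part of the patch: the tail of the chttpd_cors.erl diff shows the smallest class of change — a space after every comma inside tuples and lists (`{return,list}` becoming `{return, list}`), and list comprehensions broken so the generator sits on its own line under the template expression. A toy example with invented names:

```
-module(erlfmt_spacing_example).
-export([split_headers/1, lowercase_all/1]).

split_headers(H) ->
    %% A space follows every comma inside the options list.
    re:split(H, ",\\s*", [{return, list}, trim]).

lowercase_all(Headers) ->
    %% An overflowing comprehension is broken with the generator on
    %% its own line beneath the template expression.
    [
        string:to_lower(Header)
     || Header <- Headers
    ].
```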
diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl
index 8d45eb779..875df6e00 100644
--- a/src/chttpd/src/chttpd_db.erl
+++ b/src/chttpd/src/chttpd_db.erl
@@ -18,18 +18,35 @@
-include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("mem3/include/mem3.hrl").
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
- db_req/2, couch_doc_open/4,handle_changes_req/2,
+-export([
+ handle_request/1,
+ handle_compact_req/2,
+ handle_design_req/2,
+ db_req/2,
+ couch_doc_open/4,
+ handle_changes_req/2,
update_doc_result_to_json/1, update_doc_result_to_json/2,
- handle_design_info_req/3, handle_view_cleanup_req/2,
- update_doc/4, http_code_from_status/1,
- handle_partition_req/2]).
-
--import(chttpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,end_json_response/1,
- start_chunked_response/3, absolute_uri/2, send/2,
- start_response_length/4]).
+ handle_design_info_req/3,
+ handle_view_cleanup_req/2,
+ update_doc/4,
+ http_code_from_status/1,
+ handle_partition_req/2
+]).
+
+-import(
+ chttpd,
+ [
+ send_json/2, send_json/3, send_json/4,
+ send_method_not_allowed/2,
+ start_json_response/2,
+ send_chunk/2,
+ end_json_response/1,
+ start_chunked_response/3,
+ absolute_uri/2,
+ send/2,
+ start_response_length/4
+ ]
+).
-record(doc_query_args, {
options = [],
@@ -52,38 +69,44 @@
threshold
}).
--define(IS_ALL_DOCS(T), (
- T == <<"_all_docs">>
- orelse T == <<"_local_docs">>
- orelse T == <<"_design_docs">>)).
+-define(IS_ALL_DOCS(T),
+ (T == <<"_all_docs">> orelse
+ T == <<"_local_docs">> orelse
+ T == <<"_design_docs">>)
+).
--define(IS_MANGO(T), (
- T == <<"_index">>
- orelse T == <<"_find">>
- orelse T == <<"_explain">>)).
+-define(IS_MANGO(T),
+ (T == <<"_index">> orelse
+ T == <<"_find">> orelse
+ T == <<"_explain">>)
+).
% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method}=Req)->
+handle_request(#httpd{path_parts = [DbName | RestParts], method = Method} = Req) ->
case {Method, RestParts} of
- {'PUT', []} ->
- create_db_req(Req, DbName);
- {'DELETE', []} ->
- % if we get ?rev=... the user is using a faulty script where the
- % document id is empty by accident. Let them recover safely.
- case chttpd:qs_value(Req, "rev", false) of
- false -> delete_db_req(Req, DbName);
- _Rev -> throw({bad_request,
- "You tried to DELETE a database with a ?=rev parameter. "
- ++ "Did you mean to DELETE a document instead?"})
- end;
- {_, []} ->
- do_db_req(Req, fun db_req/2);
- {_, [SecondPart|_]} ->
- Handler = chttpd_handlers:db_handler(SecondPart, fun db_req/2),
- do_db_req(Req, Handler)
+ {'PUT', []} ->
+ create_db_req(Req, DbName);
+ {'DELETE', []} ->
+ % if we get ?rev=... the user is using a faulty script where the
+ % document id is empty by accident. Let them recover safely.
+ case chttpd:qs_value(Req, "rev", false) of
+ false ->
+ delete_db_req(Req, DbName);
+ _Rev ->
+ throw(
+ {bad_request,
+ "You tried to DELETE a database with a ?=rev parameter. " ++
+ "Did you mean to DELETE a document instead?"}
+ )
+ end;
+ {_, []} ->
+ do_db_req(Req, fun db_req/2);
+ {_, [SecondPart | _]} ->
+ Handler = chttpd_handlers:db_handler(SecondPart, fun db_req/2),
+ do_db_req(Req, Handler)
end.
-handle_changes_req(#httpd{method='POST'}=Req, Db) ->
+handle_changes_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
case chttpd:body_length(Req) of
0 ->
@@ -92,50 +115,50 @@ handle_changes_req(#httpd{method='POST'}=Req, Db) ->
{JsonProps} = chttpd:json_body_obj(Req),
handle_changes_req1(Req#httpd{req_body = {JsonProps}}, Db)
end;
-handle_changes_req(#httpd{method='GET'}=Req, Db) ->
+handle_changes_req(#httpd{method = 'GET'} = Req, Db) ->
handle_changes_req1(Req, Db);
-handle_changes_req(#httpd{path_parts=[_,<<"_changes">>]}=Req, _Db) ->
+handle_changes_req(#httpd{path_parts = [_, <<"_changes">>]} = Req, _Db) ->
send_method_not_allowed(Req, "GET,POST,HEAD").
-handle_changes_req1(#httpd{}=Req, Db) ->
- #changes_args{filter=Raw, style=Style} = Args0 = parse_changes_query(Req),
+handle_changes_req1(#httpd{} = Req, Db) ->
+ #changes_args{filter = Raw, style = Style} = Args0 = parse_changes_query(Req),
ChangesArgs = Args0#changes_args{
filter_fun = couch_changes:configure_filter(Raw, Style, Req, Db),
db_open_options = [{user_ctx, couch_db:get_user_ctx(Db)}]
},
Max = chttpd:chunked_response_buffer_size(),
case ChangesArgs#changes_args.feed of
- "normal" ->
- T0 = os:timestamp(),
- {ok, Info} = fabric:get_db_info(Db),
- Suffix = mem3:shard_suffix(Db),
- Etag = chttpd:make_etag({Info, Suffix}),
- DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
- couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
- chttpd:etag_respond(Req, Etag, fun() ->
+ "normal" ->
+ T0 = os:timestamp(),
+ {ok, Info} = fabric:get_db_info(Db),
+ Suffix = mem3:shard_suffix(Db),
+ Etag = chttpd:make_etag({Info, Suffix}),
+ DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
+ couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
+ chttpd:etag_respond(Req, Etag, fun() ->
+ Acc0 = #cacc{
+ feed = normal,
+ etag = Etag,
+ mochi = Req,
+ threshold = Max
+ },
+ fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
+ end);
+ Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
+ couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
Acc0 = #cacc{
- feed = normal,
- etag = Etag,
+ feed = list_to_atom(Feed),
mochi = Req,
threshold = Max
},
- fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
- end);
- Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
- couch_stats:increment_counter([couchdb, httpd, clients_requesting_changes]),
- Acc0 = #cacc{
- feed = list_to_atom(Feed),
- mochi = Req,
- threshold = Max
- },
- try
- fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
- after
- couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
- end;
- _ ->
- Msg = <<"Supported `feed` types: normal, continuous, live, longpoll, eventsource">>,
- throw({bad_request, Msg})
+ try
+ fabric:changes(Db, fun changes_callback/2, Acc0, ChangesArgs)
+ after
+ couch_stats:decrement_counter([couchdb, httpd, clients_requesting_changes])
+ end;
+ _ ->
+ Msg = <<"Supported `feed` types: normal, continuous, live, longpoll, eventsource">>,
+ throw({bad_request, Msg})
end.
% callbacks for continuous feed (newline-delimited JSON Objects)
@@ -149,14 +172,14 @@ changes_callback({change, Change}, #cacc{feed = continuous} = Acc) ->
maybe_flush_changes_feed(Acc, Data, Len);
changes_callback({stop, EndSeq, Pending}, #cacc{feed = continuous} = Acc) ->
#cacc{mochi = Resp, buffer = Buf} = Acc,
- Row = {[
- {<<"last_seq">>, EndSeq},
- {<<"pending">>, Pending}
- ]},
+ Row =
+ {[
+ {<<"last_seq">>, EndSeq},
+ {<<"pending">>, Pending}
+ ]},
Data = [Buf, ?JSON_ENCODE(Row) | "\n"],
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Data),
chttpd:end_delayed_json_response(Resp1);
-
% callbacks for eventsource feed (newline-delimited eventsource Objects)
changes_callback(start, #cacc{feed = eventsource} = Acc) ->
#cacc{mochi = Req} = Acc,
@@ -166,12 +189,15 @@ changes_callback(start, #cacc{feed = eventsource} = Acc) ->
],
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200, Headers),
{ok, Acc#cacc{mochi = Resp, responding = true}};
-changes_callback({change, {ChangeProp}=Change}, #cacc{feed = eventsource} = Acc) ->
+changes_callback({change, {ChangeProp} = Change}, #cacc{feed = eventsource} = Acc) ->
chttpd_stats:incr_rows(),
Seq = proplists:get_value(seq, ChangeProp),
Chunk = [
- "data: ", ?JSON_ENCODE(Change),
- "\n", "id: ", ?JSON_ENCODE(Seq),
+ "data: ",
+ ?JSON_ENCODE(Change),
+ "\n",
+ "id: ",
+ ?JSON_ENCODE(Seq),
"\n\n"
],
Len = iolist_size(Chunk),
@@ -185,13 +211,16 @@ changes_callback({stop, _EndSeq}, #cacc{feed = eventsource} = Acc) ->
#cacc{mochi = Resp, buffer = Buf} = Acc,
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Buf),
chttpd:end_delayed_json_response(Resp1);
-
% callbacks for longpoll and normal (single JSON Object)
changes_callback(start, #cacc{feed = normal} = Acc) ->
#cacc{etag = Etag, mochi = Req} = Acc,
FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200,
- [{"ETag",Etag}], FirstChunk),
+ {ok, Resp} = chttpd:start_delayed_json_response(
+ Req,
+ 200,
+ [{"ETag", Etag}],
+ FirstChunk
+ ),
{ok, Acc#cacc{mochi = Resp, responding = true}};
changes_callback(start, Acc) ->
#cacc{mochi = Req} = Acc,
@@ -214,7 +243,6 @@ changes_callback({stop, EndSeq, Pending}, Acc) ->
],
{ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, Terminator, Max),
chttpd:end_delayed_json_response(Resp1);
-
changes_callback(waiting_for_updates, #cacc{buffer = []} = Acc) ->
#cacc{mochi = Resp, chunks_sent = ChunksSent} = Acc,
case ChunksSent > 0 of
@@ -246,11 +274,12 @@ changes_callback({error, Reason}, #cacc{feed = normal, responding = false} = Acc
changes_callback({error, Reason}, Acc) ->
chttpd:send_delayed_error(Acc#cacc.mochi, Reason).
-maybe_flush_changes_feed(#cacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
+maybe_flush_changes_feed(#cacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
+ Size > 0 andalso (Size + Len) > Max
+->
#cacc{buffer = Buffer, mochi = Resp} = Acc,
{ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
- {ok, Acc#cacc{prepend = ",\r\n", buffer = Data, bufsize=Len, mochi = R1}};
+ {ok, Acc#cacc{prepend = ",\r\n", buffer = Data, bufsize = Len, mochi = R1}};
maybe_flush_changes_feed(Acc0, Data, Len) ->
#cacc{buffer = Buf, bufsize = Size, chunks_sent = ChunksSent} = Acc0,
Acc = Acc0#cacc{
@@ -261,7 +290,7 @@ maybe_flush_changes_feed(Acc0, Data, Len) ->
},
{ok, Acc}.
-handle_compact_req(#httpd{method='POST'}=Req, Db) ->
+handle_compact_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
case Req#httpd.path_parts of
[_DbName, <<"_compact">>] ->
@@ -276,7 +305,6 @@ handle_compact_req(#httpd{method='POST'}=Req, Db) ->
throw(Error)
end
end;
-
handle_compact_req(Req, _Db) ->
send_method_not_allowed(Req, "POST").
@@ -284,11 +312,9 @@ handle_view_cleanup_req(Req, Db) ->
ok = fabric:cleanup_index_files_all_nodes(Db),
send_json(Req, 202, {[{ok, true}]}).
-
-handle_partition_req(#httpd{path_parts=[_,_]}=_Req, _Db) ->
+handle_partition_req(#httpd{path_parts = [_, _]} = _Req, _Db) ->
throw({bad_request, invalid_partition_req});
-
-handle_partition_req(#httpd{method='GET', path_parts=[_,_,PartId]}=Req, Db) ->
+handle_partition_req(#httpd{method = 'GET', path_parts = [_, _, PartId]} = Req, Db) ->
couch_partition:validate_partition(PartId),
case couch_db:is_partitioned(Db) of
true ->
@@ -297,25 +323,30 @@ handle_partition_req(#httpd{method='GET', path_parts=[_,_,PartId]}=Req, Db) ->
false ->
throw({bad_request, <<"database is not partitioned">>})
end;
-
-handle_partition_req(#httpd{method='POST',
- path_parts=[_, <<"_partition">>, <<"_", _/binary>>]}, _Db) ->
+handle_partition_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, <<"_partition">>, <<"_", _/binary>>]
+ },
+ _Db
+) ->
Msg = <<"Partition must not start with an underscore">>,
throw({illegal_partition, Msg});
-
-handle_partition_req(#httpd{path_parts = [_, _, _]}=Req, _Db) ->
+handle_partition_req(#httpd{path_parts = [_, _, _]} = Req, _Db) ->
send_method_not_allowed(Req, "GET");
-
-handle_partition_req(#httpd{path_parts=[DbName, _, PartId | Rest]}=Req, Db) ->
+handle_partition_req(#httpd{path_parts = [DbName, _, PartId | Rest]} = Req, Db) ->
case couch_db:is_partitioned(Db) of
true ->
couch_partition:validate_partition(PartId),
QS = chttpd:qs(Req),
PartIdStr = ?b2l(PartId),
QSPartIdStr = couch_util:get_value("partition", QS, PartIdStr),
- if QSPartIdStr == PartIdStr -> ok; true ->
- Msg = <<"Conflicting value for `partition` in query string">>,
- throw({bad_request, Msg})
+ if
+ QSPartIdStr == PartIdStr ->
+ ok;
+ true ->
+ Msg = <<"Conflicting value for `partition` in query string">>,
+ throw({bad_request, Msg})
end,
NewQS = lists:ukeysort(1, [{"partition", PartIdStr} | QS]),
NewReq = Req#httpd{
@@ -339,55 +370,59 @@ handle_partition_req(#httpd{path_parts=[DbName, _, PartId | Rest]}=Req, Db) ->
false ->
throw({bad_request, <<"database is not partitioned">>})
end;
-
handle_partition_req(Req, _Db) ->
chttpd:send_error(Req, not_found).
update_partition_stats(PathParts) ->
case PathParts of
- [<<"_design">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_view_requests]);
- [<<"_all_docs">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_all_docs_requests]);
- [<<"_find">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_find_requests]);
- [<<"_explain">> | _] ->
- couch_stats:increment_counter([couchdb, httpd, partition_explain_requests]);
- _ ->
- ok % ignore path that do not match
- end.
-
+ [<<"_design">> | _] ->
+ couch_stats:increment_counter([couchdb, httpd, partition_view_requests]);
+ [<<"_all_docs">> | _] ->
+ couch_stats:increment_counter([couchdb, httpd, partition_all_docs_requests]);
+ [<<"_find">> | _] ->
+ couch_stats:increment_counter([couchdb, httpd, partition_find_requests]);
+ [<<"_explain">> | _] ->
+ couch_stats:increment_counter([couchdb, httpd, partition_explain_requests]);
+ _ ->
+ % ignore path that do not match
+ ok
+ end.
-handle_design_req(#httpd{
- path_parts=[_DbName, _Design, Name, <<"_",_/binary>> = Action | _Rest]
- }=Req, Db) ->
+handle_design_req(
+ #httpd{
+ path_parts = [_DbName, _Design, Name, <<"_", _/binary>> = Action | _Rest]
+ } = Req,
+ Db
+) ->
DbName = mem3:dbname(couch_db:name(Db)),
case ddoc_cache:open(DbName, <<"_design/", Name/binary>>) of
- {ok, DDoc} ->
- Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
- Handler(Req, Db, DDoc);
- Error ->
- throw(Error)
+ {ok, DDoc} ->
+ Handler = chttpd_handlers:design_handler(Action, fun bad_action_req/3),
+ Handler(Req, Db, DDoc);
+ Error ->
+ throw(Error)
end;
-
handle_design_req(Req, Db) ->
db_req(Req, Db).
-bad_action_req(#httpd{path_parts=[_, _, Name|FileNameParts]}=Req, Db, _DDoc) ->
- db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts).
+bad_action_req(#httpd{path_parts = [_, _, Name | FileNameParts]} = Req, Db, _DDoc) ->
+ db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts).
-handle_design_info_req(#httpd{method='GET'}=Req, Db, #doc{} = DDoc) ->
+handle_design_info_req(#httpd{method = 'GET'} = Req, Db, #doc{} = DDoc) ->
[_, _, Name, _] = Req#httpd.path_parts,
{ok, GroupInfoList} = fabric:get_view_group_info(Db, DDoc),
- send_json(Req, 200, {[
- {name, Name},
- {view_index, {GroupInfoList}}
- ]});
-
+ send_json(
+ Req,
+ 200,
+ {[
+ {name, Name},
+ {view_index, {GroupInfoList}}
+ ]}
+ );
handle_design_info_req(Req, _Db, _DDoc) ->
send_method_not_allowed(Req, "GET").
-create_db_req(#httpd{}=Req, DbName) ->
+create_db_req(#httpd{} = Req, DbName) ->
couch_httpd:verify_is_server_admin(Req),
ShardsOpt = parse_shards_opt(Req),
EngineOpt = parse_engine_opt(Req),
@@ -395,202 +430,237 @@ create_db_req(#httpd{}=Req, DbName) ->
Options = lists:append([ShardsOpt, [{props, DbProps}], EngineOpt]),
DocUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
case fabric:create_db(DbName, Options) of
- ok ->
- send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
- accepted ->
- send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
- {error, file_exists} ->
- chttpd:send_error(Req, file_exists);
- Error ->
- throw(Error)
+ ok ->
+ send_json(Req, 201, [{"Location", DocUrl}], {[{ok, true}]});
+ accepted ->
+ send_json(Req, 202, [{"Location", DocUrl}], {[{ok, true}]});
+ {error, file_exists} ->
+ chttpd:send_error(Req, file_exists);
+ Error ->
+ throw(Error)
end.
-delete_db_req(#httpd{}=Req, DbName) ->
+delete_db_req(#httpd{} = Req, DbName) ->
couch_httpd:verify_is_server_admin(Req),
case fabric:delete_db(DbName, []) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- accepted ->
- send_json(Req, 202, {[{ok, true}]});
- Error ->
- throw(Error)
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ accepted ->
+ send_json(Req, 202, {[{ok, true}]});
+ Error ->
+ throw(Error)
end.
-do_db_req(#httpd{path_parts=[DbName|_], user_ctx=Ctx}=Req, Fun) ->
+do_db_req(#httpd{path_parts = [DbName | _], user_ctx = Ctx} = Req, Fun) ->
Shard = hd(mem3:shards(DbName)),
Props = couch_util:get_value(props, Shard#shard.opts, []),
- Opts = case Ctx of
- undefined ->
- [{props, Props}];
- #user_ctx{} ->
- [{user_ctx, Ctx}, {props, Props}]
- end,
+ Opts =
+ case Ctx of
+ undefined ->
+ [{props, Props}];
+ #user_ctx{} ->
+ [{user_ctx, Ctx}, {props, Props}]
+ end,
{ok, Db} = couch_db:clustered_db(DbName, Opts),
Fun(Req, Db).
-db_req(#httpd{method='GET',path_parts=[DbName]}=Req, _Db) ->
+db_req(#httpd{method = 'GET', path_parts = [DbName]} = Req, _Db) ->
% measure the time required to generate the etag, see if it's worth it
T0 = os:timestamp(),
{ok, DbInfo} = fabric:get_db_info(DbName),
DeltaT = timer:now_diff(os:timestamp(), T0) / 1000,
couch_stats:update_histogram([couchdb, dbinfo], DeltaT),
send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST', path_parts=[DbName], user_ctx=Ctx}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [DbName], user_ctx = Ctx} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx,Ctx}, {w,W}],
+ Options = [{user_ctx, Ctx}, {w, W}],
Doc = couch_db:doc_from_json_obj_validate(Db, chttpd:json_body(Req)),
validate_attachment_names(Doc),
- Doc2 = case Doc#doc.id of
- <<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
- _ ->
- Doc
- end,
+ Doc2 =
+ case Doc#doc.id of
+ <<"">> ->
+ Doc#doc{id = couch_uuids:new(), revs = {0, []}};
+ _ ->
+ Doc
+ end,
DocId = Doc2#doc.id,
case chttpd:qs_value(Req, "batch") of
- "ok" ->
- % async_batching
- spawn(fun() ->
- case catch(fabric:update_doc(Db, Doc2, Options)) of
- {ok, _} ->
- chttpd_stats:incr_writes(),
- ok;
- {accepted, _} ->
- chttpd_stats:incr_writes(),
- ok;
- Error ->
- couch_log:debug("Batch doc error (~s): ~p",[DocId, Error])
+ "ok" ->
+ % async_batching
+ spawn(fun() ->
+ case catch (fabric:update_doc(Db, Doc2, Options)) of
+ {ok, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ {accepted, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ Error ->
+ couch_log:debug("Batch doc error (~s): ~p", [DocId, Error])
end
end),
- send_json(Req, 202, [], {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- DocUrl = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
- $/, couch_util:url_encode(DocId)]),
- case fabric:update_doc(Db, Doc2, Options) of
- {ok, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
- end,
- send_json(Req, HttpCode, [{"Location", DocUrl}], {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]})
+ send_json(
+ Req,
+ 202,
+ [],
+ {[
+ {ok, true},
+ {id, DocId}
+ ]}
+ );
+ _Normal ->
+ % normal
+ DocUrl = absolute_uri(Req, [
+ $/,
+ couch_util:url_encode(DbName),
+ $/,
+ couch_util:url_encode(DocId)
+ ]),
+ case fabric:update_doc(Db, Doc2, Options) of
+ {ok, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
+ end,
+ send_json(
+ Req,
+ HttpCode,
+ [{"Location", DocUrl}],
+ {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]}
+ )
end;
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_DbName]} = Req, _Db) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST', path_parts=[DbName, <<"_ensure_full_commit">>],
- user_ctx=Ctx}=Req, _Db) ->
+db_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [DbName, <<"_ensure_full_commit">>],
+ user_ctx = Ctx
+ } = Req,
+ _Db
+) ->
chttpd:validate_ctype(Req, "application/json"),
%% use fabric call to trigger a database_does_not_exist exception
%% for missing databases that'd return error 404 from chttpd
%% get_security used to prefer shards on the same node over other nodes
fabric:get_security(DbName, [{user_ctx, Ctx}]),
- send_json(Req, 201, {[
- {ok, true},
- {instance_start_time, <<"0">>}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
+ send_json(
+ Req,
+ 201,
+ {[
+ {ok, true},
+ {instance_start_time, <<"0">>}
+ ]}
+ );
+db_req(#httpd{path_parts = [_, <<"_ensure_full_commit">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>], user_ctx=Ctx}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>], user_ctx = Ctx} = Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
chttpd:validate_ctype(Req, "application/json"),
{JsonProps} = chttpd:json_body_obj(Req),
- DocsArray = case couch_util:get_value(<<"docs">>, JsonProps) of
- undefined ->
- throw({bad_request, <<"POST body must include `docs` parameter.">>});
- DocsArray0 when not is_list(DocsArray0) ->
- throw({bad_request, <<"`docs` parameter must be an array.">>});
- DocsArray0 ->
- DocsArray0
- end,
+ DocsArray =
+ case couch_util:get_value(<<"docs">>, JsonProps) of
+ undefined ->
+ throw({bad_request, <<"POST body must include `docs` parameter.">>});
+ DocsArray0 when not is_list(DocsArray0) ->
+ throw({bad_request, <<"`docs` parameter must be an array.">>});
+ DocsArray0 ->
+ DocsArray0
+ end,
couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
- W = case couch_util:get_value(<<"w">>, JsonProps) of
- Value when is_integer(Value) ->
- integer_to_list(Value);
- _ ->
- chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db)))
- end,
+ W =
+ case couch_util:get_value(<<"w">>, JsonProps) of
+ Value when is_integer(Value) ->
+ integer_to_list(Value);
+ _ ->
+ chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db)))
+ end,
case chttpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit, {user_ctx,Ctx}, {w,W}];
- "false" ->
- Options = [delay_commit, {user_ctx,Ctx}, {w,W}];
- _ ->
- Options = [{user_ctx,Ctx}, {w,W}]
+ "true" ->
+ Options = [full_commit, {user_ctx, Ctx}, {w, W}];
+ "false" ->
+ Options = [delay_commit, {user_ctx, Ctx}, {w, W}];
+ _ ->
+ Options = [{user_ctx, Ctx}, {w, W}]
end,
NewEdits = couch_util:get_value(<<"new_edits">>, JsonProps, true),
- Docs = lists:map(fun(JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_revs(Doc, NewEdits),
- validate_attachment_names(Doc),
- case Doc#doc.id of
- <<>> -> Doc#doc{id = couch_uuids:new()};
- _ -> Doc
- end
- end, DocsArray),
- case NewEdits of
- true ->
- Options2 =
- case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
- true -> [all_or_nothing|Options];
- _ -> Options
+ Docs = lists:map(
+ fun(JsonObj) ->
+ Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
+ validate_revs(Doc, NewEdits),
+ validate_attachment_names(Doc),
+ case Doc#doc.id of
+ <<>> -> Doc#doc{id = couch_uuids:new()};
+ _ -> Doc
+ end
end,
- case fabric:update_docs(Db, Docs, Options2) of
- {ok, Results} ->
- % output the results
- chttpd_stats:incr_writes(length(Results)),
- DocResults = lists:zipwith(fun update_doc_result_to_json/2,
- Docs, Results),
- send_json(Req, 201, DocResults);
- {accepted, Results} ->
- % output the results
- chttpd_stats:incr_writes(length(Results)),
- DocResults = lists:zipwith(fun update_doc_result_to_json/2,
- Docs, Results),
- send_json(Req, 202, DocResults);
- {aborted, Errors} ->
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 417, ErrorsJson)
- end;
- false ->
- case fabric:update_docs(Db, Docs, [replicated_changes|Options]) of
- {ok, Errors} ->
- chttpd_stats:incr_writes(length(Docs)),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 201, ErrorsJson);
- {accepted, Errors} ->
- chttpd_stats:incr_writes(length(Docs)),
- ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 202, ErrorsJson)
- end;
- _ ->
- throw({bad_request, <<"`new_edits` parameter must be a boolean.">>})
+ DocsArray
+ ),
+ case NewEdits of
+ true ->
+ Options2 =
+ case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
+ true -> [all_or_nothing | Options];
+ _ -> Options
+ end,
+ case fabric:update_docs(Db, Docs, Options2) of
+ {ok, Results} ->
+ % output the results
+ chttpd_stats:incr_writes(length(Results)),
+ DocResults = lists:zipwith(
+ fun update_doc_result_to_json/2,
+ Docs,
+ Results
+ ),
+ send_json(Req, 201, DocResults);
+ {accepted, Results} ->
+ % output the results
+ chttpd_stats:incr_writes(length(Results)),
+ DocResults = lists:zipwith(
+ fun update_doc_result_to_json/2,
+ Docs,
+ Results
+ ),
+ send_json(Req, 202, DocResults);
+ {aborted, Errors} ->
+ ErrorsJson =
+ lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 417, ErrorsJson)
+ end;
+ false ->
+ case fabric:update_docs(Db, Docs, [replicated_changes | Options]) of
+ {ok, Errors} ->
+ chttpd_stats:incr_writes(length(Docs)),
+ ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 201, ErrorsJson);
+ {accepted, Errors} ->
+ chttpd_stats:incr_writes(length(Docs)),
+ ErrorsJson = lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 202, ErrorsJson)
+ end;
+ _ ->
+ throw({bad_request, <<"`new_edits` parameter must be a boolean.">>})
end;
-
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_bulk_docs">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-
-db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
- mochi_req=MochiReq}=Req, Db) ->
+db_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, <<"_bulk_get">>],
+ mochi_req = MochiReq
+ } = Req,
+ Db
+) ->
couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
couch_httpd:validate_ctype(Req, "application/json"),
{JsonProps} = chttpd:json_body_obj(Req),
@@ -603,7 +673,7 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
} = bulk_get_parse_doc_query(Req),
Options = [{user_ctx, Req#httpd.user_ctx} | Options0],
- AcceptJson = MochiReq:accepts_content_type("application/json"),
+ AcceptJson = MochiReq:accepts_content_type("application/json"),
AcceptMixedMp = MochiReq:accepts_content_type("multipart/mixed"),
AcceptRelatedMp = MochiReq:accepts_content_type("multipart/related"),
AcceptMp = not AcceptJson andalso (AcceptMixedMp orelse AcceptRelatedMp),
@@ -611,60 +681,80 @@ db_req(#httpd{method='POST', path_parts=[_, <<"_bulk_get">>],
false ->
{ok, Resp} = start_json_response(Req, 200),
send_chunk(Resp, <<"{\"results\": [">>),
- lists:foldl(fun(Doc, Sep) ->
- {DocId, Results, Options1} = bulk_get_open_doc_revs(Db, Doc,
- Options),
- bulk_get_send_docs_json(Resp, DocId, Results, Options1, Sep),
- <<",">>
- end, <<"">>, Docs),
+ lists:foldl(
+ fun(Doc, Sep) ->
+ {DocId, Results, Options1} = bulk_get_open_doc_revs(
+ Db,
+ Doc,
+ Options
+ ),
+ bulk_get_send_docs_json(Resp, DocId, Results, Options1, Sep),
+ <<",">>
+ end,
+ <<"">>,
+ Docs
+ ),
send_chunk(Resp, <<"]}">>),
end_json_response(Resp);
true ->
OuterBoundary = bulk_get_multipart_boundary(),
- MpType = case AcceptMixedMp of
- true ->
- "multipart/mixed";
- _ ->
- "multipart/related"
- end,
- CType = {"Content-Type", MpType ++ "; boundary=\"" ++
- ?b2l(OuterBoundary) ++ "\""},
+ MpType =
+ case AcceptMixedMp of
+ true ->
+ "multipart/mixed";
+ _ ->
+ "multipart/related"
+ end,
+ CType =
+ {"Content-Type",
+ MpType ++ "; boundary=\"" ++
+ ?b2l(OuterBoundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 200, [CType]),
- lists:foldl(fun(Doc, _Pre) ->
- case bulk_get_open_doc_revs(Db, Doc, Options) of
- {_, {ok, []}, _Options1} ->
- ok;
- {_, {ok, Results}, Options1} ->
- send_docs_multipart_bulk_get(Results, Options1,
- OuterBoundary, Resp);
- {DocId, {error, {RevId, Error, Reason}}, _Options1} ->
- Json = ?JSON_ENCODE({[
- {<<"id">>, DocId},
- {<<"rev">>, RevId},
- {<<"error">>, Error},
- {<<"reason">>, Reason}
- ]}),
- couch_httpd:send_chunk(Resp,[
- <<"\r\n--", OuterBoundary/binary>>,
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json
- ])
- end
- end, <<"">>, Docs),
+ lists:foldl(
+ fun(Doc, _Pre) ->
+ case bulk_get_open_doc_revs(Db, Doc, Options) of
+ {_, {ok, []}, _Options1} ->
+ ok;
+ {_, {ok, Results}, Options1} ->
+ send_docs_multipart_bulk_get(
+ Results,
+ Options1,
+ OuterBoundary,
+ Resp
+ );
+ {DocId, {error, {RevId, Error, Reason}}, _Options1} ->
+ Json = ?JSON_ENCODE(
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, RevId},
+ {<<"error">>, Error},
+ {<<"reason">>, Reason}
+ ]}
+ ),
+ couch_httpd:send_chunk(Resp, [
+ <<"\r\n--", OuterBoundary/binary>>,
+ <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json
+ ])
+ end
+ end,
+ <<"">>,
+ Docs
+ ),
case Docs of
[] ->
ok;
_ ->
- couch_httpd:send_chunk(Resp, <<"\r\n", "--", OuterBoundary/binary, "--\r\n">>)
+ couch_httpd:send_chunk(
+ Resp, <<"\r\n", "--", OuterBoundary/binary, "--\r\n">>
+ )
end,
couch_httpd:last_chunk(Resp)
end
end;
-db_req(#httpd{path_parts=[_, <<"_bulk_get">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_bulk_get">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_purge">>]} = Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, purge_requests]),
chttpd:validate_ctype(Req, "application/json"),
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
@@ -676,88 +766,94 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
false -> throw({bad_request, "Exceeded maximum number of documents."});
true -> ok
end,
- RevsLen = lists:foldl(fun({_Id, Revs}, Acc) ->
- length(Revs) + Acc
- end, 0, IdsRevs2),
+ RevsLen = lists:foldl(
+ fun({_Id, Revs}, Acc) ->
+ length(Revs) + Acc
+ end,
+ 0,
+ IdsRevs2
+ ),
MaxRevs = config:get_integer("purge", "max_revisions_number", 1000),
case RevsLen =< MaxRevs of
false -> throw({bad_request, "Exceeded maximum number of revisions."});
true -> ok
end,
couch_stats:increment_counter([couchdb, document_purges, total], length(IdsRevs2)),
- Results2 = case fabric:purge_docs(Db, IdsRevs2, Options) of
- {ok, Results} ->
- chttpd_stats:incr_writes(length(Results)),
- Results;
- {accepted, Results} ->
- chttpd_stats:incr_writes(length(Results)),
- Results
- end,
+ Results2 =
+ case fabric:purge_docs(Db, IdsRevs2, Options) of
+ {ok, Results} ->
+ chttpd_stats:incr_writes(length(Results)),
+ Results;
+ {accepted, Results} ->
+ chttpd_stats:incr_writes(length(Results)),
+ Results
+ end,
{Code, Json} = purge_results_to_json(IdsRevs2, Results2),
send_json(Req, Code, {[{<<"purge_seq">>, null}, {<<"purged">>, {Json}}]});
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_purge">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-
-db_req(#httpd{method='GET',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
+db_req(#httpd{method = 'GET', path_parts = [_, OP]} = Req, Db) when ?IS_ALL_DOCS(OP) ->
case chttpd:qs_json_value(Req, "keys", nil) of
- Keys when is_list(Keys) ->
- all_docs_view(Req, Db, Keys, OP);
- nil ->
- all_docs_view(Req, Db, undefined, OP);
- _ ->
- throw({bad_request, "`keys` parameter must be an array."})
+ Keys when is_list(Keys) ->
+ all_docs_view(Req, Db, Keys, OP);
+ nil ->
+ all_docs_view(Req, Db, undefined, OP);
+ _ ->
+ throw({bad_request, "`keys` parameter must be an array."})
end;
-
-db_req(#httpd{method='POST',
- path_parts=[_, OP, <<"queries">>]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
+db_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, OP, <<"queries">>]
+ } = Req,
+ Db
+) when ?IS_ALL_DOCS(OP) ->
Props = chttpd:json_body_obj(Req),
case couch_mrview_util:get_view_queries(Props) of
undefined ->
- throw({bad_request,
- <<"POST body must include `queries` parameter.">>});
+ throw({bad_request, <<"POST body must include `queries` parameter.">>});
Queries ->
multi_all_docs_view(Req, Db, OP, Queries)
end;
-
-db_req(#httpd{path_parts=[_, OP, <<"queries">>]}=Req,
- _Db) when ?IS_ALL_DOCS(OP) ->
+db_req(
+ #httpd{path_parts = [_, OP, <<"queries">>]} = Req,
+ _Db
+) when ?IS_ALL_DOCS(OP) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,OP]}=Req, Db) when ?IS_ALL_DOCS(OP) ->
+db_req(#httpd{method = 'POST', path_parts = [_, OP]} = Req, Db) when ?IS_ALL_DOCS(OP) ->
chttpd:validate_ctype(Req, "application/json"),
{Fields} = chttpd:json_body_obj(Req),
case couch_util:get_value(<<"keys">>, Fields, nil) of
- Keys when is_list(Keys) ->
- all_docs_view(Req, Db, Keys, OP);
- nil ->
- all_docs_view(Req, Db, undefined, OP);
- _ ->
- throw({bad_request, "`keys` body member must be an array."})
+ Keys when is_list(Keys) ->
+ all_docs_view(Req, Db, Keys, OP);
+ nil ->
+ all_docs_view(Req, Db, undefined, OP);
+ _ ->
+ throw({bad_request, "`keys` body member must be an array."})
end;
-
-db_req(#httpd{path_parts=[_,OP]}=Req, _Db) when ?IS_ALL_DOCS(OP) ->
+db_req(#httpd{path_parts = [_, OP]} = Req, _Db) when ?IS_ALL_DOCS(OP) ->
send_method_not_allowed(Req, "GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_missing_revs">>]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = chttpd:json_body_obj(Req),
case fabric:get_missing_revs(Db, JsonDocIdRevs) of
{error, Reason} ->
chttpd:send_error(Req, Reason);
{ok, Results} ->
- Results2 = [{Id, couch_doc:revs_to_strs(Revs)} ||
- {Id, Revs, _} <- Results],
- send_json(Req, {[
- {missing_revs, {Results2}}
- ]})
+ Results2 = [
+ {Id, couch_doc:revs_to_strs(Revs)}
+ || {Id, Revs, _} <- Results
+ ],
+ send_json(
+ Req,
+ {[
+ {missing_revs, {Results2}}
+ ]}
+ )
end;
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_missing_revs">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_revs_diff">>]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = chttpd:json_body_obj(Req),
case fabric:get_missing_revs(Db, JsonDocIdRevs) of
@@ -765,24 +861,31 @@ db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
chttpd:send_error(Req, Reason);
{ok, Results} ->
Results2 =
- lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
- {Id,
- {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
- if PossibleAncestors == [] ->
- [];
- true ->
- [{possible_ancestors,
- couch_doc:revs_to_strs(PossibleAncestors)}]
- end}}
- end, Results),
+ lists:map(
+ fun({Id, MissingRevs, PossibleAncestors}) ->
+ {Id, {
+ [{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
+ if
+ PossibleAncestors == [] ->
+ [];
+ true ->
+ [
+ {possible_ancestors,
+ couch_doc:revs_to_strs(PossibleAncestors)}
+ ]
+ end
+ }}
+ end,
+ Results
+ ),
send_json(Req, {Results2})
end;
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_revs_diff">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
- Db) ->
+db_req(
+ #httpd{method = 'PUT', path_parts = [_, <<"_security">>], user_ctx = Ctx} = Req,
+ Db
+) ->
DbName = ?b2l(couch_db:name(Db)),
validate_security_can_be_edited(DbName),
SecObj = chttpd:json_body(Req),
@@ -792,26 +895,22 @@ db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>],user_ctx=Ctx}=Req,
Else ->
throw(Else)
end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_, <<"_security">>]} = Req, Db) ->
send_json(Req, fabric:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_security">>]} = Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>],user_ctx=Ctx}=Req,
- Db) ->
+db_req(
+ #httpd{method = 'PUT', path_parts = [_, <<"_revs_limit">>], user_ctx = Ctx} = Req,
+ Db
+) ->
Limit = chttpd:json_body(Req),
- ok = fabric:set_revs_limit(Db, Limit, [{user_ctx,Ctx}]),
+ ok = fabric:set_revs_limit(Db, Limit, [{user_ctx, Ctx}]),
send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_, <<"_revs_limit">>]} = Req, Db) ->
send_json(Req, fabric:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_revs_limit">>]} = Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
+db_req(#httpd{method = 'PUT', path_parts = [_, <<"_purged_infos_limit">>]} = Req, Db) ->
Options = [{user_ctx, Req#httpd.user_ctx}],
case chttpd:json_body(Req) of
Limit when is_integer(Limit), Limit > 0 ->
@@ -821,81 +920,95 @@ db_req(#httpd{method='PUT',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
Error ->
throw(Error)
end;
- _->
+ _ ->
throw({bad_request, "`purge_infos_limit` must be positive integer"})
end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_purged_infos_limit">>]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_, <<"_purged_infos_limit">>]} = Req, Db) ->
send_json(Req, fabric:get_purge_infos_limit(Db));
-
% Special case to enable using an unencoded slash in the URL of design docs,
% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{method='GET', mochi_req=MochiReq, path_parts=[_DbName, <<"_design/", _/binary>> | _]}=Req, _Db) ->
+db_req(
+ #httpd{
+ method = 'GET', mochi_req = MochiReq, path_parts = [_DbName, <<"_design/", _/binary>> | _]
+ } = Req,
+ _Db
+) ->
[Head | Tail] = re:split(MochiReq:get(raw_path), "_design%2F", [{return, list}, caseless]),
chttpd:send_redirect(Req, Head ++ "_design/" ++ Tail);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
- db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
- db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
+db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name]} = Req, Db) ->
+ db_doc_req(Req, Db, <<"_design/", Name/binary>>);
+db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name | FileNameParts]} = Req, Db) ->
+ db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts);
% Special case to allow for accessing local documents without %2F
% encoding the docid. Throws out requests that don't have the second
% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">>]}, _Db) ->
throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local/">>]}, _Db) ->
throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">>, Name]} = Req, Db) ->
db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">> | _Rest]}, _Db) ->
throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
+db_req(#httpd{path_parts = [_, DocId]} = Req, Db) ->
db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{method='DELETE', path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
+db_req(#httpd{method = 'DELETE', path_parts = [_, DocId | FileNameParts]} = Req, Db) ->
chttpd:body(Req),
db_attachment_req(Req, Db, DocId, FileNameParts);
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
+db_req(#httpd{path_parts = [_, DocId | FileNameParts]} = Req, Db) ->
db_attachment_req(Req, Db, DocId, FileNameParts).
multi_all_docs_view(Req, Db, OP, Queries) ->
Args0 = couch_mrview_http:parse_params(Req, undefined),
- Args1 = Args0#mrargs{view_type=map},
- ArgQueries = lists:map(fun({Query}) ->
- QueryArg1 = couch_mrview_http:parse_params(Query, undefined,
- Args1, [decoded]),
- QueryArgs2 = fabric_util:validate_all_docs_args(Db, QueryArg1),
- set_namespace(OP, QueryArgs2)
- end, Queries),
+ Args1 = Args0#mrargs{view_type = map},
+ ArgQueries = lists:map(
+ fun({Query}) ->
+ QueryArg1 = couch_mrview_http:parse_params(
+ Query,
+ undefined,
+ Args1,
+ [decoded]
+ ),
+ QueryArgs2 = fabric_util:validate_all_docs_args(Db, QueryArg1),
+ set_namespace(OP, QueryArgs2)
+ end,
+ Queries
+ ),
Options = [{user_ctx, Req#httpd.user_ctx}],
- VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
+ VAcc0 = #vacc{db = Db, req = Req, prepend = "\r\n"},
FirstChunk = "{\"results\":[",
- {ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req,
- 200, [], FirstChunk),
- VAcc1 = VAcc0#vacc{resp=Resp0},
- VAcc2 = lists:foldl(fun(Args, Acc0) ->
- {ok, Acc1} = fabric:all_docs(Db, Options,
- fun view_cb/2, Acc0, Args),
- Acc1
- end, VAcc1, ArgQueries),
+ {ok, Resp0} = chttpd:start_delayed_json_response(
+ VAcc0#vacc.req,
+ 200,
+ [],
+ FirstChunk
+ ),
+ VAcc1 = VAcc0#vacc{resp = Resp0},
+ VAcc2 = lists:foldl(
+ fun(Args, Acc0) ->
+ {ok, Acc1} = fabric:all_docs(
+ Db,
+ Options,
+ fun view_cb/2,
+ Acc0,
+ Args
+ ),
+ Acc1
+ end,
+ VAcc1,
+ ArgQueries
+ ),
{ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
chttpd:end_delayed_json_response(Resp1).
all_docs_view(Req, Db, Keys, OP) ->
Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
- Args1 = Args0#mrargs{view_type=map},
+ Args1 = Args0#mrargs{view_type = map},
Args2 = fabric_util:validate_all_docs_args(Db, Args1),
Args3 = set_namespace(OP, Args2),
Options = [{user_ctx, Req#httpd.user_ctx}],
Max = chttpd:chunked_response_buffer_size(),
- VAcc = #vacc{db=Db, req=Req, threshold=Max},
+ VAcc = #vacc{db = Db, req = Req, threshold = Max},
{ok, Resp} = fabric:all_docs(Db, Options, fun view_cb/2, VAcc, Args3),
{ok, Resp#vacc.resp}.
@@ -906,23 +1019,21 @@ view_cb({row, Row} = Msg, Acc) ->
end,
chttpd_stats:incr_rows(),
couch_mrview_http:view_cb(Msg, Acc);
-
view_cb(Msg, Acc) ->
couch_mrview_http:view_cb(Msg, Acc).
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'DELETE'} = Req, Db, DocId) ->
% check for the existence of the doc to handle the 404 case.
couch_doc_open(Db, DocId, nil, []),
case chttpd:qs_value(Req, "rev") of
- undefined ->
- Body = {[{<<"_deleted">>,true}]};
- Rev ->
- Body = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]}
+ undefined ->
+ Body = {[{<<"_deleted">>, true}]};
+ Rev ->
+ Body = {[{<<"_rev">>, ?l2b(Rev)}, {<<"_deleted">>, true}]}
end,
Doc = couch_doc_from_req(Req, Db, DocId, Body),
send_updated_doc(Req, Db, DocId, Doc);
-
-db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
#doc_query_args{
rev = Rev0,
open_revs = Revs,
@@ -931,116 +1042,130 @@ db_doc_req(#httpd{method='GET', mochi_req=MochiReq}=Req, Db, DocId) ->
} = parse_doc_query(Req),
Options = [{user_ctx, Req#httpd.user_ctx} | Options0],
case Revs of
- [] ->
- Options2 =
- if AttsSince /= nil ->
- [{atts_since, AttsSince}, attachments | Options];
- true -> Options
- end,
- Rev = case lists:member(latest, Options) of
- % couch_doc_open will open the winning rev despite of a rev passed
- % https://docs.couchdb.org/en/stable/api/document/common.html?highlight=latest#get--db-docid
- true -> nil;
- false -> Rev0
- end,
- Doc = couch_doc_open(Db, DocId, Rev, Options2),
- send_doc(Req, Doc, Options2);
- _ ->
- case fabric:open_revs(Db, DocId, Revs, Options) of
- {ok, []} when Revs == all ->
- chttpd:send_error(Req, {not_found, missing});
- {ok, Results} ->
- chttpd_stats:incr_reads(length(Results)),
- case MochiReq:accepts_content_type("multipart/mixed") of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, "["),
- % We loop through the docs. The first time through the separator
- % is whitespace, then a comma on subsequent iterations.
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- {{not_found, missing}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- "," % AccSeparator now has a comma
- end,
- "", Results),
- send_chunk(Resp, "]"),
- end_json_response(Resp);
- true ->
- send_docs_multipart(Req, Results, Options)
- end;
- {error, Error} ->
- chttpd:send_error(Req, Error)
- end
+ [] ->
+ Options2 =
+ if
+ AttsSince /= nil ->
+ [{atts_since, AttsSince}, attachments | Options];
+ true ->
+ Options
+ end,
+ Rev =
+ case lists:member(latest, Options) of
+ % couch_doc_open will open the winning rev despite of a rev passed
+ % https://docs.couchdb.org/en/stable/api/document/common.html?highlight=latest#get--db-docid
+ true -> nil;
+ false -> Rev0
+ end,
+ Doc = couch_doc_open(Db, DocId, Rev, Options2),
+ send_doc(Req, Doc, Options2);
+ _ ->
+ case fabric:open_revs(Db, DocId, Revs, Options) of
+ {ok, []} when Revs == all ->
+ chttpd:send_error(Req, {not_found, missing});
+ {ok, Results} ->
+ chttpd_stats:incr_reads(length(Results)),
+ case MochiReq:accepts_content_type("multipart/mixed") of
+ false ->
+ {ok, Resp} = start_json_response(Req, 200),
+ send_chunk(Resp, "["),
+ % We loop through the docs. The first time through the separator
+ % is whitespace, then a comma on subsequent iterations.
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc = couch_doc:to_json_obj(Doc, Options),
+ Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ {{not_found, missing}, RevId} ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ % AccSeparator now has a comma
+ ","
+ end,
+ "",
+ Results
+ ),
+ send_chunk(Resp, "]"),
+ end_json_response(Resp);
+ true ->
+ send_docs_multipart(Req, Results, Options)
+ end;
+ {error, Error} ->
+ chttpd:send_error(Req, Error)
+ end
end;
-
-db_doc_req(#httpd{method='POST', user_ctx=Ctx}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'POST', user_ctx = Ctx} = Req, Db, DocId) ->
couch_httpd:validate_referer(Req),
couch_db:validate_docid(Db, DocId),
chttpd:validate_ctype(Req, "multipart/form-data"),
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx,Ctx}, {w,W}],
+ Options = [{user_ctx, Ctx}, {w, W}],
Form = couch_httpd:parse_form(Req),
case proplists:is_defined("_doc", Form) of
- true ->
- Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
- Doc = couch_doc_from_req(Req, Db, DocId, Json);
- false ->
- Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
- Doc = case fabric:open_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} ->
- chttpd_stats:incr_reads(),
- Doc0;
- {error, Error} ->
- throw(Error)
- end
+ true ->
+ Json = ?JSON_DECODE(couch_util:get_value("_doc", Form)),
+ Doc = couch_doc_from_req(Req, Db, DocId, Json);
+ false ->
+ Rev = couch_doc:parse_rev(list_to_binary(couch_util:get_value("_rev", Form))),
+ Doc =
+ case fabric:open_revs(Db, DocId, [Rev], []) of
+ {ok, [{ok, Doc0}]} ->
+ chttpd_stats:incr_reads(),
+ Doc0;
+ {error, Error} ->
+ throw(Error)
+ end
end,
UpdatedAtts = [
couch_att:new([
{name, validate_attachment_name(Name)},
{type, list_to_binary(ContentType)},
{data, Content}
- ]) ||
- {Name, {ContentType, _}, Content} <-
- proplists:get_all_values("_attachments", Form)
+ ])
+ || {Name, {ContentType, _}, Content} <-
+ proplists:get_all_values("_attachments", Form)
],
- #doc{atts=OldAtts} = Doc,
+ #doc{atts = OldAtts} = Doc,
OldAtts2 = lists:flatmap(
fun(Att) ->
OldName = couch_att:fetch(name, Att),
case [1 || A <- UpdatedAtts, couch_att:fetch(name, A) == OldName] of
- [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
- _ -> [] % the attachment was in the UpdatedAtts, drop it
+ % the attachment wasn't in the UpdatedAtts, return it
+ [] -> [Att];
+ % the attachment was in the UpdatedAtts, drop it
+ _ -> []
end
- end, OldAtts),
+ end,
+ OldAtts
+ ),
NewDoc = Doc#doc{
atts = UpdatedAtts ++ OldAtts2
},
case fabric:update_doc(Db, NewDoc, Options) of
- {ok, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
+ {ok, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, NewRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
end,
- send_json(Req, HttpCode, [{"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}], {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(NewRev)}
- ]});
-
-db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
+ send_json(
+ Req,
+ HttpCode,
+ [{"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewRev)) ++ "\""}],
+ {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(NewRev)}
+ ]}
+ );
+db_doc_req(#httpd{method = 'PUT', user_ctx = Ctx} = Req, Db, DocId) ->
#doc_query_args{
update_type = UpdateType
} = parse_doc_query(Req),
@@ -1048,194 +1173,257 @@ db_doc_req(#httpd{method='PUT', user_ctx=Ctx}=Req, Db, DocId) ->
couch_db:validate_docid(Db, DocId),
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- Options = [{user_ctx,Ctx}, {w,W}],
-
- Loc = absolute_uri(Req, [$/, couch_util:url_encode(DbName),
- $/, couch_util:url_encode(DocId)]),
+ Options = [{user_ctx, Ctx}, {w, W}],
+
+ Loc = absolute_uri(Req, [
+ $/,
+ couch_util:url_encode(DbName),
+ $/,
+ couch_util:url_encode(DocId)
+ ]),
RespHeaders = [{"Location", Loc}],
case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
- ("multipart/related;" ++ _) = ContentType ->
- couch_httpd:check_max_request_length(Req),
- couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(DbName), DocId)),
- {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(ContentType,
- fun() -> receive_request_data(Req) end),
- Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
- try
- Result = send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType),
- WaitFun(),
- Result
- catch throw:Err ->
- % Document rejected by a validate_doc_update function.
- couch_httpd_multipart:abort_multipart_stream(Parser),
- throw(Err)
- end;
- _Else ->
- case chttpd:qs_value(Req, "batch") of
- "ok" ->
- % batch
- Doc = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
-
- spawn(fun() ->
- case catch(fabric:update_doc(Db, Doc, Options)) of
- {ok, _} ->
- chttpd_stats:incr_writes(),
- ok;
- {accepted, _} ->
- chttpd_stats:incr_writes(),
- ok;
- Error ->
- couch_log:notice("Batch doc error (~s): ~p",[DocId, Error])
- end
- end),
- send_json(Req, 202, [], {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- Body = chttpd:json_body(Req),
- Doc = couch_doc_from_req(Req, Db, DocId, Body),
- send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
- end
+ ("multipart/related;" ++ _) = ContentType ->
+ couch_httpd:check_max_request_length(Req),
+ couch_httpd_multipart:num_mp_writers(mem3:n(mem3:dbname(DbName), DocId)),
+ {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
+ ContentType,
+ fun() -> receive_request_data(Req) end
+ ),
+ Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
+ try
+ Result = send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType),
+ WaitFun(),
+ Result
+ catch
+ throw:Err ->
+ % Document rejected by a validate_doc_update function.
+ couch_httpd_multipart:abort_multipart_stream(Parser),
+ throw(Err)
+ end;
+ _Else ->
+ case chttpd:qs_value(Req, "batch") of
+ "ok" ->
+ % batch
+ Doc = couch_doc_from_req(Req, Db, DocId, chttpd:json_body(Req)),
+
+ spawn(fun() ->
+ case catch (fabric:update_doc(Db, Doc, Options)) of
+ {ok, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ {accepted, _} ->
+ chttpd_stats:incr_writes(),
+ ok;
+ Error ->
+ couch_log:notice("Batch doc error (~s): ~p", [DocId, Error])
+ end
+ end),
+ send_json(
+ Req,
+ 202,
+ [],
+ {[
+ {ok, true},
+ {id, DocId}
+ ]}
+ );
+ _Normal ->
+ % normal
+ Body = chttpd:json_body(Req),
+ Doc = couch_doc_from_req(Req, Db, DocId, Body),
+ send_updated_doc(Req, Db, DocId, Doc, RespHeaders, UpdateType)
+ end
end;
-
-db_doc_req(#httpd{method='COPY', user_ctx=Ctx}=Req, Db, SourceDocId) ->
+db_doc_req(#httpd{method = 'COPY', user_ctx = Ctx} = Req, Db, SourceDocId) ->
SourceRev =
- case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
- missing_rev -> nil;
- Rev -> Rev
- end,
+ case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
+ missing_rev -> nil;
+ Rev -> Rev
+ end,
{TargetDocId0, TargetRevs} = couch_httpd_db:parse_copy_destination_header(Req),
TargetDocId = list_to_binary(chttpd:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
- case fabric:update_doc(Db,
- Doc#doc{id=TargetDocId, revs=TargetRevs}, [{user_ctx,Ctx}]) of
- {ok, NewTargetRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, NewTargetRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
+ case
+ fabric:update_doc(
+ Db,
+ Doc#doc{id = TargetDocId, revs = TargetRevs},
+ [{user_ctx, Ctx}]
+ )
+ of
+ {ok, NewTargetRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, NewTargetRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
end,
% respond
DbName = couch_db:name(Db),
{PartRes} = update_doc_result_to_json(TargetDocId, {ok, NewTargetRev}),
- Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)),
- send_json(Req, HttpCode,
- [{"Location", Loc},
- {"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}],
- {PartRes});
-
+ Loc = absolute_uri(
+ Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(TargetDocId)
+ ),
+ send_json(
+ Req,
+ HttpCode,
+ [
+ {"Location", Loc},
+ {"ETag", "\"" ++ ?b2l(couch_doc:rev_to_str(NewTargetRev)) ++ "\""}
+ ],
+ {PartRes}
+ );
db_doc_req(Req, _Db, _DocId) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
send_doc(Req, Doc, Options) ->
case Doc#doc.meta of
- [] ->
- DiskEtag = couch_httpd:doc_etag(Doc),
- % output etag only when we have no meta
- chttpd:etag_respond(Req, DiskEtag, fun() ->
- send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
- end);
- _ ->
- send_doc_efficiently(Req, Doc, [], Options)
+ [] ->
+ DiskEtag = couch_httpd:doc_etag(Doc),
+ % output etag only when we have no meta
+ chttpd:etag_respond(Req, DiskEtag, fun() ->
+ send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
+ end);
+ _ ->
+ send_doc_efficiently(Req, Doc, [], Options)
end.
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req=MochiReq}=Req, #doc{atts=Atts}=Doc, Headers, Options) ->
+send_doc_efficiently(Req, #doc{atts = []} = Doc, Headers, Options) ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req, #doc{atts = Atts} = Doc, Headers, Options) ->
case lists:member(attachments, Options) of
- true ->
- Refs = monitor_attachments(Atts),
- try
- case MochiReq:accepts_content_type("multipart/related") of
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
true ->
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
- [attachments, follows, att_encoding_info | Options])),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary,JsonBytes, Atts, true),
- CType = {"Content-Type", ContentType},
- {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
- couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
- fun(Data) -> couch_httpd:send(Resp, Data) end, true)
- end
- after
- demonitor_refs(Refs)
- end;
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
+ Refs = monitor_attachments(Atts),
+ try
+ case MochiReq:accepts_content_type("multipart/related") of
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+ true ->
+ Boundary = couch_uuids:random(),
+ JsonBytes = ?JSON_ENCODE(
+ couch_doc:to_json_obj(
+ Doc,
+ [attachments, follows, att_encoding_info | Options]
+ )
+ ),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary, JsonBytes, Atts, true
+ ),
+ CType = {"Content-Type", ContentType},
+ {ok, Resp} = start_response_length(Req, 200, [CType | Headers], Len),
+ couch_doc:doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> couch_httpd:send(Resp, Data) end,
+ true
+ )
+ end
+ after
+ demonitor_refs(Refs)
+ end;
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
end.
send_docs_multipart_bulk_get(Results, Options0, OuterBoundary, Resp) ->
InnerBoundary = bulk_get_multipart_boundary(),
Options = [attachments, follows, att_encoding_info | Options0],
lists:foreach(
- fun({ok, #doc{id=Id, revs=Revs, atts=Atts}=Doc}) ->
- Refs = monitor_attachments(Doc#doc.atts),
- try
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>),
- case Atts of
- [] ->
- couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: application/json\r\n\r\n">>);
- _ ->
- lists:foreach(fun(Header) -> couch_httpd:send_chunk(Resp, Header) end,
- bulk_get_multipart_headers(Revs, Id, InnerBoundary))
- end,
- couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data)
- end, true)
- after
- demonitor_refs(Refs)
- end;
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"rev">>, RevStr},
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, <<"missing">>}]}),
- couch_httpd:send_chunk(Resp,
- [<<"\r\n--", OuterBoundary/binary>>,
- <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json])
- end, Results).
+ fun
+ ({ok, #doc{id = Id, revs = Revs, atts = Atts} = Doc}) ->
+ Refs = monitor_attachments(Doc#doc.atts),
+ try
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>),
+ case Atts of
+ [] ->
+ couch_httpd:send_chunk(
+ Resp, <<"\r\nContent-Type: application/json\r\n\r\n">>
+ );
+ _ ->
+ lists:foreach(
+ fun(Header) -> couch_httpd:send_chunk(Resp, Header) end,
+ bulk_get_multipart_headers(Revs, Id, InnerBoundary)
+ )
+ end,
+ couch_doc:doc_to_multi_part_stream(
+ InnerBoundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> couch_httpd:send_chunk(Resp, Data) end,
+ true
+ )
+ after
+ demonitor_refs(Refs)
+ end;
+ ({{not_found, missing}, RevId}) ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE(
+ {[
+ {<<"rev">>, RevStr},
+ {<<"error">>, <<"not_found">>},
+ {<<"reason">>, <<"missing">>}
+ ]}
+ ),
+ couch_httpd:send_chunk(
+ Resp,
+ [
+ <<"\r\n--", OuterBoundary/binary>>,
+ <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json
+ ]
+ )
+ end,
+ Results
+ ).
send_docs_multipart(Req, Results, Options1) ->
OuterBoundary = couch_uuids:random(),
InnerBoundary = couch_uuids:random(),
Options = [attachments, follows, att_encoding_info | Options1],
- CType = {"Content-Type",
- "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
+ CType = {"Content-Type", "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 200, [CType]),
couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
lists:foreach(
- fun({ok, #doc{atts=Atts}=Doc}) ->
- Refs = monitor_attachments(Doc#doc.atts),
- try
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
- InnerBoundary, JsonBytes, Atts, true),
- couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
- ContentType/binary, "\r\n\r\n">>),
- couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data)
- end, true),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
- after
- demonitor_refs(Refs)
- end;
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- couch_httpd:send_chunk(Resp,
- [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json,
- <<"\r\n--", OuterBoundary/binary>>])
- end, Results),
+ fun
+ ({ok, #doc{atts = Atts} = Doc}) ->
+ Refs = monitor_attachments(Doc#doc.atts),
+ try
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+ {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
+ InnerBoundary, JsonBytes, Atts, true
+ ),
+ couch_httpd:send_chunk(
+ Resp, <<"\r\nContent-Type: ", ContentType/binary, "\r\n\r\n">>
+ ),
+ couch_doc:doc_to_multi_part_stream(
+ InnerBoundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> couch_httpd:send_chunk(Resp, Data) end,
+ true
+ ),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>)
+ after
+ demonitor_refs(Refs)
+ end;
+ ({{not_found, missing}, RevId}) ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
+ couch_httpd:send_chunk(
+ Resp,
+ [
+ <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json,
+ <<"\r\n--", OuterBoundary/binary>>
+ ]
+ )
+ end,
+ Results
+ ),
couch_httpd:send_chunk(Resp, <<"--">>),
couch_httpd:last_chunk(Resp).
@@ -1244,7 +1432,7 @@ bulk_get_multipart_headers({0, []}, Id, Boundary) ->
<<"\r\nX-Doc-Id: ", Id/binary>>,
<<"\r\nContent-Type: multipart/related; boundary=", Boundary/binary, "\r\n\r\n">>
];
-bulk_get_multipart_headers({Start, [FirstRevId|_]}, Id, Boundary) ->
+bulk_get_multipart_headers({Start, [FirstRevId | _]}, Id, Boundary) ->
RevStr = couch_doc:rev_to_str({Start, FirstRevId}),
[
<<"\r\nX-Doc-Id: ", Id/binary>>,
@@ -1265,9 +1453,17 @@ receive_request_data(Req, Len) when Len == chunked ->
self() ! {chunk, Ref, Binary}
end,
couch_httpd:recv_chunked(Req, 4096, ChunkFun, ok),
- GetChunk = fun GC() -> receive {chunk, Ref, Binary} -> {Binary, GC} end end,
- {receive {chunk, Ref, Binary} -> Binary end, GetChunk};
-
+ GetChunk = fun GC() ->
+ receive
+ {chunk, Ref, Binary} -> {Binary, GC}
+ end
+ end,
+ {
+ receive
+ {chunk, Ref, Binary} -> Binary
+ end,
+ GetChunk
+ };
receive_request_data(Req, LenLeft) when LenLeft > 0 ->
Len = erlang:min(4096, LenLeft),
Data = chttpd:recv(Req, Len),
@@ -1276,11 +1472,15 @@ receive_request_data(_Req, _) ->
throw(<<"expected more data">>).
update_doc_result_to_json({{Id, Rev}, Error}) ->
- {_Code, Err, Msg} = chttpd:error_info(Error),
- {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
- {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
+ {_Code, Err, Msg} = chttpd:error_info(Error),
+ {[
+ {id, Id},
+ {rev, couch_doc:rev_to_str(Rev)},
+ {error, Err},
+ {reason, Msg}
+ ]}.
+
+update_doc_result_to_json(#doc{id = DocId}, Result) ->
update_doc_result_to_json(DocId, Result);
update_doc_result_to_json(DocId, {ok, NewRev}) ->
{[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
@@ -1314,20 +1514,30 @@ send_updated_doc(Req, Db, DocId, Json) ->
send_updated_doc(Req, Db, DocId, Doc, Headers) ->
send_updated_doc(Req, Db, DocId, Doc, Headers, interactive_edit).
-send_updated_doc(#httpd{user_ctx=Ctx} = Req, Db, DocId, #doc{deleted=Deleted}=Doc,
- Headers, UpdateType) ->
+send_updated_doc(
+ #httpd{user_ctx = Ctx} = Req,
+ Db,
+ DocId,
+ #doc{deleted = Deleted} = Doc,
+ Headers,
+ UpdateType
+) ->
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
Options =
case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- [full_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
- "false" ->
- [delay_commit, UpdateType, {user_ctx,Ctx}, {w,W}];
- _ ->
- [UpdateType, {user_ctx,Ctx}, {w,W}]
+ "true" ->
+ [full_commit, UpdateType, {user_ctx, Ctx}, {w, W}];
+ "false" ->
+ [delay_commit, UpdateType, {user_ctx, Ctx}, {w, W}];
+ _ ->
+ [UpdateType, {user_ctx, Ctx}, {w, W}]
end,
- {Status, {etag, Etag}, Body} = update_doc(Db, DocId,
- #doc{deleted=Deleted}=Doc, Options),
+ {Status, {etag, Etag}, Body} = update_doc(
+ Db,
+ DocId,
+ #doc{deleted = Deleted} = Doc,
+ Options
+ ),
HttpCode = http_code_from_status(Status),
ResponseHeaders = [{"ETag", Etag} | Headers],
send_json(Req, HttpCode, ResponseHeaders, Body).
@@ -1342,7 +1552,7 @@ http_code_from_status(Status) ->
200
end.
-update_doc(Db, DocId, #doc{deleted=Deleted, body=DocBody}=Doc, Options) ->
+update_doc(Db, DocId, #doc{deleted = Deleted, body = DocBody} = Doc, Options) ->
{_, Ref} = spawn_monitor(fun() ->
try fabric:update_doc(Db, Doc, Options) of
Resp ->
@@ -1356,68 +1566,74 @@ update_doc(Db, DocId, #doc{deleted=Deleted, body=DocBody}=Doc, Options) ->
exit({exit_exit, Reason})
end
end),
- Result = receive
- {'DOWN', Ref, _, _, {exit_ok, Ret}} ->
- Ret;
- {'DOWN', Ref, _, _, {exit_throw, Reason}} ->
- throw(Reason);
- {'DOWN', Ref, _, _, {exit_error, Reason}} ->
- erlang:error(Reason);
- {'DOWN', Ref, _, _, {exit_exit, Reason}} ->
- erlang:exit(Reason)
- end,
+ Result =
+ receive
+ {'DOWN', Ref, _, _, {exit_ok, Ret}} ->
+ Ret;
+ {'DOWN', Ref, _, _, {exit_throw, Reason}} ->
+ throw(Reason);
+ {'DOWN', Ref, _, _, {exit_error, Reason}} ->
+ erlang:error(Reason);
+ {'DOWN', Ref, _, _, {exit_exit, Reason}} ->
+ erlang:exit(Reason)
+ end,
case Result of
- {ok, NewRev} ->
- Accepted = false;
- {accepted, NewRev} ->
- Accepted = true
+ {ok, NewRev} ->
+ Accepted = false;
+ {accepted, NewRev} ->
+ Accepted = true
end,
Etag = couch_httpd:doc_etag(DocId, DocBody, NewRev),
- Status = case {Accepted, Deleted} of
- {true, _} ->
- accepted;
- {false, true} ->
- ok;
- {false, false} ->
- created
- end,
+ Status =
+ case {Accepted, Deleted} of
+ {true, _} ->
+ accepted;
+ {false, true} ->
+ ok;
+ {false, false} ->
+ created
+ end,
NewRevStr = couch_doc:rev_to_str(NewRev),
Body = {[{ok, true}, {id, DocId}, {rev, NewRevStr}]},
{Status, {etag, Etag}, Body}.
-couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs} = Doc) ->
+couch_doc_from_req(Req, _Db, DocId, #doc{revs = Revs} = Doc) ->
validate_attachment_names(Doc),
- Rev = case chttpd:qs_value(Req, "rev") of
- undefined ->
- undefined;
- QSRev ->
- couch_doc:parse_rev(QSRev)
- end,
+ Rev =
+ case chttpd:qs_value(Req, "rev") of
+ undefined ->
+ undefined;
+ QSRev ->
+ couch_doc:parse_rev(QSRev)
+ end,
Revs2 =
- case Revs of
- {Start, [RevId|_]} ->
- if Rev /= undefined andalso Rev /= {Start, RevId} ->
- throw({bad_request, "Document rev from request body and query "
- "string have different values"});
- true ->
- case extract_header_rev(Req, {Start, RevId}) of
- missing_rev -> {0, []};
- _ -> Revs
- end
- end;
- _ ->
- case extract_header_rev(Req, Rev) of
- missing_rev -> {0, []};
- {Pos, RevId2} -> {Pos, [RevId2]}
- end
- end,
- Doc#doc{id=DocId, revs=Revs2};
+ case Revs of
+ {Start, [RevId | _]} ->
+ if
+ Rev /= undefined andalso Rev /= {Start, RevId} ->
+ throw(
+ {bad_request,
+ "Document rev from request body and query "
+ "string have different values"}
+ );
+ true ->
+ case extract_header_rev(Req, {Start, RevId}) of
+ missing_rev -> {0, []};
+ _ -> Revs
+ end
+ end;
+ _ ->
+ case extract_header_rev(Req, Rev) of
+ missing_rev -> {0, []};
+ {Pos, RevId2} -> {Pos, [RevId2]}
+ end
+ end,
+ Doc#doc{id = DocId, revs = Revs2};
couch_doc_from_req(Req, Db, DocId, Json) ->
Doc = couch_db:doc_from_json_obj_validate(Db, Json),
couch_doc_from_req(Req, Db, DocId, Doc).
-
% Useful for debugging
% couch_doc_open(Db, DocId) ->
% couch_doc_open(Db, DocId, nil, []).
@@ -1425,28 +1641,29 @@ couch_doc_from_req(Req, Db, DocId, Json) ->
couch_doc_open(Db, DocId, Rev, Options0) ->
Options = [{user_ctx, couch_db:get_user_ctx(Db)} | Options0],
case Rev of
- nil -> % open most recent rev
- case fabric:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- chttpd_stats:incr_reads(),
- Doc;
- Error ->
- throw(Error)
- end;
- _ -> % open a specific rev (deletions come back as stubs)
- case fabric:open_revs(Db, DocId, [Rev], Options) of
- {ok, [{ok, Doc}]} ->
- chttpd_stats:incr_reads(),
- Doc;
- {ok, [{{not_found, missing}, Rev}]} ->
- throw(not_found);
- {ok, [Else]} ->
- throw(Else);
- {error, Error} ->
- throw(Error)
- end
- end.
-
+ % open most recent rev
+ nil ->
+ case fabric:open_doc(Db, DocId, Options) of
+ {ok, Doc} ->
+ chttpd_stats:incr_reads(),
+ Doc;
+ Error ->
+ throw(Error)
+ end;
+ % open a specific rev (deletions come back as stubs)
+ _ ->
+ case fabric:open_revs(Db, DocId, [Rev], Options) of
+ {ok, [{ok, Doc}]} ->
+ chttpd_stats:incr_reads(),
+ Doc;
+ {ok, [{{not_found, missing}, Rev}]} ->
+ throw(not_found);
+ {ok, [Else]} ->
+ throw(Else);
+ {error, Error} ->
+ throw(Error)
+ end
+ end.
get_existing_attachment(Atts, FileName) ->
% Check if attachment exists, if not throw not_found
@@ -1457,228 +1674,298 @@ get_existing_attachment(Atts, FileName) ->
% Attachment request handlers
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
- FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
+db_attachment_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId, FileNameParts) ->
+ FileName = list_to_binary(
+ mochiweb_util:join(
+ lists:map(
+ fun binary_to_list/1,
+ FileNameParts
+ ),
+ "/"
+ )
+ ),
#doc_query_args{
- rev=Rev,
- options=Options
+ rev = Rev,
+ options = Options
} = parse_doc_query(Req),
#doc{
- atts=Atts
+ atts = Atts
} = Doc = couch_doc_open(Db, DocId, Rev, Options),
Att = get_existing_attachment(Atts, FileName),
- [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch([type, encoding, disk_len, att_len, md5], Att),
+ [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch(
+ [type, encoding, disk_len, att_len, md5], Att
+ ),
Refs = monitor_attachments(Att),
try
- Etag = case Md5 of
- <<>> -> chttpd:doc_etag(Doc);
- _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
- end,
- ReqAcceptsAttEnc = lists:member(
- atom_to_list(Enc),
- couch_httpd:accepted_encodings(Req)
- ),
- Headers0 = [
- {"ETag", Etag},
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", binary_to_list(Type)}
- ] ++ case ReqAcceptsAttEnc of
- true when Enc =/= identity ->
-            % RFC 2616 says that the 'identity' encoding should not be used in
- % the Content-Encoding header
- [{"Content-Encoding", atom_to_list(Enc)}];
- _ ->
- []
- end ++ case Enc of
- identity ->
- [{"Accept-Ranges", "bytes"}];
- _ ->
- [{"Accept-Ranges", "none"}]
- end,
- Headers = chttpd_util:maybe_add_csp_header("attachments", Headers0, "sandbox"),
- Len = case {Enc, ReqAcceptsAttEnc} of
- {identity, _} ->
- % stored and served in identity form
- DiskLen;
- {_, false} when DiskLen =/= AttLen ->
- % Stored encoded, but client doesn't accept the encoding we used,
- % so we need to decode on the fly. DiskLen is the identity length
- % of the attachment.
- DiskLen;
- {_, true} ->
- % Stored and served encoded. AttLen is the encoded length.
- AttLen;
- _ ->
- % We received an encoded attachment and stored it as such, so we
- % don't know the identity length. The client doesn't accept the
- % encoding, and since we cannot serve a correct Content-Length
- % header we'll fall back to a chunked response.
- undefined
- end,
- AttFun = case ReqAcceptsAttEnc of
- false ->
- fun couch_att:foldl_decode/3;
- true ->
- fun couch_att:foldl/3
- end,
- chttpd:etag_respond(
- Req,
- Etag,
- fun() ->
- case Len of
- undefined ->
- {ok, Resp} = start_chunked_response(Req, 200, Headers),
- AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- couch_httpd:last_chunk(Resp);
- _ ->
- Ranges = parse_ranges(MochiReq:get(range), Len),
- case {Enc, Ranges} of
- {identity, [{From, To}]} ->
- Headers1 = [{"Content-Range", make_content_range(From, To, Len)}]
- ++ Headers,
- {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
- {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
- send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ Etag =
+ case Md5 of
+ <<>> -> chttpd:doc_etag(Doc);
+ _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
+ end,
+ ReqAcceptsAttEnc = lists:member(
+ atom_to_list(Enc),
+ couch_httpd:accepted_encodings(Req)
+ ),
+ Headers0 =
+ [
+ {"ETag", Etag},
+ {"Cache-Control", "must-revalidate"},
+ {"Content-Type", binary_to_list(Type)}
+ ] ++
+ case ReqAcceptsAttEnc of
+ true when Enc =/= identity ->
+                        % RFC 2616 says that the 'identity' encoding should not be used in
+ % the Content-Encoding header
+ [{"Content-Encoding", atom_to_list(Enc)}];
_ ->
- Headers1 = Headers ++
- if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
- [{"Content-MD5", base64:encode(couch_att:fetch(md5, Att))}];
- true ->
- []
- end,
- {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
- AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ []
+ end ++
+ case Enc of
+ identity ->
+ [{"Accept-Ranges", "bytes"}];
+ _ ->
+ [{"Accept-Ranges", "none"}]
+ end,
+ Headers = chttpd_util:maybe_add_csp_header("attachments", Headers0, "sandbox"),
+ Len =
+ case {Enc, ReqAcceptsAttEnc} of
+ {identity, _} ->
+ % stored and served in identity form
+ DiskLen;
+ {_, false} when DiskLen =/= AttLen ->
+ % Stored encoded, but client doesn't accept the encoding we used,
+ % so we need to decode on the fly. DiskLen is the identity length
+ % of the attachment.
+ DiskLen;
+ {_, true} ->
+ % Stored and served encoded. AttLen is the encoded length.
+ AttLen;
+ _ ->
+ % We received an encoded attachment and stored it as such, so we
+ % don't know the identity length. The client doesn't accept the
+ % encoding, and since we cannot serve a correct Content-Length
+ % header we'll fall back to a chunked response.
+ undefined
+ end,
+ AttFun =
+ case ReqAcceptsAttEnc of
+ false ->
+ fun couch_att:foldl_decode/3;
+ true ->
+ fun couch_att:foldl/3
+ end,
+ chttpd:etag_respond(
+ Req,
+ Etag,
+ fun() ->
+ case Len of
+ undefined ->
+ {ok, Resp} = start_chunked_response(Req, 200, Headers),
+ AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+ couch_httpd:last_chunk(Resp);
+ _ ->
+ Ranges = parse_ranges(MochiReq:get(range), Len),
+ case {Enc, Ranges} of
+ {identity, [{From, To}]} ->
+ Headers1 =
+ [{"Content-Range", make_content_range(From, To, Len)}] ++
+ Headers,
+ {ok, Resp} = start_response_length(
+ Req, 206, Headers1, To - From + 1
+ ),
+ couch_att:range_foldl(
+ Att,
+ From,
+ To + 1,
+ fun(Seg, _) -> send(Resp, Seg) end,
+ {ok, Resp}
+ );
+ {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
+ send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ _ ->
+ Headers1 =
+ Headers ++
+ if
+ Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
+ [
+ {"Content-MD5",
+ base64:encode(couch_att:fetch(md5, Att))}
+ ];
+ true ->
+ []
+ end,
+ {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
+ AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ end
end
end
- end
- )
+ )
after
demonitor_refs(Refs)
end;
-
-
-db_attachment_req(#httpd{method=Method, user_ctx=Ctx}=Req, Db, DocId, FileNameParts)
- when (Method == 'PUT') or (Method == 'DELETE') ->
+db_attachment_req(#httpd{method = Method, user_ctx = Ctx} = Req, Db, DocId, FileNameParts) when
+ (Method == 'PUT') or (Method == 'DELETE')
+->
FileName = validate_attachment_name(
- mochiweb_util:join(
- lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
+ mochiweb_util:join(
+ lists:map(
+ fun binary_to_list/1,
+ FileNameParts
+ ),
+ "/"
+ )
+ ),
- NewAtt = case Method of
- 'DELETE' ->
- [];
- _ ->
- MimeType = case couch_httpd:header_value(Req,"Content-Type") of
- % We could throw an error here or guess by the FileName.
- % Currently, just giving it a default.
- undefined -> <<"application/octet-stream">>;
- CType -> list_to_binary(CType)
- end,
- Data = fabric:att_receiver(Req, couch_db:name(Db), chttpd:body_length(Req)),
- ContentLen = case couch_httpd:header_value(Req,"Content-Length") of
- undefined -> undefined;
- Length -> list_to_integer(Length)
- end,
- ContentEnc = string:to_lower(string:strip(
- couch_httpd:header_value(Req, "Content-Encoding", "identity")
- )),
- Encoding = case ContentEnc of
- "identity" ->
- identity;
- "gzip" ->
- gzip;
- _ ->
- throw({
- bad_ctype,
- "Only gzip and identity content-encodings are supported"
- })
- end,
- [couch_att:new([
- {name, FileName},
- {type, MimeType},
- {data, Data},
- {att_len, ContentLen},
- {md5, get_md5_header(Req)},
- {encoding, Encoding}
- ])]
- end,
+ NewAtt =
+ case Method of
+ 'DELETE' ->
+ [];
+ _ ->
+ MimeType =
+ case couch_httpd:header_value(Req, "Content-Type") of
+ % We could throw an error here or guess by the FileName.
+ % Currently, just giving it a default.
+ undefined -> <<"application/octet-stream">>;
+ CType -> list_to_binary(CType)
+ end,
+ Data = fabric:att_receiver(Req, couch_db:name(Db), chttpd:body_length(Req)),
+ ContentLen =
+ case couch_httpd:header_value(Req, "Content-Length") of
+ undefined -> undefined;
+ Length -> list_to_integer(Length)
+ end,
+ ContentEnc = string:to_lower(
+ string:strip(
+ couch_httpd:header_value(Req, "Content-Encoding", "identity")
+ )
+ ),
+ Encoding =
+ case ContentEnc of
+ "identity" ->
+ identity;
+ "gzip" ->
+ gzip;
+ _ ->
+ throw({
+ bad_ctype,
+ "Only gzip and identity content-encodings are supported"
+ })
+ end,
+ [
+ couch_att:new([
+ {name, FileName},
+ {type, MimeType},
+ {data, Data},
+ {att_len, ContentLen},
+ {md5, get_md5_header(Req)},
+ {encoding, Encoding}
+ ])
+ ]
+ end,
- Doc = case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
- missing_rev -> % make the new doc
- if Method =/= 'DELETE' -> ok; true ->
- % check for the existence of the doc and attachment
- CurrDoc = #doc{} = couch_doc_open(Db, DocId, nil, []),
- get_existing_attachment(CurrDoc#doc.atts, FileName)
- end,
- couch_db:validate_docid(Db, DocId),
- #doc{id=DocId};
- Rev ->
- case fabric:open_revs(Db, DocId, [Rev], [{user_ctx,Ctx}]) of
- {ok, [{ok, Doc0}]} ->
- chttpd_stats:incr_reads(),
- if Method =/= 'DELETE' -> ok; true ->
- % check if attachment exists
- get_existing_attachment(Doc0#doc.atts, FileName)
+ Doc =
+ case extract_header_rev(Req, chttpd:qs_value(Req, "rev")) of
+ % make the new doc
+ missing_rev ->
+ if
+ Method =/= 'DELETE' ->
+ ok;
+ true ->
+ % check for the existence of the doc and attachment
+ CurrDoc = #doc{} = couch_doc_open(Db, DocId, nil, []),
+ get_existing_attachment(CurrDoc#doc.atts, FileName)
end,
- Doc0;
- {ok, [Error]} ->
- throw(Error);
- {error, Error} ->
- throw(Error)
- end
- end,
+ couch_db:validate_docid(Db, DocId),
+ #doc{id = DocId};
+ Rev ->
+ case fabric:open_revs(Db, DocId, [Rev], [{user_ctx, Ctx}]) of
+ {ok, [{ok, Doc0}]} ->
+ chttpd_stats:incr_reads(),
+ if
+ Method =/= 'DELETE' ->
+ ok;
+ true ->
+ % check if attachment exists
+ get_existing_attachment(Doc0#doc.atts, FileName)
+ end,
+ Doc0;
+ {ok, [Error]} ->
+ throw(Error);
+ {error, Error} ->
+ throw(Error)
+ end
+ end,
- #doc{atts=Atts} = Doc,
+ #doc{atts = Atts} = Doc,
DocEdited = Doc#doc{
atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
},
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
- case fabric:update_doc(Db, DocEdited, [{user_ctx,Ctx}, {w,W}]) of
- {ok, UpdatedRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 201;
- {accepted, UpdatedRev} ->
- chttpd_stats:incr_writes(),
- HttpCode = 202
+ case fabric:update_doc(Db, DocEdited, [{user_ctx, Ctx}, {w, W}]) of
+ {ok, UpdatedRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 201;
+ {accepted, UpdatedRev} ->
+ chttpd_stats:incr_writes(),
+ HttpCode = 202
end,
erlang:put(mochiweb_request_recv, true),
DbName = couch_db:name(Db),
- {Status, Headers} = case Method of
- 'DELETE' ->
- {200, []};
- _ ->
- {HttpCode, [{"Location", absolute_uri(Req, [$/, DbName, $/, couch_util:url_encode(DocId), $/,
- couch_util:url_encode(FileName)])}]}
+ {Status, Headers} =
+ case Method of
+ 'DELETE' ->
+ {200, []};
+ _ ->
+ {HttpCode, [
+ {"Location",
+ absolute_uri(Req, [
+ $/,
+ DbName,
+ $/,
+ couch_util:url_encode(DocId),
+ $/,
+ couch_util:url_encode(FileName)
+ ])}
+ ]}
end,
- send_json(Req,Status, Headers, {[
- {ok, true},
- {id, DocId},
- {rev, couch_doc:rev_to_str(UpdatedRev)}
- ]});
-
+ send_json(
+ Req,
+ Status,
+ Headers,
+ {[
+ {ok, true},
+ {id, DocId},
+ {rev, couch_doc:rev_to_str(UpdatedRev)}
+ ]}
+ );
db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
Boundary = couch_uuids:random(),
- CType = {"Content-Type",
- "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
+ CType = {"Content-Type", "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 206, [CType]),
couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
- lists:foreach(fun({From, To}) ->
- ContentRange = make_content_range(From, To, Len),
- couch_httpd:send_chunk(Resp,
- <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
- "Content-Range: ", ContentRange/binary, "\r\n",
- "\r\n">>),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
- end, Ranges),
+ lists:foreach(
+ fun({From, To}) ->
+ ContentRange = make_content_range(From, To, Len),
+ couch_httpd:send_chunk(
+ Resp,
+ <<"\r\nContent-Type: ", ContentType/binary, "\r\n", "Content-Range: ",
+ ContentRange/binary, "\r\n", "\r\n">>
+ ),
+ couch_att:range_foldl(
+ Att,
+ From,
+ To + 1,
+ fun(Seg, _) -> send_chunk(Resp, Seg) end,
+ {ok, Resp}
+ ),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
+ end,
+ Ranges
+ ),
couch_httpd:send_chunk(Resp, <<"--">>),
couch_httpd:last_chunk(Resp),
{ok, Resp}.
@@ -1692,18 +1979,21 @@ parse_ranges(Ranges, Len) ->
parse_ranges([], _Len, Acc) ->
lists:reverse(Acc);
-parse_ranges([{0, none}|_], _Len, _Acc) ->
+parse_ranges([{0, none} | _], _Len, _Acc) ->
undefined;
-parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
+parse_ranges([{From, To} | _], _Len, _Acc) when
+ is_integer(From) andalso is_integer(To) andalso To < From
+->
throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc)
- when is_integer(To) andalso To >= Len ->
- parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
+parse_ranges([{From, To} | Rest], Len, Acc) when
+ is_integer(To) andalso To >= Len
+->
+ parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{none, To} | Rest], Len, Acc) ->
parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
+parse_ranges([{From, none} | Rest], Len, Acc) ->
parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
+parse_ranges([{From, To} | Rest], Len, Acc) ->
parse_ranges(Rest, Len, [{From, To}] ++ Acc).
make_content_range(From, To, Len) ->
@@ -1736,27 +2026,34 @@ parse_shards_opt(Req) ->
[
{n, parse_shards_opt("n", Req, config:get_integer("cluster", "n", 3))},
{q, parse_shards_opt("q", Req, config:get_integer("cluster", "q", 2))},
- {placement, parse_shards_opt(
- "placement", Req, config:get("cluster", "placement"))}
+ {placement,
+ parse_shards_opt(
+ "placement", Req, config:get("cluster", "placement")
+ )}
].
parse_shards_opt("placement", Req, Default) ->
Err = <<"The `placement` value should be in a format `zone:n`.">>,
case chttpd:qs_value(Req, "placement", Default) of
- Default -> Default;
- [] -> throw({bad_request, Err});
+ Default ->
+ Default;
+ [] ->
+ throw({bad_request, Err});
Val ->
try
- true = lists:all(fun(Rule) ->
- [_, N] = string:tokens(Rule, ":"),
- couch_util:validate_positive_int(N)
- end, string:tokens(Val, ",")),
+ true = lists:all(
+ fun(Rule) ->
+ [_, N] = string:tokens(Rule, ":"),
+ couch_util:validate_positive_int(N)
+ end,
+ string:tokens(Val, ",")
+ ),
Val
- catch _:_ ->
- throw({bad_request, Err})
+ catch
+ _:_ ->
+ throw({bad_request, Err})
end
end;
-
parse_shards_opt(Param, Req, Default) ->
Val = chttpd:qs_value(Req, Param, Default),
Err = ?l2b(["The `", Param, "` value should be a positive integer."]),
@@ -1765,7 +2062,6 @@ parse_shards_opt(Param, Req, Default) ->
false -> throw({bad_request, Err})
end.
-
parse_engine_opt(Req) ->
case chttpd:qs_value(Req, "engine") of
undefined ->
@@ -1780,7 +2076,6 @@ parse_engine_opt(Req) ->
end
end.
-
parse_partitioned_opt(Req) ->
case chttpd:qs_value(Req, "partitioned") of
undefined ->
@@ -1797,130 +2092,142 @@ parse_partitioned_opt(Req) ->
throw({bad_request, <<"Invalid `partitioned` parameter">>})
end.
-
validate_partitioned_db_enabled(Req) ->
case couch_flags:is_enabled(partitioned, Req) of
- true ->
+ true ->
ok;
false ->
throw({bad_request, <<"Partitioned feature is not enabled.">>})
end.
-
parse_doc_query({Key, Value}, Args) ->
case {Key, Value} of
{"attachments", "true"} ->
Options = [attachments | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"meta", "true"} ->
Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"revs", "true"} ->
Options = [revs | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"local_seq", "true"} ->
Options = [local_seq | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"revs_info", "true"} ->
Options = [revs_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"conflicts", "true"} ->
Options = [conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"deleted", "true"} ->
Options = [deleted | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"deleted_conflicts", "true"} ->
Options = [deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"rev", Rev} ->
- Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
+ Args#doc_query_args{rev = couch_doc:parse_rev(Rev)};
{"open_revs", "all"} ->
- Args#doc_query_args{open_revs=all};
+ Args#doc_query_args{open_revs = all};
{"open_revs", RevsJsonStr} ->
JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
+ Args#doc_query_args{open_revs = couch_doc:parse_revs(JsonArray)};
{"latest", "true"} ->
Options = [latest | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"atts_since", RevsJsonStr} ->
JsonArray = ?JSON_DECODE(RevsJsonStr),
Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
{"new_edits", "false"} ->
- Args#doc_query_args{update_type=replicated_changes};
+ Args#doc_query_args{update_type = replicated_changes};
{"new_edits", "true"} ->
- Args#doc_query_args{update_type=interactive_edit};
+ Args#doc_query_args{update_type = interactive_edit};
{"att_encoding_info", "true"} ->
Options = [att_encoding_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Args#doc_query_args{options = Options};
{"r", R} ->
- Options = [{r,R} | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
+ Options = [{r, R} | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
{"w", W} ->
- Options = [{w,W} | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- _Else -> % unknown key value pair, ignore.
+ Options = [{w, W} | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ % unknown key value pair, ignore.
+ _Else ->
Args
end.
parse_changes_query(Req) ->
erlang:erase(changes_seq_interval),
- ChangesArgs = lists:foldl(fun({Key, Value}, Args) ->
- case {string:to_lower(Key), Value} of
- {"feed", "live"} ->
- %% sugar for continuous
- Args#changes_args{feed="continuous"};
- {"feed", _} ->
- Args#changes_args{feed=Value};
- {"descending", "true"} ->
- Args#changes_args{dir=rev};
- {"since", _} ->
- Args#changes_args{since=Value};
- {"last-event-id", _} ->
- Args#changes_args{since=Value};
- {"limit", _} ->
- Args#changes_args{limit=list_to_integer(Value)};
- {"style", _} ->
- Args#changes_args{style=list_to_existing_atom(Value)};
- {"heartbeat", "true"} ->
- Args#changes_args{heartbeat=true};
- {"heartbeat", _} ->
- try list_to_integer(Value) of
- HeartbeatInteger when HeartbeatInteger > 0 ->
- Args#changes_args{heartbeat=HeartbeatInteger};
- _ ->
- throw({bad_request, <<"The heartbeat value should be a positive integer (in milliseconds).">>})
- catch error:badarg ->
- throw({bad_request, <<"Invalid heartbeat value. Expecting a positive integer value (in milliseconds).">>})
- end;
- {"timeout", _} ->
- Args#changes_args{timeout=list_to_integer(Value)};
- {"include_docs", "true"} ->
- Args#changes_args{include_docs=true};
- {"conflicts", "true"} ->
- Args#changes_args{conflicts=true};
- {"attachments", "true"} ->
- Options = [attachments | Args#changes_args.doc_options],
- Args#changes_args{doc_options=Options};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#changes_args.doc_options],
- Args#changes_args{doc_options=Options};
- {"filter", _} ->
- Args#changes_args{filter=Value};
- {"seq_interval", _} ->
- try list_to_integer(Value) of
- V when V > 0 ->
- erlang:put(changes_seq_interval, V),
- Args;
- _ ->
- throw({bad_request, invalid_seq_interval})
- catch error:badarg ->
- throw({bad_request, invalid_seq_interval})
- end;
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #changes_args{}, chttpd:qs(Req)),
+ ChangesArgs = lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {string:to_lower(Key), Value} of
+ {"feed", "live"} ->
+ %% sugar for continuous
+ Args#changes_args{feed = "continuous"};
+ {"feed", _} ->
+ Args#changes_args{feed = Value};
+ {"descending", "true"} ->
+ Args#changes_args{dir = rev};
+ {"since", _} ->
+ Args#changes_args{since = Value};
+ {"last-event-id", _} ->
+ Args#changes_args{since = Value};
+ {"limit", _} ->
+ Args#changes_args{limit = list_to_integer(Value)};
+ {"style", _} ->
+ Args#changes_args{style = list_to_existing_atom(Value)};
+ {"heartbeat", "true"} ->
+ Args#changes_args{heartbeat = true};
+ {"heartbeat", _} ->
+ try list_to_integer(Value) of
+ HeartbeatInteger when HeartbeatInteger > 0 ->
+ Args#changes_args{heartbeat = HeartbeatInteger};
+ _ ->
+ throw(
+ {bad_request,
+ <<"The heartbeat value should be a positive integer (in milliseconds).">>}
+ )
+ catch
+ error:badarg ->
+ throw(
+ {bad_request,
+ <<"Invalid heartbeat value. Expecting a positive integer value (in milliseconds).">>}
+ )
+ end;
+ {"timeout", _} ->
+ Args#changes_args{timeout = list_to_integer(Value)};
+ {"include_docs", "true"} ->
+ Args#changes_args{include_docs = true};
+ {"conflicts", "true"} ->
+ Args#changes_args{conflicts = true};
+ {"attachments", "true"} ->
+ Options = [attachments | Args#changes_args.doc_options],
+ Args#changes_args{doc_options = Options};
+ {"att_encoding_info", "true"} ->
+ Options = [att_encoding_info | Args#changes_args.doc_options],
+ Args#changes_args{doc_options = Options};
+ {"filter", _} ->
+ Args#changes_args{filter = Value};
+ {"seq_interval", _} ->
+ try list_to_integer(Value) of
+ V when V > 0 ->
+ erlang:put(changes_seq_interval, V),
+ Args;
+ _ ->
+ throw({bad_request, invalid_seq_interval})
+ catch
+ error:badarg ->
+ throw({bad_request, invalid_seq_interval})
+ end;
+ % unknown key value pair, ignore.
+ _Else ->
+ Args
+ end
+ end,
+ #changes_args{},
+ chttpd:qs(Req)
+ ),
%% if it's an EventSource request with a Last-event-ID header
%% that should override the `since` query string, since it's
%% probably the browser reconnecting.
@@ -1930,57 +2237,68 @@ parse_changes_query(Req) ->
undefined ->
ChangesArgs;
Value ->
- ChangesArgs#changes_args{since=Value}
+ ChangesArgs#changes_args{since = Value}
end;
_ ->
ChangesArgs
end.
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
+extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) ->
extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
extract_header_rev(Req, ExplicitRev) ->
- Etag = case chttpd:header_value(Req, "If-Match") of
- undefined -> undefined;
- Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
- end,
+ Etag =
+ case chttpd:header_value(Req, "If-Match") of
+ undefined -> undefined;
+ Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
+ end,
case {ExplicitRev, Etag} of
- {undefined, undefined} -> missing_rev;
- {_, undefined} -> ExplicitRev;
- {undefined, _} -> Etag;
- _ when ExplicitRev == Etag -> Etag;
- _ ->
- throw({bad_request, "Document rev and etag have different values"})
+ {undefined, undefined} -> missing_rev;
+ {_, undefined} -> ExplicitRev;
+ {undefined, _} -> Etag;
+ _ when ExplicitRev == Etag -> Etag;
+ _ -> throw({bad_request, "Document rev and etag have different values"})
end.
validate_security_can_be_edited(DbName) ->
UserDbName = config:get("chttpd_auth", "authentication_db", "_users"),
- CanEditUserSecurityObject = config:get("couchdb","users_db_security_editable","false"),
- case {DbName,CanEditUserSecurityObject} of
- {UserDbName,"false"} ->
+ CanEditUserSecurityObject = config:get("couchdb", "users_db_security_editable", "false"),
+ case {DbName, CanEditUserSecurityObject} of
+ {UserDbName, "false"} ->
Msg = "You can't edit the security object of the user database.",
throw({forbidden, Msg});
- {_,_} -> ok
+ {_, _} ->
+ ok
end.
validate_revs(_Doc, true) ->
ok;
validate_revs(#doc{revs = {0, []}}, false) ->
- throw({bad_request, ?l2b("When `new_edits: false`, " ++
- "the document needs `_rev` or `_revisions` specified")});
+ throw(
+ {bad_request,
+ ?l2b(
+ "When `new_edits: false`, " ++
+ "the document needs `_rev` or `_revisions` specified"
+ )}
+ );
validate_revs(_Doc, false) ->
ok.
validate_attachment_names(Doc) ->
- lists:foreach(fun(Att) ->
- Name = couch_att:fetch(name, Att),
- validate_attachment_name(Name)
- end, Doc#doc.atts).
+ lists:foreach(
+ fun(Att) ->
+ Name = couch_att:fetch(name, Att),
+ validate_attachment_name(Name)
+ end,
+ Doc#doc.atts
+ ).
validate_attachment_name(Name) when is_list(Name) ->
validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",Rest/binary>>) ->
- throw({bad_request, <<"Attachment name '_", Rest/binary,
- "' starts with prohibited character '_'">>});
+validate_attachment_name(<<"_", Rest/binary>>) ->
+ throw(
+ {bad_request,
+ <<"Attachment name '_", Rest/binary, "' starts with prohibited character '_'">>}
+ );
validate_attachment_name(Name) ->
case couch_util:validate_utf8(Name) of
true -> Name;
@@ -1989,17 +2307,21 @@ validate_attachment_name(Name) ->
-spec monitor_attachments(couch_att:att() | [couch_att:att()]) -> [reference()].
monitor_attachments(Atts) when is_list(Atts) ->
- lists:foldl(fun(Att, Monitors) ->
- case couch_att:fetch(data, Att) of
- {Fd, _} ->
- [monitor(process, Fd) | Monitors];
- stub ->
- Monitors;
- Else ->
- couch_log:error("~p from couch_att:fetch(data, ~p)", [Else, Att]),
- Monitors
- end
- end, [], Atts);
+ lists:foldl(
+ fun(Att, Monitors) ->
+ case couch_att:fetch(data, Att) of
+ {Fd, _} ->
+ [monitor(process, Fd) | Monitors];
+ stub ->
+ Monitors;
+ Else ->
+ couch_log:error("~p from couch_att:fetch(data, ~p)", [Else, Att]),
+ Monitors
+ end
+ end,
+ [],
+ Atts
+ );
monitor_attachments(Att) ->
monitor_attachments([Att]).
@@ -2015,25 +2337,27 @@ set_namespace(<<"_design_docs">>, Args) ->
set_namespace(NS, #mrargs{} = Args) ->
couch_mrview_util:set_extra(Args, namespace, NS).
-
%% /db/_bulk_get stuff
bulk_get_parse_doc_query(Req) ->
- lists:foldl(fun({Key, Value}, Args) ->
- ok = validate_query_param(Key),
- parse_doc_query({Key, Value}, Args)
- end, #doc_query_args{}, chttpd:qs(Req)).
-
+ lists:foldl(
+ fun({Key, Value}, Args) ->
+ ok = validate_query_param(Key),
+ parse_doc_query({Key, Value}, Args)
+ end,
+ #doc_query_args{},
+ chttpd:qs(Req)
+ ).
-validate_query_param("open_revs"=Key) ->
+validate_query_param("open_revs" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("new_edits"=Key) ->
+validate_query_param("new_edits" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("w"=Key) ->
+validate_query_param("w" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("rev"=Key) ->
+validate_query_param("rev" = Key) ->
throw_bad_query_param(Key);
-validate_query_param("atts_since"=Key) ->
+validate_query_param("atts_since" = Key) ->
throw_bad_query_param(Key);
validate_query_param(_) ->
ok.
@@ -2044,11 +2368,9 @@ throw_bad_query_param(Key) when is_binary(Key) ->
Msg = <<"\"", Key/binary, "\" query parameter is not acceptable">>,
throw({bad_request, Msg}).
-
bulk_get_open_doc_revs(Db, {Props}, Options) ->
bulk_get_open_doc_revs1(Db, Props, Options, {}).
-
bulk_get_open_doc_revs1(Db, Props, Options, {}) ->
case couch_util:get_value(<<"id">>, Props) of
undefined ->
@@ -2058,8 +2380,9 @@ bulk_get_open_doc_revs1(Db, Props, Options, {}) ->
try
couch_db:validate_docid(Db, DocId),
bulk_get_open_doc_revs1(Db, Props, Options, {DocId})
- catch throw:{Error, Reason} ->
- {DocId, {error, {null, Error, Reason}}, Options}
+ catch
+ throw:{Error, Reason} ->
+ {DocId, {error, {null, Error, Reason}}, Options}
end
end;
bulk_get_open_doc_revs1(Db, Props, Options, {DocId}) ->
@@ -2068,10 +2391,8 @@ bulk_get_open_doc_revs1(Db, Props, Options, {DocId}) ->
case parse_field(<<"rev">>, RevStr) of
{error, {RevStr, Error, Reason}} ->
{DocId, {error, {RevStr, Error, Reason}}, Options};
-
{ok, undefined} ->
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, all});
-
{ok, Rev} ->
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, [Rev]})
end;
@@ -2081,10 +2402,8 @@ bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs}) ->
case parse_field(<<"atts_since">>, AttsSinceStr) of
{error, {BadAttsSinceRev, Error, Reason}} ->
{DocId, {error, {BadAttsSinceRev, Error, Reason}}, Options};
-
{ok, []} ->
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options});
-
{ok, RevList} ->
Options1 = [{atts_since, RevList}, attachments | Options],
bulk_get_open_doc_revs1(Db, Props, Options, {DocId, Revs, Options1})
@@ -2102,7 +2421,6 @@ bulk_get_open_doc_revs1(Db, Props, _, {DocId, Revs, Options}) ->
{DocId, Else, Options}
end.
-
parse_field(<<"rev">>, undefined) ->
{ok, undefined};
parse_field(<<"rev">>, Value) ->
@@ -2110,7 +2428,7 @@ parse_field(<<"rev">>, Value) ->
Rev = couch_doc:parse_rev(Value),
{ok, Rev}
catch
- throw:{bad_request=Error, Reason} ->
+ throw:{bad_request = Error, Reason} ->
{error, {Value, Error, Reason}}
end;
parse_field(<<"atts_since">>, undefined) ->
@@ -2122,18 +2440,16 @@ parse_field(<<"atts_since">>, Value) when is_list(Value) ->
parse_field(<<"atts_since">>, Value) ->
{error, {Value, bad_request, <<"att_since value must be array of revs.">>}}.
-
parse_atts_since([], Acc) ->
{ok, lists:reverse(Acc)};
parse_atts_since([RevStr | Rest], Acc) ->
case parse_field(<<"rev">>, RevStr) of
{ok, Rev} ->
parse_atts_since(Rest, [Rev | Acc]);
- {error, _}=Error ->
+ {error, _} = Error ->
Error
end.
-
bulk_get_send_docs_json(Resp, DocId, Results, Options, Sep) ->
Id = ?JSON_ENCODE(DocId),
send_chunk(Resp, [Sep, <<"{\"id\": ">>, Id, <<", \"docs\": [">>]),
@@ -2145,37 +2461,45 @@ bulk_get_send_docs_json1(Resp, DocId, {error, {Rev, Error, Reason}}, _) ->
bulk_get_send_docs_json1(_Resp, _DocId, {ok, []}, _) ->
ok;
bulk_get_send_docs_json1(Resp, DocId, {ok, Docs}, Options) ->
- lists:foldl(fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, [AccSeparator, Json]);
- {{Error, Reason}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = bulk_get_json_error(DocId, RevStr, Error, Reason),
- send_chunk(Resp, [AccSeparator, Json])
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc = couch_doc:to_json_obj(Doc, Options),
+ Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+ send_chunk(Resp, [AccSeparator, Json]);
+ {{Error, Reason}, RevId} ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = bulk_get_json_error(DocId, RevStr, Error, Reason),
+ send_chunk(Resp, [AccSeparator, Json])
+ end,
+ <<",">>
end,
- <<",">>
- end, <<"">>, Docs).
+ <<"">>,
+ Docs
+ ).
bulk_get_json_error(DocId, Rev, Error, Reason) ->
- ?JSON_ENCODE({[{error, {[{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, Error},
- {<<"reason">>, Reason}]}}]}).
-
+ ?JSON_ENCODE(
+ {[
+ {error,
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"error">>, Error},
+ {<<"reason">>, Reason}
+ ]}}
+ ]}
+ ).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
monitor_attachments_test_() ->
- {"ignore stubs",
- fun () ->
- Atts = [couch_att:new([{data, stub}])],
- ?_assertEqual([], monitor_attachments(Atts))
- end
- }.
+ {"ignore stubs", fun() ->
+ Atts = [couch_att:new([{data, stub}])],
+ ?_assertEqual([], monitor_attachments(Atts))
+ end}.
parse_partitioned_opt_test_() ->
{
@@ -2296,18 +2620,20 @@ t_should_allow_valid_placement() ->
fun() -> ok end,
[
{"single zone",
- ?_test(begin
- Req = mock_request("/all-test21?placement=az:1"),
- Opts = parse_shards_opt(Req),
- ?assertEqual("az:1", couch_util:get_value(placement, Opts))
- end)},
+ ?_test(begin
+ Req = mock_request("/all-test21?placement=az:1"),
+ Opts = parse_shards_opt(Req),
+ ?assertEqual("az:1", couch_util:get_value(placement, Opts))
+ end)},
{"multi zone",
- ?_test(begin
- Req = mock_request("/all-test21?placement=az:1,co:3"),
- Opts = parse_shards_opt(Req),
- ?assertEqual("az:1,co:3",
- couch_util:get_value(placement, Opts))
- end)}
+ ?_test(begin
+ Req = mock_request("/all-test21?placement=az:1,co:3"),
+ Opts = parse_shards_opt(Req),
+ ?assertEqual(
+ "az:1,co:3",
+ couch_util:get_value(placement, Opts)
+ )
+ end)}
]
}.
@@ -2325,25 +2651,25 @@ t_should_throw_on_invalid_placement() ->
fun() -> ok end,
[
{"empty placement",
- ?_test(begin
- Req = mock_request("/all-test21?placement="),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)},
+ ?_test(begin
+ Req = mock_request("/all-test21?placement="),
+ ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
+ end)},
{"invalid format",
- ?_test(begin
- Req = mock_request("/all-test21?placement=moon"),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)},
+ ?_test(begin
+ Req = mock_request("/all-test21?placement=moon"),
+ ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
+ end)},
{"invalid n",
- ?_test(begin
- Req = mock_request("/all-test21?placement=moon:eagle"),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)},
+ ?_test(begin
+ Req = mock_request("/all-test21?placement=moon:eagle"),
+ ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
+ end)},
{"one invalid zone",
- ?_test(begin
- Req = mock_request("/all-test21?placement=az:1,co:moon"),
- ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
- end)}
+ ?_test(begin
+ Req = mock_request("/all-test21?placement=az:1,co:moon"),
+ ?assertThrow({bad_request, Err}, parse_shards_opt(Req))
+ end)}
]
}.
diff --git a/src/chttpd/src/chttpd_epi.erl b/src/chttpd/src/chttpd_epi.erl
index ffbd87a07..5536c9e4d 100644
--- a/src/chttpd/src/chttpd_epi.erl
+++ b/src/chttpd/src/chttpd_epi.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(chttpd_epi).
-behaviour(couch_epi_plugin).
@@ -33,7 +32,6 @@ providers() ->
{chttpd_handlers, chttpd_httpd_handlers}
].
-
services() ->
[
{chttpd_auth, chttpd_auth},
diff --git a/src/chttpd/src/chttpd_external.erl b/src/chttpd/src/chttpd_external.erl
index 12d1fe555..84cfc9620 100644
--- a/src/chttpd/src/chttpd_external.erl
+++ b/src/chttpd/src/chttpd_external.erl
@@ -18,7 +18,7 @@
-export([json_req_obj_fields/0, json_req_obj/2, json_req_obj/3, json_req_obj/4]).
-export([default_or_content_type/2, parse_external_response/1]).
--import(chttpd,[send_error/4]).
+-import(chttpd, [send_error/4]).
-include_lib("couch/include/couch_db.hrl").
@@ -33,9 +33,23 @@ json_req_obj(Req, Db, DocId, Fields) when is_list(Fields) ->
{[{Field, json_req_obj_field(Field, Req, Db, DocId)} || Field <- Fields]}.
json_req_obj_fields() ->
- [<<"info">>, <<"uuid">>, <<"id">>, <<"method">>, <<"requested_path">>,
- <<"path">>, <<"raw_path">>, <<"query">>, <<"headers">>, <<"body">>,
- <<"peer">>, <<"form">>, <<"cookie">>, <<"userCtx">>, <<"secObj">>].
+ [
+ <<"info">>,
+ <<"uuid">>,
+ <<"id">>,
+ <<"method">>,
+ <<"requested_path">>,
+ <<"path">>,
+ <<"raw_path">>,
+ <<"query">>,
+ <<"headers">>,
+ <<"body">>,
+ <<"peer">>,
+ <<"form">>,
+ <<"cookie">>,
+ <<"userCtx">>,
+ <<"secObj">>
+ ].
json_req_obj_field(<<"info">>, #httpd{}, Db, _DocId) ->
{ok, Info} = get_db_info(Db),
@@ -44,51 +58,55 @@ json_req_obj_field(<<"uuid">>, #httpd{}, _Db, _DocId) ->
couch_uuids:new();
json_req_obj_field(<<"id">>, #httpd{}, _Db, DocId) ->
DocId;
-json_req_obj_field(<<"method">>, #httpd{method=Method}, _Db, _DocId) ->
+json_req_obj_field(<<"method">>, #httpd{method = Method}, _Db, _DocId) ->
Method;
-json_req_obj_field(<<"requested_path">>, #httpd{requested_path_parts=Path}, _Db, _DocId) ->
+json_req_obj_field(<<"requested_path">>, #httpd{requested_path_parts = Path}, _Db, _DocId) ->
Path;
-json_req_obj_field(<<"path">>, #httpd{path_parts=Path}, _Db, _DocId) ->
+json_req_obj_field(<<"path">>, #httpd{path_parts = Path}, _Db, _DocId) ->
Path;
-json_req_obj_field(<<"raw_path">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"raw_path">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
?l2b(Req:get(raw_path));
-json_req_obj_field(<<"query">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"query">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
json_query_keys(to_json_terms(Req:parse_qs()));
-json_req_obj_field(<<"headers">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"headers">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
Headers = Req:get(headers),
Hlist = mochiweb_headers:to_list(Headers),
to_json_terms(Hlist);
-json_req_obj_field(<<"body">>, #httpd{req_body=undefined, mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"body">>, #httpd{req_body = undefined, mochi_req = Req}, _Db, _DocId) ->
MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296),
+ "max_http_request_size", 4294967296
+ ),
try
Req:recv_body(MaxSize)
- catch exit:normal ->
- exit({bad_request, <<"Invalid request body">>})
+ catch
+ exit:normal ->
+ exit({bad_request, <<"Invalid request body">>})
end;
-json_req_obj_field(<<"body">>, #httpd{req_body=Body}, _Db, _DocId) ->
+json_req_obj_field(<<"body">>, #httpd{req_body = Body}, _Db, _DocId) ->
Body;
-json_req_obj_field(<<"peer">>, #httpd{peer=undefined, mochi_req=Req}, _, _) ->
+json_req_obj_field(<<"peer">>, #httpd{peer = undefined, mochi_req = Req}, _, _) ->
?l2b(Req:get(peer));
-json_req_obj_field(<<"peer">>, #httpd{peer=Peer}, _Db, _DocId) ->
+json_req_obj_field(<<"peer">>, #httpd{peer = Peer}, _Db, _DocId) ->
?l2b(Peer);
-json_req_obj_field(<<"form">>, #httpd{mochi_req=Req, method=Method}=HttpReq, Db, DocId) ->
+json_req_obj_field(<<"form">>, #httpd{mochi_req = Req, method = Method} = HttpReq, Db, DocId) ->
Body = json_req_obj_field(<<"body">>, HttpReq, Db, DocId),
- ParsedForm = case Req:get_primary_header_value("content-type") of
- "application/x-www-form-urlencoded" ++ _ when Method =:= 'POST' orelse Method =:= 'PUT' ->
- mochiweb_util:parse_qs(Body);
- _ ->
- []
- end,
+ ParsedForm =
+ case Req:get_primary_header_value("content-type") of
+ "application/x-www-form-urlencoded" ++ _ when
+ Method =:= 'POST' orelse Method =:= 'PUT'
+ ->
+ mochiweb_util:parse_qs(Body);
+ _ ->
+ []
+ end,
to_json_terms(ParsedForm);
-json_req_obj_field(<<"cookie">>, #httpd{mochi_req=Req}, _Db, _DocId) ->
+json_req_obj_field(<<"cookie">>, #httpd{mochi_req = Req}, _Db, _DocId) ->
to_json_terms(Req:parse_cookie());
json_req_obj_field(<<"userCtx">>, #httpd{}, Db, _DocId) ->
couch_util:json_user_ctx(Db);
-json_req_obj_field(<<"secObj">>, #httpd{user_ctx=UserCtx}, Db, _DocId) ->
+json_req_obj_field(<<"secObj">>, #httpd{user_ctx = UserCtx}, Db, _DocId) ->
get_db_security(Db, UserCtx).
-
get_db_info(Db) ->
case couch_db:is_clustered(Db) of
true ->
@@ -97,7 +115,6 @@ get_db_info(Db) ->
couch_db:get_db_info(Db)
end.
-
get_db_security(Db, #user_ctx{}) ->
case couch_db:is_clustered(Db) of
true ->
@@ -106,7 +123,6 @@ get_db_security(Db, #user_ctx{}) ->
couch_db:get_security(Db)
end.
-
to_json_terms(Data) ->
to_json_terms(Data, []).
to_json_terms([], Acc) ->
@@ -121,15 +137,15 @@ json_query_keys({Json}) ->
json_query_keys([], Acc) ->
{lists:reverse(Acc)};
json_query_keys([{<<"startkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"startkey">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"endkey">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"endkey">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"key">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"key">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([{<<"descending">>, Value} | Rest], Acc) ->
- json_query_keys(Rest, [{<<"descending">>, ?JSON_DECODE(Value)}|Acc]);
+ json_query_keys(Rest, [{<<"descending">>, ?JSON_DECODE(Value)} | Acc]);
json_query_keys([Term | Rest], Acc) ->
- json_query_keys(Rest, [Term|Acc]).
+ json_query_keys(Rest, [Term | Acc]).
send_external_response(Req, Response) ->
#extern_resp_args{
@@ -141,49 +157,60 @@ send_external_response(Req, Response) ->
} = parse_external_response(Response),
Headers1 = default_or_content_type(CType, Headers0),
case Json of
- nil ->
- Headers2 = chttpd_util:maybe_add_csp_header("showlist", Headers1, "sandbox"),
- chttpd:send_response(Req, Code, Headers2, Data);
- Json ->
- chttpd:send_json(Req, Code, Headers1, Json)
+ nil ->
+ Headers2 = chttpd_util:maybe_add_csp_header("showlist", Headers1, "sandbox"),
+ chttpd:send_response(Req, Code, Headers2, Data);
+ Json ->
+ chttpd:send_json(Req, Code, Headers1, Json)
end.
parse_external_response({Response}) ->
- lists:foldl(fun({Key,Value}, Args) ->
- case {Key, Value} of
- {"", _} ->
- Args;
- {<<"code">>, Value} ->
- Args#extern_resp_args{code=Value};
- {<<"stop">>, true} ->
- Args#extern_resp_args{stop=true};
- {<<"json">>, Value} ->
- Args#extern_resp_args{
- json=Value,
- ctype="application/json"};
- {<<"body">>, Value} ->
- Args#extern_resp_args{data=Value, ctype="text/html; charset=utf-8"};
- {<<"base64">>, Value} ->
- Args#extern_resp_args{
- data=base64:decode(Value),
- ctype="application/binary"
- };
- {<<"headers">>, {Headers}} ->
- NewHeaders = lists:map(fun({Header, HVal}) ->
- {couch_util:to_list(Header), couch_util:to_list(HVal)}
- end, Headers),
- Args#extern_resp_args{headers=NewHeaders};
- _ -> % unknown key
- Msg = lists:flatten(io_lib:format("Invalid data from external server: ~p", [{Key, Value}])),
- throw({external_response_error, Msg})
+ lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {Key, Value} of
+ {"", _} ->
+ Args;
+ {<<"code">>, Value} ->
+ Args#extern_resp_args{code = Value};
+ {<<"stop">>, true} ->
+ Args#extern_resp_args{stop = true};
+ {<<"json">>, Value} ->
+ Args#extern_resp_args{
+ json = Value,
+ ctype = "application/json"
+ };
+ {<<"body">>, Value} ->
+ Args#extern_resp_args{data = Value, ctype = "text/html; charset=utf-8"};
+ {<<"base64">>, Value} ->
+ Args#extern_resp_args{
+ data = base64:decode(Value),
+ ctype = "application/binary"
+ };
+ {<<"headers">>, {Headers}} ->
+ NewHeaders = lists:map(
+ fun({Header, HVal}) ->
+ {couch_util:to_list(Header), couch_util:to_list(HVal)}
+ end,
+ Headers
+ ),
+ Args#extern_resp_args{headers = NewHeaders};
+ % unknown key
+ _ ->
+ Msg = lists:flatten(
+ io_lib:format("Invalid data from external server: ~p", [{Key, Value}])
+ ),
+ throw({external_response_error, Msg})
end
- end, #extern_resp_args{}, Response).
+ end,
+ #extern_resp_args{},
+ Response
+ ).
default_or_content_type(DefaultContentType, Headers) ->
IsContentType = fun({X, _}) -> string:to_lower(X) == "content-type" end,
case lists:any(IsContentType, Headers) of
- false ->
- [{"Content-Type", DefaultContentType} | Headers];
- true ->
- Headers
+ false ->
+ [{"Content-Type", DefaultContentType} | Headers];
+ true ->
+ Headers
end.
diff --git a/src/chttpd/src/chttpd_handlers.erl b/src/chttpd/src/chttpd_handlers.erl
index 930563230..82eee7365 100644
--- a/src/chttpd/src/chttpd_handlers.erl
+++ b/src/chttpd/src/chttpd_handlers.erl
@@ -57,9 +57,9 @@ select(Handlers, _Default) ->
do_select([], Acc) ->
Acc;
-do_select([{override, Handler}|_], _Acc) ->
+do_select([{override, Handler} | _], _Acc) ->
[Handler];
-do_select([{default, _}|Rest], Acc) ->
+do_select([{default, _} | Rest], Acc) ->
do_select(Rest, Acc);
do_select([Handler], Acc) ->
[Handler | Acc];
@@ -73,14 +73,16 @@ select_override_test() ->
?assertEqual(selected, select([{override, selected}, foo], default)),
?assertEqual(selected, select([foo, {override, selected}], default)),
?assertEqual(selected, select([{override, selected}, {override, bar}], default)),
- ?assertError({badmatch,[bar, foo]}, select([foo, bar], default)).
+ ?assertError({badmatch, [bar, foo]}, select([foo, bar], default)).
select_default_override_test() ->
?assertEqual(selected, select([{default, new_default}, selected], old_default)),
?assertEqual(selected, select([selected, {default, new_default}], old_default)),
?assertEqual(selected, select([{default, selected}], old_default)),
?assertEqual(selected, select([], selected)),
- ?assertEqual(selected,
- select([{default, new_default}, {override, selected}, bar], old_default)).
+ ?assertEqual(
+ selected,
+ select([{default, new_default}, {override, selected}, bar], old_default)
+ ).
-endif.
diff --git a/src/chttpd/src/chttpd_httpd_handlers.erl b/src/chttpd/src/chttpd_httpd_handlers.erl
index 5e86ea87d..932b52e5f 100644
--- a/src/chttpd/src/chttpd_httpd_handlers.erl
+++ b/src/chttpd/src/chttpd_httpd_handlers.erl
@@ -14,33 +14,33 @@
-export([url_handler/1, db_handler/1, design_handler/1]).
-url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
-url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
-url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
-url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
-url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
-url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
-url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
-url_handler(<<"_node">>) -> fun chttpd_node:handle_node_req/1;
+url_handler(<<>>) -> fun chttpd_misc:handle_welcome_req/1;
+url_handler(<<"favicon.ico">>) -> fun chttpd_misc:handle_favicon_req/1;
+url_handler(<<"_utils">>) -> fun chttpd_misc:handle_utils_dir_req/1;
+url_handler(<<"_all_dbs">>) -> fun chttpd_misc:handle_all_dbs_req/1;
+url_handler(<<"_dbs_info">>) -> fun chttpd_misc:handle_dbs_info_req/1;
+url_handler(<<"_active_tasks">>) -> fun chttpd_misc:handle_task_status_req/1;
+url_handler(<<"_scheduler">>) -> fun couch_replicator_httpd:handle_scheduler_req/1;
+url_handler(<<"_node">>) -> fun chttpd_node:handle_node_req/1;
url_handler(<<"_reload_query_servers">>) -> fun chttpd_misc:handle_reload_query_servers_req/1;
-url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
-url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
-url_handler(<<"_session">>) -> fun chttpd_auth:handle_session_req/1;
-url_handler(<<"_up">>) -> fun chttpd_misc:handle_up_req/1;
+url_handler(<<"_replicate">>) -> fun chttpd_misc:handle_replicate_req/1;
+url_handler(<<"_uuids">>) -> fun chttpd_misc:handle_uuids_req/1;
+url_handler(<<"_session">>) -> fun chttpd_auth:handle_session_req/1;
+url_handler(<<"_up">>) -> fun chttpd_misc:handle_up_req/1;
url_handler(_) -> no_match.
db_handler(<<"_view_cleanup">>) -> fun chttpd_db:handle_view_cleanup_req/2;
-db_handler(<<"_compact">>) -> fun chttpd_db:handle_compact_req/2;
-db_handler(<<"_design">>) -> fun chttpd_db:handle_design_req/2;
-db_handler(<<"_partition">>) -> fun chttpd_db:handle_partition_req/2;
-db_handler(<<"_temp_view">>) -> fun chttpd_view:handle_temp_view_req/2;
-db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
+db_handler(<<"_compact">>) -> fun chttpd_db:handle_compact_req/2;
+db_handler(<<"_design">>) -> fun chttpd_db:handle_design_req/2;
+db_handler(<<"_partition">>) -> fun chttpd_db:handle_partition_req/2;
+db_handler(<<"_temp_view">>) -> fun chttpd_view:handle_temp_view_req/2;
+db_handler(<<"_changes">>) -> fun chttpd_db:handle_changes_req/2;
db_handler(_) -> no_match.
-design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
-design_handler(<<"_show">>) -> fun chttpd_show:handle_doc_show_req/3;
-design_handler(<<"_list">>) -> fun chttpd_show:handle_view_list_req/3;
-design_handler(<<"_update">>) -> fun chttpd_show:handle_doc_update_req/3;
-design_handler(<<"_info">>) -> fun chttpd_db:handle_design_info_req/3;
+design_handler(<<"_view">>) -> fun chttpd_view:handle_view_req/3;
+design_handler(<<"_show">>) -> fun chttpd_show:handle_doc_show_req/3;
+design_handler(<<"_list">>) -> fun chttpd_show:handle_view_list_req/3;
+design_handler(<<"_update">>) -> fun chttpd_show:handle_doc_update_req/3;
+design_handler(<<"_info">>) -> fun chttpd_db:handle_design_info_req/3;
design_handler(<<"_rewrite">>) -> fun chttpd_rewrite:handle_rewrite_req/3;
design_handler(_) -> no_match.
diff --git a/src/chttpd/src/chttpd_misc.erl b/src/chttpd/src/chttpd_misc.erl
index 25a0fa77f..6d119572d 100644
--- a/src/chttpd/src/chttpd_misc.erl
+++ b/src/chttpd/src/chttpd_misc.erl
@@ -31,9 +31,15 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
--import(chttpd,
- [send_json/2,send_json/3,send_method_not_allowed/2,
- send_chunk/2,start_chunked_response/3]).
+-import(
+ chttpd,
+ [
+ send_json/2, send_json/3,
+ send_method_not_allowed/2,
+ send_chunk/2,
+ start_chunked_response/3
+ ]
+).
-define(MAX_DB_NUM_FOR_DBS_INFO, 100).
@@ -42,19 +48,21 @@
handle_welcome_req(Req) ->
handle_welcome_req(Req, <<"Welcome">>).
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
- send_json(Req, {[
- {couchdb, WelcomeMessage},
- {version, list_to_binary(couch_server:get_version())},
- {git_sha, list_to_binary(couch_server:get_git_sha())},
- {uuid, couch_server:get_uuid()},
- {features, get_features()}
- ] ++ case config:get("vendor") of
- [] ->
- [];
- Properties ->
- [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
- end
+handle_welcome_req(#httpd{method = 'GET'} = Req, WelcomeMessage) ->
+ send_json(Req, {
+ [
+ {couchdb, WelcomeMessage},
+ {version, list_to_binary(couch_server:get_version())},
+ {git_sha, list_to_binary(couch_server:get_git_sha())},
+ {uuid, couch_server:get_uuid()},
+ {features, get_features()}
+ ] ++
+ case config:get("vendor") of
+ [] ->
+ [];
+ Properties ->
+ [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
+ end
});
handle_welcome_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
@@ -70,7 +78,7 @@ get_features() ->
handle_favicon_req(Req) ->
handle_favicon_req(Req, get_docroot()).
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+handle_favicon_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
{DateNow, TimeNow} = calendar:universal_time(),
DaysNow = calendar:date_to_gregorian_days(DateNow),
DaysWhenExpires = DaysNow + 365,
@@ -87,25 +95,26 @@ handle_favicon_req(Req, _) ->
handle_utils_dir_req(Req) ->
handle_utils_dir_req(Req, get_docroot()).
-handle_utils_dir_req(#httpd{method='GET'}=Req, DocumentRoot) ->
+handle_utils_dir_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
"/" ++ UrlPath = chttpd:path(Req),
case chttpd:partition(UrlPath) of
- {_ActionKey, "/", RelativePath} ->
- % GET /_utils/path or GET /_utils/
- CachingHeaders = [{"Cache-Control", "private, must-revalidate"}],
- DefaultValues = "child-src 'self' data: blob:; default-src 'self'; img-src 'self' data:; font-src 'self'; "
- "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
- Headers = chttpd_util:maybe_add_csp_header("utils", CachingHeaders, DefaultValues),
- chttpd:serve_file(Req, RelativePath, DocumentRoot, Headers);
- {_ActionKey, "", _RelativePath} ->
- % GET /_utils
- RedirectPath = chttpd:path(Req) ++ "/",
- chttpd:send_redirect(Req, RedirectPath)
+ {_ActionKey, "/", RelativePath} ->
+ % GET /_utils/path or GET /_utils/
+ CachingHeaders = [{"Cache-Control", "private, must-revalidate"}],
+ DefaultValues =
+ "child-src 'self' data: blob:; default-src 'self'; img-src 'self' data:; font-src 'self'; "
+ "script-src 'self' 'unsafe-eval'; style-src 'self' 'unsafe-inline';",
+ Headers = chttpd_util:maybe_add_csp_header("utils", CachingHeaders, DefaultValues),
+ chttpd:serve_file(Req, RelativePath, DocumentRoot, Headers);
+ {_ActionKey, "", _RelativePath} ->
+ % GET /_utils
+ RedirectPath = chttpd:path(Req) ++ "/",
+ chttpd:send_redirect(Req, RedirectPath)
end;
handle_utils_dir_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
+handle_all_dbs_req(#httpd{method = 'GET'} = Req) ->
Args = couch_mrview_http:parse_params(Req, undefined),
ShardDbName = config:get("mem3", "shards_db", "_dbs"),
%% shard_db is not sharded but mem3:shards treats it as an edge case
@@ -114,8 +123,8 @@ handle_all_dbs_req(#httpd{method='GET'}=Req) ->
Etag = couch_httpd:make_etag({Info}),
Options = [{user_ctx, Req#httpd.user_ctx}],
{ok, Resp} = chttpd:etag_respond(Req, Etag, fun() ->
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"ETag",Etag}]),
- VAcc = #vacc{req=Req,resp=Resp},
+ {ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [{"ETag", Etag}]),
+ VAcc = #vacc{req = Req, resp = Resp},
fabric:all_docs(ShardDbName, Options, fun all_dbs_callback/2, VAcc, Args)
end),
case is_record(Resp, vacc) of
@@ -125,26 +134,27 @@ handle_all_dbs_req(#httpd{method='GET'}=Req) ->
handle_all_dbs_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-all_dbs_callback({meta, _Meta}, #vacc{resp=Resp0}=Acc) ->
+all_dbs_callback({meta, _Meta}, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "["),
- {ok, Acc#vacc{resp=Resp1}};
-all_dbs_callback({row, Row}, #vacc{resp=Resp0}=Acc) ->
+ {ok, Acc#vacc{resp = Resp1}};
+all_dbs_callback({row, Row}, #vacc{resp = Resp0} = Acc) ->
Prepend = couch_mrview_http:prepend_val(Acc),
- case couch_util:get_value(id, Row) of <<"_design", _/binary>> ->
- {ok, Acc};
- DbName ->
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
- {ok, Acc#vacc{prepend=",", resp=Resp1}}
+ case couch_util:get_value(id, Row) of
+ <<"_design", _/binary>> ->
+ {ok, Acc};
+ DbName ->
+ {ok, Resp1} = chttpd:send_delayed_chunk(Resp0, [Prepend, ?JSON_ENCODE(DbName)]),
+ {ok, Acc#vacc{prepend = ",", resp = Resp1}}
end;
-all_dbs_callback(complete, #vacc{resp=Resp0}=Acc) ->
+all_dbs_callback(complete, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, "]"),
{ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp=Resp2}};
-all_dbs_callback({error, Reason}, #vacc{resp=Resp0}=Acc) ->
+ {ok, Acc#vacc{resp = Resp2}};
+all_dbs_callback({error, Reason}, #vacc{resp = Resp0} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp0, Reason),
- {ok, Acc#vacc{resp=Resp1}}.
+ {ok, Acc#vacc{resp = Resp1}}.
-handle_dbs_info_req(#httpd{method='POST'}=Req) ->
+handle_dbs_info_req(#httpd{method = 'POST'} = Req) ->
chttpd:validate_ctype(Req, "application/json"),
Props = chttpd:json_body_obj(Req),
Keys = couch_mrview_util:get_view_keys(Props),
@@ -152,41 +162,52 @@ handle_dbs_info_req(#httpd{method='POST'}=Req) ->
undefined -> throw({bad_request, "`keys` member must exist."});
_ -> ok
end,
- MaxNumber = config:get_integer("chttpd",
- "max_db_number_for_dbs_info_req", ?MAX_DB_NUM_FOR_DBS_INFO),
+ MaxNumber = config:get_integer(
+ "chttpd",
+ "max_db_number_for_dbs_info_req",
+ ?MAX_DB_NUM_FOR_DBS_INFO
+ ),
case length(Keys) =< MaxNumber of
true -> ok;
false -> throw({bad_request, too_many_keys})
end,
{ok, Resp} = chttpd:start_json_response(Req, 200),
send_chunk(Resp, "["),
- lists:foldl(fun(DbName, AccSeparator) ->
- case catch fabric:get_db_info(DbName) of
- {ok, Result} ->
- Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- _ ->
- Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
- send_chunk(Resp, AccSeparator ++ Json)
+ lists:foldl(
+ fun(DbName, AccSeparator) ->
+ case catch fabric:get_db_info(DbName) of
+ {ok, Result} ->
+ Json = ?JSON_ENCODE({[{key, DbName}, {info, {Result}}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ _ ->
+ Json = ?JSON_ENCODE({[{key, DbName}, {error, not_found}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ % AccSeparator now has a comma
+ ","
end,
- "," % AccSeparator now has a comma
- end, "", Keys),
+ "",
+ Keys
+ ),
send_chunk(Resp, "]"),
chttpd:end_json_response(Resp);
handle_dbs_info_req(Req) ->
send_method_not_allowed(Req, "POST").
-handle_task_status_req(#httpd{method='GET'}=Req) ->
+handle_task_status_req(#httpd{method = 'GET'} = Req) ->
ok = chttpd:verify_is_server_admin(Req),
{Replies, _BadNodes} = gen_server:multi_call(couch_task_status, all),
- Response = lists:flatmap(fun({Node, Tasks}) ->
- [{[{node,Node} | Task]} || Task <- Tasks]
- end, Replies),
+ Response = lists:flatmap(
+ fun({Node, Tasks}) ->
+ [{[{node, Node} | Task]} || Task <- Tasks]
+ end,
+ Replies
+ ),
send_json(Req, lists:sort(Response));
handle_task_status_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-handle_replicate_req(#httpd{method='POST', user_ctx=Ctx, req_body=PostBody} = Req) ->
+handle_replicate_req(#httpd{method = 'POST', user_ctx = Ctx, req_body = PostBody} = Req) ->
chttpd:validate_ctype(Req, "application/json"),
%% see HACK in chttpd.erl about replication
case replicate(PostBody, Ctx) of
@@ -198,11 +219,11 @@ handle_replicate_req(#httpd{method='POST', user_ctx=Ctx, req_body=PostBody} = Re
send_json(Req, {[{ok, true} | JsonResults]});
{ok, stopped} ->
send_json(Req, 200, {[{ok, stopped}]});
- {error, not_found=Error} ->
+ {error, not_found = Error} ->
chttpd:send_error(Req, Error);
- {error, {_, _}=Error} ->
+ {error, {_, _} = Error} ->
chttpd:send_error(Req, Error);
- {_, _}=Error ->
+ {_, _} = Error ->
chttpd:send_error(Req, Error)
end;
handle_replicate_req(Req) ->
@@ -210,50 +231,50 @@ handle_replicate_req(Req) ->
replicate({Props} = PostBody, Ctx) ->
case couch_util:get_value(<<"cancel">>, Props) of
- true ->
- cancel_replication(PostBody, Ctx);
- _ ->
- Node = choose_node([
- couch_util:get_value(<<"source">>, Props),
- couch_util:get_value(<<"target">>, Props)
- ]),
- case rpc:call(Node, couch_replicator, replicate, [PostBody, Ctx]) of
- {badrpc, Reason} ->
- erlang:error(Reason);
- Res ->
- Res
- end
+ true ->
+ cancel_replication(PostBody, Ctx);
+ _ ->
+ Node = choose_node([
+ couch_util:get_value(<<"source">>, Props),
+ couch_util:get_value(<<"target">>, Props)
+ ]),
+ case rpc:call(Node, couch_replicator, replicate, [PostBody, Ctx]) of
+ {badrpc, Reason} ->
+ erlang:error(Reason);
+ Res ->
+ Res
+ end
end.
cancel_replication(PostBody, Ctx) ->
{Res, _Bad} = rpc:multicall(couch_replicator, replicate, [PostBody, Ctx]),
case [X || {ok, {cancelled, _}} = X <- Res] of
- [Success|_] ->
- % Report success if at least one node canceled the replication
- Success;
- [] ->
- case lists:usort(Res) of
- [UniqueReply] ->
- % Report a universally agreed-upon reply
- UniqueReply;
+ [Success | _] ->
+ % Report success if at least one node canceled the replication
+ Success;
[] ->
- {error, badrpc};
- Else ->
- % Unclear what to do here -- pick the first error?
- % Except try ignoring any {error, not_found} responses
- % because we'll always get two of those
- hd(Else -- [{error, not_found}])
- end
+ case lists:usort(Res) of
+ [UniqueReply] ->
+ % Report a universally agreed-upon reply
+ UniqueReply;
+ [] ->
+ {error, badrpc};
+ Else ->
+ % Unclear what to do here -- pick the first error?
+ % Except try ignoring any {error, not_found} responses
+ % because we'll always get two of those
+ hd(Else -- [{error, not_found}])
+ end
end.
choose_node(Key) when is_binary(Key) ->
Checksum = erlang:crc32(Key),
- Nodes = lists:sort([node()|erlang:nodes()]),
+ Nodes = lists:sort([node() | erlang:nodes()]),
lists:nth(1 + Checksum rem length(Nodes), Nodes);
choose_node(Key) ->
choose_node(term_to_binary(Key)).
-handle_reload_query_servers_req(#httpd{method='POST'}=Req) ->
+handle_reload_query_servers_req(#httpd{method = 'POST'} = Req) ->
chttpd:validate_ctype(Req, "application/json"),
ok = couch_proc_manager:reload(),
send_json(Req, 200, {[{ok, true}]});
@@ -263,23 +284,21 @@ handle_reload_query_servers_req(Req) ->
handle_uuids_req(Req) ->
couch_httpd_misc_handlers:handle_uuids_req(Req).
-
-handle_up_req(#httpd{method='GET'} = Req) ->
+handle_up_req(#httpd{method = 'GET'} = Req) ->
case config:get("couchdb", "maintenance_mode") of
- "true" ->
- send_json(Req, 404, {[{status, maintenance_mode}]});
- "nolb" ->
- send_json(Req, 404, {[{status, nolb}]});
- _ ->
- {ok, {Status}} = mem3_seeds:get_status(),
- case couch_util:get_value(status, Status) of
- ok ->
- send_json(Req, 200, {Status});
- seeding ->
- send_json(Req, 404, {Status})
- end
+ "true" ->
+ send_json(Req, 404, {[{status, maintenance_mode}]});
+ "nolb" ->
+ send_json(Req, 404, {[{status, nolb}]});
+ _ ->
+ {ok, {Status}} = mem3_seeds:get_status(),
+ case couch_util:get_value(status, Status) of
+ ok ->
+ send_json(Req, 200, {Status});
+ seeding ->
+ send_json(Req, 404, {Status})
+ end
end;
-
handle_up_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
diff --git a/src/chttpd/src/chttpd_node.erl b/src/chttpd/src/chttpd_node.erl
index e92a1e506..7379dba02 100644
--- a/src/chttpd/src/chttpd_node.erl
+++ b/src/chttpd/src/chttpd_node.erl
@@ -21,18 +21,24 @@
-include_lib("couch/include/couch_db.hrl").
--import(chttpd,
- [send_json/2,send_json/3,send_method_not_allowed/2,
- send_chunk/2,start_chunked_response/3]).
+-import(
+ chttpd,
+ [
+ send_json/2, send_json/3,
+ send_method_not_allowed/2,
+ send_chunk/2,
+ start_chunked_response/3
+ ]
+).
% Node-specific request handler (_config and _stats)
% Support _local meaning this node
-handle_node_req(#httpd{path_parts=[_, <<"_local">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, <<"_local">>]} = Req) ->
send_json(Req, 200, {[{name, node()}]});
-handle_node_req(#httpd{path_parts=[A, <<"_local">>|Rest]}=Req) ->
- handle_node_req(Req#httpd{path_parts=[A, node()] ++ Rest});
+handle_node_req(#httpd{path_parts = [A, <<"_local">> | Rest]} = Req) ->
+ handle_node_req(Req#httpd{path_parts = [A, node()] ++ Rest});
% GET /_node/$node/_versions
-handle_node_req(#httpd{method='GET', path_parts=[_, _Node, <<"_versions">>]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, _Node, <<"_versions">>]} = Req) ->
IcuVer = couch_ejson_compare:get_icu_version(),
UcaVer = couch_ejson_compare:get_uca_version(),
send_json(Req, 200, #{
@@ -47,45 +53,56 @@ handle_node_req(#httpd{method='GET', path_parts=[_, _Node, <<"_versions">>]}=Req
version => couch_server:get_spidermonkey_version()
}
});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_versions">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_versions">>]} = Req) ->
send_method_not_allowed(Req, "GET");
-
% GET /_node/$node/_config
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>]}=Req) ->
- Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end, dict:new(), call_node(Node, config, all, [])),
- KVs = dict:fold(fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end, [], Grouped),
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>]} = Req) ->
+ Grouped = lists:foldl(
+ fun({{Section, Key}, Value}, Acc) ->
+ case dict:is_key(Section, Acc) of
+ true ->
+ dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+ false ->
+ dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+ end
+ end,
+ dict:new(),
+ call_node(Node, config, all, [])
+ ),
+ KVs = dict:fold(
+ fun(Section, Values, Acc) ->
+ [{list_to_binary(Section), {Values}} | Acc]
+ end,
+ [],
+ Grouped
+ ),
send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>]} = Req) ->
send_method_not_allowed(Req, "GET");
% POST /_node/$node/_config/_reload - Flushes unpersisted config values from RAM
-handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_config">>, <<"_reload">>]}=Req) ->
+handle_node_req(
+ #httpd{method = 'POST', path_parts = [_, Node, <<"_config">>, <<"_reload">>]} = Req
+) ->
case call_node(Node, config, reload, []) of
ok ->
send_json(Req, 200, {[{ok, true}]});
{error, Reason} ->
chttpd:send_error(Req, {bad_request, Reason})
end;
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, <<"_reload">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, <<"_reload">>]} = Req) ->
send_method_not_allowed(Req, "POST");
% GET /_node/$node/_config/Section
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section]}=Req) ->
- KVs = [{list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- call_node(Node, config, get, [Section])],
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>, Section]} = Req) ->
+ KVs = [
+ {list_to_binary(Key), list_to_binary(Value)}
+ || {Key, Value} <- call_node(Node, config, get, [Section])
+ ],
send_json(Req, 200, {KVs});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section]} = Req) ->
send_method_not_allowed(Req, "GET");
% PUT /_node/$node/_config/Section/Key
% "value"
-handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+handle_node_req(#httpd{method = 'PUT', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req) ->
couch_util:check_config_blacklist(Section),
Value = couch_util:trim(chttpd:json_body(Req)),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
@@ -99,34 +116,36 @@ handle_node_req(#httpd{method='PUT', path_parts=[_, Node, <<"_config">>, Section
chttpd:send_error(Req, {bad_request, Reason})
end;
% GET /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req) ->
case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
+ undefined ->
+ throw({not_found, unknown_config_value});
+ Value ->
+ send_json(Req, 200, list_to_binary(Value))
end;
% DELETE /_node/$node/_config/Section/Key
-handle_node_req(#httpd{method='DELETE',path_parts=[_, Node, <<"_config">>, Section, Key]}=Req) ->
+handle_node_req(
+ #httpd{method = 'DELETE', path_parts = [_, Node, <<"_config">>, Section, Key]} = Req
+) ->
couch_util:check_config_blacklist(Section),
Persist = chttpd:header_value(Req, "X-Couch-Persist") /= "false",
case call_node(Node, config, get, [Section, Key, undefined]) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- case call_node(Node, config, delete, [Section, Key, Persist]) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- {error, Reason} ->
- chttpd:send_error(Req, {bad_request, Reason})
- end
+ undefined ->
+ throw({not_found, unknown_config_value});
+ OldValue ->
+ case call_node(Node, config, delete, [Section, Key, Persist]) of
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ {error, Reason} ->
+ chttpd:send_error(Req, {bad_request, Reason})
+ end
end;
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section, _Key]} = Req) ->
send_method_not_allowed(Req, "GET,PUT,DELETE");
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_config">>, _Section, _Key | _]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_config">>, _Section, _Key | _]} = Req) ->
chttpd:send_error(Req, not_found);
% GET /_node/$node/_stats
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_stats">> | Path]} = Req) ->
flush(Node, Req),
Stats0 = call_node(Node, couch_stats, fetch, []),
Stats = couch_stats_httpd:transform_stats(Stats0),
@@ -134,49 +153,54 @@ handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_stats">> | Path]}=
EJSON0 = couch_stats_httpd:to_ejson(Nested),
EJSON1 = couch_stats_httpd:extract_path(Path, EJSON0),
chttpd:send_json(Req, EJSON1);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_stats">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_stats">>]} = Req) ->
send_method_not_allowed(Req, "GET");
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_prometheus">>]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_prometheus">>]} = Req) ->
Metrics = call_node(Node, couch_prometheus_server, scrape, []),
Version = call_node(Node, couch_prometheus_server, version, []),
- Type = "text/plain; version=" ++ Version,
+ Type = "text/plain; version=" ++ Version,
Header = [{<<"Content-Type">>, ?l2b(Type)}],
chttpd:send_response(Req, 200, Header, Metrics);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_prometheus">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_prometheus">>]} = Req) ->
send_method_not_allowed(Req, "GET");
% GET /_node/$node/_system
-handle_node_req(#httpd{method='GET', path_parts=[_, Node, <<"_system">>]}=Req) ->
+handle_node_req(#httpd{method = 'GET', path_parts = [_, Node, <<"_system">>]} = Req) ->
Stats = call_node(Node, chttpd_node, get_stats, []),
EJSON = couch_stats_httpd:to_ejson(Stats),
send_json(Req, EJSON);
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_system">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_system">>]} = Req) ->
send_method_not_allowed(Req, "GET");
% POST /_node/$node/_restart
-handle_node_req(#httpd{method='POST', path_parts=[_, Node, <<"_restart">>]}=Req) ->
+handle_node_req(#httpd{method = 'POST', path_parts = [_, Node, <<"_restart">>]} = Req) ->
call_node(Node, init, restart, []),
send_json(Req, 200, {[{ok, true}]});
-handle_node_req(#httpd{path_parts=[_, _Node, <<"_restart">>]}=Req) ->
+handle_node_req(#httpd{path_parts = [_, _Node, <<"_restart">>]} = Req) ->
send_method_not_allowed(Req, "POST");
-handle_node_req(#httpd{path_parts=[_, Node | PathParts],
- mochi_req=MochiReq0}) ->
+handle_node_req(#httpd{
+ path_parts = [_, Node | PathParts],
+ mochi_req = MochiReq0
+}) ->
% strip /_node/{node} from Req0 before descending further
RawUri = MochiReq0:get(raw_path),
{_, Query, Fragment} = mochiweb_util:urlsplit_path(RawUri),
NewPath0 = "/" ++ lists:join("/", [couch_util:url_encode(P) || P <- PathParts]),
NewRawPath = mochiweb_util:urlunsplit_path({NewPath0, Query, Fragment}),
MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296),
+ "max_http_request_size", 4294967296
+ ),
NewOpts = [{body, MochiReq0:recv_body(MaxSize)} | MochiReq0:get(opts)],
Ref = erlang:make_ref(),
- MochiReq = mochiweb_request:new({remote, self(), Ref},
- NewOpts,
- MochiReq0:get(method),
- NewRawPath,
- MochiReq0:get(version),
- MochiReq0:get(headers)),
+ MochiReq = mochiweb_request:new(
+ {remote, self(), Ref},
+ NewOpts,
+ MochiReq0:get(method),
+ NewRawPath,
+ MochiReq0:get(version),
+ MochiReq0:get(headers)
+ ),
call_node(Node, couch_httpd, handle_request, [MochiReq]),
recv_loop(Ref, MochiReq0);
-handle_node_req(#httpd{path_parts=[_]}=Req) ->
+handle_node_req(#httpd{path_parts = [_]} = Req) ->
chttpd:send_error(Req, {bad_request, <<"Incomplete path to _node request">>});
handle_node_req(Req) ->
chttpd:send_error(Req, not_found).
@@ -207,12 +231,13 @@ recv_loop(Ref, ReqResp) ->
end.
call_node(Node0, Mod, Fun, Args) when is_binary(Node0) ->
- Node1 = try
- list_to_existing_atom(?b2l(Node0))
- catch
- error:badarg ->
- throw({not_found, <<"no such node: ", Node0/binary>>})
- end,
+ Node1 =
+ try
+ list_to_existing_atom(?b2l(Node0))
+ catch
+ error:badarg ->
+ throw({not_found, <<"no such node: ", Node0/binary>>})
+ end,
call_node(Node1, Mod, Fun, Args);
call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
case rpc:call(Node, Mod, Fun, Args) of
@@ -232,15 +257,33 @@ flush(Node, Req) ->
end.
get_stats() ->
- Other = erlang:memory(system) - lists:sum([X || {_,X} <-
- erlang:memory([atom, code, binary, ets])]),
- Memory = [{other, Other} | erlang:memory([atom, atom_used, processes,
- processes_used, binary, code, ets])],
+ Other =
+ erlang:memory(system) -
+ lists:sum([
+ X
+ || {_, X} <-
+ erlang:memory([atom, code, binary, ets])
+ ]),
+ Memory = [
+ {other, Other}
+ | erlang:memory([
+ atom,
+ atom_used,
+ processes,
+ processes_used,
+ binary,
+ code,
+ ets
+ ])
+ ],
{NumberOfGCs, WordsReclaimed, _} = statistics(garbage_collection),
{{input, Input}, {output, Output}} = statistics(io),
{CF, CDU} = db_pid_stats(),
- MessageQueues0 = [{couch_file, {CF}}, {couch_db_updater, {CDU}},
- {couch_server, couch_server:aggregate_queue_len()}],
+ MessageQueues0 = [
+ {couch_file, {CF}},
+ {couch_db_updater, {CDU}},
+ {couch_server, couch_server:aggregate_queue_len()}
+ ],
MessageQueues = MessageQueues0 ++ message_queues(registered()),
{SQ, DCQ} = run_queues(),
[
@@ -283,13 +326,15 @@ db_pid_stats(Mod, Candidates) ->
{Mod, init, 1} ->
case proplists:get_value(message_queue_len, PI) of
undefined -> Acc;
- Len -> [Len|Acc]
+ Len -> [Len | Acc]
end;
- _ ->
+ _ ->
Acc
end
end
- end, [], Candidates
+ end,
+ [],
+ Candidates
),
format_pid_stats(Mailboxes).
@@ -308,17 +353,23 @@ format_pid_stats(Mailboxes) ->
].
get_distribution_stats() ->
- lists:map(fun({Node, Socket}) ->
- {ok, Stats} = inet:getstat(Socket),
- {Node, {Stats}}
- end, erlang:system_info(dist_ctrl)).
+ lists:map(
+ fun({Node, Socket}) ->
+ {ok, Stats} = inet:getstat(Socket),
+ {Node, {Stats}}
+ end,
+ erlang:system_info(dist_ctrl)
+ ).
message_queues(Registered) ->
- lists:map(fun(Name) ->
- Type = message_queue_len,
- {Type, Length} = process_info(whereis(Name), Type),
- {Name, Length}
- end, Registered).
+ lists:map(
+ fun(Name) ->
+ Type = message_queue_len,
+ {Type, Length} = process_info(whereis(Name), Type),
+ {Name, Length}
+ end,
+ Registered
+ ).
%% Workaround for https://bugs.erlang.org/browse/ERL-1355
run_queues() ->
diff --git a/src/chttpd/src/chttpd_plugin.erl b/src/chttpd/src/chttpd_plugin.erl
index 7ab458170..03d8ad6ac 100644
--- a/src/chttpd/src/chttpd_plugin.erl
+++ b/src/chttpd/src/chttpd_plugin.erl
@@ -48,7 +48,8 @@ before_response(HttpReq0, Code0, Headers0, Value0) ->
before_serve_file(Req0, Code0, Headers0, RelativePath0, DocumentRoot0) ->
[HttpReq, Code, Headers, RelativePath, DocumentRoot] =
with_pipe(before_serve_file, [
- Req0, Code0, Headers0, RelativePath0, DocumentRoot0]),
+ Req0, Code0, Headers0, RelativePath0, DocumentRoot0
+ ]),
{ok, {HttpReq, Code, Headers, RelativePath, DocumentRoot}}.
%% ------------------------------------------------------------------
diff --git a/src/chttpd/src/chttpd_prefer_header.erl b/src/chttpd/src/chttpd_prefer_header.erl
index de2660399..dbce54e65 100644
--- a/src/chttpd/src/chttpd_prefer_header.erl
+++ b/src/chttpd/src/chttpd_prefer_header.erl
@@ -18,44 +18,44 @@
maybe_return_minimal/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-define(DEFAULT_PREFER_MINIMAL,
"Cache-Control, Content-Length, Content-Range, "
- "Content-Type, ETag, Server, Transfer-Encoding, Vary").
-
+ "Content-Type, ETag, Server, Transfer-Encoding, Vary"
+).
maybe_return_minimal(#httpd{mochi_req = MochiReq}, Headers) ->
case get_prefer_header(MochiReq) of
- "return=minimal" ->
+ "return=minimal" ->
filter_headers(Headers, get_header_list());
- _ ->
+ _ ->
Headers
end.
-
get_prefer_header(Req) ->
case Req:get_header_value("Prefer") of
Value when is_list(Value) ->
string:to_lower(Value);
- undefined ->
+ undefined ->
undefined
end.
-
filter_headers(Headers, IncludeList) ->
- lists:filter(fun({HeaderName, _}) ->
- lists:member(HeaderName, IncludeList)
- end, Headers).
-
+ lists:filter(
+ fun({HeaderName, _}) ->
+ lists:member(HeaderName, IncludeList)
+ end,
+ Headers
+ ).
get_header_list() ->
- SectionStr = config:get("chttpd",
- "prefer_minimal", ?DEFAULT_PREFER_MINIMAL),
+ SectionStr = config:get(
+ "chttpd",
+ "prefer_minimal",
+ ?DEFAULT_PREFER_MINIMAL
+ ),
split_list(SectionStr).
-
split_list(S) ->
- re:split(S, "\\s*,\\s*", [trim, {return, list}]).
+ re:split(S, "\\s*,\\s*", [trim, {return, list}]).
diff --git a/src/chttpd/src/chttpd_rewrite.erl b/src/chttpd/src/chttpd_rewrite.erl
index 22edb3fac..4e77597d4 100644
--- a/src/chttpd/src/chttpd_rewrite.erl
+++ b/src/chttpd/src/chttpd_rewrite.erl
@@ -12,7 +12,6 @@
%
% bind_path is based on bind method from Webmachine
-
%% @doc Module for URL rewriting by pattern matching.
-module(chttpd_rewrite).
@@ -25,8 +24,7 @@
-define(SEPARATOR, $\/).
-define(MATCH_ALL, {bind, <<"*">>}).
-
-handle_rewrite_req(#httpd{}=Req, Db, DDoc) ->
+handle_rewrite_req(#httpd{} = Req, Db, DDoc) ->
RewritesSoFar = erlang:get(?REWRITE_COUNT),
MaxRewrites = chttpd_util:get_chttpd_config_integer("rewrite_limit", 100),
case RewritesSoFar >= MaxRewrites of
@@ -41,36 +39,45 @@ handle_rewrite_req(#httpd{}=Req, Db, DDoc) ->
Rules when is_binary(Rules) ->
case couch_query_servers:rewrite(Req, Db, DDoc) of
undefined ->
- chttpd:send_error(Req, 404, <<"rewrite_error">>,
- <<"Invalid path.">>);
+ chttpd:send_error(
+ Req,
+ 404,
+ <<"rewrite_error">>,
+ <<"Invalid path.">>
+ );
Rewrite ->
do_rewrite(Req, Rewrite)
end;
undefined ->
- chttpd:send_error(Req, 404, <<"rewrite_error">>,
- <<"Invalid path.">>)
+ chttpd:send_error(
+ Req,
+ 404,
+ <<"rewrite_error">>,
+ <<"Invalid path.">>
+ )
end.
-
-get_rules(#doc{body={Props}}) ->
+get_rules(#doc{body = {Props}}) ->
couch_util:get_value(<<"rewrites">>, Props).
-
-do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props) ->
+do_rewrite(#httpd{mochi_req = MochiReq} = Req, {Props} = Rewrite) when is_list(Props) ->
case couch_util:get_value(<<"code">>, Props) of
undefined ->
Method = rewrite_method(Req, Rewrite),
Headers = rewrite_headers(Req, Rewrite),
Path = ?b2l(rewrite_path(Req, Rewrite)),
- NewMochiReq = mochiweb_request:new(MochiReq:get(socket),
- Method,
- Path,
- MochiReq:get(version),
- Headers),
- Body = case couch_util:get_value(<<"body">>, Props) of
- undefined -> erlang:get(mochiweb_request_body);
- B -> B
- end,
+ NewMochiReq = mochiweb_request:new(
+ MochiReq:get(socket),
+ Method,
+ Path,
+ MochiReq:get(version),
+ Headers
+ ),
+ Body =
+ case couch_util:get_value(<<"body">>, Props) of
+ undefined -> erlang:get(mochiweb_request_body);
+ B -> B
+ end,
NewMochiReq:cleanup(),
case Body of
undefined -> [];
@@ -86,37 +93,49 @@ do_rewrite(#httpd{mochi_req=MochiReq}=Req, {Props}=Rewrite) when is_list(Props)
undefined -> [];
{H1} -> H1
end,
- rewrite_body(Rewrite))
+ rewrite_body(Rewrite)
+ )
end;
-do_rewrite(#httpd{method=Method,
- path_parts=[_DbName, <<"_design">>, _DesignName, _Rewrite|PathParts],
- mochi_req=MochiReq}=Req,
- Rules) when is_list(Rules) ->
+do_rewrite(
+ #httpd{
+ method = Method,
+ path_parts = [_DbName, <<"_design">>, _DesignName, _Rewrite | PathParts],
+ mochi_req = MochiReq
+ } = Req,
+ Rules
+) when is_list(Rules) ->
% create dispatch list from rules
Prefix = path_prefix(Req),
QueryList = lists:map(fun decode_query_value/1, chttpd:qs(Req)),
- DispatchList = [make_rule(Rule) || {Rule} <- Rules],
+ DispatchList = [make_rule(Rule) || {Rule} <- Rules],
Method1 = couch_util:to_binary(Method),
%% get raw path by matching url to a rule.
- RawPath = case try_bind_path(DispatchList, Method1,
- PathParts, QueryList) of
- no_dispatch_path ->
- throw(not_found);
- {NewPathParts, Bindings} ->
- Parts = [quote_plus(X) || X <- NewPathParts],
-
- % build new path, reencode query args, eventually convert
- % them to json
- Bindings1 = maybe_encode_bindings(Bindings),
- Path = iolist_to_binary([
- string:join(Parts, [?SEPARATOR]),
- [["?", mochiweb_util:urlencode(Bindings1)] || Bindings1 =/= []]
- ]),
-
- % if path is relative detect it and rewrite path
- safe_relative_path(Prefix, Path)
+ RawPath =
+ case
+ try_bind_path(
+ DispatchList,
+ Method1,
+ PathParts,
+ QueryList
+ )
+ of
+ no_dispatch_path ->
+ throw(not_found);
+ {NewPathParts, Bindings} ->
+ Parts = [quote_plus(X) || X <- NewPathParts],
+
+ % build new path, reencode query args, eventually convert
+ % them to json
+ Bindings1 = maybe_encode_bindings(Bindings),
+ Path = iolist_to_binary([
+ string:join(Parts, [?SEPARATOR]),
+ [["?", mochiweb_util:urlencode(Bindings1)] || Bindings1 =/= []]
+ ]),
+
+ % if path is relative detect it and rewrite path
+ safe_relative_path(Prefix, Path)
end,
% normalize final path (fix levels "." and "..")
@@ -125,30 +144,32 @@ do_rewrite(#httpd{method=Method,
couch_log:debug("rewrite to ~p ~n", [RawPath1]),
% build a new mochiweb request
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- RawPath1,
- MochiReq:get(version),
- MochiReq:get(headers)),
+ MochiReq1 = mochiweb_request:new(
+ MochiReq:get(socket),
+ MochiReq:get(method),
+ RawPath1,
+ MochiReq:get(version),
+ MochiReq:get(headers)
+ ),
% cleanup, It force mochiweb to reparse raw uri.
MochiReq1:cleanup(),
chttpd:handle_request_int(MochiReq1).
-
-rewrite_method(#httpd{method=Method}, {Props}) ->
+rewrite_method(#httpd{method = Method}, {Props}) ->
DefaultMethod = couch_util:to_binary(Method),
couch_util:get_value(<<"method">>, Props, DefaultMethod).
-rewrite_path(#httpd{}=Req, {Props}=Rewrite) ->
+rewrite_path(#httpd{} = Req, {Props} = Rewrite) ->
Prefix = path_prefix(Req),
- RewritePath = case couch_util:get_value(<<"path">>, Props) of
- undefined ->
- throw({<<"rewrite_error">>,
- <<"Rewrite result must produce a new path.">>});
- P -> P
- end,
+ RewritePath =
+ case couch_util:get_value(<<"path">>, Props) of
+ undefined ->
+ throw({<<"rewrite_error">>, <<"Rewrite result must produce a new path.">>});
+ P ->
+ P
+ end,
SafeRelativePath = safe_relative_path(Prefix, RewritePath),
NormalizedPath = normalize_path(SafeRelativePath),
QueryParams = rewrite_query_params(Req, Rewrite),
@@ -159,30 +180,33 @@ rewrite_path(#httpd{}=Req, {Props}=Rewrite) ->
<<NormalizedPath/binary, "?", QueryParams/binary>>
end.
-rewrite_query_params(#httpd{}=Req, {Props}) ->
+rewrite_query_params(#httpd{} = Req, {Props}) ->
RequestQS = chttpd:qs(Req),
- RewriteQS = case couch_util:get_value(<<"query">>, Props) of
- undefined -> RequestQS;
- {V} -> V
- end,
+ RewriteQS =
+ case couch_util:get_value(<<"query">>, Props) of
+ undefined -> RequestQS;
+ {V} -> V
+ end,
RewriteQSEsc = [{chttpd:quote(K), chttpd:quote(V)} || {K, V} <- RewriteQS],
iolist_to_binary(string:join([[K, "=", V] || {K, V} <- RewriteQSEsc], "&")).
-rewrite_headers(#httpd{mochi_req=MochiReq}, {Props}) ->
+rewrite_headers(#httpd{mochi_req = MochiReq}, {Props}) ->
case couch_util:get_value(<<"headers">>, Props) of
undefined ->
MochiReq:get(headers);
{H} ->
mochiweb_headers:enter_from_list(
lists:map(fun({Key, Val}) -> {?b2l(Key), ?b2l(Val)} end, H),
- MochiReq:get(headers))
+ MochiReq:get(headers)
+ )
end.
rewrite_body({Props}) ->
- Body = case couch_util:get_value(<<"body">>, Props) of
- undefined -> erlang:get(mochiweb_request_body);
- B -> B
- end,
+ Body =
+ case couch_util:get_value(<<"body">>, Props) of
+ undefined -> erlang:get(mochiweb_request_body);
+ B -> B
+ end,
case Body of
undefined ->
[];
@@ -191,8 +215,7 @@ rewrite_body({Props}) ->
Body
end.
-
-path_prefix(#httpd{path_parts=[DbName, <<"_design">>, DesignName | _]}) ->
+path_prefix(#httpd{path_parts = [DbName, <<"_design">>, DesignName | _]}) ->
EscapedDesignName = ?l2b(couch_util:url_encode(DesignName)),
EscapedDbName = ?l2b(couch_util:url_encode(DbName)),
DesignId = <<"_design/", EscapedDesignName/binary>>,
@@ -207,7 +230,6 @@ safe_relative_path(Prefix, Path) ->
<<Prefix/binary, "/", V1/binary>>
end.
-
quote_plus({bind, X}) ->
mochiweb_util:quote_plus(X);
quote_plus(X) ->
@@ -217,7 +239,7 @@ quote_plus(X) ->
%% 404 error not_found is raised
try_bind_path([], _Method, _PathParts, _QueryList) ->
no_dispatch_path;
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
+try_bind_path([Dispatch | Rest], Method, PathParts, QueryList) ->
[{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
case bind_method(Method1, Method) of
true ->
@@ -226,22 +248,35 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
Bindings1 = Bindings ++ QueryList,
% we parse query args from the rule and fill
% it eventually with bindings vars
- QueryArgs1 = make_query_list(QueryArgs, Bindings1,
- Formats, []),
+ QueryArgs1 = make_query_list(
+ QueryArgs,
+ Bindings1,
+ Formats,
+ []
+ ),
% remove params in QueryLists1 that are already in
% QueryArgs1
- Bindings2 = lists:foldl(fun({K, V}, Acc) ->
- K1 = to_binding(K),
- KV = case couch_util:get_value(K1, QueryArgs1) of
- undefined -> [{K1, V}];
- _V1 -> []
+ Bindings2 = lists:foldl(
+ fun({K, V}, Acc) ->
+ K1 = to_binding(K),
+ KV =
+ case couch_util:get_value(K1, QueryArgs1) of
+ undefined -> [{K1, V}];
+ _V1 -> []
+ end,
+ Acc ++ KV
end,
- Acc ++ KV
- end, [], Bindings1),
+ [],
+ Bindings1
+ ),
FinalBindings = Bindings2 ++ QueryArgs1,
- NewPathParts = make_new_path(RedirectPath, FinalBindings,
- Remaining, []),
+ NewPathParts = make_new_path(
+ RedirectPath,
+ FinalBindings,
+ Remaining,
+ []
+ ),
{NewPathParts, FinalBindings};
fail ->
try_bind_path(Rest, Method, PathParts, QueryList)
@@ -255,37 +290,51 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
%% passed in url.
make_query_list([], _Bindings, _Formats, Acc) ->
Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
+make_query_list([{Key, {Value}} | Rest], Bindings, Formats, Acc) ->
Value1 = {Value},
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
+make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_binary(Value) ->
Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
+make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_list(Value) ->
Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
+make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) ->
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value} | Acc]).
-replace_var(<<"*">>=Value, Bindings, Formats) ->
+replace_var(<<"*">> = Value, Bindings, Formats) ->
get_var(Value, Bindings, Value, Formats);
replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
get_var(Var, Bindings, Value, Formats);
replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
Value;
replace_var(Value, Bindings, Formats) when is_list(Value) ->
- lists:reverse(lists:foldl(fun
- (<<":", Var/binary>>=Value1, Acc) ->
- [get_var(Var, Bindings, Value1, Formats)|Acc];
+ lists:reverse(
+ lists:foldl(
+ fun
+ (<<":", Var/binary>> = Value1, Acc) ->
+ [get_var(Var, Bindings, Value1, Formats) | Acc];
(Value1, Acc) ->
- [Value1|Acc]
- end, [], Value));
+ [Value1 | Acc]
+ end,
+ [],
+ Value
+ )
+ );
replace_var(Value, _Bindings, _Formats) ->
Value.
maybe_json(Key, Value) ->
- case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
- <<"endkey">>, <<"end_key">>, <<"keys">>]) of
+ case
+ lists:member(Key, [
+ <<"key">>,
+ <<"startkey">>,
+ <<"start_key">>,
+ <<"endkey">>,
+ <<"end_key">>,
+ <<"keys">>
+ ])
+ of
true ->
?JSON_ENCODE(Value);
false ->
@@ -300,7 +349,7 @@ get_var(VarName, Props, Default, Formats) ->
maybe_format(VarName, Value, Formats) ->
case couch_util:get_value(VarName, Formats) of
undefined ->
- Value;
+ Value;
Format ->
format(Format, Value)
end.
@@ -325,7 +374,7 @@ format(<<"bool">>, Value) when is_list(Value) ->
_ -> Value
end;
format(_Format, Value) ->
- Value.
+ Value.
%% doc: build new patch from bindings. bindings are query args
%% (+ dynamic query rewritten if needed) and bindings found in
@@ -335,19 +384,18 @@ make_new_path([], _Bindings, _Remaining, Acc) ->
make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+make_new_path([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
- P2 = case couch_util:get_value({bind, P}, Bindings) of
- undefined -> << "undefined">>;
- P1 ->
- iolist_to_binary(P1)
- end,
- make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
- make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
+make_new_path([{bind, P} | Rest], Bindings, Remaining, Acc) ->
+ P2 =
+ case couch_util:get_value({bind, P}, Bindings) of
+ undefined -> <<"undefined">>;
+ P1 -> iolist_to_binary(P1)
+ end,
+ make_new_path(Rest, Bindings, Remaining, [P2 | Acc]);
+make_new_path([P | Rest], Bindings, Remaining, Acc) ->
+ make_new_path(Rest, Bindings, Remaining, [P | Acc]).
%% @doc If method of the query fith the rule method. If the
%% method rule is '*', which is the default, all
@@ -360,7 +408,6 @@ bind_method({bind, Method}, Method) ->
bind_method(_, _) ->
false.
-
%% @doc bind path. Using the rule from we try to bind variables given
%% to the current url by pattern matching
bind_path([], [], Bindings) ->
@@ -369,63 +416,65 @@ bind_path([?MATCH_ALL], Rest, Bindings) when is_list(Rest) ->
{ok, Rest, Bindings};
bind_path(_, [], _) ->
fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
- bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
+bind_path([{bind, Token} | RestToken], [Match | RestMatch], Bindings) ->
+ bind_path(RestToken, RestMatch, [{{bind, Token}, Match} | Bindings]);
+bind_path([Token | RestToken], [Token | RestMatch], Bindings) ->
bind_path(RestToken, RestMatch, Bindings);
bind_path(_, _, _) ->
fail.
-
%% normalize path.
-normalize_path(Path) when is_binary(Path)->
+normalize_path(Path) when is_binary(Path) ->
normalize_path(?b2l(Path));
-normalize_path(Path) when is_list(Path)->
+normalize_path(Path) when is_list(Path) ->
Segments = normalize_path1(string:tokens(Path, "/"), []),
NormalizedPath = string:join(Segments, [?SEPARATOR]),
iolist_to_binary(["/", NormalizedPath]).
-
normalize_path1([], Acc) ->
lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
- Acc1 = case Acc of
- [] -> [".."|Acc];
- [T|_] when T =:= ".." -> [".."|Acc];
- [_|R] -> R
- end,
+normalize_path1([".." | Rest], Acc) ->
+ Acc1 =
+ case Acc of
+ [] -> [".." | Acc];
+ [T | _] when T =:= ".." -> [".." | Acc];
+ [_ | R] -> R
+ end,
normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
+normalize_path1(["." | Rest], Acc) ->
normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
- normalize_path1(Rest, [Path|Acc]).
-
+normalize_path1([Path | Rest], Acc) ->
+ normalize_path1(Rest, [Path | Acc]).
%% @doc transform json rule in erlang for pattern matching
make_rule(Rule) ->
- Method = case couch_util:get_value(<<"method">>, Rule) of
- undefined -> ?MATCH_ALL;
- M -> to_binding(M)
- end,
- QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
- undefined -> [];
- {Args} -> Args
+ Method =
+ case couch_util:get_value(<<"method">>, Rule) of
+ undefined -> ?MATCH_ALL;
+ M -> to_binding(M)
end,
- FromParts = case couch_util:get_value(<<"from">>, Rule) of
- undefined -> [?MATCH_ALL];
- From ->
- parse_path(From)
+ QueryArgs =
+ case couch_util:get_value(<<"query">>, Rule) of
+ undefined -> [];
+ {Args} -> Args
end,
- ToParts = case couch_util:get_value(<<"to">>, Rule) of
- undefined ->
- throw({error, invalid_rewrite_target});
- To ->
- parse_path(To)
+ FromParts =
+ case couch_util:get_value(<<"from">>, Rule) of
+ undefined -> [?MATCH_ALL];
+ From -> parse_path(From)
+ end,
+ ToParts =
+ case couch_util:get_value(<<"to">>, Rule) of
+ undefined ->
+ throw({error, invalid_rewrite_target});
+ To ->
+ parse_path(To)
+ end,
+ Formats =
+ case couch_util:get_value(<<"formats">>, Rule) of
+ undefined -> [];
+ {Fmts} -> Fmts
end,
- Formats = case couch_util:get_value(<<"formats">>, Rule) of
- undefined -> [];
- {Fmts} -> Fmts
- end,
[{FromParts, Method}, ToParts, QueryArgs, Formats].
parse_path(Path) ->
@@ -437,43 +486,59 @@ parse_path(Path) ->
%% in erlang atom.
path_to_list([], Acc, _DotDotCount) ->
lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
+path_to_list([<<>> | R], Acc, DotDotCount) ->
path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
- path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
+path_to_list([<<"*">> | R], Acc, DotDotCount) ->
+ path_to_list(R, [?MATCH_ALL | Acc], DotDotCount);
+path_to_list([<<"..">> | R], Acc, DotDotCount) when DotDotCount == 2 ->
case chttpd_util:get_chttpd_config_boolean("secure_rewrites", true) of
false ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+ path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
true ->
- couch_log:notice("insecure_rewrite_rule ~p blocked",
- [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
+ couch_log:notice(
+ "insecure_rewrite_rule ~p blocked",
+ [lists:reverse(Acc) ++ [<<"..">>] ++ R]
+ ),
throw({insecure_rewrite_rule, "too many ../.. segments"})
end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
- P1 = case P of
- <<":", Var/binary>> ->
- to_binding(Var);
- _ -> P
- end,
- path_to_list(R, [P1|Acc], DotDotCount).
+path_to_list([<<"..">> | R], Acc, DotDotCount) ->
+ path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
+path_to_list([P | R], Acc, DotDotCount) ->
+ P1 =
+ case P of
+ <<":", Var/binary>> ->
+ to_binding(Var);
+ _ ->
+ P
+ end,
+ path_to_list(R, [P1 | Acc], DotDotCount).
maybe_encode_bindings([]) ->
[];
maybe_encode_bindings(Props) ->
- lists:foldl(fun
+ lists:foldl(
+ fun
({{bind, <<"*">>}, _V}, Acc) ->
Acc;
({{bind, K}, V}, Acc) ->
V1 = iolist_to_binary(maybe_json(K, V)),
- [{K, V1}|Acc]
- end, [], Props).
-
-decode_query_value({K,V}) ->
- case lists:member(K, ["key", "startkey", "start_key",
- "endkey", "end_key", "keys"]) of
+ [{K, V1} | Acc]
+ end,
+ [],
+ Props
+ ).
+
+decode_query_value({K, V}) ->
+ case
+ lists:member(K, [
+ "key",
+ "startkey",
+ "start_key",
+ "endkey",
+ "end_key",
+ "keys"
+ ])
+ of
true ->
{to_binding(K), ?JSON_DECODE(V)};
false ->
diff --git a/src/chttpd/src/chttpd_show.erl b/src/chttpd/src/chttpd_show.erl
index c2c37c66d..e798a98d6 100644
--- a/src/chttpd/src/chttpd_show.erl
+++ b/src/chttpd/src/chttpd_show.erl
@@ -24,17 +24,20 @@
maybe_open_doc(Db, DocId, Options) ->
case fabric:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- chttpd_stats:incr_reads(),
- Doc;
- {not_found, _} ->
- nil
+ {ok, Doc} ->
+ chttpd_stats:incr_reads(),
+ Doc;
+ {not_found, _} ->
+ nil
end.
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId]
- }=Req, Db, DDoc) ->
-
+handle_doc_show_req(
+ #httpd{
+ path_parts = [_, _, _, _, ShowName, DocId]
+ } = Req,
+ Db,
+ DDoc
+) ->
% open the doc
Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
Doc = maybe_open_doc(Db, DocId, Options),
@@ -42,13 +45,15 @@ handle_doc_show_req(#httpd{
% we don't handle revs here b/c they are an internal api
% returns 404 if there is no doc with DocId
handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId|Rest]
- }=Req, Db, DDoc) ->
-
- DocParts = [DocId|Rest],
- DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
+handle_doc_show_req(
+ #httpd{
+ path_parts = [_, _, _, _, ShowName, DocId | Rest]
+ } = Req,
+ Db,
+ DDoc
+) ->
+ DocParts = [DocId | Rest],
+ DocId1 = ?l2b(string:join([?b2l(P) || P <- DocParts], "/")),
% open the doc
Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
@@ -57,13 +62,15 @@ handle_doc_show_req(#httpd{
% we don't handle revs here b/c they are an internal api
% pass 404 docs to the show function
handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName]
- }=Req, Db, DDoc) ->
+handle_doc_show_req(
+ #httpd{
+ path_parts = [_, _, _, _, ShowName]
+ } = Req,
+ Db,
+ DDoc
+) ->
% with no docid the doc is nil
handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
handle_doc_show_req(Req, _Db, _DDoc) ->
chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
@@ -79,21 +86,25 @@ handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
JsonDoc = couch_query_servers:json_doc(Doc),
[<<"resp">>, ExternalResp] =
- couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
- [JsonDoc, JsonReq]),
+ couch_query_servers:ddoc_prompt(
+ DDoc,
+ [<<"shows">>, ShowName],
+ [JsonDoc, JsonReq]
+ ),
JsonResp = apply_etag(ExternalResp, CurrentEtag),
chttpd_external:send_external_response(Req, JsonResp)
end).
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
+show_etag(#httpd{user_ctx = UserCtx} = Req, Doc, DDoc, More) ->
Accept = chttpd:header_value(Req, "Accept"),
- DocPart = case Doc of
- nil -> nil;
- Doc -> chttpd:doc_etag(Doc)
- end,
- couch_httpd:make_etag({couch_httpd:doc_etag(DDoc), DocPart, Accept,
- UserCtx#user_ctx.roles, More}).
+ DocPart =
+ case Doc of
+ nil -> nil;
+ Doc -> chttpd:doc_etag(Doc)
+ end,
+ couch_httpd:make_etag({
+ couch_httpd:doc_etag(DDoc), DocPart, Accept, UserCtx#user_ctx.roles, More
+ }).
% /db/_design/foo/update/bar/docid
% updates a doc based on a request
@@ -101,19 +112,25 @@ show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
% % anything but GET
% send_method_not_allowed(Req, "POST,PUT,DELETE,ETC");
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName]
- }=Req, Db, DDoc) ->
+handle_doc_update_req(
+ #httpd{
+ path_parts = [_, _, _, _, UpdateName]
+ } = Req,
+ Db,
+ DDoc
+) ->
send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName | DocIdParts]
- }=Req, Db, DDoc) ->
+handle_doc_update_req(
+ #httpd{
+ path_parts = [_, _, _, _, UpdateName | DocIdParts]
+ } = Req,
+ Db,
+ DDoc
+) ->
DocId = ?l2b(string:join([?b2l(P) || P <- DocIdParts], "/")),
Options = [conflicts, {user_ctx, Req#httpd.user_ctx}],
Doc = maybe_open_doc(Db, DocId, Options),
send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-
handle_doc_update_req(Req, _Db, _DDoc) ->
chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
@@ -125,74 +142,107 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
Cmd = [<<"updates">>, UpdateName],
W = chttpd:qs_value(Req, "w", integer_to_list(mem3:quorum(Db))),
UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
- JsonResp = case UpdateResp of
- [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
- case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
- "true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}, {w, W}];
- _ ->
- Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}]
- end,
- NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
- couch_doc:validate_docid(NewDoc#doc.id),
- {UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
- chttpd_stats:incr_writes(),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- case {UpdateResult, NewRev} of
- {ok, _} ->
- Code = 201;
- {accepted, _} ->
- Code = 202
- end,
- {JsonResp1} = apply_headers(JsonResp0, [
- {<<"X-Couch-Update-NewRev">>, NewRevStr},
- {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
- ]),
- {[{<<"code">>, Code} | JsonResp1]};
- [<<"up">>, _Other, {JsonResp0}] ->
- {[{<<"code">>, 200} | JsonResp0]}
- end,
+ JsonResp =
+ case UpdateResp of
+ [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
+ case chttpd:header_value(Req, "X-Couch-Full-Commit", "false") of
+ "true" ->
+ Options = [full_commit, {user_ctx, Req#httpd.user_ctx}, {w, W}];
+ _ ->
+ Options = [{user_ctx, Req#httpd.user_ctx}, {w, W}]
+ end,
+ NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
+ couch_doc:validate_docid(NewDoc#doc.id),
+ {UpdateResult, NewRev} = fabric:update_doc(Db, NewDoc, Options),
+ chttpd_stats:incr_writes(),
+ NewRevStr = couch_doc:rev_to_str(NewRev),
+ case {UpdateResult, NewRev} of
+ {ok, _} ->
+ Code = 201;
+ {accepted, _} ->
+ Code = 202
+ end,
+ {JsonResp1} = apply_headers(JsonResp0, [
+ {<<"X-Couch-Update-NewRev">>, NewRevStr},
+ {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
+ ]),
+ {[{<<"code">>, Code} | JsonResp1]};
+ [<<"up">>, _Other, {JsonResp0}] ->
+ {[{<<"code">>, 200} | JsonResp0]}
+ end,
% todo set location field
chttpd_external:send_external_response(Req, JsonResp).
-
% view-list request with view and list from same design doc.
-handle_view_list_req(#httpd{method=Method,
- path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
+handle_view_list_req(
+ #httpd{
+ method = Method,
+ path_parts = [_, _, DesignName, _, ListName, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) when
+ Method =:= 'GET' orelse Method =:= 'OPTIONS'
+->
Keys = chttpd:qs_json_value(Req, "keys", undefined),
handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
% view-list request with view and list from different design docs.
-handle_view_list_req(#httpd{method=Method,
- path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
+handle_view_list_req(
+ #httpd{
+ method = Method,
+ path_parts = [_, _, _, _, ListName, DesignName, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) when
+ Method =:= 'GET' orelse Method =:= 'OPTIONS'
+->
Keys = chttpd:qs_json_value(Req, "keys", undefined),
handle_view_list(Req, Db, DDoc, ListName, {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method=Method}=Req, _Db, _DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
+handle_view_list_req(#httpd{method = Method} = Req, _Db, _DDoc) when
+ Method =:= 'GET' orelse Method =:= 'OPTIONS'
+->
chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
-handle_view_list_req(#httpd{method='POST',
- path_parts=[_, _, DesignName, _, ListName, ViewName]}=Req, Db, DDoc) ->
+handle_view_list_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, _, DesignName, _, ListName, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) ->
chttpd:validate_ctype(Req, "application/json"),
{Props} = chttpd:json_body(Req),
Keys = proplists:get_value(<<"keys">>, Props, undefined),
- handle_view_list(Req#httpd{req_body={Props}}, Db, DDoc, ListName,
- {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ListName, DesignName, ViewName]}=Req, Db, DDoc) ->
+ handle_view_list(
+ Req#httpd{req_body = {Props}},
+ Db,
+ DDoc,
+ ListName,
+ {DesignName, ViewName},
+ Keys
+ );
+handle_view_list_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, _, _, _, ListName, DesignName, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) ->
chttpd:validate_ctype(Req, "application/json"),
{Props} = chttpd:json_body(Req),
Keys = proplists:get_value(<<"keys">>, Props, undefined),
- handle_view_list(Req#httpd{req_body={Props}}, Db, DDoc, ListName,
- {DesignName, ViewName}, Keys);
-
-handle_view_list_req(#httpd{method='POST'}=Req, _Db, _DDoc) ->
+ handle_view_list(
+ Req#httpd{req_body = {Props}},
+ Db,
+ DDoc,
+ ListName,
+ {DesignName, ViewName},
+ Keys
+ );
+handle_view_list_req(#httpd{method = 'POST'} = Req, _Db, _DDoc) ->
chttpd:send_error(Req, 404, <<"list_error">>, <<"Invalid path.">>);
-
handle_view_list_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
@@ -215,12 +265,18 @@ handle_view_list(Req, Db, DDoc, LName, {ViewDesignName, ViewName}, Keys) ->
<<"_all_docs">> ->
fabric:all_docs(Db, Options, CB, Acc, QueryArgs);
_ ->
- fabric:query_view(Db, Options, VDoc, ViewName,
- CB, Acc, QueryArgs)
+ fabric:query_view(
+ Db,
+ Options,
+ VDoc,
+ ViewName,
+ CB,
+ Acc,
+ QueryArgs
+ )
end
end).
-
list_cb({row, Row} = Msg, Acc) ->
case lists:keymember(doc, 1, Row) of
true -> chttpd_stats:incr_reads();
@@ -228,11 +284,9 @@ list_cb({row, Row} = Msg, Acc) ->
end,
chttpd_stats:incr_rows(),
couch_mrview_show:list_cb(Msg, Acc);
-
list_cb(Msg, Acc) ->
couch_mrview_show:list_cb(Msg, Acc).
-
% Maybe this is in the proplists API
% todo move to couch_util
json_apply_field(H, {L}) ->
@@ -245,7 +299,7 @@ json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
json_apply_field({Key, NewValue}, [], Acc) ->
% end of list, add ours
- {[{Key, NewValue}|Acc]}.
+ {[{Key, NewValue} | Acc]}.
apply_etag(JsonResp, undefined) ->
JsonResp;
@@ -264,7 +318,7 @@ apply_headers(JsonResp, []) ->
apply_headers(JsonResp, NewHeaders) ->
case couch_util:get_value(<<"headers">>, JsonResp) of
undefined ->
- {[{<<"headers">>, {NewHeaders}}| JsonResp]};
+ {[{<<"headers">>, {NewHeaders}} | JsonResp]};
JsonHeaders ->
Headers = apply_headers1(JsonHeaders, NewHeaders),
NewKV = {<<"headers">>, Headers},
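The json_apply_field/2 clauses in the chttpd_show.erl hunk above rebuild the proplist by hand to replace-or-add a key, and the comment above them wonders whether the proplists API already covers this. Purely as an illustration (this helper is not part of the commit), the replace-or-add step can be expressed with the stdlib's lists:keystore/4; the one behavioural difference is that the new pair ends up at the tail of the object rather than at the head:

```
%% Illustration only: a keystore-based equivalent of json_apply_field/2's
%% replace-or-add behaviour. lists:keystore/4 replaces the first pair whose
%% key matches, or appends the new pair when no match exists.
json_apply_field_keystore({Key, NewValue}, {Props}) ->
    {lists:keystore(Key, 1, Props, {Key, NewValue})}.
```

For example, applying it to `{[{<<"headers">>, {[]}}]}` with `{<<"code">>, 200}` yields `{[{<<"headers">>, {[]}}, {<<"code">>, 200}]}`, whereas the hand-rolled version conses the new pair at the front.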
diff --git a/src/chttpd/src/chttpd_stats.erl b/src/chttpd/src/chttpd_stats.erl
index b76c5618b..f6eb01659 100644
--- a/src/chttpd/src/chttpd_stats.erl
+++ b/src/chttpd/src/chttpd_stats.erl
@@ -1,4 +1,3 @@
-
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -30,21 +29,17 @@
incr_rows/1
]).
-
-record(st, {
reads = 0,
writes = 0,
rows = 0
}).
-
-define(KEY, chttpd_stats).
-
init() ->
put(?KEY, #st{}).
-
report(HttpReq, HttpResp) ->
try
case get(?KEY) of
@@ -58,7 +53,6 @@ report(HttpReq, HttpResp) ->
couch_log:error(Fmt, [T, R, S])
end.
-
report(HttpReq, HttpResp, St) ->
case config:get("chttpd", "stats_reporter") of
undefined ->
@@ -73,31 +67,24 @@ report(HttpReq, HttpResp, St) ->
Mod:report(HttpReq, HttpResp, Reads, Writes, Rows)
end.
-
incr_reads() ->
incr(#st.reads, 1).
-
incr_reads(N) when is_integer(N), N >= 0 ->
incr(#st.reads, N).
-
incr_writes() ->
incr(#st.writes, 1).
-
incr_writes(N) when is_integer(N), N >= 0 ->
incr(#st.writes, N).
-
incr_rows() ->
incr(#st.rows, 1).
-
incr_rows(N) when is_integer(N), N >= 0 ->
incr(#st.rows, N).
-
incr(Idx, Count) ->
case get(?KEY) of
#st{} = St ->
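The chttpd_stats.erl hunk above leans on two Erlang details that are easy to miss in diff form: the counters live in a record stored in the calling process's dictionary, and `#st.reads`, `#st.writes`, `#st.rows` are compile-time tuple positions, so a single incr/2 clause can update any field with element/2 and setelement/3. A minimal, self-contained sketch of that pattern (hypothetical module name, not code from the commit; what the original does when init/0 was never called falls outside the hunk shown, so the undefined branch here is an assumption):

```
-module(pd_counter_sketch).
-export([init/0, incr_reads/1, incr_writes/1, snapshot/0]).

-record(st, {reads = 0, writes = 0, rows = 0}).
-define(KEY, pd_counter_sketch).

init() ->
    put(?KEY, #st{}).

incr_reads(N) when is_integer(N), N >= 0 ->
    incr(#st.reads, N).

incr_writes(N) when is_integer(N), N >= 0 ->
    incr(#st.writes, N).

snapshot() ->
    get(?KEY).

%% Idx is a record field index such as #st.reads, i.e. an integer position
%% into the underlying tuple, so the same clause serves every counter.
incr(Idx, Count) ->
    case get(?KEY) of
        #st{} = St ->
            put(?KEY, setelement(Idx, St, element(Idx, St) + Count));
        undefined ->
            %% assumed behaviour: silently drop updates in processes that
            %% never called init/0
            ok
    end.
```

Calling init/0 once per request process, then incr_reads(1) and friends as work happens, leaves snapshot/0 returning the #st{} record ready for reporting.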
diff --git a/src/chttpd/src/chttpd_sup.erl b/src/chttpd/src/chttpd_sup.erl
index 78fc66979..ea4e62f80 100644
--- a/src/chttpd/src/chttpd_sup.erl
+++ b/src/chttpd/src/chttpd_sup.erl
@@ -51,13 +51,11 @@ init([]) ->
},
?CHILD(chttpd, worker),
?CHILD(chttpd_auth_cache, worker),
- {chttpd_auth_cache_lru,
- {ets_lru, start_link, [chttpd_auth_cache_lru, lru_opts()]},
- permanent, 5000, worker, [ets_lru]}
+ {chttpd_auth_cache_lru, {ets_lru, start_link, [chttpd_auth_cache_lru, lru_opts()]},
+ permanent, 5000, worker, [ets_lru]}
],
- {ok, {{one_for_one, 3, 10},
- couch_epi:register_service(chttpd_epi, Children)}}.
+ {ok, {{one_for_one, 3, 10}, couch_epi:register_service(chttpd_epi, Children)}}.
handle_config_change("chttpd", "bind_address", Value, _, Settings) ->
maybe_replace(bind_address, Value, Settings);
@@ -78,17 +76,21 @@ settings() ->
{bind_address, config:get("chttpd", "bind_address")},
{port, config:get("chttpd", "port")},
{backlog, config:get_integer("chttpd", "backlog", ?DEFAULT_BACKLOG)},
- {server_options, config:get("chttpd",
- "server_options", ?DEFAULT_SERVER_OPTIONS)}
+ {server_options,
+ config:get(
+ "chttpd",
+ "server_options",
+ ?DEFAULT_SERVER_OPTIONS
+ )}
].
maybe_replace(Key, Value, Settings) ->
case couch_util:get_value(Key, Settings) of
- Value ->
- {ok, Settings};
- _ ->
- chttpd:stop(),
- {ok, lists:keyreplace(Key, 1, Settings, {Key, Value})}
+ Value ->
+ {ok, Settings};
+ _ ->
+ chttpd:stop(),
+ {ok, lists:keyreplace(Key, 1, Settings, {Key, Value})}
end.
lru_opts() ->
@@ -105,7 +107,9 @@ append_if_set({_Key, 0}, Opts) ->
append_if_set({Key, Value}, Opts) ->
couch_log:error(
"The value for `~s` should be string convertable "
- "to integer which is >= 0 (got `~p`)", [Key, Value]),
+ "to integer which is >= 0 (got `~p`)",
+ [Key, Value]
+ ),
Opts.
notify_started() ->
@@ -115,9 +119,12 @@ notify_error(Error) ->
couch_log:error("Error starting Apache CouchDB:~n~n ~p~n~n", [Error]).
notify_uris() ->
- lists:foreach(fun(Uri) ->
- couch_log:info("Apache CouchDB has started on ~s", [Uri])
- end, get_uris()).
+ lists:foreach(
+ fun(Uri) ->
+ couch_log:info("Apache CouchDB has started on ~s", [Uri])
+ end,
+ get_uris()
+ ).
write_uris() ->
case config:get("couchdb", "uri_file", undefined) of
@@ -130,12 +137,15 @@ write_uris() ->
get_uris() ->
Ip = config:get("chttpd", "bind_address"),
- lists:flatmap(fun(Uri) ->
- case get_uri(Uri, Ip) of
- undefined -> [];
- Else -> [Else]
- end
- end, [chttpd, couch_httpd, https]).
+ lists:flatmap(
+ fun(Uri) ->
+ case get_uri(Uri, Ip) of
+ undefined -> [];
+ Else -> [Else]
+ end
+ end,
+ [chttpd, couch_httpd, https]
+ ).
get_uri(Name, Ip) ->
case get_port(Name) of
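The ets_lru child spec near the top of the chttpd_sup.erl hunk above is in the old positional-tuple form, which erlfmt can only reflow, not label. As a reading aid only (this map is not part of the commit), the same spec in the map form supervisors also accept makes the fields explicit; lru_opts/0 is the module's own helper:

```
%% The same child spec as a map, field names spelled out.
#{
    id => chttpd_auth_cache_lru,
    start => {ets_lru, start_link, [chttpd_auth_cache_lru, lru_opts()]},
    restart => permanent,
    shutdown => 5000,
    type => worker,
    modules => [ets_lru]
}
```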
diff --git a/src/chttpd/src/chttpd_test_util.erl b/src/chttpd/src/chttpd_test_util.erl
index a1a08eff4..8a849acda 100644
--- a/src/chttpd/src/chttpd_test_util.erl
+++ b/src/chttpd/src/chttpd_test_util.erl
@@ -16,7 +16,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-
start_couch() ->
start_couch(?CONFIG_CHAIN).
diff --git a/src/chttpd/src/chttpd_util.erl b/src/chttpd/src/chttpd_util.erl
index 6c68568fe..ca4ffc8a7 100644
--- a/src/chttpd/src/chttpd_util.erl
+++ b/src/chttpd/src/chttpd_util.erl
@@ -12,7 +12,6 @@
-module(chttpd_util).
-
-export([
get_chttpd_config/1,
get_chttpd_config/2,
@@ -25,43 +24,49 @@
maybe_add_csp_header/3
]).
-
get_chttpd_config(Key) ->
config:get("chttpd", Key, config:get("httpd", Key)).
-
get_chttpd_config(Key, Default) ->
config:get("chttpd", Key, config:get("httpd", Key, Default)).
-
get_chttpd_config_integer(Key, Default) ->
- config:get_integer("chttpd", Key,
- config:get_integer("httpd", Key, Default)).
-
+ config:get_integer(
+ "chttpd",
+ Key,
+ config:get_integer("httpd", Key, Default)
+ ).
get_chttpd_config_boolean(Key, Default) ->
- config:get_boolean("chttpd", Key,
- config:get_boolean("httpd", Key, Default)).
-
+ config:get_boolean(
+ "chttpd",
+ Key,
+ config:get_boolean("httpd", Key, Default)
+ ).
get_chttpd_auth_config(Key) ->
config:get("chttpd_auth", Key, config:get("couch_httpd_auth", Key)).
-
get_chttpd_auth_config(Key, Default) ->
- config:get("chttpd_auth", Key,
- config:get("couch_httpd_auth", Key, Default)).
-
+ config:get(
+ "chttpd_auth",
+ Key,
+ config:get("couch_httpd_auth", Key, Default)
+ ).
get_chttpd_auth_config_integer(Key, Default) ->
- config:get_integer("chttpd_auth", Key,
- config:get_integer("couch_httpd_auth", Key, Default)).
-
+ config:get_integer(
+ "chttpd_auth",
+ Key,
+ config:get_integer("couch_httpd_auth", Key, Default)
+ ).
get_chttpd_auth_config_boolean(Key, Default) ->
- config:get_boolean("chttpd_auth", Key,
- config:get_boolean("couch_httpd_auth", Key, Default)).
-
+ config:get_boolean(
+ "chttpd_auth",
+ Key,
+ config:get_boolean("couch_httpd_auth", Key, Default)
+ ).
maybe_add_csp_header(Component, OriginalHeaders, DefaultHeaderValue) ->
Enabled = config:get_boolean("csp", Component ++ "_enable", true),
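All of the get_chttpd_config* and get_chttpd_auth_config* helpers reformatted above share one shape: look the key up in the new-style section first and fall back to the legacy section, with the caller's default applying only at the very end. A compact restatement of that nesting (hypothetical helper, shown only to spell out the lookup order; it assumes the same config:get/3 API used throughout the module):

```
%% Lookup order used above: 1. primary section, 2. legacy section,
%% 3. the caller-supplied default.
layered_get(Section, LegacySection, Key, Default) ->
    config:get(Section, Key, config:get(LegacySection, Key, Default)).

%% e.g. layered_get("chttpd", "httpd", "bind_address", "127.0.0.1")
%% consults "httpd" only when "chttpd" has no entry for the key.
```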
diff --git a/src/chttpd/src/chttpd_view.erl b/src/chttpd/src/chttpd_view.erl
index f73a8b7b1..1d721d189 100644
--- a/src/chttpd/src/chttpd_view.erl
+++ b/src/chttpd/src/chttpd_view.erl
@@ -18,24 +18,42 @@
multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
Args0 = couch_mrview_http:parse_params(Req, undefined),
- {ok, #mrst{views=Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
+ {ok, #mrst{views = Views}} = couch_mrview_util:ddoc_to_mrst(Db, DDoc),
Args1 = couch_mrview_util:set_view_type(Args0, ViewName, Views),
- ArgQueries = lists:map(fun({Query}) ->
- QueryArg = couch_mrview_http:parse_params(Query, undefined,
- Args1, [decoded]),
- QueryArg1 = couch_mrview_util:set_view_type(QueryArg, ViewName, Views),
- fabric_util:validate_args(Db, DDoc, QueryArg1)
- end, Queries),
+ ArgQueries = lists:map(
+ fun({Query}) ->
+ QueryArg = couch_mrview_http:parse_params(
+ Query,
+ undefined,
+ Args1,
+ [decoded]
+ ),
+ QueryArg1 = couch_mrview_util:set_view_type(QueryArg, ViewName, Views),
+ fabric_util:validate_args(Db, DDoc, QueryArg1)
+ end,
+ Queries
+ ),
Options = [{user_ctx, Req#httpd.user_ctx}],
- VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n"},
+ VAcc0 = #vacc{db = Db, req = Req, prepend = "\r\n"},
FirstChunk = "{\"results\":[",
{ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, [], FirstChunk),
- VAcc1 = VAcc0#vacc{resp=Resp0},
- VAcc2 = lists:foldl(fun(Args, Acc0) ->
- {ok, Acc1} = fabric:query_view(Db, Options, DDoc, ViewName,
- fun view_cb/2, Acc0, Args),
- Acc1
- end, VAcc1, ArgQueries),
+ VAcc1 = VAcc0#vacc{resp = Resp0},
+ VAcc2 = lists:foldl(
+ fun(Args, Acc0) ->
+ {ok, Acc1} = fabric:query_view(
+ Db,
+ Options,
+ DDoc,
+ ViewName,
+ fun view_cb/2,
+ Acc0,
+ Args
+ ),
+ Acc1
+ end,
+ VAcc1,
+ ArgQueries
+ ),
{ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
chttpd:end_delayed_json_response(Resp1).
@@ -49,13 +67,19 @@ design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
fabric_query_view(Db, Req, DDoc, ViewName, Args) ->
Max = chttpd:chunked_response_buffer_size(),
- VAcc = #vacc{db=Db, req=Req, threshold=Max},
+ VAcc = #vacc{db = Db, req = Req, threshold = Max},
Options = [{user_ctx, Req#httpd.user_ctx}],
- {ok, Resp} = fabric:query_view(Db, Options, DDoc, ViewName,
- fun view_cb/2, VAcc, Args),
+ {ok, Resp} = fabric:query_view(
+ Db,
+ Options,
+ DDoc,
+ ViewName,
+ fun view_cb/2,
+ VAcc,
+ Args
+ ),
{ok, Resp#vacc.resp}.
-
view_cb({row, Row} = Msg, Acc) ->
case lists:keymember(doc, 1, Row) of
true -> chttpd_stats:incr_reads();
@@ -63,42 +87,56 @@ view_cb({row, Row} = Msg, Acc) ->
end,
chttpd_stats:incr_rows(),
couch_mrview_http:view_cb(Msg, Acc);
-
view_cb(Msg, Acc) ->
couch_mrview_http:view_cb(Msg, Acc).
-
-handle_view_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ViewName, <<"queries">>]}=Req, Db, DDoc) ->
+handle_view_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, _, _, _, ViewName, <<"queries">>]
+ } = Req,
+ Db,
+ DDoc
+) ->
chttpd:validate_ctype(Req, "application/json"),
Props = couch_httpd:json_body_obj(Req),
case couch_mrview_util:get_view_queries(Props) of
undefined ->
- throw({bad_request,
- <<"POST body must include `queries` parameter.">>});
+ throw({bad_request, <<"POST body must include `queries` parameter.">>});
Queries ->
multi_query_view(Req, Db, DDoc, ViewName, Queries)
end;
-
-handle_view_req(#httpd{path_parts=[_, _, _, _, _, <<"queries">>]}=Req,
- _Db, _DDoc) ->
+handle_view_req(
+ #httpd{path_parts = [_, _, _, _, _, <<"queries">>]} = Req,
+ _Db,
+ _DDoc
+) ->
chttpd:send_method_not_allowed(Req, "POST");
-
-handle_view_req(#httpd{method='GET',
- path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
+handle_view_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_, _, _, _, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) ->
couch_stats:increment_counter([couchdb, httpd, view_reads]),
Keys = chttpd:qs_json_value(Req, "keys", undefined),
design_doc_view(Req, Db, DDoc, ViewName, Keys);
-
-handle_view_req(#httpd{method='POST',
- path_parts=[_, _, _, _, ViewName]}=Req, Db, DDoc) ->
+handle_view_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, _, _, _, ViewName]
+ } = Req,
+ Db,
+ DDoc
+) ->
chttpd:validate_ctype(Req, "application/json"),
Props = couch_httpd:json_body_obj(Req),
assert_no_queries_param(couch_mrview_util:get_view_queries(Props)),
Keys = couch_mrview_util:get_view_keys(Props),
couch_stats:increment_counter([couchdb, httpd, view_reads]),
design_doc_post_view(Req, Props, Db, DDoc, ViewName, Keys);
-
handle_view_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
@@ -115,12 +153,10 @@ assert_no_queries_param(_) ->
"The `queries` parameter is no longer supported at this endpoint"
}).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
check_multi_query_reduce_view_overrides_test_() ->
{
setup,
@@ -137,7 +173,6 @@ check_multi_query_reduce_view_overrides_test_() ->
}
}.
-
t_check_include_docs_throw_validation_error() ->
?_test(begin
Req = #httpd{qs = []},
@@ -147,7 +182,6 @@ t_check_include_docs_throw_validation_error() ->
?assertThrow(Throw, multi_query_view(Req, Db, ddoc, <<"v">>, [Query]))
end).
-
t_check_user_can_override_individual_query_type() ->
?_test(begin
Req = #httpd{qs = []},
@@ -157,7 +191,6 @@ t_check_user_can_override_individual_query_type() ->
?assertEqual(1, meck:num_calls(chttpd, start_delayed_json_response, '_'))
end).
-
setup_all() ->
Views = [#mrview{reduce_funs = [{<<"v">>, <<"_count">>}]}],
meck:expect(couch_mrview_util, ddoc_to_mrst, 2, {ok, #mrst{views = Views}}),
@@ -166,11 +199,9 @@ setup_all() ->
meck:expect(chttpd, send_delayed_chunk, 2, {ok, resp}),
meck:expect(chttpd, end_delayed_json_response, 1, ok).
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([
chttpd,
@@ -178,9 +209,7 @@ setup() ->
fabric
]).
-
teardown(_) ->
ok.
-
-endif.
diff --git a/src/chttpd/src/chttpd_xframe_options.erl b/src/chttpd/src/chttpd_xframe_options.erl
index 2a43617fa..15865057b 100644
--- a/src/chttpd/src/chttpd_xframe_options.erl
+++ b/src/chttpd/src/chttpd_xframe_options.erl
@@ -21,7 +21,6 @@
-define(SAMEORIGIN, "SAMEORIGIN").
-define(ALLOWFROM, "ALLOW-FROM ").
-
-include_lib("couch/include/couch_db.hrl").
% X-Frame-Options protects against clickjacking by limiting whether a response can be used in a
@@ -30,33 +29,28 @@
header(Req, Headers) ->
header(Req, Headers, get_xframe_config(Req)).
-
-
header(Req, Headers, Config) ->
case lists:keyfind(enabled, 1, Config) of
- {enabled, true} ->
+ {enabled, true} ->
generate_xframe_header(Req, Headers, Config);
- _ ->
+ _ ->
Headers
end.
-
-
generate_xframe_header(Req, Headers, Config) ->
- XframeOption = case lists:keyfind(same_origin, 1, Config) of
- {same_origin, true} ->
- ?SAMEORIGIN;
- _ ->
- check_host(Req, Config)
- end,
- [{"X-Frame-Options", XframeOption } | Headers].
-
-
+ XframeOption =
+ case lists:keyfind(same_origin, 1, Config) of
+ {same_origin, true} ->
+ ?SAMEORIGIN;
+ _ ->
+ check_host(Req, Config)
+ end,
+ [{"X-Frame-Options", XframeOption} | Headers].
check_host(#httpd{mochi_req = MochiReq} = Req, Config) ->
Host = couch_httpd_vhost:host(MochiReq),
case Host of
- [] ->
+ [] ->
?DENY;
Host ->
FullHost = chttpd:absolute_uri(Req, ""),
@@ -66,18 +60,18 @@ check_host(#httpd{mochi_req = MochiReq} = Req, Config) ->
true -> ?ALLOWFROM ++ FullHost;
false -> ?DENY
end
- end.
-
-
+ end.
get_xframe_config(#httpd{xframe_config = undefined}) ->
EnableXFrame = chttpd_util:get_chttpd_config_boolean(
- "enable_xframe_options", false),
+ "enable_xframe_options", false
+ ),
SameOrigin = config:get("x_frame_options", "same_origin", "false") =:= "true",
- AcceptedHosts = case config:get("x_frame_options", "hosts") of
- undefined -> [];
- Hosts -> split_list(Hosts)
- end,
+ AcceptedHosts =
+ case config:get("x_frame_options", "hosts") of
+ undefined -> [];
+ Hosts -> split_list(Hosts)
+ end,
[
{enabled, EnableXFrame},
{same_origin, SameOrigin},
@@ -86,15 +80,11 @@ get_xframe_config(#httpd{xframe_config = undefined}) ->
get_xframe_config(#httpd{xframe_config = Config}) ->
Config.
-
-
get_accepted_hosts(Config) ->
case lists:keyfind(hosts, 1, Config) of
false -> [];
{hosts, AcceptedHosts} -> AcceptedHosts
end.
-
-
split_list(S) ->
re:split(S, "\\s*,\\s*", [trim, {return, list}]).
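Because the chttpd_xframe_options.erl changes above are spread over several small functions, the overall decision order is easier to see restated in one place. A dependency-free sketch (hypothetical function; HostMatches stands in for the accepted-hosts lookup that check_host/2 performs):

```
%% Decision order for the X-Frame-Options value, restated for readability:
%% disabled -> no header at all; same_origin -> SAMEORIGIN; otherwise the
%% request host decides between ALLOW-FROM <host> and DENY.
xframe_value(Config, FullHost, HostMatches) ->
    case proplists:get_value(enabled, Config, false) of
        false ->
            no_header;
        true ->
            case proplists:get_value(same_origin, Config, false) of
                true -> "SAMEORIGIN";
                false when HostMatches -> "ALLOW-FROM " ++ FullHost;
                false -> "DENY"
            end
    end.
```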
diff --git a/src/chttpd/test/eunit/chttpd_auth_tests.erl b/src/chttpd/test/eunit/chttpd_auth_tests.erl
index b4a8eabfb..7beda9bc7 100644
--- a/src/chttpd/test/eunit/chttpd_auth_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_auth_tests.erl
@@ -15,7 +15,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup() ->
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -25,7 +24,6 @@ setup() ->
teardown(_Url) ->
ok.
-
require_valid_user_exception_test_() ->
{
"_up",
@@ -35,7 +33,8 @@ require_valid_user_exception_test_() ->
fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_handle_require_valid_user_except_up_on_up_route/1,
fun should_handle_require_valid_user_except_up_on_non_up_routes/1
@@ -45,85 +44,84 @@ require_valid_user_exception_test_() ->
}.
set_require_user_false() ->
- ok = config:set("chttpd", "require_valid_user", "false", _Persist=false).
+ ok = config:set("chttpd", "require_valid_user", "false", _Persist = false).
set_require_user_true() ->
- ok = config:set("chttpd", "require_valid_user", "true", _Persist=false).
+ ok = config:set("chttpd", "require_valid_user", "true", _Persist = false).
set_require_user_except_for_up_false() ->
- ok = config:set("chttpd", "require_valid_user_except_for_up", "false", _Persist=false).
+ ok = config:set("chttpd", "require_valid_user_except_for_up", "false", _Persist = false).
set_require_user_except_for_up_true() ->
- ok = config:set("chttpd", "require_valid_user_except_for_up", "true", _Persist=false).
+ ok = config:set("chttpd", "require_valid_user_except_for_up", "true", _Persist = false).
should_handle_require_valid_user_except_up_on_up_route(_Url) ->
- ?_test(begin
- % require_valid_user | require_valid_user_except_up | up needs auth
- % 1 F | F | F
- % 2 F | T | F
- % 3 T | F | T
- % 4 T | T | F
-
- UpRequest = #httpd{path_parts=[<<"_up">>]},
- % we use ?ADMIN_USER here because these tests run under admin party
- % so this is equivalent to an unauthenticated request
- ExpectAuth = {unauthorized, <<"Authentication required.">>},
- ExpectNoAuth = #httpd{user_ctx=?ADMIN_USER,path_parts=[<<"_up">>]},
-
- % 1
- set_require_user_false(),
- set_require_user_except_for_up_false(),
- Result1 = chttpd_auth:party_mode_handler(UpRequest),
- ?assertEqual(ExpectNoAuth, Result1),
-
- % 2
- set_require_user_false(),
- set_require_user_except_for_up_true(),
- Result2 = chttpd_auth:party_mode_handler(UpRequest),
- ?assertEqual(ExpectNoAuth, Result2),
-
- % 3
- set_require_user_true(),
- set_require_user_except_for_up_false(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(UpRequest)),
-
- % 4
- set_require_user_true(),
- set_require_user_except_for_up_true(),
- Result4 = chttpd_auth:party_mode_handler(UpRequest),
- ?assertEqual(ExpectNoAuth, Result4)
-
- end).
+ ?_test(begin
+ % require_valid_user | require_valid_user_except_up | up needs auth
+ % 1 F | F | F
+ % 2 F | T | F
+ % 3 T | F | T
+ % 4 T | T | F
+
+ UpRequest = #httpd{path_parts = [<<"_up">>]},
+ % we use ?ADMIN_USER here because these tests run under admin party
+ % so this is equivalent to an unauthenticated request
+ ExpectAuth = {unauthorized, <<"Authentication required.">>},
+ ExpectNoAuth = #httpd{user_ctx = ?ADMIN_USER, path_parts = [<<"_up">>]},
+
+ % 1
+ set_require_user_false(),
+ set_require_user_except_for_up_false(),
+ Result1 = chttpd_auth:party_mode_handler(UpRequest),
+ ?assertEqual(ExpectNoAuth, Result1),
+
+ % 2
+ set_require_user_false(),
+ set_require_user_except_for_up_true(),
+ Result2 = chttpd_auth:party_mode_handler(UpRequest),
+ ?assertEqual(ExpectNoAuth, Result2),
+
+ % 3
+ set_require_user_true(),
+ set_require_user_except_for_up_false(),
+ ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(UpRequest)),
+
+ % 4
+ set_require_user_true(),
+ set_require_user_except_for_up_true(),
+ Result4 = chttpd_auth:party_mode_handler(UpRequest),
+ ?assertEqual(ExpectNoAuth, Result4)
+ end).
should_handle_require_valid_user_except_up_on_non_up_routes(_Url) ->
- ?_test(begin
- % require_valid_user | require_valid_user_except_up | everything not _up requires auth
- % 5 F | F | F
- % 6 F | T | T
- % 7 T | F | T
- % 8 T | T | T
-
- NonUpRequest = #httpd{path_parts=[<<"/">>]},
- ExpectAuth = {unauthorized, <<"Authentication required.">>},
- ExpectNoAuth = #httpd{user_ctx=?ADMIN_USER,path_parts=[<<"/">>]},
- % 5
- set_require_user_false(),
- set_require_user_except_for_up_false(),
- Result5 = chttpd_auth:party_mode_handler(NonUpRequest),
- ?assertEqual(ExpectNoAuth, Result5),
-
- % 6
- set_require_user_false(),
- set_require_user_except_for_up_true(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest)),
-
- % 7
- set_require_user_true(),
- set_require_user_except_for_up_false(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest)),
-
- % 8
- set_require_user_true(),
- set_require_user_except_for_up_true(),
- ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest))
- end).
+ ?_test(begin
+ % require_valid_user | require_valid_user_except_up | everything not _up requires auth
+ % 5 F | F | F
+ % 6 F | T | T
+ % 7 T | F | T
+ % 8 T | T | T
+
+ NonUpRequest = #httpd{path_parts = [<<"/">>]},
+ ExpectAuth = {unauthorized, <<"Authentication required.">>},
+ ExpectNoAuth = #httpd{user_ctx = ?ADMIN_USER, path_parts = [<<"/">>]},
+ % 5
+ set_require_user_false(),
+ set_require_user_except_for_up_false(),
+ Result5 = chttpd_auth:party_mode_handler(NonUpRequest),
+ ?assertEqual(ExpectNoAuth, Result5),
+
+ % 6
+ set_require_user_false(),
+ set_require_user_except_for_up_true(),
+ ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest)),
+
+ % 7
+ set_require_user_true(),
+ set_require_user_except_for_up_false(),
+ ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest)),
+
+ % 8
+ set_require_user_true(),
+ set_require_user_except_for_up_true(),
+ ?assertThrow(ExpectAuth, chttpd_auth:party_mode_handler(NonUpRequest))
+ end).
diff --git a/src/chttpd/test/eunit/chttpd_cors_test.erl b/src/chttpd/test/eunit/chttpd_cors_test.erl
index 44644b571..93b080fc6 100644
--- a/src/chttpd/test/eunit/chttpd_cors_test.erl
+++ b/src/chttpd/test/eunit/chttpd_cors_test.erl
@@ -12,17 +12,16 @@
-module(chttpd_cors_test).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("chttpd/include/chttpd_cors.hrl").
-
-define(DEFAULT_ORIGIN, "http://example.com").
-define(DEFAULT_ORIGIN_HTTPS, "https://example.com").
-define(EXPOSED_HEADERS,
"content-type, accept-ranges, etag, server, x-couch-request-id, " ++
- "x-couch-update-newrev, x-couchdb-body-time").
+ "x-couch-update-newrev, x-couchdb-body-time"
+).
-define(CUSTOM_SUPPORTED_METHODS, ?SUPPORTED_METHODS -- ["CONNECT"]).
-define(CUSTOM_SUPPORTED_HEADERS, ["extra" | ?SUPPORTED_HEADERS -- ["pragma"]]).
@@ -32,33 +31,31 @@
%% Test helpers
-
empty_cors_config() ->
[].
-
minimal_cors_config() ->
[
{<<"enable_cors">>, true},
{<<"origins">>, {[]}}
].
-
simple_cors_config() ->
[
{<<"enable_cors">>, true},
- {<<"origins">>, {[
- {list_to_binary(?DEFAULT_ORIGIN), {[]}}
- ]}}
+ {<<"origins">>,
+ {[
+ {list_to_binary(?DEFAULT_ORIGIN), {[]}}
+ ]}}
].
-
wildcard_cors_config() ->
[
{<<"enable_cors">>, true},
- {<<"origins">>, {[
- {<<"*">>, {[]}}
- ]}}
+ {<<"origins">>,
+ {[
+ {<<"*">>, {[]}}
+ ]}}
].
custom_cors_config() ->
@@ -68,55 +65,59 @@ custom_cors_config() ->
{<<"allow_headers">>, ?CUSTOM_SUPPORTED_HEADERS},
{<<"exposed_headers">>, ?CUSTOM_EXPOSED_HEADERS},
{<<"max_age">>, ?CUSTOM_MAX_AGE},
- {<<"origins">>, {[
- {<<"*">>, {[]}}
- ]}}
+ {<<"origins">>,
+ {[
+ {<<"*">>, {[]}}
+ ]}}
].
access_control_cors_config(AllowCredentials) ->
[
{<<"enable_cors">>, true},
{<<"allow_credentials">>, AllowCredentials},
- {<<"origins">>, {[
- {list_to_binary(?DEFAULT_ORIGIN), {[]}}
- ]}}].
-
+ {<<"origins">>,
+ {[
+ {list_to_binary(?DEFAULT_ORIGIN), {[]}}
+ ]}}
+ ].
multiple_cors_config() ->
[
{<<"enable_cors">>, true},
- {<<"origins">>, {[
- {list_to_binary(?DEFAULT_ORIGIN), {[]}},
- {<<"https://example.com">>, {[]}},
- {<<"http://example.com:5984">>, {[]}},
- {<<"https://example.com:5984">>, {[]}}
- ]}}
+ {<<"origins">>,
+ {[
+ {list_to_binary(?DEFAULT_ORIGIN), {[]}},
+ {<<"https://example.com">>, {[]}},
+ {<<"http://example.com:5984">>, {[]}},
+ {<<"https://example.com:5984">>, {[]}}
+ ]}}
].
-
mock_request(Method, Path, Headers0) ->
HeaderKey = "Access-Control-Request-Method",
- Headers = case proplists:get_value(HeaderKey, Headers0, undefined) of
- nil ->
- proplists:delete(HeaderKey, Headers0);
- undefined ->
- case Method of
- 'OPTIONS' ->
- [{HeaderKey, atom_to_list(Method)} | Headers0];
- _ ->
- Headers0
- end;
- _ ->
- Headers0
- end,
+ Headers =
+ case proplists:get_value(HeaderKey, Headers0, undefined) of
+ nil ->
+ proplists:delete(HeaderKey, Headers0);
+ undefined ->
+ case Method of
+ 'OPTIONS' ->
+ [{HeaderKey, atom_to_list(Method)} | Headers0];
+ _ ->
+ Headers0
+ end;
+ _ ->
+ Headers0
+ end,
Headers1 = mochiweb_headers:make(Headers),
MochiReq = mochiweb_request:new(nil, Method, Path, {1, 1}, Headers1),
- PathParts = [list_to_binary(chttpd:unquote(Part))
- || Part <- string:tokens(Path, "/")],
- #httpd{method=Method, mochi_req=MochiReq, path_parts=PathParts}.
-
+ PathParts = [
+ list_to_binary(chttpd:unquote(Part))
+ || Part <- string:tokens(Path, "/")
+ ],
+ #httpd{method = Method, mochi_req = MochiReq, path_parts = PathParts}.
-header(#httpd{}=Req, Key) ->
+header(#httpd{} = Req, Key) ->
chttpd:header_value(Req, Key);
header({mochiweb_response, [_, _, Headers]}, Key) ->
%% header(Headers, Key);
@@ -124,57 +125,40 @@ header({mochiweb_response, [_, _, Headers]}, Key) ->
header(Headers, Key) ->
couch_util:get_value(Key, Headers, undefined).
-
string_headers(H) ->
string:join(H, ", ").
-
assert_not_preflight_(Val) ->
?_assertEqual(not_preflight, Val).
-
%% CORS disabled tests
-
cors_disabled_test_() ->
{"CORS disabled tests", [
{"Empty user",
- {setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
+ {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
{foreach, fun empty_cors_config/0, [
fun test_no_access_control_method_preflight_request_/1,
fun test_no_headers_/1,
fun test_no_headers_server_/1,
fun test_no_headers_db_/1
- ]}
- }
- }
+ ]}}}
]}.
-
%% CORS enabled tests
-
cors_enabled_minimal_config_test_() ->
{"Minimal CORS enabled, no Origins",
- {setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
+ {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
{foreach, fun minimal_cors_config/0, [
fun test_no_access_control_method_preflight_request_/1,
fun test_incorrect_origin_simple_request_/1,
fun test_incorrect_origin_preflight_request_/1
- ]}
- }
- }.
-
+ ]}}}.
cors_enabled_simple_config_test_() ->
{"Simple CORS config",
- {setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
+ {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
{foreach, fun simple_cors_config/0, [
fun test_no_access_control_method_preflight_request_/1,
fun test_preflight_request_/1,
@@ -187,28 +171,19 @@ cors_enabled_simple_config_test_() ->
fun test_preflight_with_scheme_no_origin_/1,
fun test_preflight_with_scheme_port_no_origin_/1,
fun test_case_sensitive_mismatch_of_allowed_origins_/1
- ]}
- }
- }.
+ ]}}}.
cors_enabled_custom_config_test_() ->
{"Simple CORS config with custom allow_methods/allow_headers/exposed_headers",
- {setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
+ {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
{foreach, fun custom_cors_config/0, [
fun test_good_headers_preflight_request_with_custom_config_/1,
fun test_db_request_with_custom_config_/1
- ]}
- }
- }.
-
+ ]}}}.
cors_enabled_multiple_config_test_() ->
{"Multiple options CORS config",
- {setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
+ {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
{foreach, fun multiple_cors_config/0, [
fun test_no_access_control_method_preflight_request_/1,
fun test_preflight_request_/1,
@@ -218,14 +193,10 @@ cors_enabled_multiple_config_test_() ->
fun test_preflight_with_port_with_origin_/1,
fun test_preflight_with_scheme_with_origin_/1,
fun test_preflight_with_scheme_port_with_origin_/1
- ]}
- }
- }.
-
+ ]}}}.
%% Access-Control-Allow-Credentials tests
-
%% http://www.w3.org/TR/cors/#supports-credentials
%% 6.1.3
%% If the resource supports credentials add a single
@@ -245,10 +216,7 @@ db_request_credentials_header_off_test_() ->
fun() ->
access_control_cors_config(false)
end,
- fun test_db_request_credentials_header_off_/1
- }
- }.
-
+ fun test_db_request_credentials_header_off_/1}}.
db_request_credentials_header_on_test_() ->
{"Allow credentials enabled",
@@ -256,19 +224,13 @@ db_request_credentials_header_on_test_() ->
fun() ->
access_control_cors_config(true)
end,
- fun test_db_request_credentials_header_on_/1
- }
- }.
-
+ fun test_db_request_credentials_header_on_/1}}.
%% CORS wildcard tests
-
cors_enabled_wildcard_test_() ->
{"Wildcard CORS config",
- {setup,
- fun chttpd_test_util:start_couch/0,
- fun chttpd_test_util:stop_couch/1,
+ {setup, fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
{foreach, fun wildcard_cors_config/0, [
fun test_no_access_control_method_preflight_request_/1,
fun test_preflight_request_/1,
@@ -281,30 +243,23 @@ cors_enabled_wildcard_test_() ->
fun test_preflight_with_scheme_with_origin_/1,
fun test_preflight_with_scheme_port_with_origin_/1,
fun test_case_sensitive_mismatch_of_allowed_origins_/1
- ]}
- }
- }.
-
+ ]}}}.
%% Test generators
-
test_no_headers_(OwnerConfig) ->
Req = mock_request('GET', "/", []),
assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
test_no_headers_server_(OwnerConfig) ->
Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]),
assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
test_no_headers_db_(OwnerConfig) ->
Headers = [{"Origin", "http://127.0.0.1"}],
Req = mock_request('GET', "/my_db", Headers),
assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
test_incorrect_origin_simple_request_(OwnerConfig) ->
Req = mock_request('GET', "/", [{"Origin", "http://127.0.0.1"}]),
[
@@ -312,7 +267,6 @@ test_incorrect_origin_simple_request_(OwnerConfig) ->
assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig))
].
-
test_incorrect_origin_preflight_request_(OwnerConfig) ->
Headers = [
{"Origin", "http://127.0.0.1"},
@@ -324,7 +278,6 @@ test_incorrect_origin_preflight_request_(OwnerConfig) ->
assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig))
].
-
test_bad_headers_preflight_request_(OwnerConfig) ->
Headers = [
{"Origin", ?DEFAULT_ORIGIN},
@@ -337,7 +290,6 @@ test_bad_headers_preflight_request_(OwnerConfig) ->
assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig))
].
-
test_good_headers_preflight_request_(OwnerConfig) ->
Headers = [
{"Origin", ?DEFAULT_ORIGIN},
@@ -348,12 +300,18 @@ test_good_headers_preflight_request_(OwnerConfig) ->
?assert(chttpd_cors:is_cors_enabled(OwnerConfig)),
{ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")),
- ?_assertEqual(string_headers(["accept-language"]),
- header(Headers1, "Access-Control-Allow-Headers"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ string_headers(?SUPPORTED_METHODS),
+ header(Headers1, "Access-Control-Allow-Methods")
+ ),
+ ?_assertEqual(
+ string_headers(["accept-language"]),
+ header(Headers1, "Access-Control-Allow-Headers")
+ )
].
test_good_headers_preflight_request_with_custom_config_(OwnerConfig) ->
@@ -366,22 +324,31 @@ test_good_headers_preflight_request_with_custom_config_(OwnerConfig) ->
Req = mock_request('OPTIONS', "/", Headers),
?assert(chttpd_cors:is_cors_enabled(OwnerConfig)),
AllowMethods = couch_util:get_value(
- <<"allow_methods">>, OwnerConfig, ?SUPPORTED_METHODS),
+ <<"allow_methods">>, OwnerConfig, ?SUPPORTED_METHODS
+ ),
MaxAge = couch_util:get_value(
- <<"max_age">>, OwnerConfig, ?CORS_DEFAULT_MAX_AGE),
+ <<"max_age">>, OwnerConfig, ?CORS_DEFAULT_MAX_AGE
+ ),
{ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(string_headers(AllowMethods),
- header(Headers1, "Access-Control-Allow-Methods")),
- ?_assertEqual(string_headers(["accept-language", "extra"]),
- header(Headers1, "Access-Control-Allow-Headers")),
- ?_assertEqual(MaxAge,
- header(Headers1, "Access-Control-Max-Age"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ string_headers(AllowMethods),
+ header(Headers1, "Access-Control-Allow-Methods")
+ ),
+ ?_assertEqual(
+ string_headers(["accept-language", "extra"]),
+ header(Headers1, "Access-Control-Allow-Headers")
+ ),
+ ?_assertEqual(
+ MaxAge,
+ header(Headers1, "Access-Control-Max-Age")
+ )
].
-
test_preflight_request_(OwnerConfig) ->
Headers = [
{"Origin", ?DEFAULT_ORIGIN},
@@ -390,13 +357,16 @@ test_preflight_request_(OwnerConfig) ->
Req = mock_request('OPTIONS', "/", Headers),
{ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ string_headers(?SUPPORTED_METHODS),
+ header(Headers1, "Access-Control-Allow-Methods")
+ )
].
-
test_no_access_control_method_preflight_request_(OwnerConfig) ->
Headers = [
{"Origin", ?DEFAULT_ORIGIN},
@@ -405,7 +375,6 @@ test_no_access_control_method_preflight_request_(OwnerConfig) ->
Req = mock_request('OPTIONS', "/", Headers),
assert_not_preflight_(chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig)).
-
test_preflight_request_no_allow_credentials_(OwnerConfig) ->
Headers = [
{"Origin", ?DEFAULT_ORIGIN},
@@ -414,15 +383,20 @@ test_preflight_request_no_allow_credentials_(OwnerConfig) ->
Req = mock_request('OPTIONS', "/", Headers),
{ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")),
- ?_assertEqual(undefined,
- header(Headers1, "Access-Control-Allow-Credentials"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ string_headers(?SUPPORTED_METHODS),
+ header(Headers1, "Access-Control-Allow-Methods")
+ ),
+ ?_assertEqual(
+ undefined,
+ header(Headers1, "Access-Control-Allow-Credentials")
+ )
].
-
test_preflight_request_empty_request_headers_(OwnerConfig) ->
Headers = [
{"Origin", ?DEFAULT_ORIGIN},
@@ -432,25 +406,34 @@ test_preflight_request_empty_request_headers_(OwnerConfig) ->
Req = mock_request('OPTIONS', "/", Headers),
{ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods")),
- ?_assertEqual("",
- header(Headers1, "Access-Control-Allow-Headers"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ string_headers(?SUPPORTED_METHODS),
+ header(Headers1, "Access-Control-Allow-Methods")
+ ),
+ ?_assertEqual(
+ "",
+ header(Headers1, "Access-Control-Allow-Headers")
+ )
].
-
test_db_request_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN,
Headers = [{"Origin", Origin}],
Req = mock_request('GET', "/my_db", Headers),
Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(?EXPOSED_HEADERS,
- header(Headers1, "Access-Control-Expose-Headers"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ ?EXPOSED_HEADERS,
+ header(Headers1, "Access-Control-Expose-Headers")
+ )
].
test_db_request_with_custom_config_(OwnerConfig) ->
@@ -459,16 +442,21 @@ test_db_request_with_custom_config_(OwnerConfig) ->
Req = mock_request('GET', "/my_db", Headers),
Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
ExposedHeaders = couch_util:get_value(
- <<"exposed_headers">>, OwnerConfig, ?COUCH_HEADERS),
+ <<"exposed_headers">>, OwnerConfig, ?COUCH_HEADERS
+ ),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(lists:sort(["content-type" | ExposedHeaders]),
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ lists:sort(["content-type" | ExposedHeaders]),
lists:sort(
- split_list(header(Headers1, "Access-Control-Expose-Headers"))))
+ split_list(header(Headers1, "Access-Control-Expose-Headers"))
+ )
+ )
].
-
test_db_preflight_request_(OwnerConfig) ->
Headers = [
{"Origin", ?DEFAULT_ORIGIN}
@@ -476,13 +464,16 @@ test_db_preflight_request_(OwnerConfig) ->
Req = mock_request('OPTIONS', "/my_db", Headers),
{ok, Headers1} = chttpd_cors:maybe_handle_preflight_request(Req, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(string_headers(?SUPPORTED_METHODS),
- header(Headers1, "Access-Control-Allow-Methods"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ string_headers(?SUPPORTED_METHODS),
+ header(Headers1, "Access-Control-Allow-Methods")
+ )
].
-
test_db_host_origin_request_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN,
Headers = [
@@ -492,13 +483,16 @@ test_db_host_origin_request_(OwnerConfig) ->
Req = mock_request('GET', "/my_db", Headers),
Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(?EXPOSED_HEADERS,
- header(Headers1, "Access-Control-Expose-Headers"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ ?EXPOSED_HEADERS,
+ header(Headers1, "Access-Control-Expose-Headers")
+ )
].
-
test_preflight_origin_helper_(OwnerConfig, Origin, ExpectedOrigin) ->
Headers = [
{"Origin", Origin},
@@ -506,76 +500,82 @@ test_preflight_origin_helper_(OwnerConfig, Origin, ExpectedOrigin) ->
],
Req = mock_request('OPTIONS', "/", Headers),
Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
- [?_assertEqual(ExpectedOrigin,
- header(Headers1, "Access-Control-Allow-Origin"))
+ [
+ ?_assertEqual(
+ ExpectedOrigin,
+ header(Headers1, "Access-Control-Allow-Origin")
+ )
].
-
test_preflight_with_port_no_origin_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN ++ ":5984",
test_preflight_origin_helper_(OwnerConfig, Origin, undefined).
-
test_preflight_with_port_with_origin_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN ++ ":5984",
test_preflight_origin_helper_(OwnerConfig, Origin, Origin).
-
test_preflight_with_scheme_no_origin_(OwnerConfig) ->
test_preflight_origin_helper_(OwnerConfig, ?DEFAULT_ORIGIN_HTTPS, undefined).
-
test_preflight_with_scheme_with_origin_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN_HTTPS,
test_preflight_origin_helper_(OwnerConfig, Origin, Origin).
-
test_preflight_with_scheme_port_no_origin_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984",
test_preflight_origin_helper_(OwnerConfig, Origin, undefined).
-
test_preflight_with_scheme_port_with_origin_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN_HTTPS ++ ":5984",
test_preflight_origin_helper_(OwnerConfig, Origin, Origin).
-
test_case_sensitive_mismatch_of_allowed_origins_(OwnerConfig) ->
Origin = "http://EXAMPLE.COM",
Headers = [{"Origin", Origin}],
Req = mock_request('GET', "/", Headers),
Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(?EXPOSED_HEADERS,
- header(Headers1, "Access-Control-Expose-Headers"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ ?EXPOSED_HEADERS,
+ header(Headers1, "Access-Control-Expose-Headers")
+ )
].
-
test_db_request_credentials_header_off_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN,
Headers = [{"Origin", Origin}],
Req = mock_request('GET', "/", Headers),
Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual(undefined,
- header(Headers1, "Access-Control-Allow-Credentials"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ undefined,
+ header(Headers1, "Access-Control-Allow-Credentials")
+ )
].
-
test_db_request_credentials_header_on_(OwnerConfig) ->
Origin = ?DEFAULT_ORIGIN,
Headers = [{"Origin", Origin}],
Req = mock_request('GET', "/", Headers),
Headers1 = chttpd_cors:headers(Req, Headers, Origin, OwnerConfig),
[
- ?_assertEqual(?DEFAULT_ORIGIN,
- header(Headers1, "Access-Control-Allow-Origin")),
- ?_assertEqual("true",
- header(Headers1, "Access-Control-Allow-Credentials"))
+ ?_assertEqual(
+ ?DEFAULT_ORIGIN,
+ header(Headers1, "Access-Control-Allow-Origin")
+ ),
+ ?_assertEqual(
+ "true",
+ header(Headers1, "Access-Control-Allow-Credentials")
+ )
].
split_list(S) ->
diff --git a/src/chttpd/test/eunit/chttpd_csp_tests.erl b/src/chttpd/test/eunit/chttpd_csp_tests.erl
index 88194fdf8..4c77c5ab0 100644
--- a/src/chttpd/test/eunit/chttpd_csp_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_csp_tests.erl
@@ -34,7 +34,9 @@
-define(TDEF(Name), {atom_to_list(Name), fun Name/1}).
-define(TDEF(Name, Timeout), {atom_to_list(Name), Timeout, fun Name/1}).
-define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
--define(TDEF_FE(Name, Timeout), fun(Arg) -> {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}} end).
+-define(TDEF_FE(Name, Timeout), fun(Arg) ->
+ {atom_to_list(Name), {timeout, Timeout, ?_test(Name(Arg))}}
+end).
csp_test_() ->
{
@@ -101,7 +103,7 @@ sandbox_ddoc_attachments(DbName) ->
sandbox_shows(DbName) ->
DbUrl = base_url() ++ "/" ++ DbName,
- DDocUrl = DbUrl ++ "/" ++ ?DDOC1,
+ DDocUrl = DbUrl ++ "/" ++ ?DDOC1,
Url = DDocUrl ++ "/_show/" ++ ?SHOW1 ++ "/" ++ ?DOC1,
?assertEqual({200, true}, req(get, ?ACC, Url)),
config:set("csp", "showlist_enable", "false", false),
@@ -109,21 +111,22 @@ sandbox_shows(DbName) ->
sandbox_lists(DbName) ->
DbUrl = base_url() ++ "/" ++ DbName,
- DDocUrl = DbUrl ++ "/" ++ ?DDOC1,
+ DDocUrl = DbUrl ++ "/" ++ ?DDOC1,
Url = DDocUrl ++ "/_list/" ++ ?LIST1 ++ "/" ++ ?VIEW1,
?assertEqual({200, true}, req(get, ?ACC, Url)),
config:set("csp", "showlist_enable", "false", false),
?assertEqual({200, false}, req(get, ?ACC, Url)).
-
should_not_return_any_csp_headers_when_disabled(_DbName) ->
- ?_assertEqual(undefined,
+ ?_assertEqual(
+ undefined,
begin
ok = config:set("csp", "utils_enable", "false", false),
ok = config:set("csp", "enable", "false", false),
{ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
proplists:get_value("Content-Security-Policy", Headers)
- end).
+ end
+ ).
should_apply_default_policy(_DbName) ->
?_assertEqual(
@@ -132,7 +135,8 @@ should_apply_default_policy(_DbName) ->
begin
{ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
proplists:get_value("Content-Security-Policy", Headers)
- end).
+ end
+ ).
should_apply_default_policy_with_legacy_config(_DbName) ->
?_assertEqual(
@@ -143,17 +147,23 @@ should_apply_default_policy_with_legacy_config(_DbName) ->
ok = config:set("csp", "enable", "true", false),
{ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
proplists:get_value("Content-Security-Policy", Headers)
- end).
+ end
+ ).
should_return_custom_policy(_DbName) ->
- ?_assertEqual("default-src 'http://example.com';",
+ ?_assertEqual(
+ "default-src 'http://example.com';",
begin
- ok = config:set("csp", "utils_header_value",
- "default-src 'http://example.com';", false),
+ ok = config:set(
+ "csp",
+ "utils_header_value",
+ "default-src 'http://example.com';",
+ false
+ ),
{ok, _, Headers, _} = test_request:get(base_url() ++ "/_utils/"),
proplists:get_value("Content-Security-Policy", Headers)
- end).
-
+ end
+ ).
% Utility functions
@@ -204,19 +214,18 @@ setup() ->
<<?SHOW1>> => <<"function(doc, req) {return '<h1>show1!</h1>';}">>
},
<<"lists">> => #{
- <<?LIST1>> => <<"function(head, req) {",
- "var row;",
- "while(row = getRow()){ send(row.key); };",
- "}">>
+ <<?LIST1>> =>
+ <<"function(head, req) {", "var row;", "while(row = getRow()){ send(row.key); };",
+ "}">>
}
}),
ok = create_doc(?ACC, DbName, #{<<"_id">> => <<?LDOC1>>}),
DbName.
cleanup(DbName) ->
- config:delete("csp", "utils_enable", _Persist=false),
- config:delete("csp", "attachments_enable", _Persist=false),
- config:delete("csp", "showlist_enable", _Persist=false),
+ config:delete("csp", "utils_enable", _Persist = false),
+ config:delete("csp", "attachments_enable", _Persist = false),
+ config:delete("csp", "showlist_enable", _Persist = false),
DbUrl = base_url() ++ "/" ++ DbName,
{200, _} = req(delete, ?ADM, DbUrl),
UsersDb = config:get("chttpd_auth", "authentication_db"),
@@ -229,8 +238,12 @@ base_url() ->
Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
"http://" ++ Addr ++ ":" ++ Port.
-create_user(UsersDb, Name, Pass, Roles) when is_list(UsersDb),
- is_binary(Name), is_binary(Pass), is_list(Roles) ->
+create_user(UsersDb, Name, Pass, Roles) when
+ is_list(UsersDb),
+ is_binary(Name),
+ is_binary(Pass),
+ is_list(Roles)
+->
Body = #{
<<"name">> => Name,
<<"type">> => <<"user">>,
diff --git a/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
index 227b29c5b..e3975bb6e 100644
--- a/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_db_attachment_size_tests.erl
@@ -19,14 +19,12 @@
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(CONTENT_MULTI_RELATED, {"Content-Type",
- "multipart/related;boundary=\"bound\""}).
-
+-define(CONTENT_MULTI_RELATED, {"Content-Type", "multipart/related;boundary=\"bound\""}).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- ok = config:set("couchdb", "max_attachment_size", "50", _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
+ ok = config:set("couchdb", "max_attachment_size", "50", _Persist = false),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
@@ -35,13 +33,11 @@ setup() ->
add_doc(Url, "doc1"),
Url.
-
teardown(Url) ->
delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false),
+ ok = config:delete("admins", ?USER, _Persist = false),
ok = config:delete("couchdb", "max_attachment_size").
-
attachment_size_test_() ->
{
"chttpd max_attachment_size tests",
@@ -51,7 +47,8 @@ attachment_size_test_() ->
fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun put_inline/1,
fun put_simple/1,
@@ -63,14 +60,12 @@ attachment_size_test_() ->
}
}.
-
put_inline(Url) ->
- ?_test(begin
- Status = put_inline(Url, "doc2", 50),
- ?assert(Status =:= 201 orelse Status =:= 202),
- ?assertEqual(413, put_inline(Url, "doc3", 51))
- end).
-
+ ?_test(begin
+ Status = put_inline(Url, "doc2", 50),
+ ?assert(Status =:= 201 orelse Status =:= 202),
+ ?assertEqual(413, put_inline(Url, "doc3", 51))
+ end).
put_simple(Url) ->
?_test(begin
@@ -85,9 +80,8 @@ put_simple(Url) ->
?assertEqual(413, Status2)
end).
-
put_simple_chunked(Url) ->
- ?_test(begin
+ ?_test(begin
Headers = [{"Content-Type", "app/binary"}],
Rev1 = doc_rev(Url, "doc1"),
DataFun1 = data_stream_fun(50),
@@ -99,7 +93,6 @@ put_simple_chunked(Url) ->
?assertEqual(413, Status2)
end).
-
put_mp_related(Url) ->
?_test(begin
Headers = [?CONTENT_MULTI_RELATED],
@@ -111,89 +104,84 @@ put_mp_related(Url) ->
?assertEqual(413, Status2)
end).
-
put_chunked_mp_related(Url) ->
?_test(begin
- Headers = [?CONTENT_MULTI_RELATED],
- Body = mp_body(50),
- Status = put_req_chunked(Url ++ "/doc4", Headers, Body),
- ?assert(Status =:= 201 orelse Status =:= 202)
+ Headers = [?CONTENT_MULTI_RELATED],
+ Body = mp_body(50),
+ Status = put_req_chunked(Url ++ "/doc4", Headers, Body),
+ ?assert(Status =:= 201 orelse Status =:= 202)
end).
-
% Helper functions
create_db(Url) ->
Status = put_req(Url, "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
add_doc(Url, DocId) ->
Status = put_req(Url ++ "/" ++ DocId, "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
put_inline(Url, DocId, Size) ->
- Doc = "{\"_attachments\": {\"att1\":{"
+ Doc =
+ "{\"_attachments\": {\"att1\":{"
"\"content_type\": \"app/binary\", "
- "\"data\": \"" ++ data_b64(Size) ++ "\""
- "}}}",
+ "\"data\": \"" ++ data_b64(Size) ++
+ "\""
+ "}}}",
put_req(Url ++ "/" ++ DocId, Doc).
-
mp_body(AttSize) ->
AttData = data(AttSize),
SizeStr = integer_to_list(AttSize),
- string:join([
- "--bound",
-
- "Content-Type: application/json",
+ string:join(
+ [
+ "--bound",
- "",
+ "Content-Type: application/json",
- "{\"_id\":\"doc2\", \"_attachments\":{\"att\":"
- "{\"content_type\":\"app/binary\", \"length\":" ++ SizeStr ++ ","
- "\"follows\":true}}}",
+ "",
- "--bound",
+ "{\"_id\":\"doc2\", \"_attachments\":{\"att\":"
+ "{\"content_type\":\"app/binary\", \"length\":" ++ SizeStr ++
+ ","
+ "\"follows\":true}}}",
- "Content-Disposition: attachment; filename=\"att\"",
+ "--bound",
- "Content-Type: app/binary",
+ "Content-Disposition: attachment; filename=\"att\"",
- "",
+ "Content-Type: app/binary",
- AttData,
+ "",
- "--bound--"
- ], "\r\n").
+ AttData,
+ "--bound--"
+ ],
+ "\r\n"
+ ).
doc_rev(Url, DocId) ->
{200, ResultProps} = get_req(Url ++ "/" ++ DocId),
{<<"_rev">>, BinRev} = lists:keyfind(<<"_rev">>, 1, ResultProps),
binary_to_list(BinRev).
-
put_req(Url, Body) ->
put_req(Url, [], Body).
-
put_req(Url, Headers, Body) ->
{ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body),
Status.
-
put_req_chunked(Url, Headers, Body) ->
Opts = [{transfer_encoding, {chunked, 1}}],
{ok, Status, _, _} = test_request:put(Url, Headers ++ [?AUTH], Body, Opts),
Status.
-
get_req(Url) ->
{ok, Status, _, ResultBody} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
{[_ | _] = ResultProps} = ?JSON_DECODE(ResultBody),
@@ -202,15 +190,14 @@ get_req(Url) ->
% Data streaming generator for ibrowse client. ibrowse will repeatedly call the
% function with State and it should return {ok, Data, NewState} or eof at end.
data_stream_fun(Size) ->
- Fun = fun(0) -> eof; (BytesLeft) ->
- {ok, <<"x">>, BytesLeft - 1}
+ Fun = fun
+ (0) -> eof;
+ (BytesLeft) -> {ok, <<"x">>, BytesLeft - 1}
end,
{Fun, Size}.
-
data(Size) ->
string:copies("x", Size).
-
data_b64(Size) ->
base64:encode_to_string(data(Size)).
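The comment kept just above data_stream_fun/1 in the hunk above describes the ibrowse streaming contract: ibrowse calls the fun repeatedly with the previous state and expects {ok, Data, NewState} or eof. The reformatted generator follows it literally, one byte per call; the same contract with a configurable chunk size would look like the sketch below (ChunkSize is an added parameter for illustration, not something the test uses):

```
%% Same ibrowse body-streaming contract as data_stream_fun/1 above, but
%% emitting up to ChunkSize bytes of "x" per call instead of a single byte.
data_stream_fun(Size, ChunkSize) when is_integer(ChunkSize), ChunkSize > 0 ->
    Fun = fun
        (0) ->
            eof;
        (BytesLeft) ->
            N = min(ChunkSize, BytesLeft),
            {ok, binary:copy(<<"x">>, N), BytesLeft - N}
    end,
    {Fun, Size}.
```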
diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
index 86a8eab1a..91a3eaf19 100644
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_multipart_test.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 3000).
-
setup_all() ->
mock(config),
mock(chttpd),
@@ -27,11 +26,9 @@ setup_all() ->
mock(fabric),
mock(mochireq).
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([
config,
@@ -44,11 +41,9 @@ setup() ->
]),
spawn_accumulator().
-
teardown(Pid) ->
ok = stop_accumulator(Pid).
-
bulk_get_test_() ->
{
"/db/_bulk_get tests",
@@ -75,36 +70,36 @@ bulk_get_test_() ->
}
}.
-
should_require_docs_field(_) ->
Req = fake_request({[{}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, Db)).
-
should_not_accept_specific_query_params(_) ->
Req = fake_request({[{<<"docs">>, []}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- lists:map(fun (Param) ->
- {Param, ?_assertThrow({bad_request, _}, begin
- BadReq = Req#httpd{qs = [{Param, ""}]},
- chttpd_db:db_req(BadReq, Db)
- end)}
- end, ["rev", "open_revs", "atts_since", "w", "new_edits"]).
-
+ Db = test_util:fake_db([{name, <<"foo">>}]),
+ lists:map(
+ fun(Param) ->
+ {Param,
+ ?_assertThrow({bad_request, _}, begin
+ BadReq = Req#httpd{qs = [{Param, ""}]},
+ chttpd_db:db_req(BadReq, Db)
+ end)}
+ end,
+ ["rev", "open_revs", "atts_since", "w", "new_edits"]
+ ).
should_return_empty_results_on_no_docs(Pid) ->
Req = fake_request({[{<<"docs">>, []}]}),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
chttpd_db:db_req(Req, Db),
Results = get_results_from_response(Pid),
?_assertEqual([], Results).
-
should_get_doc_with_all_revs(Pid) ->
DocId = <<"docudoc">>,
Req = fake_request(DocId),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
DocRevA = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-ABC">>}]}},
DocRevB = #doc{id = DocId, body = {[{<<"_rev">>, <<"1-CDE">>}]}},
@@ -115,95 +110,126 @@ should_get_doc_with_all_revs(Pid) ->
Result = get_results_from_response(Pid),
?_assertEqual(DocId, couch_util:get_value(<<"_id">>, Result)).
-
should_validate_doc_with_bad_id(Pid) ->
DocId = <<"_docudoc">>,
Req = fake_request(DocId),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
chttpd_db:db_req(Req, Db),
Result = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
- ?_assertMatch([{<<"id">>, DocId},
- {<<"rev">>, null},
- {<<"error">>, <<"illegal_docid">>},
- {<<"reason">>, _}], Result).
-
+ ?_assertMatch(
+ [
+ {<<"id">>, DocId},
+ {<<"rev">>, null},
+ {<<"error">>, <<"illegal_docid">>},
+ {<<"reason">>, _}
+ ],
+ Result
+ ).
should_validate_doc_with_bad_rev(Pid) ->
DocId = <<"docudoc">>,
Rev = <<"revorev">>,
Req = fake_request(DocId, Rev),
- Db = test_util:fake_db([{name, <<"foo">>}]),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
chttpd_db:db_req(Req, Db),
Result = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
- ?_assertMatch([{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}], Result).
-
+ ?_assertMatch(
+ [
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"error">>, <<"bad_request">>},
+ {<<"reason">>, _}
+ ],
+ Result
+ ).
should_validate_missing_doc(Pid) ->
DocId = <<"docudoc">>,
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- mock_open_revs([{1,<<"revorev">>}], {ok, []}),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
+ mock_open_revs([{1, <<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, Db),
Result = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
- ?_assertMatch([{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, _}], Result).
-
+ ?_assertMatch(
+ [
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"error">>, <<"not_found">>},
+ {<<"reason">>, _}
+ ],
+ Result
+ ).
should_validate_bad_atts_since(Pid) ->
DocId = <<"docudoc">>,
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev, <<"badattsince">>),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- mock_open_revs([{1,<<"revorev">>}], {ok, []}),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
+ mock_open_revs([{1, <<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, Db),
Result = get_results_from_response(Pid),
?assertEqual(DocId, couch_util:get_value(<<"id">>, Result)),
- ?_assertMatch([{<<"id">>, DocId},
- {<<"rev">>, <<"badattsince">>},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}], Result).
-
+ ?_assertMatch(
+ [
+ {<<"id">>, DocId},
+ {<<"rev">>, <<"badattsince">>},
+ {<<"error">>, <<"bad_request">>},
+ {<<"reason">>, _}
+ ],
+ Result
+ ).
should_include_attachments_when_atts_since_specified(_) ->
DocId = <<"docudoc">>,
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev, [<<"1-abc">>]),
- Db = test_util:fake_db([{name, <<"foo">>}]),
- mock_open_revs([{1,<<"revorev">>}], {ok, []}),
+ Db = test_util:fake_db([{name, <<"foo">>}]),
+ mock_open_revs([{1, <<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, Db),
- ?_assert(meck:called(fabric, open_revs,
- ['_', DocId, [{1, <<"revorev">>}],
- [{atts_since, [{1, <<"abc">>}]}, attachments,
- {user_ctx, undefined}]])).
+ ?_assert(
+ meck:called(
+ fabric,
+ open_revs,
+ [
+ '_',
+ DocId,
+ [{1, <<"revorev">>}],
+ [
+ {atts_since, [{1, <<"abc">>}]},
+ attachments,
+ {user_ctx, undefined}
+ ]
+ ]
+ )
+ ).
%% helpers
fake_request(Payload) when is_tuple(Payload) ->
- #httpd{method='POST', path_parts=[<<"db">>, <<"_bulk_get">>],
- mochi_req=mochireq, req_body=Payload};
+ #httpd{
+ method = 'POST',
+ path_parts = [<<"db">>, <<"_bulk_get">>],
+ mochi_req = mochireq,
+ req_body = Payload
+ };
fake_request(DocId) when is_binary(DocId) ->
fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}).
@@ -211,25 +237,36 @@ fake_request(DocId, Rev) ->
fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}).
fake_request(DocId, Rev, AttsSince) ->
- fake_request({[{<<"docs">>, [{[{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"atts_since">>, AttsSince}]}]}]}).
-
+ fake_request(
+ {[
+ {<<"docs">>, [
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"atts_since">>, AttsSince}
+ ]}
+ ]}
+ ]}
+ ).
mock_open_revs(RevsReq0, RevsResp) ->
- ok = meck:expect(fabric, open_revs,
- fun(_, _, RevsReq1, _) ->
- ?assertEqual(RevsReq0, RevsReq1),
- RevsResp
- end).
-
+ ok = meck:expect(
+ fabric,
+ open_revs,
+ fun(_, _, RevsReq1, _) ->
+ ?assertEqual(RevsReq0, RevsReq1),
+ RevsResp
+ end
+ ).
mock(mochireq) ->
ok = meck:new(mochireq, [non_strict]),
ok = meck:expect(mochireq, parse_qs, fun() -> [] end),
- ok = meck:expect(mochireq, accepts_content_type, fun("multipart/mixed") -> true;
- ("multipart/related") -> true;
- (_) -> false end),
+ ok = meck:expect(mochireq, accepts_content_type, fun
+ ("multipart/mixed") -> true;
+ ("multipart/related") -> true;
+ (_) -> false
+ end),
ok;
mock(couch_httpd) ->
ok = meck:new(couch_httpd, [passthrough]),
@@ -243,7 +280,7 @@ mock(chttpd) ->
ok = meck:expect(chttpd, start_chunked_response, fun(_, _, _) -> {ok, nil} end),
ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end),
ok = meck:expect(chttpd, send_chunk, fun send_chunk/2),
- ok = meck:expect(chttpd, json_body_obj, fun (#httpd{req_body=Body}) -> Body end),
+ ok = meck:expect(chttpd, json_body_obj, fun(#httpd{req_body = Body}) -> Body end),
ok;
mock(couch_epi) ->
ok = meck:new(couch_epi, [passthrough]),
@@ -266,7 +303,6 @@ mock(config) ->
ok = meck:expect(config, get, fun(_, _, Default) -> Default end),
ok.
-
spawn_accumulator() ->
Parent = self(),
Pid = spawn(fun() -> accumulator_loop(Parent, []) end),
@@ -282,7 +318,7 @@ accumulator_loop(Parent, Acc) ->
accumulator_loop(Parent, Acc);
{put, Ref, Chunk} ->
Parent ! {ok, Ref},
- accumulator_loop(Parent, [Chunk|Acc])
+ accumulator_loop(Parent, [Chunk | Acc])
end.
stop_accumulator(Pid) ->
@@ -295,10 +331,9 @@ stop_accumulator(Pid) ->
throw({timeout, <<"process stop timeout">>})
end.
-
send_chunk(_, []) ->
{ok, nil};
-send_chunk(_Req, [H|T]=Chunk) when is_list(Chunk) ->
+send_chunk(_Req, [H | T] = Chunk) when is_list(Chunk) ->
send_chunk(_Req, H),
send_chunk(_Req, T);
send_chunk(_, Chunk) ->
@@ -311,7 +346,6 @@ send_chunk(_, Chunk) ->
throw({timeout, <<"send chunk timeout">>})
end.
-
get_response(Pid) ->
Ref = make_ref(),
Pid ! {get, Ref},
@@ -325,8 +359,8 @@ get_response(Pid) ->
get_results_from_response(Pid) ->
case get_response(Pid) of
[] ->
- [];
+ [];
Result ->
- {Result1} = ?JSON_DECODE(lists:nth(2, Result)),
- Result1
+ {Result1} = ?JSON_DECODE(lists:nth(2, Result)),
+ Result1
end.
diff --git a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
index 1a3411254..81dfe098b 100644
--- a/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_bulk_get_test.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 3000).
-
setup_all() ->
mock(config),
mock(chttpd),
@@ -27,19 +26,15 @@ setup_all() ->
mock(fabric),
mock(mochireq).
-
teardown_all(_) ->
meck:unload().
-
setup() ->
spawn_accumulator().
-
teardown(Pid) ->
ok = stop_accumulator(Pid).
-
bulk_get_test_() ->
{
"/db/_bulk_get tests",
@@ -66,21 +61,22 @@ bulk_get_test_() ->
}
}.
-
should_require_docs_field(_) ->
Req = fake_request({[{}]}),
?_assertThrow({bad_request, _}, chttpd_db:db_req(Req, nil)).
-
should_not_accept_specific_query_params(_) ->
Req = fake_request({[{<<"docs">>, []}]}),
- lists:map(fun (Param) ->
- {Param, ?_assertThrow({bad_request, _}, begin
- BadReq = Req#httpd{qs = [{Param, ""}]},
- chttpd_db:db_req(BadReq, nil)
- end)}
- end, ["rev", "open_revs", "atts_since", "w", "new_edits"]).
-
+ lists:map(
+ fun(Param) ->
+ {Param,
+ ?_assertThrow({bad_request, _}, begin
+ BadReq = Req#httpd{qs = [{Param, ""}]},
+ chttpd_db:db_req(BadReq, nil)
+ end)}
+ end,
+ ["rev", "open_revs", "atts_since", "w", "new_edits"]
+ ).
should_return_empty_results_on_no_docs(Pid) ->
Req = fake_request({[{<<"docs">>, []}]}),
@@ -88,7 +84,6 @@ should_return_empty_results_on_no_docs(Pid) ->
Results = get_results_from_response(Pid),
?_assertEqual([], Results).
-
should_get_doc_with_all_revs(Pid) ->
DocId = <<"docudoc">>,
Req = fake_request(DocId),
@@ -114,7 +109,6 @@ should_get_doc_with_all_revs(Pid) ->
?_assertEqual([RevA, RevB], [DocA, DocB]).
-
should_validate_doc_with_bad_id(Pid) ->
DocId = <<"_docudoc">>,
@@ -130,12 +124,15 @@ should_validate_doc_with_bad_id(Pid) ->
Doc = couch_util:get_value(<<"error">>, DocResult),
- ?_assertMatch({[{<<"id">>, DocId},
- {<<"rev">>, null},
- {<<"error">>, <<"illegal_docid">>},
- {<<"reason">>, _}]},
- Doc).
-
+ ?_assertMatch(
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, null},
+ {<<"error">>, <<"illegal_docid">>},
+ {<<"reason">>, _}
+ ]},
+ Doc
+ ).
should_validate_doc_with_bad_rev(Pid) ->
DocId = <<"docudoc">>,
@@ -153,19 +150,22 @@ should_validate_doc_with_bad_rev(Pid) ->
Doc = couch_util:get_value(<<"error">>, DocResult),
- ?_assertMatch({[{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}]},
- Doc).
-
+ ?_assertMatch(
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"error">>, <<"bad_request">>},
+ {<<"reason">>, _}
+ ]},
+ Doc
+ ).
should_validate_missing_doc(Pid) ->
DocId = <<"docudoc">>,
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev),
- mock_open_revs([{1,<<"revorev">>}], {ok, []}),
+ mock_open_revs([{1, <<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
[{Result}] = get_results_from_response(Pid),
@@ -177,19 +177,22 @@ should_validate_missing_doc(Pid) ->
Doc = couch_util:get_value(<<"error">>, DocResult),
- ?_assertMatch({[{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"error">>, <<"not_found">>},
- {<<"reason">>, _}]},
- Doc).
-
+ ?_assertMatch(
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"error">>, <<"not_found">>},
+ {<<"reason">>, _}
+ ]},
+ Doc
+ ).
should_validate_bad_atts_since(Pid) ->
DocId = <<"docudoc">>,
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev, <<"badattsince">>),
- mock_open_revs([{1,<<"revorev">>}], {ok, []}),
+ mock_open_revs([{1, <<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
[{Result}] = get_results_from_response(Pid),
@@ -201,31 +204,50 @@ should_validate_bad_atts_since(Pid) ->
Doc = couch_util:get_value(<<"error">>, DocResult),
- ?_assertMatch({[{<<"id">>, DocId},
- {<<"rev">>, <<"badattsince">>},
- {<<"error">>, <<"bad_request">>},
- {<<"reason">>, _}]},
- Doc).
-
+ ?_assertMatch(
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, <<"badattsince">>},
+ {<<"error">>, <<"bad_request">>},
+ {<<"reason">>, _}
+ ]},
+ Doc
+ ).
should_include_attachments_when_atts_since_specified(_) ->
DocId = <<"docudoc">>,
Rev = <<"1-revorev">>,
Req = fake_request(DocId, Rev, [<<"1-abc">>]),
- mock_open_revs([{1,<<"revorev">>}], {ok, []}),
+ mock_open_revs([{1, <<"revorev">>}], {ok, []}),
chttpd_db:db_req(Req, test_util:fake_db([{name, <<"foo">>}])),
- ?_assert(meck:called(fabric, open_revs,
- ['_', DocId, [{1, <<"revorev">>}],
- [{atts_since, [{1, <<"abc">>}]}, attachments,
- {user_ctx, undefined}]])).
+ ?_assert(
+ meck:called(
+ fabric,
+ open_revs,
+ [
+ '_',
+ DocId,
+ [{1, <<"revorev">>}],
+ [
+ {atts_since, [{1, <<"abc">>}]},
+ attachments,
+ {user_ctx, undefined}
+ ]
+ ]
+ )
+ ).
%% helpers
fake_request(Payload) when is_tuple(Payload) ->
- #httpd{method='POST', path_parts=[<<"db">>, <<"_bulk_get">>],
- mochi_req=mochireq, req_body=Payload};
+ #httpd{
+ method = 'POST',
+ path_parts = [<<"db">>, <<"_bulk_get">>],
+ mochi_req = mochireq,
+ req_body = Payload
+ };
fake_request(DocId) when is_binary(DocId) ->
fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}]}]}]}).
@@ -233,18 +255,27 @@ fake_request(DocId, Rev) ->
fake_request({[{<<"docs">>, [{[{<<"id">>, DocId}, {<<"rev">>, Rev}]}]}]}).
fake_request(DocId, Rev, AttsSince) ->
- fake_request({[{<<"docs">>, [{[{<<"id">>, DocId},
- {<<"rev">>, Rev},
- {<<"atts_since">>, AttsSince}]}]}]}).
-
+ fake_request(
+ {[
+ {<<"docs">>, [
+ {[
+ {<<"id">>, DocId},
+ {<<"rev">>, Rev},
+ {<<"atts_since">>, AttsSince}
+ ]}
+ ]}
+ ]}
+ ).
mock_open_revs(RevsReq0, RevsResp) ->
- ok = meck:expect(fabric, open_revs,
- fun(_, _, RevsReq1, _) ->
- ?assertEqual(RevsReq0, RevsReq1),
- RevsResp
- end).
-
+ ok = meck:expect(
+ fabric,
+ open_revs,
+ fun(_, _, RevsReq1, _) ->
+ ?assertEqual(RevsReq0, RevsReq1),
+ RevsResp
+ end
+ ).
mock(mochireq) ->
ok = meck:new(mochireq, [non_strict]),
@@ -260,7 +291,7 @@ mock(chttpd) ->
ok = meck:expect(chttpd, start_json_response, fun(_, _) -> {ok, nil} end),
ok = meck:expect(chttpd, end_json_response, fun(_) -> ok end),
ok = meck:expect(chttpd, send_chunk, fun send_chunk/2),
- ok = meck:expect(chttpd, json_body_obj, fun (#httpd{req_body=Body}) -> Body end),
+ ok = meck:expect(chttpd, json_body_obj, fun(#httpd{req_body = Body}) -> Body end),
ok;
mock(couch_epi) ->
ok = meck:new(couch_epi, [passthrough]),
@@ -283,7 +314,6 @@ mock(config) ->
ok = meck:expect(config, get, fun(_, _, Default) -> Default end),
ok.
-
spawn_accumulator() ->
Parent = self(),
Pid = spawn(fun() -> accumulator_loop(Parent, []) end),
@@ -299,7 +329,7 @@ accumulator_loop(Parent, Acc) ->
accumulator_loop(Parent, Acc);
{put, Ref, Chunk} ->
Parent ! {ok, Ref},
- accumulator_loop(Parent, [Chunk|Acc])
+ accumulator_loop(Parent, [Chunk | Acc])
end.
stop_accumulator(Pid) ->
@@ -312,10 +342,9 @@ stop_accumulator(Pid) ->
throw({timeout, <<"process stop timeout">>})
end.
-
send_chunk(_, []) ->
{ok, nil};
-send_chunk(_Req, [H|T]=Chunk) when is_list(Chunk) ->
+send_chunk(_Req, [H | T] = Chunk) when is_list(Chunk) ->
send_chunk(_Req, H),
send_chunk(_Req, T);
send_chunk(_, Chunk) ->
@@ -328,7 +357,6 @@ send_chunk(_, Chunk) ->
throw({timeout, <<"send chunk timeout">>})
end.
-
get_response(Pid) ->
Ref = make_ref(),
Pid ! {get, Ref},
@@ -339,7 +367,6 @@ get_response(Pid) ->
throw({timeout, <<"get response timeout">>})
end.
-
get_results_from_response(Pid) ->
{Resp} = get_response(Pid),
couch_util:get_value(<<"results">>, Resp).
diff --git a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
index 88e2797a3..01ef16f23 100644
--- a/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_db_doc_size_tests.erl
@@ -19,15 +19,12 @@
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(CONTENT_MULTI_RELATED, {"Content-Type",
- "multipart/related;boundary=\"bound\""}).
--define(CONTENT_MULTI_FORM, {"Content-Type",
- "multipart/form-data;boundary=\"bound\""}).
-
+-define(CONTENT_MULTI_RELATED, {"Content-Type", "multipart/related;boundary=\"bound\""}).
+-define(CONTENT_MULTI_FORM, {"Content-Type", "multipart/form-data;boundary=\"bound\""}).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
ok = config:set("couchdb", "max_document_size", "50"),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
@@ -38,7 +35,7 @@ setup() ->
teardown(Url) ->
delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false),
+ ok = config:delete("admins", ?USER, _Persist = false),
ok = config:delete("couchdb", "max_document_size").
create_db(Url) ->
@@ -62,7 +59,8 @@ all_test_() ->
fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun post_single_doc/1,
fun put_single_doc/1,
@@ -76,52 +74,80 @@ all_test_() ->
}.
post_single_doc(Url) ->
- NewDoc = "{\"post_single_doc\": \"some_doc\",
- \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}",
- {ok, _, _, ResultBody} = test_request:post(Url,
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ NewDoc =
+ "{\"post_single_doc\": \"some_doc\",\n"
+ " \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}",
+ {ok, _, _, ResultBody} = test_request:post(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
{[ErrorMsg | _]} = ?JSON_DECODE(ResultBody),
?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg).
put_single_doc(Url) ->
- NewDoc = "{\"post_single_doc\": \"some_doc\",
- \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}",
- {ok, _, _, ResultBody} = test_request:put(Url ++ "/" ++ "testid",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ NewDoc =
+ "{\"post_single_doc\": \"some_doc\",\n"
+ " \"_id\": \"testdoc\", \"should_be\" : \"too_large\"}",
+ {ok, _, _, ResultBody} = test_request:put(
+ Url ++ "/" ++ "testid",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
{[ErrorMsg | _]} = ?JSON_DECODE(ResultBody),
?_assertEqual({<<"error">>, <<"document_too_large">>}, ErrorMsg).
bulk_doc(Url) ->
- NewDoc = "{\"docs\": [{\"doc1\": 1}, {\"errordoc\":
- \"this_should_be_the_too_large_error_document\"}]}",
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ NewDoc =
+ "{\"docs\": [{\"doc1\": 1}, {\"errordoc\":\n"
+ " \"this_should_be_the_too_large_error_document\"}]}",
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
- Expect = {[{<<"error">>,<<"document_too_large">>},{<<"reason">>,<<>>}]},
+ Expect = {[{<<"error">>, <<"document_too_large">>}, {<<"reason">>, <<>>}]},
?_assertEqual(Expect, ResultJson).
put_post_doc_attach_inline(Url) ->
Body1 = "{\"body\":\"This is a body.\",",
- Body2 = lists:concat(["{\"body\":\"This is a body it should fail",
- "because there are too many characters.\","]),
- DocRest = lists:concat(["\"_attachments\":{\"foo.txt\":{",
+ Body2 = lists:concat([
+ "{\"body\":\"This is a body it should fail",
+ "because there are too many characters.\","
+ ]),
+ DocRest = lists:concat([
+ "\"_attachments\":{\"foo.txt\":{",
"\"content_type\":\"text/plain\",",
- "\"data\": \"VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=\"}}}"]),
+ "\"data\": \"VGhpcyBpcyBhIGJhc2U2NCBlbmNvZGVkIHRleHQ=\"}}}"
+ ]),
Doc1 = lists:concat([Body1, DocRest]),
Doc2 = lists:concat([Body2, DocRest]),
- {ok, _, _, ResultBody} = test_request:post(Url,
- [?CONTENT_JSON, ?AUTH], Doc1),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ Doc1
+ ),
{[Msg | _]} = ?JSON_DECODE(ResultBody),
- {ok, _, _, ResultBody1} = test_request:post(Url,
- [?CONTENT_JSON, ?AUTH], Doc2),
+ {ok, _, _, ResultBody1} = test_request:post(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ Doc2
+ ),
{[Msg1 | _]} = ?JSON_DECODE(ResultBody1),
- {ok, _, _, ResultBody2} = test_request:put(Url ++ "/" ++ "accept",
- [?CONTENT_JSON, ?AUTH], Doc1),
+ {ok, _, _, ResultBody2} = test_request:put(
+ Url ++ "/" ++ "accept",
+ [?CONTENT_JSON, ?AUTH],
+ Doc1
+ ),
{[Msg2 | _]} = ?JSON_DECODE(ResultBody2),
- {ok, _, _, ResultBody3} = test_request:put(Url ++ "/" ++ "fail",
- [?CONTENT_JSON, ?AUTH], Doc2),
+ {ok, _, _, ResultBody3} = test_request:put(
+ Url ++ "/" ++ "fail",
+ [?CONTENT_JSON, ?AUTH],
+ Doc2
+ ),
{[Msg3 | _]} = ?JSON_DECODE(ResultBody3),
[
?_assertEqual({<<"ok">>, true}, Msg),
@@ -132,21 +158,31 @@ put_post_doc_attach_inline(Url) ->
put_multi_part_related(Url) ->
Body1 = "{\"body\":\"This is a body.\",",
- Body2 = lists:concat(["{\"body\":\"This is a body it should fail",
- "because there are too many characters.\","]),
+ Body2 = lists:concat([
+ "{\"body\":\"This is a body it should fail",
+ "because there are too many characters.\","
+ ]),
DocBeg = "--bound\r\nContent-Type: application/json\r\n\r\n",
- DocRest = lists:concat(["\"_attachments\":{\"foo.txt\":{\"follows\":true,",
+ DocRest = lists:concat([
+ "\"_attachments\":{\"foo.txt\":{\"follows\":true,",
"\"content_type\":\"text/plain\",\"length\":21},\"bar.txt\":",
"{\"follows\":true,\"content_type\":\"text/plain\",",
"\"length\":20}}}\r\n--bound\r\n\r\nthis is 21 chars long",
- "\r\n--bound\r\n\r\nthis is 20 chars lon\r\n--bound--epilogue"]),
+ "\r\n--bound\r\n\r\nthis is 20 chars lon\r\n--bound--epilogue"
+ ]),
Doc1 = lists:concat([DocBeg, Body1, DocRest]),
Doc2 = lists:concat([DocBeg, Body2, DocRest]),
- {ok, _, _, ResultBody} = test_request:put(Url ++ "/" ++ "accept",
- [?CONTENT_MULTI_RELATED, ?AUTH], Doc1),
+ {ok, _, _, ResultBody} = test_request:put(
+ Url ++ "/" ++ "accept",
+ [?CONTENT_MULTI_RELATED, ?AUTH],
+ Doc1
+ ),
{[Msg | _]} = ?JSON_DECODE(ResultBody),
- {ok, _, _, ResultBody1} = test_request:put(Url ++ "/" ++ "faildoc",
- [?CONTENT_MULTI_RELATED, ?AUTH], Doc2),
+ {ok, _, _, ResultBody1} = test_request:put(
+ Url ++ "/" ++ "faildoc",
+ [?CONTENT_MULTI_RELATED, ?AUTH],
+ Doc2
+ ),
{[Msg1 | _]} = ?JSON_DECODE(ResultBody1),
[
?_assertEqual({<<"ok">>, true}, Msg),
@@ -155,23 +191,33 @@ put_multi_part_related(Url) ->
post_multi_part_form(Url) ->
Port = mochiweb_socket_server:get(chttpd, port),
- Host = lists:concat([ "http://127.0.0.1:", Port]),
+ Host = lists:concat(["http://127.0.0.1:", Port]),
Referer = {"Referer", Host},
Body1 = "{\"body\":\"This is a body.\"}",
- Body2 = lists:concat(["{\"body\":\"This is a body it should fail",
- "because there are too many characters.\"}"]),
+ Body2 = lists:concat([
+ "{\"body\":\"This is a body it should fail",
+ "because there are too many characters.\"}"
+ ]),
DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n",
- DocRest = lists:concat(["\r\n--bound\r\nContent-Disposition:",
+ DocRest = lists:concat([
+ "\r\n--bound\r\nContent-Disposition:",
"form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n",
"Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n",
- "--bound--"]),
+ "--bound--"
+ ]),
Doc1 = lists:concat([DocBeg, Body1, DocRest]),
Doc2 = lists:concat([DocBeg, Body2, DocRest]),
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/" ++ "accept",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc1),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/" ++ "accept",
+ [?CONTENT_MULTI_FORM, ?AUTH, Referer],
+ Doc1
+ ),
{[Msg | _]} = ?JSON_DECODE(ResultBody),
- {ok, _, _, ResultBody1} = test_request:post(Url ++ "/" ++ "fail",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc2),
+ {ok, _, _, ResultBody1} = test_request:post(
+ Url ++ "/" ++ "fail",
+ [?CONTENT_MULTI_FORM, ?AUTH, Referer],
+ Doc2
+ ),
{[Msg1 | _]} = ?JSON_DECODE(ResultBody1),
[
?_assertEqual({<<"ok">>, true}, Msg),
diff --git a/src/chttpd/test/eunit/chttpd_db_test.erl b/src/chttpd/test/eunit/chttpd_db_test.erl
index 15e75393c..07661733f 100644
--- a/src/chttpd/test/eunit/chttpd_db_test.erl
+++ b/src/chttpd/test/eunit/chttpd_db_test.erl
@@ -23,11 +23,12 @@
-define(DESTHEADER2, {"Destination", "foo%2Fbar%23baz%3Fpow%3Afiz"}).
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
-define(i2l(I), integer_to_list(I)).
--define(TIMEOUT, 60). % seconds
+% seconds
+-define(TIMEOUT, 60).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -37,16 +38,18 @@ setup() ->
teardown(Url) ->
delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false).
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
create_doc(Url, Id) ->
- test_request:put(Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
+ test_request:put(
+ Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH],
+ "{\"mr\": \"rockoartischocko\"}"
+ ).
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
@@ -56,10 +59,12 @@ all_test_() ->
"chttpd db tests",
{
setup,
- fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_ok_true_on_bulk_update/1,
fun should_return_201_new_edits_false_with_revs_on_bulk_update/1,
@@ -92,424 +97,511 @@ all_test_() ->
}
}.
-
should_return_ok_true_on_bulk_update(Url) ->
- {timeout, ?TIMEOUT, ?_assertEqual(true,
- begin
- {ok, _, _, Body} = create_doc(Url, "testdoc"),
- {Json} = ?JSON_DECODE(Body),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- NewDoc = "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++ "\", \"_id\": \"testdoc\"}]}",
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
- ResultJson = ?JSON_DECODE(ResultBody),
- {InnerJson} = lists:nth(1, ResultJson),
- couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_assertEqual(
+ true,
+ begin
+ {ok, _, _, Body} = create_doc(Url, "testdoc"),
+ {Json} = ?JSON_DECODE(Body),
+ Ref = couch_util:get_value(<<"rev">>, Json, undefined),
+ NewDoc = "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++ "\", \"_id\": \"testdoc\"}]}",
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
+ ResultJson = ?JSON_DECODE(ResultBody),
+ {InnerJson} = lists:nth(1, ResultJson),
+ couch_util:get_value(<<"ok">>, InnerJson, undefined)
+ end
+ )}.
should_return_201_new_edits_false_with_revs_on_bulk_update(Url) ->
- {timeout, ?TIMEOUT, ?_test(
- begin
- {ok, _, _, Body} = create_doc(Url, "dochasrev"),
- {Json} = ?JSON_DECODE(Body),
- Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- NewDoc = "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++
- "\", \"_id\": \"dochasrev\"}], \"new_edits\": false}",
- {ok, Status, _, ResultBody} = test_request:post(Url ++
- "/_bulk_docs/", [?CONTENT_JSON, ?AUTH], NewDoc),
- ?assertEqual(201, Status),
- ?assertEqual([], ?JSON_DECODE(ResultBody))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(
+ begin
+ {ok, _, _, Body} = create_doc(Url, "dochasrev"),
+ {Json} = ?JSON_DECODE(Body),
+ Ref = couch_util:get_value(<<"rev">>, Json, undefined),
+ NewDoc =
+ "{\"docs\": [{\"_rev\": \"" ++ ?b2l(Ref) ++
+ "\", \"_id\": \"dochasrev\"}], \"new_edits\": false}",
+ {ok, Status, _, ResultBody} = test_request:post(
+ Url ++
+ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
+ ?assertEqual(201, Status),
+ ?assertEqual([], ?JSON_DECODE(ResultBody))
+ end
+ )}.
should_return_400_new_edits_false_no_revs_on_bulk_update(Url) ->
- {timeout, ?TIMEOUT, ?_test(
- begin
- {ok, _, _, _} = create_doc(Url, "docnorev"),
- NewDoc = "{\"docs\": [{\"_id\": \"docnorev\"}], "
- ++ "\"new_edits\": false}",
- {ok, Status, _, ResultBody} = test_request:post(Url ++
- "/_bulk_docs/", [?CONTENT_JSON, ?AUTH], NewDoc),
- {ResultJson} = ?JSON_DECODE(ResultBody),
- ?assertEqual(400, Status),
- ?assertEqual(<<"bad_request">>,
- couch_util:get_value(<<"error">>, ResultJson))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(
+ begin
+ {ok, _, _, _} = create_doc(Url, "docnorev"),
+ NewDoc =
+ "{\"docs\": [{\"_id\": \"docnorev\"}], " ++
+ "\"new_edits\": false}",
+ {ok, Status, _, ResultBody} = test_request:post(
+ Url ++
+ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
+ {ResultJson} = ?JSON_DECODE(ResultBody),
+ ?assertEqual(400, Status),
+ ?assertEqual(
+ <<"bad_request">>,
+ couch_util:get_value(<<"error">>, ResultJson)
+ )
+ end
+ )}.
should_return_ok_true_on_ensure_full_commit(Url0) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Url = Url0 ++ "/_ensure_full_commit",
- {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
- {Json} = ?JSON_DECODE(Body),
- ?assertEqual(201, RC),
- ?assert(couch_util:get_value(<<"ok">>, Json))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Url = Url0 ++ "/_ensure_full_commit",
+ {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
+ {Json} = ?JSON_DECODE(Body),
+ ?assertEqual(201, RC),
+ ?assert(couch_util:get_value(<<"ok">>, Json))
+ end)}.
should_return_404_for_ensure_full_commit_on_no_db(Url0) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Url = Url0 ++ "-missing-db" ++ "/_ensure_full_commit",
- {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
- {Json} = ?JSON_DECODE(Body),
- ?assertEqual(404, RC),
- ?assertEqual(<<"not_found">>, couch_util:get_value(<<"error">>, Json))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Url = Url0 ++ "-missing-db" ++ "/_ensure_full_commit",
+ {ok, RC, _, Body} = test_request:post(Url, [?CONTENT_JSON, ?AUTH], []),
+ {Json} = ?JSON_DECODE(Body),
+ ?assertEqual(404, RC),
+ ?assertEqual(<<"not_found">>, couch_util:get_value(<<"error">>, Json))
+ end)}.
should_accept_live_as_an_alias_for_continuous(Url) ->
GetLastSeq = fun(Chunks) ->
LastSeqBin = lists:last(Chunks),
- {Result} = try ?JSON_DECODE(LastSeqBin) of
- Data -> Data
- catch
- _:_ ->
- ?assert(false) % should not happen, abort
- end,
+ {Result} =
+ try ?JSON_DECODE(LastSeqBin) of
+ Data -> Data
+ catch
+ _:_ ->
+ % should not happen, abort
+ ?assert(false)
+ end,
couch_util:get_value(<<"last_seq">>, Result, undefined)
end,
- {timeout, ?TIMEOUT, ?_test(begin
- LastSeq1 = GetLastSeq(wait_non_empty_chunk(Url)),
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ LastSeq1 = GetLastSeq(wait_non_empty_chunk(Url)),
- {ok, _, _, _} = create_doc(Url, "testdoc2"),
+ {ok, _, _, _} = create_doc(Url, "testdoc2"),
- LastSeq2 = GetLastSeq(wait_non_empty_chunk(Url)),
-
- ?assertNotEqual(LastSeq1, LastSeq2)
- end)}.
+ LastSeq2 = GetLastSeq(wait_non_empty_chunk(Url)),
+ ?assertNotEqual(LastSeq1, LastSeq2)
+ end)}.
should_return_404_for_delete_att_on_notadoc(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, RC, _, RespBody} = test_request:delete(
- Url ++ "/notadoc/att.pdf",
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(404, RC),
- ?assertEqual(
- {[{<<"error">>,<<"not_found">>},
- {<<"reason">>,<<"missing">>}]},
- jiffy:decode(RespBody)
- ),
- {ok, RC1, _, _} = test_request:get(
- Url ++ "/notadoc",
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(404, RC1)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, RC, _, RespBody} = test_request:delete(
+ Url ++ "/notadoc/att.pdf",
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ ?assertEqual(404, RC),
+ ?assertEqual(
+ {[
+ {<<"error">>, <<"not_found">>},
+ {<<"reason">>, <<"missing">>}
+ ]},
+ jiffy:decode(RespBody)
+ ),
+ {ok, RC1, _, _} = test_request:get(
+ Url ++ "/notadoc",
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ ?assertEqual(404, RC1)
+ end)}.
should_return_409_for_del_att_without_rev(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, RC, _, _} = test_request:put(
- Url ++ "/testdoc3",
- [?CONTENT_JSON, ?AUTH],
- jiffy:encode(attachment_doc())
- ),
- ?assertEqual(201, RC),
-
- {ok, RC1, _, _} = test_request:delete(
- Url ++ "/testdoc3/file.erl",
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(409, RC1)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, RC, _, _} = test_request:put(
+ Url ++ "/testdoc3",
+ [?CONTENT_JSON, ?AUTH],
+ jiffy:encode(attachment_doc())
+ ),
+ ?assertEqual(201, RC),
+
+ {ok, RC1, _, _} = test_request:delete(
+ Url ++ "/testdoc3/file.erl",
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ ?assertEqual(409, RC1)
+ end)}.
should_return_200_for_del_att_with_rev(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, RC, _Headers, RespBody} = test_request:put(
- Url ++ "/testdoc4",
- [?CONTENT_JSON, ?AUTH],
- jiffy:encode(attachment_doc())
- ),
- ?assertEqual(201, RC),
-
- {ResultJson} = ?JSON_DECODE(RespBody),
- Rev = couch_util:get_value(<<"rev">>, ResultJson, undefined),
-
- {ok, RC1, _, _} = test_request:delete(
- Url ++ "/testdoc4/file.erl?rev=" ++ Rev,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?assertEqual(200, RC1)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, RC, _Headers, RespBody} = test_request:put(
+ Url ++ "/testdoc4",
+ [?CONTENT_JSON, ?AUTH],
+ jiffy:encode(attachment_doc())
+ ),
+ ?assertEqual(201, RC),
+
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ Rev = couch_util:get_value(<<"rev">>, ResultJson, undefined),
+
+ {ok, RC1, _, _} = test_request:delete(
+ Url ++ "/testdoc4/file.erl?rev=" ++ Rev,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ ?assertEqual(200, RC1)
+ end)}.
should_return_409_for_put_att_nonexistent_rev(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, RC, _Headers, RespBody} = test_request:put(
- Url ++ "/should_return_404/file.erl?rev=1-000",
- [?CONTENT_JSON, ?AUTH],
- jiffy:encode(attachment_doc())
- ),
- ?assertEqual(409, RC),
- ?assertMatch({[
- {<<"error">>,<<"not_found">>},
- {<<"reason">>,<<"missing_rev">>}]},
- ?JSON_DECODE(RespBody))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, RC, _Headers, RespBody} = test_request:put(
+ Url ++ "/should_return_404/file.erl?rev=1-000",
+ [?CONTENT_JSON, ?AUTH],
+ jiffy:encode(attachment_doc())
+ ),
+ ?assertEqual(409, RC),
+ ?assertMatch(
+ {[
+ {<<"error">>, <<"not_found">>},
+ {<<"reason">>, <<"missing_rev">>}
+ ]},
+ ?JSON_DECODE(RespBody)
+ )
+ end)}.
should_return_update_seq_when_set_on_all_docs(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
- {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
- ++ "?update_seq=true&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ?assertNotEqual(undefined,
- couch_util:get_value(<<"update_seq">>, ResultJson)),
- ?assertNotEqual(undefined,
- couch_util:get_value(<<"offset">>, ResultJson))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
+ {ok, RC, _, RespBody} = test_request:get(
+ Url ++ "/_all_docs/" ++
+ "?update_seq=true&keys=[\"testdoc1\"]",
+ [?CONTENT_JSON, ?AUTH]
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ?assertNotEqual(
+ undefined,
+ couch_util:get_value(<<"update_seq">>, ResultJson)
+ ),
+ ?assertNotEqual(
+ undefined,
+ couch_util:get_value(<<"offset">>, ResultJson)
+ )
+ end)}.
should_not_return_update_seq_when_unset_on_all_docs(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
- {ok, RC, _, RespBody} = test_request:get(Url ++ "/_all_docs/"
- ++ "?update_seq=false&keys=[\"testdoc1\"]",[?CONTENT_JSON, ?AUTH]),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ?assertEqual(undefined,
- couch_util:get_value(<<"update_seq">>, ResultJson)),
- ?assertNotEqual(undefined,
- couch_util:get_value(<<"offset">>, ResultJson))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 3)],
+ {ok, RC, _, RespBody} = test_request:get(
+ Url ++ "/_all_docs/" ++
+ "?update_seq=false&keys=[\"testdoc1\"]",
+ [?CONTENT_JSON, ?AUTH]
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(<<"update_seq">>, ResultJson)
+ ),
+ ?assertNotEqual(
+ undefined,
+ couch_util:get_value(<<"offset">>, ResultJson)
+ )
+ end)}.
should_return_correct_id_on_doc_copy(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, _, _, _} = create_doc(Url, "testdoc"),
- {_, _, _, ResultBody1} = test_request:copy(Url ++ "/testdoc/",
- [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]),
- {ResultJson1} = ?JSON_DECODE(ResultBody1),
- Id1 = couch_util:get_value(<<"id">>, ResultJson1),
-
- {_, _, _, ResultBody2} = test_request:copy(Url ++ "/testdoc/",
- [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]),
- {ResultJson2} = ?JSON_DECODE(ResultBody2),
- Id2 = couch_util:get_value(<<"id">>, ResultJson2),
- [
- ?assertEqual(<<102,111,111,229,149,138,98,97,114>>, Id1),
- ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2)
- ]
- end)}.
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, _, _, _} = create_doc(Url, "testdoc"),
+ {_, _, _, ResultBody1} = test_request:copy(
+ Url ++ "/testdoc/",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]
+ ),
+ {ResultJson1} = ?JSON_DECODE(ResultBody1),
+ Id1 = couch_util:get_value(<<"id">>, ResultJson1),
+
+ {_, _, _, ResultBody2} = test_request:copy(
+ Url ++ "/testdoc/",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER2]
+ ),
+ {ResultJson2} = ?JSON_DECODE(ResultBody2),
+ Id2 = couch_util:get_value(<<"id">>, ResultJson2),
+ [
+ ?assertEqual(<<102, 111, 111, 229, 149, 138, 98, 97, 114>>, Id1),
+ ?assertEqual(<<"foo/bar#baz?pow:fiz">>, Id2)
+ ]
+ end)}.
should_return_only_one_ok_on_doc_copy(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, _, _, _} = create_doc(Url, "testdoc"),
- {_, _, _, ResultBody} = test_request:copy(Url ++ "/testdoc",
- [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]),
- {ResultJson} = jiffy:decode(ResultBody),
- NumOks = length(lists:filter(fun({Key, _Value}) -> Key == <<"ok">> end, ResultJson)),
- [
- ?assertEqual(1, NumOks)
- ]
- end)}.
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, _, _, _} = create_doc(Url, "testdoc"),
+ {_, _, _, ResultBody} = test_request:copy(
+ Url ++ "/testdoc",
+ [?CONTENT_JSON, ?AUTH, ?DESTHEADER1]
+ ),
+ {ResultJson} = jiffy:decode(ResultBody),
+ NumOks = length(lists:filter(fun({Key, _Value}) -> Key == <<"ok">> end, ResultJson)),
+ [
+ ?assertEqual(1, NumOks)
+ ]
+ end)}.
attachment_doc() ->
{ok, Data} = file:read_file(?FIXTURE_TXT),
{[
- {<<"_attachments">>, {[
- {<<"file.erl">>, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"data">>, base64:encode(Data)}
- ]}
- }]}}
+ {<<"_attachments">>,
+ {[
+ {<<"file.erl">>,
+ {[
+ {<<"content_type">>, <<"text/plain">>},
+ {<<"data">>, base64:encode(Data)}
+ ]}}
+ ]}}
]}.
-
should_return_400_for_bad_engine(_) ->
- {timeout, ?TIMEOUT, ?_test(begin
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
- BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- Url = BaseUrl ++ "?engine=cowabunga",
- {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
- ?assertEqual(400, Status)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ TmpDb = ?tempdb(),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+ BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+ Url = BaseUrl ++ "?engine=cowabunga",
+ {ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+ ?assertEqual(400, Status)
+ end)}.
should_not_change_db_proper_after_rewriting_shardmap(_) ->
- {timeout, ?TIMEOUT, ?_test(begin
- TmpDb = ?tempdb(),
- Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
- Port = mochiweb_socket_server:get(chttpd, port),
-
- BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
- Url = BaseUrl ++ "?partitioned=true&q=1",
- {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
-
- ShardDbName = ?l2b(config:get("mem3", "shards_db", "_dbs")),
- {ok, ShardDb} = mem3_util:ensure_exists(ShardDbName),
- {ok, #doc{body = {Props}}} = couch_db:open_doc(
- ShardDb, TmpDb, [ejson_body]),
- Shards = mem3_util:build_shards(TmpDb, Props),
-
- {Prop2} = ?JSON_DECODE(?JSON_ENCODE({Props})),
- Shards2 = mem3_util:build_shards(TmpDb, Prop2),
- ?assertEqual(Shards2, Shards)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ TmpDb = ?tempdb(),
+ Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
+ Port = mochiweb_socket_server:get(chttpd, port),
+
+ BaseUrl = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
+ Url = BaseUrl ++ "?partitioned=true&q=1",
+ {ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
+
+ ShardDbName = ?l2b(config:get("mem3", "shards_db", "_dbs")),
+ {ok, ShardDb} = mem3_util:ensure_exists(ShardDbName),
+ {ok, #doc{body = {Props}}} = couch_db:open_doc(
+ ShardDb, TmpDb, [ejson_body]
+ ),
+ Shards = mem3_util:build_shards(TmpDb, Props),
+
+ {Prop2} = ?JSON_DECODE(?JSON_ENCODE({Props})),
+ Shards2 = mem3_util:build_shards(TmpDb, Prop2),
+ ?assertEqual(Shards2, Shards)
+ end)}.
should_succeed_on_all_docs_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
- [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++ "/_all_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_all_docs_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
- [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++ "/_all_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_all_docs_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]},
- {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++ "/_all_docs/queries/",
- [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc =
+ "{\"queries\": [{\"keys\": [ \"testdoc3\", \"testdoc8\"]},\n"
+ " {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++ "/_all_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end)}.
should_succeed_on_design_docs_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",
- \"_design/ddoc8\"]}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++
- "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc =
+ "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",\n"
+ " \"_design/ddoc8\"]}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++
+ "/_design_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_design_docs_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++
- "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++
+ "/_design_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_design_docs_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",
- \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++
- "/_design_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "_design/ddoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc =
+ "{\"queries\": [{\"keys\": [ \"_design/ddoc3\",\n"
+ " \"_design/ddoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++
+ "/_design_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end)}.
should_succeed_on_local_docs_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"keys\":
- [ \"_local/doc3\", \"_local/doc8\"]}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++ "/_local_docs/queries/",
- [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc =
+ "{\"queries\": [{\"keys\":\n"
+ " [ \"_local/doc3\", \"_local/doc8\"]}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++ "/_local_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_local_docs_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++
- "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++
+ "/_local_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_local_docs_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- QueryDoc = "{\"queries\": [{\"keys\": [ \"_local/doc3\",
- \"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++
- "/_local_docs/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "_local/doc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ QueryDoc =
+ "{\"queries\": [{\"keys\": [ \"_local/doc3\",\n"
+ " \"_local/doc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++
+ "/_local_docs/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end)}.
should_return_headers_after_starting_continious(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, _, _, Bin} =
- test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]),
-
- Parts = binary:split(Bin, <<"\n">>, [global]),
- %% we should receive at least one part even when timeout=1
- ?assertNotEqual([], Parts)
- end)}.
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, _, _, Bin} =
+ test_request:get(Url ++ "/_changes?feed=live&timeout=1", [?AUTH]),
+
+ Parts = binary:split(Bin, <<"\n">>, [global]),
+ %% we should receive at least one part even when timeout=1
+ ?assertNotEqual([], Parts)
+ end)}.
wait_non_empty_chunk(Url) ->
test_util:wait(fun() ->
diff --git a/src/chttpd/test/eunit/chttpd_dbs_info_test.erl b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
index 5b61d8831..b4027eead 100644
--- a/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
+++ b/src/chttpd/test/eunit/chttpd_dbs_info_test.erl
@@ -20,10 +20,9 @@
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
Url = lists:concat(["http://", Addr, ":", Port, "/"]),
@@ -38,7 +37,7 @@ teardown(Url) ->
Db2Url = lists:concat([Url, "db2"]),
delete_db(Db1Url),
delete_db(Db2Url),
- ok = config:delete("admins", ?USER, _Persist=false).
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
@@ -52,10 +51,12 @@ dbs_info_test_() ->
"chttpd dbs info tests",
{
setup,
- fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_error_for_get_db_info/1,
fun should_return_dbs_info_for_single_db/1,
@@ -68,102 +69,136 @@ dbs_info_test_() ->
}
}.
-
should_return_error_for_get_db_info(Url) ->
?_test(begin
- {ok, Code, _, ResultBody} = test_request:get(Url ++ "/_dbs_info?"
- ++ "keys=[\"db1\"]", [?CONTENT_JSON, ?AUTH]),
+ {ok, Code, _, ResultBody} = test_request:get(
+ Url ++ "/_dbs_info?" ++
+ "keys=[\"db1\"]",
+ [?CONTENT_JSON, ?AUTH]
+ ),
{Body} = jiffy:decode(ResultBody),
[
- ?assertEqual(<<"method_not_allowed">>,
- couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(
+ <<"method_not_allowed">>,
+ couch_util:get_value(<<"error">>, Body)
+ ),
?assertEqual(405, Code)
]
end).
-
should_return_dbs_info_for_single_db(Url) ->
?_test(begin
NewDoc = "{\"keys\": [\"db1\"]}",
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
BodyJson = jiffy:decode(ResultBody),
{Db1Data} = lists:nth(1, BodyJson),
[
- ?assertEqual(<<"db1">>,
- couch_util:get_value(<<"key">>, Db1Data)),
- ?assertNotEqual(undefined,
- couch_util:get_value(<<"info">>, Db1Data))
+ ?assertEqual(
+ <<"db1">>,
+ couch_util:get_value(<<"key">>, Db1Data)
+ ),
+ ?assertNotEqual(
+ undefined,
+ couch_util:get_value(<<"info">>, Db1Data)
+ )
]
end).
-
should_return_dbs_info_for_multiple_dbs(Url) ->
?_test(begin
NewDoc = "{\"keys\": [\"db1\", \"db2\"]}",
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
BodyJson = jiffy:decode(ResultBody),
{Db1Data} = lists:nth(1, BodyJson),
{Db2Data} = lists:nth(2, BodyJson),
[
- ?assertEqual(<<"db1">>,
- couch_util:get_value(<<"key">>, Db1Data)),
- ?assertNotEqual(undefined,
- couch_util:get_value(<<"info">>, Db1Data)),
- ?assertEqual(<<"db2">>,
- couch_util:get_value(<<"key">>, Db2Data)),
- ?assertNotEqual(undefined,
- couch_util:get_value(<<"info">>, Db2Data))
+ ?assertEqual(
+ <<"db1">>,
+ couch_util:get_value(<<"key">>, Db1Data)
+ ),
+ ?assertNotEqual(
+ undefined,
+ couch_util:get_value(<<"info">>, Db1Data)
+ ),
+ ?assertEqual(
+ <<"db2">>,
+ couch_util:get_value(<<"key">>, Db2Data)
+ ),
+ ?assertNotEqual(
+ undefined,
+ couch_util:get_value(<<"info">>, Db2Data)
+ )
]
end).
-
should_return_error_for_exceeded_keys(Url) ->
?_test(begin
NewDoc = "{\"keys\": [\"db1\", \"db2\"]}",
ok = config:set("chttpd", "max_db_number_for_dbs_info_req", "1"),
- {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ {ok, Code, _, ResultBody} = test_request:post(
+ Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
{Body} = jiffy:decode(ResultBody),
ok = config:delete("chttpd", "max_db_number_for_dbs_info_req"),
[
- ?assertEqual(<<"bad_request">>,
- couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(
+ <<"bad_request">>,
+ couch_util:get_value(<<"error">>, Body)
+ ),
?assertEqual(400, Code)
]
end).
-
should_return_error_for_missing_keys(Url) ->
?_test(begin
NewDoc = "{\"missingkeys\": [\"db1\", \"db2\"]}",
- {ok, Code, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ {ok, Code, _, ResultBody} = test_request:post(
+ Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
{Body} = jiffy:decode(ResultBody),
[
- ?assertEqual(<<"bad_request">>,
- couch_util:get_value(<<"error">>, Body)),
+ ?assertEqual(
+ <<"bad_request">>,
+ couch_util:get_value(<<"error">>, Body)
+ ),
?assertEqual(400, Code)
]
end).
-
should_return_dbs_info_for_dbs_with_mixed_state(Url) ->
?_test(begin
NewDoc = "{\"keys\": [\"db1\", \"noexisteddb\"]}",
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_dbs_info/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_dbs_info/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
Json = jiffy:decode(ResultBody),
{Db1Data} = lists:nth(1, Json),
{Db2Data} = lists:nth(2, Json),
[
?assertEqual(
- <<"db1">>, couch_util:get_value(<<"key">>, Db1Data)),
- ?assertNotEqual(undefined,
- couch_util:get_value(<<"info">>, Db1Data)),
+ <<"db1">>, couch_util:get_value(<<"key">>, Db1Data)
+ ),
+ ?assertNotEqual(
+ undefined,
+ couch_util:get_value(<<"info">>, Db1Data)
+ ),
?assertEqual(
- <<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)),
+ <<"noexisteddb">>, couch_util:get_value(<<"key">>, Db2Data)
+ ),
?assertEqual(undefined, couch_util:get_value(<<"info">>, Db2Data))
]
end).
diff --git a/src/chttpd/test/eunit/chttpd_delayed_test.erl b/src/chttpd/test/eunit/chttpd_delayed_test.erl
index 63e6cb0e5..4b0fbd55b 100644
--- a/src/chttpd/test/eunit/chttpd_delayed_test.erl
+++ b/src/chttpd/test/eunit/chttpd_delayed_test.erl
@@ -7,17 +7,20 @@
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(DDOC, "{\"_id\": \"_design/bar\", \"views\": {\"baz\":
- {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}").
+-define(DDOC,
+ "{\"_id\": \"_design/bar\", \"views\": {\"baz\":\n"
+ " {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}"
+).
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
-define(i2l(I), integer_to_list(I)).
--define(TIMEOUT, 60). % seconds
+% seconds
+-define(TIMEOUT, 60).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
- ok = config:set("chttpd", "buffer_response", "true", _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
+ ok = config:set("chttpd", "buffer_response", "true", _Persist = false),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -27,26 +30,26 @@ setup() ->
teardown(Url) ->
delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false).
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
all_test_() ->
{
"chttpd delay tests",
{
setup,
- fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun test_buffer_response_all_docs/1,
fun test_buffer_response_changes/1
@@ -55,18 +58,15 @@ all_test_() ->
}
}.
-
test_buffer_response_all_docs(Url) ->
assert_successful_response(Url ++ "/_all_docs").
-
test_buffer_response_changes(Url) ->
assert_successful_response(Url ++ "/_changes").
-
assert_successful_response(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {ok, Code, _Headers, _Body} = test_request:get(Url, [?AUTH]),
- ?assertEqual(200, Code)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, Code, _Headers, _Body} = test_request:get(Url, [?AUTH]),
+ ?assertEqual(200, Code)
+ end)}.
diff --git a/src/chttpd/test/eunit/chttpd_error_info_tests.erl b/src/chttpd/test/eunit/chttpd_error_info_tests.erl
index fdb015c08..aefb3bdc5 100644
--- a/src/chttpd/test/eunit/chttpd_error_info_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_error_info_tests.erl
@@ -14,7 +14,6 @@
-include_lib("eunit/include/eunit.hrl").
-
error_info_test() ->
Error = <<"error">>,
Reason = <<"reason">>,
@@ -75,7 +74,7 @@ error_info_test() ->
{
file_exists,
{412, <<"file_exists">>,
- <<"The database could not be created, the file already exists.">>}
+ <<"The database could not be created, the file already exists.">>}
},
{
{error, {nodedown, Reason}}, {412, <<"nodedown">>, Reason}
@@ -102,18 +101,18 @@ error_info_test() ->
},
{
requested_range_not_satisfiable,
- {416, <<"requested_range_not_satisfiable">>,
- <<"Requested range not satisfiable">>}
+ {416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>}
},
{
{error, {illegal_database_name, <<"foo">>}},
- {400, <<"illegal_database_name">>,
- <<"Name: 'foo'. Only lowercase characters (a-z), digits (0-9), and any of"
- " the characters _, $, (, ), +, -, and / are allowed."
- " Must begin with a letter.">>}
+ {400, <<"illegal_database_name">>, <<
+ "Name: 'foo'. Only lowercase characters (a-z), digits (0-9), and any of"
+ " the characters _, $, (, ), +, -, and / are allowed."
+ " Must begin with a letter."
+ >>}
},
{
- {Error, {illegal_docid,1}},
+ {Error, {illegal_docid, 1}},
{400, <<"illegal_docid">>, 1}
},
{
@@ -126,20 +125,21 @@ error_info_test() ->
},
{
not_implemented,
- {501, <<"not_implemented">>,
- <<"this feature is not yet implemented">>}
+ {501, <<"not_implemented">>, <<"this feature is not yet implemented">>}
},
{
timeout,
- {500, <<"timeout">>,
- <<"The request could not be processed in a reasonable"
- " amount of time.">>}
+ {500, <<"timeout">>, <<
+ "The request could not be processed in a reasonable"
+ " amount of time."
+ >>}
},
{
{timeout, Error},
- {500, <<"timeout">>,
- <<"The request could not be processed in a reasonable"
- " amount of time.">>}
+ {500, <<"timeout">>, <<
+ "The request could not be processed in a reasonable"
+ " amount of time."
+ >>}
},
{
{Error, null},
@@ -163,6 +163,9 @@ error_info_test() ->
}
],
- lists:foreach(fun({Arg, Result}) ->
- ?assertEqual(Result, chttpd:error_info(Arg))
- end, ArgResult).
+ lists:foreach(
+ fun({Arg, Result}) ->
+ ?assertEqual(Result, chttpd:error_info(Arg))
+ end,
+ ArgResult
+ ).
diff --git a/src/chttpd/test/eunit/chttpd_external_test.erl b/src/chttpd/test/eunit/chttpd_external_test.erl
index 4ed31891d..cd691fbaa 100644
--- a/src/chttpd/test/eunit/chttpd_external_test.erl
+++ b/src/chttpd/test/eunit/chttpd_external_test.erl
@@ -15,7 +15,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup_mock() ->
ok = meck:new([config, couch], [passthrough]),
ok = meck:expect(couch_db, is_clustered, 1, false),
@@ -91,9 +90,12 @@ json_req_obj_remote_httpd_req_test_() ->
should_convert_req_to_json_obj(HttpdReq) ->
Expect = expect(),
{Result} = chttpd_external:json_req_obj(HttpdReq, <<"fake">>),
- lists:map(fun({K, V}) ->
- {K, ?_assertEqual(couch_util:get_value(K, Expect), V)}
- end, Result).
+ lists:map(
+ fun({K, V}) ->
+ {K, ?_assertEqual(couch_util:get_value(K, Expect), V)}
+ end,
+ Result
+ ).
expect() ->
[
@@ -110,10 +112,11 @@ expect() ->
{<<"peer">>, <<"127.0.0.1">>},
{<<"form">>, {[]}},
{<<"cookie">>, {[]}},
- {<<"userCtx">>, {[
- {<<"db">>,<<"fake">>},
- {<<"name">>,null},
- {<<"roles">>,[]}
- ]}},
+ {<<"userCtx">>,
+ {[
+ {<<"db">>, <<"fake">>},
+ {<<"name">>, null},
+ {<<"roles">>, []}
+ ]}},
{<<"secObj">>, []}
].
diff --git a/src/chttpd/test/eunit/chttpd_handlers_tests.erl b/src/chttpd/test/eunit/chttpd_handlers_tests.erl
index 649d82e86..7cca6659d 100644
--- a/src/chttpd/test/eunit/chttpd_handlers_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_handlers_tests.erl
@@ -15,7 +15,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup() ->
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -25,7 +24,6 @@ setup() ->
teardown(_Url) ->
ok.
-
replicate_test_() ->
{
"_replicate",
@@ -35,7 +33,8 @@ replicate_test_() ->
fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_escape_dbname_on_replicate/1
]
@@ -43,16 +42,17 @@ replicate_test_() ->
}
}.
-
should_escape_dbname_on_replicate(Url) ->
?_test(
begin
UrlBin = ?l2b(Url),
- Request = couch_util:json_encode({[
- {<<"source">>, <<UrlBin/binary, "/foo%2Fbar">>},
- {<<"target">>, <<"bar/baz">>},
- {<<"create_target">>, true}
- ]}),
+ Request = couch_util:json_encode(
+ {[
+ {<<"source">>, <<UrlBin/binary, "/foo%2Fbar">>},
+ {<<"target">>, <<"bar/baz">>},
+ {<<"create_target">>, true}
+ ]}
+ ),
{ok, 200, _, Body} = request_replicate(Url ++ "/_replicate", Request),
JSON = couch_util:json_decode(Body),
@@ -60,8 +60,8 @@ should_escape_dbname_on_replicate(Url) ->
Target = json_value(JSON, [<<"target">>, <<"url">>]),
?assertEqual(<<UrlBin/binary, "/foo%2Fbar">>, Source),
?assertEqual(<<UrlBin/binary, "/bar%2Fbaz">>, Target)
- end).
-
+ end
+ ).
json_value(JSON, Keys) ->
couch_util:get_nested_json_value(JSON, Keys).
@@ -80,8 +80,9 @@ request(Method, Url, Headers, Body, {M, F}, MockFun) ->
Result = test_request:Method(Url, Headers, Body),
?assert(meck:validate(M)),
Result
- catch Kind:Reason ->
- {Kind, Reason}
+ catch
+ Kind:Reason ->
+ {Kind, Reason}
after
meck:unload(M)
end.
diff --git a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
index d53d370f8..3eda08ae0 100644
--- a/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
+++ b/src/chttpd/test/eunit/chttpd_open_revs_error_test.erl
@@ -19,12 +19,11 @@
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(CONTENT_MULTI_FORM, {"Content-Type",
- "multipart/form-data;boundary=\"bound\""}).
+-define(CONTENT_MULTI_FORM, {"Content-Type", "multipart/form-data;boundary=\"bound\""}).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -36,16 +35,18 @@ setup() ->
teardown(Url) ->
delete_db(Url),
(catch meck:unload(fabric)),
- ok = config:delete("admins", ?USER, _Persist=false).
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
create_doc(Url, Id) ->
- test_request:put(Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
+ test_request:put(
+ Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH],
+ "{\"mr\": \"rockoartischocko\"}"
+ ).
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
@@ -59,7 +60,8 @@ open_revs_error_test_() ->
fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_503_error_for_open_revs_get/1,
fun should_return_503_error_for_open_revs_post_form/1
@@ -73,31 +75,41 @@ should_return_503_error_for_open_revs_get(Url) ->
{Json} = ?JSON_DECODE(Body),
Ref = couch_util:get_value(<<"rev">>, Json, undefined),
mock_open_revs({error, all_workers_died}),
- {ok, Code, _, _} = test_request:get(Url ++
- "/testdoc?rev=" ++ ?b2l(Ref), [?AUTH]),
+ {ok, Code, _, _} = test_request:get(
+ Url ++
+ "/testdoc?rev=" ++ ?b2l(Ref),
+ [?AUTH]
+ ),
?_assertEqual(503, Code).
should_return_503_error_for_open_revs_post_form(Url) ->
Port = mochiweb_socket_server:get(chttpd, port),
- Host = lists:concat([ "http://127.0.0.1:", Port]),
+ Host = lists:concat(["http://127.0.0.1:", Port]),
Referer = {"Referer", Host},
Body1 = "{\"body\":\"This is a body.\"}",
DocBeg = "--bound\r\nContent-Disposition: form-data; name=\"_doc\"\r\n\r\n",
DocRev = "--bound\r\nContent-Disposition: form-data; name=\"_rev\"\r\n\r\n",
- DocRest = "\r\n--bound\r\nContent-Disposition:"
+ DocRest =
+ "\r\n--bound\r\nContent-Disposition:"
"form-data; name=\"_attachments\"; filename=\"file.txt\"\r\n"
"Content-Type: text/plain\r\n\r\ncontents of file.txt\r\n\r\n"
"--bound--",
Doc1 = lists:concat([DocBeg, Body1, DocRest]),
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/" ++ "RevDoc",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc1),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/" ++ "RevDoc",
+ [?CONTENT_MULTI_FORM, ?AUTH, Referer],
+ Doc1
+ ),
{Json} = ?JSON_DECODE(ResultBody),
Ref = couch_util:get_value(<<"rev">>, Json, undefined),
- Doc2 = lists:concat([DocRev, ?b2l(Ref) , DocRest]),
+ Doc2 = lists:concat([DocRev, ?b2l(Ref), DocRest]),
mock_open_revs({error, all_workers_died}),
- {ok, Code, _, ResultBody1} = test_request:post(Url ++ "/" ++ "RevDoc",
- [?CONTENT_MULTI_FORM, ?AUTH, Referer], Doc2),
+ {ok, Code, _, ResultBody1} = test_request:post(
+ Url ++ "/" ++ "RevDoc",
+ [?CONTENT_MULTI_FORM, ?AUTH, Referer],
+ Doc2
+ ),
{Json1} = ?JSON_DECODE(ResultBody1),
ErrorMessage = couch_util:get_value(<<"error">>, Json1),
[
diff --git a/src/chttpd/test/eunit/chttpd_plugin_tests.erl b/src/chttpd/test/eunit/chttpd_plugin_tests.erl
index 36572a419..effef589a 100644
--- a/src/chttpd/test/eunit/chttpd_plugin_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_plugin_tests.erl
@@ -20,7 +20,8 @@
before_serve_file/5
]).
--export([ %% couch_epi_plugin behaviour
+%% couch_epi_plugin behaviour
+-export([
app/0,
providers/0,
services/0,
@@ -43,7 +44,6 @@ data_subscriptions() -> [].
processes() -> [].
notify(_, _, _) -> ok.
-
setup() ->
couch_tests:setup([
couch_epi_dispatch:dispatch(chttpd, ?MODULE)
@@ -57,7 +57,7 @@ before_request({false, Id}) -> [{false, Id}];
before_request({fail, Id}) -> throw({before_request, Id}).
after_request({true, Id}, A) -> [{true, [{after_request, Id}]}, A];
-after_request({false, Id}, A) -> [{false, Id}, A];
+after_request({false, Id}, A) -> [{false, Id}, A];
after_request({fail, Id}, _A) -> throw({after_request, Id}).
handle_error({true, Id}) -> [{true, [{handle_error, Id}]}];
@@ -82,7 +82,9 @@ callback_test_() ->
{
"callback tests",
{
- setup, fun setup/0, fun teardown/1,
+ setup,
+ fun setup/0,
+ fun teardown/1,
[
fun before_request_match/0,
fun before_request_no_match/0,
@@ -107,81 +109,92 @@ callback_test_() ->
}
}.
-
before_request_match() ->
?assertEqual(
{ok, {true, [{before_request, foo}]}},
- chttpd_plugin:before_request({true, foo})).
+ chttpd_plugin:before_request({true, foo})
+ ).
before_request_no_match() ->
?assertEqual(
{ok, {false, foo}},
- chttpd_plugin:before_request({false, foo})).
+ chttpd_plugin:before_request({false, foo})
+ ).
before_request_throw() ->
?assertThrow(
{before_request, foo},
- chttpd_plugin:before_request({fail, foo})).
-
+ chttpd_plugin:before_request({fail, foo})
+ ).
after_request_match() ->
?assertEqual(
{ok, bar},
- chttpd_plugin:after_request({true, foo}, bar)).
+ chttpd_plugin:after_request({true, foo}, bar)
+ ).
after_request_no_match() ->
?assertEqual(
{ok, bar},
- chttpd_plugin:after_request({false, foo}, bar)).
+ chttpd_plugin:after_request({false, foo}, bar)
+ ).
after_request_throw() ->
?assertThrow(
{after_request, foo},
- chttpd_plugin:after_request({fail, foo}, bar)).
-
+ chttpd_plugin:after_request({fail, foo}, bar)
+ ).
handle_error_match() ->
?assertEqual(
{true, [{handle_error, foo}]},
- chttpd_plugin:handle_error({true, foo})).
+ chttpd_plugin:handle_error({true, foo})
+ ).
handle_error_no_match() ->
?assertEqual(
{false, foo},
- chttpd_plugin:handle_error({false, foo})).
+ chttpd_plugin:handle_error({false, foo})
+ ).
handle_error_throw() ->
?assertThrow(
{handle_error, foo},
- chttpd_plugin:handle_error({fail, foo})).
+ chttpd_plugin:handle_error({fail, foo})
+ ).
before_response_match() ->
?assertEqual(
{ok, {{true, [{before_response, foo}]}, 1, 2, 3}},
- chttpd_plugin:before_response({true, foo}, 1, 2, 3)).
+ chttpd_plugin:before_response({true, foo}, 1, 2, 3)
+ ).
before_response_no_match() ->
?assertEqual(
{ok, {{false, foo}, 1, 2, 3}},
- chttpd_plugin:before_response({false, foo}, 1, 2, 3)).
+ chttpd_plugin:before_response({false, foo}, 1, 2, 3)
+ ).
before_response_throw() ->
?assertThrow(
{before_response, foo},
- chttpd_plugin:before_response({fail, foo}, 1, 2, 3)).
-
+ chttpd_plugin:before_response({fail, foo}, 1, 2, 3)
+ ).
before_serve_file_match() ->
?assertEqual(
{ok, {{true, [{before_serve_file, foo}]}, 1, 2, 3, 4}},
- chttpd_plugin:before_serve_file({true, foo}, 1, 2, 3, 4)).
+ chttpd_plugin:before_serve_file({true, foo}, 1, 2, 3, 4)
+ ).
before_serve_file_no_match() ->
?assertEqual(
{ok, {{false, foo}, 1, 2, 3, 4}},
- chttpd_plugin:before_serve_file({false, foo}, 1, 2, 3, 4)).
+ chttpd_plugin:before_serve_file({false, foo}, 1, 2, 3, 4)
+ ).
before_serve_file_throw() ->
?assertThrow(
before_serve_file,
- chttpd_plugin:before_serve_file({fail, foo}, 1, 2, 3, 4)).
+ chttpd_plugin:before_serve_file({fail, foo}, 1, 2, 3, 4)
+ ).
diff --git a/src/chttpd/test/eunit/chttpd_prefer_header_test.erl b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
index 1b1195418..55c9f350e 100644
--- a/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
+++ b/src/chttpd/test/eunit/chttpd_prefer_header_test.erl
@@ -17,52 +17,46 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-
mock_request(ExcludeHeader) ->
Headers = mochiweb_headers:make(ExcludeHeader),
MochiReq = mochiweb_request:new(nil, 'GET', "/", {1, 1}, Headers),
MochiReq:cleanup(),
#httpd{mochi_req = MochiReq}.
-
default_headers() ->
[
- {"Cache-Control","must-revalidate"},
- {"Content-Type","application/json"},
+ {"Cache-Control", "must-revalidate"},
+ {"Content-Type", "application/json"},
{"Content-Length", "100"},
- {"ETag","\"12343\""},
- {"X-Couch-Request-ID","7bd1adab86"},
- {"X-CouchDB-Body-Time","0"},
+ {"ETag", "\"12343\""},
+ {"X-Couch-Request-ID", "7bd1adab86"},
+ {"X-CouchDB-Body-Time", "0"},
{"Vary", "Accept-Encoding"},
- {"Server","CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"}
+ {"Server", "CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"}
].
-
minimal_options_headers() ->
[
- {"Cache-Control","must-revalidate"},
- {"Content-Type","application/json"},
+ {"Cache-Control", "must-revalidate"},
+ {"Content-Type", "application/json"},
{"Content-Length", "100"},
- {"ETag","\"12343\""},
+ {"ETag", "\"12343\""},
{"Vary", "Accept-Encoding"},
- {"Server","CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"}
+ {"Server", "CouchDB/2.1.0-f1a1d7f1c (Erlang OTP/19)"}
].
-
default_no_exclude_header_test() ->
Headers = chttpd_prefer_header:maybe_return_minimal(
mock_request([]),
default_headers()
- ),
+ ),
?assertEqual(default_headers(), Headers).
-
unsupported_exclude_header_test() ->
Req = mock_request([{"prefer", "Wrong"}]),
Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
?assertEqual(default_headers(), Headers).
-
empty_header_test() ->
Req = mock_request([{"prefer", ""}]),
Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
@@ -70,24 +64,20 @@ empty_header_test() ->
setup_all() ->
ok = meck:new(config),
- ok = meck:expect(config, get, fun("chttpd", "prefer_minimal", _) ->
+ ok = meck:expect(config, get, fun("chttpd", "prefer_minimal", _) ->
"Cache-Control, Content-Length, Content-Type, ETag, Server, Vary"
end),
ok.
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([config]).
-
teardown(_) ->
ok.
-
exclude_headers_test_() ->
{
"Test Prefer headers",
@@ -108,19 +98,16 @@ exclude_headers_test_() ->
}
}.
-
minimal_options(_) ->
Req = mock_request([{"Prefer", "return=minimal"}]),
Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
?_assertEqual(minimal_options_headers(), Headers).
-
minimal_options_check_header_case(_) ->
Req = mock_request([{"prefer", "return=minimal"}]),
Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
?_assertEqual(minimal_options_headers(), Headers).
-
minimal_options_check_header_value_case(_) ->
Req = mock_request([{"prefer", "RETURN=MINIMAL"}]),
Headers = chttpd_prefer_header:maybe_return_minimal(Req, default_headers()),
diff --git a/src/chttpd/test/eunit/chttpd_purge_tests.erl b/src/chttpd/test/eunit/chttpd_purge_tests.erl
index ab435682a..a8e1a955d 100644
--- a/src/chttpd/test/eunit/chttpd_purge_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_purge_tests.erl
@@ -12,20 +12,17 @@
-module(chttpd_purge_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(USER, "chttpd_db_test_admin").
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -33,35 +30,38 @@ setup() ->
create_db(Url),
Url.
-
teardown(Url) ->
delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false).
-
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
create_doc(Url, Id) ->
- test_request:put(Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
+ test_request:put(
+ Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH],
+ "{\"mr\": \"rockoartischocko\"}"
+ ).
create_doc(Url, Id, Content) ->
- test_request:put(Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH], "{\"mr\": \"" ++ Content ++ "\"}").
-
+ test_request:put(
+ Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH],
+ "{\"mr\": \"" ++ Content ++ "\"}"
+ ).
create_docs(Url, Docs) ->
- test_request:post(Url ++ "/_bulk_docs",
- [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({[{docs, Docs}]})).
-
+ test_request:post(
+ Url ++ "/_bulk_docs",
+ [?CONTENT_JSON, ?AUTH],
+ ?JSON_ENCODE({[{docs, Docs}]})
+ ).
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
purge_test_() ->
{
"chttpd db tests",
@@ -89,24 +89,25 @@ purge_test_() ->
}
}.
-
test_empty_purge_request(Url) ->
?_test(begin
IdsRevs = "{}",
- {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status, _, ResultBody} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
?assert(Status =:= 201 orelse Status =:= 202),
?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>,{[]}}
- ]},
- ResultJson
- )
+ {[
+ {<<"purge_seq">>, null},
+ {<<"purged">>, {[]}}
+ ]},
+ ResultJson
+ )
end).
-
test_ok_purge_request(Url) ->
?_test(begin
{ok, _, _, Body} = create_doc(Url, "doc1"),
@@ -119,230 +120,300 @@ test_ok_purge_request(Url) ->
{Json3} = ?JSON_DECODE(Body3),
Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
- IdsRevsEJson = {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, [Rev2]},
- {<<"doc3">>, [Rev3]}
- ]},
+ IdsRevsEJson =
+ {[
+ {<<"doc1">>, [Rev1]},
+ {<<"doc2">>, [Rev2]},
+ {<<"doc3">>, [Rev3]}
+ ]},
IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
- {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status, _, ResultBody} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
?assert(Status =:= 201 orelse Status =:= 202),
?assertEqual(
- {[
- {<<"purge_seq">>, null},
- {<<"purged">>, {[
+ {[
+ {<<"purge_seq">>, null},
+ {<<"purged">>,
+ {[
{<<"doc1">>, [Rev1]},
{<<"doc2">>, [Rev2]},
{<<"doc3">>, [Rev3]}
]}}
- ]},
- ResultJson
- )
+ ]},
+ ResultJson
+ )
end).
-
test_ok_purge_request_with_101_docid(Url) ->
?_test(begin
PurgedDocsNum = 101,
- Docs = lists:foldl(fun(I, Acc) ->
- Id = list_to_binary(integer_to_list(I)),
- Doc = {[{<<"_id">>, Id}, {value, I}]},
- [Doc | Acc]
- end, [], lists:seq(1, PurgedDocsNum)),
+ Docs = lists:foldl(
+ fun(I, Acc) ->
+ Id = list_to_binary(integer_to_list(I)),
+ Doc = {[{<<"_id">>, Id}, {value, I}]},
+ [Doc | Acc]
+ end,
+ [],
+ lists:seq(1, PurgedDocsNum)
+ ),
{ok, _, _, Body} = create_docs(Url, Docs),
BodyJson = ?JSON_DECODE(Body),
- PurgeBody = lists:map(fun({DocResp}) ->
- Id = couch_util:get_value(<<"id">>, DocResp, undefined),
- Rev = couch_util:get_value(<<"rev">>, DocResp, undefined),
- {Id, [Rev]}
- end, BodyJson),
+ PurgeBody = lists:map(
+ fun({DocResp}) ->
+ Id = couch_util:get_value(<<"id">>, DocResp, undefined),
+ Rev = couch_util:get_value(<<"rev">>, DocResp, undefined),
+ {Id, [Rev]}
+ end,
+ BodyJson
+ ),
ok = config:set("purge", "max_document_id_number", "101"),
try
- {ok, Status, _, _} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({PurgeBody})),
+ {ok, Status, _, _} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ ?JSON_ENCODE({PurgeBody})
+ ),
?assert(Status =:= 201 orelse Status =:= 202)
after
ok = config:delete("purge", "max_document_id_number")
end
end).
-
test_accepted_purge_request(Url) ->
?_test(begin
{ok, _, _, Body} = create_doc(Url, "doc1"),
{Json} = ?JSON_DECODE(Body),
Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
- IdsRevsEJson = {[
- {<<"doc1">>, [Rev1]}
- ]},
+ IdsRevsEJson =
+ {[
+ {<<"doc1">>, [Rev1]}
+ ]},
IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
meck:new(fabric, [passthrough]),
- meck:expect(fabric, purge_docs,
- fun(_, _, _) -> {accepted,[{accepted,[{1,
- <<57,27,64,134,152,18,73,243,40,1,141,214,135,104,79,188>>}]}]}
+ meck:expect(
+ fabric,
+ purge_docs,
+ fun(_, _, _) ->
+ {accepted, [
+ {accepted, [
+ {1,
+ <<57, 27, 64, 134, 152, 18, 73, 243, 40, 1, 141, 214, 135, 104, 79,
+ 188>>}
+ ]}
+ ]}
end
),
- {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status, _, ResultBody} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
meck:unload(fabric),
?assert(Status =:= 202),
?assertEqual(
{[
{<<"purge_seq">>, null},
- {<<"purged">>, {[
- {<<"doc1">>, [Rev1]}
- ]}}
+ {<<"purged">>,
+ {[
+ {<<"doc1">>, [Rev1]}
+ ]}}
]},
ResultJson
)
end).
-
test_partial_purge_request(Url) ->
?_test(begin
{ok, _, _, Body} = create_doc(Url, "doc1"),
{Json} = ?JSON_DECODE(Body),
Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
- NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",
- \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},
- \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
- {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ NewDoc =
+ "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",\n"
+ " \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},\n"
+ " \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
+ {ok, _, _, _} = test_request:post(
+ Url ++ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
IdsRevsEJson = {[{<<"doc1">>, [Rev1]}]},
IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
- {ok, Status, _, ResultBody} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status, _, ResultBody} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
?assert(Status =:= 201 orelse Status =:= 202),
?assertEqual(
{[
{<<"purge_seq">>, null},
- {<<"purged">>, {[
- {<<"doc1">>, [Rev1]}
- ]}}
+ {<<"purged">>,
+ {[
+ {<<"doc1">>, [Rev1]}
+ ]}}
]},
ResultJson
),
- {ok, Status2, _, ResultBody2} = test_request:get(Url
- ++ "/doc1/", [?AUTH]),
+ {ok, Status2, _, ResultBody2} = test_request:get(
+ Url ++
+ "/doc1/",
+ [?AUTH]
+ ),
{Json2} = ?JSON_DECODE(ResultBody2),
Content = couch_util:get_value(<<"content">>, Json2, undefined),
?assertEqual(<<"updated">>, Content),
?assert(Status2 =:= 200)
end).
-
test_mixed_purge_request(Url) ->
?_test(begin
{ok, _, _, Body} = create_doc(Url, "doc1"),
{Json} = ?JSON_DECODE(Body),
Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
- NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",
- \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},
- \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
- {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ NewDoc =
+ "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",\n"
+ " \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},\n"
+ " \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
+ {ok, _, _, _} = test_request:post(
+ Url ++ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
{ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"),
{ok, _, _, Body3} = create_doc(Url, "doc3", "content3"),
{Json3} = ?JSON_DECODE(Body3),
Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
-
- IdsRevsEJson = {[
- {<<"doc1">>, [Rev1]}, % partial purge
- {<<"doc2">>, [Rev3, Rev1]}, % correct format, but invalid rev
- {<<"doc3">>, [Rev3]} % correct format and rev
- ]},
+ IdsRevsEJson =
+ {[
+ % partial purge
+ {<<"doc1">>, [Rev1]},
+ % correct format, but invalid rev
+ {<<"doc2">>, [Rev3, Rev1]},
+ % correct format and rev
+ {<<"doc3">>, [Rev3]}
+ ]},
IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
- {ok, Status, _, Body4} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status, _, Body4} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
ResultJson = ?JSON_DECODE(Body4),
?assert(Status =:= 201 orelse Status =:= 202),
?assertEqual(
{[
{<<"purge_seq">>, null},
- {<<"purged">>, {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, []},
- {<<"doc3">>, [Rev3]}
- ]}}
+ {<<"purged">>,
+ {[
+ {<<"doc1">>, [Rev1]},
+ {<<"doc2">>, []},
+ {<<"doc3">>, [Rev3]}
+ ]}}
]},
ResultJson
),
- {ok, Status2, _, Body5} = test_request:get(Url
- ++ "/doc1/", [?AUTH]),
+ {ok, Status2, _, Body5} = test_request:get(
+ Url ++
+ "/doc1/",
+ [?AUTH]
+ ),
{Json5} = ?JSON_DECODE(Body5),
Content = couch_util:get_value(<<"content">>, Json5, undefined),
?assertEqual(<<"updated">>, Content),
?assert(Status2 =:= 200)
end).
-
test_overmany_ids_or_revs_purge_request(Url) ->
?_test(begin
{ok, _, _, Body} = create_doc(Url, "doc1"),
{Json} = ?JSON_DECODE(Body),
Rev1 = couch_util:get_value(<<"rev">>, Json, undefined),
- NewDoc = "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",
- \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},
- \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
- {ok, _, _, _} = test_request:post(Url ++ "/_bulk_docs/",
- [?CONTENT_JSON, ?AUTH], NewDoc),
+ NewDoc =
+ "{\"new_edits\": false, \"docs\": [{\"_id\": \"doc1\",\n"
+ " \"_revisions\": {\"start\": 1, \"ids\": [\"12345\", \"67890\"]},\n"
+ " \"content\": \"updated\", \"_rev\": \"" ++ ?b2l(Rev1) ++ "\"}]}",
+ {ok, _, _, _} = test_request:post(
+ Url ++ "/_bulk_docs/",
+ [?CONTENT_JSON, ?AUTH],
+ NewDoc
+ ),
{ok, _, _, _Body2} = create_doc(Url, "doc2", "content2"),
{ok, _, _, Body3} = create_doc(Url, "doc3", "content3"),
{Json3} = ?JSON_DECODE(Body3),
Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
- IdsRevsEJson = {[
- {<<"doc1">>, [Rev1]}, % partial purge
- {<<"doc2">>, [Rev3, Rev1]}, % correct format, but invalid rev
- {<<"doc3">>, [Rev3]} % correct format and rev
- ]},
+ IdsRevsEJson =
+ {[
+ % partial purge
+ {<<"doc1">>, [Rev1]},
+ % correct format, but invalid rev
+ {<<"doc2">>, [Rev3, Rev1]},
+ % correct format and rev
+ {<<"doc3">>, [Rev3]}
+ ]},
IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
% Ids larger than expected
config:set("purge", "max_document_id_number", "1"),
- {ok, Status, _, Body4} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status, _, Body4} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
config:delete("purge", "max_document_id_number"),
ResultJson = ?JSON_DECODE(Body4),
?assertEqual(400, Status),
- ?assertMatch({[
- {<<"error">>,<<"bad_request">>},
- {<<"reason">>,<<"Exceeded maximum number of documents.">>}]},
- ResultJson),
+ ?assertMatch(
+ {[
+ {<<"error">>, <<"bad_request">>},
+ {<<"reason">>, <<"Exceeded maximum number of documents.">>}
+ ]},
+ ResultJson
+ ),
% Revs larger than expected
config:set("purge", "max_revisions_number", "1"),
- {ok, Status2, _, Body5} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status2, _, Body5} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
config:delete("purge", "max_revisions_number"),
ResultJson2 = ?JSON_DECODE(Body5),
?assertEqual(400, Status2),
- ?assertMatch({[
- {<<"error">>,<<"bad_request">>},
- {<<"reason">>,<<"Exceeded maximum number of revisions.">>}]},
- ResultJson2)
+ ?assertMatch(
+ {[
+ {<<"error">>, <<"bad_request">>},
+ {<<"reason">>, <<"Exceeded maximum number of revisions.">>}
+ ]},
+ ResultJson2
+ )
end).
-
test_exceed_limits_on_purge_infos(Url) ->
?_test(begin
- {ok, Status1, _, _} = test_request:put(Url ++ "/_purged_infos_limit/",
- [?CONTENT_JSON, ?AUTH], "2"),
+ {ok, Status1, _, _} = test_request:put(
+ Url ++ "/_purged_infos_limit/",
+ [?CONTENT_JSON, ?AUTH],
+ "2"
+ ),
?assert(Status1 =:= 200),
{ok, _, _, Body} = create_doc(Url, "doc1"),
@@ -355,53 +426,66 @@ test_exceed_limits_on_purge_infos(Url) ->
{Json3} = ?JSON_DECODE(Body3),
Rev3 = couch_util:get_value(<<"rev">>, Json3, undefined),
- IdsRevsEJson = {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, [Rev2]},
- {<<"doc3">>, [Rev3]}
- ]},
+ IdsRevsEJson =
+ {[
+ {<<"doc1">>, [Rev1]},
+ {<<"doc2">>, [Rev2]},
+ {<<"doc3">>, [Rev3]}
+ ]},
IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)),
- {ok, Status2, _, ResultBody} = test_request:post(Url ++ "/_purge/",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, Status2, _, ResultBody} = test_request:post(
+ Url ++ "/_purge/",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
?assert(Status2 =:= 201 orelse Status2 =:= 202),
?assertEqual(
{[
{<<"purge_seq">>, null},
- {<<"purged">>, {[
- {<<"doc1">>, [Rev1]},
- {<<"doc2">>, [Rev2]},
- {<<"doc3">>, [Rev3]}
- ]}}
+ {<<"purged">>,
+ {[
+ {<<"doc1">>, [Rev1]},
+ {<<"doc2">>, [Rev2]},
+ {<<"doc3">>, [Rev3]}
+ ]}}
]},
ResultJson
)
-
end).
-
should_error_set_purged_docs_limit_to0(Url) ->
?_test(begin
- {ok, Status, _, _} = test_request:put(Url ++ "/_purged_infos_limit/",
- [?CONTENT_JSON, ?AUTH], "0"),
+ {ok, Status, _, _} = test_request:put(
+ Url ++ "/_purged_infos_limit/",
+ [?CONTENT_JSON, ?AUTH],
+ "0"
+ ),
?assert(Status =:= 400)
end).
-
test_timeout_set_purged_infos_limit(Url) ->
?_test(begin
meck:new(fabric, [passthrough]),
meck:expect(fabric, set_purge_infos_limit, fun(_, _, _) ->
- {error, timeout} end),
- {ok, Status, _, ResultBody} = test_request:put(Url
- ++ "/_purged_infos_limit/", [?CONTENT_JSON, ?AUTH], "2"),
+ {error, timeout}
+ end),
+ {ok, Status, _, ResultBody} = test_request:put(
+ Url ++
+ "/_purged_infos_limit/",
+ [?CONTENT_JSON, ?AUTH],
+ "2"
+ ),
meck:unload(fabric),
ResultJson = ?JSON_DECODE(ResultBody),
?assert(Status =:= 500),
- ?assertMatch({[
- {<<"error">>,<<"error">>},
- {<<"reason">>,<<"timeout">>}]},
- ResultJson)
+ ?assertMatch(
+ {[
+ {<<"error">>, <<"error">>},
+ {<<"reason">>, <<"timeout">>}
+ ]},
+ ResultJson
+ )
end).
diff --git a/src/chttpd/test/eunit/chttpd_security_tests.erl b/src/chttpd/test/eunit/chttpd_security_tests.erl
index 0bea9dbcd..d8a39ffc8 100644
--- a/src/chttpd/test/eunit/chttpd_security_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_security_tests.erl
@@ -27,8 +27,6 @@
-define(TEST_ADMIN_PASS, "test_admin_pass").
-define(TEST_ADMIN_AUTH, {basic_auth, {?TEST_ADMIN, ?TEST_ADMIN_PASS}}).
-
-
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
@@ -48,30 +46,33 @@ setup() ->
create_db(UsersUrl),
create_db(Url),
create_design_doc(Url),
- create_user(UsersUrl,?TEST_MEMBER,?TEST_MEMBER_PASS,[<<?TEST_MEMBER>>]),
- create_user(UsersUrl,?TEST_ADMIN,?TEST_ADMIN_PASS,[<<?TEST_ADMIN>>]),
+ create_user(UsersUrl, ?TEST_MEMBER, ?TEST_MEMBER_PASS, [<<?TEST_MEMBER>>]),
+ create_user(UsersUrl, ?TEST_ADMIN, ?TEST_ADMIN_PASS, [<<?TEST_ADMIN>>]),
set_security(Url),
[Url, UsersUrl].
-teardown([Url,UsersUrl]) ->
+teardown([Url, UsersUrl]) ->
delete_db(Url),
delete_db(UsersUrl),
- ok = config:delete("admins", ?USER, _Persist=false).
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
create_design_doc(Url) ->
- {ok, Status, _, _} = test_request:put(lists:concat([Url, '/_design/test']), [?CONTENT_JSON, ?AUTH],
- "{\"id\":\"_design/test\"}"),
+ {ok, Status, _, _} = test_request:put(
+ lists:concat([Url, '/_design/test']),
+ [?CONTENT_JSON, ?AUTH],
+ "{\"id\":\"_design/test\"}"
+ ),
?assert(Status =:= 201 orelse Status =:= 202).
set_security(Url) ->
SecurityUrl = lists:concat([Url, "/_security"]),
SecurityProperties = [
- {<<"admins">>,{[{<<"roles">>,[<<?TEST_ADMIN>>]}]}},
- {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ {<<"admins">>, {[{<<"roles">>, [<<?TEST_ADMIN>>]}]}},
+ {<<"members">>, {[{<<"roles">>, [<<?TEST_MEMBER>>]}]}}
],
Body = jiffy:encode({SecurityProperties}),
@@ -82,24 +83,27 @@ delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
create_user(UsersUrl, Name, Password, Roles) ->
-
- Body = "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":" ++ erlang:binary_to_list(jiffy:encode(Roles)) ++ ",\"password\":\"" ++ Password ++"\"}",
+ Body =
+ "{\"name\":\"" ++ Name ++
+ "\",\"type\":\"user\",\"roles\":" ++ erlang:binary_to_list(jiffy:encode(Roles)) ++
+ ",\"password\":\"" ++ Password ++ "\"}",
Url = lists:concat([
- UsersUrl, "/org.couchdb.user:", Name]),
+ UsersUrl, "/org.couchdb.user:", Name
+ ]),
{ok, 201, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], Body).
-
all_test_() ->
{
"chttpd security tests",
{
setup,
- fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_allow_admin_db_compaction/1,
fun should_allow_valid_password_to_create_user/1,
@@ -127,10 +131,12 @@ security_object_validate_test_() ->
"chttpd security object validate tests",
{
setup,
- fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_ok_for_sec_obj_with_roles/1,
fun should_return_ok_for_sec_obj_with_names/1,
@@ -144,263 +150,372 @@ security_object_validate_test_() ->
}
}.
-should_allow_admin_db_compaction([Url,_UsersUrl]) ->
- ?_assertEqual(true,
+should_allow_admin_db_compaction([Url, _UsersUrl]) ->
+ ?_assertEqual(
+ true,
begin
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact",
- [?CONTENT_JSON, ?AUTH], ""),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_compact",
+ [?CONTENT_JSON, ?AUTH],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end).
+ end
+ ).
should_allow_valid_password_to_create_user([_Url, UsersUrl]) ->
- UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
- \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}",
- {ok, _, _, ResultBody} = test_request:post(UsersUrl,
- [?CONTENT_JSON, ?AUTH], UserDoc),
+ UserDoc =
+ "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",\n"
+ " \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}",
+ {ok, _, _, ResultBody} = test_request:post(
+ UsersUrl,
+ [?CONTENT_JSON, ?AUTH],
+ UserDoc
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
?_assertEqual(true, couch_util:get_value(<<"ok">>, InnerJson)).
should_disallow_invalid_password_to_create_user([_Url, UsersUrl]) ->
- UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",
- \"type\": \"user\", \"roles\": [], \"password\": 123}",
- {ok, _, _, ResultBody} = test_request:post(UsersUrl,
- [?CONTENT_JSON, ?AUTH], UserDoc),
+ UserDoc =
+ "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\",\n"
+ " \"type\": \"user\", \"roles\": [], \"password\": 123}",
+ {ok, _, _, ResultBody} = test_request:post(
+ UsersUrl,
+ [?CONTENT_JSON, ?AUTH],
+ UserDoc
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
?_assertEqual(<<"forbidden">>, ErrType).
-should_disallow_anonymous_db_compaction([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact",
- [?CONTENT_JSON], ""),
+should_disallow_anonymous_db_compaction([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_compact",
+ [?CONTENT_JSON],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>,ErrType).
+ ?_assertEqual(<<"unauthorized">>, ErrType).
-should_disallow_db_member_db_compaction([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact",
- [?CONTENT_JSON, ?TEST_MEMBER_AUTH], ""),
+should_disallow_db_member_db_compaction([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_compact",
+ [?CONTENT_JSON, ?TEST_MEMBER_AUTH],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>,ErrType).
+ ?_assertEqual(<<"unauthorized">>, ErrType).
-should_allow_db_admin_db_compaction([Url,_UsersUrl]) ->
- ?_assertEqual(true,
+should_allow_db_admin_db_compaction([Url, _UsersUrl]) ->
+ ?_assertEqual(
+ true,
begin
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact",
- [?CONTENT_JSON, ?TEST_ADMIN_AUTH], ""),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_compact",
+ [?CONTENT_JSON, ?TEST_ADMIN_AUTH],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end).
+ end
+ ).
-should_allow_admin_view_compaction([Url,_UsersUrl]) ->
- ?_assertEqual(true,
+should_allow_admin_view_compaction([Url, _UsersUrl]) ->
+ ?_assertEqual(
+ true,
begin
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact/test",
- [?CONTENT_JSON, ?AUTH], ""),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_compact/test",
+ [?CONTENT_JSON, ?AUTH],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end).
-
-should_disallow_anonymous_view_compaction([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_compact/test",
- [?CONTENT_JSON], ""),
+ end
+ ).
+
+should_disallow_anonymous_view_compaction([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_compact/test",
+ [?CONTENT_JSON],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>,ErrType).
+ ?_assertEqual(<<"unauthorized">>, ErrType).
-should_allow_admin_db_view_cleanup([Url,_UsersUrl]) ->
- ?_assertEqual(true,
+should_allow_admin_db_view_cleanup([Url, _UsersUrl]) ->
+ ?_assertEqual(
+ true,
begin
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_view_cleanup",
- [?CONTENT_JSON, ?AUTH], ""),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_view_cleanup",
+ [?CONTENT_JSON, ?AUTH],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end).
-
-should_disallow_anonymous_db_view_cleanup([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_view_cleanup",
- [?CONTENT_JSON], ""),
+ end
+ ).
+
+should_disallow_anonymous_db_view_cleanup([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_view_cleanup",
+ [?CONTENT_JSON],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
?_assertEqual(<<"unauthorized">>, ErrType).
-should_allow_admin_purge([Url,_UsersUrl]) ->
- ?_assertEqual(null,
+should_allow_admin_purge([Url, _UsersUrl]) ->
+ ?_assertEqual(
+ null,
begin
IdsRevs = "{}",
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge",
- [?CONTENT_JSON, ?AUTH], IdsRevs),
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_purge",
+ [?CONTENT_JSON, ?AUTH],
+ IdsRevs
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
couch_util:get_value(<<"purge_seq">>, InnerJson, undefined)
- end).
-
-should_disallow_anonymous_purge([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge",
- [?CONTENT_JSON], ""),
+ end
+ ).
+
+should_disallow_anonymous_purge([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_purge",
+ [?CONTENT_JSON],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
?_assertEqual(<<"unauthorized">>, ErrType).
-should_disallow_db_member_purge([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge",
- [?CONTENT_JSON, ?TEST_MEMBER_AUTH], ""),
+should_disallow_db_member_purge([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:post(
+ Url ++ "/_purge",
+ [?CONTENT_JSON, ?TEST_MEMBER_AUTH],
+ ""
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>,ErrType).
+ ?_assertEqual(<<"unauthorized">>, ErrType).
-should_allow_admin_purged_infos_limit([Url,_UsersUrl]) ->
- ?_assertEqual(true,
+should_allow_admin_purged_infos_limit([Url, _UsersUrl]) ->
+ ?_assertEqual(
+ true,
begin
- {ok, _, _, ResultBody} = test_request:put(Url
- ++ "/_purged_infos_limit/", [?CONTENT_JSON, ?AUTH], "2"),
+ {ok, _, _, ResultBody} = test_request:put(
+ Url ++
+ "/_purged_infos_limit/",
+ [?CONTENT_JSON, ?AUTH],
+ "2"
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
couch_util:get_value(<<"ok">>, InnerJson, undefined)
- end).
-
-should_disallow_anonymous_purged_infos_limit([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/",
- [?CONTENT_JSON, ?TEST_MEMBER_AUTH], "2"),
+ end
+ ).
+
+should_disallow_anonymous_purged_infos_limit([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:put(
+ Url ++ "/_purged_infos_limit/",
+ [?CONTENT_JSON, ?TEST_MEMBER_AUTH],
+ "2"
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
?_assertEqual(<<"unauthorized">>, ErrType).
-should_disallow_db_member_purged_infos_limit([Url,_UsersUrl]) ->
- {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/",
- [?CONTENT_JSON, ?TEST_MEMBER_AUTH], "2"),
+should_disallow_db_member_purged_infos_limit([Url, _UsersUrl]) ->
+ {ok, _, _, ResultBody} = test_request:put(
+ Url ++ "/_purged_infos_limit/",
+ [?CONTENT_JSON, ?TEST_MEMBER_AUTH],
+ "2"
+ ),
ResultJson = ?JSON_DECODE(ResultBody),
{InnerJson} = ResultJson,
ErrType = couch_util:get_value(<<"error">>, InnerJson),
- ?_assertEqual(<<"unauthorized">>,ErrType).
+ ?_assertEqual(<<"unauthorized">>, ErrType).
-should_return_ok_for_sec_obj_with_roles([Url,_UsersUrl]) ->
+should_return_ok_for_sec_obj_with_roles([Url, _UsersUrl]) ->
SecurityUrl = lists:concat([Url, "/_security"]),
SecurityProperties = [
- {<<"admins">>,{[{<<"roles">>,[<<?TEST_ADMIN>>]}]}},
- {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ {<<"admins">>, {[{<<"roles">>, [<<?TEST_ADMIN>>]}]}},
+ {<<"members">>, {[{<<"roles">>, [<<?TEST_MEMBER>>]}]}}
],
Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, _} = test_request:put(SecurityUrl,
- [?CONTENT_JSON, ?AUTH], Body),
+ {ok, Status, _, _} = test_request:put(
+ SecurityUrl,
+ [?CONTENT_JSON, ?AUTH],
+ Body
+ ),
?_assertEqual(200, Status).
-should_return_ok_for_sec_obj_with_names([Url,_UsersUrl]) ->
+should_return_ok_for_sec_obj_with_names([Url, _UsersUrl]) ->
SecurityUrl = lists:concat([Url, "/_security"]),
SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
- {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]}]}}
+ {<<"admins">>, {[{<<"names">>, [<<?TEST_ADMIN>>]}]}},
+ {<<"members">>, {[{<<"names">>, [<<?TEST_MEMBER>>]}]}}
],
Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, _} = test_request:put(SecurityUrl,
- [?CONTENT_JSON, ?AUTH], Body),
+ {ok, Status, _, _} = test_request:put(
+ SecurityUrl,
+ [?CONTENT_JSON, ?AUTH],
+ Body
+ ),
?_assertEqual(200, Status).
-should_return_ok_for_sec_obj_with_roles_and_names([Url,_UsersUrl]) ->
+should_return_ok_for_sec_obj_with_roles_and_names([Url, _UsersUrl]) ->
SecurityUrl = lists:concat([Url, "/_security"]),
SecurityProperties = [
- {<<"admins">>, {[{<<"names">>,[<<?TEST_ADMIN>>]},
- {<<"roles">>,[<<?TEST_ADMIN>>]}]}},
- {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]},
- {<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ {<<"admins">>,
+ {[
+ {<<"names">>, [<<?TEST_ADMIN>>]},
+ {<<"roles">>, [<<?TEST_ADMIN>>]}
+ ]}},
+ {<<"members">>,
+ {[
+ {<<"names">>, [<<?TEST_MEMBER>>]},
+ {<<"roles">>, [<<?TEST_MEMBER>>]}
+ ]}}
],
Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, _} = test_request:put(SecurityUrl,
- [?CONTENT_JSON, ?AUTH], Body),
+ {ok, Status, _, _} = test_request:put(
+ SecurityUrl,
+ [?CONTENT_JSON, ?AUTH],
+ Body
+ ),
?_assertEqual(200, Status).
should_return_error_for_sec_obj_with_incorrect_roles_and_names(
- [Url,_UsersUrl]) ->
+ [Url, _UsersUrl]
+) ->
SecurityUrl = lists:concat([Url, "/_security"]),
SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[123]}]}},
- {<<"members">>,{[{<<"roles">>,["foo"]}]}}
+ {<<"admins">>, {[{<<"names">>, [123]}]}},
+ {<<"members">>, {[{<<"roles">>, ["foo"]}]}}
],
Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
- [?CONTENT_JSON, ?AUTH], Body),
+ {ok, Status, _, RespBody} = test_request:put(
+ SecurityUrl,
+ [?CONTENT_JSON, ?AUTH],
+ Body
+ ),
ResultJson = ?JSON_DECODE(RespBody),
[
?_assertEqual(500, Status),
- ?_assertEqual({[
- {<<"error">>,<<"error">>},
- {<<"reason">>,<<"no_majority">>}
- ]}, ResultJson)
+ ?_assertEqual(
+ {[
+ {<<"error">>, <<"error">>},
+ {<<"reason">>, <<"no_majority">>}
+ ]},
+ ResultJson
+ )
].
-should_return_error_for_sec_obj_with_incorrect_roles([Url,_UsersUrl]) ->
+should_return_error_for_sec_obj_with_incorrect_roles([Url, _UsersUrl]) ->
SecurityUrl = lists:concat([Url, "/_security"]),
SecurityProperties = [
- {<<"admins">>,{[{<<"roles">>,[?TEST_ADMIN]}]}},
- {<<"members">>,{[{<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ {<<"admins">>, {[{<<"roles">>, [?TEST_ADMIN]}]}},
+ {<<"members">>, {[{<<"roles">>, [<<?TEST_MEMBER>>]}]}}
],
Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
- [?CONTENT_JSON, ?AUTH], Body),
+ {ok, Status, _, RespBody} = test_request:put(
+ SecurityUrl,
+ [?CONTENT_JSON, ?AUTH],
+ Body
+ ),
ResultJson = ?JSON_DECODE(RespBody),
[
?_assertEqual(500, Status),
- ?_assertEqual({[
- {<<"error">>,<<"error">>},
- {<<"reason">>,<<"no_majority">>}
- ]}, ResultJson)
+ ?_assertEqual(
+ {[
+ {<<"error">>, <<"error">>},
+ {<<"reason">>, <<"no_majority">>}
+ ]},
+ ResultJson
+ )
].
-should_return_error_for_sec_obj_with_incorrect_names([Url,_UsersUrl]) ->
+should_return_error_for_sec_obj_with_incorrect_names([Url, _UsersUrl]) ->
SecurityUrl = lists:concat([Url, "/_security"]),
SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<?TEST_ADMIN>>]}]}},
- {<<"members">>,{[{<<"names">>,[?TEST_MEMBER]}]}}
+ {<<"admins">>, {[{<<"names">>, [<<?TEST_ADMIN>>]}]}},
+ {<<"members">>, {[{<<"names">>, [?TEST_MEMBER]}]}}
],
Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
- [?CONTENT_JSON, ?AUTH], Body),
+ {ok, Status, _, RespBody} = test_request:put(
+ SecurityUrl,
+ [?CONTENT_JSON, ?AUTH],
+ Body
+ ),
ResultJson = ?JSON_DECODE(RespBody),
[
?_assertEqual(500, Status),
- ?_assertEqual({[
- {<<"error">>,<<"error">>},
- {<<"reason">>,<<"no_majority">>}
- ]}, ResultJson)
+ ?_assertEqual(
+ {[
+ {<<"error">>, <<"error">>},
+ {<<"reason">>, <<"no_majority">>}
+ ]},
+ ResultJson
+ )
].
-should_return_error_for_sec_obj_in_user_db([_,_UsersUrl]) ->
+should_return_error_for_sec_obj_in_user_db([_, _UsersUrl]) ->
SecurityUrl = lists:concat([_UsersUrl, "/_security"]),
SecurityProperties = [
- {<<"admins">>, {[{<<"names">>,[<<?TEST_ADMIN>>]},
- {<<"roles">>,[<<?TEST_ADMIN>>]}]}},
- {<<"members">>,{[{<<"names">>,[<<?TEST_MEMBER>>]},
- {<<"roles">>,[<<?TEST_MEMBER>>]}]}}
+ {<<"admins">>,
+ {[
+ {<<"names">>, [<<?TEST_ADMIN>>]},
+ {<<"roles">>, [<<?TEST_ADMIN>>]}
+ ]}},
+ {<<"members">>,
+ {[
+ {<<"names">>, [<<?TEST_MEMBER>>]},
+ {<<"roles">>, [<<?TEST_MEMBER>>]}
+ ]}}
],
Body = jiffy:encode({SecurityProperties}),
- {ok, Status, _, RespBody} = test_request:put(SecurityUrl,
- [?CONTENT_JSON, ?AUTH], Body),
+ {ok, Status, _, RespBody} = test_request:put(
+ SecurityUrl,
+ [?CONTENT_JSON, ?AUTH],
+ Body
+ ),
ResultJson = ?JSON_DECODE(RespBody),
[
?_assertEqual(403, Status),
- ?_assertEqual({[
- {<<"error">>,<<"forbidden">>},
- {<<"reason">>,<<"You can't edit the security object of the user database.">>}
- ]}, ResultJson)
+ ?_assertEqual(
+ {[
+ {<<"error">>, <<"forbidden">>},
+ {<<"reason">>, <<"You can't edit the security object of the user database.">>}
+ ]},
+ ResultJson
+ )
].
diff --git a/src/chttpd/test/eunit/chttpd_session_tests.erl b/src/chttpd/test/eunit/chttpd_session_tests.erl
index 1e1fbf5e4..3d99e3b10 100644
--- a/src/chttpd/test/eunit/chttpd_session_tests.erl
+++ b/src/chttpd/test/eunit/chttpd_session_tests.erl
@@ -18,18 +18,15 @@
-define(USER, "chttpd_test_admin").
-define(PASS, "pass").
-
setup() ->
- ok = config:delete("chttpd_auth", "authentication_db", _Persist=false),
+ ok = config:delete("chttpd_auth", "authentication_db", _Persist = false),
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, binary_to_list(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, binary_to_list(Hashed), _Persist = false),
root_url() ++ "/_session".
-
cleanup(_) ->
- ok = config:delete("chttpd_auth", "authentication_db", _Persist=false),
- ok = config:delete("admins", ?USER, _Persist=false).
-
+ ok = config:delete("chttpd_auth", "authentication_db", _Persist = false),
+ ok = config:delete("admins", ?USER, _Persist = false).
session_test_() ->
{
@@ -51,32 +48,32 @@ session_test_() ->
}
}.
-
session_authentication_db_absent(Url) ->
- ok = config:delete("chttpd_auth", "authentication_db", _Persist=false),
+ ok = config:delete("chttpd_auth", "authentication_db", _Persist = false),
?assertThrow({not_found, _}, session_authentication_db(Url)).
-
session_authentication_db_present(Url) ->
Name = "_users",
ok = config:set("chttpd_auth", "authentication_db", Name, false),
?assertEqual(list_to_binary(Name), session_authentication_db(Url)).
-
session_authentication_gzip_request(Url) ->
{ok, 200, _, Body} = test_request:request(
post,
Url,
[{"Content-Type", "application/json"}, {"Content-Encoding", "gzip"}],
- zlib:gzip(jiffy:encode({[{username, list_to_binary(?USER)}, {password, list_to_binary(?PASS)}]}))),
+ zlib:gzip(
+ jiffy:encode({[{username, list_to_binary(?USER)}, {password, list_to_binary(?PASS)}]})
+ )
+ ),
{BodyJson} = jiffy:decode(Body),
?assert(lists:member({<<"name">>, list_to_binary(?USER)}, BodyJson)).
session_authentication_db(Url) ->
{ok, 200, _, Body} = test_request:get(Url, [{basic_auth, {?USER, ?PASS}}]),
couch_util:get_nested_json_value(
- jiffy:decode(Body), [<<"info">>, <<"authentication_db">>]).
-
+ jiffy:decode(Body), [<<"info">>, <<"authentication_db">>]
+ ).
root_url() ->
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
diff --git a/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl b/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
index 937880621..bde2c8512 100644
--- a/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
+++ b/src/chttpd/test/eunit/chttpd_socket_buffer_size_test.erl
@@ -20,26 +20,24 @@
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
setup(SocketOpts) ->
StartCtx = start_couch_with_cfg(SocketOpts),
Db = ?tempdb(),
create_db(url(Db)),
{StartCtx, Db}.
-
teardown(_, {StartCtx, Db}) ->
delete_db(url(Db)),
- ok = config:delete("admins", ?USER, _Persist=false),
+ ok = config:delete("admins", ?USER, _Persist = false),
test_util:stop_couch(StartCtx).
-
socket_buffer_size_test_() ->
{
"chttpd socket_buffer_size_test",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[
{"[{recbuf, undefined}]", fun default_buffer/2},
{"[{recbuf, 1024}]", fun small_recbuf/2},
@@ -48,31 +46,30 @@ socket_buffer_size_test_() ->
}
}.
-
small_recbuf(_, {_, Db}) ->
- {timeout, 30, ?_test(begin
- Id = data(2048),
- Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
- ?assert(Response =:= 400 orelse Response =:= request_failed)
- end)}.
-
+ {timeout, 30,
+ ?_test(begin
+ Id = data(2048),
+ Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
+ ?assert(Response =:= 400 orelse Response =:= request_failed)
+ end)}.
small_buffer(_, {_, Db}) ->
- {timeout, 30, ?_test(begin
- Id = data(2048),
- Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
- ?assert(Response =:= 400 orelse Response =:= request_failed)
- end)}.
-
+ {timeout, 30,
+ ?_test(begin
+ Id = data(2048),
+ Response = put_req(url(Db) ++ "/" ++ Id, "{}"),
+ ?assert(Response =:= 400 orelse Response =:= request_failed)
+ end)}.
default_buffer(_, {_, Db}) ->
- {timeout, 30, ?_test(begin
- Id = data(7000),
- Headers = [{"Blah", data(7000)}],
- Status = put_req(url(Db) ++ "/" ++ Id, Headers, "{}"),
- ?assert(Status =:= 201 orelse Status =:= 202)
- end)}.
-
+ {timeout, 30,
+ ?_test(begin
+ Id = data(7000),
+ Headers = [{"Blah", data(7000)}],
+ Status = put_req(url(Db) ++ "/" ++ Id, Headers, "{}"),
+ ?assert(Status =:= 201 orelse Status =:= 202)
+ end)}.
% Helper functions
@@ -81,24 +78,19 @@ url() ->
Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
"http://" ++ Addr ++ ":" ++ Port.
-
url(Db) ->
url() ++ "/" ++ ?b2l(Db).
-
create_db(Url) ->
Status = put_req(Url ++ "?q=1&n=1", "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
put_req(Url, Body) ->
put_req(Url, [], Body).
-
put_req(Url, Headers, Body) ->
AllHeaders = Headers ++ [?CONTENT_JSON, ?AUTH],
case test_request:put(Url, AllHeaders, Body) of
@@ -106,11 +98,9 @@ put_req(Url, Headers, Body) ->
{error, Error} -> Error
end.
-
data(Size) ->
string:copies("x", Size).
-
append_to_cfg_chain(Cfg) ->
CfgDir = filename:dirname(lists:last(?CONFIG_CHAIN)),
CfgFile = filename:join([CfgDir, "chttpd_socket_buffer_extra_cfg.ini"]),
@@ -118,10 +108,9 @@ append_to_cfg_chain(Cfg) ->
ok = file:write_file(CfgFile, CfgSect),
?CONFIG_CHAIN ++ [CfgFile].
-
start_couch_with_cfg(Cfg) ->
CfgChain = append_to_cfg_chain(Cfg),
StartCtx = test_util:start_couch(CfgChain, [chttpd]),
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
StartCtx.
diff --git a/src/chttpd/test/eunit/chttpd_util_test.erl b/src/chttpd/test/eunit/chttpd_util_test.erl
index 41fe6cb23..4ad2b8b83 100644
--- a/src/chttpd/test/eunit/chttpd_util_test.erl
+++ b/src/chttpd/test/eunit/chttpd_util_test.erl
@@ -12,19 +12,24 @@
-module(chttpd_util_test).
-
-include_lib("couch/include/couch_eunit.hrl").
-include("chttpd_test.hrl").
-
setup() ->
- ok = lists:foreach(fun(Section) ->
- ok = config_delete_all_keys(Section)
- end, ["httpd", "chttpd", "couch_httpd_auth", "chttpd_auth"]),
-
- ok = config:set("httpd", "authentication_handlers",
+ ok = lists:foreach(
+ fun(Section) ->
+ ok = config_delete_all_keys(Section)
+ end,
+ ["httpd", "chttpd", "couch_httpd_auth", "chttpd_auth"]
+ ),
+
+ ok = config:set(
+ "httpd",
+ "authentication_handlers",
"{couch_httpd_auth, cookie_authentication_handler}, "
- "{couch_httpd_auth, default_authentication_handler}", _Persist = false),
+ "{couch_httpd_auth, default_authentication_handler}",
+ _Persist = false
+ ),
ok = config:set("httpd", "backlog", "512", _Persist = false),
ok = config:set("chttpd", "require_valid_user", "false", _Persist = false),
ok = config:set("httpd", "both_exist", "get_in_httpd", _Persist = false),
@@ -36,7 +41,6 @@ setup() ->
ok = config:set("couch_httpd_auth", "cha_only", "true", _Persist = false),
ok = config:set("chttpd_auth", "ca_only", "1", _Persist = false).
-
teardown(_) ->
ok = config:delete("httpd", "authentication_handlers", _Persist = false),
ok = config:delete("httpd", "backlog", _Persist = false),
@@ -50,12 +54,13 @@ teardown(_) ->
ok = config:delete("couch_httpd_auth", "cha_only", _Persist = false),
ok = config:delete("chttpd_auth", "ca_only", _Persist = false).
-
config_delete_all_keys(Section) ->
- lists:foreach(fun({Key, _Val}) ->
- ok = config:delete(Section, Key, _Persist = false)
- end, config:get(Section)).
-
+ lists:foreach(
+ fun({Key, _Val}) ->
+ ok = config:delete(Section, Key, _Persist = false)
+ end,
+ config:get(Section)
+ ).
chttpd_util_config_test_() ->
{
@@ -78,13 +83,11 @@ chttpd_util_config_test_() ->
}
}.
-
test_chttpd_behavior(_) ->
?assertEqual("get_in_chttpd", chttpd_util:get_chttpd_config("both_exist")),
?assertEqual(1, chttpd_util:get_chttpd_config_integer("chttpd_only", 0)),
?assert(chttpd_util:get_chttpd_config_boolean("httpd_only", false)).
-
test_with_undefined_option(_) ->
?assertEqual(undefined, chttpd_util:get_chttpd_config("undefined_option")),
?assertEqual(abc, chttpd_util:get_chttpd_config("undefined_option", abc)),
@@ -95,13 +98,11 @@ test_with_undefined_option(_) ->
?assert(chttpd_util:get_chttpd_config("undefined_option", true)),
?assertNot(chttpd_util:get_chttpd_config("undefined_option", false)).
-
test_auth_behavior(_) ->
?assertEqual("ca", chttpd_util:get_chttpd_auth_config("both_exist")),
?assertEqual(1, chttpd_util:get_chttpd_auth_config_integer("ca_only", 0)),
?assert(chttpd_util:get_chttpd_auth_config_boolean("cha_only", false)).
-
test_auth_with_undefined_option(_) ->
?assertEqual(undefined, chttpd_util:get_chttpd_auth_config("undefine")),
?assertEqual(abc, chttpd_util:get_chttpd_auth_config("undefine", abc)),
diff --git a/src/chttpd/test/eunit/chttpd_view_test.erl b/src/chttpd/test/eunit/chttpd_view_test.erl
index 4c224bb4e..ceff2a902 100644
--- a/src/chttpd/test/eunit/chttpd_view_test.erl
+++ b/src/chttpd/test/eunit/chttpd_view_test.erl
@@ -19,16 +19,19 @@
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
--define(DDOC, "{\"_id\": \"_design/bar\", \"views\": {\"baz\":
- {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}").
+-define(DDOC,
+ "{\"_id\": \"_design/bar\", \"views\": {\"baz\":\n"
+ " {\"map\": \"function(doc) {emit(doc._id, doc._id);}\"}}}"
+).
-define(FIXTURE_TXT, ?ABS_PATH(?FILE)).
-define(i2l(I), integer_to_list(I)).
--define(TIMEOUT, 60). % seconds
+% seconds
+-define(TIMEOUT, 60).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
TmpDb = ?tempdb(),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -38,16 +41,18 @@ setup() ->
teardown(Url) ->
delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false).
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
create_doc(Url, Id) ->
- test_request:put(Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
+ test_request:put(
+ Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH],
+ "{\"mr\": \"rockoartischocko\"}"
+ ).
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
@@ -57,10 +62,12 @@ all_view_test_() ->
"chttpd view tests",
{
setup,
- fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_succeed_on_view_with_queries_keys/1,
fun should_succeed_on_view_with_queries_limit_skip/1,
@@ -70,55 +77,78 @@ all_view_test_() ->
}
}.
-
should_succeed_on_view_with_queries_keys(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- {ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
- [?CONTENT_JSON, ?AUTH], ?DDOC),
- QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\",
- \"testdoc8\"]}]}",
- {ok, _, _, RespBody} = test_request:post(Url ++ "/_design/bar/"
- ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ {ok, _, _, _} = test_request:put(
+ Url ++ "/_design/bar",
+ [?CONTENT_JSON, ?AUTH],
+ ?DDOC
+ ),
+ QueryDoc =
+ "{\"queries\": [{\"keys\": [ \"testdoc3\",\n"
+ " \"testdoc8\"]}]}",
+ {ok, _, _, RespBody} = test_request:post(
+ Url ++ "/_design/bar/" ++
+ "_view/baz/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_view_with_queries_limit_skip(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- {ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
- [?CONTENT_JSON, ?AUTH], ?DDOC),
- QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/"
- ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ {ok, _, _, _} = test_request:put(
+ Url ++ "/_design/bar",
+ [?CONTENT_JSON, ?AUTH],
+ ?DDOC
+ ),
+ QueryDoc = "{\"queries\": [{\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++ "/_design/bar/" ++
+ "_view/baz/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson)))
+ end)}.
should_succeed_on_view_with_multiple_queries(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
- {ok, _, _, _} = test_request:put(Url ++ "/_design/bar",
- [?CONTENT_JSON, ?AUTH], ?DDOC),
- QueryDoc = "{\"queries\": [{\"keys\": [ \"testdoc3\",
- \"testdoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
- {ok, RC, _, RespBody} = test_request:post(Url ++ "/_design/bar/"
- ++ "_view/baz/queries/", [?CONTENT_JSON, ?AUTH], QueryDoc),
- ?assertEqual(200, RC),
- {ResultJson} = ?JSON_DECODE(RespBody),
- ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
- {InnerJson1} = lists:nth(1, ResultJsonBody),
- ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
- {InnerJson2} = lists:nth(2, ResultJsonBody),
- ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
- ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
- end)}.
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [create_doc(Url, "testdoc" ++ ?i2l(I)) || I <- lists:seq(1, 10)],
+ {ok, _, _, _} = test_request:put(
+ Url ++ "/_design/bar",
+ [?CONTENT_JSON, ?AUTH],
+ ?DDOC
+ ),
+ QueryDoc =
+ "{\"queries\": [{\"keys\": [ \"testdoc3\",\n"
+ " \"testdoc8\"]}, {\"limit\": 5, \"skip\": 2}]}",
+ {ok, RC, _, RespBody} = test_request:post(
+ Url ++ "/_design/bar/" ++
+ "_view/baz/queries/",
+ [?CONTENT_JSON, ?AUTH],
+ QueryDoc
+ ),
+ ?assertEqual(200, RC),
+ {ResultJson} = ?JSON_DECODE(RespBody),
+ ResultJsonBody = couch_util:get_value(<<"results">>, ResultJson),
+ {InnerJson1} = lists:nth(1, ResultJsonBody),
+ ?assertEqual(2, length(couch_util:get_value(<<"rows">>, InnerJson1))),
+ {InnerJson2} = lists:nth(2, ResultJsonBody),
+ ?assertEqual(2, couch_util:get_value(<<"offset">>, InnerJson2)),
+ ?assertEqual(5, length(couch_util:get_value(<<"rows">>, InnerJson2)))
+ end)}.
diff --git a/src/chttpd/test/eunit/chttpd_welcome_test.erl b/src/chttpd/test/eunit/chttpd_welcome_test.erl
index e427f4dff..7a24efb71 100644
--- a/src/chttpd/test/eunit/chttpd_welcome_test.erl
+++ b/src/chttpd/test/eunit/chttpd_welcome_test.erl
@@ -20,29 +20,28 @@
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
-
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
Url = lists:concat(["http://", Addr, ":", Port, "/"]),
Url.
-
teardown(_Url) ->
- ok = config:delete("admins", ?USER, _Persist=false).
-
+ ok = config:delete("admins", ?USER, _Persist = false).
welcome_test_() ->
{
"chttpd welcome endpoint tests",
{
setup,
- fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1,
+ fun chttpd_test_util:start_couch/0,
+ fun chttpd_test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_have_version/1,
fun should_have_features/1,
@@ -69,7 +68,6 @@ should_have_uuid(Url) ->
?assert(is_list(Features))
end).
-
should_have_version(Url) ->
?_test(begin
{ok, Status, _, Body} = test_request:get(Url, [?CONTENT_JSON, ?AUTH]),
@@ -86,7 +84,6 @@ should_have_version(Url) ->
?assert(is_list(Features))
end).
-
should_have_features(Url) ->
?_test(begin
config:enable_feature(snek),
diff --git a/src/chttpd/test/eunit/chttpd_xframe_test.erl b/src/chttpd/test/eunit/chttpd_xframe_test.erl
index f3e6165bb..ee2a0996b 100644
--- a/src/chttpd/test/eunit/chttpd_xframe_test.erl
+++ b/src/chttpd/test/eunit/chttpd_xframe_test.erl
@@ -1,6 +1,5 @@
-module(chttpd_xframe_test).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
@@ -63,7 +62,6 @@ enabled_with_same_origin_test() ->
Headers = chttpd_xframe_options:header(mock_request(), [], config_sameorigin()),
?assertEqual(Headers, [{"X-Frame-Options", "SAMEORIGIN"}]).
-
xframe_host_test_() ->
{
"xframe host tests",
diff --git a/src/couch/src/couch.erl b/src/couch/src/couch.erl
index 1c912ac2a..6952c16c8 100644
--- a/src/couch/src/couch.erl
+++ b/src/couch/src/couch.erl
@@ -18,7 +18,6 @@
restart/0
]).
-
deps() ->
[
sasl,
@@ -32,7 +31,6 @@ deps() ->
couch_log
].
-
start() ->
catch erlang:system_flag(scheduler_bind_type, default_bind),
case start_apps(deps()) of
@@ -42,26 +40,23 @@ start() ->
throw(Else)
end.
-
stop() ->
application:stop(couch).
-
restart() ->
init:restart().
-
start_apps([]) ->
ok;
-start_apps([App|Rest]) ->
+start_apps([App | Rest]) ->
case application:start(App) of
- ok ->
- start_apps(Rest);
- {error, {already_started, App}} ->
- start_apps(Rest);
- {error, _Reason} when App =:= public_key ->
- % ignore on R12B5
- start_apps(Rest);
- {error, _Reason} ->
- {error, {app_would_not_start, App}}
+ ok ->
+ start_apps(Rest);
+ {error, {already_started, App}} ->
+ start_apps(Rest);
+ {error, _Reason} when App =:= public_key ->
+ % ignore on R12B5
+ start_apps(Rest);
+ {error, _Reason} ->
+ {error, {app_would_not_start, App}}
end.
diff --git a/src/couch/src/couch_att.erl b/src/couch/src/couch_att.erl
index 12ac4874c..b3b2f23eb 100644
--- a/src/couch/src/couch_att.erl
+++ b/src/couch/src/couch_att.erl
@@ -60,7 +60,6 @@
-include_lib("couch/include/couch_db.hrl").
-
%% Legacy attachment record. This is going to be phased out by the new proplist
%% based structure. It's needed for now to allow code to perform lazy upgrades
%% while the patch is rolled out to the cluster. Attachments passed as records
@@ -79,8 +78,13 @@
md5 = <<>> :: binary(),
revpos = 0 :: non_neg_integer(),
- data :: stub | follows | binary() | {any(), any()} |
- {follows, pid(), reference()} | fun(() -> binary()),
+ data ::
+ stub
+ | follows
+ | binary()
+ | {any(), any()}
+ | {follows, pid(), reference()}
+ | fun(() -> binary()),
%% Encoding of the attachment
%% currently supported values are:
@@ -90,7 +94,6 @@
encoding = identity :: identity | gzip
}).
-
%% Extensible Attachment Type
%%
%% The following types describe the known properties for attachment fields
@@ -99,57 +102,57 @@
%% be used by upgraded code. If you plan on operating on new data, please add
%% an entry here as documentation.
-
%% The name of the attachment is also used as the mime-part name for file
%% downloads. These must be unique per document.
-type name_prop() :: {name, binary()}.
-
%% The mime type of the attachment. This does affect compression of certain
%% attachments if the type is found to be configured as a compressible type.
%% This is commonly reserved for text/* types but could include other custom
%% cases as well. See definition and use of couch_util:compressable_att_type/1.
-type type_prop() :: {type, binary()}.
-
%% The attachment length is similar to disk-length but ignores additional
%% encoding that may have occurred.
-type att_len_prop() :: {att_len, non_neg_integer()}.
-
%% The size of the attachment as stored in a disk stream.
-type disk_len_prop() :: {disk_len, non_neg_integer()}.
-
%% This is a digest of the original attachment data as uploaded by the client.
%% it's useful for checking validity of contents against other attachment data
%% as well as quick digest computation of the enclosing document.
-type md5_prop() :: {md5, binary()}.
-
-type revpos_prop() :: {revpos, 0}.
-
%% This field is currently overloaded with just about everything. The
%% {any(), any()} type is just there until I have time to check the actual
%% values expected. Over time this should be split into more than one property
%% to allow simpler handling.
-type data_prop() :: {
- data, stub | follows | binary() | {any(), any()} |
- {follows, pid(), reference()} | fun(() -> binary())
+ data,
+ stub
+ | follows
+ | binary()
+ | {any(), any()}
+ | {follows, pid(), reference()}
+ | fun(() -> binary())
}.
-
%% We will occasionally compress our data. See type_prop() for more information
%% on when this happens.
-type encoding_prop() :: {encoding, identity | gzip}.
-
-type attachment() :: [
- name_prop() | type_prop() |
- att_len_prop() | disk_len_prop() |
- md5_prop() | revpos_prop() |
- data_prop() | encoding_prop()
+ name_prop()
+ | type_prop()
+ | att_len_prop()
+ | disk_len_prop()
+ | md5_prop()
+ | revpos_prop()
+ | data_prop()
+ | encoding_prop()
].
-type disk_att_v1() :: {
@@ -178,7 +181,7 @@
-type att() :: #att{} | attachment() | disk_att().
--define(GB, (1024*1024*1024)).
+-define(GB, (1024 * 1024 * 1024)).
new() ->
%% We construct a record by default for compatibility. This will be
@@ -189,14 +192,13 @@ new() ->
%% undefined.
#att{}.
-
-spec new([{atom(), any()}]) -> att().
new(Props) ->
store(Props, new()).
-
--spec fetch([atom()], att()) -> [any()];
- (atom(), att()) -> any().
+-spec fetch
+ ([atom()], att()) -> [any()];
+ (atom(), att()) -> any().
fetch(Fields, Att) when is_list(Fields) ->
[fetch(Field, Att) || Field <- Fields];
fetch(Field, Att) when is_list(Att) ->
@@ -223,13 +225,15 @@ fetch(encoding, #att{encoding = Encoding}) ->
fetch(_, _) ->
undefined.
-
-spec store([{atom(), any()}], att()) -> att().
store(Props, Att0) ->
- lists:foldl(fun({Field, Value}, Att) ->
- store(Field, Value, Att)
- end, Att0, Props).
-
+ lists:foldl(
+ fun({Field, Value}, Att) ->
+ store(Field, Value, Att)
+ end,
+ Att0,
+ Props
+ ).
-spec store(atom(), any(), att()) -> att().
store(Field, undefined, Att) when is_list(Att) ->
@@ -255,17 +259,14 @@ store(encoding, Encoding, Att) ->
store(Field, Value, Att) ->
store(Field, Value, upgrade(Att)).
-
-spec transform(atom(), fun(), att()) -> att().
transform(Field, Fun, Att) ->
NewValue = Fun(fetch(Field, Att)),
store(Field, NewValue, Att).
-
is_stub(Att) ->
stub == fetch(data, Att).
-
%% merge_stubs takes all stub attachments and replaces them with on disk
%% attachments. It will return {missing, Name} if a stub isn't matched with
%% an existing attachment on disk. If the revpos is supplied with the stub
@@ -276,7 +277,6 @@ merge_stubs(MemAtts, DiskAtts) ->
),
merge_stubs(MemAtts, OnDisk, []).
-
%% restore spec when R14 support is dropped
%% -spec merge_stubs([att()], dict:dict(), [att()]) -> [att()].
merge_stubs([Att | Rest], OnDisk, Merged) ->
@@ -305,23 +305,24 @@ merge_stubs([Att | Rest], OnDisk, Merged) ->
merge_stubs([], _, Merged) ->
{ok, lists:reverse(Merged)}.
-
size_info([]) ->
{ok, []};
size_info(Atts) ->
- Info = lists:map(fun(Att) ->
- AttLen = fetch(att_len, Att),
- case fetch(data, Att) of
- {stream, StreamEngine} ->
- {ok, SPos} = couch_stream:to_disk_term(StreamEngine),
- {SPos, AttLen};
- {_, SPos} ->
- {SPos, AttLen}
- end
- end, Atts),
+ Info = lists:map(
+ fun(Att) ->
+ AttLen = fetch(att_len, Att),
+ case fetch(data, Att) of
+ {stream, StreamEngine} ->
+ {ok, SPos} = couch_stream:to_disk_term(StreamEngine),
+ {SPos, AttLen};
+ {_, SPos} ->
+ {SPos, AttLen}
+ end
+ end,
+ Atts
+ ),
{ok, lists:usort(Info)}.
-
%% When converting an attachment to disk term format, attempt to stay with the
%% old format when possible. This should help make the attachment lazy upgrade
%% as safe as possible, avoiding the need for complicated disk versioning
@@ -364,7 +365,6 @@ to_disk_term(Att) ->
),
{list_to_tuple(lists:reverse(Base)), Extended}.
-
%% The new disk term format is a simple wrapper around the legacy format. Base
%% properties will remain in a tuple while the new fields and possibly data from
%% future extensions will be stored in a list of atom/value pairs. While this is
@@ -372,45 +372,45 @@ to_disk_term(Att) ->
%% compression to remove these sorts of common bits (block level compression
%% with something like a shared dictionary that is checkpointed every now and
%% then).
-from_disk_term(StreamSrc, {Base, Extended})
- when is_tuple(Base), is_list(Extended) ->
+from_disk_term(StreamSrc, {Base, Extended}) when
+ is_tuple(Base), is_list(Extended)
+->
store(Extended, from_disk_term(StreamSrc, Base));
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,DiskLen,RevPos,Md5,Enc}) ->
+from_disk_term(StreamSrc, {Name, Type, Sp, AttLen, DiskLen, RevPos, Md5, Enc}) ->
{ok, Stream} = open_stream(StreamSrc, Sp),
#att{
- name=Name,
- type=Type,
- att_len=AttLen,
- disk_len=DiskLen,
- md5=Md5,
- revpos=RevPos,
- data={stream, Stream},
- encoding=upgrade_encoding(Enc)
+ name = Name,
+ type = Type,
+ att_len = AttLen,
+ disk_len = DiskLen,
+ md5 = Md5,
+ revpos = RevPos,
+ data = {stream, Stream},
+ encoding = upgrade_encoding(Enc)
};
-from_disk_term(StreamSrc, {Name,Type,Sp,AttLen,RevPos,Md5}) ->
+from_disk_term(StreamSrc, {Name, Type, Sp, AttLen, RevPos, Md5}) ->
{ok, Stream} = open_stream(StreamSrc, Sp),
#att{
- name=Name,
- type=Type,
- att_len=AttLen,
- disk_len=AttLen,
- md5=Md5,
- revpos=RevPos,
- data={stream, Stream}
+ name = Name,
+ type = Type,
+ att_len = AttLen,
+ disk_len = AttLen,
+ md5 = Md5,
+ revpos = RevPos,
+ data = {stream, Stream}
};
-from_disk_term(StreamSrc, {Name,{Type,Sp,AttLen}}) ->
+from_disk_term(StreamSrc, {Name, {Type, Sp, AttLen}}) ->
{ok, Stream} = open_stream(StreamSrc, Sp),
#att{
- name=Name,
- type=Type,
- att_len=AttLen,
- disk_len=AttLen,
- md5= <<>>,
- revpos=0,
- data={stream, Stream}
+ name = Name,
+ type = Type,
+ att_len = AttLen,
+ disk_len = AttLen,
+ md5 = <<>>,
+ revpos = 0,
+ data = {stream, Stream}
}.
-
%% from_json reads in embedded JSON attachments and creates usable attachment
%% values. The attachment may be a stub,
from_json(Name, Props) ->
@@ -426,7 +426,6 @@ from_json(Name, Props) ->
true -> inline_from_json(Att, Props)
end.
-
stub_from_json(Att, Props) ->
{DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props),
Digest = digest_from_json(Props),
@@ -434,21 +433,33 @@ stub_from_json(Att, Props) ->
%% the revpos consistency check on stubs when it's not provided in the
%% json object. See merge_stubs/3 for the stub check.
RevPos = couch_util:get_value(<<"revpos">>, Props),
- store([
- {md5, Digest}, {revpos, RevPos}, {data, stub}, {disk_len, DiskLen},
- {att_len, EncodedLen}, {encoding, Encoding}
- ], Att).
-
+ store(
+ [
+ {md5, Digest},
+ {revpos, RevPos},
+ {data, stub},
+ {disk_len, DiskLen},
+ {att_len, EncodedLen},
+ {encoding, Encoding}
+ ],
+ Att
+ ).
follow_from_json(Att, Props) ->
{DiskLen, EncodedLen, Encoding} = encoded_lengths_from_json(Props),
Digest = digest_from_json(Props),
RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
- store([
- {md5, Digest}, {revpos, RevPos}, {data, follows}, {disk_len, DiskLen},
- {att_len, EncodedLen}, {encoding, Encoding}
- ], Att).
-
+ store(
+ [
+ {md5, Digest},
+ {revpos, RevPos},
+ {data, follows},
+ {disk_len, DiskLen},
+ {att_len, EncodedLen},
+ {encoding, Encoding}
+ ],
+ Att
+ ).
inline_from_json(Att, Props) ->
B64Data = couch_util:get_value(<<"data">>, Props),
@@ -456,19 +467,22 @@ inline_from_json(Att, Props) ->
Data ->
Length = size(Data),
RevPos = couch_util:get_value(<<"revpos">>, Props, 0),
- store([
- {data, Data}, {revpos, RevPos}, {disk_len, Length},
- {att_len, Length}
- ], Att)
+ store(
+ [
+ {data, Data},
+ {revpos, RevPos},
+ {disk_len, Length},
+ {att_len, Length}
+ ],
+ Att
+ )
catch
_:_ ->
Name = fetch(name, Att),
- ErrMsg = <<"Invalid attachment data for ", Name/binary>>,
+ ErrMsg = <<"Invalid attachment data for ", Name/binary>>,
throw({bad_request, ErrMsg})
end.
-
-
encoded_lengths_from_json(Props) ->
Len = couch_util:get_value(<<"length">>, Props),
case couch_util:get_value(<<"encoding">>, Props) of
@@ -481,14 +495,12 @@ encoded_lengths_from_json(Props) ->
end,
{Len, EncodedLen, Encoding}.
-
digest_from_json(Props) ->
case couch_util:get_value(<<"digest">>, Props) of
<<"md5-", EncodedMd5/binary>> -> base64:decode(EncodedMd5);
_ -> <<>>
end.
-
to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
[Name, Data, DiskLen, AttLen, Enc, Type, RevPos, Md5] = fetch(
[name, data, disk_len, att_len, encoding, type, revpos, md5], Att
@@ -497,42 +509,45 @@ to_json(Att, OutputData, DataToFollow, ShowEncoding) ->
{<<"content_type">>, Type},
{<<"revpos">>, RevPos}
],
- DigestProp = case base64:encode(Md5) of
- <<>> -> [];
- Digest -> [{<<"digest">>, <<"md5-", Digest/binary>>}]
- end,
- DataProps = if
- not OutputData orelse Data == stub ->
- [{<<"length">>, DiskLen}, {<<"stub">>, true}];
- DataToFollow ->
- [{<<"length">>, DiskLen}, {<<"follows">>, true}];
- true ->
- AttData = case Enc of
- gzip -> zlib:gunzip(to_binary(Att));
- identity -> to_binary(Att)
- end,
- [{<<"data">>, base64:encode(AttData)}]
- end,
- EncodingProps = if
- ShowEncoding andalso Enc /= identity ->
- [
- {<<"encoding">>, couch_util:to_binary(Enc)},
- {<<"encoded_length">>, AttLen}
- ];
- true ->
- []
- end,
- HeadersProp = case fetch(headers, Att) of
- undefined -> [];
- Headers -> [{<<"headers">>, Headers}]
- end,
+ DigestProp =
+ case base64:encode(Md5) of
+ <<>> -> [];
+ Digest -> [{<<"digest">>, <<"md5-", Digest/binary>>}]
+ end,
+ DataProps =
+ if
+ not OutputData orelse Data == stub ->
+ [{<<"length">>, DiskLen}, {<<"stub">>, true}];
+ DataToFollow ->
+ [{<<"length">>, DiskLen}, {<<"follows">>, true}];
+ true ->
+ AttData =
+ case Enc of
+ gzip -> zlib:gunzip(to_binary(Att));
+ identity -> to_binary(Att)
+ end,
+ [{<<"data">>, base64:encode(AttData)}]
+ end,
+ EncodingProps =
+ if
+ ShowEncoding andalso Enc /= identity ->
+ [
+ {<<"encoding">>, couch_util:to_binary(Enc)},
+ {<<"encoded_length">>, AttLen}
+ ];
+ true ->
+ []
+ end,
+ HeadersProp =
+ case fetch(headers, Att) of
+ undefined -> [];
+ Headers -> [{<<"headers">>, Headers}]
+ end,
{Name, {Props ++ DigestProp ++ DataProps ++ EncodingProps ++ HeadersProp}}.
-
flush(Db, Att) ->
flush_data(Db, fetch(data, Att), Att).
-
flush_data(Db, Data, Att) when is_binary(Data) ->
couch_db:with_stream(Db, Att, fun(OutputStream) ->
couch_stream:write(OutputStream, Data)
@@ -545,25 +560,29 @@ flush_data(Db, Fun, Att) when is_function(Fun) ->
couch_db:with_stream(Db, Att, fun(OutputStream) ->
% Fun(MaxChunkSize, WriterFun) must call WriterFun
% once for each chunk of the attachment,
- Fun(4096,
+ Fun(
+ 4096,
% WriterFun({Length, Binary}, State)
% WriterFun({0, _Footers}, State)
% Called with Length == 0 on the last time.
% WriterFun returns NewState.
- fun({0, Footers}, _Total) ->
- F = mochiweb_headers:from_binary(Footers),
- case mochiweb_headers:get_value("Content-MD5", F) of
- undefined ->
- ok;
- Md5 ->
- {md5, base64:decode(Md5)}
- end;
- ({Length, Chunk}, Total0) ->
- Total = Total0 + Length,
- validate_attachment_size(AttName, Total, MaxAttSize),
- couch_stream:write(OutputStream, Chunk),
- Total
- end, 0)
+ fun
+ ({0, Footers}, _Total) ->
+ F = mochiweb_headers:from_binary(Footers),
+ case mochiweb_headers:get_value("Content-MD5", F) of
+ undefined ->
+ ok;
+ Md5 ->
+ {md5, base64:decode(Md5)}
+ end;
+ ({Length, Chunk}, Total0) ->
+ Total = Total0 + Length,
+ validate_attachment_size(AttName, Total, MaxAttSize),
+ couch_stream:write(OutputStream, Chunk),
+ Total
+ end,
+ 0
+ )
end);
AttLen ->
validate_attachment_size(AttName, AttLen, MaxAttSize),
@@ -600,17 +619,18 @@ flush_data(Db, {stream, StreamEngine}, Att) ->
end)
end.
-
write_streamed_attachment(_Stream, _F, 0) ->
ok;
write_streamed_attachment(_Stream, _F, LenLeft) when LenLeft < 0 ->
throw({bad_request, <<"attachment longer than expected">>});
write_streamed_attachment(Stream, F, LenLeft) when LenLeft > 0 ->
- Bin = try read_next_chunk(F, LenLeft)
- catch
- {mp_parser_died, normal} ->
- throw({bad_request, <<"attachment shorter than expected">>})
- end,
+ Bin =
+ try
+ read_next_chunk(F, LenLeft)
+ catch
+ {mp_parser_died, normal} ->
+ throw({bad_request, <<"attachment shorter than expected">>})
+ end,
ok = couch_stream:write(Stream, Bin),
write_streamed_attachment(Stream, F, LenLeft - iolist_size(Bin)).
@@ -619,11 +639,9 @@ read_next_chunk(F, _) when is_function(F, 0) ->
read_next_chunk(F, LenLeft) when is_function(F, 1) ->
F(lists:min([LenLeft, 16#2000])).
-
foldl(Att, Fun, Acc) ->
foldl(fetch(data, Att), Att, Fun, Acc).
-
foldl(Bin, _Att, Fun, Acc) when is_binary(Bin) ->
Fun(Bin, Acc);
foldl({stream, StreamEngine}, Att, Fun, Acc) ->
@@ -651,54 +669,51 @@ foldl({follows, Parser, Ref}, Att, Fun, Acc) ->
erlang:demonitor(ParserRef, [flush])
end.
-
range_foldl(Att, From, To, Fun, Acc) ->
{stream, StreamEngine} = fetch(data, Att),
couch_stream:range_foldl(StreamEngine, From, To, Fun, Acc).
-
foldl_decode(Att, Fun, Acc) ->
case fetch([data, encoding], Att) of
[{stream, StreamEngine}, Enc] ->
couch_stream:foldl_decode(
- StreamEngine, fetch(md5, Att), Enc, Fun, Acc);
+ StreamEngine, fetch(md5, Att), Enc, Fun, Acc
+ );
[Fun2, identity] ->
fold_streamed_data(Fun2, fetch(att_len, Att), Fun, Acc)
end.
-
to_binary(Att) ->
to_binary(fetch(data, Att), Att).
-
to_binary(Bin, _Att) when is_binary(Bin) ->
Bin;
to_binary(Iolist, _Att) when is_list(Iolist) ->
iolist_to_binary(Iolist);
to_binary({stream, _StreamEngine}, Att) ->
iolist_to_binary(
- lists:reverse(foldl(Att, fun(Bin,Acc) -> [Bin|Acc] end, []))
+ lists:reverse(foldl(Att, fun(Bin, Acc) -> [Bin | Acc] end, []))
);
-to_binary(DataFun, Att) when is_function(DataFun)->
+to_binary(DataFun, Att) when is_function(DataFun) ->
Len = fetch(att_len, Att),
iolist_to_binary(
- lists:reverse(fold_streamed_data(
- DataFun,
- Len,
- fun(Data, Acc) -> [Data | Acc] end,
- []
- ))
+ lists:reverse(
+ fold_streamed_data(
+ DataFun,
+ Len,
+ fun(Data, Acc) -> [Data | Acc] end,
+ []
+ )
+ )
).
-
fold_streamed_data(_RcvFun, 0, _Fun, Acc) ->
Acc;
-fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0->
+fold_streamed_data(RcvFun, LenLeft, Fun, Acc) when LenLeft > 0 ->
Bin = RcvFun(),
ResultAcc = Fun(Bin, Acc),
fold_streamed_data(RcvFun, LenLeft - size(Bin), Fun, ResultAcc).
-
%% Upgrade an attachment record to a property list on demand. This is a one-way
%% operation as downgrading potentially truncates fields with important data.
-spec upgrade(#att{}) -> attachment().
@@ -712,7 +727,6 @@ upgrade(#att{} = Att) ->
upgrade(Att) ->
Att.
-
%% Downgrade is exposed for interactive convenience. In practice, unless done
%% manually, upgrades are always one-way.
downgrade(#att{} = Att) ->
@@ -729,7 +743,6 @@ downgrade(Att) ->
encoding = fetch(encoding, Att)
}.
-
upgrade_encoding(true) -> gzip;
upgrade_encoding(false) -> identity;
upgrade_encoding(Encoding) -> Encoding.
@@ -744,9 +757,12 @@ max_attachment_size(MaxAttSizeConfig) ->
MaxAttSize when is_list(MaxAttSize) ->
try list_to_integer(MaxAttSize) of
Result -> Result
- catch _:_ ->
- couch_log:error("invalid config value for max attachment size: ~p ", [MaxAttSize]),
- throw(internal_server_error)
+ catch
+ _:_ ->
+ couch_log:error("invalid config value for max attachment size: ~p ", [
+ MaxAttSize
+ ]),
+ throw(internal_server_error)
end;
MaxAttSize when is_integer(MaxAttSize) ->
MaxAttSize;
@@ -755,14 +771,13 @@ max_attachment_size(MaxAttSizeConfig) ->
throw(internal_server_error)
end.
-
-validate_attachment_size(AttName, AttSize, MaxAttSize)
- when is_integer(AttSize), AttSize > MaxAttSize ->
+validate_attachment_size(AttName, AttSize, MaxAttSize) when
+ is_integer(AttSize), AttSize > MaxAttSize
+->
throw({request_entity_too_large, {attachment, AttName}});
validate_attachment_size(_AttName, _AttSize, _MAxAttSize) ->
ok.
-
open_stream(StreamSrc, Data) ->
case couch_db:is_db(StreamSrc) of
true ->
@@ -776,7 +791,6 @@ open_stream(StreamSrc, Data) ->
end
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -785,28 +799,20 @@ open_stream(StreamSrc, Data) ->
%% Test utilities
-
empty_att() -> new().
-
upgraded_empty_att() ->
new([{headers, undefined}]).
-
%% Test groups
-
attachment_upgrade_test_() ->
{"Lazy record upgrade tests", [
{"Existing record fields don't upgrade",
- {with, empty_att(), [fun test_non_upgrading_fields/1]}
- },
- {"New fields upgrade",
- {with, empty_att(), [fun test_upgrading_fields/1]}
- }
+ {with, empty_att(), [fun test_non_upgrading_fields/1]}},
+ {"New fields upgrade", {with, empty_att(), [fun test_upgrading_fields/1]}}
]}.
-
attachment_defaults_test_() ->
{"Attachment defaults tests", [
{"Records retain old default values", [
@@ -827,14 +833,13 @@ attachment_field_api_test_() ->
fun test_transform/0
]}.
-
attachment_disk_term_test_() ->
BaseAttachment = new([
{name, <<"empty">>},
{type, <<"application/octet-stream">>},
{att_len, 0},
{disk_len, 0},
- {md5, <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>},
+ {md5, <<212, 29, 140, 217, 143, 0, 178, 4, 233, 128, 9, 152, 236, 248, 66, 126>>},
{revpos, 4},
{data, {stream, {couch_bt_engine_stream, {fake_fd, fake_sp}}}},
{encoding, identity}
@@ -843,14 +848,16 @@ attachment_disk_term_test_() ->
<<"empty">>,
<<"application/octet-stream">>,
fake_sp,
- 0, 0, 4,
- <<212,29,140,217,143,0,178,4,233,128,9,152,236,248,66,126>>,
+ 0,
+ 0,
+ 4,
+ <<212, 29, 140, 217, 143, 0, 178, 4, 233, 128, 9, 152, 236, 248, 66, 126>>,
identity
},
Headers = [{<<"X-Foo">>, <<"bar">>}],
ExtendedAttachment = store(headers, Headers, BaseAttachment),
ExtendedDiskTerm = {BaseDiskTerm, [{headers, Headers}]},
- FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd=fake_fd}}}]),
+ FakeDb = test_util:fake_db([{engine, {couch_bt_engine, #st{fd = fake_fd}}}]),
{"Disk term tests", [
?_assertEqual(BaseDiskTerm, to_disk_term(BaseAttachment)),
?_assertEqual(BaseAttachment, from_disk_term(FakeDb, BaseDiskTerm)),
@@ -858,7 +865,6 @@ attachment_disk_term_test_() ->
?_assertEqual(ExtendedAttachment, from_disk_term(FakeDb, ExtendedDiskTerm))
]}.
-
attachment_json_term_test_() ->
Props = [
{<<"content_type">>, <<"application/json">>},
@@ -892,16 +898,13 @@ attachment_json_term_test_() ->
?_assertThrow({bad_request, _}, inline_from_json(Att, InvalidProps))
]}.
-
attachment_stub_merge_test_() ->
%% Stub merging needs to demonstrate revpos matching, skipping, and missing
%% attachment errors.
{"Attachment stub merging tests", []}.
-
%% Test generators
-
test_non_upgrading_fields(Attachment) ->
Pairs = [
{name, "cat.gif"},
@@ -919,8 +922,8 @@ test_non_upgrading_fields(Attachment) ->
Updated = store(Field, Value, Attachment),
?assertMatch(#att{}, Updated)
end,
- Pairs).
-
+ Pairs
+ ).
test_upgrading_fields(Attachment) ->
?assertMatch(#att{}, Attachment),
@@ -929,13 +932,11 @@ test_upgrading_fields(Attachment) ->
UpdatedHeadersUndefined = store(headers, undefined, Attachment),
?assertMatch(X when is_list(X), UpdatedHeadersUndefined).
-
test_legacy_defaults(Attachment) ->
?assertEqual(<<>>, fetch(md5, Attachment)),
?assertEqual(0, fetch(revpos, Attachment)),
?assertEqual(identity, fetch(encoding, Attachment)).
-
test_elided_entries(Attachment) ->
?assertNot(lists:keymember(name, 1, Attachment)),
?assertNot(lists:keymember(type, 1, Attachment)),
@@ -943,26 +944,22 @@ test_elided_entries(Attachment) ->
?assertNot(lists:keymember(disk_len, 1, Attachment)),
?assertNot(lists:keymember(data, 1, Attachment)).
-
test_construction() ->
?assert(new() == new()),
Initialized = new([{name, <<"foo.bar">>}, {type, <<"application/qux">>}]),
?assertEqual(<<"foo.bar">>, fetch(name, Initialized)),
?assertEqual(<<"application/qux">>, fetch(type, Initialized)).
-
test_store_and_fetch() ->
Attachment = empty_att(),
?assertEqual(<<"abc">>, fetch(name, store(name, <<"abc">>, Attachment))),
?assertEqual(42, fetch(ans, store(ans, 42, Attachment))).
-
test_transform() ->
Attachment = new([{counter, 0}]),
Transformed = transform(counter, fun(Count) -> Count + 1 end, Attachment),
?assertEqual(1, fetch(counter, Transformed)).
-
max_attachment_size_test_() ->
{"Max attachment size tests", [
?_assertEqual(infinity, max_attachment_size("infinity")),
diff --git a/src/couch/src/couch_auth_cache.erl b/src/couch/src/couch_auth_cache.erl
index c564cee00..f361ab231 100644
--- a/src/couch/src/couch_auth_cache.erl
+++ b/src/couch/src/couch_auth_cache.erl
@@ -12,7 +12,6 @@
-module(couch_auth_cache).
-
-export([
get_user_creds/1,
get_user_creds/2,
@@ -23,35 +22,33 @@
ensure_users_db_exists/0
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_js_functions.hrl").
-
--spec get_user_creds(UserName::string() | binary()) ->
- {ok, Credentials::list(), term()} | nil.
+-spec get_user_creds(UserName :: string() | binary()) ->
+ {ok, Credentials :: list(), term()} | nil.
get_user_creds(UserName) ->
get_user_creds(nil, UserName).
--spec get_user_creds(Req::#httpd{} | nil, UserName::string() | binary()) ->
- {ok, Credentials::list(), term()} | nil.
+-spec get_user_creds(Req :: #httpd{} | nil, UserName :: string() | binary()) ->
+ {ok, Credentials :: list(), term()} | nil.
get_user_creds(Req, UserName) when is_list(UserName) ->
get_user_creds(Req, ?l2b(UserName));
-
get_user_creds(_Req, UserName) ->
- UserCreds = case get_admin(UserName) of
- nil ->
- get_from_db(UserName);
- Props ->
- case get_from_db(UserName) of
- nil ->
- Props;
- UserProps when is_list(UserProps) ->
- add_roles(Props, couch_util:get_value(<<"roles">>, UserProps))
- end
- end,
+ UserCreds =
+ case get_admin(UserName) of
+ nil ->
+ get_from_db(UserName);
+ Props ->
+ case get_from_db(UserName) of
+ nil ->
+ Props;
+ UserProps when is_list(UserProps) ->
+ add_roles(Props, couch_util:get_value(<<"roles">>, UserProps))
+ end
+ end,
validate_user_creds(UserCreds).
update_user_creds(_Req, UserDoc, _AuthCtx) ->
@@ -69,31 +66,34 @@ get_admin(UserName) when is_binary(UserName) ->
get_admin(?b2l(UserName));
get_admin(UserName) when is_list(UserName) ->
case config:get("admins", UserName) of
- "-hashed-" ++ HashedPwdAndSalt ->
- % the name is an admin, now check to see if there is a user doc
- % which has a matching name, salt, and password_sha
- [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
- make_admin_doc(HashedPwd, Salt);
- "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
- [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
- make_admin_doc(HashedPwd, Salt, Iterations);
- _Else ->
- nil
+ "-hashed-" ++ HashedPwdAndSalt ->
+ % the name is an admin, now check to see if there is a user doc
+ % which has a matching name, salt, and password_sha
+ [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+ make_admin_doc(HashedPwd, Salt);
+ "-pbkdf2-" ++ HashedPwdSaltAndIterations ->
+ [HashedPwd, Salt, Iterations] = string:tokens(HashedPwdSaltAndIterations, ","),
+ make_admin_doc(HashedPwd, Salt, Iterations);
+ _Else ->
+ nil
end.
make_admin_doc(HashedPwd, Salt) ->
- [{<<"roles">>, [<<"_admin">>]},
- {<<"salt">>, ?l2b(Salt)},
- {<<"password_scheme">>, <<"simple">>},
- {<<"password_sha">>, ?l2b(HashedPwd)}].
+ [
+ {<<"roles">>, [<<"_admin">>]},
+ {<<"salt">>, ?l2b(Salt)},
+ {<<"password_scheme">>, <<"simple">>},
+ {<<"password_sha">>, ?l2b(HashedPwd)}
+ ].
make_admin_doc(DerivedKey, Salt, Iterations) ->
- [{<<"roles">>, [<<"_admin">>]},
- {<<"salt">>, ?l2b(Salt)},
- {<<"iterations">>, list_to_integer(Iterations)},
- {<<"password_scheme">>, <<"pbkdf2">>},
- {<<"derived_key">>, ?l2b(DerivedKey)}].
-
+ [
+ {<<"roles">>, [<<"_admin">>]},
+ {<<"salt">>, ?l2b(Salt)},
+ {<<"iterations">>, list_to_integer(Iterations)},
+ {<<"password_scheme">>, <<"pbkdf2">>},
+ {<<"derived_key">>, ?l2b(DerivedKey)}
+ ].
get_from_db(UserName) ->
ok = ensure_users_db_exists(),
@@ -104,69 +104,69 @@ get_from_db(UserName) ->
{DocProps} = couch_doc:to_json_obj(Doc, []),
DocProps
catch
- _:_Error ->
- nil
+ _:_Error ->
+ nil
end
end).
-
validate_user_creds(nil) ->
nil;
validate_user_creds(UserCreds) ->
case couch_util:get_value(<<"_conflicts">>, UserCreds) of
- undefined ->
- ok;
- _ConflictList ->
- throw({unauthorized,
- <<"User document conflicts must be resolved before the document",
- " is used for authentication purposes.">>
- })
+ undefined ->
+ ok;
+ _ConflictList ->
+ throw(
+ {unauthorized,
+ <<"User document conflicts must be resolved before the document",
+ " is used for authentication purposes.">>}
+ )
end,
{ok, UserCreds, nil}.
-
users_db() ->
DbNameList = config:get("couch_httpd_auth", "authentication_db", "_users"),
?l2b(DbNameList).
-
ensure_users_db_exists() ->
Options = [?ADMIN_CTX, nologifmissing],
case couch_db:open(users_db(), Options) of
- {ok, Db} ->
- ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
- couch_db:close(Db);
- _Error ->
- {ok, Db} = couch_db:create(users_db(), Options),
- ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
- couch_db:close(Db)
+ {ok, Db} ->
+ ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
+ couch_db:close(Db);
+ _Error ->
+ {ok, Db} = couch_db:create(users_db(), Options),
+ ok = ensure_auth_ddoc_exists(Db, <<"_design/_auth">>),
+ couch_db:close(Db)
end,
ok.
-
ensure_auth_ddoc_exists(Db, DDocId) ->
case couch_db:open_doc(Db, DDocId) of
- {not_found, _Reason} ->
- {ok, AuthDesign} = auth_design_doc(DDocId),
- {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
- ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
- ok;
- _ ->
- Props1 = lists:keyreplace(<<"validate_doc_update">>, 1, Props,
- {<<"validate_doc_update">>,
- ?AUTH_DB_DOC_VALIDATE_FUNCTION}),
- couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
- end
+ {not_found, _Reason} ->
+ {ok, AuthDesign} = auth_design_doc(DDocId),
+ {ok, _Rev} = couch_db:update_doc(Db, AuthDesign, []);
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ case couch_util:get_value(<<"validate_doc_update">>, Props, []) of
+ ?AUTH_DB_DOC_VALIDATE_FUNCTION ->
+ ok;
+ _ ->
+ Props1 = lists:keyreplace(
+ <<"validate_doc_update">>,
+ 1,
+ Props,
+ {<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
+ ),
+ couch_db:update_doc(Db, couch_doc:from_json_obj({Props1}), [])
+ end
end,
ok.
auth_design_doc(DocId) ->
DocProps = [
{<<"_id">>, DocId},
- {<<"language">>,<<"javascript">>},
+ {<<"language">>, <<"javascript">>},
{<<"validate_doc_update">>, ?AUTH_DB_DOC_VALIDATE_FUNCTION}
],
{ok, couch_doc:from_json_obj({DocProps})}.
diff --git a/src/couch/src/couch_base32.erl b/src/couch/src/couch_base32.erl
index d8d754f5e..776fe773d 100644
--- a/src/couch/src/couch_base32.erl
+++ b/src/couch/src/couch_base32.erl
@@ -16,7 +16,6 @@
-define(SET, <<"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567">>).
-
-spec encode(binary()) -> binary().
encode(Plain) when is_binary(Plain) ->
IoList = encode(Plain, 0, byte_size(Plain) * 8, []),
@@ -24,54 +23,63 @@ encode(Plain) when is_binary(Plain) ->
encode(_Plain, _ByteOffset, 0, Acc) ->
Acc;
-
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 8 ->
<<A:5, B:3>> = binary:part(Plain, ByteOffset, 1),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B bsl 2)),
- "======">> | Acc];
-
+ [<<(binary:at(?SET, A)), (binary:at(?SET, B bsl 2)), "======">> | Acc];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 16 ->
<<A:5, B:5, C:5, D:1>> = binary:part(Plain, ByteOffset, 2),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D bsl 4)),
- "====">> | Acc];
-
+ [
+ <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D bsl 4)),
+ "===="
+ >>
+ | Acc
+ ];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 24 ->
<<A:5, B:5, C:5, D:5, E:4>> = binary:part(Plain, ByteOffset, 3),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E bsl 1)),
- "===">> | Acc];
-
+ [
+ <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D)),
+ (binary:at(?SET, E bsl 1)),
+ "==="
+ >>
+ | Acc
+ ];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining == 32 ->
<<A:5, B:5, C:5, D:5, E:5, F:5, G:2>> = binary:part(Plain, ByteOffset, 4),
- [<<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E)),
- (binary:at(?SET, F)),
- (binary:at(?SET, G bsl 3)),
- "=">> | Acc];
-
+ [
+ <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D)),
+ (binary:at(?SET, E)),
+ (binary:at(?SET, F)),
+ (binary:at(?SET, G bsl 3)),
+ "="
+ >>
+ | Acc
+ ];
encode(Plain, ByteOffset, BitsRemaining, Acc) when BitsRemaining >= 40 ->
<<A:5, B:5, C:5, D:5, E:5, F:5, G:5, H:5>> =
binary:part(Plain, ByteOffset, 5),
- Output = <<(binary:at(?SET, A)),
- (binary:at(?SET, B)),
- (binary:at(?SET, C)),
- (binary:at(?SET, D)),
- (binary:at(?SET, E)),
- (binary:at(?SET, F)),
- (binary:at(?SET, G)),
- (binary:at(?SET, H))>>,
- encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]).
-
+ Output = <<
+ (binary:at(?SET, A)),
+ (binary:at(?SET, B)),
+ (binary:at(?SET, C)),
+ (binary:at(?SET, D)),
+ (binary:at(?SET, E)),
+ (binary:at(?SET, F)),
+ (binary:at(?SET, G)),
+ (binary:at(?SET, H))
+ >>,
+ encode(Plain, ByteOffset + 5, BitsRemaining - 40, [Output | Acc]).
-spec decode(binary()) -> binary().
decode(Encoded) when is_binary(Encoded) ->
@@ -83,39 +91,60 @@ decode(Encoded, ByteOffset, Acc) when ByteOffset == byte_size(Encoded) ->
decode(Encoded, ByteOffset, Acc) ->
case binary:part(Encoded, ByteOffset, 8) of
<<A:1/binary, B:1/binary, "======">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B) bsr 2):3>> | Acc];
+ [<<(find_in_set(A)):5, (find_in_set(B) bsr 2):3>> | Acc];
<<A:1/binary, B:1/binary, C:1/binary, D:1/binary, "====">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D) bsr 4):1>> | Acc];
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D) bsr 4):1
+ >>
+ | Acc
+ ];
<<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, "===">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E) bsr 1):4>> | Acc];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary,
- E:1/binary, F:1/binary, G:1/binary, "=">> ->
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E)):5,
- (find_in_set(F)):5,
- (find_in_set(G) bsr 3):2>> | Acc];
- <<A:1/binary, B:1/binary, C:1/binary, D:1/binary,
- E:1/binary, F:1/binary, G:1/binary, H:1/binary>> ->
- decode(Encoded, ByteOffset + 8,
- [<<(find_in_set(A)):5,
- (find_in_set(B)):5,
- (find_in_set(C)):5,
- (find_in_set(D)):5,
- (find_in_set(E)):5,
- (find_in_set(F)):5,
- (find_in_set(G)):5,
- (find_in_set(H)):5>> | Acc])
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D)):5,
+ (find_in_set(E) bsr 1):4
+ >>
+ | Acc
+ ];
+ <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary, "=">> ->
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D)):5,
+ (find_in_set(E)):5,
+ (find_in_set(F)):5,
+ (find_in_set(G) bsr 3):2
+ >>
+ | Acc
+ ];
+ <<A:1/binary, B:1/binary, C:1/binary, D:1/binary, E:1/binary, F:1/binary, G:1/binary,
+ H:1/binary>> ->
+ decode(
+ Encoded,
+ ByteOffset + 8,
+ [
+ <<
+ (find_in_set(A)):5,
+ (find_in_set(B)):5,
+ (find_in_set(C)):5,
+ (find_in_set(D)):5,
+ (find_in_set(E)):5,
+ (find_in_set(F)):5,
+ (find_in_set(G)):5,
+ (find_in_set(H)):5
+ >>
+ | Acc
+ ]
+ )
end.
find_in_set(Char) ->
diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
index 48e751a82..7d2390556 100644
--- a/src/couch/src/couch_bt_engine.erl
+++ b/src/couch/src/couch_bt_engine.erl
@@ -80,12 +80,10 @@
finish_compaction/4
]).
-
-export([
init_state/4
]).
-
-export([
id_tree_split/1,
id_tree_join/2,
@@ -105,7 +103,6 @@
purge_seq_tree_join/2
]).
-
% Used by the compactor
-export([
update_header/2,
@@ -113,12 +110,10 @@
copy_props/2
]).
-
-include_lib("kernel/include/file.hrl").
-include_lib("couch/include/couch_db.hrl").
-include("couch_bt_engine.hrl").
-
exists(FilePath) ->
case is_file(FilePath) of
true ->
@@ -127,7 +122,6 @@ exists(FilePath) ->
is_file(FilePath ++ ".compact")
end.
-
delete(RootDir, FilePath, Async) ->
%% Delete any leftover compaction files. If we don't do this a
%% subsequent request for this DB will try to open them to use
@@ -137,70 +131,69 @@ delete(RootDir, FilePath, Async) ->
% Delete the actual database file
couch_file:delete(RootDir, FilePath, Async).
-
delete_compaction_files(RootDir, FilePath, DelOpts) ->
- lists:foreach(fun(Ext) ->
- couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
- end, [".compact", ".compact.data", ".compact.meta"]).
-
+ lists:foreach(
+ fun(Ext) ->
+ couch_file:delete(RootDir, FilePath ++ Ext, DelOpts)
+ end,
+ [".compact", ".compact.data", ".compact.meta"]
+ ).
init(FilePath, Options) ->
{ok, Fd} = open_db_file(FilePath, Options),
- Header = case lists:member(create, Options) of
- true ->
- delete_compaction_files(FilePath),
- Header0 = couch_bt_engine_header:new(),
- Header1 = init_set_props(Fd, Header0, Options),
- ok = couch_file:write_header(Fd, Header1),
- Header1;
- false ->
- case couch_file:read_header(Fd) of
- {ok, Header0} ->
- Header0;
- no_valid_header ->
- delete_compaction_files(FilePath),
- Header0 = couch_bt_engine_header:new(),
- ok = couch_file:write_header(Fd, Header0),
- Header0
- end
- end,
+ Header =
+ case lists:member(create, Options) of
+ true ->
+ delete_compaction_files(FilePath),
+ Header0 = couch_bt_engine_header:new(),
+ Header1 = init_set_props(Fd, Header0, Options),
+ ok = couch_file:write_header(Fd, Header1),
+ Header1;
+ false ->
+ case couch_file:read_header(Fd) of
+ {ok, Header0} ->
+ Header0;
+ no_valid_header ->
+ delete_compaction_files(FilePath),
+ Header0 = couch_bt_engine_header:new(),
+ ok = couch_file:write_header(Fd, Header0),
+ Header0
+ end
+ end,
{ok, init_state(FilePath, Fd, Header, Options)}.
-
terminate(_Reason, St) ->
% If the reason we died is because our fd disappeared
% then we don't need to try closing it again.
Ref = St#st.fd_monitor,
- if Ref == closed -> ok; true ->
- ok = couch_file:close(St#st.fd),
- receive
- {'DOWN', Ref, _, _, _} ->
- ok
+ if
+ Ref == closed ->
+ ok;
+ true ->
+ ok = couch_file:close(St#st.fd),
+ receive
+ {'DOWN', Ref, _, _, _} ->
+ ok
after 500 ->
ok
- end
+ end
end,
couch_util:shutdown_sync(St#st.fd),
ok.
-
handle_db_updater_call(Msg, St) ->
{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
-handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor=Ref} = St) ->
- {stop, normal, St#st{fd=undefined, fd_monitor=closed}}.
-
+handle_db_updater_info({'DOWN', Ref, _, _, _}, #st{fd_monitor = Ref} = St) ->
+ {stop, normal, St#st{fd = undefined, fd_monitor = closed}}.
incref(St) ->
{ok, St#st{fd_monitor = erlang:monitor(process, St#st.fd)}}.
-
decref(St) ->
true = erlang:demonitor(St#st.fd_monitor, [flush]),
ok.
-
monitored_by(St) ->
case erlang:process_info(St#st.fd, monitored_by) of
{monitored_by, Pids} ->
@@ -209,33 +202,26 @@ monitored_by(St) ->
[]
end.
-
last_activity(#st{fd = Fd}) ->
couch_file:last_read(Fd).
-
get_compacted_seq(#st{header = Header}) ->
couch_bt_engine_header:get(Header, compacted_seq).
-
get_del_doc_count(#st{} = St) ->
{ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
element(2, Reds).
-
get_disk_version(#st{header = Header}) ->
couch_bt_engine_header:get(Header, disk_version).
-
get_doc_count(#st{} = St) ->
{ok, Reds} = couch_btree:full_reduce(St#st.id_tree),
element(1, Reds).
-
get_epochs(#st{header = Header}) ->
couch_bt_engine_header:get(Header, epochs).
-
get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
{stop, PurgeSeq}
@@ -243,7 +229,6 @@ get_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
{ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, [{dir, rev}]),
PurgeSeq.
-
get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
Fun = fun({PurgeSeq, _, _, _}, _Reds, _Acc) ->
{stop, PurgeSeq}
@@ -251,27 +236,25 @@ get_oldest_purge_seq(#st{purge_seq_tree = PurgeSeqTree}) ->
{ok, _, PurgeSeq} = couch_btree:fold(PurgeSeqTree, Fun, 0, []),
PurgeSeq.
-
get_purge_infos_limit(#st{header = Header}) ->
couch_bt_engine_header:get(Header, purge_infos_limit).
-
get_revs_limit(#st{header = Header}) ->
couch_bt_engine_header:get(Header, revs_limit).
-
get_size_info(#st{} = St) ->
{ok, FileSize} = couch_file:bytes(St#st.fd),
{ok, DbReduction} = couch_btree:full_reduce(St#st.id_tree),
SizeInfo0 = element(3, DbReduction),
- SizeInfo = case SizeInfo0 of
- SI when is_record(SI, size_info) ->
- SI;
- {AS, ES} ->
- #size_info{active=AS, external=ES};
- AS ->
- #size_info{active=AS}
- end,
+ SizeInfo =
+ case SizeInfo0 of
+ SI when is_record(SI, size_info) ->
+ SI;
+ {AS, ES} ->
+ #size_info{active = AS, external = ES};
+ AS ->
+ #size_info{active = AS}
+ end,
ActiveSize = active_size(St, SizeInfo),
ExternalSize = SizeInfo#size_info.external,
[
@@ -280,7 +263,6 @@ get_size_info(#st{} = St) ->
{file, FileSize}
].
-
partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, SizesAcc}) ->
case couch_partition:is_member(Key, Partition) of
true ->
@@ -288,22 +270,18 @@ partition_size_cb(traverse, Key, {DC, DDC, Sizes}, {Partition, DCAcc, DDCAcc, Si
false ->
{ok, {Partition, DCAcc, DDCAcc, SizesAcc}}
end;
-
partition_size_cb(visit, FDI, _PrevReds, {Partition, DCAcc, DDCAcc, Acc}) ->
InPartition = couch_partition:is_member(FDI#full_doc_info.id, Partition),
Deleted = FDI#full_doc_info.deleted,
case {InPartition, Deleted} of
{true, true} ->
- {ok, {Partition, DCAcc, DDCAcc + 1,
- reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
+ {ok, {Partition, DCAcc, DDCAcc + 1, reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
{true, false} ->
- {ok, {Partition, DCAcc + 1, DDCAcc,
- reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
+ {ok, {Partition, DCAcc + 1, DDCAcc, reduce_sizes(FDI#full_doc_info.sizes, Acc)}};
{false, _} ->
{ok, {Partition, DCAcc, DDCAcc, Acc}}
end.
-
get_partition_info(#st{} = St, Partition) ->
StartKey = couch_partition:start_key(Partition),
EndKey = couch_partition:end_key(Partition),
@@ -322,7 +300,6 @@ get_partition_info(#st{} = St, Partition) ->
]}
].
-
get_security(#st{header = Header} = St) ->
case couch_bt_engine_header:get(Header, security_ptr) of
undefined ->
@@ -332,7 +309,6 @@ get_security(#st{header = Header} = St) ->
SecProps
end.
-
get_props(#st{header = Header} = St) ->
case couch_bt_engine_header:get(Header, props_ptr) of
undefined ->
@@ -342,15 +318,12 @@ get_props(#st{header = Header} = St) ->
Props
end.
-
get_update_seq(#st{header = Header}) ->
couch_bt_engine_header:get(Header, update_seq).
-
get_uuid(#st{header = Header}) ->
couch_bt_engine_header:get(Header, uuid).
-
set_revs_limit(#st{header = Header} = St, RevsLimit) ->
NewSt = St#st{
header = couch_bt_engine_header:set(Header, [
@@ -360,7 +333,6 @@ set_revs_limit(#st{header = Header} = St, RevsLimit) ->
},
{ok, increment_update_seq(NewSt)}.
-
set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) ->
NewSt = St#st{
header = couch_bt_engine_header:set(Header, [
@@ -370,7 +342,6 @@ set_purge_infos_limit(#st{header = Header} = St, PurgeInfosLimit) ->
},
{ok, increment_update_seq(NewSt)}.
-
set_security(#st{header = Header} = St, NewSecurity) ->
Options = [{compression, St#st.compression}],
{ok, Ptr, _} = couch_file:append_term(St#st.fd, NewSecurity, Options),
@@ -382,7 +353,6 @@ set_security(#st{header = Header} = St, NewSecurity) ->
},
{ok, increment_update_seq(NewSt)}.
-
set_props(#st{header = Header} = St, Props) ->
Options = [{compression, St#st.compression}],
{ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
@@ -394,22 +364,25 @@ set_props(#st{header = Header} = St, Props) ->
},
{ok, increment_update_seq(NewSt)}.
-
open_docs(#st{} = St, DocIds) ->
Results = couch_btree:lookup(St#st.id_tree, DocIds),
- lists:map(fun
- ({ok, FDI}) -> FDI;
- (not_found) -> not_found
- end, Results).
-
+ lists:map(
+ fun
+ ({ok, FDI}) -> FDI;
+ (not_found) -> not_found
+ end,
+ Results
+ ).
open_local_docs(#st{} = St, DocIds) ->
Results = couch_btree:lookup(St#st.local_tree, DocIds),
- lists:map(fun
- ({ok, Doc}) -> Doc;
- (not_found) -> not_found
- end, Results).
-
+ lists:map(
+ fun
+ ({ok, Doc}) -> Doc;
+ (not_found) -> not_found
+ end,
+ Results
+ ).
read_doc_body(#st{} = St, #doc{} = Doc) ->
{ok, {Body, Atts}} = couch_file:pread_term(St#st.fd, Doc#doc.body),
@@ -418,14 +391,15 @@ read_doc_body(#st{} = St, #doc{} = Doc) ->
atts = Atts
}.
-
load_purge_infos(St, UUIDs) ->
Results = couch_btree:lookup(St#st.purge_tree, UUIDs),
- lists:map(fun
- ({ok, Info}) -> Info;
- (not_found) -> not_found
- end, Results).
-
+ lists:map(
+ fun
+ ({ok, Info}) -> Info;
+ (not_found) -> not_found
+ end,
+ Results
+ ).
serialize_doc(#st{} = St, #doc{} = Doc) ->
Compress = fun(Term) ->
@@ -449,7 +423,6 @@ serialize_doc(#st{} = St, #doc{} = Doc) ->
meta = [{comp_body, Body} | Doc#doc.meta]
}.
-
write_doc_body(St, #doc{} = Doc) ->
#st{
fd = Fd
@@ -457,46 +430,57 @@ write_doc_body(St, #doc{} = Doc) ->
{ok, Ptr, Written} = couch_file:append_raw_chunk(Fd, Doc#doc.body),
{ok, Doc#doc{body = Ptr}, Written}.
-
write_doc_infos(#st{} = St, Pairs, LocalDocs) ->
#st{
id_tree = IdTree,
seq_tree = SeqTree,
local_tree = LocalTree
} = St,
- FinalAcc = lists:foldl(fun({OldFDI, NewFDI}, Acc) ->
- {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
- case {OldFDI, NewFDI} of
- {not_found, #full_doc_info{}} ->
- {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
- {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
- NewAddAcc = [NewFDI | AddAcc],
- NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
- {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
- {#full_doc_info{id = Id}, not_found} ->
- NewRemIdsAcc = [Id | RemIdsAcc],
- NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
- {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
- end
- end, {[], [], []}, Pairs),
+ FinalAcc = lists:foldl(
+ fun({OldFDI, NewFDI}, Acc) ->
+ {AddAcc, RemIdsAcc, RemSeqsAcc} = Acc,
+ case {OldFDI, NewFDI} of
+ {not_found, #full_doc_info{}} ->
+ {[NewFDI | AddAcc], RemIdsAcc, RemSeqsAcc};
+ {#full_doc_info{id = Id}, #full_doc_info{id = Id}} ->
+ NewAddAcc = [NewFDI | AddAcc],
+ NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
+ {NewAddAcc, RemIdsAcc, NewRemSeqsAcc};
+ {#full_doc_info{id = Id}, not_found} ->
+ NewRemIdsAcc = [Id | RemIdsAcc],
+ NewRemSeqsAcc = [OldFDI#full_doc_info.update_seq | RemSeqsAcc],
+ {AddAcc, NewRemIdsAcc, NewRemSeqsAcc}
+ end
+ end,
+ {[], [], []},
+ Pairs
+ ),
{Add, RemIds, RemSeqs} = FinalAcc,
{ok, IdTree2} = couch_btree:add_remove(IdTree, Add, RemIds),
{ok, SeqTree2} = couch_btree:add_remove(SeqTree, Add, RemSeqs),
- {AddLDocs, RemLDocIds} = lists:foldl(fun(Doc, {AddAcc, RemAcc}) ->
- case Doc#doc.deleted of
- true ->
- {AddAcc, [Doc#doc.id | RemAcc]};
- false ->
- {[Doc | AddAcc], RemAcc}
- end
- end, {[], []}, LocalDocs),
+ {AddLDocs, RemLDocIds} = lists:foldl(
+ fun(Doc, {AddAcc, RemAcc}) ->
+ case Doc#doc.deleted of
+ true ->
+ {AddAcc, [Doc#doc.id | RemAcc]};
+ false ->
+ {[Doc | AddAcc], RemAcc}
+ end
+ end,
+ {[], []},
+ LocalDocs
+ ),
{ok, LocalTree2} = couch_btree:add_remove(LocalTree, AddLDocs, RemLDocIds),
- NewUpdateSeq = lists:foldl(fun(#full_doc_info{update_seq=Seq}, Acc) ->
- erlang:max(Seq, Acc)
- end, get_update_seq(St), Add),
+ NewUpdateSeq = lists:foldl(
+ fun(#full_doc_info{update_seq = Seq}, Acc) ->
+ erlang:max(Seq, Acc)
+ end,
+ get_update_seq(St),
+ Add
+ ),
NewHeader = couch_bt_engine_header:set(St#st.header, [
{update_seq, NewUpdateSeq}
@@ -510,7 +494,6 @@ write_doc_infos(#st{} = St, Pairs, LocalDocs) ->
needs_commit = true
}}.
-
purge_docs(#st{} = St, Pairs, PurgeInfos) ->
#st{
id_tree = IdTree,
@@ -529,10 +512,11 @@ purge_docs(#st{} = St, Pairs, PurgeInfos) ->
% We bump NewUpdateSeq because we have to ensure that
% indexers see that they need to process the new purge
% information.
- UpdateSeq = case NewSeq == CurrSeq of
- true -> CurrSeq + 1;
- false -> NewSeq
- end,
+ UpdateSeq =
+ case NewSeq == CurrSeq of
+ true -> CurrSeq + 1;
+ false -> NewSeq
+ end,
Header = couch_bt_engine_header:set(St#st.header, [
{update_seq, UpdateSeq}
]),
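
The hunk above only re-indents the update-sequence bump; the in-code comment explains why the sequence must still advance when a purge adds no new changes. A minimal standalone sketch of that decision, outside the diff (module and function names are illustrative, not part of the tree):

```
-module(purge_seq_example).
-export([next_update_seq/2]).

%% If the purge wrote nothing new, force the sequence forward anyway so
%% indexers notice there is purge information to process.
next_update_seq(CurrSeq, NewSeq) ->
    case NewSeq == CurrSeq of
        true -> CurrSeq + 1;
        false -> NewSeq
    end.
```
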
@@ -550,7 +534,6 @@ purge_docs(#st{} = St, Pairs, PurgeInfos) ->
needs_commit = true
}}.
-
copy_purge_infos(#st{} = St, PurgeInfos) ->
#st{
purge_tree = PurgeTree,
@@ -559,12 +542,11 @@ copy_purge_infos(#st{} = St, PurgeInfos) ->
{ok, PurgeTree2} = couch_btree:add(PurgeTree, PurgeInfos),
{ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, PurgeInfos),
{ok, St#st{
- purge_tree = PurgeTree2,
- purge_seq_tree = PurgeSeqTree2,
- needs_commit = true
+ purge_tree = PurgeTree2,
+ purge_seq_tree = PurgeSeqTree2,
+ needs_commit = true
}}.
-
commit_data(St) ->
#st{
fd = Fd,
@@ -587,32 +569,26 @@ commit_data(St) ->
{ok, St}
end.
-
open_write_stream(#st{} = St, Options) ->
couch_stream:open({couch_bt_engine_stream, {St#st.fd, []}}, Options).
-
open_read_stream(#st{} = St, StreamSt) ->
{ok, {couch_bt_engine_stream, {St#st.fd, StreamSt}}}.
-
is_active_stream(#st{} = St, {couch_bt_engine_stream, {Fd, _}}) ->
St#st.fd == Fd;
is_active_stream(_, _) ->
false.
-
fold_docs(St, UserFun, UserAcc, Options) ->
fold_docs_int(St, St#st.id_tree, UserFun, UserAcc, Options).
-
fold_local_docs(St, UserFun, UserAcc, Options) ->
case fold_docs_int(St, St#st.local_tree, UserFun, UserAcc, Options) of
{ok, _Reds, FinalAcc} -> {ok, null, FinalAcc};
{ok, FinalAcc} -> {ok, FinalAcc}
end.
-
fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
Fun = fun drop_reductions/4,
InAcc = {UserFun, UserAcc},
@@ -621,13 +597,13 @@ fold_changes(St, SinceSeq, UserFun, UserAcc, Options) ->
{_, FinalUserAcc} = OutAcc,
{ok, FinalUserAcc}.
-
fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) ->
PurgeSeqTree = St#st.purge_seq_tree,
StartSeq = StartSeq0 + 1,
MinSeq = get_oldest_purge_seq(St),
- if MinSeq =< StartSeq -> ok; true ->
- erlang:error({invalid_start_purge_seq, StartSeq0})
+ if
+ MinSeq =< StartSeq -> ok;
+ true -> erlang:error({invalid_start_purge_seq, StartSeq0})
end,
Wrapper = fun(Info, _Reds, UAcc) ->
UserFun(Info, UAcc)
@@ -636,7 +612,6 @@ fold_purge_infos(St, StartSeq0, UserFun, UserAcc, Options) ->
{ok, _, OutAcc} = couch_btree:fold(PurgeSeqTree, Wrapper, UserAcc, Opts),
{ok, OutAcc}.
-
count_changes_since(St, SinceSeq) ->
BTree = St#st.seq_tree,
FoldFun = fun(_SeqStart, PartialReds, 0) ->
@@ -646,13 +621,11 @@ count_changes_since(St, SinceSeq) ->
{ok, Changes} = couch_btree:fold_reduce(BTree, FoldFun, 0, Opts),
Changes.
-
start_compaction(St, DbName, Options, Parent) ->
Args = [St, DbName, Options, Parent],
Pid = spawn_link(couch_bt_engine_compactor, start, Args),
{ok, St, Pid}.
-
finish_compaction(OldState, DbName, Options, CompactFilePath) ->
{ok, NewState1} = ?MODULE:init(CompactFilePath, Options),
OldSeq = get_update_seq(OldState),
@@ -661,15 +634,16 @@ finish_compaction(OldState, DbName, Options, CompactFilePath) ->
true ->
finish_compaction_int(OldState, NewState1);
false ->
- couch_log:info("Compaction file still behind main file "
- "(update seq=~p. compact update seq=~p). Retrying.",
- [OldSeq, NewSeq]),
+ couch_log:info(
+ "Compaction file still behind main file "
+ "(update seq=~p. compact update seq=~p). Retrying.",
+ [OldSeq, NewSeq]
+ ),
ok = decref(NewState1),
start_compaction(OldState, DbName, Options, self())
end.
-
-id_tree_split(#full_doc_info{}=Info) ->
+id_tree_split(#full_doc_info{} = Info) ->
#full_doc_info{
id = Id,
update_seq = Seq,
@@ -679,11 +653,9 @@ id_tree_split(#full_doc_info{}=Info) ->
} = Info,
{Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
id_tree_join(Id, {HighSeq, Deleted, DiskTree}) ->
% Handle old formats before data_size was added
id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree});
-
id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
#full_doc_info{
id = Id,
@@ -693,29 +665,35 @@ id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) ->
rev_tree = rev_tree(DiskTree)
}.
-
id_tree_reduce(reduce, FullDocInfos) ->
- lists:foldl(fun(Info, {NotDeleted, Deleted, Sizes}) ->
- Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
- case Info#full_doc_info.deleted of
- true ->
- {NotDeleted, Deleted + 1, Sizes2};
- false ->
- {NotDeleted + 1, Deleted, Sizes2}
- end
- end, {0, 0, #size_info{}}, FullDocInfos);
+ lists:foldl(
+ fun(Info, {NotDeleted, Deleted, Sizes}) ->
+ Sizes2 = reduce_sizes(Sizes, Info#full_doc_info.sizes),
+ case Info#full_doc_info.deleted of
+ true ->
+ {NotDeleted, Deleted + 1, Sizes2};
+ false ->
+ {NotDeleted + 1, Deleted, Sizes2}
+ end
+ end,
+ {0, 0, #size_info{}},
+ FullDocInfos
+ );
id_tree_reduce(rereduce, Reds) ->
- lists:foldl(fun
- ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
- % pre 1.2 format, will be upgraded on compaction
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
- ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
- AccSizes2 = reduce_sizes(AccSizes, Sizes),
- {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
- end, {0, 0, #size_info{}}, Reds).
-
-
-seq_tree_split(#full_doc_info{}=Info) ->
+ lists:foldl(
+ fun
+ ({NotDeleted, Deleted}, {AccNotDeleted, AccDeleted, _AccSizes}) ->
+ % pre 1.2 format, will be upgraded on compaction
+ {AccNotDeleted + NotDeleted, AccDeleted + Deleted, nil};
+ ({NotDeleted, Deleted, Sizes}, {AccNotDeleted, AccDeleted, AccSizes}) ->
+ AccSizes2 = reduce_sizes(AccSizes, Sizes),
+ {AccNotDeleted + NotDeleted, AccDeleted + Deleted, AccSizes2}
+ end,
+ {0, 0, #size_info{}},
+ Reds
+ ).
+
+seq_tree_split(#full_doc_info{} = Info) ->
#full_doc_info{
id = Id,
update_seq = Seq,
@@ -725,10 +703,8 @@ seq_tree_split(#full_doc_info{}=Info) ->
} = Info,
{Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}.
-
seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) ->
seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree});
-
seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
#full_doc_info{
id = Id,
@@ -737,37 +713,39 @@ seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) ->
sizes = join_sizes(Sizes),
rev_tree = rev_tree(DiskTree)
};
-
seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) ->
% Older versions stored #doc_info records in the seq_tree.
% Compact to upgrade.
- Revs = lists:map(fun({Rev, Seq, Bp}) ->
- #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
- end, RevInfos),
- DeletedRevs = lists:map(fun({Rev, Seq, Bp}) ->
- #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
- end, DeletedRevInfos),
+ Revs = lists:map(
+ fun({Rev, Seq, Bp}) ->
+ #rev_info{rev = Rev, seq = Seq, deleted = false, body_sp = Bp}
+ end,
+ RevInfos
+ ),
+ DeletedRevs = lists:map(
+ fun({Rev, Seq, Bp}) ->
+ #rev_info{rev = Rev, seq = Seq, deleted = true, body_sp = Bp}
+ end,
+ DeletedRevInfos
+ ),
#doc_info{
id = Id,
high_seq = KeySeq,
revs = Revs ++ DeletedRevs
}.
-
seq_tree_reduce(reduce, DocInfos) ->
% count the number of documents
length(DocInfos);
seq_tree_reduce(rereduce, Reds) ->
lists:sum(Reds).
-
local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) ->
#doc{
id = Id,
body = BodyData
} = Doc,
{Id, {binary_to_integer(Rev), BodyData}};
-
local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
#doc{
id = Id,
@@ -775,14 +753,12 @@ local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_integer(Rev) ->
} = Doc,
{Id, {Rev, BodyData}}.
-
local_tree_join(Id, {Rev, BodyData}) when is_binary(Rev) ->
#doc{
id = Id,
revs = {0, [Rev]},
body = BodyData
};
-
local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
#doc{
id = Id,
@@ -790,30 +766,24 @@ local_tree_join(Id, {Rev, BodyData}) when is_integer(Rev) ->
body = BodyData
}.
-
purge_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
{UUID, {PurgeSeq, DocId, Revs}}.
-
purge_tree_join(UUID, {PurgeSeq, DocId, Revs}) ->
{PurgeSeq, UUID, DocId, Revs}.
-
purge_seq_tree_split({PurgeSeq, UUID, DocId, Revs}) ->
{PurgeSeq, {UUID, DocId, Revs}}.
-
purge_seq_tree_join(PurgeSeq, {UUID, DocId, Revs}) ->
{PurgeSeq, UUID, DocId, Revs}.
-
purge_tree_reduce(reduce, IdRevs) ->
% count the number of purge requests
length(IdRevs);
purge_tree_reduce(rereduce, Reds) ->
lists:sum(Reds).
-
set_update_seq(#st{header = Header} = St, UpdateSeq) ->
{ok, St#st{
header = couch_bt_engine_header:set(Header, [
@@ -822,7 +792,6 @@ set_update_seq(#st{header = Header} = St, UpdateSeq) ->
needs_commit = true
}}.
-
copy_security(#st{header = Header} = St, SecProps) ->
Options = [{compression, St#st.compression}],
{ok, Ptr, _} = couch_file:append_term(St#st.fd, SecProps, Options),
@@ -833,7 +802,6 @@ copy_security(#st{header = Header} = St, SecProps) ->
needs_commit = true
}}.
-
copy_props(#st{header = Header} = St, Props) ->
Options = [{compression, St#st.compression}],
{ok, Ptr, _} = couch_file:append_term(St#st.fd, Props, Options),
@@ -844,7 +812,6 @@ copy_props(#st{header = Header} = St, Props) ->
needs_commit = true
}}.
-
open_db_file(FilePath, Options) ->
case couch_file:open(FilePath, Options) of
{ok, Fd} ->
@@ -866,7 +833,6 @@ open_db_file(FilePath, Options) ->
throw(Error)
end.
-
init_state(FilePath, Fd, Header0, Options) ->
ok = couch_file:sync(Fd),
@@ -878,26 +844,26 @@ init_state(FilePath, Fd, Header0, Options) ->
IdTreeState = couch_bt_engine_header:id_tree_state(Header),
{ok, IdTree} = couch_btree:open(IdTreeState, Fd, [
- {split, fun ?MODULE:id_tree_split/1},
- {join, fun ?MODULE:id_tree_join/2},
- {reduce, fun ?MODULE:id_tree_reduce/2},
- {compression, Compression}
- ]),
+ {split, fun ?MODULE:id_tree_split/1},
+ {join, fun ?MODULE:id_tree_join/2},
+ {reduce, fun ?MODULE:id_tree_reduce/2},
+ {compression, Compression}
+ ]),
SeqTreeState = couch_bt_engine_header:seq_tree_state(Header),
{ok, SeqTree} = couch_btree:open(SeqTreeState, Fd, [
- {split, fun ?MODULE:seq_tree_split/1},
- {join, fun ?MODULE:seq_tree_join/2},
- {reduce, fun ?MODULE:seq_tree_reduce/2},
- {compression, Compression}
- ]),
+ {split, fun ?MODULE:seq_tree_split/1},
+ {join, fun ?MODULE:seq_tree_join/2},
+ {reduce, fun ?MODULE:seq_tree_reduce/2},
+ {compression, Compression}
+ ]),
LocalTreeState = couch_bt_engine_header:local_tree_state(Header),
{ok, LocalTree} = couch_btree:open(LocalTreeState, Fd, [
- {split, fun ?MODULE:local_tree_split/1},
- {join, fun ?MODULE:local_tree_join/2},
- {compression, Compression}
- ]),
+ {split, fun ?MODULE:local_tree_split/1},
+ {join, fun ?MODULE:local_tree_join/2},
+ {compression, Compression}
+ ]),
PurgeTreeState = couch_bt_engine_header:purge_tree_state(Header),
{ok, PurgeTree} = couch_btree:open(PurgeTreeState, Fd, [
@@ -940,7 +906,6 @@ init_state(FilePath, Fd, Header0, Options) ->
St
end.
-
update_header(St, Header) ->
couch_bt_engine_header:set(Header, [
{seq_tree_state, couch_btree:get_state(St#st.seq_tree)},
@@ -950,7 +915,6 @@ update_header(St, Header) ->
{purge_seq_tree_state, couch_btree:get_state(St#st.purge_seq_tree)}
]).
-
increment_update_seq(#st{header = Header} = St) ->
UpdateSeq = couch_bt_engine_header:get(Header, update_seq),
St#st{
@@ -959,7 +923,6 @@ increment_update_seq(#st{header = Header} = St) ->
])
}.
-
set_default_security_object(Fd, Header, Compression, Options) ->
case couch_bt_engine_header:get(Header, security_ptr) of
Pointer when is_integer(Pointer) ->
@@ -971,7 +934,6 @@ set_default_security_object(Fd, Header, Compression, Options) ->
couch_bt_engine_header:set(Header, security_ptr, Ptr)
end.
-
% This function is here, and not in couch_bt_engine_header
% because it requires modifying file contents
upgrade_purge_info(Fd, Header) ->
@@ -980,24 +942,32 @@ upgrade_purge_info(Fd, Header) ->
Header;
Ptr when is_tuple(Ptr) ->
Header;
- PurgeSeq when is_integer(PurgeSeq)->
+ PurgeSeq when is_integer(PurgeSeq) ->
% Pointer to old purged ids/revs is in purge_seq_tree_state
Ptr = couch_bt_engine_header:get(Header, purge_seq_tree_state),
case Ptr of
nil ->
PTS = couch_bt_engine_header:purge_tree_state(Header),
- PurgeTreeSt = case PTS of 0 -> nil; Else -> Else end,
+ PurgeTreeSt =
+ case PTS of
+ 0 -> nil;
+ Else -> Else
+ end,
couch_bt_engine_header:set(Header, [
{purge_tree_state, PurgeTreeSt}
]);
_ ->
{ok, PurgedIdsRevs} = couch_file:pread_term(Fd, Ptr),
- {Infos, _} = lists:foldl(fun({Id, Revs}, {InfoAcc, PSeq}) ->
- Info = {PSeq, couch_uuids:random(), Id, Revs},
- {[Info | InfoAcc], PSeq + 1}
- end, {[], PurgeSeq}, PurgedIdsRevs),
+ {Infos, _} = lists:foldl(
+ fun({Id, Revs}, {InfoAcc, PSeq}) ->
+ Info = {PSeq, couch_uuids:random(), Id, Revs},
+ {[Info | InfoAcc], PSeq + 1}
+ end,
+ {[], PurgeSeq},
+ PurgedIdsRevs
+ ),
{ok, PurgeTree} = couch_btree:open(nil, Fd, [
{split, fun ?MODULE:purge_tree_split/1},
@@ -1022,7 +992,6 @@ upgrade_purge_info(Fd, Header) ->
end
end.
-
init_set_props(Fd, Header, Options) ->
case couch_util:get_value(props, Options) of
undefined ->
@@ -1034,70 +1003,70 @@ init_set_props(Fd, Header, Options) ->
couch_bt_engine_header:set(Header, props_ptr, Ptr)
end.
-
delete_compaction_files(FilePath) ->
RootDir = config:get("couchdb", "database_dir", "."),
DelOpts = [{context, compaction}],
delete_compaction_files(RootDir, FilePath, DelOpts).
-
rev_tree(DiskTree) ->
- couch_key_tree:map(fun
- (_RevId, {Del, Ptr, Seq}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq
- };
- (_RevId, {Del, Ptr, Seq, Size}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = couch_db_updater:upgrade_sizes(Size)
- };
- (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
- #leaf{
- deleted = ?i2b(Del),
- ptr = Ptr,
- seq = Seq,
- sizes = couch_db_updater:upgrade_sizes(Sizes),
- atts = Atts
- };
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING
- end, DiskTree).
-
+ couch_key_tree:map(
+ fun
+ (_RevId, {Del, Ptr, Seq}) ->
+ #leaf{
+ deleted = ?i2b(Del),
+ ptr = Ptr,
+ seq = Seq
+ };
+ (_RevId, {Del, Ptr, Seq, Size}) ->
+ #leaf{
+ deleted = ?i2b(Del),
+ ptr = Ptr,
+ seq = Seq,
+ sizes = couch_db_updater:upgrade_sizes(Size)
+ };
+ (_RevId, {Del, Ptr, Seq, Sizes, Atts}) ->
+ #leaf{
+ deleted = ?i2b(Del),
+ ptr = Ptr,
+ seq = Seq,
+ sizes = couch_db_updater:upgrade_sizes(Sizes),
+ atts = Atts
+ };
+ (_RevId, ?REV_MISSING) ->
+ ?REV_MISSING
+ end,
+ DiskTree
+ ).
disk_tree(RevTree) ->
- couch_key_tree:map(fun
- (_RevId, ?REV_MISSING) ->
- ?REV_MISSING;
- (_RevId, #leaf{} = Leaf) ->
- #leaf{
- deleted = Del,
- ptr = Ptr,
- seq = Seq,
- sizes = Sizes,
- atts = Atts
- } = Leaf,
- {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
- end, RevTree).
-
-
-split_sizes(#size_info{}=SI) ->
+ couch_key_tree:map(
+ fun
+ (_RevId, ?REV_MISSING) ->
+ ?REV_MISSING;
+ (_RevId, #leaf{} = Leaf) ->
+ #leaf{
+ deleted = Del,
+ ptr = Ptr,
+ seq = Seq,
+ sizes = Sizes,
+ atts = Atts
+ } = Leaf,
+ {?b2i(Del), Ptr, Seq, split_sizes(Sizes), Atts}
+ end,
+ RevTree
+ ).
+
+split_sizes(#size_info{} = SI) ->
{SI#size_info.active, SI#size_info.external}.
-
join_sizes({Active, External}) when is_integer(Active), is_integer(External) ->
- #size_info{active=Active, external=External}.
-
+ #size_info{active = Active, external = External}.
reduce_sizes(nil, _) ->
nil;
reduce_sizes(_, nil) ->
nil;
-reduce_sizes(#size_info{}=S1, #size_info{}=S2) ->
+reduce_sizes(#size_info{} = S1, #size_info{} = S2) ->
#size_info{
active = S1#size_info.active + S2#size_info.active,
external = S1#size_info.external + S2#size_info.external
@@ -1107,7 +1076,6 @@ reduce_sizes(S1, S2) ->
US2 = couch_db_updater:upgrade_sizes(S2),
reduce_sizes(US1, US2).
-
active_size(#st{} = St, #size_info{} = SI) ->
Trees = [
St#st.id_tree,
@@ -1116,27 +1084,32 @@ active_size(#st{} = St, #size_info{} = SI) ->
St#st.purge_tree,
St#st.purge_seq_tree
],
- lists:foldl(fun(T, Acc) ->
- case couch_btree:size(T) of
- _ when Acc == null ->
- null;
- nil ->
- null;
- Size ->
- Acc + Size
- end
- end, SI#size_info.active, Trees).
-
+ lists:foldl(
+ fun(T, Acc) ->
+ case couch_btree:size(T) of
+ _ when Acc == null ->
+ null;
+ nil ->
+ null;
+ Size ->
+ Acc + Size
+ end
+ end,
+ SI#size_info.active,
+ Trees
+ ).
fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
- Fun = case lists:member(include_deleted, Options) of
- true -> fun include_deleted/4;
- false -> fun skip_deleted/4
- end,
- RedFun = case lists:member(include_reductions, Options) of
- true -> fun include_reductions/4;
- false -> fun drop_reductions/4
- end,
+ Fun =
+ case lists:member(include_deleted, Options) of
+ true -> fun include_deleted/4;
+ false -> fun skip_deleted/4
+ end,
+ RedFun =
+ case lists:member(include_reductions, Options) of
+ true -> fun include_reductions/4;
+ false -> fun drop_reductions/4
+ end,
InAcc = {RedFun, {UserFun, UserAcc}},
{ok, Reds, OutAcc} = couch_btree:fold(Tree, Fun, InAcc, Options),
{_, {_, FinalUserAcc}} = OutAcc,
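
The fold_docs_int hunk above shows erlfmt's layout for binding the result of a `case` to a variable before a fold. A compilable sketch of the same select-a-callback-then-fold shape; every name here is illustrative and not the real engine API:

```
-module(fold_options_example).
-export([fold_docs/3]).

%% Pick the filtering callback from the options up front, then thread the
%% user fun through a plain foldl. Docs are assumed to be {Id, Status} pairs.
fold_docs(Docs, UserFun, Options) ->
    Filter =
        case lists:member(include_deleted, Options) of
            true -> fun include_deleted/3;
            false -> fun skip_deleted/3
        end,
    lists:foldl(
        fun(Doc, Acc) -> Filter(Doc, UserFun, Acc) end,
        [],
        Docs
    ).

include_deleted(Doc, UserFun, Acc) ->
    UserFun(Doc, Acc).

skip_deleted({_Id, deleted}, _UserFun, Acc) ->
    Acc;
skip_deleted(Doc, UserFun, Acc) ->
    UserFun(Doc, Acc).
```
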
@@ -1149,12 +1122,10 @@ fold_docs_int(St, Tree, UserFun, UserAcc, Options) ->
{ok, FinalUserAcc}
end.
-
include_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
{Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
{Go, {UserFun, NewUserAcc}}.
-
% First element of the reductions is the total
% number of undeleted documents.
skip_deleted(traverse, _Entry, {0, _, _} = _Reds, Acc) ->
@@ -1165,27 +1136,23 @@ skip_deleted(Case, Entry, Reds, {UserFun, UserAcc}) ->
{Go, NewUserAcc} = UserFun(Case, Entry, Reds, UserAcc),
{Go, {UserFun, NewUserAcc}}.
-
include_reductions(visit, FDI, Reds, {UserFun, UserAcc}) ->
{Go, NewUserAcc} = UserFun(FDI, Reds, UserAcc),
{Go, {UserFun, NewUserAcc}};
include_reductions(_, _, _, Acc) ->
{ok, Acc}.
-
drop_reductions(visit, FDI, _Reds, {UserFun, UserAcc}) ->
{Go, NewUserAcc} = UserFun(FDI, UserAcc),
{Go, {UserFun, NewUserAcc}};
drop_reductions(_, _, _, Acc) ->
{ok, Acc}.
-
fold_docs_reduce_to_count(Reds) ->
RedFun = fun id_tree_reduce/2,
FinalRed = couch_btree:final_reduce(RedFun, Reds),
element(1, FinalRed).
-
finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
#st{
filepath = FilePath,
@@ -1233,10 +1200,11 @@ finish_compaction_int(#st{} = OldSt, #st{} = NewSt1) ->
decref(OldSt),
% And return our finished new state
- {ok, NewSt2#st{
- filepath = FilePath
- }, undefined}.
-
+ {ok,
+ NewSt2#st{
+ filepath = FilePath
+ },
+ undefined}.
is_file(Path) ->
case file:read_file_info(Path, [raw]) of
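
Most of the couch_bt_engine.erl changes above are erlfmt rewriting multi-clause anonymous funs passed to lists:map/lists:foldl: one clause per line, with the list argument on its own line. A tiny self-contained sketch of the resulting layout, with behaviour identical to the single-line form it replaces (module and function names are made up for illustration):

```
-module(fmt_example).
-export([lookup_all/2]).

%% Mirrors the open_docs/2 pattern: map lookup results, passing
%% not_found through unchanged.
lookup_all(Tree, Keys) ->
    Results = [lookup(Tree, Key) || Key <- Keys],
    lists:map(
        fun
            ({ok, Value}) -> Value;
            (not_found) -> not_found
        end,
        Results
    ).

lookup(Tree, Key) ->
    case maps:find(Key, Tree) of
        {ok, Value} -> {ok, Value};
        error -> not_found
    end.
```
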
diff --git a/src/couch/src/couch_bt_engine_compactor.erl b/src/couch/src/couch_bt_engine_compactor.erl
index 3e356e2e3..8ed55b5c3 100644
--- a/src/couch/src/couch_bt_engine_compactor.erl
+++ b/src/couch/src/couch_bt_engine_compactor.erl
@@ -12,16 +12,13 @@
-module(couch_bt_engine_compactor).
-
-export([
start/4
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("couch_bt_engine.hrl").
-
-record(comp_st, {
db_name,
old_st,
@@ -44,14 +41,12 @@
locs
}).
-
-ifdef(TEST).
-define(COMP_EVENT(Name), couch_bt_engine_compactor_ev:event(Name)).
-else.
-define(COMP_EVENT(Name), ignore).
-endif.
-
start(#st{} = St, DbName, Options, Parent) ->
erlang:put(io_priority, {db_compact, DbName}),
couch_log:debug("Compaction process spawned for db \"~s\"", [DbName]),
@@ -72,9 +67,13 @@ start(#st{} = St, DbName, Options, Parent) ->
fun compact_final_sync/1
],
- FinalCompSt = lists:foldl(fun(Stage, CompSt) ->
- Stage(CompSt)
- end, InitCompSt, Stages),
+ FinalCompSt = lists:foldl(
+ fun(Stage, CompSt) ->
+ Stage(CompSt)
+ end,
+ InitCompSt,
+ Stages
+ ),
#comp_st{
new_st = FinalNewSt,
@@ -88,7 +87,6 @@ start(#st{} = St, DbName, Options, Parent) ->
Msg = {compact_done, couch_bt_engine, FinalNewSt#st.filepath},
gen_server:cast(Parent, Msg).
-
open_compaction_files(DbName, OldSt, Options) ->
#st{
filepath = DbFilePath,
@@ -99,57 +97,59 @@ open_compaction_files(DbName, OldSt, Options) ->
{ok, DataFd, DataHdr} = open_compaction_file(DataFile),
{ok, MetaFd, MetaHdr} = open_compaction_file(MetaFile),
DataHdrIsDbHdr = couch_bt_engine_header:is_header(DataHdr),
- CompSt = case {DataHdr, MetaHdr} of
- {#comp_header{}=A, #comp_header{}=A} ->
- % We're restarting a compaction that did not finish
- % before trying to swap out with the original db
- DbHeader = A#comp_header.db_header,
- St0 = couch_bt_engine:init_state(
- DataFile, DataFd, DbHeader, Options),
- St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_st),
- #comp_st{
- db_name = DbName,
- old_st = OldSt,
- new_st = St1,
- meta_fd = MetaFd,
- retry = St0#st.id_tree
- };
- _ when DataHdrIsDbHdr ->
- % We tried to swap out the compaction but there were
- % writes to the database during compaction. Start
- % a compaction retry.
- Header = couch_bt_engine_header:from(SrcHdr),
- ok = reset_compaction_file(MetaFd, Header),
- St0 = couch_bt_engine:init_state(
- DataFile, DataFd, DataHdr, Options),
- St1 = bind_emsort(St0, MetaFd, nil),
- #comp_st{
- db_name = DbName,
- old_st = OldSt,
- new_st = St1,
- meta_fd = MetaFd,
- retry = St0#st.id_tree
- };
- _ ->
- % We're starting a compaction from scratch
- Header = couch_bt_engine_header:from(SrcHdr),
- ok = reset_compaction_file(DataFd, Header),
- ok = reset_compaction_file(MetaFd, Header),
- St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
- St1 = bind_emsort(St0, MetaFd, nil),
- #comp_st{
- db_name = DbName,
- old_st = OldSt,
- new_st = St1,
- meta_fd = MetaFd,
- retry = nil
- }
- end,
+ CompSt =
+ case {DataHdr, MetaHdr} of
+ {#comp_header{} = A, #comp_header{} = A} ->
+ % We're restarting a compaction that did not finish
+ % before trying to swap out with the original db
+ DbHeader = A#comp_header.db_header,
+ St0 = couch_bt_engine:init_state(
+ DataFile, DataFd, DbHeader, Options
+ ),
+ St1 = bind_emsort(St0, MetaFd, A#comp_header.meta_st),
+ #comp_st{
+ db_name = DbName,
+ old_st = OldSt,
+ new_st = St1,
+ meta_fd = MetaFd,
+ retry = St0#st.id_tree
+ };
+ _ when DataHdrIsDbHdr ->
+ % We tried to swap out the compaction but there were
+ % writes to the database during compaction. Start
+ % a compaction retry.
+ Header = couch_bt_engine_header:from(SrcHdr),
+ ok = reset_compaction_file(MetaFd, Header),
+ St0 = couch_bt_engine:init_state(
+ DataFile, DataFd, DataHdr, Options
+ ),
+ St1 = bind_emsort(St0, MetaFd, nil),
+ #comp_st{
+ db_name = DbName,
+ old_st = OldSt,
+ new_st = St1,
+ meta_fd = MetaFd,
+ retry = St0#st.id_tree
+ };
+ _ ->
+ % We're starting a compaction from scratch
+ Header = couch_bt_engine_header:from(SrcHdr),
+ ok = reset_compaction_file(DataFd, Header),
+ ok = reset_compaction_file(MetaFd, Header),
+ St0 = couch_bt_engine:init_state(DataFile, DataFd, Header, Options),
+ St1 = bind_emsort(St0, MetaFd, nil),
+ #comp_st{
+ db_name = DbName,
+ old_st = OldSt,
+ new_st = St1,
+ meta_fd = MetaFd,
+ retry = nil
+ }
+ end,
unlink(DataFd),
erlang:monitor(process, MetaFd),
{ok, CompSt}.
-
copy_purge_info(#comp_st{} = CompSt) ->
#comp_st{
db_name = DbName,
@@ -164,25 +164,30 @@ copy_purge_info(#comp_st{} = CompSt) ->
OldPSTree = OldSt#st.purge_seq_tree,
StartSeq = couch_bt_engine:get_purge_seq(NewSt) + 1,
BufferSize = config:get_integer(
- "database_compaction", "doc_buffer_size", 524288),
+ "database_compaction", "doc_buffer_size", 524288
+ ),
CheckpointAfter = config:get(
- "database_compaction", "checkpoint_after", BufferSize * 10),
+ "database_compaction", "checkpoint_after", BufferSize * 10
+ ),
EnumFun = fun(Info, _Reds, {StAcc0, InfosAcc, InfosSize, CopiedSize}) ->
NewInfosSize = InfosSize + ?term_size(Info),
- if NewInfosSize >= BufferSize ->
- StAcc1 = copy_purge_infos(
- OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry),
- NewCopiedSize = CopiedSize + NewInfosSize,
- if NewCopiedSize >= CheckpointAfter ->
- StAcc2 = commit_compaction_data(StAcc1),
- {ok, {StAcc2, [], 0, 0}};
+ if
+ NewInfosSize >= BufferSize ->
+ StAcc1 = copy_purge_infos(
+ OldSt, StAcc0, [Info | InfosAcc], MinPurgeSeq, Retry
+ ),
+ NewCopiedSize = CopiedSize + NewInfosSize,
+ if
+ NewCopiedSize >= CheckpointAfter ->
+ StAcc2 = commit_compaction_data(StAcc1),
+ {ok, {StAcc2, [], 0, 0}};
+ true ->
+ {ok, {StAcc1, [], 0, NewCopiedSize}}
+ end;
true ->
- {ok, {StAcc1, [], 0, NewCopiedSize}}
- end;
- true ->
- NewInfosAcc = [Info | InfosAcc],
- {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}}
+ NewInfosAcc = [Info | InfosAcc],
+ {ok, {StAcc0, NewInfosAcc, NewInfosSize, CopiedSize}}
end
end,
@@ -197,7 +202,6 @@ copy_purge_info(#comp_st{} = CompSt) ->
new_st = FinalNewSt
}.
-
copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
#st{
id_tree = OldIdTree
@@ -217,9 +221,12 @@ copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
} = NewSt1,
% Copy over the purge infos
- InfosToAdd = lists:filter(fun({PSeq, _, _, _}) ->
- PSeq > MinPurgeSeq
- end, Infos),
+ InfosToAdd = lists:filter(
+ fun({PSeq, _, _, _}) ->
+ PSeq > MinPurgeSeq
+ end,
+ Infos
+ ),
{ok, NewPurgeTree1} = couch_btree:add(NewPurgeTree0, InfosToAdd),
{ok, NewPurgeSeqTree1} = couch_btree:add(NewPurgeSeqTree0, InfosToAdd),
@@ -232,35 +239,44 @@ copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
% any of the referenced docs have been completely purged
% from the database. Any doc that has been completely purged
% must then be removed from our partially compacted database.
- NewSt3 = if Retry == nil -> NewSt2; true ->
- AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos],
- UniqDocIds = lists:usort(AllDocIds),
- OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds),
- OldZipped = lists:zip(UniqDocIds, OldIdResults),
-
-        % The list of non-existent docs in the database being compacted
- MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped],
-
- % Removing anything that exists in the partially compacted database
- NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds),
- ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}],
-
- {RemIds, RemSeqs} = lists:unzip(lists:map(fun(FDI) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq
- } = FDI,
- {Id, Seq}
- end, ToRemove)),
-
- {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds),
- {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs),
-
- NewSt2#st{
- id_tree = NewIdTree1,
- seq_tree = NewSeqTree1
- }
- end,
+ NewSt3 =
+ if
+ Retry == nil ->
+ NewSt2;
+ true ->
+ AllDocIds = [DocId || {_PurgeSeq, _UUID, DocId, _Revs} <- Infos],
+ UniqDocIds = lists:usort(AllDocIds),
+ OldIdResults = couch_btree:lookup(OldIdTree, UniqDocIds),
+ OldZipped = lists:zip(UniqDocIds, OldIdResults),
+
+            % The list of non-existent docs in the database being compacted
+ MaybeRemDocIds = [DocId || {DocId, not_found} <- OldZipped],
+
+ % Removing anything that exists in the partially compacted database
+ NewIdResults = couch_btree:lookup(NewIdTree0, MaybeRemDocIds),
+ ToRemove = [Doc || {ok, Doc} <- NewIdResults, Doc /= {ok, not_found}],
+
+ {RemIds, RemSeqs} = lists:unzip(
+ lists:map(
+ fun(FDI) ->
+ #full_doc_info{
+ id = Id,
+ update_seq = Seq
+ } = FDI,
+ {Id, Seq}
+ end,
+ ToRemove
+ )
+ ),
+
+ {ok, NewIdTree1} = couch_btree:add_remove(NewIdTree0, [], RemIds),
+ {ok, NewSeqTree1} = couch_btree:add_remove(NewSeqTree0, [], RemSeqs),
+
+ NewSt2#st{
+ id_tree = NewIdTree1,
+ seq_tree = NewSeqTree1
+ }
+ end,
Header = couch_bt_engine:update_header(NewSt3, NewSt3#st.header),
NewSt4 = NewSt3#st{
@@ -268,7 +284,6 @@ copy_purge_infos(OldSt, NewSt0, Infos, MinPurgeSeq, Retry) ->
},
bind_emsort(NewSt4, MetaFd, MetaState).
-
copy_compact(#comp_st{} = CompSt) ->
#comp_st{
db_name = DbName,
@@ -282,39 +297,49 @@ copy_compact(#comp_st{} = CompSt) ->
NewUpdateSeq = couch_bt_engine:get_update_seq(NewSt0),
TotalChanges = couch_bt_engine:count_changes_since(St, NewUpdateSeq),
BufferSize = list_to_integer(
- config:get("database_compaction", "doc_buffer_size", "524288")),
+ config:get("database_compaction", "doc_buffer_size", "524288")
+ ),
CheckpointAfter = couch_util:to_integer(
- config:get("database_compaction", "checkpoint_after",
- BufferSize * 10)),
+ config:get(
+ "database_compaction",
+ "checkpoint_after",
+ BufferSize * 10
+ )
+ ),
EnumBySeqFun =
- fun(DocInfo, _Offset,
- {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}) ->
+ fun(
+ DocInfo,
+ _Offset,
+ {AccNewSt, AccUncopied, AccUncopiedSize, AccCopiedSize}
+ ) ->
+ Seq =
+ case DocInfo of
+ #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
+ #doc_info{} -> DocInfo#doc_info.high_seq
+ end,
- Seq = case DocInfo of
- #full_doc_info{} -> DocInfo#full_doc_info.update_seq;
- #doc_info{} -> DocInfo#doc_info.high_seq
+ AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
+ if
+ AccUncopiedSize2 >= BufferSize ->
+ NewSt2 = copy_docs(
+ St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry
+ ),
+ AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
+ if
+ AccCopiedSize2 >= CheckpointAfter ->
+ {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
+ CommNewSt3 = commit_compaction_data(NewSt3),
+ {ok, {CommNewSt3, [], 0, 0}};
+ true ->
+ {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
+ {ok, {NewSt3, [], 0, AccCopiedSize2}}
+ end;
+ true ->
+ {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2, AccCopiedSize}}
+ end
end,
- AccUncopiedSize2 = AccUncopiedSize + ?term_size(DocInfo),
- if AccUncopiedSize2 >= BufferSize ->
- NewSt2 = copy_docs(
- St, AccNewSt, lists:reverse([DocInfo | AccUncopied]), Retry),
- AccCopiedSize2 = AccCopiedSize + AccUncopiedSize2,
- if AccCopiedSize2 >= CheckpointAfter ->
- {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
- CommNewSt3 = commit_compaction_data(NewSt3),
- {ok, {CommNewSt3, [], 0, 0}};
- true ->
- {ok, NewSt3} = couch_bt_engine:set_update_seq(NewSt2, Seq),
- {ok, {NewSt3, [], 0, AccCopiedSize2}}
- end;
- true ->
- {ok, {AccNewSt, [DocInfo | AccUncopied], AccUncopiedSize2,
- AccCopiedSize}}
- end
- end,
-
TaskProps0 = [
{type, database_compaction},
{database, DbName},
@@ -324,24 +349,27 @@ copy_compact(#comp_st{} = CompSt) ->
{total_changes, TotalChanges}
],
case (Retry =/= nil) and couch_task_status:is_task_added() of
- true ->
- couch_task_status:update([
- {retry, true},
- {phase, document_copy},
- {progress, 0},
- {changes_done, 0},
- {total_changes, TotalChanges}
- ]);
- false ->
- couch_task_status:add_task(TaskProps0),
- couch_task_status:set_update_frequency(500)
+ true ->
+ couch_task_status:update([
+ {retry, true},
+ {phase, document_copy},
+ {progress, 0},
+ {changes_done, 0},
+ {total_changes, TotalChanges}
+ ]);
+ false ->
+ couch_task_status:add_task(TaskProps0),
+ couch_task_status:set_update_frequency(500)
end,
?COMP_EVENT(seq_init),
{ok, _, {NewSt2, Uncopied, _, _}} =
- couch_btree:foldl(St#st.seq_tree, EnumBySeqFun,
+ couch_btree:foldl(
+ St#st.seq_tree,
+ EnumBySeqFun,
{NewSt, [], 0, 0},
- [{start_key, NewUpdateSeq + 1}]),
+ [{start_key, NewUpdateSeq + 1}]
+ ),
NewSt3 = copy_docs(St, NewSt2, lists:reverse(Uncopied), Retry),
@@ -362,146 +390,168 @@ copy_compact(#comp_st{} = CompSt) ->
new_st = NewSt6
}.
-
copy_docs(St, #st{} = NewSt, MixedInfos, Retry) ->
- DocInfoIds = [Id || #doc_info{id=Id} <- MixedInfos],
+ DocInfoIds = [Id || #doc_info{id = Id} <- MixedInfos],
LookupResults = couch_btree:lookup(St#st.id_tree, DocInfoIds),
% COUCHDB-968, make sure we prune duplicates during compaction
- NewInfos0 = lists:usort(fun(#full_doc_info{id=A}, #full_doc_info{id=B}) ->
- A =< B
- end, merge_lookups(MixedInfos, LookupResults)),
-
- NewInfos1 = lists:map(fun(Info) ->
- {NewRevTree, FinalAcc} = couch_key_tree:mapfold(fun
- ({RevPos, RevId}, #leaf{ptr=Sp}=Leaf, leaf, SizesAcc) ->
- {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
- #size_info{external = OldExternalSize} = Leaf#leaf.sizes,
- ExternalSize = case OldExternalSize of
- 0 when is_binary(Body) ->
- couch_compress:uncompressed_size(Body);
- 0 ->
- couch_ejson_size:encoded_size(Body);
- N -> N
+ NewInfos0 = lists:usort(
+ fun(#full_doc_info{id = A}, #full_doc_info{id = B}) ->
+ A =< B
+ end,
+ merge_lookups(MixedInfos, LookupResults)
+ ),
+
+ NewInfos1 = lists:map(
+ fun(Info) ->
+ {NewRevTree, FinalAcc} = couch_key_tree:mapfold(
+ fun
+ ({RevPos, RevId}, #leaf{ptr = Sp} = Leaf, leaf, SizesAcc) ->
+ {Body, AttInfos} = copy_doc_attachments(St, Sp, NewSt),
+ #size_info{external = OldExternalSize} = Leaf#leaf.sizes,
+ ExternalSize =
+ case OldExternalSize of
+ 0 when is_binary(Body) ->
+ couch_compress:uncompressed_size(Body);
+ 0 ->
+ couch_ejson_size:encoded_size(Body);
+ N ->
+ N
+ end,
+ Doc0 = #doc{
+ id = Info#full_doc_info.id,
+ revs = {RevPos, [RevId]},
+ deleted = Leaf#leaf.deleted,
+ body = Body,
+ atts = AttInfos
+ },
+ Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
+ {ok, Doc2, ActiveSize} =
+ couch_bt_engine:write_doc_body(NewSt, Doc1),
+ AttSizes = [{element(3, A), element(4, A)} || A <- AttInfos],
+ NewLeaf = Leaf#leaf{
+ ptr = Doc2#doc.body,
+ sizes = #size_info{
+ active = ActiveSize,
+ external = ExternalSize
+ },
+ atts = AttSizes
+ },
+ {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
+ (_Rev, _Leaf, branch, SizesAcc) ->
+ {?REV_MISSING, SizesAcc}
end,
- Doc0 = #doc{
- id = Info#full_doc_info.id,
- revs = {RevPos, [RevId]},
- deleted = Leaf#leaf.deleted,
- body = Body,
- atts = AttInfos
- },
- Doc1 = couch_bt_engine:serialize_doc(NewSt, Doc0),
- {ok, Doc2, ActiveSize} =
- couch_bt_engine:write_doc_body(NewSt, Doc1),
- AttSizes = [{element(3,A), element(4,A)} || A <- AttInfos],
- NewLeaf = Leaf#leaf{
- ptr = Doc2#doc.body,
- sizes = #size_info{
- active = ActiveSize,
- external = ExternalSize
- },
- atts = AttSizes
- },
- {NewLeaf, couch_db_updater:add_sizes(leaf, NewLeaf, SizesAcc)};
- (_Rev, _Leaf, branch, SizesAcc) ->
- {?REV_MISSING, SizesAcc}
- end, {0, 0, []}, Info#full_doc_info.rev_tree),
- {FinalAS, FinalES, FinalAtts} = FinalAcc,
- TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
- NewActiveSize = FinalAS + TotalAttSize,
- NewExternalSize = FinalES + TotalAttSize,
- ?COMP_EVENT(seq_copy),
- Info#full_doc_info{
- rev_tree = NewRevTree,
- sizes = #size_info{
- active = NewActiveSize,
- external = NewExternalSize
+ {0, 0, []},
+ Info#full_doc_info.rev_tree
+ ),
+ {FinalAS, FinalES, FinalAtts} = FinalAcc,
+ TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
+ NewActiveSize = FinalAS + TotalAttSize,
+ NewExternalSize = FinalES + TotalAttSize,
+ ?COMP_EVENT(seq_copy),
+ Info#full_doc_info{
+ rev_tree = NewRevTree,
+ sizes = #size_info{
+ active = NewActiveSize,
+ external = NewExternalSize
+ }
}
- }
- end, NewInfos0),
+ end,
+ NewInfos0
+ ),
Limit = couch_bt_engine:get_revs_limit(St),
- NewInfos = lists:map(fun(FDI) ->
- FDI#full_doc_info{
- rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
- }
- end, NewInfos1),
+ NewInfos = lists:map(
+ fun(FDI) ->
+ FDI#full_doc_info{
+ rev_tree = couch_key_tree:stem(FDI#full_doc_info.rev_tree, Limit)
+ }
+ end,
+ NewInfos1
+ ),
RemoveSeqs =
- case Retry of
- nil ->
- [];
- OldDocIdTree ->
- % Compaction is being rerun to catch up to writes during the
- % first pass. This means we may have docs that already exist
- % in the seq_tree in the .data file. Here we lookup any old
- % update_seqs so that they can be removed.
- Ids = [Id || #full_doc_info{id=Id} <- NewInfos],
- Existing = couch_btree:lookup(OldDocIdTree, Ids),
- [Seq || {ok, #full_doc_info{update_seq=Seq}} <- Existing]
- end,
+ case Retry of
+ nil ->
+ [];
+ OldDocIdTree ->
+ % Compaction is being rerun to catch up to writes during the
+ % first pass. This means we may have docs that already exist
+ % in the seq_tree in the .data file. Here we lookup any old
+ % update_seqs so that they can be removed.
+ Ids = [Id || #full_doc_info{id = Id} <- NewInfos],
+ Existing = couch_btree:lookup(OldDocIdTree, Ids),
+ [Seq || {ok, #full_doc_info{update_seq = Seq}} <- Existing]
+ end,
{ok, SeqTree} = couch_btree:add_remove(
- NewSt#st.seq_tree, NewInfos, RemoveSeqs),
+ NewSt#st.seq_tree, NewInfos, RemoveSeqs
+ ),
EMSortFd = couch_emsort:get_fd(NewSt#st.id_tree),
{ok, LocSizes} = couch_file:append_terms(EMSortFd, NewInfos),
- EMSortEntries = lists:zipwith(fun(FDI, {Loc, _}) ->
- #full_doc_info{
- id = Id,
- update_seq = Seq
- } = FDI,
- {{Id, Seq}, Loc}
- end, NewInfos, LocSizes),
+ EMSortEntries = lists:zipwith(
+ fun(FDI, {Loc, _}) ->
+ #full_doc_info{
+ id = Id,
+ update_seq = Seq
+ } = FDI,
+ {{Id, Seq}, Loc}
+ end,
+ NewInfos,
+ LocSizes
+ ),
{ok, IdEms} = couch_emsort:add(NewSt#st.id_tree, EMSortEntries),
update_compact_task(length(NewInfos)),
- NewSt#st{id_tree=IdEms, seq_tree=SeqTree}.
-
+ NewSt#st{id_tree = IdEms, seq_tree = SeqTree}.
copy_doc_attachments(#st{} = SrcSt, SrcSp, DstSt) ->
{ok, {BodyData, BinInfos0}} = couch_file:pread_term(SrcSt#st.fd, SrcSp),
- BinInfos = case BinInfos0 of
- _ when is_binary(BinInfos0) ->
- couch_compress:decompress(BinInfos0);
- _ when is_list(BinInfos0) ->
- % pre 1.2 file format
- BinInfos0
- end,
+ BinInfos =
+ case BinInfos0 of
+ _ when is_binary(BinInfos0) ->
+ couch_compress:decompress(BinInfos0);
+ _ when is_list(BinInfos0) ->
+ % pre 1.2 file format
+ BinInfos0
+ end,
% copy the bin values
NewBinInfos = lists:map(
- fun({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
- % 010 UPGRADE CODE
- {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
- {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
- ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
- {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
- {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
- ok = couch_stream:copy(SrcStream, DstStream),
- {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
- {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
- couch_util:check_md5(ExpectedMd5, ActualMd5),
- Enc = case Enc1 of
- true ->
- % 0110 UPGRADE CODE
- gzip;
- false ->
- % 0110 UPGRADE CODE
- identity;
- _ ->
- Enc1
- end,
- {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
- end, BinInfos),
+ fun
+ ({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}) ->
+ % 010 UPGRADE CODE
+ {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
+ {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
+ ok = couch_stream:copy(SrcStream, DstStream),
+ {NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
+ couch_stream:close(DstStream),
+ {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
+ couch_util:check_md5(ExpectedMd5, ActualMd5),
+ {Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
+ ({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}) ->
+ {ok, SrcStream} = couch_bt_engine:open_read_stream(SrcSt, BinSp),
+ {ok, DstStream} = couch_bt_engine:open_write_stream(DstSt, []),
+ ok = couch_stream:copy(SrcStream, DstStream),
+ {NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
+ couch_stream:close(DstStream),
+ {ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
+ couch_util:check_md5(ExpectedMd5, ActualMd5),
+ Enc =
+ case Enc1 of
+ true ->
+ % 0110 UPGRADE CODE
+ gzip;
+ false ->
+ % 0110 UPGRADE CODE
+ identity;
+ _ ->
+ Enc1
+ end,
+ {Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}
+ end,
+ BinInfos
+ ),
{BodyData, NewBinInfos}.
-
sort_meta_data(#comp_st{new_st = St0} = CompSt) ->
?COMP_EVENT(md_sort_init),
NumKVs = couch_emsort:num_kvs(St0#st.id_tree),
@@ -521,7 +571,6 @@ sort_meta_data(#comp_st{new_st = St0} = CompSt) ->
}
}.
-
copy_meta_data(#comp_st{new_st = St} = CompSt) ->
#st{
fd = Fd,
@@ -537,11 +586,11 @@ copy_meta_data(#comp_st{new_st = St} = CompSt) ->
]),
{ok, Iter} = couch_emsort:iter(Src),
Acc0 = #merge_st{
- src_fd=SrcFd,
- id_tree=IdTree0,
- seq_tree=St#st.seq_tree,
- rem_seqs=[],
- locs=[]
+ src_fd = SrcFd,
+ id_tree = IdTree0,
+ seq_tree = St#st.seq_tree,
+ rem_seqs = [],
+ locs = []
},
?COMP_EVENT(md_copy_init),
NumKVs = couch_emsort:num_kvs(Src),
@@ -566,7 +615,6 @@ copy_meta_data(#comp_st{new_st = St} = CompSt) ->
}
}.
-
compact_final_sync(#comp_st{new_st = St0} = CompSt) ->
?COMP_EVENT(before_final_sync),
{ok, St1} = couch_bt_engine:commit_data(St0),
@@ -575,7 +623,6 @@ compact_final_sync(#comp_st{new_st = St0} = CompSt) ->
new_st = St1
}.
-
open_compaction_file(FilePath) ->
case couch_file:open(FilePath, [nologifmissing]) of
{ok, Fd} ->
@@ -588,12 +635,10 @@ open_compaction_file(FilePath) ->
{ok, Fd, nil}
end.
-
reset_compaction_file(Fd, Header) ->
ok = couch_file:truncate(Fd, 0),
ok = couch_file:write_header(Fd, Header).
-
commit_compaction_data(#comp_st{new_st = St} = CompSt) ->
% Compaction needs to write headers to both the data file
% and the meta file so if we need to restart we can pick
@@ -601,12 +646,10 @@ commit_compaction_data(#comp_st{new_st = St} = CompSt) ->
CompSt#comp_st{
new_st = commit_compaction_data(St)
};
-
commit_compaction_data(#st{} = St) ->
commit_compaction_data(St, couch_emsort:get_fd(St#st.id_tree)),
commit_compaction_data(St, St#st.fd).
-
commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
DataState = couch_bt_engine_header:id_tree_state(OldHeader),
MetaFd = couch_emsort:get_fd(St0#st.id_tree),
@@ -624,17 +667,15 @@ commit_compaction_data(#st{header = OldHeader} = St0, Fd) ->
},
bind_emsort(St2, MetaFd, MetaState).
-
bind_emsort(St, Fd, nil) ->
{ok, Ems} = couch_emsort:open(Fd),
- St#st{id_tree=Ems};
+ St#st{id_tree = Ems};
bind_emsort(St, Fd, {BB, _} = Root) when is_list(BB) ->
% Upgrade clause when we find old compaction files
bind_emsort(St, Fd, [{root, Root}]);
bind_emsort(St, Fd, State) ->
{ok, Ems} = couch_emsort:open(Fd, State),
- St#st{id_tree=Ems}.
-
+ St#st{id_tree = Ems}.
bind_id_tree(St, Fd, State) ->
{ok, IdBtree} = couch_btree:open(State, Fd, [
@@ -642,42 +683,41 @@ bind_id_tree(St, Fd, State) ->
{join, fun couch_bt_engine:id_tree_join/2},
{reduce, fun couch_bt_engine:id_tree_reduce/2}
]),
- St#st{id_tree=IdBtree}.
-
+ St#st{id_tree = IdBtree}.
merge_lookups(Infos, []) ->
Infos;
merge_lookups([], _) ->
[];
-merge_lookups([#doc_info{}=DI | RestInfos], [{ok, FDI} | RestLookups]) ->
+merge_lookups([#doc_info{} = DI | RestInfos], [{ok, FDI} | RestLookups]) ->
% Assert we've matched our lookups
- if DI#doc_info.id == FDI#full_doc_info.id -> ok; true ->
- erlang:error({mismatched_doc_infos, DI#doc_info.id})
+ if
+ DI#doc_info.id == FDI#full_doc_info.id -> ok;
+ true -> erlang:error({mismatched_doc_infos, DI#doc_info.id})
end,
[FDI | merge_lookups(RestInfos, RestLookups)];
merge_lookups([FDI | RestInfos], Lookups) ->
[FDI | merge_lookups(RestInfos, Lookups)].
-
-merge_docids(Iter, #merge_st{locs=Locs}=Acc) when length(Locs) > 1000 ->
+merge_docids(Iter, #merge_st{locs = Locs} = Acc) when length(Locs) > 1000 ->
#merge_st{
- src_fd=SrcFd,
- id_tree=IdTree0,
- seq_tree=SeqTree0,
- rem_seqs=RemSeqs
+ src_fd = SrcFd,
+ id_tree = IdTree0,
+ seq_tree = SeqTree0,
+ rem_seqs = RemSeqs
} = Acc,
{ok, Infos} = couch_file:pread_terms(SrcFd, Locs),
{ok, IdTree1} = couch_btree:add(IdTree0, Infos),
{ok, SeqTree1} = couch_btree:add_remove(SeqTree0, [], RemSeqs),
Acc1 = Acc#merge_st{
- id_tree=IdTree1,
- seq_tree=SeqTree1,
- rem_seqs=[],
- locs=[]
+ id_tree = IdTree1,
+ seq_tree = SeqTree1,
+ rem_seqs = [],
+ locs = []
},
update_compact_task(length(Locs)),
merge_docids(Iter, Acc1);
-merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
+merge_docids(Iter, #merge_st{curr = Curr} = Acc) ->
case next_info(Iter, Curr, []) of
{NextIter, NewCurr, Loc, Seqs} ->
Acc1 = Acc#merge_st{
@@ -697,7 +737,6 @@ merge_docids(Iter, #merge_st{curr=Curr}=Acc) ->
Acc
end.
-
next_info(Iter, undefined, []) ->
case couch_emsort:next(Iter) of
{ok, {{Id, Seq}, Loc}, NextIter} ->
@@ -715,15 +754,14 @@ next_info(Iter, {Id, Seq, Loc}, Seqs) ->
{finished, Loc, Seqs}
end.
-
update_compact_task(NumChanges) ->
[Changes, Total] = couch_task_status:get([changes_done, total_changes]),
Changes2 = Changes + NumChanges,
- Progress = case Total of
- 0 ->
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
+ Progress =
+ case Total of
+ 0 ->
+ 0;
+ _ ->
+ (Changes2 * 100) div Total
+ end,
couch_task_status:update([{changes_done, Changes2}, {progress, Progress}]).
-
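
The compactor hunks above re-indent the buffered copy loop: accumulate documents until a buffer-size threshold, flush them, and checkpoint after enough bytes have been copied. A simplified, self-contained sketch of that pattern under stated assumptions: Flush/1 is a caller-supplied fun returning ok, and erlang:external_size/1 stands in for the ?term_size macro; none of these names are the actual compactor API.

```
-module(copy_buffer_example).
-export([copy/4]).

copy(Items, BufferSize, CheckpointAfter, Flush) ->
    {Rest, _Size, _Copied} = lists:foldl(
        fun(Item, {Acc, AccSize, Copied}) ->
            NewSize = AccSize + erlang:external_size(Item),
            if
                NewSize >= BufferSize ->
                    % buffer is full: flush the batch in original order
                    ok = Flush(lists:reverse([Item | Acc])),
                    NewCopied = Copied + NewSize,
                    if
                        NewCopied >= CheckpointAfter ->
                            % a real compactor would also write headers here
                            {[], 0, 0};
                        true ->
                            {[], 0, NewCopied}
                    end;
                true ->
                    {[Item | Acc], NewSize, Copied}
            end
        end,
        {[], 0, 0},
        Items
    ),
    % flush whatever is left over at the end
    ok = Flush(lists:reverse(Rest)).
```
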
diff --git a/src/couch/src/couch_bt_engine_header.erl b/src/couch/src/couch_bt_engine_header.erl
index 3f9f51821..e28f07723 100644
--- a/src/couch/src/couch_bt_engine_header.erl
+++ b/src/couch/src/couch_bt_engine_header.erl
@@ -12,7 +12,6 @@
-module(couch_bt_engine_header).
-
-export([
new/0,
from/1,
@@ -42,7 +41,6 @@
compacted_seq/1
]).
-
% This should be updated anytime a header change happens that requires more
% than filling in new defaults.
%
@@ -63,7 +61,8 @@
seq_tree_state = nil,
local_tree_state = nil,
purge_tree_state = nil,
- purge_seq_tree_state = nil, %purge tree: purge_seq -> uuid
+ %purge tree: purge_seq -> uuid
+ purge_seq_tree_state = nil,
security_ptr = nil,
revs_limit = 1000,
uuid,
@@ -73,17 +72,14 @@
props_ptr
}).
-
-define(PARTITION_DISK_VERSION, 8).
-
new() ->
#db_header{
uuid = couch_uuids:random(),
epochs = [{node(), 0}]
}.
-
from(Header0) ->
Header = upgrade(Header0),
#db_header{
@@ -92,16 +88,15 @@ from(Header0) ->
compacted_seq = Header#db_header.compacted_seq
}.
-
is_header(Header) ->
try
upgrade(Header),
true
- catch _:_ ->
- false
+ catch
+ _:_ ->
+ false
end.
-
upgrade(Header) ->
Funs = [
fun upgrade_tuple/1,
@@ -110,93 +105,81 @@ upgrade(Header) ->
fun upgrade_epochs/1,
fun upgrade_compacted_seq/1
],
- lists:foldl(fun(F, HdrAcc) ->
- F(HdrAcc)
- end, Header, Funs).
-
+ lists:foldl(
+ fun(F, HdrAcc) ->
+ F(HdrAcc)
+ end,
+ Header,
+ Funs
+ ).
get(Header, Key) ->
?MODULE:get(Header, Key, undefined).
-
get(Header, Key, Default) ->
get_field(Header, Key, Default).
-
set(Header, Key, Value) ->
?MODULE:set(Header, [{Key, Value}]).
-
set(Header0, Fields) ->
% A subtlety here is that if a database was open during
% the release upgrade that updates to uuids and epochs then
% this dynamic upgrade also assigns a uuid and epoch.
Header = upgrade(Header0),
- lists:foldl(fun({Field, Value}, HdrAcc) ->
- set_field(HdrAcc, Field, Value)
- end, Header, Fields).
-
+ lists:foldl(
+ fun({Field, Value}, HdrAcc) ->
+ set_field(HdrAcc, Field, Value)
+ end,
+ Header,
+ Fields
+ ).
disk_version(Header) ->
get_field(Header, disk_version).
-
latest_disk_version() ->
- ?LATEST_DISK_VERSION.
-
+ ?LATEST_DISK_VERSION.
update_seq(Header) ->
get_field(Header, update_seq).
-
id_tree_state(Header) ->
get_field(Header, id_tree_state).
-
seq_tree_state(Header) ->
get_field(Header, seq_tree_state).
-
local_tree_state(Header) ->
get_field(Header, local_tree_state).
-
purge_tree_state(Header) ->
get_field(Header, purge_tree_state).
-
purge_seq_tree_state(Header) ->
get_field(Header, purge_seq_tree_state).
-
security_ptr(Header) ->
get_field(Header, security_ptr).
-
revs_limit(Header) ->
get_field(Header, revs_limit).
-
uuid(Header) ->
get_field(Header, uuid).
-
epochs(Header) ->
get_field(Header, epochs).
-
compacted_seq(Header) ->
get_field(Header, compacted_seq).
-
purge_infos_limit(Header) ->
get_field(Header, purge_infos_limit).
-
get_field(Header, Field) ->
get_field(Header, Field, undefined).
-
get_field(Header, Field, Default) ->
Idx = index(Field),
case Idx > tuple_size(Header) of
@@ -204,90 +187,103 @@ get_field(Header, Field, Default) ->
false -> element(index(Field), Header)
end.
-
set_field(Header, Field, Value) ->
setelement(index(Field), Header, Value).
-
index(Field) ->
couch_util:get_value(Field, indexes()).
-
indexes() ->
Fields = record_info(fields, db_header),
Indexes = lists:seq(2, record_info(size, db_header)),
lists:zip(Fields, Indexes).
-
upgrade_tuple(Old) when is_record(Old, db_header) ->
Old;
upgrade_tuple(Old) when is_tuple(Old) ->
NewSize = record_info(size, db_header),
- if tuple_size(Old) < NewSize -> ok; true ->
- erlang:error({invalid_header_size, Old})
+ if
+ tuple_size(Old) < NewSize -> ok;
+ true -> erlang:error({invalid_header_size, Old})
end,
- {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
- {Idx+1, setelement(Idx, Hdr, Val)}
- end, {1, #db_header{}}, tuple_to_list(Old)),
- if is_record(New, db_header) -> ok; true ->
- erlang:error({invalid_header_extension, {Old, New}})
+ {_, New} = lists:foldl(
+ fun(Val, {Idx, Hdr}) ->
+ {Idx + 1, setelement(Idx, Hdr, Val)}
+ end,
+ {1, #db_header{}},
+ tuple_to_list(Old)
+ ),
+ if
+ is_record(New, db_header) -> ok;
+ true -> erlang:error({invalid_header_extension, {Old, New}})
end,
New.
-define(OLD_DISK_VERSION_ERROR,
- "Database files from versions smaller than 0.10.0 are no longer supported").
+ "Database files from versions smaller than 0.10.0 are no longer supported"
+).
-upgrade_disk_version(#db_header{}=Header) ->
+upgrade_disk_version(#db_header{} = Header) ->
case element(2, Header) of
- 1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
- 5 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre 1.2
- 6 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre clustered purge
- 7 -> Header#db_header{disk_version = ?LATEST_DISK_VERSION}; % pre partitioned dbs
- ?LATEST_DISK_VERSION -> Header;
+ 1 ->
+ throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ 2 ->
+ throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ 3 ->
+ throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ % [0.10 - 0.11)
+ 4 ->
+ Header#db_header{security_ptr = nil};
+ % pre 1.2
+ 5 ->
+ Header#db_header{disk_version = ?LATEST_DISK_VERSION};
+ % pre clustered purge
+ 6 ->
+ Header#db_header{disk_version = ?LATEST_DISK_VERSION};
+ % pre partitioned dbs
+ 7 ->
+ Header#db_header{disk_version = ?LATEST_DISK_VERSION};
+ ?LATEST_DISK_VERSION ->
+ Header;
_ ->
Reason = "Incorrect disk header version",
throw({database_disk_version_error, Reason})
end.
-
-upgrade_uuid(#db_header{}=Header) ->
+upgrade_uuid(#db_header{} = Header) ->
case Header#db_header.uuid of
undefined ->
% Upgrading this old db file to a newer
% on disk format that includes a UUID.
- Header#db_header{uuid=couch_uuids:random()};
+ Header#db_header{uuid = couch_uuids:random()};
_ ->
Header
end.
-
-upgrade_epochs(#db_header{}=Header) ->
- NewEpochs = case Header#db_header.epochs of
- undefined ->
- % This node is taking over ownership of shard with
-            % an old version of couch file. Before epochs there
- % was always an implicit assumption that a file was
- % owned since eternity by the node it was on. This
- % just codifies that assumption.
- [{node(), 0}];
- [{Node, _} | _] = Epochs0 when Node == node() ->
- % Current node is the current owner of this db
- Epochs0;
- Epochs1 ->
- % This node is taking over ownership of this db
- % and marking the update sequence where it happened.
- [{node(), Header#db_header.update_seq} | Epochs1]
- end,
+upgrade_epochs(#db_header{} = Header) ->
+ NewEpochs =
+ case Header#db_header.epochs of
+ undefined ->
+ % This node is taking over ownership of shard with
+                % an old version of couch file. Before epochs there
+ % was always an implicit assumption that a file was
+ % owned since eternity by the node it was on. This
+ % just codifies that assumption.
+ [{node(), 0}];
+ [{Node, _} | _] = Epochs0 when Node == node() ->
+ % Current node is the current owner of this db
+ Epochs0;
+ Epochs1 ->
+ % This node is taking over ownership of this db
+ % and marking the update sequence where it happened.
+ [{node(), Header#db_header.update_seq} | Epochs1]
+ end,
    % It's possible for a node to open a db and claim
% ownership but never make a write to the db. This
% removes nodes that claimed ownership but never
% changed the database.
DedupedEpochs = remove_dup_epochs(NewEpochs),
- Header#db_header{epochs=DedupedEpochs}.
-
+ Header#db_header{epochs = DedupedEpochs}.
% This is slightly relying on the update_seqs being sorted
% in epochs due to how we only ever push things onto the
@@ -296,12 +292,12 @@ upgrade_epochs(#db_header{}=Header) ->
% want to remove dupes (by calling a sort on the input to this
% function). So for now we don't sort but are relying on the
% idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
+remove_dup_epochs([_] = Epochs) ->
Epochs;
remove_dup_epochs([{N1, S}, {_N2, S}]) ->
% Seqs match, keep the most recent owner
[{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
+remove_dup_epochs([_, _] = Epochs) ->
% Seqs don't match.
Epochs;
remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
@@ -311,11 +307,10 @@ remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
% Seqs don't match, recurse to check others
[{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
-upgrade_compacted_seq(#db_header{}=Header) ->
+upgrade_compacted_seq(#db_header{} = Header) ->
case Header#db_header.compacted_seq of
undefined ->
- Header#db_header{compacted_seq=0};
+ Header#db_header{compacted_seq = 0};
_ ->
Header
end.
@@ -332,20 +327,30 @@ latest(_Else) ->
mk_header(Vsn) ->
{
- db_header, % record name
- Vsn, % disk version
- 100, % update_seq
- 0, % unused
- foo, % id_tree_state
- bar, % seq_tree_state
- bam, % local_tree_state
- flam, % was purge_seq - now purge_tree_state
- baz, % was purged_docs - now purge_seq_tree_state
- bang, % security_ptr
- 999 % revs_limit
+ % record name
+ db_header,
+ % disk version
+ Vsn,
+ % update_seq
+ 100,
+ % unused
+ 0,
+ % id_tree_state
+ foo,
+ % seq_tree_state
+ bar,
+ % local_tree_state
+ bam,
+ % was purge_seq - now purge_tree_state
+ flam,
+ % was purged_docs - now purge_seq_tree_state
+ baz,
+ % security_ptr
+ bang,
+ % revs_limit
+ 999
}.
-
-ifdef(run_broken_tests).
upgrade_v3_test() ->
@@ -388,7 +393,6 @@ upgrade_v5_to_v8_test() ->
% Security ptr isn't changed for v5 headers
?assertEqual(bang, security_ptr(NewHeader)).
-
upgrade_uuid_test() ->
Vsn5Header = mk_header(5),
@@ -404,7 +408,6 @@ upgrade_uuid_test() ->
ResetHeader = from(NewNewHeader),
?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
upgrade_epochs_test() ->
Vsn5Header = mk_header(5),
@@ -437,15 +440,12 @@ upgrade_epochs_test() ->
ResetHeader = from(NewNewHeader),
?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
get_uuid_from_old_header_test() ->
Vsn5Header = mk_header(5),
?assertEqual(undefined, uuid(Vsn5Header)).
-
get_epochs_from_old_header_test() ->
Vsn5Header = mk_header(5),
?assertEqual(undefined, epochs(Vsn5Header)).
-
-endif.
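
As an editorial aside (not part of this patch), the reflowed `upgrade_epochs/1`
and `remove_dup_epochs/1` clauses above are easier to follow with a small
sketch. The node names and sequence numbers below are hypothetical, and
`remove_dup_epochs/1` is a private function, so this is illustrative only:

```erlang
%% node1 owned the shard since seq 0; node3 then claimed it at seq 100 without
%% ever writing; node2 finally claimed it, also at seq 100.
Epochs = [{'node2@host', 100}, {'node3@host', 100}, {'node1@host', 0}],
%% Entries sharing an update_seq collapse to the most recent claimant:
[{'node2@host', 100}, {'node1@host', 0}] = remove_dup_epochs(Epochs).
```
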
diff --git a/src/couch/src/couch_bt_engine_stream.erl b/src/couch/src/couch_bt_engine_stream.erl
index 431894a50..253877e77 100644
--- a/src/couch/src/couch_bt_engine_stream.erl
+++ b/src/couch/src/couch_bt_engine_stream.erl
@@ -20,23 +20,18 @@
to_disk_term/1
]).
-
foldl({_Fd, []}, _Fun, Acc) ->
Acc;
-
foldl({Fd, [{Pos, _} | Rest]}, Fun, Acc) ->
foldl({Fd, [Pos | Rest]}, Fun, Acc);
-
foldl({Fd, [Bin | Rest]}, Fun, Acc) when is_binary(Bin) ->
% We're processing the first bit of data
% after we did a seek for a range fold.
foldl({Fd, Rest}, Fun, Fun(Bin, Acc));
-
foldl({Fd, [Pos | Rest]}, Fun, Acc) when is_integer(Pos) ->
{ok, Bin} = couch_file:pread_binary(Fd, Pos),
foldl({Fd, Rest}, Fun, Fun(Bin, Acc)).
-
seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
case Length =< Offset of
true ->
@@ -44,7 +39,6 @@ seek({Fd, [{Pos, Length} | Rest]}, Offset) ->
false ->
seek({Fd, [Pos | Rest]}, Offset)
end;
-
seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
{ok, Bin} = couch_file:pread_binary(Fd, Pos),
case iolist_size(Bin) =< Offset of
@@ -55,16 +49,12 @@ seek({Fd, [Pos | Rest]}, Offset) when is_integer(Pos) ->
{ok, {Fd, [Tail | Rest]}}
end.
-
write({Fd, Written}, Data) when is_pid(Fd) ->
{ok, Pos, _} = couch_file:append_binary(Fd, Data),
{ok, {Fd, [{Pos, iolist_size(Data)} | Written]}}.
-
finalize({Fd, Written}) ->
{ok, {Fd, lists:reverse(Written)}}.
-
to_disk_term({_Fd, Written}) ->
{ok, Written}.
-
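
A hedged usage sketch (not part of this patch) for the stream module whose
clauses were joined above: it writes two binaries and folds them back, assuming
`Fd` is an already-open couch_file handle and ignoring error handling.

```erlang
stream_roundtrip(Fd) ->
    %% write/2 appends each binary to the file and records its {Pos, Size}.
    {ok, St1} = couch_bt_engine_stream:write({Fd, []}, <<"hello">>),
    {ok, St2} = couch_bt_engine_stream:write(St1, <<"world">>),
    %% finalize/1 reverses the accumulated positions back into write order.
    {ok, Stream} = couch_bt_engine_stream:finalize(St2),
    %% foldl/3 reads each chunk and feeds it to the fun, oldest first.
    Bins = couch_bt_engine_stream:foldl(Stream, fun(Bin, Acc) -> [Bin | Acc] end, []),
    lists:reverse(Bins).
```
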
diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl
index 858ae2b90..b974a22ee 100644
--- a/src/couch/src/couch_btree.erl
+++ b/src/couch/src/couch_btree.erl
@@ -21,45 +21,45 @@
-define(FILL_RATIO, 0.5).
-extract(#btree{extract_kv=undefined}, Value) ->
+extract(#btree{extract_kv = undefined}, Value) ->
Value;
-extract(#btree{extract_kv=Extract}, Value) ->
+extract(#btree{extract_kv = Extract}, Value) ->
Extract(Value).
-assemble(#btree{assemble_kv=undefined}, Key, Value) ->
+assemble(#btree{assemble_kv = undefined}, Key, Value) ->
{Key, Value};
-assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
+assemble(#btree{assemble_kv = Assemble}, Key, Value) ->
Assemble(Key, Value).
-less(#btree{less=undefined}, A, B) ->
+less(#btree{less = undefined}, A, B) ->
A < B;
-less(#btree{less=Less}, A, B) ->
+less(#btree{less = Less}, A, B) ->
Less(A, B).
% pass in 'nil' for State if this is a new Btree.
open(State, Fd) ->
- {ok, #btree{root=State, fd=Fd}}.
+ {ok, #btree{root = State, fd = Fd}}.
set_options(Bt, []) ->
Bt;
-set_options(Bt, [{split, Extract}|Rest]) ->
- set_options(Bt#btree{extract_kv=Extract}, Rest);
-set_options(Bt, [{join, Assemble}|Rest]) ->
- set_options(Bt#btree{assemble_kv=Assemble}, Rest);
-set_options(Bt, [{less, Less}|Rest]) ->
- set_options(Bt#btree{less=Less}, Rest);
-set_options(Bt, [{reduce, Reduce}|Rest]) ->
- set_options(Bt#btree{reduce=Reduce}, Rest);
-set_options(Bt, [{compression, Comp}|Rest]) ->
- set_options(Bt#btree{compression=Comp}, Rest).
+set_options(Bt, [{split, Extract} | Rest]) ->
+ set_options(Bt#btree{extract_kv = Extract}, Rest);
+set_options(Bt, [{join, Assemble} | Rest]) ->
+ set_options(Bt#btree{assemble_kv = Assemble}, Rest);
+set_options(Bt, [{less, Less} | Rest]) ->
+ set_options(Bt#btree{less = Less}, Rest);
+set_options(Bt, [{reduce, Reduce} | Rest]) ->
+ set_options(Bt#btree{reduce = Reduce}, Rest);
+set_options(Bt, [{compression, Comp} | Rest]) ->
+ set_options(Bt#btree{compression = Comp}, Rest).
open(State, Fd, Options) ->
- {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
+ {ok, set_options(#btree{root = State, fd = Fd}, Options)}.
-get_state(#btree{root=Root}) ->
+get_state(#btree{root = Root}) ->
Root.
-final_reduce(#btree{reduce=Reduce}, Val) ->
+final_reduce(#btree{reduce = Reduce}, Val) ->
final_reduce(Reduce, Val);
final_reduce(Reduce, {[], []}) ->
Reduce(reduce, []);
@@ -71,30 +71,42 @@ final_reduce(Reduce, {KVs, Reductions}) ->
Red = Reduce(reduce, KVs),
final_reduce(Reduce, {[], [Red | Reductions]}).
-fold_reduce(#btree{root=Root}=Bt, Fun, Acc, Options) ->
+fold_reduce(#btree{root = Root} = Bt, Fun, Acc, Options) ->
Dir = couch_util:get_value(dir, Options, fwd),
StartKey = couch_util:get_value(start_key, Options),
InEndRangeFun = make_key_in_end_range_function(Bt, Dir, Options),
KeyGroupFun = get_group_fun(Bt, Options),
try
{ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(Bt, Dir, Root, StartKey, InEndRangeFun, undefined, [], [],
- KeyGroupFun, Fun, Acc),
- if GroupedKey2 == undefined ->
- {ok, Acc2};
- true ->
- case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
- {ok, Acc3} -> {ok, Acc3};
- {stop, Acc3} -> {ok, Acc3}
- end
+ reduce_stream_node(
+ Bt,
+ Dir,
+ Root,
+ StartKey,
+ InEndRangeFun,
+ undefined,
+ [],
+ [],
+ KeyGroupFun,
+ Fun,
+ Acc
+ ),
+ if
+ GroupedKey2 == undefined ->
+ {ok, Acc2};
+ true ->
+ case Fun(GroupedKey2, {GroupedKVsAcc2, GroupedRedsAcc2}, Acc2) of
+ {ok, Acc3} -> {ok, Acc3};
+ {stop, Acc3} -> {ok, Acc3}
+ end
end
catch
throw:{stop, AccDone} -> {ok, AccDone}
end.
-full_reduce(#btree{root=nil,reduce=Reduce}) ->
+full_reduce(#btree{root = nil, reduce = Reduce}) ->
{ok, Reduce(reduce, [])};
-full_reduce(#btree{root=Root}) ->
+full_reduce(#btree{root = Root}) ->
{ok, element(2, Root)}.
size(#btree{root = nil}) ->
@@ -114,7 +126,7 @@ get_group_fun(Bt, Options) ->
N when is_integer(N), N > 0 ->
make_group_fun(Bt, N);
undefined ->
- couch_util:get_value(key_group_fun, Options, fun(_,_) -> true end)
+ couch_util:get_value(key_group_fun, Options, fun(_, _) -> true end)
end.
make_group_fun(Bt, exact) ->
@@ -135,7 +147,7 @@ make_group_fun(Bt, GroupLevel) when is_integer(GroupLevel), GroupLevel > 0 ->
fun
GF({{p, Partition, Key1}, Val1}, {{p, Partition, Key2}, Val2}) ->
GF({Key1, Val1}, {Key2, Val2});
- GF({[_|_] = Key1, _}, {[_|_] = Key2, _}) ->
+ GF({[_ | _] = Key1, _}, {[_ | _] = Key2, _}) ->
SL1 = lists:sublist(Key1, GroupLevel),
SL2 = lists:sublist(Key2, GroupLevel),
case less(Bt, {SL1, nil}, {SL2, nil}) of
@@ -175,61 +187,75 @@ convert_fun_arity(Fun) when is_function(Fun, 3) ->
(traverse, _K, _Red, AccIn) -> {ok, AccIn}
end;
convert_fun_arity(Fun) when is_function(Fun, 4) ->
- Fun. % Already arity 4
+ % Already arity 4
+ Fun.
make_key_in_end_range_function(Bt, fwd, Options) ->
case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- case couch_util:get_value(end_key, Options) of
undefined ->
- fun(_Key) -> true end;
- LastKey ->
- fun(Key) -> not less(Bt, LastKey, Key) end
- end;
- EndKey ->
- fun(Key) -> less(Bt, Key, EndKey) end
+ case couch_util:get_value(end_key, Options) of
+ undefined ->
+ fun(_Key) -> true end;
+ LastKey ->
+ fun(Key) -> not less(Bt, LastKey, Key) end
+ end;
+ EndKey ->
+ fun(Key) -> less(Bt, Key, EndKey) end
end;
make_key_in_end_range_function(Bt, rev, Options) ->
case couch_util:get_value(end_key_gt, Options) of
- undefined ->
- case couch_util:get_value(end_key, Options) of
undefined ->
- fun(_Key) -> true end;
- LastKey ->
- fun(Key) -> not less(Bt, Key, LastKey) end
- end;
- EndKey ->
- fun(Key) -> less(Bt, EndKey, Key) end
+ case couch_util:get_value(end_key, Options) of
+ undefined ->
+ fun(_Key) -> true end;
+ LastKey ->
+ fun(Key) -> not less(Bt, Key, LastKey) end
+ end;
+ EndKey ->
+ fun(Key) -> less(Bt, EndKey, Key) end
end.
-
foldl(Bt, Fun, Acc) ->
fold(Bt, Fun, Acc, []).
foldl(Bt, Fun, Acc, Options) ->
fold(Bt, Fun, Acc, Options).
-
-fold(#btree{root=nil}, _Fun, Acc, _Options) ->
+fold(#btree{root = nil}, _Fun, Acc, _Options) ->
{ok, {[], []}, Acc};
-fold(#btree{root=Root}=Bt, Fun, Acc, Options) ->
+fold(#btree{root = Root} = Bt, Fun, Acc, Options) ->
Dir = couch_util:get_value(dir, Options, fwd),
InRange = make_key_in_end_range_function(Bt, Dir, Options),
Result =
- case couch_util:get_value(start_key, Options) of
- undefined ->
- stream_node(Bt, [], Bt#btree.root, InRange, Dir,
- convert_fun_arity(Fun), Acc);
- StartKey ->
- stream_node(Bt, [], Bt#btree.root, StartKey, InRange, Dir,
- convert_fun_arity(Fun), Acc)
- end,
+ case couch_util:get_value(start_key, Options) of
+ undefined ->
+ stream_node(
+ Bt,
+ [],
+ Bt#btree.root,
+ InRange,
+ Dir,
+ convert_fun_arity(Fun),
+ Acc
+ );
+ StartKey ->
+ stream_node(
+ Bt,
+ [],
+ Bt#btree.root,
+ StartKey,
+ InRange,
+ Dir,
+ convert_fun_arity(Fun),
+ Acc
+ )
+ end,
case Result of
- {ok, Acc2}->
- FullReduction = element(2, Root),
- {ok, {[], [FullReduction]}, Acc2};
- {stop, LastReduction, Acc2} ->
- {ok, LastReduction, Acc2}
+ {ok, Acc2} ->
+ FullReduction = element(2, Root),
+ {ok, {[], [FullReduction]}, Acc2};
+ {stop, LastReduction, Acc2} ->
+ {ok, LastReduction, Acc2}
end.
add(Bt, InsertKeyValues) ->
@@ -240,27 +266,28 @@ add_remove(Bt, InsertKeyValues, RemoveKeys) ->
{ok, Bt2}.
query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
- #btree{root=Root} = Bt,
+ #btree{root = Root} = Bt,
InsertActions = lists:map(
fun(KeyValue) ->
{Key, Value} = extract(Bt, KeyValue),
{insert, Key, Value}
- end, InsertValues),
+ end,
+ InsertValues
+ ),
RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
SortFun =
fun({OpA, A, _}, {OpB, B, _}) ->
case A == B of
- % A and B are equal, sort by op.
- true -> op_order(OpA) < op_order(OpB);
- false ->
- less(Bt, A, B)
+ % A and B are equal, sort by op.
+ true -> op_order(OpA) < op_order(OpB);
+ false -> less(Bt, A, B)
end
end,
Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
{ok, KeyPointers, QueryResults} = modify_node(Bt, Root, Actions, []),
{ok, NewRoot} = complete_root(Bt, KeyPointers),
- {ok, QueryResults, Bt#btree{root=NewRoot}}.
+ {ok, QueryResults, Bt#btree{root = NewRoot}}.
% for ordering different operations with the same key.
% fetch < remove < insert
@@ -268,11 +295,12 @@ op_order(fetch) -> 1;
op_order(remove) -> 2;
op_order(insert) -> 3.
-lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
- SortedKeys = case Less of
- undefined -> lists:sort(Keys);
- _ -> lists:sort(Less, Keys)
- end,
+lookup(#btree{root = Root, less = Less} = Bt, Keys) ->
+ SortedKeys =
+ case Less of
+ undefined -> lists:sort(Keys);
+ _ -> lists:sort(Less, Keys)
+ end,
{ok, SortedResults} = lookup(Bt, Root, SortedKeys),
% We want to return the results in the same order as the keys were input
% but we may have changed the order when we sorted. So we need to put the
@@ -285,10 +313,10 @@ lookup(Bt, Node, Keys) ->
Pointer = element(1, Node),
{NodeType, NodeList} = get_node(Bt, Pointer),
case NodeType of
- kp_node ->
- lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
- kv_node ->
- lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
+ kp_node ->
+ lookup_kpnode(Bt, list_to_tuple(NodeList), 1, Keys, []);
+ kv_node ->
+ lookup_kvnode(Bt, list_to_tuple(NodeList), 1, Keys, [])
end.
lookup_kpnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
@@ -300,14 +328,13 @@ lookup_kpnode(Bt, NodeTuple, LowerBound, [FirstLookupKey | _] = LookupKeys, Outp
{Key, PointerInfo} = element(N, NodeTuple),
SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
case lists:splitwith(SplitFun, LookupKeys) of
- {[], GreaterQueries} ->
- lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
- {LessEqQueries, GreaterQueries} ->
- {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
- lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
+ {[], GreaterQueries} ->
+ lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, Output);
+ {LessEqQueries, GreaterQueries} ->
+ {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
+ lookup_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, lists:reverse(Results, Output))
end.
-
lookup_kvnode(_Bt, _NodeTuple, _LowerBound, [], Output) ->
{ok, lists:reverse(Output)};
lookup_kvnode(_Bt, NodeTuple, LowerBound, Keys, Output) when tuple_size(NodeTuple) < LowerBound ->
@@ -317,24 +344,27 @@ lookup_kvnode(Bt, NodeTuple, LowerBound, [LookupKey | RestLookupKeys], Output) -
N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), LookupKey),
{Key, Value} = element(N, NodeTuple),
case less(Bt, LookupKey, Key) of
- true ->
- % LookupKey is less than Key
- lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
- false ->
- case less(Bt, Key, LookupKey) of
true ->
- % LookupKey is greater than Key
- lookup_kvnode(Bt, NodeTuple, N+1, RestLookupKeys, [{LookupKey, not_found} | Output]);
+ % LookupKey is less than Key
+ lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, not_found} | Output]);
false ->
- % LookupKey is equal to Key
- lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
- end
+ case less(Bt, Key, LookupKey) of
+ true ->
+ % LookupKey is greater than Key
+ lookup_kvnode(Bt, NodeTuple, N + 1, RestLookupKeys, [
+ {LookupKey, not_found} | Output
+ ]);
+ false ->
+ % LookupKey is equal to Key
+ lookup_kvnode(Bt, NodeTuple, N, RestLookupKeys, [
+ {LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output
+ ])
+ end
end.
-
complete_root(_Bt, []) ->
{ok, nil};
-complete_root(_Bt, [{_Key, PointerInfo}])->
+complete_root(_Bt, [{_Key, PointerInfo}]) ->
{ok, PointerInfo};
complete_root(Bt, KPs) ->
{ok, ResultKeyPointers} = write_node(Bt, kp_node, KPs),
@@ -348,12 +378,12 @@ complete_root(Bt, KPs) ->
chunkify(InList) ->
BaseChunkSize = get_chunk_size(),
case ?term_size(InList) of
- Size when Size > BaseChunkSize ->
- NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
- ChunkThreshold = Size div NumberOfChunksLikely,
- chunkify(InList, ChunkThreshold, [], 0, []);
- _Else ->
- [InList]
+ Size when Size > BaseChunkSize ->
+ NumberOfChunksLikely = ((Size div BaseChunkSize) + 1),
+ ChunkThreshold = Size div NumberOfChunksLikely,
+ chunkify(InList, ChunkThreshold, [], 0, []);
+ _Else ->
+ [InList]
end.
chunkify([], _ChunkThreshold, [], 0, OutputChunks) ->
@@ -365,58 +395,67 @@ chunkify([], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
lists:reverse([lists:reverse(OutList) | OutputChunks]);
chunkify([InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
case ?term_size(InElement) of
- Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
- chunkify(RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
- Size ->
- chunkify(RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
+ Size when (Size + OutListSize) > ChunkThreshold andalso OutList /= [] ->
+ chunkify(RestInList, ChunkThreshold, [], 0, [
+ lists:reverse([InElement | OutList]) | OutputChunks
+ ]);
+ Size ->
+ chunkify(
+ RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks
+ )
end.
--compile({inline,[get_chunk_size/0]}).
+-compile({inline, [get_chunk_size/0]}).
get_chunk_size() ->
try
list_to_integer(config:get("couchdb", "btree_chunk_size", "1279"))
- catch error:badarg ->
- 1279
+ catch
+ error:badarg ->
+ 1279
end.
modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
- {NodeType, NodeList} = case RootPointerInfo of
- nil ->
- {kv_node, []};
- _Tuple ->
- Pointer = element(1, RootPointerInfo),
- get_node(Bt, Pointer)
- end,
+ {NodeType, NodeList} =
+ case RootPointerInfo of
+ nil ->
+ {kv_node, []};
+ _Tuple ->
+ Pointer = element(1, RootPointerInfo),
+ get_node(Bt, Pointer)
+ end,
NodeTuple = list_to_tuple(NodeList),
{ok, NewNodeList, QueryOutput2} =
- case NodeType of
- kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
- kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
- end,
+ case NodeType of
+ kp_node -> modify_kpnode(Bt, NodeTuple, 1, Actions, [], QueryOutput);
+ kv_node -> modify_kvnode(Bt, NodeTuple, 1, Actions, [], QueryOutput)
+ end,
case NewNodeList of
- [] -> % no nodes remain
- {ok, [], QueryOutput2};
- NodeList -> % nothing changed
- {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
- {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
- _Else2 ->
- {ok, ResultList} = case RootPointerInfo of
- nil ->
- write_node(Bt, NodeType, NewNodeList);
- _ ->
+ % no nodes remain
+ [] ->
+ {ok, [], QueryOutput2};
+ % nothing changed
+ NodeList ->
{LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
- OldNode = {LastKey, RootPointerInfo},
- write_node(Bt, OldNode, NodeType, NodeList, NewNodeList)
- end,
- {ok, ResultList, QueryOutput2}
+ {ok, [{LastKey, RootPointerInfo}], QueryOutput2};
+ _Else2 ->
+ {ok, ResultList} =
+ case RootPointerInfo of
+ nil ->
+ write_node(Bt, NodeType, NewNodeList);
+ _ ->
+ {LastKey, _LastValue} = element(tuple_size(NodeTuple), NodeTuple),
+ OldNode = {LastKey, RootPointerInfo},
+ write_node(Bt, OldNode, NodeType, NodeList, NewNodeList)
+ end,
+ {ok, ResultList, QueryOutput2}
end.
-reduce_node(#btree{reduce=nil}, _NodeType, _NodeList) ->
+reduce_node(#btree{reduce = nil}, _NodeType, _NodeList) ->
[];
-reduce_node(#btree{reduce=R}, kp_node, NodeList) ->
+reduce_node(#btree{reduce = R}, kp_node, NodeList) ->
R(rereduce, [element(2, Node) || {_K, Node} <- NodeList]);
-reduce_node(#btree{reduce=R}=Bt, kv_node, NodeList) ->
+reduce_node(#btree{reduce = R} = Bt, kv_node, NodeList) ->
R(reduce, [assemble(Bt, K, V) || {K, V} <- NodeList]).
reduce_tree_size(kv_node, NodeSize, _KvList) ->
@@ -444,17 +483,14 @@ write_node(#btree{fd = Fd, compression = Comp} = Bt, NodeType, NodeList) ->
{ok, PtrSizes} = couch_file:append_terms(Fd, ToWrite, WriteOpts),
{ok, group_kps(Bt, NodeType, Chunks, PtrSizes)}.
-
group_kps(_Bt, _NodeType, [], []) ->
[];
-
group_kps(Bt, NodeType, [Chunk | RestChunks], [{Ptr, Size} | RestPtrSizes]) ->
{LastKey, _} = lists:last(Chunk),
SubTreeSize = reduce_tree_size(NodeType, Size, Chunk),
KP = {LastKey, {Ptr, reduce_node(Bt, NodeType, Chunk), SubTreeSize}},
[KP | group_kps(Bt, NodeType, RestChunks, RestPtrSizes)].
-
write_node(Bt, _OldNode, NodeType, [], NewList) ->
write_node(Bt, NodeType, NewList);
write_node(Bt, _OldNode, NodeType, [_], NewList) ->
@@ -462,14 +498,16 @@ write_node(Bt, _OldNode, NodeType, [_], NewList) ->
write_node(Bt, OldNode, NodeType, OldList, NewList) ->
case can_reuse_old_node(OldList, NewList) of
{true, Prefix, Suffix} ->
- {ok, PrefixKVs} = case Prefix of
- [] -> {ok, []};
- _ -> write_node(Bt, NodeType, Prefix)
- end,
- {ok, SuffixKVs} = case Suffix of
- [] -> {ok, []};
- _ -> write_node(Bt, NodeType, Suffix)
- end,
+ {ok, PrefixKVs} =
+ case Prefix of
+ [] -> {ok, []};
+ _ -> write_node(Bt, NodeType, Prefix)
+ end,
+ {ok, SuffixKVs} =
+ case Suffix of
+ [] -> {ok, []};
+ _ -> write_node(Bt, NodeType, Suffix)
+ end,
Result = PrefixKVs ++ [OldNode] ++ SuffixKVs,
{ok, Result};
false ->
@@ -481,8 +519,9 @@ can_reuse_old_node(OldList, NewList) ->
case old_list_is_prefix(OldList, RestNewList, 0) of
{true, Size, Suffix} ->
ReuseThreshold = get_chunk_size() * ?FILL_RATIO,
- if Size < ReuseThreshold -> false; true ->
- {true, Prefix, Suffix}
+ if
+ Size < ReuseThreshold -> false;
+ true -> {true, Prefix, Suffix}
end;
false ->
false
@@ -510,38 +549,67 @@ old_list_is_prefix(_OldList, _NewList, _Acc) ->
modify_kpnode(Bt, {}, _LowerBound, Actions, [], QueryOutput) ->
modify_node(Bt, nil, Actions, QueryOutput);
modify_kpnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
- {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
- tuple_size(NodeTuple), [])), QueryOutput};
-modify_kpnode(Bt, NodeTuple, LowerBound,
- [{_, FirstActionKey, _}|_]=Actions, ResultNode, QueryOutput) ->
+ {ok,
+ lists:reverse(
+ ResultNode,
+ bounded_tuple_to_list(
+ NodeTuple,
+ LowerBound,
+ tuple_size(NodeTuple),
+ []
+ )
+ ),
+ QueryOutput};
+modify_kpnode(
+ Bt,
+ NodeTuple,
+ LowerBound,
+ [{_, FirstActionKey, _} | _] = Actions,
+ ResultNode,
+ QueryOutput
+) ->
Sz = tuple_size(NodeTuple),
N = find_first_gteq(Bt, NodeTuple, LowerBound, Sz, FirstActionKey),
case N =:= Sz of
- true ->
- % perform remaining actions on last node
- {_, PointerInfo} = element(Sz, NodeTuple),
- {ok, ChildKPs, QueryOutput2} =
- modify_node(Bt, PointerInfo, Actions, QueryOutput),
- NodeList = lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound,
- Sz - 1, ChildKPs)),
- {ok, NodeList, QueryOutput2};
- false ->
- {NodeKey, PointerInfo} = element(N, NodeTuple),
- SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
+ true ->
+ % perform remaining actions on last node
+ {_, PointerInfo} = element(Sz, NodeTuple),
+ {ok, ChildKPs, QueryOutput2} =
+ modify_node(Bt, PointerInfo, Actions, QueryOutput),
+ NodeList = lists:reverse(
+ ResultNode,
+ bounded_tuple_to_list(
+ NodeTuple,
+ LowerBound,
+ Sz - 1,
+ ChildKPs
+ )
+ ),
+ {ok, NodeList, QueryOutput2};
+ false ->
+ {NodeKey, PointerInfo} = element(N, NodeTuple),
+ SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
not less(Bt, NodeKey, ActionKey)
end,
- {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
- {ok, ChildKPs, QueryOutput2} =
+ {LessEqQueries, GreaterQueries} = lists:splitwith(SplitFun, Actions),
+ {ok, ChildKPs, QueryOutput2} =
modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
- ResultNode2 = lists:reverse(ChildKPs, bounded_tuple_to_revlist(NodeTuple,
- LowerBound, N - 1, ResultNode)),
- modify_kpnode(Bt, NodeTuple, N+1, GreaterQueries, ResultNode2, QueryOutput2)
+ ResultNode2 = lists:reverse(
+ ChildKPs,
+ bounded_tuple_to_revlist(
+ NodeTuple,
+ LowerBound,
+ N - 1,
+ ResultNode
+ )
+ ),
+ modify_kpnode(Bt, NodeTuple, N + 1, GreaterQueries, ResultNode2, QueryOutput2)
end.
bounded_tuple_to_revlist(_Tuple, Start, End, Tail) when Start > End ->
Tail;
bounded_tuple_to_revlist(Tuple, Start, End, Tail) ->
- bounded_tuple_to_revlist(Tuple, Start+1, End, [element(Start, Tuple)|Tail]).
+ bounded_tuple_to_revlist(Tuple, Start + 1, End, [element(Start, Tuple) | Tail]).
bounded_tuple_to_list(Tuple, Start, End, Tail) ->
bounded_tuple_to_list2(Tuple, Start, End, [], Tail).
@@ -557,191 +625,438 @@ find_first_gteq(Bt, Tuple, Start, End, Key) ->
Mid = Start + ((End - Start) div 2),
{TupleKey, _} = element(Mid, Tuple),
case less(Bt, TupleKey, Key) of
- true ->
- find_first_gteq(Bt, Tuple, Mid+1, End, Key);
- false ->
- find_first_gteq(Bt, Tuple, Start, Mid, Key)
+ true ->
+ find_first_gteq(Bt, Tuple, Mid + 1, End, Key);
+ false ->
+ find_first_gteq(Bt, Tuple, Start, Mid, Key)
end.
modify_kvnode(_Bt, NodeTuple, LowerBound, [], ResultNode, QueryOutput) ->
- {ok, lists:reverse(ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])), QueryOutput};
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) when LowerBound > tuple_size(NodeTuple) ->
+ {ok,
+ lists:reverse(
+ ResultNode, bounded_tuple_to_list(NodeTuple, LowerBound, tuple_size(NodeTuple), [])
+ ),
+ QueryOutput};
+modify_kvnode(
+ Bt,
+ NodeTuple,
+ LowerBound,
+ [{ActionType, ActionKey, ActionValue} | RestActions],
+ ResultNode,
+ QueryOutput
+) when LowerBound > tuple_size(NodeTuple) ->
case ActionType of
- insert ->
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
- remove ->
- % just drop the action
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
- fetch ->
- % the key/value must not exist in the tree
- modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+ insert ->
+ modify_kvnode(
+ Bt,
+ NodeTuple,
+ LowerBound,
+ RestActions,
+ [{ActionKey, ActionValue} | ResultNode],
+ QueryOutput
+ );
+ remove ->
+ % just drop the action
+ modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % the key/value must not exist in the tree
+ modify_kvnode(Bt, NodeTuple, LowerBound, RestActions, ResultNode, [
+ {not_found, {ActionKey, nil}} | QueryOutput
+ ])
end;
-modify_kvnode(Bt, NodeTuple, LowerBound, [{ActionType, ActionKey, ActionValue} | RestActions], AccNode, QueryOutput) ->
+modify_kvnode(
+ Bt,
+ NodeTuple,
+ LowerBound,
+ [{ActionType, ActionKey, ActionValue} | RestActions],
+ AccNode,
+ QueryOutput
+) ->
N = find_first_gteq(Bt, NodeTuple, LowerBound, tuple_size(NodeTuple), ActionKey),
{Key, Value} = element(N, NodeTuple),
- ResultNode = bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
+ ResultNode = bounded_tuple_to_revlist(NodeTuple, LowerBound, N - 1, AccNode),
case less(Bt, ActionKey, Key) of
- true ->
- case ActionType of
- insert ->
- % ActionKey is less than the Key, so insert
- modify_kvnode(Bt, NodeTuple, N, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
- remove ->
- % ActionKey is less than the Key, just drop the action
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
- fetch ->
- % ActionKey is less than the Key, the key/value must not exist in the tree
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
- end;
- false ->
- % ActionKey and Key are maybe equal.
- case less(Bt, Key, ActionKey) of
- false ->
+ true ->
case ActionType of
- insert ->
- modify_kvnode(Bt, NodeTuple, N+1, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
- remove ->
- modify_kvnode(Bt, NodeTuple, N+1, RestActions, ResultNode, QueryOutput);
- fetch ->
- % ActionKey is equal to the Key, insert into the QueryOuput, but re-process the node
- % since an identical action key can follow it.
- modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
+ insert ->
+ % ActionKey is less than the Key, so insert
+ modify_kvnode(
+ Bt,
+ NodeTuple,
+ N,
+ RestActions,
+ [{ActionKey, ActionValue} | ResultNode],
+ QueryOutput
+ );
+ remove ->
+ % ActionKey is less than the Key, just drop the action
+ modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, QueryOutput);
+ fetch ->
+ % ActionKey is less than the Key, the key/value must not exist in the tree
+ modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [
+ {not_found, {ActionKey, nil}} | QueryOutput
+ ])
end;
- true ->
- modify_kvnode(Bt, NodeTuple, N + 1, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput)
- end
+ false ->
+            % ActionKey and Key may be equal.
+ case less(Bt, Key, ActionKey) of
+ false ->
+ case ActionType of
+ insert ->
+ modify_kvnode(
+ Bt,
+ NodeTuple,
+ N + 1,
+ RestActions,
+ [{ActionKey, ActionValue} | ResultNode],
+ QueryOutput
+ );
+ remove ->
+ modify_kvnode(
+ Bt, NodeTuple, N + 1, RestActions, ResultNode, QueryOutput
+ );
+ fetch ->
+                            % ActionKey is equal to the Key, insert into the QueryOutput, but re-process the node
+ % since an identical action key can follow it.
+ modify_kvnode(Bt, NodeTuple, N, RestActions, ResultNode, [
+ {ok, assemble(Bt, Key, Value)} | QueryOutput
+ ])
+ end;
+ true ->
+ modify_kvnode(
+ Bt,
+ NodeTuple,
+ N + 1,
+ [{ActionType, ActionKey, ActionValue} | RestActions],
+ [{Key, Value} | ResultNode],
+ QueryOutput
+ )
+ end
end.
-
-reduce_stream_node(_Bt, _Dir, nil, _KeyStart, _InEndRangeFun, GroupedKey, GroupedKVsAcc,
- GroupedRedsAcc, _KeyGroupFun, _Fun, Acc) ->
+reduce_stream_node(
+ _Bt,
+ _Dir,
+ nil,
+ _KeyStart,
+ _InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ _KeyGroupFun,
+ _Fun,
+ Acc
+) ->
{ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_node(Bt, Dir, Node, KeyStart, InEndRangeFun, GroupedKey, GroupedKVsAcc,
- GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+reduce_stream_node(
+ Bt,
+ Dir,
+ Node,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+) ->
P = element(1, Node),
case get_node(Bt, P) of
- {kp_node, NodeList} ->
- NodeList2 = adjust_dir(Dir, NodeList),
- reduce_stream_kp_node(Bt, Dir, NodeList2, KeyStart, InEndRangeFun, GroupedKey,
- GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc);
- {kv_node, KVs} ->
- KVs2 = adjust_dir(Dir, KVs),
- reduce_stream_kv_node(Bt, Dir, KVs2, KeyStart, InEndRangeFun, GroupedKey,
- GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc)
+ {kp_node, NodeList} ->
+ NodeList2 = adjust_dir(Dir, NodeList),
+ reduce_stream_kp_node(
+ Bt,
+ Dir,
+ NodeList2,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+ );
+ {kv_node, KVs} ->
+ KVs2 = adjust_dir(Dir, KVs),
+ reduce_stream_kv_node(
+ Bt,
+ Dir,
+ KVs2,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+ )
end.
-reduce_stream_kv_node(Bt, Dir, KVs, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- KeyGroupFun, Fun, Acc) ->
-
+reduce_stream_kv_node(
+ Bt,
+ Dir,
+ KVs,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+) ->
GTEKeyStartKVs =
- case KeyStart of
- undefined ->
- KVs;
- _ ->
- DropFun = case Dir of
- fwd ->
- fun({Key, _}) -> less(Bt, Key, KeyStart) end;
- rev ->
- fun({Key, _}) -> less(Bt, KeyStart, Key) end
+ case KeyStart of
+ undefined ->
+ KVs;
+ _ ->
+ DropFun =
+ case Dir of
+ fwd ->
+ fun({Key, _}) -> less(Bt, Key, KeyStart) end;
+ rev ->
+ fun({Key, _}) -> less(Bt, KeyStart, Key) end
+ end,
+ lists:dropwhile(DropFun, KVs)
end,
- lists:dropwhile(DropFun, KVs)
- end,
KVs2 = lists:takewhile(
- fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs),
- reduce_stream_kv_node2(Bt, KVs2, GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kv_node2(_Bt, [], GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- _KeyGroupFun, _Fun, Acc) ->
+ fun({Key, _}) -> InEndRangeFun(Key) end, GTEKeyStartKVs
+ ),
+ reduce_stream_kv_node2(
+ Bt,
+ KVs2,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+ ).
+
+reduce_stream_kv_node2(
+ _Bt,
+ [],
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ _KeyGroupFun,
+ _Fun,
+ Acc
+) ->
{ok, Acc, GroupedRedsAcc, GroupedKVsAcc, GroupedKey};
-reduce_stream_kv_node2(Bt, [{Key, Value}| RestKVs], GroupedKey, GroupedKVsAcc,
- GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
+reduce_stream_kv_node2(
+ Bt,
+ [{Key, Value} | RestKVs],
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+) ->
case GroupedKey of
- undefined ->
- reduce_stream_kv_node2(Bt, RestKVs, Key,
- [assemble(Bt,Key,Value)], [], KeyGroupFun, Fun, Acc);
- _ ->
-
- case KeyGroupFun(GroupedKey, Key) of
- true ->
- reduce_stream_kv_node2(Bt, RestKVs, GroupedKey,
- [assemble(Bt,Key,Value)|GroupedKVsAcc], GroupedRedsAcc, KeyGroupFun,
- Fun, Acc);
- false ->
- case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
- {ok, Acc2} ->
- reduce_stream_kv_node2(Bt, RestKVs, Key, [assemble(Bt,Key,Value)],
- [], KeyGroupFun, Fun, Acc2);
- {stop, Acc2} ->
- throw({stop, Acc2})
+ undefined ->
+ reduce_stream_kv_node2(
+ Bt,
+ RestKVs,
+ Key,
+ [assemble(Bt, Key, Value)],
+ [],
+ KeyGroupFun,
+ Fun,
+ Acc
+ );
+ _ ->
+ case KeyGroupFun(GroupedKey, Key) of
+ true ->
+ reduce_stream_kv_node2(
+ Bt,
+ RestKVs,
+ GroupedKey,
+ [assemble(Bt, Key, Value) | GroupedKVsAcc],
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+ );
+ false ->
+ case Fun(GroupedKey, {GroupedKVsAcc, GroupedRedsAcc}, Acc) of
+ {ok, Acc2} ->
+ reduce_stream_kv_node2(
+ Bt,
+ RestKVs,
+ Key,
+ [assemble(Bt, Key, Value)],
+ [],
+ KeyGroupFun,
+ Fun,
+ Acc2
+ );
+ {stop, Acc2} ->
+ throw({stop, Acc2})
+ end
end
- end
end.
-reduce_stream_kp_node(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc,
- KeyGroupFun, Fun, Acc) ->
+reduce_stream_kp_node(
+ Bt,
+ Dir,
+ NodeList,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+) ->
Nodes =
- case KeyStart of
- undefined ->
- NodeList;
- _ ->
- case Dir of
- fwd ->
- lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
- rev ->
- RevKPs = lists:reverse(NodeList),
- case lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs) of
- {_Before, []} ->
+ case KeyStart of
+ undefined ->
NodeList;
- {Before, [FirstAfter | _]} ->
- [FirstAfter | lists:reverse(Before)]
- end
- end
- end,
+ _ ->
+ case Dir of
+ fwd ->
+ lists:dropwhile(fun({Key, _}) -> less(Bt, Key, KeyStart) end, NodeList);
+ rev ->
+ RevKPs = lists:reverse(NodeList),
+ case
+ lists:splitwith(fun({Key, _}) -> less(Bt, Key, KeyStart) end, RevKPs)
+ of
+ {_Before, []} ->
+ NodeList;
+ {Before, [FirstAfter | _]} ->
+ [FirstAfter | lists:reverse(Before)]
+ end
+ end
+ end,
{InRange, MaybeInRange} = lists:splitwith(
- fun({Key, _}) -> InEndRangeFun(Key) end, Nodes),
- NodesInRange = case MaybeInRange of
- [FirstMaybeInRange | _] when Dir =:= fwd ->
- InRange ++ [FirstMaybeInRange];
- _ ->
- InRange
- end,
- reduce_stream_kp_node2(Bt, Dir, NodesInRange, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc).
-
-
-reduce_stream_kp_node2(Bt, Dir, [{_Key, NodeInfo} | RestNodeList], KeyStart, InEndRangeFun,
- undefined, [], [], KeyGroupFun, Fun, Acc) ->
+ fun({Key, _}) -> InEndRangeFun(Key) end, Nodes
+ ),
+ NodesInRange =
+ case MaybeInRange of
+ [FirstMaybeInRange | _] when Dir =:= fwd ->
+ InRange ++ [FirstMaybeInRange];
+ _ ->
+ InRange
+ end,
+ reduce_stream_kp_node2(
+ Bt,
+ Dir,
+ NodesInRange,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+ ).
+
+reduce_stream_kp_node2(
+ Bt,
+ Dir,
+ [{_Key, NodeInfo} | RestNodeList],
+ KeyStart,
+ InEndRangeFun,
+ undefined,
+ [],
+ [],
+ KeyGroupFun,
+ Fun,
+ Acc
+) ->
{ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, undefined,
- [], [], KeyGroupFun, Fun, Acc),
- reduce_stream_kp_node2(Bt, Dir, RestNodeList, KeyStart, InEndRangeFun, GroupedKey2,
- GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
-reduce_stream_kp_node2(Bt, Dir, NodeList, KeyStart, InEndRangeFun,
- GroupedKey, GroupedKVsAcc, GroupedRedsAcc, KeyGroupFun, Fun, Acc) ->
- {Grouped0, Ungrouped0} = lists:splitwith(fun({Key,_}) ->
- KeyGroupFun(GroupedKey, Key) end, NodeList),
+ reduce_stream_node(
+ Bt,
+ Dir,
+ NodeInfo,
+ KeyStart,
+ InEndRangeFun,
+ undefined,
+ [],
+ [],
+ KeyGroupFun,
+ Fun,
+ Acc
+ ),
+ reduce_stream_kp_node2(
+ Bt,
+ Dir,
+ RestNodeList,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey2,
+ GroupedKVsAcc2,
+ GroupedRedsAcc2,
+ KeyGroupFun,
+ Fun,
+ Acc2
+ );
+reduce_stream_kp_node2(
+ Bt,
+ Dir,
+ NodeList,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+) ->
+ {Grouped0, Ungrouped0} = lists:splitwith(
+ fun({Key, _}) ->
+ KeyGroupFun(GroupedKey, Key)
+ end,
+ NodeList
+ ),
{GroupedNodes, UngroupedNodes} =
- case Grouped0 of
- [] ->
- {Grouped0, Ungrouped0};
- _ ->
- [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
- {RestGrouped, [FirstGrouped | Ungrouped0]}
- end,
+ case Grouped0 of
+ [] ->
+ {Grouped0, Ungrouped0};
+ _ ->
+ [FirstGrouped | RestGrouped] = lists:reverse(Grouped0),
+ {RestGrouped, [FirstGrouped | Ungrouped0]}
+ end,
GroupedReds = [element(2, Node) || {_, Node} <- GroupedNodes],
case UngroupedNodes of
- [{_Key, NodeInfo}|RestNodes] ->
- {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
- reduce_stream_node(Bt, Dir, NodeInfo, KeyStart, InEndRangeFun, GroupedKey,
- GroupedKVsAcc, GroupedReds ++ GroupedRedsAcc, KeyGroupFun, Fun, Acc),
- reduce_stream_kp_node2(Bt, Dir, RestNodes, KeyStart, InEndRangeFun, GroupedKey2,
- GroupedKVsAcc2, GroupedRedsAcc2, KeyGroupFun, Fun, Acc2);
- [] ->
- {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
+ [{_Key, NodeInfo} | RestNodes] ->
+ {ok, Acc2, GroupedRedsAcc2, GroupedKVsAcc2, GroupedKey2} =
+ reduce_stream_node(
+ Bt,
+ Dir,
+ NodeInfo,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey,
+ GroupedKVsAcc,
+ GroupedReds ++ GroupedRedsAcc,
+ KeyGroupFun,
+ Fun,
+ Acc
+ ),
+ reduce_stream_kp_node2(
+ Bt,
+ Dir,
+ RestNodes,
+ KeyStart,
+ InEndRangeFun,
+ GroupedKey2,
+ GroupedKVsAcc2,
+ GroupedRedsAcc2,
+ KeyGroupFun,
+ Fun,
+ Acc2
+ );
+ [] ->
+ {ok, Acc, GroupedReds ++ GroupedRedsAcc, GroupedKVsAcc, GroupedKey}
end.
adjust_dir(fwd, List) ->
@@ -753,20 +1068,20 @@ stream_node(Bt, Reds, Node, StartKey, InRange, Dir, Fun, Acc) ->
Pointer = element(1, Node),
{NodeType, NodeList} = get_node(Bt, Pointer),
case NodeType of
- kp_node ->
- stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
- kv_node ->
- stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
+ kp_node ->
+ stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc);
+ kv_node ->
+ stream_kv_node(Bt, Reds, adjust_dir(Dir, NodeList), StartKey, InRange, Dir, Fun, Acc)
end.
stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc) ->
Pointer = element(1, Node),
{NodeType, NodeList} = get_node(Bt, Pointer),
case NodeType of
- kp_node ->
- stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
- kv_node ->
- stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
+ kp_node ->
+ stream_kp_node(Bt, Reds, adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc);
+ kv_node ->
+ stream_kv_node2(Bt, Reds, [], adjust_dir(Dir, NodeList), InRange, Dir, Fun, Acc)
end.
stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
@@ -774,84 +1089,87 @@ stream_kp_node(_Bt, _Reds, [], _InRange, _Dir, _Fun, Acc) ->
stream_kp_node(Bt, Reds, [{Key, Node} | Rest], InRange, Dir, Fun, Acc) ->
Red = element(2, Node),
case Fun(traverse, Key, Red, Acc) of
- {ok, Acc2} ->
- case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
- {ok, Acc3} ->
- stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
- {stop, LastReds, Acc3} ->
- {stop, LastReds, Acc3}
- end;
- {skip, Acc2} ->
- stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
- {stop, Acc2} ->
- {stop, Reds, Acc2}
+ {ok, Acc2} ->
+ case stream_node(Bt, Reds, Node, InRange, Dir, Fun, Acc2) of
+ {ok, Acc3} ->
+ stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc3);
+ {stop, LastReds, Acc3} ->
+ {stop, LastReds, Acc3}
+ end;
+ {skip, Acc2} ->
+ stream_kp_node(Bt, [Red | Reds], Rest, InRange, Dir, Fun, Acc2);
+ {stop, Acc2} ->
+ {stop, Reds, Acc2}
end.
drop_nodes(_Bt, Reds, _StartKey, []) ->
{Reds, []};
drop_nodes(Bt, Reds, StartKey, [{NodeKey, Node} | RestKPs]) ->
case less(Bt, NodeKey, StartKey) of
- true ->
- drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
- false ->
- {Reds, [{NodeKey, Node} | RestKPs]}
+ true ->
+ drop_nodes(Bt, [element(2, Node) | Reds], StartKey, RestKPs);
+ false ->
+ {Reds, [{NodeKey, Node} | RestKPs]}
end.
stream_kp_node(Bt, Reds, KPs, StartKey, InRange, Dir, Fun, Acc) ->
{NewReds, NodesToStream} =
- case Dir of
- fwd ->
- % drop all nodes sorting before the key
- drop_nodes(Bt, Reds, StartKey, KPs);
- rev ->
- % keep all nodes sorting before the key, AND the first node to sort after
- RevKPs = lists:reverse(KPs),
- case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
- {_RevsBefore, []} ->
- % everything sorts before it
- {Reds, KPs};
- {RevBefore, [FirstAfter | Drop]} ->
- {[element(2, Node) || {_K, Node} <- Drop] ++ Reds,
- [FirstAfter | lists:reverse(RevBefore)]}
- end
- end,
+ case Dir of
+ fwd ->
+ % drop all nodes sorting before the key
+ drop_nodes(Bt, Reds, StartKey, KPs);
+ rev ->
+ % keep all nodes sorting before the key, AND the first node to sort after
+ RevKPs = lists:reverse(KPs),
+ case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
+ {_RevsBefore, []} ->
+ % everything sorts before it
+ {Reds, KPs};
+ {RevBefore, [FirstAfter | Drop]} ->
+ {[element(2, Node) || {_K, Node} <- Drop] ++ Reds, [
+ FirstAfter | lists:reverse(RevBefore)
+ ]}
+ end
+ end,
case NodesToStream of
- [] ->
- {ok, Acc};
- [{_Key, Node} | Rest] ->
- case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
- {ok, Acc2} ->
- Red = element(2, Node),
- stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
- {stop, LastReds, Acc2} ->
- {stop, LastReds, Acc2}
- end
+ [] ->
+ {ok, Acc};
+ [{_Key, Node} | Rest] ->
+ case stream_node(Bt, NewReds, Node, StartKey, InRange, Dir, Fun, Acc) of
+ {ok, Acc2} ->
+ Red = element(2, Node),
+ stream_kp_node(Bt, [Red | NewReds], Rest, InRange, Dir, Fun, Acc2);
+ {stop, LastReds, Acc2} ->
+ {stop, LastReds, Acc2}
+ end
end.
stream_kv_node(Bt, Reds, KVs, StartKey, InRange, Dir, Fun, Acc) ->
DropFun =
- case Dir of
- fwd ->
- fun({Key, _}) -> less(Bt, Key, StartKey) end;
- rev ->
- fun({Key, _}) -> less(Bt, StartKey, Key) end
- end,
+ case Dir of
+ fwd ->
+ fun({Key, _}) -> less(Bt, Key, StartKey) end;
+ rev ->
+ fun({Key, _}) -> less(Bt, StartKey, Key) end
+ end,
{LTKVs, GTEKVs} = lists:splitwith(DropFun, KVs),
- AssembleLTKVs = [assemble(Bt,K,V) || {K,V} <- LTKVs],
+ AssembleLTKVs = [assemble(Bt, K, V) || {K, V} <- LTKVs],
stream_kv_node2(Bt, Reds, AssembleLTKVs, GTEKVs, InRange, Dir, Fun, Acc).
stream_kv_node2(_Bt, _Reds, _PrevKVs, [], _InRange, _Dir, _Fun, Acc) ->
{ok, Acc};
-stream_kv_node2(Bt, Reds, PrevKVs, [{K,V} | RestKVs], InRange, Dir, Fun, Acc) ->
+stream_kv_node2(Bt, Reds, PrevKVs, [{K, V} | RestKVs], InRange, Dir, Fun, Acc) ->
case InRange(K) of
- false ->
- {stop, {PrevKVs, Reds}, Acc};
- true ->
- AssembledKV = assemble(Bt, K, V),
- case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
- {ok, Acc2} ->
- stream_kv_node2(Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2);
- {stop, Acc2} ->
- {stop, {PrevKVs, Reds}, Acc2}
- end
+ false ->
+ {stop, {PrevKVs, Reds}, Acc};
+ true ->
+ AssembledKV = assemble(Bt, K, V),
+ case Fun(visit, AssembledKV, {PrevKVs, Reds}, Acc) of
+ {ok, Acc2} ->
+ stream_kv_node2(
+ Bt, Reds, [AssembledKV | PrevKVs], RestKVs, InRange, Dir, Fun, Acc2
+ );
+ {stop, Acc2} ->
+ {stop, {PrevKVs, Reds}, Acc2}
+ end
end.
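
couch_btree.erl is the largest reflow in this set, so here is a brief hedged
sketch (not part of this patch) of the public API whose clauses were
reformatted above; it assumes `Fd` is an open couch_file handle and relies on
the module's default options:

```erlang
btree_keys(Fd) ->
    %% open/2 with 'nil' starts an empty tree on the given file.
    {ok, Bt0} = couch_btree:open(nil, Fd),
    %% add/2 inserts key/value pairs via query_modify/4.
    {ok, Bt1} = couch_btree:add(Bt0, [{1, a}, {2, b}, {3, c}]),
    %% foldl/3 visits KVs in key order; the fun returns {ok, Acc} to continue.
    {ok, _Reds, Keys} = couch_btree:foldl(
        Bt1, fun({K, _V}, Acc) -> {ok, [K | Acc]} end, []
    ),
    lists:reverse(Keys).
```
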
diff --git a/src/couch/src/couch_changes.erl b/src/couch/src/couch_changes.erl
index 2078fed3a..089cda975 100644
--- a/src/couch/src/couch_changes.erl
+++ b/src/couch/src/couch_changes.erl
@@ -70,62 +70,80 @@ handle_db_changes(Args0, Req, Db0) ->
end,
Start = fun() ->
{ok, Db} = couch_db:reopen(Db0),
- StartSeq = case Dir of
- rev ->
- couch_db:get_update_seq(Db);
- fwd ->
- Since
- end,
+ StartSeq =
+ case Dir of
+ rev ->
+ couch_db:get_update_seq(Db);
+ fwd ->
+ Since
+ end,
{Db, StartSeq}
end,
% begin timer to deal with heartbeat when filter function fails
case Args#changes_args.heartbeat of
- undefined ->
- erlang:erase(last_changes_heartbeat);
- Val when is_integer(Val); Val =:= true ->
- put(last_changes_heartbeat, os:timestamp())
+ undefined ->
+ erlang:erase(last_changes_heartbeat);
+ Val when is_integer(Val); Val =:= true ->
+ put(last_changes_heartbeat, os:timestamp())
end,
case lists:member(Feed, ["continuous", "longpoll", "eventsource"]) of
- true ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- {ok, Listener} = StartListenerFun(),
-
- {Db, StartSeq} = Start(),
- UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- Acc0 = build_acc(Args, Callback, UserAcc2, Db, StartSeq,
- <<"">>, Timeout, TimeoutFun),
- try
- keep_sending_changes(
- Args#changes_args{dir=fwd},
- Acc0,
- true)
- after
- couch_event:stop_listener(Listener),
- get_rest_updated(ok) % clean out any remaining update messages
+ true ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ {ok, Listener} = StartListenerFun(),
+
+ {Db, StartSeq} = Start(),
+ UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
+ {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+ Acc0 = build_acc(
+ Args,
+ Callback,
+ UserAcc2,
+ Db,
+ StartSeq,
+ <<"">>,
+ Timeout,
+ TimeoutFun
+ ),
+ try
+ keep_sending_changes(
+ Args#changes_args{dir = fwd},
+ Acc0,
+ true
+ )
+ after
+ couch_event:stop_listener(Listener),
+ % clean out any remaining update messages
+ get_rest_updated(ok)
+ end
+ end;
+ false ->
+ fun(CallbackAcc) ->
+ {Callback, UserAcc} = get_callback_acc(CallbackAcc),
+ UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
+ {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
+ {Db, StartSeq} = Start(),
+ Acc0 = build_acc(
+ Args#changes_args{feed = "normal"},
+ Callback,
+ UserAcc2,
+ Db,
+ StartSeq,
+ <<>>,
+ Timeout,
+ TimeoutFun
+ ),
+ {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
+ send_changes(
+ Acc0,
+ Dir,
+ true
+ ),
+ end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
end
- end;
- false ->
- fun(CallbackAcc) ->
- {Callback, UserAcc} = get_callback_acc(CallbackAcc),
- UserAcc2 = start_sending_changes(Callback, UserAcc, Feed),
- {Timeout, TimeoutFun} = get_changes_timeout(Args, Callback),
- {Db, StartSeq} = Start(),
- Acc0 = build_acc(Args#changes_args{feed="normal"}, Callback,
- UserAcc2, Db, StartSeq, <<>>,
- Timeout, TimeoutFun),
- {ok, #changes_acc{seq = LastSeq, user_acc = UserAcc3}} =
- send_changes(
- Acc0,
- Dir,
- true),
- end_sending_changes(Callback, UserAcc3, LastSeq, Feed)
- end
end.
-
handle_db_event(_DbName, updated, Parent) ->
Parent ! updated,
{ok, Parent};
@@ -135,7 +153,6 @@ handle_db_event(_DbName, deleted, Parent) ->
handle_db_event(_DbName, _Event, Parent) ->
{ok, Parent}.
-
handle_view_event(_DbName, Msg, {Parent, DDocId}) ->
case Msg of
{index_commit, DDocId} ->
@@ -152,17 +169,17 @@ get_callback_acc({Callback, _UserAcc} = Pair) when is_function(Callback, 3) ->
get_callback_acc(Callback) when is_function(Callback, 2) ->
{fun(Ev, Data, _) -> Callback(Ev, Data) end, ok}.
-
configure_filter("_doc_ids", Style, Req, _Db) ->
{doc_ids, Style, get_doc_ids(Req)};
configure_filter("_selector", Style, Req, _Db) ->
- {selector, Style, get_selector_and_fields(Req)};
+ {selector, Style, get_selector_and_fields(Req)};
configure_filter("_design", Style, _Req, _Db) ->
{design_docs, Style};
configure_filter("_view", Style, Req, Db) ->
ViewName = get_view_qs(Req),
- if ViewName /= "" -> ok; true ->
- throw({bad_request, "`view` filter parameter is not provided."})
+ if
+ ViewName /= "" -> ok;
+ true -> throw({bad_request, "`view` filter parameter is not provided."})
end,
ViewNameParts = string:tokens(ViewName, "/"),
case [?l2b(couch_httpd:unquote(Part)) || Part <- ViewNameParts] of
@@ -196,10 +213,9 @@ configure_filter(FilterName, Style, Req, Db) ->
true ->
DIR = fabric_util:doc_id_and_rev(DDoc),
{fetch, custom, Style, Req, DIR, FName};
- false->
+ false ->
{custom, Style, Req, DDoc, FName}
end;
-
[] ->
{default, Style};
_Else ->
@@ -207,8 +223,7 @@ configure_filter(FilterName, Style, Req, Db) ->
throw({bad_request, Msg})
end.
-
-filter(Db, #full_doc_info{}=FDI, Filter) ->
+filter(Db, #full_doc_info{} = FDI, Filter) ->
filter(Db, couch_doc:to_doc_info(FDI), Filter);
filter(_Db, DocInfo, {default, Style}) ->
apply_style(DocInfo, Style);
@@ -221,8 +236,10 @@ filter(_Db, DocInfo, {doc_ids, Style, DocIds}) ->
end;
filter(Db, DocInfo, {selector, Style, {Selector, _Fields}}) ->
Docs = open_revs(Db, DocInfo, Style),
- Passes = [mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
- || Doc <- Docs],
+ Passes = [
+ mango_selector:match(Selector, couch_doc:to_json_obj(Doc, []))
+ || Doc <- Docs
+ ],
filter_revs(Passes, Docs);
filter(_Db, DocInfo, {design_docs, Style}) ->
case DocInfo#doc_info.id of
@@ -236,15 +253,15 @@ filter(Db, DocInfo, {view, Style, DDoc, VName}) ->
{ok, Passes} = couch_query_servers:filter_view(DDoc, VName, Docs),
filter_revs(Passes, Docs);
filter(Db, DocInfo, {custom, Style, Req0, DDoc, FName}) ->
- Req = case Req0 of
- {json_req, _} -> Req0;
- #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
- end,
+ Req =
+ case Req0 of
+ {json_req, _} -> Req0;
+ #httpd{} -> {json_req, chttpd_external:json_req_obj(Req0, Db)}
+ end,
Docs = open_revs(Db, DocInfo, Style),
{ok, Passes} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
filter_revs(Passes, Docs).
-
get_view_qs({json_req, {Props}}) ->
{Query} = couch_util:get_value(<<"query">>, Props, {[]}),
binary_to_list(couch_util:get_value(<<"view">>, Query, ""));
@@ -253,42 +270,43 @@ get_view_qs(Req) ->
get_doc_ids({json_req, {Props}}) ->
check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='POST'}=Req) ->
+get_doc_ids(#httpd{method = 'POST'} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
{Props} = couch_httpd:json_body_obj(Req),
check_docids(couch_util:get_value(<<"doc_ids">>, Props));
-get_doc_ids(#httpd{method='GET'}=Req) ->
+get_doc_ids(#httpd{method = 'GET'} = Req) ->
DocIds = ?JSON_DECODE(couch_httpd:qs_value(Req, "doc_ids", "null")),
check_docids(DocIds);
get_doc_ids(_) ->
throw({bad_request, no_doc_ids_provided}).
-
get_selector_and_fields({json_req, {Props}}) ->
Selector = check_selector(couch_util:get_value(<<"selector">>, Props)),
Fields = check_fields(couch_util:get_value(<<"fields">>, Props, nil)),
{Selector, Fields};
-get_selector_and_fields(#httpd{method='POST'}=Req) ->
+get_selector_and_fields(#httpd{method = 'POST'} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
- get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
+ get_selector_and_fields({json_req, couch_httpd:json_body_obj(Req)});
get_selector_and_fields(_) ->
throw({bad_request, "Selector must be specified in POST payload"}).
-
check_docids(DocIds) when is_list(DocIds) ->
- lists:foreach(fun
- (DocId) when not is_binary(DocId) ->
- Msg = "`doc_ids` filter parameter is not a list of doc ids.",
- throw({bad_request, Msg});
- (_) -> ok
- end, DocIds),
+ lists:foreach(
+ fun
+ (DocId) when not is_binary(DocId) ->
+ Msg = "`doc_ids` filter parameter is not a list of doc ids.",
+ throw({bad_request, Msg});
+ (_) ->
+ ok
+ end,
+ DocIds
+ ),
DocIds;
check_docids(_) ->
Msg = "`doc_ids` filter parameter is not a list of doc ids.",
throw({bad_request, Msg}).
-
-check_selector(Selector={_}) ->
+check_selector(Selector = {_}) ->
try
mango_selector:normalize(Selector)
catch
@@ -299,7 +317,6 @@ check_selector(Selector={_}) ->
check_selector(_Selector) ->
throw({bad_request, "Selector error: expected a JSON object"}).
-
check_fields(nil) ->
nil;
check_fields(Fields) when is_list(Fields) ->
@@ -314,7 +331,6 @@ check_fields(Fields) when is_list(Fields) ->
check_fields(_Fields) ->
throw({bad_request, "Selector error: fields must be JSON array"}).
-
open_ddoc(Db, DDocId) ->
DbName = couch_db:name(Db),
case couch_db:is_clustered(Db) of
@@ -330,39 +346,38 @@ open_ddoc(Db, DDocId) ->
end
end.
-
-check_member_exists(#doc{body={Props}}, Path) ->
+check_member_exists(#doc{body = {Props}}, Path) ->
couch_util:get_nested_json_value({Props}, Path).
-
-apply_style(#doc_info{revs=Revs}, main_only) ->
- [#rev_info{rev=Rev} | _] = Revs,
+apply_style(#doc_info{revs = Revs}, main_only) ->
+ [#rev_info{rev = Rev} | _] = Revs,
[{[{<<"rev">>, couch_doc:rev_to_str(Rev)}]}];
-apply_style(#doc_info{revs=Revs}, all_docs) ->
- [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev=R} <- Revs].
-
+apply_style(#doc_info{revs = Revs}, all_docs) ->
+ [{[{<<"rev">>, couch_doc:rev_to_str(R)}]} || #rev_info{rev = R} <- Revs].
open_revs(Db, DocInfo, Style) ->
- DocInfos = case Style of
- main_only -> [DocInfo];
- all_docs -> [DocInfo#doc_info{revs=[R]}|| R <- DocInfo#doc_info.revs]
- end,
+ DocInfos =
+ case Style of
+ main_only -> [DocInfo];
+ all_docs -> [DocInfo#doc_info{revs = [R]} || R <- DocInfo#doc_info.revs]
+ end,
OpenOpts = [deleted, conflicts],
% Relying on list comprehensions to silence errors
OpenResults = [couch_db:open_doc(Db, DI, OpenOpts) || DI <- DocInfos],
[Doc || {ok, Doc} <- OpenResults].
-
filter_revs(Passes, Docs) ->
- lists:flatmap(fun
- ({true, #doc{revs={RevPos, [RevId | _]}}}) ->
- RevStr = couch_doc:rev_to_str({RevPos, RevId}),
- Change = {[{<<"rev">>, RevStr}]},
- [Change];
- (_) ->
- []
- end, lists:zip(Passes, Docs)).
-
+ lists:flatmap(
+ fun
+ ({true, #doc{revs = {RevPos, [RevId | _]}}}) ->
+ RevStr = couch_doc:rev_to_str({RevPos, RevId}),
+ Change = {[{<<"rev">>, RevStr}]},
+ [Change];
+ (_) ->
+ []
+ end,
+ lists:zip(Passes, Docs)
+ ).
get_changes_timeout(Args, Callback) ->
#changes_args{
@@ -371,29 +386,30 @@ get_changes_timeout(Args, Callback) ->
feed = ResponseType
} = Args,
DefaultTimeout = chttpd_util:get_chttpd_config_integer(
- "changes_timeout", 60000),
+ "changes_timeout", 60000
+ ),
case Heartbeat of
- undefined ->
- case Timeout of
undefined ->
- {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
- infinity ->
- {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+ case Timeout of
+ undefined ->
+ {DefaultTimeout, fun(UserAcc) -> {stop, UserAcc} end};
+ infinity ->
+ {infinity, fun(UserAcc) -> {stop, UserAcc} end};
+ _ ->
+ {lists:min([DefaultTimeout, Timeout]), fun(UserAcc) -> {stop, UserAcc} end}
+ end;
+ true ->
+ {DefaultTimeout, fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
_ ->
- {lists:min([DefaultTimeout, Timeout]),
- fun(UserAcc) -> {stop, UserAcc} end}
- end;
- true ->
- {DefaultTimeout,
- fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end};
- _ ->
- {lists:min([DefaultTimeout, Heartbeat]),
- fun(UserAcc) -> {ok, Callback(timeout, ResponseType, UserAcc)} end}
+ {lists:min([DefaultTimeout, Heartbeat]), fun(UserAcc) ->
+ {ok, Callback(timeout, ResponseType, UserAcc)}
+ end}
end.
-start_sending_changes(_Callback, UserAcc, ResponseType)
- when ResponseType =:= "continuous"
- orelse ResponseType =:= "eventsource" ->
+start_sending_changes(_Callback, UserAcc, ResponseType) when
+ ResponseType =:= "continuous" orelse
+ ResponseType =:= "eventsource"
+->
UserAcc;
start_sending_changes(Callback, UserAcc, ResponseType) ->
Callback(start, ResponseType, UserAcc).
@@ -421,8 +437,8 @@ build_acc(Args, Callback, UserAcc, Db, StartSeq, Prepend, Timeout, TimeoutFun) -
conflicts = Conflicts,
timeout = Timeout,
timeout_fun = TimeoutFun,
- aggregation_results=[],
- aggregation_kvs=[]
+ aggregation_results = [],
+ aggregation_kvs = []
}.
send_changes(Acc, Dir, FirstRound) ->
@@ -440,30 +456,35 @@ send_changes(Acc, Dir, FirstRound) ->
couch_db:fold_changes(Db, StartSeq, DbEnumFun, Acc, Opts)
end.
-
can_optimize(true, {doc_ids, _Style, DocIds}) ->
- MaxDocIds = config:get_integer("couchdb",
- "changes_doc_ids_optimization_threshold", 100),
- if length(DocIds) =< MaxDocIds ->
- {true, fun send_changes_doc_ids/6};
- true ->
- false
+ MaxDocIds = config:get_integer(
+ "couchdb",
+ "changes_doc_ids_optimization_threshold",
+ 100
+ ),
+ if
+ length(DocIds) =< MaxDocIds ->
+ {true, fun send_changes_doc_ids/6};
+ true ->
+ false
end;
can_optimize(true, {design_docs, _Style}) ->
{true, fun send_changes_design_docs/6};
can_optimize(_, _) ->
false.
-
send_changes_doc_ids(Db, StartSeq, Dir, Fun, Acc0, {doc_ids, _Style, DocIds}) ->
Results = couch_db:get_full_doc_infos(Db, DocIds),
- FullInfos = lists:foldl(fun
- (#full_doc_info{}=FDI, Acc) -> [FDI | Acc];
- (not_found, Acc) -> Acc
- end, [], Results),
+ FullInfos = lists:foldl(
+ fun
+ (#full_doc_info{} = FDI, Acc) -> [FDI | Acc];
+ (not_found, Acc) -> Acc
+ end,
+ [],
+ Results
+ ),
send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
Opts = [
@@ -474,49 +495,62 @@ send_changes_design_docs(Db, StartSeq, Dir, Fun, Acc0, {design_docs, _Style}) ->
{ok, FullInfos} = couch_db:fold_docs(Db, FoldFun, [], Opts),
send_lookup_changes(FullInfos, StartSeq, Dir, Db, Fun, Acc0).
-
send_lookup_changes(FullDocInfos, StartSeq, Dir, Db, Fun, Acc0) ->
- FoldFun = case Dir of
- fwd -> fun lists:foldl/3;
- rev -> fun lists:foldr/3
- end,
- GreaterFun = case Dir of
- fwd -> fun(A, B) -> A > B end;
- rev -> fun(A, B) -> A =< B end
- end,
- DocInfos = lists:foldl(fun(FDI, Acc) ->
- DI = couch_doc:to_doc_info(FDI),
- case GreaterFun(DI#doc_info.high_seq, StartSeq) of
- true -> [DI | Acc];
- false -> Acc
- end
- end, [], FullDocInfos),
- SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
- FinalAcc = try
- FoldFun(fun(DocInfo, Acc) ->
- case Fun(DocInfo, Acc) of
- {ok, NewAcc} ->
- NewAcc;
- {stop, NewAcc} ->
- throw({stop, NewAcc})
+ FoldFun =
+ case Dir of
+ fwd -> fun lists:foldl/3;
+ rev -> fun lists:foldr/3
+ end,
+ GreaterFun =
+ case Dir of
+ fwd -> fun(A, B) -> A > B end;
+ rev -> fun(A, B) -> A =< B end
+ end,
+ DocInfos = lists:foldl(
+ fun(FDI, Acc) ->
+ DI = couch_doc:to_doc_info(FDI),
+ case GreaterFun(DI#doc_info.high_seq, StartSeq) of
+ true -> [DI | Acc];
+ false -> Acc
end
- end, Acc0, SortedDocInfos)
- catch
- {stop, Acc} -> Acc
- end,
+ end,
+ [],
+ FullDocInfos
+ ),
+ SortedDocInfos = lists:keysort(#doc_info.high_seq, DocInfos),
+ FinalAcc =
+ try
+ FoldFun(
+ fun(DocInfo, Acc) ->
+ case Fun(DocInfo, Acc) of
+ {ok, NewAcc} ->
+ NewAcc;
+ {stop, NewAcc} ->
+ throw({stop, NewAcc})
+ end
+ end,
+ Acc0,
+ SortedDocInfos
+ )
+ catch
+ {stop, Acc} -> Acc
+ end,
case Dir of
fwd ->
- FinalAcc0 = case element(1, FinalAcc) of
- changes_acc -> % we came here via couch_http or internal call
- FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)};
- fabric_changes_acc -> % we came here via chttpd / fabric / rexi
- FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
- end,
+ FinalAcc0 =
+ case element(1, FinalAcc) of
+ % we came here via couch_http or internal call
+ changes_acc ->
+ FinalAcc#changes_acc{seq = couch_db:get_update_seq(Db)};
+ % we came here via chttpd / fabric / rexi
+ fabric_changes_acc ->
+ FinalAcc#fabric_changes_acc{seq = couch_db:get_update_seq(Db)}
+ end,
{ok, FinalAcc0};
- rev -> {ok, FinalAcc}
+ rev ->
+ {ok, FinalAcc}
end.
-
keep_sending_changes(Args, Acc0, FirstRound) ->
#changes_args{
feed = ResponseType,
@@ -527,36 +561,44 @@ keep_sending_changes(Args, Acc0, FirstRound) ->
{ok, ChangesAcc} = send_changes(Acc0, fwd, FirstRound),
#changes_acc{
- db = Db, callback = Callback,
- timeout = Timeout, timeout_fun = TimeoutFun, seq = EndSeq,
- prepend = Prepend2, user_acc = UserAcc2, limit = NewLimit
+ db = Db,
+ callback = Callback,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun,
+ seq = EndSeq,
+ prepend = Prepend2,
+ user_acc = UserAcc2,
+ limit = NewLimit
} = maybe_upgrade_changes_acc(ChangesAcc),
couch_db:close(Db),
- if Limit > NewLimit, ResponseType == "longpoll" ->
- end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
- true ->
- case wait_updated(Timeout, TimeoutFun, UserAcc2) of
- {updated, UserAcc4} ->
- DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
- case couch_db:open(couch_db:name(Db), DbOptions1) of
- {ok, Db2} ->
- ?MODULE:keep_sending_changes(
- Args#changes_args{limit=NewLimit},
- ChangesAcc#changes_acc{
- db = Db2,
- user_acc = UserAcc4,
- seq = EndSeq,
- prepend = Prepend2,
- timeout = Timeout,
- timeout_fun = TimeoutFun},
- false);
- _Else ->
- end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
- end;
- {stop, UserAcc4} ->
- end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
- end
+ if
+ Limit > NewLimit, ResponseType == "longpoll" ->
+ end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType);
+ true ->
+ case wait_updated(Timeout, TimeoutFun, UserAcc2) of
+ {updated, UserAcc4} ->
+ DbOptions1 = [{user_ctx, couch_db:get_user_ctx(Db)} | DbOptions],
+ case couch_db:open(couch_db:name(Db), DbOptions1) of
+ {ok, Db2} ->
+ ?MODULE:keep_sending_changes(
+ Args#changes_args{limit = NewLimit},
+ ChangesAcc#changes_acc{
+ db = Db2,
+ user_acc = UserAcc4,
+ seq = EndSeq,
+ prepend = Prepend2,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun
+ },
+ false
+ );
+ _Else ->
+ end_sending_changes(Callback, UserAcc2, EndSeq, ResponseType)
+ end;
+ {stop, UserAcc4} ->
+ end_sending_changes(Callback, UserAcc4, EndSeq, ResponseType)
+ end
end.
end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
@@ -564,46 +606,59 @@ end_sending_changes(Callback, UserAcc, EndSeq, ResponseType) ->
changes_enumerator(Value, Acc) ->
#changes_acc{
- filter = Filter, callback = Callback, prepend = Prepend,
- user_acc = UserAcc, limit = Limit, resp_type = ResponseType, db = Db,
- timeout = Timeout, timeout_fun = TimeoutFun
+ filter = Filter,
+ callback = Callback,
+ prepend = Prepend,
+ user_acc = UserAcc,
+ limit = Limit,
+ resp_type = ResponseType,
+ db = Db,
+ timeout = Timeout,
+ timeout_fun = TimeoutFun
} = maybe_upgrade_changes_acc(Acc),
Results0 = filter(Db, Value, Filter),
Results = [Result || Result <- Results0, Result /= null],
- Seq = case Value of
- #full_doc_info{} ->
- Value#full_doc_info.update_seq;
- #doc_info{} ->
- Value#doc_info.high_seq
- end,
- Go = if (Limit =< 1) andalso Results =/= [] -> stop; true -> ok end,
+ Seq =
+ case Value of
+ #full_doc_info{} ->
+ Value#full_doc_info.update_seq;
+ #doc_info{} ->
+ Value#doc_info.high_seq
+ end,
+ Go =
+ if
+ (Limit =< 1) andalso Results =/= [] -> stop;
+ true -> ok
+ end,
case Results of
- [] ->
- {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
- case Done of
- stop ->
- {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
- ok ->
- {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
- end;
- _ ->
- if ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
- ChangesRow = changes_row(Results, Value, Acc),
- UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
- reset_heartbeat(),
- {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}};
- true ->
- ChangesRow = changes_row(Results, Value, Acc),
- UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
- reset_heartbeat(),
- {Go, Acc#changes_acc{
- seq = Seq, prepend = <<",\n">>,
- user_acc = UserAcc2, limit = Limit - 1}}
- end
+ [] ->
+ {Done, UserAcc2} = maybe_heartbeat(Timeout, TimeoutFun, UserAcc),
+ case Done of
+ stop ->
+ {stop, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}};
+ ok ->
+ {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2}}
+ end;
+ _ ->
+ if
+ ResponseType =:= "continuous" orelse ResponseType =:= "eventsource" ->
+ ChangesRow = changes_row(Results, Value, Acc),
+ UserAcc2 = Callback({change, ChangesRow, <<>>}, ResponseType, UserAcc),
+ reset_heartbeat(),
+ {Go, Acc#changes_acc{seq = Seq, user_acc = UserAcc2, limit = Limit - 1}};
+ true ->
+ ChangesRow = changes_row(Results, Value, Acc),
+ UserAcc2 = Callback({change, ChangesRow, Prepend}, ResponseType, UserAcc),
+ reset_heartbeat(),
+ {Go, Acc#changes_acc{
+ seq = Seq,
+ prepend = <<",\n">>,
+ user_acc = UserAcc2,
+ limit = Limit - 1
+ }}
+ end
end.
-
-
changes_row(Results, #full_doc_info{} = FDI, Acc) ->
changes_row(Results, couch_doc:to_doc_info(FDI), Acc);
changes_row(Results, DocInfo, Acc0) ->
@@ -611,26 +666,27 @@ changes_row(Results, DocInfo, Acc0) ->
#doc_info{
id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]
} = DocInfo,
- {[{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
- deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)}.
+ {
+ [{<<"seq">>, Seq}, {<<"id">>, Id}, {<<"changes">>, Results}] ++
+ deleted_item(Del) ++ maybe_get_changes_doc(DocInfo, Acc)
+ }.
-maybe_get_changes_doc(Value, #changes_acc{include_docs=true}=Acc) ->
+maybe_get_changes_doc(Value, #changes_acc{include_docs = true} = Acc) ->
#changes_acc{
db = Db,
doc_options = DocOpts,
conflicts = Conflicts,
filter = Filter
} = Acc,
- Opts = case Conflicts of
- true -> [deleted, conflicts];
- false -> [deleted]
- end,
+ Opts =
+ case Conflicts of
+ true -> [deleted, conflicts];
+ false -> [deleted]
+ end,
load_doc(Db, Value, Opts, DocOpts, Filter);
-
maybe_get_changes_doc(_Value, _Acc) ->
[].
-
load_doc(Db, Value, Opts, DocOpts, Filter) ->
case couch_index_util:load_doc(Db, Value, Opts) of
null ->
@@ -639,68 +695,66 @@ load_doc(Db, Value, Opts, DocOpts, Filter) ->
[{doc, doc_to_json(Doc, DocOpts, Filter)}]
end.
-
-doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}})
- when Fields =/= nil ->
+doc_to_json(Doc, DocOpts, {selector, _Style, {_Selector, Fields}}) when
+ Fields =/= nil
+->
mango_fields:extract(couch_doc:to_json_obj(Doc, DocOpts), Fields);
doc_to_json(Doc, DocOpts, _Filter) ->
couch_doc:to_json_obj(Doc, DocOpts).
-
deleted_item(true) -> [{<<"deleted">>, true}];
deleted_item(_) -> [].
% waits for a updated msg, if there are multiple msgs, collects them.
wait_updated(Timeout, TimeoutFun, UserAcc) ->
receive
- updated ->
- get_rest_updated(UserAcc);
- deleted ->
- {stop, UserAcc}
+ updated ->
+ get_rest_updated(UserAcc);
+ deleted ->
+ {stop, UserAcc}
after Timeout ->
{Go, UserAcc2} = TimeoutFun(UserAcc),
case Go of
- ok ->
- ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
- stop ->
- {stop, UserAcc2}
+ ok ->
+ ?MODULE:wait_updated(Timeout, TimeoutFun, UserAcc2);
+ stop ->
+ {stop, UserAcc2}
end
end.
get_rest_updated(UserAcc) ->
receive
- updated ->
- get_rest_updated(UserAcc)
+ updated ->
+ get_rest_updated(UserAcc)
after 0 ->
{updated, UserAcc}
end.
reset_heartbeat() ->
case get(last_changes_heartbeat) of
- undefined ->
- ok;
- _ ->
- put(last_changes_heartbeat, os:timestamp())
+ undefined ->
+ ok;
+ _ ->
+ put(last_changes_heartbeat, os:timestamp())
end.
maybe_heartbeat(Timeout, TimeoutFun, Acc) ->
Before = get(last_changes_heartbeat),
case Before of
- undefined ->
- {ok, Acc};
- _ ->
- Now = os:timestamp(),
- case timer:now_diff(Now, Before) div 1000 >= Timeout of
- true ->
- Acc2 = TimeoutFun(Acc),
- put(last_changes_heartbeat, Now),
- Acc2;
- false ->
- {ok, Acc}
- end
+ undefined ->
+ {ok, Acc};
+ _ ->
+ Now = os:timestamp(),
+ case timer:now_diff(Now, Before) div 1000 >= Timeout of
+ true ->
+ Acc2 = TimeoutFun(Acc),
+ put(last_changes_heartbeat, Now),
+ Acc2;
+ false ->
+ {ok, Acc}
+ end
end.
-
maybe_upgrade_changes_acc(#changes_acc{} = Acc) ->
Acc;
maybe_upgrade_changes_acc(Acc) when tuple_size(Acc) == 19 ->
diff --git a/src/couch/src/couch_compress.erl b/src/couch/src/couch_compress.erl
index cfcc2a481..59d692058 100644
--- a/src/couch/src/couch_compress.erl
+++ b/src/couch/src/couch_compress.erl
@@ -25,21 +25,19 @@
-define(TERM_PREFIX, 131).
-define(COMPRESSED_TERM_PREFIX, 131, 80).
-
get_compression_method() ->
case config:get("couchdb", "file_compression") of
- undefined ->
- ?DEFAULT_COMPRESSION;
- Method1 ->
- case string:tokens(Method1, "_") of
- [Method] ->
- list_to_existing_atom(Method);
- [Method, Level] ->
- {list_to_existing_atom(Method), list_to_integer(Level)}
- end
+ undefined ->
+ ?DEFAULT_COMPRESSION;
+ Method1 ->
+ case string:tokens(Method1, "_") of
+ [Method] ->
+ list_to_existing_atom(Method);
+ [Method, Level] ->
+ {list_to_existing_atom(Method), list_to_integer(Level)}
+ end
end.
-
compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, snappy) ->
Bin;
compress(<<?SNAPPY_PREFIX, _/binary>> = Bin, Method) ->
@@ -57,11 +55,11 @@ compress(Term, snappy) ->
try
{ok, CompressedBin} = snappy:compress(Bin),
<<?SNAPPY_PREFIX, CompressedBin/binary>>
- catch exit:snappy_nif_not_loaded ->
- Bin
+ catch
+ exit:snappy_nif_not_loaded ->
+ Bin
end.
-
decompress(<<?SNAPPY_PREFIX, Rest/binary>>) ->
{ok, TermBin} = snappy:decompress(Rest),
binary_to_term(TermBin);
@@ -70,7 +68,6 @@ decompress(<<?TERM_PREFIX, _/binary>> = Bin) ->
decompress(_) ->
error(invalid_compression).
-
is_compressed(<<?SNAPPY_PREFIX, _/binary>>, Method) ->
Method =:= snappy;
is_compressed(<<?COMPRESSED_TERM_PREFIX, _/binary>>, {deflate, _Level}) ->
@@ -84,7 +81,6 @@ is_compressed(Term, _Method) when not is_binary(Term) ->
is_compressed(_, _) ->
error(invalid_compression).
-
uncompressed_size(<<?SNAPPY_PREFIX, Rest/binary>>) ->
{ok, Size} = snappy:uncompressed_length(Rest),
Size;
diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index fdcf23e1b..18ef9c998 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -129,21 +129,22 @@
new_revid/1
]).
-
-export([
start_link/4
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("couch_db_int.hrl").
-define(DBNAME_REGEX,
- "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*" % use the stock CouchDB regex
- "(\\.[0-9]{10,})?$" % but allow an optional shard timestamp at the end
+ % use the stock CouchDB regex
+ "^[a-z][a-z0-9\\_\\$()\\+\\-\\/]*"
+ % but allow an optional shard timestamp at the end
+ "(\\.[0-9]{10,})?$"
).
-define(DEFAULT_COMPRESSIBLE_TYPES,
- "text/*, application/javascript, application/json, application/xml").
+ "text/*, application/javascript, application/json, application/xml"
+).
start_link(Engine, DbName, Filepath, Options) ->
Arg = {Engine, DbName, Filepath, Options},
@@ -170,10 +171,10 @@ open(DbName, Options) ->
close(Db),
throw(Error)
end;
- Else -> Else
+ Else ->
+ Else
end.
-
reopen(#db{} = Db) ->
% We could have just swapped out the storage engine
% for this database during a compaction so we just
@@ -184,7 +185,6 @@ reopen(#db{} = Db) ->
close(Db)
end.
-
% You shouldn't call this. Its part of the ref counting between
% couch_server and couch_db instances.
incref(#db{} = Db) ->
@@ -200,7 +200,6 @@ clustered_db(DbName, Options) when is_list(Options) ->
security = SecProps,
options = [{props, Props}]
}};
-
clustered_db(DbName, #user_ctx{} = UserCtx) ->
clustered_db(DbName, [{user_ctx, UserCtx}]).
@@ -231,7 +230,7 @@ close(#db{} = Db) ->
close(?OLD_DB_REC) ->
ok.
-is_idle(#db{compactor_pid=nil} = Db) ->
+is_idle(#db{compactor_pid = nil} = Db) ->
monitored_by(Db) == [];
is_idle(_Db) ->
false.
@@ -245,20 +244,19 @@ monitored_by(Db) ->
[]
end.
-
-monitor(#db{main_pid=MainPid}) ->
+monitor(#db{main_pid = MainPid}) ->
erlang:monitor(process, MainPid).
start_compact(#db{} = Db) ->
gen_server:call(Db#db.main_pid, start_compact).
-cancel_compact(#db{main_pid=Pid}) ->
+cancel_compact(#db{main_pid = Pid}) ->
gen_server:call(Pid, cancel_compact).
wait_for_compaction(Db) ->
wait_for_compaction(Db, infinity).
-wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) ->
+wait_for_compaction(#db{main_pid = Pid} = Db, Timeout) ->
Start = os:timestamp(),
case gen_server:call(Pid, compactor_pid) of
CPid when is_pid(CPid) ->
@@ -280,7 +278,7 @@ wait_for_compaction(#db{main_pid=Pid}=Db, Timeout) ->
end.
delete_doc(Db, Id, Revisions) ->
- DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
+ DeletedDocs = [#doc{id = Id, revs = [Rev], deleted = true} || Rev <- Revisions],
{ok, [Result]} = update_docs(Db, DeletedDocs, []),
{ok, Result}.
@@ -290,50 +288,55 @@ open_doc(Db, IdOrDocInfo) ->
open_doc(Db, Id, Options) ->
increment_stat(Db, [couchdb, database_reads]),
case open_doc_int(Db, Id, Options) of
- {ok, #doc{deleted=true}=Doc} ->
- case lists:member(deleted, Options) of
- true ->
- apply_open_options({ok, Doc},Options);
- false ->
- {not_found, deleted}
- end;
- Else ->
- apply_open_options(Else,Options)
+ {ok, #doc{deleted = true} = Doc} ->
+ case lists:member(deleted, Options) of
+ true ->
+ apply_open_options({ok, Doc}, Options);
+ false ->
+ {not_found, deleted}
+ end;
+ Else ->
+ apply_open_options(Else, Options)
end.
-apply_open_options({ok, Doc},Options) ->
- apply_open_options2(Doc,Options);
-apply_open_options(Else,_Options) ->
+apply_open_options({ok, Doc}, Options) ->
+ apply_open_options2(Doc, Options);
+apply_open_options(Else, _Options) ->
Else.
-apply_open_options2(Doc,[]) ->
+apply_open_options2(Doc, []) ->
{ok, Doc};
-apply_open_options2(#doc{atts=Atts0,revs=Revs}=Doc,
- [{atts_since, PossibleAncestors}|Rest]) ->
+apply_open_options2(
+ #doc{atts = Atts0, revs = Revs} = Doc,
+ [{atts_since, PossibleAncestors} | Rest]
+) ->
RevPos = find_ancestor_rev_pos(Revs, PossibleAncestors),
- Atts = lists:map(fun(Att) ->
- [AttPos, Data] = couch_att:fetch([revpos, data], Att),
- if AttPos > RevPos -> couch_att:store(data, Data, Att);
- true -> couch_att:store(data, stub, Att)
- end
- end, Atts0),
- apply_open_options2(Doc#doc{atts=Atts}, Rest);
+ Atts = lists:map(
+ fun(Att) ->
+ [AttPos, Data] = couch_att:fetch([revpos, data], Att),
+ if
+ AttPos > RevPos -> couch_att:store(data, Data, Att);
+ true -> couch_att:store(data, stub, Att)
+ end
+ end,
+ Atts0
+ ),
+ apply_open_options2(Doc#doc{atts = Atts}, Rest);
apply_open_options2(Doc, [ejson_body | Rest]) ->
apply_open_options2(couch_doc:with_ejson_body(Doc), Rest);
-apply_open_options2(Doc,[_|Rest]) ->
- apply_open_options2(Doc,Rest).
-
+apply_open_options2(Doc, [_ | Rest]) ->
+ apply_open_options2(Doc, Rest).
find_ancestor_rev_pos({_, []}, _AttsSinceRevs) ->
0;
find_ancestor_rev_pos(_DocRevs, []) ->
0;
-find_ancestor_rev_pos({RevPos, [RevId|Rest]}, AttsSinceRevs) ->
+find_ancestor_rev_pos({RevPos, [RevId | Rest]}, AttsSinceRevs) ->
case lists:member({RevPos, RevId}, AttsSinceRevs) of
- true ->
- RevPos;
- false ->
- find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
+ true ->
+ RevPos;
+ false ->
+ find_ancestor_rev_pos({RevPos - 1, Rest}, AttsSinceRevs)
end.
open_doc_revs(Db, Id, Revs, Options) ->
@@ -350,39 +353,52 @@ get_missing_revs(Db, IdRevsList) ->
find_missing([], []) ->
[];
-find_missing([{Id, Revs}|RestIdRevs], [FullInfo | RestLookupInfo])
- when is_record(FullInfo, full_doc_info) ->
+find_missing([{Id, Revs} | RestIdRevs], [FullInfo | RestLookupInfo]) when
+ is_record(FullInfo, full_doc_info)
+->
case couch_key_tree:find_missing(FullInfo#full_doc_info.rev_tree, Revs) of
- [] ->
- find_missing(RestIdRevs, RestLookupInfo);
- MissingRevs ->
- #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
- LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
- % Find the revs that are possible parents of this rev
- PossibleAncestors =
- lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
- % this leaf is a "possible ancenstor" of the missing
- % revs if this LeafPos lessthan any of the missing revs
- case lists:any(fun({MissingPos, _}) ->
- LeafPos < MissingPos end, MissingRevs) of
- true ->
- [{LeafPos, LeafRevId} | Acc];
- false ->
- Acc
- end
- end, [], LeafRevs),
- [{Id, MissingRevs, PossibleAncestors} |
- find_missing(RestIdRevs, RestLookupInfo)]
+ [] ->
+ find_missing(RestIdRevs, RestLookupInfo);
+ MissingRevs ->
+ #doc_info{revs = RevsInfo} = couch_doc:to_doc_info(FullInfo),
+ LeafRevs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
+ % Find the revs that are possible parents of this rev
+ PossibleAncestors =
+ lists:foldl(
+ fun({LeafPos, LeafRevId}, Acc) ->
+ % this leaf is a "possible ancestor" of the missing
+ % revs if this LeafPos is less than any of the missing revs
+ case
+ lists:any(
+ fun({MissingPos, _}) ->
+ LeafPos < MissingPos
+ end,
+ MissingRevs
+ )
+ of
+ true ->
+ [{LeafPos, LeafRevId} | Acc];
+ false ->
+ Acc
+ end
+ end,
+ [],
+ LeafRevs
+ ),
+ [
+ {Id, MissingRevs, PossibleAncestors}
+ | find_missing(RestIdRevs, RestLookupInfo)
+ ]
end;
-find_missing([{Id, Revs}|RestIdRevs], [not_found | RestLookupInfo]) ->
+find_missing([{Id, Revs} | RestIdRevs], [not_found | RestLookupInfo]) ->
[{Id, Revs, []} | find_missing(RestIdRevs, RestLookupInfo)].
get_doc_info(Db, Id) ->
case get_full_doc_info(Db, Id) of
- #full_doc_info{} = FDI ->
- {ok, couch_doc:to_doc_info(FDI)};
- Else ->
- Else
+ #full_doc_info{} = FDI ->
+ {ok, couch_doc:to_doc_info(FDI)};
+ Else ->
+ Else
end.
get_full_doc_info(Db, Id) ->
@@ -396,27 +412,39 @@ purge_docs(Db, IdRevs) ->
purge_docs(Db, IdRevs, []).
-spec purge_docs(#db{}, [{UUId, Id, [Rev]}], [PurgeOption]) ->
- {ok, [Reply]} when
+ {ok, [Reply]}
+when
UUId :: binary(),
Id :: binary() | list(),
Rev :: {non_neg_integer(), binary()},
PurgeOption :: interactive_edit | replicated_changes,
Reply :: {ok, []} | {ok, [Rev]}.
purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
- UUIDsIdsRevs2 = [{UUID, couch_util:to_binary(Id), Revs}
- || {UUID, Id, Revs} <- UUIDsIdsRevs],
+ UUIDsIdsRevs2 = [
+ {UUID, couch_util:to_binary(Id), Revs}
+ || {UUID, Id, Revs} <- UUIDsIdsRevs
+ ],
% Check here if any UUIDs already exist when
% we're not replicating purge infos
IsRepl = lists:member(replicated_changes, Options),
- if IsRepl -> ok; true ->
- UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2],
- lists:foreach(fun(Resp) ->
- if Resp == not_found -> ok; true ->
- Fmt = "Duplicate purge info UIUD: ~s",
- Reason = io_lib:format(Fmt, [element(2, Resp)]),
- throw({badreq, Reason})
- end
- end, get_purge_infos(Db, UUIDs))
+ if
+ IsRepl ->
+ ok;
+ true ->
+ UUIDs = [UUID || {UUID, _, _} <- UUIDsIdsRevs2],
+ lists:foreach(
+ fun(Resp) ->
+ if
+ Resp == not_found ->
+ ok;
+ true ->
+ Fmt = "Duplicate purge info UIUD: ~s",
+ Reason = io_lib:format(Fmt, [element(2, Resp)]),
+ throw({badreq, Reason})
+ end
+ end,
+ get_purge_infos(Db, UUIDs)
+ )
end,
increment_stat(Db, [couchdb, database_purges]),
gen_server:call(Pid, {purge_docs, UUIDsIdsRevs2, Options}).
@@ -430,7 +458,6 @@ purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
get_purge_infos(Db, UUIDs) ->
couch_db_engine:load_purge_infos(Db, UUIDs).
-
get_minimum_purge_seq(#db{} = Db) ->
PurgeSeq = couch_db_engine:get_purge_seq(Db),
OldestPurgeSeq = couch_db_engine:get_oldest_purge_seq(Db),
@@ -468,24 +495,31 @@ get_minimum_purge_seq(#db{} = Db) ->
{start_key, list_to_binary(?LOCAL_DOC_PREFIX ++ "purge-")}
],
{ok, MinIdxSeq} = couch_db:fold_local_docs(Db, FoldFun, InitMinSeq, Opts),
- FinalSeq = case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
- true -> MinIdxSeq;
- false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
- end,
+ FinalSeq =
+ case MinIdxSeq < PurgeSeq - PurgeInfosLimit of
+ true -> MinIdxSeq;
+ false -> erlang:max(0, PurgeSeq - PurgeInfosLimit)
+ end,
% Log a warning if we've got a purge sequence exceeding the
% configured threshold.
- if FinalSeq >= (PurgeSeq - PurgeInfosLimit) -> ok; true ->
- Fmt = "The purge sequence for '~s' exceeds configured threshold",
- couch_log:warning(Fmt, [couch_db:name(Db)])
+ if
+ FinalSeq >= (PurgeSeq - PurgeInfosLimit) ->
+ ok;
+ true ->
+ Fmt = "The purge sequence for '~s' exceeds configured threshold",
+ couch_log:warning(Fmt, [couch_db:name(Db)])
end,
FinalSeq.
-
purge_client_exists(DbName, DocId, Props) ->
% Warn about clients that have not updated their purge
% checkpoints in the last "index_lag_warn_seconds"
LagWindow = config:get_integer(
- "purge", "index_lag_warn_seconds", 86400), % Default 24 hours
+ % Default 24 hours
+ "purge",
+ "index_lag_warn_seconds",
+ 86400
+ ),
{Mega, Secs, _} = os:timestamp(),
NowSecs = Mega * 1000000 + Secs,
@@ -493,43 +527,50 @@ purge_client_exists(DbName, DocId, Props) ->
try
Exists = couch_db_plugin:is_valid_purge_client(DbName, Props),
- if not Exists -> ok; true ->
- Updated = couch_util:get_value(<<"updated_on">>, Props),
- if is_integer(Updated) and Updated > LagThreshold -> ok; true ->
- Diff = NowSecs - Updated,
- Fmt1 = "Purge checkpoint '~s' not updated in ~p seconds
- in database ~p",
- couch_log:error(Fmt1, [DocId, Diff, DbName])
- end
+ if
+ not Exists ->
+ ok;
+ true ->
+ Updated = couch_util:get_value(<<"updated_on">>, Props),
+ if
+ is_integer(Updated) and Updated > LagThreshold ->
+ ok;
+ true ->
+ Diff = NowSecs - Updated,
+ Fmt1 =
+ "Purge checkpoint '~s' not updated in ~p seconds\n"
+ " in database ~p",
+ couch_log:error(Fmt1, [DocId, Diff, DbName])
+ end
end,
Exists
- catch _:_ ->
- % If we fail to check for a client we have to assume that
- % it exists.
- Fmt2 = "Failed to check purge checkpoint using
- document '~p' in database ~p",
- couch_log:error(Fmt2, [DocId, DbName]),
- true
+ catch
+ _:_ ->
+ % If we fail to check for a client we have to assume that
+ % it exists.
+ Fmt2 =
+ "Failed to check purge checkpoint using\n"
+ " document '~p' in database ~p",
+ couch_log:error(Fmt2, [DocId, DbName]),
+ true
end.
-
-set_purge_infos_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
+set_purge_infos_limit(#db{main_pid = Pid} = Db, Limit) when Limit > 0 ->
check_is_admin(Db),
gen_server:call(Pid, {set_purge_infos_limit, Limit}, infinity);
set_purge_infos_limit(_Db, _Limit) ->
throw(invalid_purge_infos_limit).
-
get_after_doc_read_fun(#db{after_doc_read = Fun}) ->
Fun.
get_before_doc_update_fun(#db{before_doc_update = Fun}) ->
Fun.
-get_committed_update_seq(#db{committed_update_seq=Seq}) ->
+get_committed_update_seq(#db{committed_update_seq = Seq}) ->
Seq.
-get_update_seq(#db{} = Db)->
+get_update_seq(#db{} = Db) ->
couch_db_engine:get_update_seq(Db).
get_user_ctx(#db{user_ctx = UserCtx}) ->
@@ -537,13 +578,13 @@ get_user_ctx(#db{user_ctx = UserCtx}) ->
get_user_ctx(?OLD_DB_REC = Db) ->
?OLD_DB_USER_CTX(Db).
-get_purge_seq(#db{}=Db) ->
+get_purge_seq(#db{} = Db) ->
couch_db_engine:get_purge_seq(Db).
-get_oldest_purge_seq(#db{}=Db) ->
+get_oldest_purge_seq(#db{} = Db) ->
couch_db_engine:get_oldest_purge_seq(Db).
-get_purge_infos_limit(#db{}=Db) ->
+get_purge_infos_limit(#db{} = Db) ->
couch_db_engine:get_purge_infos_limit(Db).
get_pid(#db{main_pid = Pid}) ->
@@ -555,10 +596,10 @@ get_del_doc_count(Db) ->
get_doc_count(Db) ->
{ok, couch_db_engine:get_doc_count(Db)}.
-get_uuid(#db{}=Db) ->
+get_uuid(#db{} = Db) ->
couch_db_engine:get_uuid(Db).
-get_epochs(#db{}=Db) ->
+get_epochs(#db{} = Db) ->
Epochs = couch_db_engine:get_epochs(Db),
validate_epochs(Epochs),
Epochs.
@@ -569,13 +610,13 @@ get_filepath(#db{filepath = FilePath}) ->
get_instance_start_time(#db{instance_start_time = IST}) ->
IST.
-get_compacted_seq(#db{}=Db) ->
+get_compacted_seq(#db{} = Db) ->
couch_db_engine:get_compacted_seq(Db).
get_compactor_pid(#db{compactor_pid = Pid}) ->
Pid.
-get_compactor_pid_sync(#db{main_pid=Pid}) ->
+get_compactor_pid_sync(#db{main_pid = Pid}) ->
case gen_server:call(Pid, compactor_pid, infinity) of
CPid when is_pid(CPid) ->
CPid;
@@ -594,18 +635,21 @@ get_db_info(Db) ->
{ok, DelDocCount} = get_del_doc_count(Db),
SizeInfo = couch_db_engine:get_size_info(Db),
DiskVersion = couch_db_engine:get_disk_version(Db),
- Uuid = case get_uuid(Db) of
- undefined -> null;
- Uuid0 -> Uuid0
- end,
- CompactedSeq = case get_compacted_seq(Db) of
- undefined -> null;
- Else1 -> Else1
- end,
- Props = case couch_db_engine:get_props(Db) of
- undefined -> null;
- Else2 -> {Else2}
- end,
+ Uuid =
+ case get_uuid(Db) of
+ undefined -> null;
+ Uuid0 -> Uuid0
+ end,
+ CompactedSeq =
+ case get_compacted_seq(Db) of
+ undefined -> null;
+ Else1 -> Else1
+ end,
+ Props =
+ case couch_db_engine:get_props(Db) of
+ undefined -> null;
+ Else2 -> {Else2}
+ end,
InfoList = [
{db_name, Name},
{engine, couch_db_engine:get_engine(Db)},
@@ -630,15 +674,15 @@ get_partition_info(#db{} = Db, Partition) when is_binary(Partition) ->
get_partition_info(_Db, _Partition) ->
throw({bad_request, <<"`partition` is not valid">>}).
-
get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) ->
DDocId = couch_util:normalize_ddoc_id(DDocId0),
DbName = mem3:dbname(ShardDbName),
{_, Ref} = spawn_monitor(fun() ->
exit(fabric:open_doc(DbName, DDocId, []))
end),
- receive {'DOWN', Ref, _, _, Response} ->
- Response
+ receive
+ {'DOWN', Ref, _, _, Response} ->
+ Response
end;
get_design_doc(#db{} = Db, DDocId0) ->
DDocId = couch_util:normalize_ddoc_id(DDocId0),
@@ -647,8 +691,9 @@ get_design_doc(#db{} = Db, DDocId0) ->
get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) ->
DbName = mem3:dbname(ShardDbName),
{_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end),
- receive {'DOWN', Ref, _, _, Response} ->
- Response
+ receive
+ {'DOWN', Ref, _, _, Response} ->
+ Response
end;
get_design_docs(#db{} = Db) ->
FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
@@ -659,47 +704,51 @@ get_design_doc_count(#db{} = Db) ->
FoldFun = fun(_, Acc) -> {ok, Acc + 1} end,
fold_design_docs(Db, FoldFun, 0, []).
-check_is_admin(#db{user_ctx=UserCtx}=Db) ->
+check_is_admin(#db{user_ctx = UserCtx} = Db) ->
case is_admin(Db) of
- true -> ok;
+ true ->
+ ok;
false ->
Reason = <<"You are not a db or server admin.">>,
throw_security_error(UserCtx, Reason)
end.
-check_is_member(#db{user_ctx=UserCtx}=Db) ->
+check_is_member(#db{user_ctx = UserCtx} = Db) ->
case is_member(Db) of
true -> ok;
false -> throw_security_error(UserCtx)
end.
-is_admin(#db{user_ctx=UserCtx}=Db) ->
+is_admin(#db{user_ctx = UserCtx} = Db) ->
case couch_db_plugin:check_is_admin(Db) of
- true -> true;
+ true ->
+ true;
false ->
{Admins} = get_admins(Db),
is_authorized(UserCtx, Admins)
end.
-is_member(#db{user_ctx=UserCtx}=Db) ->
+is_member(#db{user_ctx = UserCtx} = Db) ->
case is_admin(Db) of
- true -> true;
+ true ->
+ true;
false ->
case is_public_db(Db) of
- true -> true;
+ true ->
+ true;
false ->
{Members} = get_members(Db),
is_authorized(UserCtx, Members)
end
end.
-is_public_db(#db{}=Db) ->
+is_public_db(#db{} = Db) ->
{Members} = get_members(Db),
Names = couch_util:get_value(<<"names">>, Members, []),
Roles = couch_util:get_value(<<"roles">>, Members, []),
Names =:= [] andalso Roles =:= [].
-is_authorized(#user_ctx{name=UserName,roles=UserRoles}, Security) ->
+is_authorized(#user_ctx{name = UserName, roles = UserRoles}, Security) ->
Names = couch_util:get_value(<<"names">>, Security, []),
Roles = couch_util:get_value(<<"roles">>, Security, []),
case check_security(roles, UserRoles, [<<"_admin">> | Roles]) of
@@ -720,36 +769,38 @@ check_security(names, null, _) ->
check_security(names, UserName, Names) ->
lists:member(UserName, Names).
-throw_security_error(#user_ctx{name=null}=UserCtx) ->
+throw_security_error(#user_ctx{name = null} = UserCtx) ->
Reason = <<"You are not authorized to access this db.">>,
throw_security_error(UserCtx, Reason);
-throw_security_error(#user_ctx{name=_}=UserCtx) ->
+throw_security_error(#user_ctx{name = _} = UserCtx) ->
Reason = <<"You are not allowed to access this db.">>,
throw_security_error(UserCtx, Reason).
-throw_security_error(#user_ctx{}=UserCtx, Reason) ->
+throw_security_error(#user_ctx{} = UserCtx, Reason) ->
Error = security_error_type(UserCtx),
throw({Error, Reason}).
-security_error_type(#user_ctx{name=null}) ->
+security_error_type(#user_ctx{name = null}) ->
unauthorized;
-security_error_type(#user_ctx{name=_}) ->
+security_error_type(#user_ctx{name = _}) ->
forbidden.
-
-get_admins(#db{security=SecProps}) ->
+get_admins(#db{security = SecProps}) ->
couch_util:get_value(<<"admins">>, SecProps, {[]}).
-get_members(#db{security=SecProps}) ->
+get_members(#db{security = SecProps}) ->
% we fallback to readers here for backwards compatibility
- couch_util:get_value(<<"members">>, SecProps,
- couch_util:get_value(<<"readers">>, SecProps, {[]})).
+ couch_util:get_value(
+ <<"members">>,
+ SecProps,
+ couch_util:get_value(<<"readers">>, SecProps, {[]})
+ ).
-get_security(#db{security=SecProps}) ->
+get_security(#db{security = SecProps}) ->
{SecProps};
get_security(?OLD_DB_REC = Db) ->
{?OLD_DB_SECURITY(Db)}.
-set_security(#db{main_pid=Pid}=Db, {NewSecProps}) when is_list(NewSecProps) ->
+set_security(#db{main_pid = Pid} = Db, {NewSecProps}) when is_list(NewSecProps) ->
check_is_admin(Db),
ok = validate_security_object(NewSecProps),
gen_server:call(Pid, {set_security, NewSecProps}, infinity);
@@ -762,8 +813,11 @@ set_user_ctx(#db{} = Db, UserCtx) ->
validate_security_object(SecProps) ->
Admins = couch_util:get_value(<<"admins">>, SecProps, {[]}),
% we fallback to readers here for backwards compatibility
- Members = couch_util:get_value(<<"members">>, SecProps,
- couch_util:get_value(<<"readers">>, SecProps, {[]})),
+ Members = couch_util:get_value(
+ <<"members">>,
+ SecProps,
+ couch_util:get_value(<<"readers">>, SecProps, {[]})
+ ),
ok = validate_names_and_roles(Admins),
ok = validate_names_and_roles(Members),
ok.
@@ -771,18 +825,18 @@ validate_security_object(SecProps) ->
% validate user input
validate_names_and_roles({Props}) when is_list(Props) ->
case couch_util:get_value(<<"names">>, Props, []) of
- Ns when is_list(Ns) ->
- [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)],
+ Ns when is_list(Ns) ->
+ [throw("names must be a JSON list of strings") || N <- Ns, not is_binary(N)],
Ns;
- _ ->
- throw("names must be a JSON list of strings")
+ _ ->
+ throw("names must be a JSON list of strings")
end,
case couch_util:get_value(<<"roles">>, Props, []) of
- Rs when is_list(Rs) ->
- [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)],
- Rs;
- _ ->
- throw("roles must be a JSON list of strings")
+ Rs when is_list(Rs) ->
+ [throw("roles must be a JSON list of strings") || R <- Rs, not is_binary(R)],
+ Rs;
+ _ ->
+ throw("roles must be a JSON list of strings")
end,
ok;
validate_names_and_roles(_) ->
@@ -791,18 +845,17 @@ validate_names_and_roles(_) ->
get_revs_limit(#db{} = Db) ->
couch_db_engine:get_revs_limit(Db).
-set_revs_limit(#db{main_pid=Pid}=Db, Limit) when Limit > 0 ->
+set_revs_limit(#db{main_pid = Pid} = Db, Limit) when Limit > 0 ->
check_is_admin(Db),
gen_server:call(Pid, {set_revs_limit, Limit}, infinity);
set_revs_limit(_Db, _Limit) ->
throw(invalid_revs_limit).
-name(#db{name=Name}) ->
+name(#db{name = Name}) ->
Name;
name(?OLD_DB_REC = Db) ->
?OLD_DB_NAME(Db).
-
validate_docid(#db{} = Db, DocId) when is_binary(DocId) ->
couch_doc:validate_docid(DocId, name(Db)),
case is_partitioned(Db) of
@@ -812,7 +865,6 @@ validate_docid(#db{} = Db, DocId) when is_binary(DocId) ->
ok
end.
-
doc_from_json_obj_validate(#db{} = Db, DocJson) ->
Doc = couch_doc:from_json_obj_validate(DocJson, name(Db)),
{Props} = DocJson,
@@ -825,22 +877,21 @@ doc_from_json_obj_validate(#db{} = Db, DocJson) ->
end,
Doc.
-
update_doc(Db, Doc, Options) ->
update_doc(Db, Doc, Options, interactive_edit).
update_doc(Db, Doc, Options, UpdateType) ->
case update_docs(Db, [Doc], Options, UpdateType) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {ok, [{{_Id, _Rev}, Error}]} ->
- throw(Error);
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- {Pos, [RevId | _]} = Doc#doc.revs,
- {ok, {Pos, RevId}}
+ {ok, [{ok, NewRev}]} ->
+ {ok, NewRev};
+ {ok, [{{_Id, _Rev}, Error}]} ->
+ throw(Error);
+ {ok, [Error]} ->
+ throw(Error);
+ {ok, []} ->
+ % replication success
+ {Pos, [RevId | _]} = Doc#doc.revs,
+ {ok, {Pos, RevId}}
end.
update_docs(Db, Docs) ->
@@ -860,30 +911,30 @@ group_alike_docs(Docs) ->
group_alike_docs([], Buckets) ->
lists:reverse(lists:map(fun lists:reverse/1, Buckets));
-group_alike_docs([Doc|Rest], []) ->
+group_alike_docs([Doc | Rest], []) ->
group_alike_docs(Rest, [[Doc]]);
-group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
- [#doc{id=BucketId}|_] = Bucket,
+group_alike_docs([Doc | Rest], [Bucket | RestBuckets]) ->
+ [#doc{id = BucketId} | _] = Bucket,
case Doc#doc.id == BucketId of
- true ->
- % add to existing bucket
- group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
- false ->
- % add to new bucket
- group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
+ true ->
+ % add to existing bucket
+ group_alike_docs(Rest, [[Doc | Bucket] | RestBuckets]);
+ false ->
+ % add to new bucket
+ group_alike_docs(Rest, [[Doc] | [Bucket | RestBuckets]])
end.
-validate_doc_update(#db{}=Db, #doc{id= <<"_design/",_/binary>>}=Doc, _GetDiskDocFun) ->
+validate_doc_update(#db{} = Db, #doc{id = <<"_design/", _/binary>>} = Doc, _GetDiskDocFun) ->
case catch check_is_admin(Db) of
ok -> validate_ddoc(Db, Doc);
Error -> Error
end;
validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) ->
ValidationFuns = load_validation_funs(Db),
- validate_doc_update(Db#db{validate_doc_funs=ValidationFuns}, Doc, Fun);
-validate_doc_update(#db{validate_doc_funs=[]}, _Doc, _GetDiskDocFun) ->
+ validate_doc_update(Db#db{validate_doc_funs = ValidationFuns}, Doc, Fun);
+validate_doc_update(#db{validate_doc_funs = []}, _Doc, _GetDiskDocFun) ->
ok;
-validate_doc_update(_Db, #doc{id= <<"_local/",_/binary>>}, _GetDiskDocFun) ->
+validate_doc_update(_Db, #doc{id = <<"_local/", _/binary>>}, _GetDiskDocFun) ->
ok;
validate_doc_update(Db, Doc, GetDiskDocFun) ->
case get(io_priority) of
@@ -911,22 +962,26 @@ validate_doc_update_int(Db, Doc, GetDiskDocFun) ->
JsonCtx = couch_util:json_user_ctx(Db),
SecObj = get_security(Db),
try
- [case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
- ok -> ok;
- Error -> throw(Error)
- end || Fun <- Db#db.validate_doc_funs],
+ [
+ case Fun(Doc, DiskDoc, JsonCtx, SecObj) of
+ ok -> ok;
+ Error -> throw(Error)
+ end
+ || Fun <- Db#db.validate_doc_funs
+ ],
ok
catch
throw:Error ->
Error
end
end,
- couch_stats:update_histogram([couchdb, query_server, vdu_process_time],
- Fun).
-
+ couch_stats:update_histogram(
+ [couchdb, query_server, vdu_process_time],
+ Fun
+ ).
% to be safe, spawn a middleman here
-load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) ->
+load_validation_funs(#db{main_pid = Pid, name = <<"shards/", _/binary>>} = Db) ->
{_, Ref} = spawn_monitor(fun() ->
exit(ddoc_cache:open(mem3:dbname(Db#db.name), validation_funs))
end),
@@ -941,242 +996,326 @@ load_validation_funs(#db{main_pid=Pid, name = <<"shards/", _/binary>>}=Db) ->
couch_log:error("could not load validation funs ~p", [Reason]),
throw(internal_server_error)
end;
-load_validation_funs(#db{main_pid=Pid}=Db) ->
+load_validation_funs(#db{main_pid = Pid} = Db) ->
{ok, DDocInfos} = get_design_docs(Db),
- OpenDocs = fun
- (#full_doc_info{}=D) ->
- {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
- Doc
+ OpenDocs = fun(#full_doc_info{} = D) ->
+ {ok, Doc} = open_doc_int(Db, D, [ejson_body]),
+ Doc
end,
DDocs = lists:map(OpenDocs, DDocInfos),
- Funs = lists:flatmap(fun(DDoc) ->
- case couch_doc:get_validate_doc_fun(DDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end, DDocs),
+ Funs = lists:flatmap(
+ fun(DDoc) ->
+ case couch_doc:get_validate_doc_fun(DDoc) of
+ nil -> [];
+ Fun -> [Fun]
+ end
+ end,
+ DDocs
+ ),
gen_server:cast(Pid, {load_validation_funs, Funs}),
Funs.
reload_validation_funs(#db{} = Db) ->
gen_server:cast(Db#db.main_pid, {load_validation_funs, undefined}).
-prep_and_validate_update(Db, #doc{id=Id,revs={RevStart, Revs}}=Doc,
- OldFullDocInfo, LeafRevsDict, AllowConflict) ->
+prep_and_validate_update(
+ Db,
+ #doc{id = Id, revs = {RevStart, Revs}} = Doc,
+ OldFullDocInfo,
+ LeafRevsDict,
+ AllowConflict
+) ->
case Revs of
- [PrevRev|_] ->
- case dict:find({RevStart, PrevRev}, LeafRevsDict) of
- {ok, {#leaf{deleted=Deleted, ptr=DiskSp}, DiskRevs}} ->
- case couch_doc:has_stubs(Doc) of
- true ->
- DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
- Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
- {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
- false ->
- LoadDiskDoc = fun() -> make_doc(Db,Id,Deleted,DiskSp,DiskRevs) end,
- {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
+ [PrevRev | _] ->
+ case dict:find({RevStart, PrevRev}, LeafRevsDict) of
+ {ok, {#leaf{deleted = Deleted, ptr = DiskSp}, DiskRevs}} ->
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ DiskDoc = make_doc(Db, Id, Deleted, DiskSp, DiskRevs),
+ Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+ {validate_doc_update(Db, Doc2, fun() -> DiskDoc end), Doc2};
+ false ->
+ LoadDiskDoc = fun() -> make_doc(Db, Id, Deleted, DiskSp, DiskRevs) end,
+ {validate_doc_update(Db, Doc, LoadDiskDoc), Doc}
+ end;
+ error when AllowConflict ->
+ % will generate error if
+ couch_doc:merge_stubs(Doc, #doc{}),
+ % there are stubs
+ {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
+ error ->
+ {conflict, Doc}
end;
- error when AllowConflict ->
- couch_doc:merge_stubs(Doc, #doc{}), % will generate error if
- % there are stubs
- {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
- error ->
- {conflict, Doc}
- end;
- [] ->
- % new doc, and we have existing revs.
- % reuse existing deleted doc
- if OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
- {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
- true ->
- {conflict, Doc}
- end
+ [] ->
+ % new doc, and we have existing revs.
+ % reuse existing deleted doc
+ if
+ OldFullDocInfo#full_doc_info.deleted orelse AllowConflict ->
+ {validate_doc_update(Db, Doc, fun() -> nil end), Doc};
+ true ->
+ {conflict, Doc}
+ end
end.
-
-
-prep_and_validate_updates(_Db, [], [], _AllowConflict, AccPrepped,
- AccFatalErrors) ->
+prep_and_validate_updates(
+ _Db,
+ [],
+ [],
+ _AllowConflict,
+ AccPrepped,
+ AccFatalErrors
+) ->
AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
{AccPrepped2, AccFatalErrors};
-prep_and_validate_updates(Db, [DocBucket|RestBuckets], [not_found|RestLookups],
- AllowConflict, AccPrepped, AccErrors) ->
+prep_and_validate_updates(
+ Db,
+ [DocBucket | RestBuckets],
+ [not_found | RestLookups],
+ AllowConflict,
+ AccPrepped,
+ AccErrors
+) ->
% no existing revs are known,
{PreppedBucket, AccErrors3} = lists:foldl(
- fun(#doc{revs=Revs}=Doc, {AccBucket, AccErrors2}) ->
+ fun(#doc{revs = Revs} = Doc, {AccBucket, AccErrors2}) ->
case couch_doc:has_stubs(Doc) of
- true ->
- couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
- false -> ok
+ true ->
+ % will throw exception
+ couch_doc:merge_stubs(Doc, #doc{});
+ false ->
+ ok
end,
case Revs of
- {0, []} ->
- case validate_doc_update(Db, Doc, fun() -> nil end) of
- ok ->
- {[Doc | AccBucket], AccErrors2};
- Error ->
- {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]}
- end;
- _ ->
- % old revs specified but none exist, a conflict
- {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]}
+ {0, []} ->
+ case validate_doc_update(Db, Doc, fun() -> nil end) of
+ ok ->
+ {[Doc | AccBucket], AccErrors2};
+ Error ->
+ {AccBucket, [{doc_tag(Doc), Error} | AccErrors2]}
+ end;
+ _ ->
+ % old revs specified but none exist, a conflict
+ {AccBucket, [{doc_tag(Doc), conflict} | AccErrors2]}
end
end,
- {[], AccErrors}, DocBucket),
+ {[], AccErrors},
+ DocBucket
+ ),
- prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
- [PreppedBucket | AccPrepped], AccErrors3);
-prep_and_validate_updates(Db, [DocBucket|RestBuckets],
- [#full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo|RestLookups],
- AllowConflict, AccPrepped, AccErrors) ->
+ prep_and_validate_updates(
+ Db,
+ RestBuckets,
+ RestLookups,
+ AllowConflict,
+ [PreppedBucket | AccPrepped],
+ AccErrors3
+ );
+prep_and_validate_updates(
+ Db,
+ [DocBucket | RestBuckets],
+ [#full_doc_info{rev_tree = OldRevTree} = OldFullDocInfo | RestLookups],
+ AllowConflict,
+ AccPrepped,
+ AccErrors
+) ->
Leafs = couch_key_tree:get_all_leafs(OldRevTree),
LeafRevsDict = dict:from_list([
- {{Start, RevId}, {Leaf, Revs}} ||
- {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
+ {{Start, RevId}, {Leaf, Revs}}
+ || {Leaf, {Start, [RevId | _]} = Revs} <- Leafs
]),
{PreppedBucket, AccErrors3} = lists:foldl(
fun(Doc, {Docs2Acc, AccErrors2}) ->
- case prep_and_validate_update(Db, Doc, OldFullDocInfo,
- LeafRevsDict, AllowConflict) of
- {ok, Doc2} ->
- {[Doc2 | Docs2Acc], AccErrors2};
- {Error, _} ->
- % Record the error
- {Docs2Acc, [{doc_tag(Doc), Error} |AccErrors2]}
+ case
+ prep_and_validate_update(
+ Db,
+ Doc,
+ OldFullDocInfo,
+ LeafRevsDict,
+ AllowConflict
+ )
+ of
+ {ok, Doc2} ->
+ {[Doc2 | Docs2Acc], AccErrors2};
+ {Error, _} ->
+ % Record the error
+ {Docs2Acc, [{doc_tag(Doc), Error} | AccErrors2]}
end
end,
- {[], AccErrors}, DocBucket),
- prep_and_validate_updates(Db, RestBuckets, RestLookups, AllowConflict,
- [PreppedBucket | AccPrepped], AccErrors3).
-
+ {[], AccErrors},
+ DocBucket
+ ),
+ prep_and_validate_updates(
+ Db,
+ RestBuckets,
+ RestLookups,
+ AllowConflict,
+ [PreppedBucket | AccPrepped],
+ AccErrors3
+ ).
update_docs(Db, Docs, Options) ->
update_docs(Db, Docs, Options, interactive_edit).
-
prep_and_validate_replicated_updates(_Db, [], [], AccPrepped, AccErrors) ->
- Errors2 = [{{Id, {Pos, Rev}}, Error} ||
- {#doc{id=Id,revs={Pos,[Rev|_]}}, Error} <- AccErrors],
+ Errors2 = [
+ {{Id, {Pos, Rev}}, Error}
+ || {#doc{id = Id, revs = {Pos, [Rev | _]}}, Error} <- AccErrors
+ ],
AccPrepped2 = lists:reverse(lists:map(fun lists:reverse/1, AccPrepped)),
{AccPrepped2, lists:reverse(Errors2)};
-prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldInfo], AccPrepped, AccErrors) ->
+prep_and_validate_replicated_updates(
+ Db, [Bucket | RestBuckets], [OldInfo | RestOldInfo], AccPrepped, AccErrors
+) ->
case OldInfo of
- not_found ->
- {ValidatedBucket, AccErrors3} = lists:foldl(
- fun(Doc, {AccPrepped2, AccErrors2}) ->
- case couch_doc:has_stubs(Doc) of
- true ->
- couch_doc:merge_stubs(Doc, #doc{}); % will throw exception
- false -> ok
- end,
- case validate_doc_update(Db, Doc, fun() -> nil end) of
- ok ->
- {[Doc | AccPrepped2], AccErrors2};
- Error ->
- {AccPrepped2, [{Doc, Error} | AccErrors2]}
- end
- end,
- {[], AccErrors}, Bucket),
- prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3);
- #full_doc_info{rev_tree=OldTree} ->
- OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
- OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _}|_]} <- OldLeafs],
- NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
- NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
- Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
- LeafRevsFullDict = dict:from_list( [{{Start, RevId}, FullPath} || {Start, [{RevId, _}|_]}=FullPath <- Leafs]),
- {ValidatedBucket, AccErrors3} =
- lists:foldl(
- fun(#doc{id=Id,revs={Pos, [RevId|_]}}=Doc, {AccValidated, AccErrors2}) ->
- IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
- case dict:find({Pos, RevId}, LeafRevsFullDict) of
- {ok, {Start, Path}} when not IsOldLeaf ->
- % our unflushed doc is a leaf node. Go back on the path
- % to find the previous rev that's on disk.
-
- LoadPrevRevFun = fun() ->
- make_first_doc_on_disk(Db,Id,Start-1, tl(Path))
- end,
-
+ not_found ->
+ {ValidatedBucket, AccErrors3} = lists:foldl(
+ fun(Doc, {AccPrepped2, AccErrors2}) ->
case couch_doc:has_stubs(Doc) of
- true ->
- DiskDoc = case LoadPrevRevFun() of
- #doc{} = DiskDoc0 ->
- DiskDoc0;
+ true ->
+ % will throw exception
+ couch_doc:merge_stubs(Doc, #doc{});
+ false ->
+ ok
+ end,
+ case validate_doc_update(Db, Doc, fun() -> nil end) of
+ ok ->
+ {[Doc | AccPrepped2], AccErrors2};
+ Error ->
+ {AccPrepped2, [{Doc, Error} | AccErrors2]}
+ end
+ end,
+ {[], AccErrors},
+ Bucket
+ ),
+ prep_and_validate_replicated_updates(
+ Db, RestBuckets, RestOldInfo, [ValidatedBucket | AccPrepped], AccErrors3
+ );
+ #full_doc_info{rev_tree = OldTree} ->
+ OldLeafs = couch_key_tree:get_all_leafs_full(OldTree),
+ OldLeafsLU = [{Start, RevId} || {Start, [{RevId, _} | _]} <- OldLeafs],
+ NewPaths = lists:map(fun couch_doc:to_path/1, Bucket),
+ NewRevTree = couch_key_tree:multi_merge(OldTree, NewPaths),
+ Leafs = couch_key_tree:get_all_leafs_full(NewRevTree),
+ LeafRevsFullDict = dict:from_list([
+ {{Start, RevId}, FullPath}
+ || {Start, [{RevId, _} | _]} = FullPath <- Leafs
+ ]),
+ {ValidatedBucket, AccErrors3} =
+ lists:foldl(
+ fun(#doc{id = Id, revs = {Pos, [RevId | _]}} = Doc, {AccValidated, AccErrors2}) ->
+ IsOldLeaf = lists:member({Pos, RevId}, OldLeafsLU),
+ case dict:find({Pos, RevId}, LeafRevsFullDict) of
+ {ok, {Start, Path}} when not IsOldLeaf ->
+ % our unflushed doc is a leaf node. Go back on the path
+ % to find the previous rev that's on disk.
+
+ LoadPrevRevFun = fun() ->
+ make_first_doc_on_disk(Db, Id, Start - 1, tl(Path))
+ end,
+
+ case couch_doc:has_stubs(Doc) of
+ true ->
+ DiskDoc =
+ case LoadPrevRevFun() of
+ #doc{} = DiskDoc0 ->
+ DiskDoc0;
+ _ ->
+ % Force a missing_stub exception
+ couch_doc:merge_stubs(Doc, #doc{})
+ end,
+ Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+ GetDiskDocFun = fun() -> DiskDoc end;
+ false ->
+ Doc2 = Doc,
+ GetDiskDocFun = LoadPrevRevFun
+ end,
+
+ case validate_doc_update(Db, Doc2, GetDiskDocFun) of
+ ok ->
+ {[Doc2 | AccValidated], AccErrors2};
+ Error ->
+ {AccValidated, [{Doc, Error} | AccErrors2]}
+ end;
_ ->
- % Force a missing_stub exception
- couch_doc:merge_stubs(Doc, #doc{})
- end,
- Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
- GetDiskDocFun = fun() -> DiskDoc end;
- false ->
- Doc2 = Doc,
- GetDiskDocFun = LoadPrevRevFun
+ % this doc isn't a leaf or already exists in the tree.
+ % ignore but consider it a success.
+ {AccValidated, AccErrors2}
+ end
end,
-
- case validate_doc_update(Db, Doc2, GetDiskDocFun) of
- ok ->
- {[Doc2 | AccValidated], AccErrors2};
- Error ->
- {AccValidated, [{Doc, Error} | AccErrors2]}
- end;
- _ ->
- % this doc isn't a leaf or already exists in the tree.
- % ignore but consider it a success.
- {AccValidated, AccErrors2}
- end
- end,
- {[], AccErrors}, Bucket),
- prep_and_validate_replicated_updates(Db, RestBuckets, RestOldInfo,
- [ValidatedBucket | AccPrepped], AccErrors3)
+ {[], AccErrors},
+ Bucket
+ ),
+ prep_and_validate_replicated_updates(
+ Db,
+ RestBuckets,
+ RestOldInfo,
+ [ValidatedBucket | AccPrepped],
+ AccErrors3
+ )
end.
-
-
-new_revid(#doc{body=Body, revs={OldStart,OldRevs}, atts=Atts, deleted=Deleted}) ->
- DigestedAtts = lists:foldl(fun(Att, Acc) ->
- [N, T, M] = couch_att:fetch([name, type, md5], Att),
- case M == <<>> of
- true -> Acc;
- false -> [{N, T, M} | Acc]
- end
- end, [], Atts),
+new_revid(#doc{body = Body, revs = {OldStart, OldRevs}, atts = Atts, deleted = Deleted}) ->
+ DigestedAtts = lists:foldl(
+ fun(Att, Acc) ->
+ [N, T, M] = couch_att:fetch([name, type, md5], Att),
+ case M == <<>> of
+ true -> Acc;
+ false -> [{N, T, M} | Acc]
+ end
+ end,
+ [],
+ Atts
+ ),
case DigestedAtts of
Atts2 when length(Atts) =/= length(Atts2) ->
% We must have old style non-md5 attachments
?l2b(integer_to_list(couch_util:rand32()));
Atts2 ->
- OldRev = case OldRevs of [] -> 0; [OldRev0|_] -> OldRev0 end,
- couch_hash:md5_hash(term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}]))
+ OldRev =
+ case OldRevs of
+ [] -> 0;
+ [OldRev0 | _] -> OldRev0
+ end,
+ couch_hash:md5_hash(
+ term_to_binary([Deleted, OldStart, OldRev, Body, Atts2], [{minor_version, 1}])
+ )
end.
new_revs([], OutBuckets, IdRevsAcc) ->
{lists:reverse(OutBuckets), IdRevsAcc};
-new_revs([Bucket|RestBuckets], OutBuckets, IdRevsAcc) ->
+new_revs([Bucket | RestBuckets], OutBuckets, IdRevsAcc) ->
{NewBucket, IdRevsAcc3} = lists:mapfoldl(
- fun(#doc{revs={Start, RevIds}}=Doc, IdRevsAcc2)->
- NewRevId = new_revid(Doc),
- {Doc#doc{revs={Start+1, [NewRevId | RevIds]}},
- [{doc_tag(Doc), {ok, {Start+1, NewRevId}}} | IdRevsAcc2]}
- end, IdRevsAcc, Bucket),
- new_revs(RestBuckets, [NewBucket|OutBuckets], IdRevsAcc3).
-
-check_dup_atts(#doc{atts=Atts}=Doc) ->
- lists:foldl(fun(Att, Names) ->
- Name = couch_att:fetch(name, Att),
- case ordsets:is_element(Name, Names) of
- true -> throw({bad_request, <<"Duplicate attachments">>});
- false -> ordsets:add_element(Name, Names)
- end
- end, ordsets:new(), Atts),
+ fun(#doc{revs = {Start, RevIds}} = Doc, IdRevsAcc2) ->
+ NewRevId = new_revid(Doc),
+ {Doc#doc{revs = {Start + 1, [NewRevId | RevIds]}}, [
+ {doc_tag(Doc), {ok, {Start + 1, NewRevId}}} | IdRevsAcc2
+ ]}
+ end,
+ IdRevsAcc,
+ Bucket
+ ),
+ new_revs(RestBuckets, [NewBucket | OutBuckets], IdRevsAcc3).
+
+check_dup_atts(#doc{atts = Atts} = Doc) ->
+ lists:foldl(
+ fun(Att, Names) ->
+ Name = couch_att:fetch(name, Att),
+ case ordsets:is_element(Name, Names) of
+ true -> throw({bad_request, <<"Duplicate attachments">>});
+ false -> ordsets:add_element(Name, Names)
+ end
+ end,
+ ordsets:new(),
+ Atts
+ ),
Doc.
tag_docs([]) ->
[];
-tag_docs([#doc{meta=Meta}=Doc | Rest]) ->
- [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)].
+tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
+ [Doc#doc{meta = [{ref, make_ref()} | Meta]} | tag_docs(Rest)].
-doc_tag(#doc{meta=Meta}) ->
+doc_tag(#doc{meta = Meta}) ->
case lists:keyfind(ref, 1, Meta) of
{ref, Ref} when is_reference(Ref) -> Ref;
false -> throw(doc_not_tagged);
@@ -1187,58 +1326,105 @@ update_docs(Db, Docs0, Options, replicated_changes) ->
Docs = tag_docs(Docs0),
PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
- prep_and_validate_replicated_updates(Db0, DocBuckets0,
- ExistingDocInfos, [], [])
+ prep_and_validate_replicated_updates(
+ Db0,
+ DocBuckets0,
+ ExistingDocInfos,
+ [],
+ []
+ )
end,
- {ok, DocBuckets, NonRepDocs, DocErrors}
- = before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
+ {ok, DocBuckets, NonRepDocs, DocErrors} =
+ before_docs_update(Db, Docs, PrepValidateFun, replicated_changes),
- DocBuckets2 = [[doc_flush_atts(Db, check_dup_atts(Doc))
- || Doc <- Bucket] || Bucket <- DocBuckets],
- {ok, _} = write_and_commit(Db, DocBuckets2,
- NonRepDocs, [merge_conflicts | Options]),
+ DocBuckets2 = [
+ [
+ doc_flush_atts(Db, check_dup_atts(Doc))
+ || Doc <- Bucket
+ ]
+ || Bucket <- DocBuckets
+ ],
+ {ok, _} = write_and_commit(
+ Db,
+ DocBuckets2,
+ NonRepDocs,
+ [merge_conflicts | Options]
+ ),
{ok, DocErrors};
-
update_docs(Db, Docs0, Options, interactive_edit) ->
Docs = tag_docs(Docs0),
AllOrNothing = lists:member(all_or_nothing, Options),
PrepValidateFun = fun(Db0, DocBuckets0, ExistingDocInfos) ->
- prep_and_validate_updates(Db0, DocBuckets0, ExistingDocInfos,
- AllOrNothing, [], [])
+ prep_and_validate_updates(
+ Db0,
+ DocBuckets0,
+ ExistingDocInfos,
+ AllOrNothing,
+ [],
+ []
+ )
end,
- {ok, DocBuckets, NonRepDocs, DocErrors}
- = before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
-
- if (AllOrNothing) and (DocErrors /= []) ->
- RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
- {aborted, lists:map(fun({Ref, Error}) ->
- #doc{id=Id,revs={Start,RevIds}} = dict:fetch(Ref, RefErrorDict),
- case {Start, RevIds} of
- {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
- {0, []} -> {{Id, {0, <<>>}}, Error}
- end
- end, DocErrors)};
- true ->
- Options2 = if AllOrNothing -> [merge_conflicts];
- true -> [] end ++ Options,
- DocBuckets2 = [[
- doc_flush_atts(Db, set_new_att_revpos(
- check_dup_atts(Doc)))
- || Doc <- B] || B <- DocBuckets],
- {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
-
- {ok, CommitResults} = write_and_commit(Db, DocBuckets3,
- NonRepDocs, Options2),
-
- ResultsDict = lists:foldl(fun({Key, Resp}, ResultsAcc) ->
- dict:store(Key, Resp, ResultsAcc)
- end, dict:from_list(IdRevs), CommitResults ++ DocErrors),
- {ok, lists:map(fun(Doc) ->
- dict:fetch(doc_tag(Doc), ResultsDict)
- end, Docs)}
+ {ok, DocBuckets, NonRepDocs, DocErrors} =
+ before_docs_update(Db, Docs, PrepValidateFun, interactive_edit),
+
+ if
+ (AllOrNothing) and (DocErrors /= []) ->
+ RefErrorDict = dict:from_list([{doc_tag(Doc), Doc} || Doc <- Docs]),
+ {aborted,
+ lists:map(
+ fun({Ref, Error}) ->
+ #doc{id = Id, revs = {Start, RevIds}} = dict:fetch(Ref, RefErrorDict),
+ case {Start, RevIds} of
+ {Pos, [RevId | _]} -> {{Id, {Pos, RevId}}, Error};
+ {0, []} -> {{Id, {0, <<>>}}, Error}
+ end
+ end,
+ DocErrors
+ )};
+ true ->
+ Options2 =
+ if
+ AllOrNothing -> [merge_conflicts];
+ true -> []
+ end ++ Options,
+ DocBuckets2 = [
+ [
+ doc_flush_atts(
+ Db,
+ set_new_att_revpos(
+ check_dup_atts(Doc)
+ )
+ )
+ || Doc <- B
+ ]
+ || B <- DocBuckets
+ ],
+ {DocBuckets3, IdRevs} = new_revs(DocBuckets2, [], []),
+
+ {ok, CommitResults} = write_and_commit(
+ Db,
+ DocBuckets3,
+ NonRepDocs,
+ Options2
+ ),
+
+ ResultsDict = lists:foldl(
+ fun({Key, Resp}, ResultsAcc) ->
+ dict:store(Key, Resp, ResultsAcc)
+ end,
+ dict:from_list(IdRevs),
+ CommitResults ++ DocErrors
+ ),
+ {ok,
+ lists:map(
+ fun(Doc) ->
+ dict:fetch(doc_tag(Doc), ResultsDict)
+ end,
+ Docs
+ )}
end.
% Returns the first available document on disk. Input list is a full rev path
@@ -1246,10 +1432,10 @@ update_docs(Db, Docs0, Options, interactive_edit) ->
make_first_doc_on_disk(_Db, _Id, _Pos, []) ->
nil;
make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #doc{}} | RestPath]) ->
- make_first_doc_on_disk(Db, Id, Pos-1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING}|RestPath]) ->
make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
-make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted=IsDel, ptr=Sp}} |_]=DocPath) ->
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, ?REV_MISSING} | RestPath]) ->
+ make_first_doc_on_disk(Db, Id, Pos - 1, RestPath);
+make_first_doc_on_disk(Db, Id, Pos, [{_Rev, #leaf{deleted = IsDel, ptr = Sp}} | _] = DocPath) ->
Revs = [Rev || {Rev, _} <- DocPath],
make_doc(Db, Id, IsDel, Sp, {Pos, Revs}).
@@ -1267,90 +1453,105 @@ collect_results_with_metrics(Pid, MRef, []) ->
collect_results(Pid, MRef, ResultsAcc) ->
receive
- {result, Pid, Result} ->
- collect_results(Pid, MRef, [Result | ResultsAcc]);
- {done, Pid} ->
- {ok, ResultsAcc};
- {retry, Pid} ->
- retry;
- {'DOWN', MRef, _, _, Reason} ->
- exit(Reason)
+ {result, Pid, Result} ->
+ collect_results(Pid, MRef, [Result | ResultsAcc]);
+ {done, Pid} ->
+ {ok, ResultsAcc};
+ {retry, Pid} ->
+ retry;
+ {'DOWN', MRef, _, _, Reason} ->
+ exit(Reason)
end.
-write_and_commit(#db{main_pid=Pid, user_ctx=Ctx}=Db, DocBuckets1,
- NonRepDocs, Options) ->
+write_and_commit(
+ #db{main_pid = Pid, user_ctx = Ctx} = Db,
+ DocBuckets1,
+ NonRepDocs,
+ Options
+) ->
DocBuckets = prepare_doc_summaries(Db, DocBuckets1),
MergeConflicts = lists:member(merge_conflicts, Options),
MRef = erlang:monitor(process, Pid),
try
Pid ! {update_docs, self(), DocBuckets, NonRepDocs, MergeConflicts},
case collect_results_with_metrics(Pid, MRef, []) of
- {ok, Results} -> {ok, Results};
- retry ->
- % This can happen if the db file we wrote to was swapped out by
- % compaction. Retry by reopening the db and writing to the current file
- {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
- DocBuckets2 = [
- [doc_flush_atts(Db2, Doc) || Doc <- Bucket] ||
- Bucket <- DocBuckets1
- ],
- % We only retry once
- DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
- close(Db2),
- Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts},
- case collect_results_with_metrics(Pid, MRef, []) of
- {ok, Results} -> {ok, Results};
- retry -> throw({update_error, compaction_retry})
- end
+ {ok, Results} ->
+ {ok, Results};
+ retry ->
+ % This can happen if the db file we wrote to was swapped out by
+ % compaction. Retry by reopening the db and writing to the current file
+ {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]),
+ DocBuckets2 = [
+ [doc_flush_atts(Db2, Doc) || Doc <- Bucket]
+ || Bucket <- DocBuckets1
+ ],
+ % We only retry once
+ DocBuckets3 = prepare_doc_summaries(Db2, DocBuckets2),
+ close(Db2),
+ Pid ! {update_docs, self(), DocBuckets3, NonRepDocs, MergeConflicts},
+ case collect_results_with_metrics(Pid, MRef, []) of
+ {ok, Results} -> {ok, Results};
+ retry -> throw({update_error, compaction_retry})
+ end
end
after
erlang:demonitor(MRef, [flush])
end.
-
prepare_doc_summaries(Db, BucketList) ->
- [lists:map(
- fun(#doc{body = Body, atts = Atts} = Doc0) ->
- DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
- {ok, SizeInfo} = couch_att:size_info(Atts),
- AttsStream = case Atts of
- [Att | _] ->
- {stream, StreamEngine} = couch_att:fetch(data, Att),
- StreamEngine;
- [] ->
- nil
+ [
+ lists:map(
+ fun(#doc{body = Body, atts = Atts} = Doc0) ->
+ DiskAtts = [couch_att:to_disk_term(Att) || Att <- Atts],
+ {ok, SizeInfo} = couch_att:size_info(Atts),
+ AttsStream =
+ case Atts of
+ [Att | _] ->
+ {stream, StreamEngine} = couch_att:fetch(data, Att),
+ StreamEngine;
+ [] ->
+ nil
+ end,
+ Doc1 = Doc0#doc{
+ atts = DiskAtts,
+ meta =
+ [
+ {size_info, SizeInfo},
+ {atts_stream, AttsStream},
+ {ejson_size, couch_ejson_size:encoded_size(Body)}
+ ] ++ Doc0#doc.meta
+ },
+ couch_db_engine:serialize_doc(Db, Doc1)
end,
- Doc1 = Doc0#doc{
- atts = DiskAtts,
- meta = [
- {size_info, SizeInfo},
- {atts_stream, AttsStream},
- {ejson_size, couch_ejson_size:encoded_size(Body)}
- ] ++ Doc0#doc.meta
- },
- couch_db_engine:serialize_doc(Db, Doc1)
- end,
- Bucket) || Bucket <- BucketList].
-
+ Bucket
+ )
+ || Bucket <- BucketList
+ ].
before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType) ->
increment_stat(Db, [couchdb, database_writes]),
% Separate _local docs from normal docs
IsLocal = fun
- (#doc{id= <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
+ (#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>}) -> true;
(_) -> false
end,
{NonRepDocs, Docs2} = lists:partition(IsLocal, Docs),
BucketList = group_alike_docs(Docs2),
- DocBuckets = lists:map(fun(Bucket) ->
- lists:map(fun(Doc) ->
- DocWithBody = couch_doc:with_ejson_body(Doc),
- couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
- end, Bucket)
- end, BucketList),
+ DocBuckets = lists:map(
+ fun(Bucket) ->
+ lists:map(
+ fun(Doc) ->
+ DocWithBody = couch_doc:with_ejson_body(Doc),
+ couch_db_plugin:before_doc_update(Db, DocWithBody, UpdateType)
+ end,
+ Bucket
+ )
+ end,
+ BucketList
+ ),
ValidatePred = fun
(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>}) -> true;
@@ -1363,15 +1564,14 @@ before_docs_update(#db{validate_doc_funs = VDFuns} = Db, Docs, PVFun, UpdateType
Ids = [Id || [#doc{id = Id} | _] <- DocBuckets],
ExistingDocs = get_full_doc_infos(Db, Ids),
{DocBuckets2, DocErrors} = PVFun(Db, DocBuckets, ExistingDocs),
- % remove empty buckets
+ % remove empty buckets
DocBuckets3 = [Bucket || Bucket <- DocBuckets2, Bucket /= []],
{ok, DocBuckets3, NonRepDocs, DocErrors};
false ->
{ok, DocBuckets, NonRepDocs, []}
end.
-
-set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
+set_new_att_revpos(#doc{revs = {RevPos, _Revs}, atts = Atts0} = Doc) ->
Atts = lists:map(
fun(Att) ->
case couch_att:fetch(data, Att) of
@@ -1379,29 +1579,36 @@ set_new_att_revpos(#doc{revs={RevPos,_Revs},atts=Atts0}=Doc) ->
{stream, _} -> Att;
{Fd, _} when is_pid(Fd) -> Att;
% write required so update RevPos
- _ -> couch_att:store(revpos, RevPos+1, Att)
+ _ -> couch_att:store(revpos, RevPos + 1, Att)
end
- end, Atts0),
+ end,
+ Atts0
+ ),
Doc#doc{atts = Atts}.
-
doc_flush_atts(Db, Doc) ->
- Doc#doc{atts=[couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
-
+ Doc#doc{atts = [couch_att:flush(Db, Att) || Att <- Doc#doc.atts]}.
compressible_att_type(MimeType) when is_binary(MimeType) ->
compressible_att_type(?b2l(MimeType));
compressible_att_type(MimeType) ->
TypeExpList = re:split(
- config:get("attachments", "compressible_types",
- ?DEFAULT_COMPRESSIBLE_TYPES),
+ config:get(
+ "attachments",
+ "compressible_types",
+ ?DEFAULT_COMPRESSIBLE_TYPES
+ ),
"\\s*,\\s*",
[{return, list}]
),
lists:any(
fun(TypeExp) ->
- Regexp = ["^\\s*", re:replace(TypeExp, "\\*", ".*"),
- "(?:\\s*;.*?)?\\s*", $$],
+ Regexp = [
+ "^\\s*",
+ re:replace(TypeExp, "\\*", ".*"),
+ "(?:\\s*;.*?)?\\s*",
+ $$
+ ],
re:run(MimeType, Regexp, [caseless]) =/= nomatch
end,
[T || T <- TypeExpList, T /= []]
@@ -1419,74 +1626,80 @@ compressible_att_type(MimeType) ->
% pretend that no Content-MD5 exists.
with_stream(Db, Att, Fun) ->
[InMd5, Type, Enc] = couch_att:fetch([md5, type, encoding], Att),
- BufferSize = config:get_integer("couchdb",
- "attachment_stream_buffer_size", 4096),
- Options = case (Enc =:= identity) andalso compressible_att_type(Type) of
- true ->
- CompLevel = config:get_integer(
- "attachments", "compression_level", 8),
- [
- {buffer_size, BufferSize},
- {encoding, gzip},
- {compression_level, CompLevel}
- ];
- _ ->
- [{buffer_size, BufferSize}]
- end,
+ BufferSize = config:get_integer(
+ "couchdb",
+ "attachment_stream_buffer_size",
+ 4096
+ ),
+ Options =
+ case (Enc =:= identity) andalso compressible_att_type(Type) of
+ true ->
+ CompLevel = config:get_integer(
+ "attachments", "compression_level", 8
+ ),
+ [
+ {buffer_size, BufferSize},
+ {encoding, gzip},
+ {compression_level, CompLevel}
+ ];
+ _ ->
+ [{buffer_size, BufferSize}]
+ end,
{ok, OutputStream} = open_write_stream(Db, Options),
- ReqMd5 = case Fun(OutputStream) of
- {md5, FooterMd5} ->
- case InMd5 of
- md5_in_footer -> FooterMd5;
- _ -> InMd5
- end;
- _ ->
- InMd5
- end,
+ ReqMd5 =
+ case Fun(OutputStream) of
+ {md5, FooterMd5} ->
+ case InMd5 of
+ md5_in_footer -> FooterMd5;
+ _ -> InMd5
+ end;
+ _ ->
+ InMd5
+ end,
{StreamEngine, Len, IdentityLen, Md5, IdentityMd5} =
couch_stream:close(OutputStream),
couch_util:check_md5(IdentityMd5, ReqMd5),
- {AttLen, DiskLen, NewEnc} = case Enc of
- identity ->
- case {Md5, IdentityMd5} of
- {Same, Same} ->
- {Len, IdentityLen, identity};
- _ ->
- {Len, IdentityLen, gzip}
- end;
- gzip ->
- case couch_att:fetch([att_len, disk_len], Att) of
- [AL, DL] when AL =:= undefined orelse DL =:= undefined ->
- % Compressed attachment uploaded through the standalone API.
- {Len, Len, gzip};
- [AL, DL] ->
- % This case is used for efficient push-replication, where a
- % compressed attachment is located in the body of multipart
- % content-type request.
- {AL, DL, gzip}
- end
- end,
- couch_att:store([
- {data, {stream, StreamEngine}},
- {att_len, AttLen},
- {disk_len, DiskLen},
- {md5, Md5},
- {encoding, NewEnc}
- ], Att).
-
+ {AttLen, DiskLen, NewEnc} =
+ case Enc of
+ identity ->
+ case {Md5, IdentityMd5} of
+ {Same, Same} ->
+ {Len, IdentityLen, identity};
+ _ ->
+ {Len, IdentityLen, gzip}
+ end;
+ gzip ->
+ case couch_att:fetch([att_len, disk_len], Att) of
+ [AL, DL] when AL =:= undefined orelse DL =:= undefined ->
+ % Compressed attachment uploaded through the standalone API.
+ {Len, Len, gzip};
+ [AL, DL] ->
+ % This case is used for efficient push-replication, where a
+ % compressed attachment is located in the body of a multipart
+ % content-type request.
+ {AL, DL, gzip}
+ end
+ end,
+ couch_att:store(
+ [
+ {data, {stream, StreamEngine}},
+ {att_len, AttLen},
+ {disk_len, DiskLen},
+ {md5, Md5},
+ {encoding, NewEnc}
+ ],
+ Att
+ ).
open_write_stream(Db, Options) ->
couch_db_engine:open_write_stream(Db, Options).
-
open_read_stream(Db, AttState) ->
couch_db_engine:open_read_stream(Db, AttState).
-
is_active_stream(Db, StreamEngine) ->
couch_db_engine:is_active_stream(Db, StreamEngine).
-
calculate_start_seq(_Db, _Node, Seq) when is_integer(Seq) ->
Seq;
calculate_start_seq(Db, Node, {Seq, Uuid}) ->
@@ -1498,30 +1711,44 @@ calculate_start_seq(Db, _Node, {Seq, {split, Uuid}, EpochNode}) ->
% Find last replicated sequence from split source to target
mem3_rep:find_split_target_seq(Db, EpochNode, Uuid, Seq);
false ->
- couch_log:warning("~p calculate_start_seq not owner "
+ couch_log:warning(
+ "~p calculate_start_seq not owner "
"db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p, epochs: ~p",
- [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]),
+ [?MODULE, Db#db.name, Seq, Uuid, EpochNode, get_epochs(Db)]
+ ),
0
end;
calculate_start_seq(Db, Node, {Seq, Uuid, EpochNode}) ->
case is_prefix(Uuid, get_uuid(Db)) of
true ->
case is_owner(EpochNode, Seq, get_epochs(Db)) of
- true -> Seq;
+ true ->
+ Seq;
false ->
%% Shard might have been moved from another node. We
%% matched the uuid already, try to find last viable
%% sequence we can use
- couch_log:warning( "~p calculate_start_seq not owner, "
+ couch_log:warning(
+ "~p calculate_start_seq not owner, "
" trying replacement db: ~p, seq: ~p, uuid: ~p, "
- "epoch_node: ~p, epochs: ~p", [?MODULE, Db#db.name,
- Seq, Uuid, EpochNode, get_epochs(Db)]),
+ "epoch_node: ~p, epochs: ~p",
+ [
+ ?MODULE,
+ Db#db.name,
+ Seq,
+ Uuid,
+ EpochNode,
+ get_epochs(Db)
+ ]
+ ),
calculate_start_seq(Db, Node, {replace, EpochNode, Uuid, Seq})
end;
false ->
- couch_log:warning("~p calculate_start_seq uuid prefix mismatch "
+ couch_log:warning(
+ "~p calculate_start_seq uuid prefix mismatch "
"db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
- [?MODULE, Db#db.name, Seq, Uuid, EpochNode]),
+ [?MODULE, Db#db.name, Seq, Uuid, EpochNode]
+ ),
%% The file was rebuilt, most likely in a different
%% order, so rewind.
0
@@ -1531,38 +1758,37 @@ calculate_start_seq(Db, _Node, {replace, OriginalNode, Uuid, Seq}) ->
true ->
try
start_seq(get_epochs(Db), OriginalNode, Seq)
- catch throw:epoch_mismatch ->
- couch_log:warning("~p start_seq duplicate uuid on node: ~p "
- "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
- [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]),
- 0
+ catch
+ throw:epoch_mismatch ->
+ couch_log:warning(
+ "~p start_seq duplicate uuid on node: ~p "
+ "db: ~p, seq: ~p, uuid: ~p, epoch_node: ~p",
+ [?MODULE, node(), Db#db.name, Seq, Uuid, OriginalNode]
+ ),
+ 0
end;
false ->
{replace, OriginalNode, Uuid, Seq}
end.
-
validate_epochs(Epochs) ->
%% Assert uniqueness.
case length(Epochs) == length(lists:ukeysort(2, Epochs)) of
- true -> ok;
+ true -> ok;
false -> erlang:error(duplicate_epoch)
end,
%% Assert order.
case Epochs == lists:sort(fun({_, A}, {_, B}) -> B =< A end, Epochs) of
- true -> ok;
+ true -> ok;
false -> erlang:error(epoch_order)
end.
-
is_prefix(Pattern, Subject) ->
- binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
-
+ binary:longest_common_prefix([Pattern, Subject]) == size(Pattern).
is_owner(Node, Seq, Epochs) ->
Node =:= owner_of(Epochs, Seq).
-
owner_of(Db, Seq) when not is_list(Db) ->
owner_of(get_epochs(Db), Seq);
owner_of([], _Seq) ->
@@ -1572,7 +1798,6 @@ owner_of([{EpochNode, EpochSeq} | _Rest], Seq) when Seq > EpochSeq ->
owner_of([_ | Rest], Seq) ->
owner_of(Rest, Seq).
-
start_seq([{OrigNode, EpochSeq} | _], OrigNode, Seq) when Seq > EpochSeq ->
%% OrigNode is the owner of the Seq so we can safely stream from there
Seq;
@@ -1586,43 +1811,34 @@ start_seq([_ | Rest], OrigNode, Seq) ->
start_seq([], _OrigNode, _Seq) ->
throw(epoch_mismatch).
-
fold_docs(Db, UserFun, UserAcc) ->
fold_docs(Db, UserFun, UserAcc, []).
fold_docs(Db, UserFun, UserAcc, Options) ->
couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options).
-
fold_local_docs(Db, UserFun, UserAcc, Options) ->
couch_db_engine:fold_local_docs(Db, UserFun, UserAcc, Options).
-
fold_design_docs(Db, UserFun, UserAcc, Options1) ->
Options2 = set_design_doc_keys(Options1),
couch_db_engine:fold_docs(Db, UserFun, UserAcc, Options2).
-
fold_changes(Db, StartSeq, UserFun, UserAcc) ->
fold_changes(Db, StartSeq, UserFun, UserAcc, []).
-
fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) ->
couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts).
-
fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) ->
fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []).
-
fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts) ->
couch_db_engine:fold_purge_infos(Db, StartPurgeSeq, UFun, UAcc, Opts).
-
count_changes_since(Db, SinceSeq) ->
couch_db_engine:count_changes_since(Db, SinceSeq).
-
%%% Internal function %%%
open_doc_revs_int(Db, IdRevs, Options) ->
Ids = [Id || {Id, _Revs} <- IdRevs],
@@ -1630,106 +1846,125 @@ open_doc_revs_int(Db, IdRevs, Options) ->
lists:zipwith(
fun({Id, Revs}, Lookup) ->
case Lookup of
- #full_doc_info{rev_tree=RevTree} ->
- {FoundRevs, MissingRevs} =
- case Revs of
- all ->
- {couch_key_tree:get_all_leafs(RevTree), []};
- _ ->
- case lists:member(latest, Options) of
- true ->
- couch_key_tree:get_key_leafs(RevTree, Revs);
- false ->
- couch_key_tree:get(RevTree, Revs)
- end
- end,
- FoundResults =
- lists:map(fun({Value, {Pos, [Rev|_]}=FoundRevPath}) ->
- case Value of
- ?REV_MISSING ->
- % we have the rev in our list but know nothing about it
- {{not_found, missing}, {Pos, Rev}};
- #leaf{deleted=IsDeleted, ptr=SummaryPtr} ->
- {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
- end
- end, FoundRevs),
- Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
- {ok, Results};
- not_found when Revs == all ->
- {ok, []};
- not_found ->
- {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
+ #full_doc_info{rev_tree = RevTree} ->
+ {FoundRevs, MissingRevs} =
+ case Revs of
+ all ->
+ {couch_key_tree:get_all_leafs(RevTree), []};
+ _ ->
+ case lists:member(latest, Options) of
+ true ->
+ couch_key_tree:get_key_leafs(RevTree, Revs);
+ false ->
+ couch_key_tree:get(RevTree, Revs)
+ end
+ end,
+ FoundResults =
+ lists:map(
+ fun({Value, {Pos, [Rev | _]} = FoundRevPath}) ->
+ case Value of
+ ?REV_MISSING ->
+ % we have the rev in our list but know nothing about it
+ {{not_found, missing}, {Pos, Rev}};
+ #leaf{deleted = IsDeleted, ptr = SummaryPtr} ->
+ {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
+ end
+ end,
+ FoundRevs
+ ),
+ Results =
+ FoundResults ++
+ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
+ {ok, Results};
+ not_found when Revs == all ->
+ {ok, []};
+ not_found ->
+ {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
end
end,
- IdRevs, LookupResults).
+ IdRevs,
+ LookupResults
+ ).
open_doc_int(Db, <<?LOCAL_DOC_PREFIX, _/binary>> = Id, Options) ->
case couch_db_engine:open_local_docs(Db, [Id]) of
- [#doc{} = Doc] ->
- apply_open_options({ok, Doc}, Options);
- [not_found] ->
- {not_found, missing}
+ [#doc{} = Doc] ->
+ apply_open_options({ok, Doc}, Options);
+ [not_found] ->
+ {not_found, missing}
end;
-open_doc_int(Db, #doc_info{id=Id,revs=[RevInfo|_]}=DocInfo, Options) ->
- #rev_info{deleted=IsDeleted,rev={Pos,RevId},body_sp=Bp} = RevInfo,
- Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos,[RevId]}),
+open_doc_int(Db, #doc_info{id = Id, revs = [RevInfo | _]} = DocInfo, Options) ->
+ #rev_info{deleted = IsDeleted, rev = {Pos, RevId}, body_sp = Bp} = RevInfo,
+ Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos, [RevId]}),
apply_open_options(
- {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}}, Options);
-open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
- #doc_info{revs=[#rev_info{deleted=IsDeleted,rev=Rev,body_sp=Bp}|_]} =
+ {ok, Doc#doc{meta = doc_meta_info(DocInfo, [], Options)}}, Options
+ );
+open_doc_int(Db, #full_doc_info{id = Id, rev_tree = RevTree} = FullDocInfo, Options) ->
+ #doc_info{revs = [#rev_info{deleted = IsDeleted, rev = Rev, body_sp = Bp} | _]} =
DocInfo = couch_doc:to_doc_info(FullDocInfo),
{[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]),
Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath),
apply_open_options(
- {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}}, Options);
+ {ok, Doc#doc{meta = doc_meta_info(DocInfo, RevTree, Options)}}, Options
+ );
open_doc_int(Db, Id, Options) ->
case get_full_doc_info(Db, Id) of
- #full_doc_info{} = FullDocInfo ->
- open_doc_int(Db, FullDocInfo, Options);
- not_found ->
- {not_found, missing}
+ #full_doc_info{} = FullDocInfo ->
+ open_doc_int(Db, FullDocInfo, Options);
+ not_found ->
+ {not_found, missing}
end.
-doc_meta_info(#doc_info{high_seq=Seq,revs=[#rev_info{rev=Rev}|RestInfo]}, RevTree, Options) ->
+doc_meta_info(
+ #doc_info{high_seq = Seq, revs = [#rev_info{rev = Rev} | RestInfo]}, RevTree, Options
+) ->
case lists:member(revs_info, Options) of
- false -> [];
- true ->
- {[{Pos, RevPath}],[]} =
- couch_key_tree:get_full_key_paths(RevTree, [Rev]),
-
- [{revs_info, Pos, lists:map(
- fun({Rev1, ?REV_MISSING}) ->
- {Rev1, missing};
- ({Rev1, Leaf}) ->
- case Leaf#leaf.deleted of
- true ->
- {Rev1, deleted};
- false ->
- {Rev1, available}
- end
- end, RevPath)}]
- end ++
- case lists:member(conflicts, Options) of
- false -> [];
- true ->
- case [Rev1 || #rev_info{rev=Rev1,deleted=false} <- RestInfo] of
- [] -> [];
- ConflictRevs -> [{conflicts, ConflictRevs}]
- end
- end ++
- case lists:member(deleted_conflicts, Options) of
- false -> [];
- true ->
- case [Rev1 || #rev_info{rev=Rev1,deleted=true} <- RestInfo] of
- [] -> [];
- DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
- end
- end ++
- case lists:member(local_seq, Options) of
- false -> [];
- true -> [{local_seq, Seq}]
- end.
+ false ->
+ [];
+ true ->
+ {[{Pos, RevPath}], []} =
+ couch_key_tree:get_full_key_paths(RevTree, [Rev]),
+ [
+ {revs_info, Pos,
+ lists:map(
+ fun
+ ({Rev1, ?REV_MISSING}) ->
+ {Rev1, missing};
+ ({Rev1, Leaf}) ->
+ case Leaf#leaf.deleted of
+ true ->
+ {Rev1, deleted};
+ false ->
+ {Rev1, available}
+ end
+ end,
+ RevPath
+ )}
+ ]
+ end ++
+ case lists:member(conflicts, Options) of
+ false ->
+ [];
+ true ->
+ case [Rev1 || #rev_info{rev = Rev1, deleted = false} <- RestInfo] of
+ [] -> [];
+ ConflictRevs -> [{conflicts, ConflictRevs}]
+ end
+ end ++
+ case lists:member(deleted_conflicts, Options) of
+ false ->
+ [];
+ true ->
+ case [Rev1 || #rev_info{rev = Rev1, deleted = true} <- RestInfo] of
+ [] -> [];
+ DelConflictRevs -> [{deleted_conflicts, DelConflictRevs}]
+ end
+ end ++
+ case lists:member(local_seq, Options) of
+ false -> [];
+ true -> [{local_seq, Seq}]
+ end.
make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) ->
#doc{
@@ -1747,29 +1982,29 @@ make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) ->
body = Bp,
deleted = Deleted
}),
- Doc1 = case Doc0#doc.atts of
- BinAtts when is_binary(BinAtts) ->
- Doc0#doc{
- atts = couch_compress:decompress(BinAtts)
- };
- ListAtts when is_list(ListAtts) ->
- Doc0
- end,
+ Doc1 =
+ case Doc0#doc.atts of
+ BinAtts when is_binary(BinAtts) ->
+ Doc0#doc{
+ atts = couch_compress:decompress(BinAtts)
+ };
+ ListAtts when is_list(ListAtts) ->
+ Doc0
+ end,
after_doc_read(Db, Doc1#doc{
atts = [couch_att:from_disk_term(Db, T) || T <- Doc1#doc.atts]
}).
-
after_doc_read(#db{} = Db, Doc) ->
DocWithBody = couch_doc:with_ejson_body(Doc),
couch_db_plugin:after_doc_read(Db, DocWithBody).
increment_stat(#db{options = Options}, Stat) ->
case lists:member(sys_db, Options) of
- true ->
- ok;
- false ->
- couch_stats:increment_counter(Stat)
+ true ->
+ ok;
+ false ->
+ couch_stats:increment_counter(Stat)
end.
-spec normalize_dbname(list() | binary()) -> binary().
@@ -1779,23 +2014,22 @@ normalize_dbname(DbName) when is_list(DbName) ->
normalize_dbname(DbName) when is_binary(DbName) ->
mem3:dbname(couch_util:drop_dot_couch_ext(DbName)).
-
-spec dbname_suffix(list() | binary()) -> binary().
dbname_suffix(DbName) ->
filename:basename(normalize_dbname(DbName)).
-
validate_dbname(DbName) when is_list(DbName) ->
validate_dbname(?l2b(DbName));
validate_dbname(DbName) when is_binary(DbName) ->
Normalized = normalize_dbname(DbName),
couch_db_plugin:validate_dbname(
- DbName, Normalized, fun validate_dbname_int/2).
+ DbName, Normalized, fun validate_dbname_int/2
+ ).
validate_dbname_int(DbName, Normalized) when is_binary(DbName) ->
DbNoExt = couch_util:drop_dot_couch_ext(DbName),
- case re:run(DbNoExt, ?DBNAME_REGEX, [{capture,none}, dollar_endonly]) of
+ case re:run(DbNoExt, ?DBNAME_REGEX, [{capture, none}, dollar_endonly]) of
match ->
ok;
nomatch ->
@@ -1811,76 +2045,81 @@ is_system_db_name(DbName) when is_binary(DbName) ->
Normalized = normalize_dbname(DbName),
Suffix = filename:basename(Normalized),
case {filename:dirname(Normalized), lists:member(Suffix, ?SYSTEM_DATABASES)} of
- {<<".">>, Result} -> Result;
- {_Prefix, false} -> false;
+ {<<".">>, Result} ->
+ Result;
+ {_Prefix, false} ->
+ false;
{Prefix, true} ->
- ReOpts = [{capture,none}, dollar_endonly],
+ ReOpts = [{capture, none}, dollar_endonly],
re:run(Prefix, ?DBNAME_REGEX, ReOpts) == match
end.
set_design_doc_keys(Options1) ->
- Dir = case lists:keyfind(dir, 1, Options1) of
- {dir, D0} -> D0;
- _ -> fwd
- end,
+ Dir =
+ case lists:keyfind(dir, 1, Options1) of
+ {dir, D0} -> D0;
+ _ -> fwd
+ end,
Options2 = set_design_doc_start_key(Options1, Dir),
set_design_doc_end_key(Options2, Dir).
-
-define(FIRST_DDOC_KEY, <<"_design/">>).
-define(LAST_DDOC_KEY, <<"_design0">>).
-
set_design_doc_start_key(Options, fwd) ->
Key1 = couch_util:get_value(start_key, Options, ?FIRST_DDOC_KEY),
- Key2 = case Key1 < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> Key1
- end,
+ Key2 =
+ case Key1 < ?FIRST_DDOC_KEY of
+ true -> ?FIRST_DDOC_KEY;
+ false -> Key1
+ end,
lists:keystore(start_key, 1, Options, {start_key, Key2});
set_design_doc_start_key(Options, rev) ->
Key1 = couch_util:get_value(start_key, Options, ?LAST_DDOC_KEY),
- Key2 = case Key1 > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> Key1
- end,
+ Key2 =
+ case Key1 > ?LAST_DDOC_KEY of
+ true -> ?LAST_DDOC_KEY;
+ false -> Key1
+ end,
lists:keystore(start_key, 1, Options, {start_key, Key2}).
-
set_design_doc_end_key(Options, fwd) ->
case couch_util:get_value(end_key_gt, Options) of
undefined ->
Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
- Key2 = case Key1 > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> Key1
- end,
+ Key2 =
+ case Key1 > ?LAST_DDOC_KEY of
+ true -> ?LAST_DDOC_KEY;
+ false -> Key1
+ end,
lists:keystore(end_key, 1, Options, {end_key, Key2});
EKeyGT ->
- Key2 = case EKeyGT > ?LAST_DDOC_KEY of
- true -> ?LAST_DDOC_KEY;
- false -> EKeyGT
- end,
+ Key2 =
+ case EKeyGT > ?LAST_DDOC_KEY of
+ true -> ?LAST_DDOC_KEY;
+ false -> EKeyGT
+ end,
lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
end;
set_design_doc_end_key(Options, rev) ->
case couch_util:get_value(end_key_gt, Options) of
undefined ->
Key1 = couch_util:get_value(end_key, Options, ?LAST_DDOC_KEY),
- Key2 = case Key1 < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> Key1
- end,
+ Key2 =
+ case Key1 < ?FIRST_DDOC_KEY of
+ true -> ?FIRST_DDOC_KEY;
+ false -> Key1
+ end,
lists:keystore(end_key, 1, Options, {end_key, Key2});
EKeyGT ->
- Key2 = case EKeyGT < ?FIRST_DDOC_KEY of
- true -> ?FIRST_DDOC_KEY;
- false -> EKeyGT
- end,
+ Key2 =
+ case EKeyGT < ?FIRST_DDOC_KEY of
+ true -> ?FIRST_DDOC_KEY;
+ false -> EKeyGT
+ end,
lists:keystore(end_key_gt, 1, Options, {end_key_gt, Key2})
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -1900,11 +2139,14 @@ teardown(_) ->
validate_dbname_success_test_() ->
Cases =
- generate_cases_with_shards("long/co$mplex-/path+/something")
- ++ generate_cases_with_shards("something")
- ++ lists:append(
- [generate_cases_with_shards(?b2l(SystemDb))
- || SystemDb <- ?SYSTEM_DATABASES]),
+ generate_cases_with_shards("long/co$mplex-/path+/something") ++
+ generate_cases_with_shards("something") ++
+ lists:append(
+ [
+ generate_cases_with_shards(?b2l(SystemDb))
+ || SystemDb <- ?SYSTEM_DATABASES
+ ]
+ ),
{
setup,
fun setup_all/0,
@@ -1918,12 +2160,13 @@ validate_dbname_success_test_() ->
}.
validate_dbname_fail_test_() ->
- Cases = generate_cases("_long/co$mplex-/path+/_something")
- ++ generate_cases("_something")
- ++ generate_cases_with_shards("long/co$mplex-/path+/_something#")
- ++ generate_cases_with_shards("long/co$mplex-/path+/some.thing")
- ++ generate_cases("!abcdefg/werwej/_users")
- ++ generate_cases_with_shards("!abcdefg/werwej/_users"),
+ Cases =
+ generate_cases("_long/co$mplex-/path+/_something") ++
+ generate_cases("_something") ++
+ generate_cases_with_shards("long/co$mplex-/path+/_something#") ++
+ generate_cases_with_shards("long/co$mplex-/path+/some.thing") ++
+ generate_cases("!abcdefg/werwej/_users") ++
+ generate_cases_with_shards("!abcdefg/werwej/_users"),
{
setup,
fun setup_all/0,
@@ -1937,41 +2180,56 @@ validate_dbname_fail_test_() ->
}.
normalize_dbname_test_() ->
- Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
- ++ generate_cases_with_shards("_something"),
+ Cases =
+ generate_cases_with_shards("long/co$mplex-/path+/_something") ++
+ generate_cases_with_shards("_something"),
WithExpected = [{?l2b(filename:rootname(A)), B} || {A, B} <- Cases],
- [{test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))}
- || {Expected, Db} <- WithExpected].
+ [
+ {test_name({Expected, Db}), ?_assertEqual(Expected, normalize_dbname(Db))}
+ || {Expected, Db} <- WithExpected
+ ].
dbname_suffix_test_() ->
- Cases = generate_cases_with_shards("long/co$mplex-/path+/_something")
- ++ generate_cases_with_shards("_something"),
+ Cases =
+ generate_cases_with_shards("long/co$mplex-/path+/_something") ++
+ generate_cases_with_shards("_something"),
WithExpected = [{?l2b(filename:basename(Arg)), Db} || {Arg, Db} <- Cases],
- [{test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
- || {Expected, Db} <- WithExpected].
+ [
+ {test_name({Expected, Db}), ?_assertEqual(Expected, dbname_suffix(Db))}
+ || {Expected, Db} <- WithExpected
+ ].
is_system_db_name_test_() ->
- Cases = lists:append([
- generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
- || Db <- ?SYSTEM_DATABASES]
- ++ [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES
- ]),
- WithExpected = [{?l2b(filename:basename(filename:rootname(Arg))), Db}
- || {Arg, Db} <- Cases],
- [{test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES",
- ?_assert(is_system_db_name(Db))} || {Expected, Db} <- WithExpected].
+ Cases = lists:append(
+ [
+ generate_cases_with_shards("long/co$mplex-/path+/" ++ ?b2l(Db))
+ || Db <- ?SYSTEM_DATABASES
+ ] ++
+ [generate_cases_with_shards(?b2l(Db)) || Db <- ?SYSTEM_DATABASES]
+ ),
+ WithExpected = [
+ {?l2b(filename:basename(filename:rootname(Arg))), Db}
+ || {Arg, Db} <- Cases
+ ],
+ [
+ {test_name({Expected, Db}) ++ " in ?SYSTEM_DATABASES", ?_assert(is_system_db_name(Db))}
+ || {Expected, Db} <- WithExpected
+ ].
should_pass_validate_dbname(DbName) ->
{test_name(DbName), ?_assertEqual(ok, validate_dbname(DbName))}.
should_fail_validate_dbname(DbName) ->
- {test_name(DbName), ?_test(begin
- Result = validate_dbname(DbName),
- ?assertMatch({error, {illegal_database_name, _}}, Result),
- {error, {illegal_database_name, FailedDbName}} = Result,
- ?assertEqual(to_binary(DbName), FailedDbName),
- ok
- end)}.
+ {
+ test_name(DbName),
+ ?_test(begin
+ Result = validate_dbname(DbName),
+ ?assertMatch({error, {illegal_database_name, _}}, Result),
+ {error, {illegal_database_name, FailedDbName}} = Result,
+ ?assertEqual(to_binary(DbName), FailedDbName),
+ ok
+ end)
+ }.
calculate_start_seq_test_() ->
{
@@ -2087,9 +2345,12 @@ generate_cases_with_shards(DbName) ->
DbNameWithShard = add_shard(DbName),
DbNameWithShardAndExtension = add_shard(DbName) ++ ".couch",
Cases = [
- DbName, ?l2b(DbName),
- DbNameWithShard, ?l2b(DbNameWithShard),
- DbNameWithShardAndExtension, ?l2b(DbNameWithShardAndExtension)
+ DbName,
+ ?l2b(DbName),
+ DbNameWithShard,
+ ?l2b(DbNameWithShard),
+ DbNameWithShardAndExtension,
+ ?l2b(DbNameWithShardAndExtension)
],
[{DbName, Case} || Case <- Cases].
diff --git a/src/couch/src/couch_db_engine.erl b/src/couch/src/couch_db_engine.erl
index 918dabcca..de4a42495 100644
--- a/src/couch/src/couch_db_engine.erl
+++ b/src/couch/src/couch_db_engine.erl
@@ -12,11 +12,9 @@
-module(couch_db_engine).
-
-include("couch_db.hrl").
-include("couch_db_int.hrl").
-
-type filepath() :: iolist().
-type docid() :: binary().
-type rev() :: {non_neg_integer(), binary()}.
@@ -26,75 +24,82 @@
-type purge_seq() :: non_neg_integer().
-type doc_pair() :: {
- #full_doc_info{} | not_found,
- #full_doc_info{} | not_found
- }.
+ #full_doc_info{} | not_found,
+ #full_doc_info{} | not_found
+}.
-type doc_pairs() :: [doc_pair()].
-type db_open_options() :: [
- create
- ].
+ create
+].
-type delete_options() :: [
- {context, delete | compaction} |
- sync
- ].
+ {context, delete | compaction}
+ | sync
+].
-type purge_info() :: {purge_seq(), uuid(), docid(), revs()}.
--type epochs() :: [{Node::atom(), UpdateSeq::non_neg_integer()}].
--type size_info() :: [{Name::atom(), Size::non_neg_integer()}].
+-type epochs() :: [{Node :: atom(), UpdateSeq :: non_neg_integer()}].
+-type size_info() :: [{Name :: atom(), Size :: non_neg_integer()}].
-type partition_info() :: [
- {partition, Partition::binary()} |
- {doc_count, DocCount::non_neg_integer()} |
- {doc_del_count, DocDelCount::non_neg_integer()} |
- {sizes, size_info()}
+ {partition, Partition :: binary()}
+ | {doc_count, DocCount :: non_neg_integer()}
+ | {doc_del_count, DocDelCount :: non_neg_integer()}
+ | {sizes, size_info()}
].
-type write_stream_options() :: [
- {buffer_size, Size::pos_integer()} |
- {encoding, atom()} |
- {compression_level, non_neg_integer()}
- ].
+ {buffer_size, Size :: pos_integer()}
+ | {encoding, atom()}
+ | {compression_level, non_neg_integer()}
+].
-type doc_fold_options() :: [
- {start_key, Key::any()} |
- {end_key, Key::any()} |
- {end_key_gt, Key::any()} |
- {dir, fwd | rev} |
- include_reductions |
- include_deleted
- ].
+ {start_key, Key :: any()}
+ | {end_key, Key :: any()}
+ | {end_key_gt, Key :: any()}
+ | {dir, fwd | rev}
+ | include_reductions
+ | include_deleted
+].
-type changes_fold_options() :: [
- {dir, fwd | rev}
- ].
+ {dir, fwd | rev}
+].
-type purge_fold_options() :: [
- {start_key, Key::any()} |
- {end_key, Key::any()} |
- {end_key_gt, Key::any()} |
- {dir, fwd | rev}
- ].
+ {start_key, Key :: any()}
+ | {end_key, Key :: any()}
+ | {end_key_gt, Key :: any()}
+ | {dir, fwd | rev}
+].
-type db_handle() :: any().
--type doc_fold_fun() :: fun((#full_doc_info{}, UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
--type local_doc_fold_fun() :: fun((#doc{}, UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
--type changes_fold_fun() :: fun((#doc_info{}, UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
--type purge_fold_fun() :: fun((purge_info(), UserAcc::any()) ->
- {ok, NewUserAcc::any()} |
- {stop, NewUserAcc::any()}).
-
+-type doc_fold_fun() :: fun(
+ (#full_doc_info{}, UserAcc :: any()) ->
+ {ok, NewUserAcc :: any()}
+ | {stop, NewUserAcc :: any()}
+).
+
+-type local_doc_fold_fun() :: fun(
+ (#doc{}, UserAcc :: any()) ->
+ {ok, NewUserAcc :: any()}
+ | {stop, NewUserAcc :: any()}
+).
+
+-type changes_fold_fun() :: fun(
+ (#doc_info{}, UserAcc :: any()) ->
+ {ok, NewUserAcc :: any()}
+ | {stop, NewUserAcc :: any()}
+).
+
+-type purge_fold_fun() :: fun(
+ (purge_info(), UserAcc :: any()) ->
+ {ok, NewUserAcc :: any()}
+ | {stop, NewUserAcc :: any()}
+).
% This is called by couch_server to determine which
% engine should be used for the given database. DbPath
@@ -102,8 +107,7 @@
% extension for a given engine. The first engine to
% return true is the engine that will be used for the
% database.
--callback exists(DbPath::filepath()) -> boolean().
-
+-callback exists(DbPath :: filepath()) -> boolean().
% This is called by couch_server to delete a database. It
% is called from inside the couch_server process which
@@ -112,11 +116,11 @@
% context. Although since this is executed in the context
% of couch_server it should return relatively quickly.
-callback delete(
- RootDir::filepath(),
- DbPath::filepath(),
- DelOpts::delete_options()) ->
- ok | {error, Reason::atom()}.
-
+ RootDir :: filepath(),
+ DbPath :: filepath(),
+ DelOpts :: delete_options()
+) ->
+ ok | {error, Reason :: atom()}.
% This function can be called from multiple contexts. It
% will either be called just before a call to delete/3 above
@@ -125,11 +129,11 @@
% remove any temporary files used during compaction that
% may be used to recover from a failed compaction swap.
-callback delete_compaction_files(
- RootDir::filepath(),
- DbPath::filepath(),
- DelOpts::delete_options()) ->
- ok.
-
+ RootDir :: filepath(),
+ DbPath :: filepath(),
+ DelOpts :: delete_options()
+) ->
+ ok.
% This is called from the couch_db_updater:init/1 context. As
% such this means that it is guaranteed to only have one process
@@ -145,41 +149,36 @@
% it's guaranteed that the handle will only ever be mutated
% in a single threaded context (ie, within the couch_db_updater
% process).
--callback init(DbPath::filepath(), db_open_options()) ->
- {ok, DbHandle::db_handle()}.
-
+-callback init(DbPath :: filepath(), db_open_options()) ->
+ {ok, DbHandle :: db_handle()}.
% This is called in the context of couch_db_updater:terminate/2
% and as such has the same properties for init/2. It's guaranteed
% to be consistent for a given database but may be called by many
% databases concurrently.
--callback terminate(Reason::any(), DbHandle::db_handle()) -> Ignored::any().
-
+-callback terminate(Reason :: any(), DbHandle :: db_handle()) -> Ignored :: any().
% This is called in the context of couch_db_updater:handle_call/3
% for any message that is unknown. It can be used to handle messages
% from asynchronous processes like the engine's compactor if it has one.
--callback handle_db_updater_call(Msg::any(), DbHandle::db_handle()) ->
- {reply, Resp::any(), NewDbHandle::db_handle()} |
- {stop, Reason::any(), Resp::any(), NewDbHandle::db_handle()}.
-
+-callback handle_db_updater_call(Msg :: any(), DbHandle :: db_handle()) ->
+ {reply, Resp :: any(), NewDbHandle :: db_handle()}
+ | {stop, Reason :: any(), Resp :: any(), NewDbHandle :: db_handle()}.
% This is called in the context of couch_db_updater:handle_info/2
% and has the same properties as handle_call/3.
--callback handle_db_updater_info(Msg::any(), DbHandle::db_handle()) ->
- {noreply, NewDbHandle::db_handle()} |
- {noreply, NewDbHandle::db_handle(), Timeout::timeout()} |
- {stop, Reason::any(), NewDbHandle::db_handle()}.
-
+-callback handle_db_updater_info(Msg :: any(), DbHandle :: db_handle()) ->
+ {noreply, NewDbHandle :: db_handle()}
+ | {noreply, NewDbHandle :: db_handle(), Timeout :: timeout()}
+ | {stop, Reason :: any(), NewDbHandle :: db_handle()}.
% These functions are called by any process opening or closing
% a database. As such they need to be able to handle being
% called concurrently. For example, the legacy engine uses these
% to add monitors to the main engine process.
--callback incref(DbHandle::db_handle()) -> {ok, NewDbHandle::db_handle()}.
--callback decref(DbHandle::db_handle()) -> ok.
--callback monitored_by(DbHande::db_handle()) -> [pid()].
-
+-callback incref(DbHandle :: db_handle()) -> {ok, NewDbHandle :: db_handle()}.
+-callback decref(DbHandle :: db_handle()) -> ok.
+-callback monitored_by(DbHande :: db_handle()) -> [pid()].
% This is called in the context of couch_db_updater:handle_info/2
% and should return the timestamp of the last activity of
@@ -187,8 +186,7 @@
% value would be hard to report, it's ok to just return the
% result of os:timestamp/0 as this will just disable idle
% databases from automatically closing.
--callback last_activity(DbHandle::db_handle()) -> erlang:timestamp().
-
+-callback last_activity(DbHandle :: db_handle()) -> erlang:timestamp().
% All of the get_* functions may be called from many
% processes concurrently.
@@ -196,25 +194,21 @@
% The database should make a note of the update sequence when it
% was last compacted. If the database doesn't need compacting it
% can just hard code a return value of 0.
--callback get_compacted_seq(DbHandle::db_handle()) ->
- CompactedSeq::non_neg_integer().
-
+-callback get_compacted_seq(DbHandle :: db_handle()) ->
+ CompactedSeq :: non_neg_integer().
% The number of documents in the database which have all leaf
% revisions marked as deleted.
--callback get_del_doc_count(DbHandle::db_handle()) ->
- DelDocCount::non_neg_integer().
-
+-callback get_del_doc_count(DbHandle :: db_handle()) ->
+ DelDocCount :: non_neg_integer().
% This number is reported in the database info properties and
% as such can be any JSON value.
--callback get_disk_version(DbHandle::db_handle()) -> Version::json().
-
+-callback get_disk_version(DbHandle :: db_handle()) -> Version :: json().
% The number of documents in the database that have one or more
% leaf revisions not marked as deleted.
--callback get_doc_count(DbHandle::db_handle()) -> DocCount::non_neg_integer().
-
+-callback get_doc_count(DbHandle :: db_handle()) -> DocCount :: non_neg_integer().
% The epochs track which node owned the database starting at
% a given update sequence. Each time a database is opened it
@@ -222,36 +216,29 @@
% for the current node it should add an entry that will be
% written the next time a write is performed. An entry is
% simply a {node(), CurrentUpdateSeq} tuple.
--callback get_epochs(DbHandle::db_handle()) -> Epochs::epochs().
-
+-callback get_epochs(DbHandle :: db_handle()) -> Epochs :: epochs().
% Get the current purge sequence known to the engine. This
% value should be updated during calls to purge_docs.
--callback get_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
+-callback get_purge_seq(DbHandle :: db_handle()) -> purge_seq().
% Get the oldest purge sequence known to the engine
--callback get_oldest_purge_seq(DbHandle::db_handle()) -> purge_seq().
-
+-callback get_oldest_purge_seq(DbHandle :: db_handle()) -> purge_seq().
% Get the purged infos limit. This should just return the last
% value that was passed to set_purge_infos_limit/2.
--callback get_purge_infos_limit(DbHandle::db_handle()) -> pos_integer().
-
+-callback get_purge_infos_limit(DbHandle :: db_handle()) -> pos_integer().
% Get the revision limit. This should just return the last
% value that was passed to set_revs_limit/2.
--callback get_revs_limit(DbHandle::db_handle()) -> RevsLimit::pos_integer().
-
+-callback get_revs_limit(DbHandle :: db_handle()) -> RevsLimit :: pos_integer().
% Get the current security properties. This should just return
% the last value that was passed to set_security/2.
--callback get_security(DbHandle::db_handle()) -> SecProps::any().
-
+-callback get_security(DbHandle :: db_handle()) -> SecProps :: any().
% Get the current properties.
--callback get_props(DbHandle::db_handle()) -> Props::[any()].
-
+-callback get_props(DbHandle :: db_handle()) -> Props :: [any()].
% This information is displayed in the database info properties. It
% should just be a list of {Name::atom(), Size::non_neg_integer()}
@@ -266,8 +253,7 @@
% external - Number of bytes that would be required to represent the
% contents outside of the database (for capacity and backup
% planning)
--callback get_size_info(DbHandle::db_handle()) -> SizeInfo::size_info().
-
+-callback get_size_info(DbHandle :: db_handle()) -> SizeInfo :: size_info().
% This returns the information for the given partition.
% It should just be a list of {Name::atom(), Size::non_neg_integer()}
@@ -277,56 +263,49 @@
%
% external - Number of bytes that would be required to represent the
% contents of this partition outside of the database
--callback get_partition_info(DbHandle::db_handle(), Partition::binary()) ->
+-callback get_partition_info(DbHandle :: db_handle(), Partition :: binary()) ->
partition_info().
-
% The current update sequence of the database. The update
% sequence should be incremented for every revision added to
% the database.
--callback get_update_seq(DbHandle::db_handle()) -> UpdateSeq::non_neg_integer().
-
+-callback get_update_seq(DbHandle :: db_handle()) -> UpdateSeq :: non_neg_integer().
% Whenever a database is created it should generate a
% persistent UUID for identification in case the shard should
% ever need to be moved between nodes in a cluster.
--callback get_uuid(DbHandle::db_handle()) -> UUID::binary().
-
+-callback get_uuid(DbHandle :: db_handle()) -> UUID :: binary().
% These functions are only called by couch_db_updater and
% as such are guaranteed to be single threaded calls. The
% database should simply store these values somewhere so
% they can be returned by the corresponding get_* calls.
--callback set_revs_limit(DbHandle::db_handle(), RevsLimit::pos_integer()) ->
- {ok, NewDbHandle::db_handle()}.
-
+-callback set_revs_limit(DbHandle :: db_handle(), RevsLimit :: pos_integer()) ->
+ {ok, NewDbHandle :: db_handle()}.
--callback set_purge_infos_limit(DbHandle::db_handle(), Limit::pos_integer()) ->
- {ok, NewDbHandle::db_handle()}.
-
-
--callback set_security(DbHandle::db_handle(), SecProps::any()) ->
- {ok, NewDbHandle::db_handle()}.
+-callback set_purge_infos_limit(DbHandle :: db_handle(), Limit :: pos_integer()) ->
+ {ok, NewDbHandle :: db_handle()}.
+-callback set_security(DbHandle :: db_handle(), SecProps :: any()) ->
+ {ok, NewDbHandle :: db_handle()}.
% This function is only called by couch_db_updater and
% as such is guaranteed to be a single threaded call. The
% database should simply store the provided property list
% unaltered.
--callback set_props(DbHandle::db_handle(), Props::any()) ->
- {ok, NewDbHandle::db_handle()}.
-
+-callback set_props(DbHandle :: db_handle(), Props :: any()) ->
+ {ok, NewDbHandle :: db_handle()}.
% Set the current update sequence of the database. The intention is to use this
% when copying a database such that the destination update sequence should
% match exactly the source update sequence.
-callback set_update_seq(
- DbHandle::db_handle(),
- UpdateSeq::non_neg_integer()) ->
- {ok, NewDbHandle::db_handle()}.
-
+ DbHandle :: db_handle(),
+ UpdateSeq :: non_neg_integer()
+) ->
+ {ok, NewDbHandle :: db_handle()}.
% This function will be called by many processes concurrently.
% It should return a #full_doc_info{} record or not_found for
@@ -337,9 +316,8 @@
% were present in the database when the DbHandle was retrieved
% from couch_server. It is currently unknown what would break
% if a storage engine deviated from that property.
--callback open_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
- [#full_doc_info{} | not_found].
-
+-callback open_docs(DbHandle :: db_handle(), DocIds :: [docid()]) ->
+ [#full_doc_info{} | not_found].
% This function will be called by many processes concurrently.
% It should return a #doc{} record or not_found for every
@@ -349,9 +327,8 @@
% apply to this function (although this function is called
% rather less frequently so it may not be as big of an
% issue).
--callback open_local_docs(DbHandle::db_handle(), DocIds::[docid()]) ->
- [#doc{} | not_found].
-
+-callback open_local_docs(DbHandle :: db_handle(), DocIds :: [docid()]) ->
+ [#doc{} | not_found].
% This function will be called from many contexts concurrently.
% The provided RawDoc is a #doc{} record that has its body
@@ -360,18 +337,16 @@
% This API exists so that storage engines can store document
% bodies externally from the #full_doc_info{} record (which
% is the traditional approach and is recommended).
--callback read_doc_body(DbHandle::db_handle(), RawDoc::doc()) ->
- doc().
-
+-callback read_doc_body(DbHandle :: db_handle(), RawDoc :: doc()) ->
+ doc().
% This function will be called from many contexts concurrently.
% If the storage engine has a purge_info() record for any of the
% provided UUIDs, those purge_info() records should be returned. The
% resulting list should have the same length as the input list of
% UUIDs.
--callback load_purge_infos(DbHandle::db_handle(), [uuid()]) ->
- [purge_info() | not_found].
-
+-callback load_purge_infos(DbHandle :: db_handle(), [uuid()]) ->
+ [purge_info() | not_found].
% This function is called concurrently by any client process
% that is writing a document. It should accept a #doc{}
@@ -382,9 +357,8 @@
% document bodies in parallel by client processes rather
% than forcing all compression to occur single threaded
% in the context of the couch_db_updater process.
--callback serialize_doc(DbHandle::db_handle(), Doc::doc()) ->
- doc().
-
+-callback serialize_doc(DbHandle :: db_handle(), Doc :: doc()) ->
+ doc().
% This function is called in the context of a couch_db_updater
% which means its single threaded for the given DbHandle.
@@ -397,9 +371,8 @@
% The BytesWritten return value is used to determine the number
% of active bytes in the database, which is used to determine
% when to compact this database.
--callback write_doc_body(DbHandle::db_handle(), Doc::doc()) ->
- {ok, FlushedDoc::doc(), BytesWritten::non_neg_integer()}.
-
+-callback write_doc_body(DbHandle :: db_handle(), Doc :: doc()) ->
+ {ok, FlushedDoc :: doc(), BytesWritten :: non_neg_integer()}.
% This function is called from the context of couch_db_updater
% and as such is guaranteed single threaded for the given
@@ -435,11 +408,11 @@
% batches are non-deterministic (from the point of view of the
% client).
-callback write_doc_infos(
- DbHandle::db_handle(),
- Pairs::doc_pairs(),
- LocalDocs::[#doc{}]) ->
- {ok, NewDbHandle::db_handle()}.
-
+ DbHandle :: db_handle(),
+ Pairs :: doc_pairs(),
+ LocalDocs :: [#doc{}]
+) ->
+ {ok, NewDbHandle :: db_handle()}.
% This function is called from the context of couch_db_updater
% and as such is guaranteed single threaded for the given
@@ -470,25 +443,22 @@
% revisions that were requested to be purged. This should be persisted
% in such a way that we can efficiently load purge_info() by its UUID
% as well as iterate over purge_info() entries in order of their PurgeSeq.
--callback purge_docs(DbHandle::db_handle(), [doc_pair()], [purge_info()]) ->
- {ok, NewDbHandle::db_handle()}.
-
+-callback purge_docs(DbHandle :: db_handle(), [doc_pair()], [purge_info()]) ->
+ {ok, NewDbHandle :: db_handle()}.
% This function should be called from a single threaded context and
% should be used to copy purge infos from one database to another
% when copying a database.
--callback copy_purge_infos(DbHandle::db_handle(), [purge_info()]) ->
- {ok, NewDbHandle::db_handle()}.
-
+-callback copy_purge_infos(DbHandle :: db_handle(), [purge_info()]) ->
+ {ok, NewDbHandle :: db_handle()}.
% This function is called in the context of couch_db_updater and
% as such is single threaded for any given DbHandle.
%
% This call is made periodically to ensure that the database has
% stored all updates on stable storage. (ie, here is where you fsync).
--callback commit_data(DbHandle::db_handle()) ->
- {ok, NewDbHande::db_handle()}.
-
+-callback commit_data(DbHandle :: db_handle()) ->
+ {ok, NewDbHande :: db_handle()}.
% This function is called by multiple processes concurrently.
%
@@ -502,20 +472,18 @@
% Currently an engine can elect to not implement these APIs
% by throwing the atom not_supported.
-callback open_write_stream(
- DbHandle::db_handle(),
- Options::write_stream_options()) ->
- {ok, pid()}.
-
+ DbHandle :: db_handle(),
+ Options :: write_stream_options()
+) ->
+ {ok, pid()}.
% See the documentation for open_write_stream
--callback open_read_stream(DbHandle::db_handle(), StreamDiskInfo::any()) ->
- {ok, {Module::atom(), ReadStreamState::any()}}.
-
+-callback open_read_stream(DbHandle :: db_handle(), StreamDiskInfo :: any()) ->
+ {ok, {Module :: atom(), ReadStreamState :: any()}}.
% See the documentation for open_write_stream
--callback is_active_stream(DbHandle::db_handle(), ReadStreamState::any()) ->
- boolean().
-
+-callback is_active_stream(DbHandle :: db_handle(), ReadStreamState :: any()) ->
+ boolean().
% This function is called by many processes concurrently.
%
@@ -567,12 +535,12 @@
% that actually happening so a storage engine that includes new results
% between invocations shouldn't have any issues.
-callback fold_docs(
- DbHandle::db_handle(),
- UserFold::doc_fold_fun(),
- UserAcc::any(),
- doc_fold_options()) ->
- {ok, LastUserAcc::any()}.
-
+ DbHandle :: db_handle(),
+ UserFold :: doc_fold_fun(),
+ UserAcc :: any(),
+ doc_fold_options()
+) ->
+ {ok, LastUserAcc :: any()}.
% This function may be called by many processes concurrently.
%
@@ -580,12 +548,12 @@
% should only return local documents and the first argument to the
% user function is a #doc{} record, not a #full_doc_info{}.
-callback fold_local_docs(
- DbHandle::db_handle(),
- UserFold::local_doc_fold_fun(),
- UserAcc::any(),
- doc_fold_options()) ->
- {ok, LastUserAcc::any()}.
-
+ DbHandle :: db_handle(),
+ UserFold :: local_doc_fold_fun(),
+ UserAcc :: any(),
+ doc_fold_options()
+) ->
+ {ok, LastUserAcc :: any()}.
% This function may be called by many processes concurrently.
%
@@ -608,13 +576,13 @@
% The only option currently supported by the API is the `dir`
% option that should behave the same as for fold_docs.
-callback fold_changes(
- DbHandle::db_handle(),
- StartSeq::non_neg_integer(),
- UserFold::changes_fold_fun(),
- UserAcc::any(),
- changes_fold_options()) ->
- {ok, LastUserAcc::any()}.
-
+ DbHandle :: db_handle(),
+ StartSeq :: non_neg_integer(),
+ UserFold :: changes_fold_fun(),
+ UserAcc :: any(),
+ changes_fold_options()
+) ->
+ {ok, LastUserAcc :: any()}.
% This function may be called by many processes concurrently.
%
@@ -623,13 +591,13 @@
%
% The StartPurgeSeq parameter indicates where the fold should start *after*.
-callback fold_purge_infos(
- DbHandle::db_handle(),
- StartPurgeSeq::purge_seq(),
- UserFold::purge_fold_fun(),
- UserAcc::any(),
- purge_fold_options()) ->
- {ok, LastUserAcc::any()}.
-
+ DbHandle :: db_handle(),
+ StartPurgeSeq :: purge_seq(),
+ UserFold :: purge_fold_fun(),
+ UserAcc :: any(),
+ purge_fold_options()
+) ->
+ {ok, LastUserAcc :: any()}.
% This function may be called by many processes concurrently.
%
@@ -647,10 +615,10 @@
% _active_tasks entry if the storage engine isn't accounted for by the
% client.
-callback count_changes_since(
- DbHandle::db_handle(),
- UpdateSeq::non_neg_integer()) ->
- TotalChanges::non_neg_integer().
-
+ DbHandle :: db_handle(),
+ UpdateSeq :: non_neg_integer()
+) ->
+ TotalChanges :: non_neg_integer().
% This function is called in the context of couch_db_updater and as
% such is guaranteed to be single threaded for the given DbHandle.
@@ -666,12 +634,12 @@
% must be the same engine that started the compaction and CompactInfo
% is an arbitrary term that's passed to finish_compaction/4.
-callback start_compaction(
- DbHandle::db_handle(),
- DbName::binary(),
- Options::db_open_options(),
- Parent::pid()) ->
- {ok, NewDbHandle::db_handle(), CompactorPid::pid()}.
-
+ DbHandle :: db_handle(),
+ DbName :: binary(),
+ Options :: db_open_options(),
+ Parent :: pid()
+) ->
+ {ok, NewDbHandle :: db_handle(), CompactorPid :: pid()}.
% This function is called in the context of couch_db_updater and as
% such is guaranteed to be single threaded for the given DbHandle.
@@ -683,12 +651,12 @@
% to update the DbHandle state of the couch_db_updater it can as
% finish_compaction/4 is called in the context of the couch_db_updater.
-callback finish_compaction(
- OldDbHandle::db_handle(),
- DbName::binary(),
- Options::db_open_options(),
- CompactInfo::any()) ->
- {ok, CompactedDbHandle::db_handle(), CompactorPid::pid() | undefined}.
-
+ OldDbHandle :: db_handle(),
+ DbName :: binary(),
+ Options :: db_open_options(),
+ CompactInfo :: any()
+) ->
+ {ok, CompactedDbHandle :: db_handle(), CompactorPid :: pid() | undefined}.
-export([
exists/2,
@@ -757,34 +725,29 @@
trigger_on_compact/1
]).
-
exists(Engine, DbPath) ->
Engine:exists(DbPath).
-
delete(Engine, RootDir, DbPath, DelOpts) when is_list(DelOpts) ->
Engine:delete(RootDir, DbPath, DelOpts).
-
-delete_compaction_files(Engine, RootDir, DbPath, DelOpts)
- when is_list(DelOpts) ->
+delete_compaction_files(Engine, RootDir, DbPath, DelOpts) when
+ is_list(DelOpts)
+->
Engine:delete_compaction_files(RootDir, DbPath, DelOpts).
-
init(Engine, DbPath, Options) ->
case Engine:init(DbPath, Options) of
- {ok, EngineState} ->
- {ok, {Engine, EngineState}};
- Error ->
- throw(Error)
+ {ok, EngineState} ->
+ {ok, {Engine, EngineState}};
+ Error ->
+ throw(Error)
end.
-
terminate(Reason, #db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:terminate(Reason, EngineState).
-
handle_db_updater_call(Msg, _From, #db{} = Db) ->
#db{
engine = {Engine, EngineState}
@@ -796,7 +759,6 @@ handle_db_updater_call(Msg, _From, #db{} = Db) ->
{stop, Reason, Resp, Db#db{engine = {Engine, NewState}}}
end.
-
handle_db_updater_info(Msg, #db{} = Db) ->
#db{
name = Name,
@@ -812,98 +774,79 @@ handle_db_updater_info(Msg, #db{} = Db) ->
{stop, Reason, Db#db{engine = {Engine, NewState}}}
end.
-
incref(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewState} = Engine:incref(EngineState),
{ok, Db#db{engine = {Engine, NewState}}}.
-
decref(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:decref(EngineState).
-
monitored_by(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:monitored_by(EngineState).
-
last_activity(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:last_activity(EngineState).
-
get_engine(#db{} = Db) ->
#db{engine = {Engine, _}} = Db,
Engine.
-
get_compacted_seq(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_compacted_seq(EngineState).
-
get_del_doc_count(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_del_doc_count(EngineState).
-
get_disk_version(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_disk_version(EngineState).
-
get_doc_count(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_doc_count(EngineState).
-
get_epochs(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_epochs(EngineState).
-
get_purge_seq(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_purge_seq(EngineState).
-
get_oldest_purge_seq(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_oldest_purge_seq(EngineState).
-
get_purge_infos_limit(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_purge_infos_limit(EngineState).
-
get_revs_limit(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_revs_limit(EngineState).
-
get_security(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_security(EngineState).
-
get_props(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_props(EngineState).
-
get_size_info(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_size_info(EngineState).
-
get_partition_info(#db{} = Db, Partition) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_partition_info(EngineState, Partition).
-
get_update_seq(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_update_seq(EngineState).
@@ -912,134 +855,113 @@ get_uuid(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:get_uuid(EngineState).
-
set_revs_limit(#db{} = Db, RevsLimit) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:set_revs_limit(EngineState, RevsLimit),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
set_purge_infos_limit(#db{} = Db, PurgedDocsLimit) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:set_purge_infos_limit(EngineState, PurgedDocsLimit),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
set_security(#db{} = Db, SecProps) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:set_security(EngineState, SecProps),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
set_props(#db{} = Db, Props) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:set_props(EngineState, Props),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
set_update_seq(#db{} = Db, UpdateSeq) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:set_update_seq(EngineState, UpdateSeq),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
open_docs(#db{} = Db, DocIds) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:open_docs(EngineState, DocIds).
-
open_local_docs(#db{} = Db, DocIds) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:open_local_docs(EngineState, DocIds).
-
read_doc_body(#db{} = Db, RawDoc) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:read_doc_body(EngineState, RawDoc).
-
load_purge_infos(#db{} = Db, UUIDs) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:load_purge_infos(EngineState, UUIDs).
-
serialize_doc(#db{} = Db, #doc{} = Doc) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:serialize_doc(EngineState, Doc).
-
write_doc_body(#db{} = Db, #doc{} = Doc) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:write_doc_body(EngineState, Doc).
-
write_doc_infos(#db{} = Db, DocUpdates, LocalDocs) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:write_doc_infos(EngineState, DocUpdates, LocalDocs),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
purge_docs(#db{} = Db, DocUpdates, Purges) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:purge_docs(
- EngineState, DocUpdates, Purges),
+ EngineState, DocUpdates, Purges
+ ),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
copy_purge_infos(#db{} = Db, Purges) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:copy_purge_infos(
- EngineState, Purges),
+ EngineState, Purges
+ ),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
commit_data(#db{} = Db) ->
#db{engine = {Engine, EngineState}} = Db,
{ok, NewSt} = Engine:commit_data(EngineState),
{ok, Db#db{engine = {Engine, NewSt}}}.
-
open_write_stream(#db{} = Db, Options) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:open_write_stream(EngineState, Options).
-
open_read_stream(#db{} = Db, StreamDiskInfo) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:open_read_stream(EngineState, StreamDiskInfo).
-
is_active_stream(#db{} = Db, ReadStreamState) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:is_active_stream(EngineState, ReadStreamState).
-
fold_docs(#db{} = Db, UserFun, UserAcc, Options) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:fold_docs(EngineState, UserFun, UserAcc, Options).
-
fold_local_docs(#db{} = Db, UserFun, UserAcc, Options) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:fold_local_docs(EngineState, UserFun, UserAcc, Options).
-
fold_changes(#db{} = Db, StartSeq, UserFun, UserAcc, Options) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:fold_changes(EngineState, StartSeq, UserFun, UserAcc, Options).
-
fold_purge_infos(#db{} = Db, StartPurgeSeq, UserFun, UserAcc, Options) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:fold_purge_infos(
- EngineState, StartPurgeSeq, UserFun, UserAcc, Options).
-
+ EngineState, StartPurgeSeq, UserFun, UserAcc, Options
+ ).
count_changes_since(#db{} = Db, StartSeq) ->
#db{engine = {Engine, EngineState}} = Db,
Engine:count_changes_since(EngineState, StartSeq).
-
start_compaction(#db{} = Db) ->
#db{
engine = {Engine, EngineState},
@@ -1047,50 +969,53 @@ start_compaction(#db{} = Db) ->
options = Options
} = Db,
{ok, NewEngineState, Pid} = Engine:start_compaction(
- EngineState, DbName, Options, self()),
+ EngineState, DbName, Options, self()
+ ),
{ok, Db#db{
engine = {Engine, NewEngineState},
compactor_pid = Pid
}}.
-
finish_compaction(Db, CompactInfo) ->
#db{
engine = {Engine, St},
name = DbName,
options = Options
} = Db,
- NewDb = case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
- {ok, NewState, undefined} ->
- couch_event:notify(DbName, compacted),
- Db#db{
- engine = {Engine, NewState},
- compactor_pid = nil
- };
- {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
- Db#db{
- engine = {Engine, NewState},
- compactor_pid = CompactorPid
- }
- end,
+ NewDb =
+ case Engine:finish_compaction(St, DbName, Options, CompactInfo) of
+ {ok, NewState, undefined} ->
+ couch_event:notify(DbName, compacted),
+ Db#db{
+ engine = {Engine, NewState},
+ compactor_pid = nil
+ };
+ {ok, NewState, CompactorPid} when is_pid(CompactorPid) ->
+ Db#db{
+ engine = {Engine, NewState},
+ compactor_pid = CompactorPid
+ }
+ end,
ok = couch_server:db_updated(NewDb),
{ok, NewDb}.
-
trigger_on_compact(DbName) ->
{ok, DDocs} = get_ddocs(DbName),
couch_db_plugin:on_compact(DbName, DDocs).
-
get_ddocs(<<"shards/", _/binary>> = DbName) ->
{_, Ref} = spawn_monitor(fun() ->
exit(fabric:design_docs(mem3:dbname(DbName)))
end),
receive
{'DOWN', Ref, _, _, {ok, JsonDDocs}} ->
- {ok, lists:map(fun(JsonDDoc) ->
- couch_doc:from_json_obj(JsonDDoc)
- end, JsonDDocs)};
+ {ok,
+ lists:map(
+ fun(JsonDDoc) ->
+ couch_doc:from_json_obj(JsonDDoc)
+ end,
+ JsonDDocs
+ )};
{'DOWN', Ref, _, _, Else} ->
Else
end;
diff --git a/src/couch/src/couch_db_epi.erl b/src/couch/src/couch_db_epi.erl
index 21879f683..870202bad 100644
--- a/src/couch/src/couch_db_epi.erl
+++ b/src/couch/src/couch_db_epi.erl
@@ -32,7 +32,6 @@ providers() ->
{chttpd_handlers, couch_httpd_handlers}
].
-
services() ->
[
{couch_db, couch_db_plugin},
diff --git a/src/couch/src/couch_db_header.erl b/src/couch/src/couch_db_header.erl
index 355364f9b..9c81ba6d0 100644
--- a/src/couch/src/couch_db_header.erl
+++ b/src/couch/src/couch_db_header.erl
@@ -12,7 +12,6 @@
-module(couch_db_header).
-
-export([
new/0,
from/1,
@@ -37,7 +36,6 @@
compacted_seq/1
]).
-
% This should be updated anytime a header change happens that requires more
% than filling in new defaults.
%
@@ -66,14 +64,12 @@
compacted_seq
}).
-
new() ->
#db_header{
uuid = couch_uuids:random(),
epochs = [{node(), 0}]
}.
-
from(Header0) ->
Header = upgrade(Header0),
#db_header{
@@ -82,16 +78,15 @@ from(Header0) ->
compacted_seq = Header#db_header.compacted_seq
}.
-
is_header(Header) ->
try
upgrade(Header),
true
- catch _:_ ->
- false
+ catch
+ _:_ ->
+ false
end.
-
upgrade(Header) ->
Funs = [
fun upgrade_tuple/1,
@@ -100,69 +95,63 @@ upgrade(Header) ->
fun upgrade_epochs/1,
fun upgrade_compacted_seq/1
],
- lists:foldl(fun(F, HdrAcc) ->
- F(HdrAcc)
- end, Header, Funs).
-
+ lists:foldl(
+ fun(F, HdrAcc) ->
+ F(HdrAcc)
+ end,
+ Header,
+ Funs
+ ).
set(Header0, Fields) ->
% A subtlety here is that if a database was open during
% the release upgrade that updates to uuids and epochs then
% this dynamic upgrade also assigns a uuid and epoch.
Header = upgrade(Header0),
- lists:foldl(fun({Field, Value}, HdrAcc) ->
- set_field(HdrAcc, Field, Value)
- end, Header, Fields).
-
+ lists:foldl(
+ fun({Field, Value}, HdrAcc) ->
+ set_field(HdrAcc, Field, Value)
+ end,
+ Header,
+ Fields
+ ).
disk_version(Header) ->
get_field(Header, disk_version).
-
update_seq(Header) ->
get_field(Header, update_seq).
-
id_tree_state(Header) ->
get_field(Header, id_tree_state).
-
seq_tree_state(Header) ->
get_field(Header, seq_tree_state).
-
local_tree_state(Header) ->
get_field(Header, local_tree_state).
-
purge_seq(Header) ->
get_field(Header, purge_seq).
-
purged_docs(Header) ->
get_field(Header, purged_docs).
-
security_ptr(Header) ->
get_field(Header, security_ptr).
-
revs_limit(Header) ->
get_field(Header, revs_limit).
-
uuid(Header) ->
get_field(Header, uuid).
-
epochs(Header) ->
get_field(Header, epochs).
-
compacted_seq(Header) ->
get_field(Header, compacted_seq).
-
get_field(Header, Field) ->
Idx = index(Field),
case Idx > tuple_size(Header) of
@@ -170,88 +159,97 @@ get_field(Header, Field) ->
false -> element(index(Field), Header)
end.
-
set_field(Header, Field, Value) ->
setelement(index(Field), Header, Value).
-
index(Field) ->
couch_util:get_value(Field, indexes()).
-
indexes() ->
Fields = record_info(fields, db_header),
Indexes = lists:seq(2, record_info(size, db_header)),
lists:zip(Fields, Indexes).
-
upgrade_tuple(Old) when is_record(Old, db_header) ->
Old;
upgrade_tuple(Old) when is_tuple(Old) ->
NewSize = record_info(size, db_header),
- if tuple_size(Old) < NewSize -> ok; true ->
- erlang:error({invalid_header_size, Old})
+ if
+ tuple_size(Old) < NewSize -> ok;
+ true -> erlang:error({invalid_header_size, Old})
end,
- {_, New} = lists:foldl(fun(Val, {Idx, Hdr}) ->
- {Idx+1, setelement(Idx, Hdr, Val)}
- end, {1, #db_header{}}, tuple_to_list(Old)),
- if is_record(New, db_header) -> ok; true ->
- erlang:error({invalid_header_extension, {Old, New}})
+ {_, New} = lists:foldl(
+ fun(Val, {Idx, Hdr}) ->
+ {Idx + 1, setelement(Idx, Hdr, Val)}
+ end,
+ {1, #db_header{}},
+ tuple_to_list(Old)
+ ),
+ if
+ is_record(New, db_header) -> ok;
+ true -> erlang:error({invalid_header_extension, {Old, New}})
end,
New.
-define(OLD_DISK_VERSION_ERROR,
- "Database files from versions smaller than 0.10.0 are no longer supported").
+ "Database files from versions smaller than 0.10.0 are no longer supported"
+).
-upgrade_disk_version(#db_header{}=Header) ->
+upgrade_disk_version(#db_header{} = Header) ->
case element(2, Header) of
- 1 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 2 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 3 -> throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
- 4 -> Header#db_header{security_ptr = nil}; % [0.10 - 0.11)
- 5 -> Header; % pre 1.2
- ?LATEST_DISK_VERSION -> Header;
+ 1 ->
+ throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ 2 ->
+ throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ 3 ->
+ throw({database_disk_version_error, ?OLD_DISK_VERSION_ERROR});
+ % [0.10 - 0.11)
+ 4 ->
+ Header#db_header{security_ptr = nil};
+ % pre 1.2
+ 5 ->
+ Header;
+ ?LATEST_DISK_VERSION ->
+ Header;
_ ->
Reason = "Incorrect disk header version",
throw({database_disk_version_error, Reason})
end.
-
-upgrade_uuid(#db_header{}=Header) ->
+upgrade_uuid(#db_header{} = Header) ->
case Header#db_header.uuid of
undefined ->
% Upgrading this old db file to a newer
% on disk format that includes a UUID.
- Header#db_header{uuid=couch_uuids:random()};
+ Header#db_header{uuid = couch_uuids:random()};
_ ->
Header
end.
-
-upgrade_epochs(#db_header{}=Header) ->
- NewEpochs = case Header#db_header.epochs of
- undefined ->
-            % This node is taking over ownership of a shard with
-            % an old version of couch file. Before epochs there
- % was always an implicit assumption that a file was
- % owned since eternity by the node it was on. This
- % just codifies that assumption.
- [{node(), 0}];
- [{Node, _} | _] = Epochs0 when Node == node() ->
- % Current node is the current owner of this db
- Epochs0;
- Epochs1 ->
- % This node is taking over ownership of this db
- % and marking the update sequence where it happened.
- [{node(), Header#db_header.update_seq} | Epochs1]
- end,
+upgrade_epochs(#db_header{} = Header) ->
+ NewEpochs =
+ case Header#db_header.epochs of
+ undefined ->
+                % This node is taking over ownership of a shard with
+                % an old version of couch file. Before epochs there
+ % was always an implicit assumption that a file was
+ % owned since eternity by the node it was on. This
+ % just codifies that assumption.
+ [{node(), 0}];
+ [{Node, _} | _] = Epochs0 when Node == node() ->
+ % Current node is the current owner of this db
+ Epochs0;
+ Epochs1 ->
+ % This node is taking over ownership of this db
+ % and marking the update sequence where it happened.
+ [{node(), Header#db_header.update_seq} | Epochs1]
+ end,
    % It's possible for a node to open a db and claim
% ownership but never make a write to the db. This
% removes nodes that claimed ownership but never
% changed the database.
DedupedEpochs = remove_dup_epochs(NewEpochs),
- Header#db_header{epochs=DedupedEpochs}.
-
+ Header#db_header{epochs = DedupedEpochs}.
% This is slightly relying on the update_seqs being sorted
% in epochs due to how we only ever push things onto the
@@ -260,12 +258,12 @@ upgrade_epochs(#db_header{}=Header) ->
% want to remove dupes (by calling a sort on the input to this
% function). So for now we don't sort but are relying on the
% idea that epochs is always sorted.
-remove_dup_epochs([_]=Epochs) ->
+remove_dup_epochs([_] = Epochs) ->
Epochs;
remove_dup_epochs([{N1, S}, {_N2, S}]) ->
% Seqs match, keep the most recent owner
[{N1, S}];
-remove_dup_epochs([_, _]=Epochs) ->
+remove_dup_epochs([_, _] = Epochs) ->
% Seqs don't match.
Epochs;
remove_dup_epochs([{N1, S}, {_N2, S} | Rest]) ->
@@ -275,11 +273,10 @@ remove_dup_epochs([{N1, S1}, {N2, S2} | Rest]) ->
% Seqs don't match, recurse to check others
[{N1, S1} | remove_dup_epochs([{N2, S2} | Rest])].
-
-upgrade_compacted_seq(#db_header{}=Header) ->
+upgrade_compacted_seq(#db_header{} = Header) ->
case Header#db_header.compacted_seq of
undefined ->
- Header#db_header{compacted_seq=0};
+ Header#db_header{compacted_seq = 0};
_ ->
Header
end.
@@ -296,20 +293,30 @@ latest(_Else) ->
mk_header(Vsn) ->
{
- db_header, % record name
- Vsn, % disk version
- 100, % update_seq
- 0, % unused
- foo, % id_tree_state
- bar, % seq_tree_state
- bam, % local_tree_state
- 1, % purge_seq
- baz, % purged_docs
- bang, % security_ptr
- 999 % revs_limit
+ % record name
+ db_header,
+ % disk version
+ Vsn,
+ % update_seq
+ 100,
+ % unused
+ 0,
+ % id_tree_state
+ foo,
+ % seq_tree_state
+ bar,
+ % local_tree_state
+ bam,
+ % purge_seq
+ 1,
+ % purged_docs
+ baz,
+ % security_ptr
+ bang,
+ % revs_limit
+ 999
}.
-
upgrade_v3_test() ->
Vsn3Header = mk_header(3),
NewHeader = upgrade_tuple(Vsn3Header),
@@ -328,9 +335,10 @@ upgrade_v3_test() ->
?assertEqual(undefined, uuid(NewHeader)),
?assertEqual(undefined, epochs(NewHeader)),
- ?assertThrow({database_disk_version_error, _},
- upgrade_disk_version(NewHeader)).
-
+ ?assertThrow(
+ {database_disk_version_error, _},
+ upgrade_disk_version(NewHeader)
+ ).
upgrade_v5_test() ->
Vsn5Header = mk_header(5),
@@ -342,7 +350,6 @@ upgrade_v5_test() ->
% Security ptr isn't changed for v5 headers
?assertEqual(bang, security_ptr(NewHeader)).
-
upgrade_uuid_test() ->
Vsn5Header = mk_header(5),
@@ -358,7 +365,6 @@ upgrade_uuid_test() ->
ResetHeader = from(NewNewHeader),
?assertEqual(uuid(NewHeader), uuid(ResetHeader)).
-
upgrade_epochs_test() ->
Vsn5Header = mk_header(5),
@@ -391,15 +397,12 @@ upgrade_epochs_test() ->
ResetHeader = from(NewNewHeader),
?assertEqual(OwnedEpochs, epochs(ResetHeader)).
-
get_uuid_from_old_header_test() ->
Vsn5Header = mk_header(5),
?assertEqual(undefined, uuid(Vsn5Header)).
-
get_epochs_from_old_header_test() ->
Vsn5Header = mk_header(5),
?assertEqual(undefined, epochs(Vsn5Header)).
-
-endif.
diff --git a/src/couch/src/couch_db_plugin.erl b/src/couch/src/couch_db_plugin.erl
index c3684c6e3..c84edc1b7 100644
--- a/src/couch/src/couch_db_plugin.erl
+++ b/src/couch/src/couch_db_plugin.erl
@@ -87,10 +87,10 @@ do_apply(Func, Args, Opts) ->
maybe_handle(Func, Args, Default) ->
Handle = couch_epi:get_handle(?SERVICE_ID),
case couch_epi:decide(Handle, ?SERVICE_ID, Func, Args, []) of
- no_decision when is_function(Default) ->
- apply(Default, Args);
- no_decision ->
- Default;
- {decided, Result} ->
- Result
+ no_decision when is_function(Default) ->
+ apply(Default, Args);
+ no_decision ->
+ Default;
+ {decided, Result} ->
+ Result
end.
diff --git a/src/couch/src/couch_db_split.erl b/src/couch/src/couch_db_split.erl
index 1aa86fb37..d219e3731 100644
--- a/src/couch/src/couch_db_split.erl
+++ b/src/couch/src/couch_db_split.erl
@@ -12,20 +12,16 @@
-module(couch_db_split).
-
-export([
split/3,
copy_local_docs/3,
cleanup_target/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-define(DEFAULT_BUFFER_SIZE, 16777216).
-
-record(state, {
source_db,
source_uuid,
@@ -51,11 +47,11 @@
atts = []
}).
-
% Public API
split(Source, #{} = Targets, PickFun) when
- map_size(Targets) >= 2, is_function(PickFun, 3) ->
+ map_size(Targets) >= 2, is_function(PickFun, 3)
+->
case couch_db:open_int(Source, [?ADMIN_CTX]) of
{ok, SourceDb} ->
Engine = get_engine(SourceDb),
@@ -74,16 +70,19 @@ split(Source, #{} = Targets, PickFun) when
{error, missing_source}
end.
-
copy_local_docs(Source, #{} = Targets0, PickFun) when
- is_binary(Source), is_function(PickFun, 3) ->
+ is_binary(Source), is_function(PickFun, 3)
+->
case couch_db:open_int(Source, [?ADMIN_CTX]) of
{ok, SourceDb} ->
try
- Targets = maps:map(fun(_, DbName) ->
- {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- #target{db = Db, uuid = couch_db:get_uuid(Db)}
- end, Targets0),
+ Targets = maps:map(
+ fun(_, DbName) ->
+ {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
+ #target{db = Db, uuid = couch_db:get_uuid(Db)}
+ end,
+ Targets0
+ ),
SourceName = couch_db:name(SourceDb),
try
State = #state{
@@ -96,10 +95,13 @@ copy_local_docs(Source, #{} = Targets0, PickFun) when
copy_local_docs(State),
ok
after
- maps:map(fun(_, #target{db = Db} = T) ->
- couch_db:close(Db),
- T#target{db = undefined}
- end, Targets)
+ maps:map(
+ fun(_, #target{db = Db} = T) ->
+ couch_db:close(Db),
+ T#target{db = undefined}
+ end,
+ Targets
+ )
end
after
couch_db:close(SourceDb)
@@ -108,7 +110,6 @@ copy_local_docs(Source, #{} = Targets0, PickFun) when
{error, missing_source}
end.
-
cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) ->
case couch_db:open_int(Source, [?ADMIN_CTX]) of
{ok, SourceDb} ->
@@ -121,35 +122,40 @@ cleanup_target(Source, Target) when is_binary(Source), is_binary(Target) ->
{error, missing_source}
end.
-
% Private Functions
split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) ->
- Targets = maps:fold(fun(Key, DbName, Map) ->
- case couch_db:validate_dbname(DbName) of
- ok ->
- ok;
- {error, E} ->
- throw({target_create_error, DbName, E, Map})
+ Targets = maps:fold(
+ fun(Key, DbName, Map) ->
+ case couch_db:validate_dbname(DbName) of
+ ok ->
+ ok;
+ {error, E} ->
+ throw({target_create_error, DbName, E, Map})
+ end,
+ case couch_server:lock(DbName, <<"shard splitting">>) of
+ ok ->
+ ok;
+ {error, Err} ->
+ throw({target_create_error, DbName, Err, Map})
+ end,
+ {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
+ Opts =
+ [create, ?ADMIN_CTX] ++
+ case Partitioned of
+ true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}];
+ false -> []
+ end,
+ case couch_db:start_link(Engine, DbName, Filepath, Opts) of
+ {ok, Db} ->
+ Map#{Key => #target{db = Db}};
+ {error, Error} ->
+ throw({target_create_error, DbName, Error, Map})
+ end
end,
- case couch_server:lock(DbName, <<"shard splitting">>) of
- ok ->
- ok;
- {error, Err} ->
- throw({target_create_error, DbName, Err, Map})
- end,
- {ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
- Opts = [create, ?ADMIN_CTX] ++ case Partitioned of
- true -> [{props, [{partitioned, true}, {hash, [M, F, A]}]}];
- false -> []
- end,
- case couch_db:start_link(Engine, DbName, Filepath, Opts) of
- {ok, Db} ->
- Map#{Key => #target{db = Db}};
- {error, Error} ->
- throw({target_create_error, DbName, Error, Map})
- end
- end, #{}, Targets0),
+ #{},
+ Targets0
+ ),
Seq = couch_db:get_update_seq(SourceDb),
State1 = #state{
source_db = SourceDb,
@@ -166,24 +172,27 @@ split(SourceDb, Partitioned, Engine, Targets0, PickFun, {M, F, A} = HashFun) ->
stop_targets(State6#state.targets),
{ok, Seq}.
-
cleanup_targets(#{} = Targets, Engine) ->
- maps:map(fun(_, #target{db = Db} = T) ->
- ok = stop_target_db(Db),
- DbName = couch_db:name(Db),
- delete_target(DbName, Engine),
- couch_server:unlock(DbName),
- T
- end, Targets).
-
+ maps:map(
+ fun(_, #target{db = Db} = T) ->
+ ok = stop_target_db(Db),
+ DbName = couch_db:name(Db),
+ delete_target(DbName, Engine),
+ couch_server:unlock(DbName),
+ T
+ end,
+ Targets
+ ).
stop_targets(#{} = Targets) ->
- maps:map(fun(_, #target{db = Db} = T) ->
- {ok, Db1} = couch_db_engine:commit_data(Db),
- ok = stop_target_db(Db1),
- T
- end, Targets).
-
+ maps:map(
+ fun(_, #target{db = Db} = T) ->
+ {ok, Db1} = couch_db_engine:commit_data(Db),
+ ok = stop_target_db(Db1),
+ T
+ end,
+ Targets
+ ).
stop_target_db(Db) ->
couch_db:close(Db),
@@ -193,84 +202,91 @@ stop_target_db(Db) ->
couch_server:unlock(couch_db:name(Db)),
ok.
-
delete_target(DbName, Engine) ->
RootDir = config:get("couchdb", "database_dir", "."),
{ok, Filepath} = couch_server:get_engine_path(DbName, Engine),
DelOpt = [{context, compaction}, sync],
couch_db_engine:delete(Engine, RootDir, Filepath, DelOpt).
-
pick_target(DocId, #state{} = State, #{} = Targets) ->
#state{pickfun = PickFun, hashfun = HashFun} = State,
Key = PickFun(DocId, maps:keys(Targets), HashFun),
{Key, maps:get(Key, Targets)}.
-
set_targets_update_seq(#state{targets = Targets} = State) ->
Seq = couch_db:get_update_seq(State#state.source_db),
- Targets1 = maps:map(fun(_, #target{db = Db} = Target) ->
- {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq),
- Target#target{db = Db1}
- end, Targets),
+ Targets1 = maps:map(
+ fun(_, #target{db = Db} = Target) ->
+ {ok, Db1} = couch_db_engine:set_update_seq(Db, Seq),
+ Target#target{db = Db1}
+ end,
+ Targets
+ ),
State#state{targets = Targets1}.
-
copy_checkpoints(#state{} = State) ->
#state{source_db = Db, source_uuid = SrcUUID, targets = Targets} = State,
FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
- UpdatedAcc = case Id of
- <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
- % Transform mem3 internal replicator checkpoints to avoid
- % rewinding the changes feed when it sees the new shards
- maps:map(fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) ->
- Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc),
- T#target{buffer = [Doc1 | Docs]}
- end, Acc);
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- % Copy purge checkpoints to all shards
- maps:map(fun(_, #target{buffer = Docs} = T) ->
- T#target{buffer = [Doc | Docs]}
- end, Acc);
- <<?LOCAL_DOC_PREFIX, _/binary>> ->
-                % Skip copying these; they will be copied during the
-                % local docs top-off right before the shards are switched
- Acc
- end,
+ UpdatedAcc =
+ case Id of
+ <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
+ % Transform mem3 internal replicator checkpoints to avoid
+ % rewinding the changes feed when it sees the new shards
+ maps:map(
+ fun(_, #target{uuid = TgtUUID, buffer = Docs} = T) ->
+ Doc1 = update_checkpoint_doc(SrcUUID, TgtUUID, Doc),
+ T#target{buffer = [Doc1 | Docs]}
+ end,
+ Acc
+ );
+ <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
+ % Copy purge checkpoints to all shards
+ maps:map(
+ fun(_, #target{buffer = Docs} = T) ->
+ T#target{buffer = [Doc | Docs]}
+ end,
+ Acc
+ );
+ <<?LOCAL_DOC_PREFIX, _/binary>> ->
+                    % Skip copying these; they will be copied during the
+                    % local docs top-off right before the shards are switched
+ Acc
+ end,
{ok, UpdatedAcc}
end,
{ok, Targets1} = couch_db_engine:fold_local_docs(Db, FoldFun, Targets, []),
- Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
- case Docs of
- [] ->
- T;
- [_ | _] ->
- Docs1 = lists:reverse(Docs),
- {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1),
- {ok, TDb2} = couch_db_engine:commit_data(TDb1),
- T#target{db = TDb2, buffer = []}
- end
- end, Targets1),
+ Targets2 = maps:map(
+ fun(_, #target{db = TDb, buffer = Docs} = T) ->
+ case Docs of
+ [] ->
+ T;
+ [_ | _] ->
+ Docs1 = lists:reverse(Docs),
+ {ok, TDb1} = couch_db_engine:write_doc_infos(TDb, [], Docs1),
+ {ok, TDb2} = couch_db_engine:commit_data(TDb1),
+ T#target{db = TDb2, buffer = []}
+ end
+ end,
+ Targets1
+ ),
State#state{targets = Targets2}.
-
update_checkpoint_doc(Old, New, #doc{body = {Props}} = Doc) ->
- NewProps = case couch_util:get_value(<<"target_uuid">>, Props) of
- Old ->
- replace_kv(Props, {<<"target_uuid">>, Old, New});
- Other when is_binary(Other) ->
- replace_kv(Props, {<<"source_uuid">>, Old, New})
- end,
+ NewProps =
+ case couch_util:get_value(<<"target_uuid">>, Props) of
+ Old ->
+ replace_kv(Props, {<<"target_uuid">>, Old, New});
+ Other when is_binary(Other) ->
+ replace_kv(Props, {<<"source_uuid">>, Old, New})
+ end,
NewId = update_checkpoint_id(Doc#doc.id, Old, New),
Doc#doc{id = NewId, body = {NewProps}}.
-
update_checkpoint_id(Id, Old, New) ->
OldHash = mem3_rep:local_id_hash(Old),
NewHash = mem3_rep:local_id_hash(New),
binary:replace(Id, OldHash, NewHash).
-
replace_kv({[]}, _) ->
{[]};
replace_kv({KVs}, Replacement) ->
@@ -286,30 +302,33 @@ replace_kv({K, V}, Replacement) ->
replace_kv(V, _) ->
V.
-
copy_meta(#state{source_db = SourceDb, targets = Targets} = State) ->
RevsLimit = couch_db:get_revs_limit(SourceDb),
{SecProps} = couch_db:get_security(SourceDb),
PurgeLimit = couch_db:get_purge_infos_limit(SourceDb),
- Targets1 = maps:map(fun(_, #target{db = Db} = T) ->
- {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit),
- {ok, Db2} = couch_db_engine:set_security(Db1, SecProps),
- {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit),
- T#target{db = Db3}
- end, Targets),
+ Targets1 = maps:map(
+ fun(_, #target{db = Db} = T) ->
+ {ok, Db1} = couch_db_engine:set_revs_limit(Db, RevsLimit),
+ {ok, Db2} = couch_db_engine:set_security(Db1, SecProps),
+ {ok, Db3} = couch_db_engine:set_purge_infos_limit(Db2, PurgeLimit),
+ T#target{db = Db3}
+ end,
+ Targets
+ ),
State#state{targets = Targets1}.
-
copy_purge_info(#state{source_db = Db} = State) ->
Seq = max(0, couch_db:get_oldest_purge_seq(Db) - 1),
{ok, NewState} = couch_db:fold_purge_infos(Db, Seq, fun purge_cb/2, State),
- Targets = maps:map(fun(_, #target{} = T) ->
- commit_purge_infos(T)
- end, NewState#state.targets),
+ Targets = maps:map(
+ fun(_, #target{} = T) ->
+ commit_purge_infos(T)
+ end,
+ NewState#state.targets
+ ),
NewState#state{targets = Targets}.
-
-acc_and_flush(Item, #target{}= Target, MaxBuffer, FlushCb) ->
+acc_and_flush(Item, #target{} = Target, MaxBuffer, FlushCb) ->
#target{buffer = Buffer, buffer_size = BSize} = Target,
BSize1 = BSize + ?term_size(Item),
Target1 = Target#target{buffer = [Item | Buffer], buffer_size = BSize1},
@@ -318,37 +337,34 @@ acc_and_flush(Item, #target{}= Target, MaxBuffer, FlushCb) ->
false -> Target1
end.
-
purge_cb({_PSeq, _UUID, Id, _Revs} = PI, #state{targets = Targets} = State) ->
{Key, Target} = pick_target(Id, State, Targets),
MaxBuffer = State#state.max_buffer_size,
Target1 = acc_and_flush(PI, Target, MaxBuffer, fun commit_purge_infos/1),
{ok, State#state{targets = Targets#{Key => Target1}}}.
-
commit_purge_infos(#target{buffer = [], db = Db} = Target) ->
Target#target{db = Db};
-
commit_purge_infos(#target{buffer = PIs0, db = Db} = Target) ->
PIs = lists:reverse(PIs0),
{ok, Db1} = couch_db_engine:copy_purge_infos(Db, PIs),
{ok, Db2} = couch_db_engine:commit_data(Db1),
Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
copy_docs(#state{source_db = Db} = State) ->
{ok, NewState} = couch_db:fold_changes(Db, 0, fun changes_cb/2, State),
- CommitTargets = maps:map(fun(_, #target{} = T) ->
- commit_docs(T)
- end, NewState#state.targets),
+ CommitTargets = maps:map(
+ fun(_, #target{} = T) ->
+ commit_docs(T)
+ end,
+ NewState#state.targets
+ ),
NewState#state{targets = CommitTargets}.
-
% Backwards compatibility clause. Seq trees used to hold #doc_infos at one time
changes_cb(#doc_info{id = Id}, #state{source_db = Db} = State) ->
[FDI = #full_doc_info{}] = couch_db_engine:open_docs(Db, [Id]),
changes_cb(FDI, State);
-
changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) ->
#state{source_db = SourceDb, targets = Targets} = State,
{Key, Target} = pick_target(Id, State, Targets),
@@ -357,17 +373,14 @@ changes_cb(#full_doc_info{id = Id} = FDI, #state{} = State) ->
Target1 = acc_and_flush(FDI1, Target, MaxBuffer, fun commit_docs/1),
{ok, State#state{targets = Targets#{Key => Target1}}}.
-
commit_docs(#target{buffer = [], db = Db} = Target) ->
Target#target{db = Db};
-
commit_docs(#target{buffer = FDIs, db = Db} = Target) ->
Pairs = [{not_found, FDI} || FDI <- lists:reverse(FDIs)],
{ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, []),
{ok, Db2} = couch_db_engine:commit_data(Db1),
Target#target{buffer = [], buffer_size = 0, db = Db2}.
-
process_fdi(FDI, SourceDb, TargetDb) ->
#full_doc_info{id = Id, rev_tree = RTree} = FDI,
Acc = #racc{id = Id, source_db = SourceDb, target_db = TargetDb},
@@ -378,10 +391,8 @@ process_fdi(FDI, SourceDb, TargetDb) ->
sizes = #size_info{active = Active, external = External}
}.
-
revtree_cb(_Rev, _Leaf, branch, Acc) ->
{[], Acc};
-
revtree_cb({Pos, RevId}, Leaf, leaf, Acc) ->
#racc{id = Id, source_db = SourceDb, target_db = TargetDb} = Acc,
#leaf{deleted = Deleted, ptr = Ptr, sizes = LeafSizes} = Leaf,
@@ -393,16 +404,20 @@ revtree_cb({Pos, RevId}, Leaf, leaf, Acc) ->
},
Doc1 = couch_db_engine:read_doc_body(SourceDb, Doc0),
#doc{body = Body, atts = AttInfos0} = Doc1,
- External = case LeafSizes#size_info.external of
- 0 when is_binary(Body) ->
- couch_compress:uncompressed_size(Body);
- 0 ->
- couch_ejson_size:encoded_size(Body);
- N -> N
- end,
- AttInfos = if not is_binary(AttInfos0) -> AttInfos0; true ->
- couch_compress:decompress(AttInfos0)
- end,
+ External =
+ case LeafSizes#size_info.external of
+ 0 when is_binary(Body) ->
+ couch_compress:uncompressed_size(Body);
+ 0 ->
+ couch_ejson_size:encoded_size(Body);
+ N ->
+ N
+ end,
+ AttInfos =
+ if
+ not is_binary(AttInfos0) -> AttInfos0;
+ true -> couch_compress:decompress(AttInfos0)
+ end,
Atts = [process_attachment(Att, SourceDb, TargetDb) || Att <- AttInfos],
Doc2 = Doc1#doc{atts = Atts},
Doc3 = couch_db_engine:serialize_doc(TargetDb, Doc2),
@@ -417,42 +432,45 @@ revtree_cb({Pos, RevId}, Leaf, leaf, Acc) ->
},
{NewLeaf, add_sizes(Active, External, AttSizes, Acc)}.
-
% This is copied almost verbatim from the compactor
-process_attachment({Name, Type, BinSp, AttLen, RevPos, ExpectedMd5}, SourceDb,
- TargetDb) ->
+process_attachment(
+ {Name, Type, BinSp, AttLen, RevPos, ExpectedMd5},
+ SourceDb,
+ TargetDb
+) ->
% 010 upgrade code
{ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
{ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
ok = couch_stream:copy(SrcStream, DstStream),
{NewStream, AttLen, AttLen, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
+ couch_stream:close(DstStream),
{ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
couch_util:check_md5(ExpectedMd5, ActualMd5),
{Name, Type, NewBinSp, AttLen, AttLen, RevPos, ExpectedMd5, identity};
-
-process_attachment({Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5,
- Enc1}, SourceDb, TargetDb) ->
+process_attachment(
+ {Name, Type, BinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc1}, SourceDb, TargetDb
+) ->
{ok, SrcStream} = couch_db_engine:open_read_stream(SourceDb, BinSp),
{ok, DstStream} = couch_db_engine:open_write_stream(TargetDb, []),
ok = couch_stream:copy(SrcStream, DstStream),
{NewStream, AttLen, _, ActualMd5, _IdentityMd5} =
- couch_stream:close(DstStream),
+ couch_stream:close(DstStream),
{ok, NewBinSp} = couch_stream:to_disk_term(NewStream),
couch_util:check_md5(ExpectedMd5, ActualMd5),
- Enc = case Enc1 of
- true -> gzip; % 0110 upgrade code
- false -> identity; % 0110 upgrade code
- _ -> Enc1
- end,
+ Enc =
+ case Enc1 of
+ % 0110 upgrade code
+ true -> gzip;
+ % 0110 upgrade code
+ false -> identity;
+ _ -> Enc1
+ end,
{Name, Type, NewBinSp, AttLen, DiskLen, RevPos, ExpectedMd5, Enc}.
-
get_engine(Db) ->
{ok, DbInfoProps} = couch_db:get_db_info(Db),
proplists:get_value(engine, DbInfoProps).
-
add_sizes(Active, External, Atts, #racc{} = Acc) ->
#racc{active = ActiveAcc, external = ExternalAcc, atts = AttsAcc} = Acc,
NewActiveAcc = ActiveAcc + Active,
@@ -464,41 +482,42 @@ add_sizes(Active, External, Atts, #racc{} = Acc) ->
atts = NewAttsAcc
}.
-
total_sizes(#racc{active = Active, external = External, atts = Atts}) ->
TotalAtts = lists:foldl(fun({_, S}, A) -> S + A end, 0, Atts),
{Active + TotalAtts, External + TotalAtts}.
-
get_max_buffer_size() ->
config:get_integer("reshard", "split_buffer_size", ?DEFAULT_BUFFER_SIZE).
-
copy_local_docs(#state{source_db = Db, targets = Targets} = State) ->
FoldFun = fun(#doc{id = Id} = Doc, Acc) ->
- UpdatedAcc = case Id of
- <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
- Acc;
- <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
- Acc;
- <<?LOCAL_DOC_PREFIX, _/binary>> ->
- % Users' and replicator app's checkpoints go to their
- % respective shards based on the general hashing algorithm
- {Key, Target} = pick_target(Id, State, Acc),
- #target{buffer = Docs} = Target,
- Acc#{Key => Target#target{buffer = [Doc | Docs]}}
- end,
+ UpdatedAcc =
+ case Id of
+ <<?LOCAL_DOC_PREFIX, "shard-sync-", _/binary>> ->
+ Acc;
+ <<?LOCAL_DOC_PREFIX, "purge-", _/binary>> ->
+ Acc;
+ <<?LOCAL_DOC_PREFIX, _/binary>> ->
+ % Users' and replicator app's checkpoints go to their
+ % respective shards based on the general hashing algorithm
+ {Key, Target} = pick_target(Id, State, Acc),
+ #target{buffer = Docs} = Target,
+ Acc#{Key => Target#target{buffer = [Doc | Docs]}}
+ end,
{ok, UpdatedAcc}
end,
{ok, Targets1} = couch_db:fold_local_docs(Db, FoldFun, Targets, []),
- Targets2 = maps:map(fun(_, #target{db = TDb, buffer = Docs} = T) ->
- case Docs of
- [] ->
- T;
- [_ | _] ->
- Docs1 = lists:reverse(Docs),
- {ok, _} = couch_db:update_docs(TDb, Docs1),
- T#target{buffer = []}
- end
- end, Targets1),
+ Targets2 = maps:map(
+ fun(_, #target{db = TDb, buffer = Docs} = T) ->
+ case Docs of
+ [] ->
+ T;
+ [_ | _] ->
+ Docs1 = lists:reverse(Docs),
+ {ok, _} = couch_db:update_docs(TDb, Docs1),
+ T#target{buffer = []}
+ end
+ end,
+ Targets1
+ ),
State#state{targets = Targets2}.
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl
index 535acfad6..710b70510 100644
--- a/src/couch/src/couch_db_updater.erl
+++ b/src/couch/src/couch_db_updater.erl
@@ -15,14 +15,14 @@
-vsn(1).
-export([add_sizes/3, upgrade_sizes/1]).
--export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, code_change/3, handle_info/2]).
-include_lib("couch/include/couch_db.hrl").
-include("couch_db_int.hrl").
-define(IDLE_LIMIT_DEFAULT, 61000).
--define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000). % 10 GiB
-
+% 10 GiB
+-define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000).
-record(merge_acc, {
revs_limit,
@@ -33,7 +33,6 @@
full_partitions = []
}).
-
init({Engine, DbName, FilePath, Options0}) ->
erlang:put(io_priority, {db_update, DbName}),
update_idle_limit_from_config(),
@@ -59,7 +58,6 @@ init({Engine, DbName, FilePath, Options0}) ->
proc_lib:init_ack(InitError)
end.
-
terminate(Reason, Db) ->
couch_util:shutdown_sync(Db#db.compactor_pid),
couch_db_engine:terminate(Reason, Db),
@@ -81,7 +79,6 @@ handle_call(cancel_compact, _From, #db{compactor_pid = Pid} = Db) ->
Db2 = Db#db{compactor_pid = nil},
ok = couch_server:db_updated(Db2),
{reply, ok, Db2, idle_limit()};
-
handle_call({set_security, NewSec}, _From, #db{} = Db) ->
{ok, NewDb} = couch_db_engine:set_security(Db, NewSec),
NewSecDb = commit_data(NewDb#db{
@@ -89,36 +86,38 @@ handle_call({set_security, NewSec}, _From, #db{} = Db) ->
}),
ok = couch_server:db_updated(NewSecDb),
{reply, ok, NewSecDb, idle_limit()};
-
handle_call({set_revs_limit, Limit}, _From, Db) ->
{ok, Db2} = couch_db_engine:set_revs_limit(Db, Limit),
Db3 = commit_data(Db2),
ok = couch_server:db_updated(Db3),
{reply, ok, Db3, idle_limit()};
-
handle_call({set_purge_infos_limit, Limit}, _From, Db) ->
{ok, Db2} = couch_db_engine:set_purge_infos_limit(Db, Limit),
ok = couch_server:db_updated(Db2),
{reply, ok, Db2, idle_limit()};
-
handle_call({purge_docs, [], _}, _From, Db) ->
{reply, {ok, []}, Db, idle_limit()};
-
handle_call({purge_docs, PurgeReqs0, Options}, _From, Db) ->
% Filter out any previously applied updates during
% internal replication
IsRepl = lists:member(replicated_changes, Options),
- PurgeReqs = if not IsRepl -> PurgeReqs0; true ->
- UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0],
- PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs),
- lists:flatmap(fun
- ({not_found, PReq}) -> [PReq];
- ({{_, _, _, _}, _}) -> []
- end, lists:zip(PurgeInfos, PurgeReqs0))
- end,
+ PurgeReqs =
+ if
+ not IsRepl ->
+ PurgeReqs0;
+ true ->
+ UUIDs = [UUID || {UUID, _Id, _Revs} <- PurgeReqs0],
+ PurgeInfos = couch_db_engine:load_purge_infos(Db, UUIDs),
+ lists:flatmap(
+ fun
+ ({not_found, PReq}) -> [PReq];
+ ({{_, _, _, _}, _}) -> []
+ end,
+ lists:zip(PurgeInfos, PurgeReqs0)
+ )
+ end,
{ok, NewDb, Replies} = purge_docs(Db, PurgeReqs),
{reply, {ok, Replies}, NewDb, idle_limit()};
-
handle_call(Msg, From, Db) ->
case couch_db_engine:handle_db_updater_call(Msg, From, Db) of
{reply, Resp, NewDb} ->
@@ -127,7 +126,6 @@ handle_call(Msg, From, Db) ->
Else
end.
-
handle_cast({load_validation_funs, ValidationFuns}, Db) ->
Db2 = Db#db{validate_doc_funs = ValidationFuns},
ok = couch_server:db_updated(Db2),
@@ -152,65 +150,76 @@ handle_cast(start_compact, Db) ->
handle_cast({compact_done, _Engine, CompactInfo}, #db{} = OldDb) ->
{ok, NewDb} = couch_db_engine:finish_compaction(OldDb, CompactInfo),
{noreply, NewDb};
-
handle_cast(wakeup, Db) ->
{noreply, Db, idle_limit()};
-
handle_cast(Msg, #db{name = Name} = Db) ->
- couch_log:error("Database `~s` updater received unexpected cast: ~p",
- [Name, Msg]),
+ couch_log:error(
+ "Database `~s` updater received unexpected cast: ~p",
+ [Name, Msg]
+ ),
{stop, Msg, Db}.
-
-handle_info({update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts},
- Db) ->
+handle_info(
+ {update_docs, Client, GroupedDocs, NonRepDocs, MergeConflicts},
+ Db
+) ->
GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
- if NonRepDocs == [] ->
- {GroupedDocs3, Clients} = collect_updates(GroupedDocs2,
- [Client], MergeConflicts);
- true ->
- GroupedDocs3 = GroupedDocs2,
- Clients = [Client]
+ if
+ NonRepDocs == [] ->
+ {GroupedDocs3, Clients} = collect_updates(
+ GroupedDocs2,
+ [Client],
+ MergeConflicts
+ );
+ true ->
+ GroupedDocs3 = GroupedDocs2,
+ Clients = [Client]
end,
NonRepDocs2 = [{Client, NRDoc} || NRDoc <- NonRepDocs],
try update_docs_int(Db, GroupedDocs3, NonRepDocs2, MergeConflicts) of
- {ok, Db2, UpdatedDDocIds} ->
- ok = couch_server:db_updated(Db2),
- case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of
- {Seq, Seq} -> ok;
- _ -> couch_event:notify(Db2#db.name, updated)
- end,
- if NonRepDocs2 /= [] ->
- couch_event:notify(Db2#db.name, local_updated);
- true -> ok
- end,
- [catch(ClientPid ! {done, self()}) || ClientPid <- Clients],
- Db3 = case length(UpdatedDDocIds) > 0 of
- true ->
- % Ken and ddoc_cache are the only things that
- % use the unspecified ddoc_updated message. We
- % should update them to use the new message per
- % ddoc.
- lists:foreach(fun(DDocId) ->
- couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
- end, UpdatedDDocIds),
- couch_event:notify(Db2#db.name, ddoc_updated),
- ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
- refresh_validate_doc_funs(Db2);
- false ->
- Db2
- end,
- {noreply, Db3, hibernate_if_no_idle_limit()}
+ {ok, Db2, UpdatedDDocIds} ->
+ ok = couch_server:db_updated(Db2),
+ case {couch_db:get_update_seq(Db), couch_db:get_update_seq(Db2)} of
+ {Seq, Seq} -> ok;
+ _ -> couch_event:notify(Db2#db.name, updated)
+ end,
+ if
+ NonRepDocs2 /= [] ->
+ couch_event:notify(Db2#db.name, local_updated);
+ true ->
+ ok
+ end,
+ [catch (ClientPid ! {done, self()}) || ClientPid <- Clients],
+ Db3 =
+ case length(UpdatedDDocIds) > 0 of
+ true ->
+ % Ken and ddoc_cache are the only things that
+ % use the unspecified ddoc_updated message. We
+ % should update them to use the new message per
+ % ddoc.
+ lists:foreach(
+ fun(DDocId) ->
+ couch_event:notify(Db2#db.name, {ddoc_updated, DDocId})
+ end,
+ UpdatedDDocIds
+ ),
+ couch_event:notify(Db2#db.name, ddoc_updated),
+ ddoc_cache:refresh(Db2#db.name, UpdatedDDocIds),
+ refresh_validate_doc_funs(Db2);
+ false ->
+ Db2
+ end,
+ {noreply, Db3, hibernate_if_no_idle_limit()}
catch
- throw: retry ->
- [catch(ClientPid ! {retry, self()}) || ClientPid <- Clients],
+ throw:retry ->
+ [catch (ClientPid ! {retry, self()}) || ClientPid <- Clients],
{noreply, Db, hibernate_if_no_idle_limit()}
end;
handle_info({'EXIT', _Pid, normal}, Db) ->
{noreply, Db, idle_limit()};
handle_info({'EXIT', _Pid, Reason}, Db) ->
{stop, Reason, Db};
-handle_info(timeout, #db{name=DbName} = Db) ->
+handle_info(timeout, #db{name = DbName} = Db) ->
IdleLimitMSec = update_idle_limit_from_config(),
case couch_db:is_idle(Db) of
true ->
@@ -230,7 +239,6 @@ handle_info(timeout, #db{name=DbName} = Db) ->
% force a thorough garbage collection.
gen_server:cast(self(), wakeup),
{noreply, Db, hibernate};
-
handle_info(Msg, Db) ->
case couch_db_engine:handle_db_updater_info(Msg, Db) of
{noreply, NewDb} ->
@@ -239,7 +247,6 @@ handle_info(Msg, Db) ->
Else
end.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
@@ -248,25 +255,28 @@ sort_and_tag_grouped_docs(Client, GroupedDocs) ->
% The merge_updates function will fail and the database can end up with
% duplicate documents if the incoming groups are not sorted, so as a sanity
% check we sort them again here. See COUCHDB-2735.
- Cmp = fun([#doc{id=A}|_], [#doc{id=B}|_]) -> A < B end,
- lists:map(fun(DocGroup) ->
- [{Client, maybe_tag_doc(D)} || D <- DocGroup]
- end, lists:sort(Cmp, GroupedDocs)).
+ Cmp = fun([#doc{id = A} | _], [#doc{id = B} | _]) -> A < B end,
+ lists:map(
+ fun(DocGroup) ->
+ [{Client, maybe_tag_doc(D)} || D <- DocGroup]
+ end,
+ lists:sort(Cmp, GroupedDocs)
+ ).
-maybe_tag_doc(#doc{id=Id, revs={Pos,[_Rev|PrevRevs]}, meta=Meta0}=Doc) ->
+maybe_tag_doc(#doc{id = Id, revs = {Pos, [_Rev | PrevRevs]}, meta = Meta0} = Doc) ->
case lists:keymember(ref, 1, Meta0) of
true ->
Doc;
false ->
- Key = {Id, {Pos-1, PrevRevs}},
- Doc#doc{meta=[{ref, Key} | Meta0]}
+ Key = {Id, {Pos - 1, PrevRevs}},
+ Doc#doc{meta = [{ref, Key} | Meta0]}
end.
-merge_updates([[{_,#doc{id=X}}|_]=A|RestA], [[{_,#doc{id=X}}|_]=B|RestB]) ->
- [A++B | merge_updates(RestA, RestB)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X < Y ->
+merge_updates([[{_, #doc{id = X}} | _] = A | RestA], [[{_, #doc{id = X}} | _] = B | RestB]) ->
+ [A ++ B | merge_updates(RestA, RestB)];
+merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X < Y ->
[hd(A) | merge_updates(tl(A), B)];
-merge_updates([[{_,#doc{id=X}}|_]|_]=A, [[{_,#doc{id=Y}}|_]|_]=B) when X > Y ->
+merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X > Y ->
[hd(B) | merge_updates(A, tl(B))];
merge_updates([], RestB) ->
RestB;
@@ -283,18 +293,24 @@ collect_updates(GroupedDocsAcc, ClientsAcc, MergeConflicts) ->
GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs),
GroupedDocsAcc2 =
merge_updates(GroupedDocsAcc, GroupedDocs2),
- collect_updates(GroupedDocsAcc2, [Client | ClientsAcc],
- MergeConflicts)
+ collect_updates(
+ GroupedDocsAcc2,
+ [Client | ClientsAcc],
+ MergeConflicts
+ )
after 0 ->
{GroupedDocsAcc, ClientsAcc}
end.
-
init_db(DbName, FilePath, EngineState, Options) ->
% convert start time tuple to microsecs and store as a binary string
{MegaSecs, Secs, MicroSecs} = os:timestamp(),
- StartTime = ?l2b(io_lib:format("~p",
- [(MegaSecs*1000000*1000000) + (Secs*1000000) + MicroSecs])),
+ StartTime = ?l2b(
+ io_lib:format(
+ "~p",
+ [(MegaSecs * 1000000 * 1000000) + (Secs * 1000000) + MicroSecs]
+ )
+ ),
BDU = couch_util:get_value(before_doc_update, Options, nil),
ADR = couch_util:get_value(after_doc_read, Options, nil),
@@ -319,31 +335,36 @@ init_db(DbName, FilePath, EngineState, Options) ->
options = lists:keystore(props, 1, NonCreateOpts, {props, DbProps})
}.
-
refresh_validate_doc_funs(#db{name = <<"shards/", _/binary>> = Name} = Db) ->
spawn(fabric, reset_validation_funs, [mem3:dbname(Name)]),
Db#db{validate_doc_funs = undefined};
refresh_validate_doc_funs(Db0) ->
- Db = Db0#db{user_ctx=?ADMIN_USER},
+ Db = Db0#db{user_ctx = ?ADMIN_USER},
{ok, DesignDocs} = couch_db:get_design_docs(Db),
ProcessDocFuns = lists:flatmap(
fun(DesignDocInfo) ->
{ok, DesignDoc} = couch_db:open_doc_int(
- Db, DesignDocInfo, [ejson_body]),
+ Db, DesignDocInfo, [ejson_body]
+ ),
case couch_doc:get_validate_doc_fun(DesignDoc) of
- nil -> [];
- Fun -> [Fun]
+ nil -> [];
+ Fun -> [Fun]
end
- end, DesignDocs),
- Db#db{validate_doc_funs=ProcessDocFuns}.
+ end,
+ DesignDocs
+ ),
+ Db#db{validate_doc_funs = ProcessDocFuns}.
% rev tree functions
flush_trees(_Db, [], AccFlushedTrees) ->
{ok, lists:reverse(AccFlushedTrees)};
-flush_trees(#db{} = Db,
- [InfoUnflushed | RestUnflushed], AccFlushed) ->
- #full_doc_info{update_seq=UpdateSeq, rev_tree=Unflushed} = InfoUnflushed,
+flush_trees(
+ #db{} = Db,
+ [InfoUnflushed | RestUnflushed],
+ AccFlushed
+) ->
+ #full_doc_info{update_seq = UpdateSeq, rev_tree = Unflushed} = InfoUnflushed,
{Flushed, FinalAcc} = couch_key_tree:mapfold(
fun(_Rev, Value, Type, SizesAcc) ->
case Value of
@@ -353,9 +374,9 @@ flush_trees(#db{} = Db,
check_doc_atts(Db, Doc),
ExternalSize = get_meta_body_size(Value#doc.meta),
{size_info, AttSizeInfo} =
- lists:keyfind(size_info, 1, Doc#doc.meta),
+ lists:keyfind(size_info, 1, Doc#doc.meta),
{ok, NewDoc, WrittenSize} =
- couch_db_engine:write_doc_body(Db, Doc),
+ couch_db_engine:write_doc_body(Db, Doc),
Leaf = #leaf{
deleted = Doc#doc.deleted,
ptr = NewDoc#doc.body,
@@ -372,7 +393,10 @@ flush_trees(#db{} = Db,
_ ->
{Value, SizesAcc}
end
- end, {0, 0, []}, Unflushed),
+ end,
+ {0, 0, []},
+ Unflushed
+ ),
{FinalAS, FinalES, FinalAtts} = FinalAcc,
TotalAttSize = lists:foldl(fun({_, S}, A) -> S + A end, 0, FinalAtts),
NewInfo = InfoUnflushed#full_doc_info{
@@ -384,30 +408,34 @@ flush_trees(#db{} = Db,
},
flush_trees(Db, RestUnflushed, [NewInfo | AccFlushed]).
-
check_doc_atts(Db, Doc) ->
{atts_stream, Stream} = lists:keyfind(atts_stream, 1, Doc#doc.meta),
% Make sure that the attachments were written to the currently
% active attachment stream. If compaction swaps during a write
% request we may have to rewrite our attachment bodies.
- if Stream == nil -> ok; true ->
- case couch_db:is_active_stream(Db, Stream) of
- true ->
- ok;
- false ->
- % Stream where the attachments were written to is
- % no longer the current attachment stream. This
- % can happen when a database is switched at
- % compaction time.
- couch_log:debug("Stream where the attachments were"
- " written has changed."
- " Possibly retrying.", []),
- throw(retry)
- end
+ if
+ Stream == nil ->
+ ok;
+ true ->
+ case couch_db:is_active_stream(Db, Stream) of
+ true ->
+ ok;
+ false ->
+ % Stream where the attachments were written to is
+ % no longer the current attachment stream. This
+ % can happen when a database is switched at
+ % compaction time.
+ couch_log:debug(
+ "Stream where the attachments were"
+ " written has changed."
+ " Possibly retrying.",
+ []
+ ),
+ throw(retry)
+ end
end.
-
-add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) ->
+add_sizes(Type, #leaf{sizes = Sizes, atts = AttSizes}, Acc) ->
% Maybe upgrade from disk_size only
#size_info{
active = ActiveSize,
@@ -415,24 +443,27 @@ add_sizes(Type, #leaf{sizes=Sizes, atts=AttSizes}, Acc) ->
} = upgrade_sizes(Sizes),
{ASAcc, ESAcc, AttsAcc} = Acc,
NewASAcc = ActiveSize + ASAcc,
- NewESAcc = ESAcc + if Type == leaf -> ExternalSize; true -> 0 end,
+ NewESAcc =
+ ESAcc +
+ if
+ Type == leaf -> ExternalSize;
+ true -> 0
+ end,
NewAttsAcc = lists:umerge(AttSizes, AttsAcc),
{NewASAcc, NewESAcc, NewAttsAcc}.
-
-upgrade_sizes(#size_info{}=SI) ->
+upgrade_sizes(#size_info{} = SI) ->
SI;
upgrade_sizes({D, E}) ->
- #size_info{active=D, external=E};
+ #size_info{active = D, external = E};
upgrade_sizes(S) when is_integer(S) ->
- #size_info{active=S, external=0}.
-
+ #size_info{active = S, external = 0}.
send_result(Client, Doc, NewResult) ->
% used to send a result to the client
- catch(Client ! {result, self(), {doc_tag(Doc), NewResult}}).
+ catch (Client ! {result, self(), {doc_tag(Doc), NewResult}}).
-doc_tag(#doc{meta=Meta}) ->
+doc_tag(#doc{meta = Meta}) ->
case lists:keyfind(ref, 1, Meta) of
{ref, Ref} -> Ref;
false -> throw(no_doc_tag);
@@ -452,17 +483,21 @@ merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) ->
% Track doc ids so we can debug large revision trees
erlang:put(last_id_merged, OldDocInfo#full_doc_info.id),
- NewDocInfo0 = lists:foldl(fun({Client, NewDoc}, OldInfoAcc) ->
- NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts),
- case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of
- true when not MergeConflicts ->
- DocId = NewInfo#full_doc_info.id,
- send_result(Client, NewDoc, {partition_overflow, DocId}),
- OldInfoAcc;
- _ ->
- NewInfo
- end
- end, OldDocInfo, NewDocs),
+ NewDocInfo0 = lists:foldl(
+ fun({Client, NewDoc}, OldInfoAcc) ->
+ NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, MergeConflicts),
+ case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of
+ true when not MergeConflicts ->
+ DocId = NewInfo#full_doc_info.id,
+ send_result(Client, NewDoc, {partition_overflow, DocId}),
+ OldInfoAcc;
+ _ ->
+ NewInfo
+ end
+ end,
+ OldDocInfo,
+ NewDocs
+ ),
NewDocInfo1 = maybe_stem_full_doc_info(NewDocInfo0, Limit),
% When MergeConflicts is false, we updated #full_doc_info.deleted on every
% iteration of merge_rev_tree. However, merge_rev_tree does not update
@@ -470,39 +505,43 @@ merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) ->
% to know whether the doc is deleted between iterations. Since we still
% need to know if the doc is deleted after the merge happens, we have to
% set it here.
- NewDocInfo2 = case MergeConflicts of
- true ->
- NewDocInfo1#full_doc_info{
- deleted = couch_doc:is_deleted(NewDocInfo1)
- };
- false ->
- NewDocInfo1
- end,
- if NewDocInfo2 == OldDocInfo ->
- % nothing changed
- merge_rev_trees(RestDocsList, RestOldInfo, Acc);
- true ->
-        % We have updated the document, give it a new update_seq. It's
- % important to note that the update_seq on OldDocInfo should
- % be identical to the value on NewDocInfo1.
- OldSeq = OldDocInfo#full_doc_info.update_seq,
- NewDocInfo3 = NewDocInfo2#full_doc_info{
- update_seq = Acc#merge_acc.cur_seq + 1
- },
- RemoveSeqs = case OldSeq of
- 0 -> Acc#merge_acc.rem_seqs;
- _ -> [OldSeq | Acc#merge_acc.rem_seqs]
+ NewDocInfo2 =
+ case MergeConflicts of
+ true ->
+ NewDocInfo1#full_doc_info{
+ deleted = couch_doc:is_deleted(NewDocInfo1)
+ };
+ false ->
+ NewDocInfo1
end,
- NewAcc = Acc#merge_acc{
- add_infos = [NewDocInfo3 | Acc#merge_acc.add_infos],
- rem_seqs = RemoveSeqs,
- cur_seq = Acc#merge_acc.cur_seq + 1
- },
- merge_rev_trees(RestDocsList, RestOldInfo, NewAcc)
+ if
+ NewDocInfo2 == OldDocInfo ->
+ % nothing changed
+ merge_rev_trees(RestDocsList, RestOldInfo, Acc);
+ true ->
+            % We have updated the document, give it a new update_seq. It's
+ % important to note that the update_seq on OldDocInfo should
+ % be identical to the value on NewDocInfo1.
+ OldSeq = OldDocInfo#full_doc_info.update_seq,
+ NewDocInfo3 = NewDocInfo2#full_doc_info{
+ update_seq = Acc#merge_acc.cur_seq + 1
+ },
+ RemoveSeqs =
+ case OldSeq of
+ 0 -> Acc#merge_acc.rem_seqs;
+ _ -> [OldSeq | Acc#merge_acc.rem_seqs]
+ end,
+ NewAcc = Acc#merge_acc{
+ add_infos = [NewDocInfo3 | Acc#merge_acc.add_infos],
+ rem_seqs = RemoveSeqs,
+ cur_seq = Acc#merge_acc.cur_seq + 1
+ },
+ merge_rev_trees(RestDocsList, RestOldInfo, NewAcc)
end.
-merge_rev_tree(OldInfo, NewDoc, Client, false)
- when OldInfo#full_doc_info.deleted ->
+merge_rev_tree(OldInfo, NewDoc, Client, false) when
+ OldInfo#full_doc_info.deleted
+->
% We're recreating a document that was previously
% deleted. To check that this is a recreation from
% the root we assert that the new document has a
@@ -517,28 +556,29 @@ merge_rev_tree(OldInfo, NewDoc, Client, false)
case RevDepth == 1 andalso not NewDeleted of
true ->
% Update the new doc based on revisions in OldInfo
- #doc_info{revs=[WinningRev | _]} = couch_doc:to_doc_info(OldInfo),
- #rev_info{rev={OldPos, OldRev}} = WinningRev,
- Body = case couch_util:get_value(comp_body, NewDoc#doc.meta) of
- CompBody when is_binary(CompBody) ->
- couch_compress:decompress(CompBody);
- _ ->
- NewDoc#doc.body
- end,
+ #doc_info{revs = [WinningRev | _]} = couch_doc:to_doc_info(OldInfo),
+ #rev_info{rev = {OldPos, OldRev}} = WinningRev,
+ Body =
+ case couch_util:get_value(comp_body, NewDoc#doc.meta) of
+ CompBody when is_binary(CompBody) ->
+ couch_compress:decompress(CompBody);
+ _ ->
+ NewDoc#doc.body
+ end,
RevIdDoc = NewDoc#doc{
revs = {OldPos, [OldRev]},
body = Body
},
NewRevId = couch_db:new_revid(RevIdDoc),
- NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
+ NewDoc2 = NewDoc#doc{revs = {OldPos + 1, [NewRevId, OldRev]}},
% Merge our modified new doc into the tree
- #full_doc_info{rev_tree=OldTree} = OldInfo,
+ #full_doc_info{rev_tree = OldTree} = OldInfo,
NewTree0 = couch_doc:to_path(NewDoc2),
case couch_key_tree:merge(OldTree, NewTree0) of
{NewTree1, new_leaf} ->
% We changed the revision id so inform the caller
- send_result(Client, NewDoc, {ok, {OldPos+1, NewRevId}}),
+ send_result(Client, NewDoc, {ok, {OldPos + 1, NewRevId}}),
OldInfo#full_doc_info{
rev_tree = NewTree1,
deleted = false
@@ -615,34 +655,44 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) ->
UpdateSeq = couch_db_engine:get_update_seq(Db),
RevsLimit = couch_db_engine:get_revs_limit(Db),
- Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
+ Ids = [Id || [{_Client, #doc{id = Id}} | _] <- DocsList],
% lookup up the old documents, if they exist.
OldDocLookups = couch_db_engine:open_docs(Db, Ids),
- OldDocInfos = lists:zipwith(fun
- (_Id, #full_doc_info{} = FDI) ->
- FDI;
- (Id, not_found) ->
- #full_doc_info{id=Id}
- end, Ids, OldDocLookups),
+ OldDocInfos = lists:zipwith(
+ fun
+ (_Id, #full_doc_info{} = FDI) ->
+ FDI;
+ (Id, not_found) ->
+ #full_doc_info{id = Id}
+ end,
+ Ids,
+ OldDocLookups
+ ),
%% Get the list of full partitions
- FullPartitions = case couch_db:is_partitioned(Db) of
- true ->
- case max_partition_size() of
- N when N =< 0 ->
- [];
- Max ->
- Partitions = lists:usort(lists:flatmap(fun(Id) ->
- case couch_partition:extract(Id) of
- undefined -> [];
- {Partition, _} -> [Partition]
- end
- end, Ids)),
- [P || P <- Partitions, partition_size(Db, P) >= Max]
- end;
- false ->
- []
- end,
+ FullPartitions =
+ case couch_db:is_partitioned(Db) of
+ true ->
+ case max_partition_size() of
+ N when N =< 0 ->
+ [];
+ Max ->
+ Partitions = lists:usort(
+ lists:flatmap(
+ fun(Id) ->
+ case couch_partition:extract(Id) of
+ undefined -> [];
+ {Partition, _} -> [Partition]
+ end
+ end,
+ Ids
+ )
+ ),
+ [P || P <- Partitions, partition_size(Db, P) >= Max]
+ end;
+ false ->
+ []
+ end,
% Merge the new docs into the revision trees.
AccIn = #merge_acc{
@@ -668,8 +718,10 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) ->
{ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2),
WriteCount = length(IndexFDIs),
- couch_stats:increment_counter([couchdb, document_inserts],
- WriteCount - length(RemSeqs)),
+ couch_stats:increment_counter(
+ [couchdb, document_inserts],
+ WriteCount - length(RemSeqs)
+ ),
couch_stats:increment_counter([couchdb, document_writes], WriteCount),
couch_stats:increment_counter(
[couchdb, local_document_writes],
@@ -678,26 +730,31 @@ update_docs_int(Db, DocsList, LocalDocs, MergeConflicts) ->
% Check if we just updated any design documents, and update the validation
% funs if we did.
- UpdatedDDocIds = lists:flatmap(fun
- (<<"_design/", _/binary>> = Id) -> [Id];
- (_) -> []
- end, Ids),
+ UpdatedDDocIds = lists:flatmap(
+ fun
+ (<<"_design/", _/binary>> = Id) -> [Id];
+ (_) -> []
+ end,
+ Ids
+ ),
{ok, commit_data(Db1), UpdatedDDocIds}.
-
update_local_doc_revs(Docs) ->
- lists:foldl(fun({Client, Doc}, Acc) ->
- case increment_local_doc_revs(Doc) of
- {ok, #doc{revs = {0, [NewRev]}} = NewDoc} ->
- send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}),
- [NewDoc | Acc];
- {error, Error} ->
- send_result(Client, Doc, {error, Error}),
- Acc
- end
- end, [], Docs).
-
+ lists:foldl(
+ fun({Client, Doc}, Acc) ->
+ case increment_local_doc_revs(Doc) of
+ {ok, #doc{revs = {0, [NewRev]}} = NewDoc} ->
+ send_result(Client, Doc, {ok, {0, integer_to_binary(NewRev)}}),
+ [NewDoc | Acc];
+ {error, Error} ->
+ send_result(Client, Doc, {error, Error}),
+ Acc
+ end
+ end,
+ [],
+ Docs
+ ).
increment_local_doc_revs(#doc{deleted = true} = Doc) ->
{ok, Doc#doc{revs = {0, [0]}}};
@@ -707,15 +764,19 @@ increment_local_doc_revs(#doc{revs = {0, [RevStr | _]}} = Doc) ->
try
PrevRev = binary_to_integer(RevStr),
{ok, Doc#doc{revs = {0, [PrevRev + 1]}}}
- catch error:badarg ->
- {error, <<"Invalid rev format">>}
+ catch
+ error:badarg ->
+ {error, <<"Invalid rev format">>}
end;
increment_local_doc_revs(#doc{}) ->
{error, <<"Invalid rev format">>}.
max_partition_size() ->
- config:get_integer("couchdb", "max_partition_size",
- ?DEFAULT_MAX_PARTITION_SIZE).
+ config:get_integer(
+ "couchdb",
+ "max_partition_size",
+ ?DEFAULT_MAX_PARTITION_SIZE
+ ).
partition_size(Db, Partition) ->
{ok, Info} = couch_db:get_partition_info(Db, Partition),
@@ -750,7 +811,6 @@ estimate_size(#full_doc_info{} = FDI) ->
purge_docs(Db, []) ->
{ok, Db, []};
-
purge_docs(Db, PurgeReqs) ->
Ids = lists:usort(lists:map(fun({_UUID, Id, _Revs}) -> Id end, PurgeReqs)),
FDIs = couch_db_engine:open_docs(Db, Ids),
@@ -759,23 +819,30 @@ purge_docs(Db, PurgeReqs) ->
IdFDIs = lists:zip(Ids, FDIs),
{NewIdFDIs, Replies} = apply_purge_reqs(PurgeReqs, IdFDIs, USeq, []),
- Pairs = lists:flatmap(fun({DocId, OldFDI}) ->
- {DocId, NewFDI} = lists:keyfind(DocId, 1, NewIdFDIs),
- case {OldFDI, NewFDI} of
- {not_found, not_found} ->
- [];
- {#full_doc_info{} = A, #full_doc_info{} = A} ->
- [];
- {#full_doc_info{}, _} ->
- [{OldFDI, NewFDI}]
- end
- end, IdFDIs),
+ Pairs = lists:flatmap(
+ fun({DocId, OldFDI}) ->
+ {DocId, NewFDI} = lists:keyfind(DocId, 1, NewIdFDIs),
+ case {OldFDI, NewFDI} of
+ {not_found, not_found} ->
+ [];
+ {#full_doc_info{} = A, #full_doc_info{} = A} ->
+ [];
+ {#full_doc_info{}, _} ->
+ [{OldFDI, NewFDI}]
+ end
+ end,
+ IdFDIs
+ ),
PSeq = couch_db_engine:get_purge_seq(Db),
- {RevPInfos, _} = lists:foldl(fun({UUID, DocId, Revs}, {PIAcc, PSeqAcc}) ->
- Info = {PSeqAcc + 1, UUID, DocId, Revs},
- {[Info | PIAcc], PSeqAcc + 1}
- end, {[], PSeq}, PurgeReqs),
+ {RevPInfos, _} = lists:foldl(
+ fun({UUID, DocId, Revs}, {PIAcc, PSeqAcc}) ->
+ Info = {PSeqAcc + 1, UUID, DocId, Revs},
+ {[Info | PIAcc], PSeqAcc + 1}
+ end,
+ {[], PSeq},
+ PurgeReqs
+ ),
PInfos = lists:reverse(RevPInfos),
{ok, Db1} = couch_db_engine:purge_docs(Db, Pairs, PInfos),
@@ -784,85 +851,90 @@ purge_docs(Db, PurgeReqs) ->
couch_event:notify(Db2#db.name, updated),
{ok, Db2, Replies}.
-
apply_purge_reqs([], IdFDIs, _USeq, Replies) ->
{IdFDIs, lists:reverse(Replies)};
-
apply_purge_reqs([Req | RestReqs], IdFDIs, USeq, Replies) ->
{_UUID, DocId, Revs} = Req,
{value, {_, FDI0}, RestIdFDIs} = lists:keytake(DocId, 1, IdFDIs),
- {NewFDI, RemovedRevs, NewUSeq} = case FDI0 of
- #full_doc_info{rev_tree = Tree} ->
- case couch_key_tree:remove_leafs(Tree, Revs) of
- {_, []} ->
- % No change
- {FDI0, [], USeq};
- {[], Removed} ->
- % Completely purged
- {not_found, Removed, USeq};
- {NewTree, Removed} ->
-                    % It's possible to purge the #leaf{} that contains
- % the update_seq where this doc sits in the
- % update_seq sequence. Rather than do a bunch of
- % complicated checks we just re-label every #leaf{}
- % and reinsert it into the update_seq sequence.
- {NewTree2, NewUpdateSeq} = couch_key_tree:mapfold(fun
- (_RevId, Leaf, leaf, SeqAcc) ->
- {Leaf#leaf{seq = SeqAcc + 1},
- SeqAcc + 1};
- (_RevId, Value, _Type, SeqAcc) ->
- {Value, SeqAcc}
- end, USeq, NewTree),
-
- FDI1 = FDI0#full_doc_info{
- update_seq = NewUpdateSeq,
- rev_tree = NewTree2
- },
- {FDI1, Removed, NewUpdateSeq}
- end;
- not_found ->
- % Not found means nothing to change
- {not_found, [], USeq}
- end,
+ {NewFDI, RemovedRevs, NewUSeq} =
+ case FDI0 of
+ #full_doc_info{rev_tree = Tree} ->
+ case couch_key_tree:remove_leafs(Tree, Revs) of
+ {_, []} ->
+ % No change
+ {FDI0, [], USeq};
+ {[], Removed} ->
+ % Completely purged
+ {not_found, Removed, USeq};
+ {NewTree, Removed} ->
+ % Its possible to purge the #leaf{} that contains
+ % the update_seq where this doc sits in the
+ % update_seq sequence. Rather than do a bunch of
+ % complicated checks we just re-label every #leaf{}
+ % and reinsert it into the update_seq sequence.
+ {NewTree2, NewUpdateSeq} = couch_key_tree:mapfold(
+ fun
+ (_RevId, Leaf, leaf, SeqAcc) ->
+ {Leaf#leaf{seq = SeqAcc + 1}, SeqAcc + 1};
+ (_RevId, Value, _Type, SeqAcc) ->
+ {Value, SeqAcc}
+ end,
+ USeq,
+ NewTree
+ ),
+
+ FDI1 = FDI0#full_doc_info{
+ update_seq = NewUpdateSeq,
+ rev_tree = NewTree2
+ },
+ {FDI1, Removed, NewUpdateSeq}
+ end;
+ not_found ->
+ % Not found means nothing to change
+ {not_found, [], USeq}
+ end,
NewIdFDIs = [{DocId, NewFDI} | RestIdFDIs],
NewReplies = [{ok, RemovedRevs} | Replies],
apply_purge_reqs(RestReqs, NewIdFDIs, NewUSeq, NewReplies).
-
commit_data(Db) ->
{ok, Db1} = couch_db_engine:commit_data(Db),
Db1#db{
committed_update_seq = couch_db_engine:get_update_seq(Db)
}.
-
pair_write_info(Old, New) ->
- lists:map(fun(FDI) ->
- case lists:keyfind(FDI#full_doc_info.id, #full_doc_info.id, Old) of
- #full_doc_info{} = OldFDI -> {OldFDI, FDI};
- false -> {not_found, FDI}
- end
- end, New).
-
+ lists:map(
+ fun(FDI) ->
+ case lists:keyfind(FDI#full_doc_info.id, #full_doc_info.id, Old) of
+ #full_doc_info{} = OldFDI -> {OldFDI, FDI};
+ false -> {not_found, FDI}
+ end
+ end,
+ New
+ ).
get_meta_body_size(Meta) ->
{ejson_size, ExternalSize} = lists:keyfind(ejson_size, 1, Meta),
ExternalSize.
-
default_security_object(<<"shards/", _/binary>>) ->
case config:get("couchdb", "default_security", "admin_only") of
"admin_only" ->
- [{<<"members">>,{[{<<"roles">>,[<<"_admin">>]}]}},
- {<<"admins">>,{[{<<"roles">>,[<<"_admin">>]}]}}];
+ [
+ {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}},
+ {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}}
+ ];
Everyone when Everyone == "everyone"; Everyone == "admin_local" ->
[]
end;
default_security_object(_DbName) ->
case config:get("couchdb", "default_security", "admin_only") of
Admin when Admin == "admin_only"; Admin == "admin_local" ->
- [{<<"members">>,{[{<<"roles">>,[<<"_admin">>]}]}},
- {<<"admins">>,{[{<<"roles">>,[<<"_admin">>]}]}}];
+ [
+ {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}},
+ {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}}
+ ];
"everyone" ->
[]
end.
@@ -873,12 +945,13 @@ default_security_object(_DbName) ->
% Storage Engine) code lands this should be moved to the #db{} record.
update_idle_limit_from_config() ->
Default = integer_to_list(?IDLE_LIMIT_DEFAULT),
- IdleLimit = case config:get("couchdb", "idle_check_timeout", Default) of
- "infinity" ->
- infinity;
- Milliseconds ->
- list_to_integer(Milliseconds)
- end,
+ IdleLimit =
+ case config:get("couchdb", "idle_check_timeout", Default) of
+ "infinity" ->
+ infinity;
+ Milliseconds ->
+ list_to_integer(Milliseconds)
+ end,
put(idle_limit, IdleLimit),
IdleLimit.
@@ -893,11 +966,9 @@ hibernate_if_no_idle_limit() ->
Timeout
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
update_local_doc_revs_test_() ->
{inparallel, [
{"Test local doc with valid rev", fun t_good_local_doc/0},
@@ -905,7 +976,6 @@ update_local_doc_revs_test_() ->
{"Test deleted local doc", fun t_dead_local_doc/0}
]}.
-
t_good_local_doc() ->
Doc = #doc{
id = <<"_local/alice">>,
@@ -915,23 +985,23 @@ t_good_local_doc() ->
[NewDoc] = update_local_doc_revs([{self(), Doc}]),
?assertEqual({0, [2]}, NewDoc#doc.revs),
{ok, Result} = receive_result(Doc),
- ?assertEqual({ok,{0,<<"2">>}}, Result).
-
+ ?assertEqual({ok, {0, <<"2">>}}, Result).
t_bad_local_doc() ->
- lists:foreach(fun(BadRevs) ->
- Doc = #doc{
- id = <<"_local/alice">>,
- revs = BadRevs,
- meta = [{ref, make_ref()}]
- },
- NewDocs = update_local_doc_revs([{self(), Doc}]),
- ?assertEqual([], NewDocs),
- {ok, Result} = receive_result(Doc),
- ?assertEqual({error,<<"Invalid rev format">>}, Result)
- end, [{0, [<<"a">>]}, {1, [<<"1">>]}]).
-
-
+ lists:foreach(
+ fun(BadRevs) ->
+ Doc = #doc{
+ id = <<"_local/alice">>,
+ revs = BadRevs,
+ meta = [{ref, make_ref()}]
+ },
+ NewDocs = update_local_doc_revs([{self(), Doc}]),
+ ?assertEqual([], NewDocs),
+ {ok, Result} = receive_result(Doc),
+ ?assertEqual({error, <<"Invalid rev format">>}, Result)
+ end,
+ [{0, [<<"a">>]}, {1, [<<"1">>]}]
+ ).
t_dead_local_doc() ->
Doc = #doc{
@@ -943,8 +1013,7 @@ t_dead_local_doc() ->
[NewDoc] = update_local_doc_revs([{self(), Doc}]),
?assertEqual({0, [0]}, NewDoc#doc.revs),
{ok, Result} = receive_result(Doc),
- ?assertEqual({ok,{0,<<"0">>}}, Result).
-
+ ?assertEqual({ok, {0, <<"0">>}}, Result).
receive_result(#doc{meta = Meta}) ->
Ref = couch_util:get_value(ref, Meta),
diff --git a/src/couch/src/couch_debug.erl b/src/couch/src/couch_debug.erl
index 290d095bf..a2f4cdc87 100644
--- a/src/couch/src/couch_debug.erl
+++ b/src/couch/src/couch_debug.erl
@@ -49,6 +49,7 @@ help() ->
].
-spec help(Function :: atom()) -> ok.
+%% erlfmt-ignore
help(opened_files) ->
io:format("
opened_files()
@@ -205,9 +206,11 @@ help(Unknown) ->
[{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
opened_files() ->
- Info = [couch_file_port_info(Port)
- || Port <- erlang:ports(),
- {name, "efile"} =:= erlang:port_info(Port, name)],
+ Info = [
+ couch_file_port_info(Port)
+ || Port <- erlang:ports(),
+ {name, "efile"} =:= erlang:port_info(Port, name)
+ ],
[I || I <- Info, is_tuple(I)].
couch_file_port_info(Port) ->
@@ -223,17 +226,22 @@ couch_file_port_info(Port) ->
[{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
opened_files_by_regexp(FileRegExp) ->
{ok, RegExp} = re:compile(FileRegExp),
- lists:filter(fun({_Port, _Pid, _Fd, Path}) ->
- re:run(Path, RegExp) =/= nomatch
- end, couch_debug:opened_files()).
+ lists:filter(
+ fun({_Port, _Pid, _Fd, Path}) ->
+ re:run(Path, RegExp) =/= nomatch
+ end,
+ couch_debug:opened_files()
+ ).
-spec opened_files_contains(FileNameFragment :: iodata()) ->
[{port(), CouchFilePid :: pid(), Fd :: pid() | tuple(), FilePath :: string()}].
opened_files_contains(FileNameFragment) ->
- lists:filter(fun({_Port, _Pid, _Fd, Path}) ->
- string:str(Path, FileNameFragment) > 0
- end, couch_debug:opened_files()).
-
+ lists:filter(
+ fun({_Port, _Pid, _Fd, Path}) ->
+ string:str(Path, FileNameFragment) > 0
+ end,
+ couch_debug:opened_files()
+ ).
process_name(Pid) when is_pid(Pid) ->
Info = process_info(Pid, [registered_name, dictionary, initial_call]),
@@ -260,7 +268,8 @@ link_tree(RootPid, Info) ->
link_tree(RootPid, Info, Fun) ->
{_, Result} = link_tree(
- RootPid, [links | Info], gb_trees:empty(), 0, [RootPid], Fun),
+ RootPid, [links | Info], gb_trees:empty(), 0, [RootPid], Fun
+ ),
Result.
link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) ->
@@ -272,21 +281,23 @@ link_tree(RootPid, Info, Visited0, Pos, [Pid | Rest], Fun) ->
Visited1 = gb_trees:insert(Pid, Props, Visited0),
{links, Children} = lists:keyfind(links, 1, Props),
{Visited2, NewTree} = link_tree(
- RootPid, Info, Visited1, Pos + 1, Children, Fun),
+ RootPid, Info, Visited1, Pos + 1, Children, Fun
+ ),
{Visited3, Result} = link_tree(
- RootPid, Info, Visited2, Pos, Rest, Fun),
- {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result};
+ RootPid, Info, Visited2, Pos, Rest, Fun
+ ),
+ {Visited3, [{Pos, {Pid, Fun(Pid, Props), NewTree}}] ++ Result};
none ->
Props = info(Pid, Info),
Visited1 = gb_trees:insert(Pid, Props, Visited0),
{Visited2, Result} = link_tree(
- RootPid, Info, Visited1, Pos, Rest, Fun),
+ RootPid, Info, Visited1, Pos, Rest, Fun
+ ),
{Visited2, [{Pos, {Pid, Fun(Pid, Props), []}}] ++ Result}
end;
link_tree(_RootPid, _Info, Visited, _Pos, [], _Fun) ->
{Visited, []}.
-
info(Pid, Info) when is_pid(Pid) ->
ValidProps = [
backtrace,
@@ -340,12 +351,16 @@ info(Port, Info) when is_port(Port) ->
port_info(Port, lists:usort(Validated)).
port_info(Port, Items) ->
- lists:foldl(fun(Item, Acc) ->
- case (catch erlang:port_info(Port, Item)) of
- {Item, _Value} = Info -> [Info | Acc];
- _Else -> Acc
- end
- end, [], Items).
+ lists:foldl(
+ fun(Item, Acc) ->
+ case (catch erlang:port_info(Port, Item)) of
+ {Item, _Value} = Info -> [Info | Acc];
+ _Else -> Acc
+ end
+ end,
+ [],
+ Items
+ ).
mapfold_tree([], Acc, _Fun) ->
{[], Acc};
@@ -380,8 +395,10 @@ print_linked_processes(Name) when is_atom(Name) ->
print_linked_processes(Pid) when is_pid(Pid) ->
Info = [reductions, message_queue_len, memory],
TableSpec = [
- {50, left, name}, {12, centre, reductions},
- {19, centre, message_queue_len}, {10, centre, memory}
+ {50, left, name},
+ {12, centre, reductions},
+ {19, centre, message_queue_len},
+ {10, centre, memory}
],
Tree = linked_processes_info(Pid, Info),
print_tree(Tree, TableSpec).
@@ -390,9 +407,12 @@ id("couch_file:init" ++ _, Pid, _Props) ->
case couch_file:process_info(Pid) of
{{file_descriptor, prim_file, {Port, Fd}}, FilePath} ->
term2str([
- term2str(Fd), ":",
- term2str(Port), ":",
- shorten_path(FilePath)]);
+ term2str(Fd),
+ ":",
+ term2str(Port),
+ ":",
+ shorten_path(FilePath)
+ ]);
undefined ->
""
end;
@@ -402,8 +422,11 @@ id(_IdStr, _Pid, _Props) ->
print_couch_index_server_processes() ->
Info = [reductions, message_queue_len, memory],
TableSpec = [
- {50, left, name}, {12, centre, reductions},
- {19, centre, message_queue_len}, {14, centre, memory}, {id}
+ {50, left, name},
+ {12, centre, reductions},
+ {19, centre, message_queue_len},
+ {14, centre, memory},
+ {id}
],
Tree = link_tree(whereis(couch_index_server), Info, fun(P, Props) ->
@@ -476,31 +499,40 @@ random_processes(Pids, 0) ->
random_processes(Acc, Depth) ->
Caller = self(),
Ref = make_ref(),
- Pid = case oneof([spawn_link, open_port]) of
- spawn_monitor ->
- {P, _} = spawn_monitor(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end),
- P;
- spawn ->
- spawn(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end);
- spawn_link ->
- spawn_link(fun() ->
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end);
- open_port ->
- spawn_link(fun() ->
- Port = erlang:open_port({spawn, "sleep 10"}, []),
- true = erlang:link(Port),
- Caller ! {Ref, random_processes(Depth - 1)},
- receive looper -> ok end
- end)
- end,
+ Pid =
+ case oneof([spawn_link, open_port]) of
+ spawn_monitor ->
+ {P, _} = spawn_monitor(fun() ->
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end),
+ P;
+ spawn ->
+ spawn(fun() ->
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end);
+ spawn_link ->
+ spawn_link(fun() ->
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end);
+ open_port ->
+ spawn_link(fun() ->
+ Port = erlang:open_port({spawn, "sleep 10"}, []),
+ true = erlang:link(Port),
+ Caller ! {Ref, random_processes(Depth - 1)},
+ receive
+ looper -> ok
+ end
+ end)
+ end,
receive
{Ref, Pids} -> random_processes([Pid | Pids] ++ Acc, Depth - 1)
end.
@@ -508,7 +540,6 @@ random_processes(Acc, Depth) ->
oneof(Options) ->
lists:nth(couch_rand:uniform(length(Options)), Options).
-
tree() ->
[InitialPid | _] = Processes = random_processes(5),
{InitialPid, Processes, link_tree(InitialPid)}.
@@ -524,7 +555,8 @@ link_tree_test_() ->
"link_tree tests",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_have_same_shape/1,
fun should_include_extra_info/1
@@ -534,16 +566,16 @@ link_tree_test_() ->
should_have_same_shape({InitialPid, _Processes, Tree}) ->
?_test(begin
- InfoTree = linked_processes_info(InitialPid, []),
- ?assert(is_equal(InfoTree, Tree)),
- ok
+ InfoTree = linked_processes_info(InitialPid, []),
+ ?assert(is_equal(InfoTree, Tree)),
+ ok
end).
should_include_extra_info({InitialPid, _Processes, _Tree}) ->
Info = [reductions, message_queue_len, memory],
?_test(begin
- InfoTree = linked_processes_info(InitialPid, Info),
- map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) ->
+ InfoTree = linked_processes_info(InitialPid, Info),
+ map_tree(InfoTree, fun(Key, {_Id, Props}, _Pos) ->
case Key of
Pid when is_pid(Pid) ->
?assert(lists:keymember(reductions, 1, Props)),
@@ -553,11 +585,12 @@ should_include_extra_info({InitialPid, _Processes, _Tree}) ->
ok
end,
Props
- end),
- ok
+ end),
+ ok
end).
-is_equal([], []) -> true;
+is_equal([], []) ->
+ true;
is_equal([{Pos, {Pid, _, A}} | RestA], [{Pos, {Pid, _, B}} | RestB]) ->
case is_equal(RestA, RestB) of
false -> false;
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl
index ec16d21db..5d44e456c 100644
--- a/src/couch/src/couch_doc.erl
+++ b/src/couch/src/couch_doc.erl
@@ -12,7 +12,7 @@
-module(couch_doc).
--export([to_doc_info/1,to_doc_info_path/1,parse_rev/1,parse_revs/1,rev_to_str/1,revs_to_strs/1]).
+-export([to_doc_info/1, to_doc_info_path/1, parse_rev/1, parse_revs/1, rev_to_str/1, revs_to_strs/1]).
-export([from_json_obj/1, from_json_obj_validate/1]).
-export([from_json_obj/2, from_json_obj_validate/2]).
-export([to_json_obj/2, has_stubs/1, merge_stubs/2]).
@@ -26,15 +26,14 @@
-export([with_ejson_body/1]).
-export([is_deleted/1]).
-
-include_lib("couch/include/couch_db.hrl").
-spec to_path(#doc{}) -> path().
-to_path(#doc{revs={Start, RevIds}}=Doc) ->
+to_path(#doc{revs = {Start, RevIds}} = Doc) ->
[Branch] = to_branch(Doc, lists:reverse(RevIds)),
{Start - length(RevIds) + 1, Branch}.
--spec to_branch(#doc{}, [RevId::binary()]) -> [branch()].
+-spec to_branch(#doc{}, [RevId :: binary()]) -> [branch()].
to_branch(Doc, [RevId]) ->
[{RevId, Doc, []}];
to_branch(Doc, [RevId | Rest]) ->
@@ -43,8 +42,8 @@ to_branch(Doc, [RevId | Rest]) ->
% helpers used by to_json_obj
to_json_rev(0, []) ->
[];
-to_json_rev(Start, [FirstRevId|_]) ->
- [{<<"_rev">>, ?l2b([integer_to_list(Start),"-",revid_to_str(FirstRevId)])}].
+to_json_rev(Start, [FirstRevId | _]) ->
+ [{<<"_rev">>, ?l2b([integer_to_list(Start), "-", revid_to_str(FirstRevId)])}].
to_json_body(true, {Body}) ->
Body ++ [{<<"_deleted">>, true}];
@@ -52,53 +51,69 @@ to_json_body(false, {Body}) ->
Body.
to_json_revisions(Options, Start, RevIds0) ->
- RevIds = case proplists:get_value(revs, Options) of
+ RevIds =
+ case proplists:get_value(revs, Options) of
+ true ->
+ RevIds0;
+ Num when is_integer(Num), Num > 0 ->
+ lists:sublist(RevIds0, Num);
+ _ ->
+ []
+ end,
+ if
+ RevIds == [] ->
+ [];
true ->
- RevIds0;
- Num when is_integer(Num), Num > 0 ->
- lists:sublist(RevIds0, Num);
- _ ->
- []
- end,
- if RevIds == [] -> []; true ->
- [{<<"_revisions">>, {[{<<"start">>, Start},
- {<<"ids">>, [revid_to_str(R) ||R <- RevIds]}]}}]
+ [
+ {<<"_revisions">>,
+ {[
+ {<<"start">>, Start},
+ {<<"ids">>, [revid_to_str(R) || R <- RevIds]}
+ ]}}
+ ]
end.
-
revid_to_str(RevId) when size(RevId) =:= 16 ->
?l2b(couch_util:to_hex(RevId));
revid_to_str(RevId) ->
RevId.
rev_to_str({Pos, RevId}) ->
- ?l2b([integer_to_list(Pos),"-",revid_to_str(RevId)]).
-
+ ?l2b([integer_to_list(Pos), "-", revid_to_str(RevId)]).
revs_to_strs([]) ->
[];
-revs_to_strs([{Pos, RevId}| Rest]) ->
+revs_to_strs([{Pos, RevId} | Rest]) ->
[rev_to_str({Pos, RevId}) | revs_to_strs(Rest)].
to_json_meta(Meta) ->
lists:flatmap(
- fun({revs_info, Start, RevsInfo}) ->
- {JsonRevsInfo, _Pos} = lists:mapfoldl(
- fun({RevId, Status}, PosAcc) ->
- JsonObj = {[{<<"rev">>, rev_to_str({PosAcc, RevId})},
- {<<"status">>, ?l2b(atom_to_list(Status))}]},
- {JsonObj, PosAcc - 1}
- end, Start, RevsInfo),
- [{<<"_revs_info">>, JsonRevsInfo}];
- ({local_seq, Seq}) ->
- [{<<"_local_seq">>, Seq}];
- ({conflicts, Conflicts}) ->
- [{<<"_conflicts">>, revs_to_strs(Conflicts)}];
- ({deleted_conflicts, DConflicts}) ->
- [{<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}];
- (_) ->
- []
- end, Meta).
+ fun
+ ({revs_info, Start, RevsInfo}) ->
+ {JsonRevsInfo, _Pos} = lists:mapfoldl(
+ fun({RevId, Status}, PosAcc) ->
+ JsonObj =
+ {[
+ {<<"rev">>, rev_to_str({PosAcc, RevId})},
+ {<<"status">>, ?l2b(atom_to_list(Status))}
+ ]},
+ {JsonObj, PosAcc - 1}
+ end,
+ Start,
+ RevsInfo
+ ),
+ [{<<"_revs_info">>, JsonRevsInfo}];
+ ({local_seq, Seq}) ->
+ [{<<"_local_seq">>, Seq}];
+ ({conflicts, Conflicts}) ->
+ [{<<"_conflicts">>, revs_to_strs(Conflicts)}];
+ ({deleted_conflicts, DConflicts}) ->
+ [{<<"_deleted_conflicts">>, revs_to_strs(DConflicts)}];
+ (_) ->
+ []
+ end,
+ Meta
+ ).
to_json_attachments(Attachments, Options) ->
to_json_attachments(
@@ -117,14 +132,23 @@ to_json_attachments(Atts, OutputData, Follows, ShowEnc) ->
to_json_obj(Doc, Options) ->
doc_to_json_obj(with_ejson_body(Doc), Options).
-doc_to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs={Start, RevIds},
- meta=Meta}=Doc,Options)->
- {[{<<"_id">>, Id}]
- ++ to_json_rev(Start, RevIds)
- ++ to_json_body(Del, Body)
- ++ to_json_revisions(Options, Start, RevIds)
- ++ to_json_meta(Meta)
- ++ to_json_attachments(Doc#doc.atts, Options)
+doc_to_json_obj(
+ #doc{
+ id = Id,
+ deleted = Del,
+ body = Body,
+ revs = {Start, RevIds},
+ meta = Meta
+ } = Doc,
+ Options
+) ->
+ {
+ [{<<"_id">>, Id}] ++
+ to_json_rev(Start, RevIds) ++
+ to_json_body(Del, Body) ++
+ to_json_revisions(Options, Start, RevIds) ++
+ to_json_meta(Meta) ++
+ to_json_attachments(Doc#doc.atts, Options)
}.
from_json_obj_validate(EJson) ->
@@ -135,48 +159,54 @@ from_json_obj_validate(EJson, DbName) ->
Doc = from_json_obj(EJson, DbName),
case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of
true ->
- validate_attachment_sizes(Doc#doc.atts),
- Doc;
+ validate_attachment_sizes(Doc#doc.atts),
+ Doc;
false ->
throw({request_entity_too_large, Doc#doc.id})
end.
-
validate_attachment_sizes([]) ->
ok;
validate_attachment_sizes(Atts) ->
MaxAttSize = couch_att:max_attachment_size(),
- lists:foreach(fun(Att) ->
- AttName = couch_att:fetch(name, Att),
- AttSize = couch_att:fetch(att_len, Att),
- couch_att:validate_attachment_size(AttName, AttSize, MaxAttSize)
- end, Atts).
-
+ lists:foreach(
+ fun(Att) ->
+ AttName = couch_att:fetch(name, Att),
+ AttSize = couch_att:fetch(att_len, Att),
+ couch_att:validate_attachment_size(AttName, AttSize, MaxAttSize)
+ end,
+ Atts
+ ).
from_json_obj({Props}) ->
from_json_obj({Props}, undefined).
from_json_obj({Props}, DbName) ->
- transfer_fields(Props, #doc{body=[]}, DbName);
+ transfer_fields(Props, #doc{body = []}, DbName);
from_json_obj(_Other, _) ->
throw({bad_request, "Document must be a JSON object"}).
parse_revid(RevId) when size(RevId) =:= 32 ->
RevInt = erlang:list_to_integer(?b2l(RevId), 16),
- <<RevInt:128>>;
+ <<RevInt:128>>;
parse_revid(RevId) when length(RevId) =:= 32 ->
RevInt = erlang:list_to_integer(RevId, 16),
- <<RevInt:128>>;
+ <<RevInt:128>>;
parse_revid(RevId) when is_binary(RevId) ->
RevId;
parse_revid(RevId) when is_list(RevId) ->
?l2b(RevId).
-
parse_rev(Rev) when is_binary(Rev) ->
parse_rev(?b2l(Rev));
parse_rev(Rev) when is_list(Rev) ->
- SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev),
+ SplitRev = lists:splitwith(
+ fun
+ ($-) -> false;
+ (_) -> true
+ end,
+ Rev
+ ),
case SplitRev of
{Pos, [$- | RevId]} ->
try
@@ -185,7 +215,8 @@ parse_rev(Rev) when is_list(Rev) ->
catch
error:badarg -> throw({bad_request, <<"Invalid rev format">>})
end;
- _Else -> throw({bad_request, <<"Invalid rev format">>})
+ _Else ->
+ throw({bad_request, <<"Invalid rev format">>})
end;
parse_rev(_BadRev) ->
throw({bad_request, <<"Invalid rev format">>}).
@@ -197,10 +228,11 @@ parse_revs([Rev | Rest]) ->
parse_revs(_) ->
throw({bad_request, "Invalid list of revisions"}).
-
validate_docid(DocId, DbName) ->
- case DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
- couch_db:is_system_db_name(DocId) of
+ case
+ DbName =:= ?l2b(config:get("mem3", "shards_db", "_dbs")) andalso
+ couch_db:is_system_db_name(DocId)
+ of
true ->
ok;
false ->
@@ -214,10 +246,11 @@ validate_docid(<<"_design/">>) ->
validate_docid(<<"_local/">>) ->
throw({illegal_docid, <<"Illegal document id `_local/`">>});
validate_docid(Id) when is_binary(Id) ->
- MaxLen = case config:get("couchdb", "max_document_id_length", "infinity") of
- "infinity" -> infinity;
- IntegerVal -> list_to_integer(IntegerVal)
- end,
+ MaxLen =
+ case config:get("couchdb", "max_document_id_length", "infinity") of
+ "infinity" -> infinity;
+ IntegerVal -> list_to_integer(IntegerVal)
+ end,
case MaxLen > 0 andalso byte_size(Id) > MaxLen of
true -> throw({illegal_docid, <<"Document id is too long">>});
false -> ok
@@ -227,69 +260,72 @@ validate_docid(Id) when is_binary(Id) ->
true -> ok
end,
case Id of
- <<"_design/", _/binary>> -> ok;
- <<"_local/", _/binary>> -> ok;
- <<"_", _/binary>> ->
- case couch_db_plugin:validate_docid(Id) of
- true ->
- ok;
- false ->
- throw(
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>})
- end;
- _Else -> ok
+ <<"_design/", _/binary>> ->
+ ok;
+ <<"_local/", _/binary>> ->
+ ok;
+ <<"_", _/binary>> ->
+ case couch_db_plugin:validate_docid(Id) of
+ true ->
+ ok;
+ false ->
+ throw(
+ {illegal_docid, <<"Only reserved document ids may start with underscore.">>}
+ )
+ end;
+ _Else ->
+ ok
end;
validate_docid(Id) ->
couch_log:debug("Document id is not a string: ~p", [Id]),
throw({illegal_docid, <<"Document id must be a string">>}).
-transfer_fields([], #doc{body=Fields}=Doc, _) ->
+transfer_fields([], #doc{body = Fields} = Doc, _) ->
% convert fields back to json object
- Doc#doc{body={lists:reverse(Fields)}};
-
+ Doc#doc{body = {lists:reverse(Fields)}};
transfer_fields([{<<"_id">>, Id} | Rest], Doc, DbName) ->
validate_docid(Id, DbName),
- transfer_fields(Rest, Doc#doc{id=Id}, DbName);
-
-transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs={0, []}}=Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{id = Id}, DbName);
+transfer_fields([{<<"_rev">>, Rev} | Rest], #doc{revs = {0, []}} = Doc, DbName) ->
{Pos, RevId} = parse_rev(Rev),
- transfer_fields(Rest,
- Doc#doc{revs={Pos, [RevId]}}, DbName);
-
+ transfer_fields(
+ Rest,
+ Doc#doc{revs = {Pos, [RevId]}},
+ DbName
+ );
transfer_fields([{<<"_rev">>, _Rev} | Rest], Doc, DbName) ->
% we already got the rev from the _revisions
transfer_fields(Rest, Doc, DbName);
-
transfer_fields([{<<"_attachments">>, {JsonBins}} | Rest], Doc, DbName) ->
Atts = [couch_att:from_json(Name, Props) || {Name, {Props}} <- JsonBins],
- transfer_fields(Rest, Doc#doc{atts=Atts}, DbName);
-
+ transfer_fields(Rest, Doc#doc{atts = Atts}, DbName);
transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc, DbName) ->
RevIds = couch_util:get_value(<<"ids">>, Props),
Start = couch_util:get_value(<<"start">>, Props),
- if not is_integer(Start) ->
- throw({doc_validation, "_revisions.start isn't an integer."});
- not is_list(RevIds) ->
- throw({doc_validation, "_revisions.ids isn't a array."});
- true ->
- ok
+ if
+ not is_integer(Start) ->
+ throw({doc_validation, "_revisions.start isn't an integer."});
+ not is_list(RevIds) ->
+ throw({doc_validation, "_revisions.ids isn't a array."});
+ true ->
+ ok
end,
- RevIds2 = lists:map(fun(RevId) ->
- try
- parse_revid(RevId)
- catch
- error:function_clause ->
- throw({doc_validation, "RevId isn't a string"});
- error:badarg ->
- throw({doc_validation, "RevId isn't a valid hexadecimal"})
- end
- end, RevIds),
- transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}}, DbName);
-
+ RevIds2 = lists:map(
+ fun(RevId) ->
+ try
+ parse_revid(RevId)
+ catch
+ error:function_clause ->
+ throw({doc_validation, "RevId isn't a string"});
+ error:badarg ->
+ throw({doc_validation, "RevId isn't a valid hexadecimal"})
+ end
+ end,
+ RevIds
+ ),
+ transfer_fields(Rest, Doc#doc{revs = {Start, RevIds2}}, DbName);
transfer_fields([{<<"_deleted">>, B} | Rest], Doc, DbName) when is_boolean(B) ->
- transfer_fields(Rest, Doc#doc{deleted=B}, DbName);
-
+ transfer_fields(Rest, Doc#doc{deleted = B}, DbName);
% ignored fields
transfer_fields([{<<"_revs_info">>, _} | Rest], Doc, DbName) ->
transfer_fields(Rest, Doc, DbName);
@@ -299,36 +335,49 @@ transfer_fields([{<<"_conflicts">>, _} | Rest], Doc, DbName) ->
transfer_fields(Rest, Doc, DbName);
transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc, DbName) ->
transfer_fields(Rest, Doc, DbName);
-
% special field for per doc access control, for future compatibility
-transfer_fields([{<<"_access">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-
+transfer_fields(
+ [{<<"_access">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
% special fields for replication documents
-transfer_fields([{<<"_replication_state">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_state_time">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_state_reason">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_id">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-transfer_fields([{<<"_replication_stats">>, _} = Field | Rest],
- #doc{body=Fields} = Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName);
-
+transfer_fields(
+ [{<<"_replication_state">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_state_time">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_state_reason">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_id">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
+transfer_fields(
+ [{<<"_replication_stats">>, _} = Field | Rest],
+ #doc{body = Fields} = Doc,
+ DbName
+) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName);
% unknown special field
-transfer_fields([{<<"_",Name/binary>>, _} | _], _, _) ->
- throw({doc_validation,
- ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
-
-transfer_fields([Field | Rest], #doc{body=Fields}=Doc, DbName) ->
- transfer_fields(Rest, Doc#doc{body=[Field|Fields]}, DbName).
+transfer_fields([{<<"_", Name/binary>>, _} | _], _, _) ->
+ throw({doc_validation, ?l2b(io_lib:format("Bad special document member: _~s", [Name]))});
+transfer_fields([Field | Rest], #doc{body = Fields} = Doc, DbName) ->
+ transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName).
to_doc_info(FullDocInfo) ->
{DocInfo, _Path} = to_doc_info_path(FullDocInfo),
@@ -340,10 +389,11 @@ max_seq(Tree, UpdateSeq) ->
{_Deleted, _DiskPos, OldTreeSeq} ->
% Older versions didn't track data sizes.
erlang:max(MaxOldSeq, OldTreeSeq);
- {_Deleted, _DiskPos, OldTreeSeq, _Size} -> % necessary clause?
+ % necessary clause?
+ {_Deleted, _DiskPos, OldTreeSeq, _Size} ->
% Older versions didn't store #leaf records.
erlang:max(MaxOldSeq, OldTreeSeq);
- #leaf{seq=OldTreeSeq} ->
+ #leaf{seq = OldTreeSeq} ->
erlang:max(MaxOldSeq, OldTreeSeq);
_ ->
MaxOldSeq
@@ -351,20 +401,25 @@ max_seq(Tree, UpdateSeq) ->
end,
couch_key_tree:fold(FoldFun, UpdateSeq, Tree).
-to_doc_info_path(#full_doc_info{id=Id,rev_tree=Tree,update_seq=FDISeq}) ->
+to_doc_info_path(#full_doc_info{id = Id, rev_tree = Tree, update_seq = FDISeq}) ->
RevInfosAndPath = [
- {rev_info(Node), Path} || {_Leaf, Path} = Node <-
+ {rev_info(Node), Path}
+ || {_Leaf, Path} = Node <-
couch_key_tree:get_all_leafs(Tree)
],
SortedRevInfosAndPath = lists:sort(
- fun({#rev_info{deleted=DeletedA,rev=RevA}, _PathA},
- {#rev_info{deleted=DeletedB,rev=RevB}, _PathB}) ->
+ fun(
+ {#rev_info{deleted = DeletedA, rev = RevA}, _PathA},
+ {#rev_info{deleted = DeletedB, rev = RevB}, _PathB}
+ ) ->
% sort descending by {not deleted, rev}
{not DeletedA, RevA} > {not DeletedB, RevB}
- end, RevInfosAndPath),
- [{_RevInfo, WinPath}|_] = SortedRevInfosAndPath,
+ end,
+ RevInfosAndPath
+ ),
+ [{_RevInfo, WinPath} | _] = SortedRevInfosAndPath,
RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath],
- {#doc_info{id=Id, high_seq=max_seq(Tree, FDISeq), revs=RevInfos}, WinPath}.
+ {#doc_info{id = Id, high_seq = max_seq(Tree, FDISeq), revs = RevInfos}, WinPath}.
rev_info({#leaf{} = Leaf, {Pos, [RevId | _]}}) ->
#rev_info{
@@ -381,53 +436,56 @@ rev_info({#doc{} = Doc, {Pos, [RevId | _]}}) ->
rev = {Pos, RevId}
}.
-is_deleted(#full_doc_info{rev_tree=Tree}) ->
+is_deleted(#full_doc_info{rev_tree = Tree}) ->
is_deleted(Tree);
is_deleted(Tree) ->
Leafs = couch_key_tree:get_all_leafs(Tree),
try
- lists:foldl(fun
- ({#leaf{deleted=false},_}, _) ->
- throw(not_deleted);
- ({#doc{deleted=false},_}, _) ->
- throw(not_deleted);
- (_, Acc) ->
- Acc
- end, nil, Leafs),
+ lists:foldl(
+ fun
+ ({#leaf{deleted = false}, _}, _) ->
+ throw(not_deleted);
+ ({#doc{deleted = false}, _}, _) ->
+ throw(not_deleted);
+ (_, Acc) ->
+ Acc
+ end,
+ nil,
+ Leafs
+ ),
true
- catch throw:not_deleted ->
- false
+ catch
+ throw:not_deleted ->
+ false
end.
-
get_validate_doc_fun({Props}) ->
get_validate_doc_fun(couch_doc:from_json_obj({Props}));
-get_validate_doc_fun(#doc{body={Props}}=DDoc) ->
+get_validate_doc_fun(#doc{body = {Props}} = DDoc) ->
case couch_util:get_value(<<"validate_doc_update">>, Props) of
- undefined ->
- nil;
- _Else ->
- fun(EditDoc, DiskDoc, Ctx, SecObj) ->
- couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
- end
+ undefined ->
+ nil;
+ _Else ->
+ fun(EditDoc, DiskDoc, Ctx, SecObj) ->
+ couch_query_servers:validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj)
+ end
end.
-
-has_stubs(#doc{atts=Atts}) ->
+has_stubs(#doc{atts = Atts}) ->
lists:any(fun couch_att:is_stub/1, Atts);
has_stubs(Atts) ->
lists:any(fun couch_att:is_stub/1, Atts).
merge_stubs(#doc{id = Id}, nil) ->
throw({missing_stub, <<"Previous revision missing for document ", Id/binary>>});
-merge_stubs(#doc{id=Id,atts=MemBins}=StubsDoc, #doc{atts=DiskBins}) ->
+merge_stubs(#doc{id = Id, atts = MemBins} = StubsDoc, #doc{atts = DiskBins}) ->
case couch_att:merge_stubs(MemBins, DiskBins) of
{ok, MergedBins} ->
StubsDoc#doc{atts = MergedBins};
{missing, Name} ->
- throw({missing_stub,
- <<"Invalid attachment stub in ", Id/binary, " for ", Name/binary>>
- })
+ throw(
+ {missing_stub, <<"Invalid attachment stub in ", Id/binary, " for ", Name/binary>>}
+ )
end.
len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
@@ -435,28 +493,38 @@ len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, SendEncodedAtts) ->
AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts),
couch_httpd_multipart:length_multipart_stream(Boundary, JsonBytes, AttsDecoded).
-
-doc_to_multi_part_stream(Boundary, JsonBytes, Atts, WriteFun,
- SendEncodedAtts) ->
- AttsToInclude = lists:filter(fun(Att)-> couch_att:fetch(data, Att) /= stub end, Atts),
+doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Atts,
+ WriteFun,
+ SendEncodedAtts
+) ->
+ AttsToInclude = lists:filter(fun(Att) -> couch_att:fetch(data, Att) /= stub end, Atts),
AttsDecoded = decode_attributes(AttsToInclude, SendEncodedAtts),
- AttFun = case SendEncodedAtts of
- false -> fun couch_att:foldl_decode/3;
- true -> fun couch_att:foldl/3
- end,
+ AttFun =
+ case SendEncodedAtts of
+ false -> fun couch_att:foldl_decode/3;
+ true -> fun couch_att:foldl/3
+ end,
couch_httpd_multipart:encode_multipart_stream(
- Boundary, JsonBytes, AttsDecoded, WriteFun, AttFun).
+ Boundary, JsonBytes, AttsDecoded, WriteFun, AttFun
+ ).
decode_attributes(Atts, SendEncodedAtts) ->
- lists:map(fun(Att) ->
- [Name, AttLen, DiskLen, Type, Encoding] =
- couch_att:fetch([name, att_len, disk_len, type, encoding], Att),
- Len = case SendEncodedAtts of
- true -> AttLen;
- false -> DiskLen
- end,
- {Att, Name, Len, Type, Encoding}
- end, Atts).
+ lists:map(
+ fun(Att) ->
+ [Name, AttLen, DiskLen, Type, Encoding] =
+ couch_att:fetch([name, att_len, disk_len, type, encoding], Att),
+ Len =
+ case SendEncodedAtts of
+ true -> AttLen;
+ false -> DiskLen
+ end,
+ {Att, Name, Len, Type, Encoding}
+ end,
+ Atts
+ ).
doc_from_multi_part_stream(ContentType, DataFun) ->
doc_from_multi_part_stream(ContentType, DataFun, make_ref()).
@@ -466,25 +534,32 @@ doc_from_multi_part_stream(ContentType, DataFun, Ref) ->
doc_from_multi_part_stream(ContentType, DataFun, Ref, ValidateDocLimits) ->
case couch_httpd_multipart:decode_multipart_stream(ContentType, DataFun, Ref) of
- {{started_open_doc_revs, NewRef}, Parser, _ParserRef} ->
- restart_open_doc_revs(Parser, Ref, NewRef);
- {{doc_bytes, Ref, DocBytes}, Parser, ParserRef} ->
- Doc = case ValidateDocLimits of
- true ->
- from_json_obj_validate(?JSON_DECODE(DocBytes));
- false ->
- from_json_obj(?JSON_DECODE(DocBytes))
- end,
- erlang:put(mochiweb_request_recv, true),
- % we'll send the Parser process ID to the remote nodes so they can
- % retrieve their own copies of the attachment data
- WithParser = fun(follows) -> {follows, Parser, Ref}; (D) -> D end,
- Atts = [couch_att:transform(data, WithParser, A) || A <- Doc#doc.atts],
- WaitFun = fun() ->
- receive {'DOWN', ParserRef, _, _, _} -> ok end
- end,
- {ok, Doc#doc{atts=Atts}, WaitFun, Parser};
- ok -> ok
+ {{started_open_doc_revs, NewRef}, Parser, _ParserRef} ->
+ restart_open_doc_revs(Parser, Ref, NewRef);
+ {{doc_bytes, Ref, DocBytes}, Parser, ParserRef} ->
+ Doc =
+ case ValidateDocLimits of
+ true ->
+ from_json_obj_validate(?JSON_DECODE(DocBytes));
+ false ->
+ from_json_obj(?JSON_DECODE(DocBytes))
+ end,
+ erlang:put(mochiweb_request_recv, true),
+ % we'll send the Parser process ID to the remote nodes so they can
+ % retrieve their own copies of the attachment data
+ WithParser = fun
+ (follows) -> {follows, Parser, Ref};
+ (D) -> D
+ end,
+ Atts = [couch_att:transform(data, WithParser, A) || A <- Doc#doc.atts],
+ WaitFun = fun() ->
+ receive
+ {'DOWN', ParserRef, _, _, _} -> ok
+ end
+ end,
+ {ok, Doc#doc{atts = Atts}, WaitFun, Parser};
+ ok ->
+ ok
end.
restart_open_doc_revs(Parser, Ref, NewRef) ->
@@ -493,7 +568,6 @@ restart_open_doc_revs(Parser, Ref, NewRef) ->
flush_parser_messages(Ref),
erlang:error({restart_open_doc_revs, NewRef}).
-
flush_parser_messages(Ref) ->
receive
{headers, Ref, _} ->
@@ -508,7 +582,6 @@ flush_parser_messages(Ref) ->
ok
end.
-
with_ejson_body(#doc{body = Body} = Doc) when is_binary(Body) ->
Doc#doc{body = couch_compress:decompress(Body)};
with_ejson_body(#doc{body = {_}} = Doc) ->
diff --git a/src/couch/src/couch_ejson_compare.erl b/src/couch/src/couch_ejson_compare.erl
index b02b9ba7c..628bc2fab 100644
--- a/src/couch/src/couch_ejson_compare.erl
+++ b/src/couch/src/couch_ejson_compare.erl
@@ -27,10 +27,8 @@
compare_strings_nif/2
]).
-
-on_load(init/0).
-
init() ->
NumScheds = erlang:system_info(schedulers),
Dir = code:priv_dir(couch),
@@ -39,65 +37,54 @@ init() ->
% partitioned row comparison
less({p, PA, A}, {p, PB, B}) ->
less([PA, A], [PB, B]);
-
less(A, B) ->
try
less_nif(A, B)
catch
- error:max_depth_error ->
- % The EJSON structure is too deep, fallback to Erlang land.
- less_erl(A, B)
+ error:max_depth_error ->
+ % The EJSON structure is too deep, fallback to Erlang land.
+ less_erl(A, B)
end.
less_json_ids({JsonA, IdA}, {JsonB, IdB}) ->
case less(JsonA, JsonB) of
- 0 ->
- IdA < IdB;
- Result ->
- Result < 0
+ 0 ->
+ IdA < IdB;
+ Result ->
+ Result < 0
end.
-less_json(A,B) ->
+less_json(A, B) ->
less(A, B) < 0.
-
get_icu_version() ->
erlang:nif_error(get_icu_version).
-
get_uca_version() ->
erlang:nif_error(get_uca_version).
-
less_nif(A, B) ->
erlang:nif_error(less_nif_load_error, [A, B]).
-
compare_strings_nif(A, B) ->
erlang:nif_error(compare_string_nif, [A, B]).
-
-less_erl(A,A) -> 0;
-
-less_erl(A,B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B);
-less_erl(A,_) when is_atom(A) -> -1;
-less_erl(_,B) when is_atom(B) -> 1;
-
-less_erl(A,B) when is_number(A), is_number(B) -> A - B;
-less_erl(A,_) when is_number(A) -> -1;
-less_erl(_,B) when is_number(B) -> 1;
-
-less_erl(A,B) when is_binary(A), is_binary(B) -> compare_strings_nif(A,B);
-less_erl(A,_) when is_binary(A) -> -1;
-less_erl(_,B) when is_binary(B) -> 1;
-
-less_erl(A,B) when is_list(A), is_list(B) -> less_list(A,B);
-less_erl(A,_) when is_list(A) -> -1;
-less_erl(_,B) when is_list(B) -> 1;
-
-less_erl({A},{B}) when is_list(A), is_list(B) -> less_props(A,B);
-less_erl({A},_) when is_list(A) -> -1;
-less_erl(_,{B}) when is_list(B) -> 1.
+less_erl(A, A) -> 0;
+less_erl(A, B) when is_atom(A), is_atom(B) -> atom_sort(A) - atom_sort(B);
+less_erl(A, _) when is_atom(A) -> -1;
+less_erl(_, B) when is_atom(B) -> 1;
+less_erl(A, B) when is_number(A), is_number(B) -> A - B;
+less_erl(A, _) when is_number(A) -> -1;
+less_erl(_, B) when is_number(B) -> 1;
+less_erl(A, B) when is_binary(A), is_binary(B) -> compare_strings_nif(A, B);
+less_erl(A, _) when is_binary(A) -> -1;
+less_erl(_, B) when is_binary(B) -> 1;
+less_erl(A, B) when is_list(A), is_list(B) -> less_list(A, B);
+less_erl(A, _) when is_list(A) -> -1;
+less_erl(_, B) when is_list(B) -> 1;
+less_erl({A}, {B}) when is_list(A), is_list(B) -> less_props(A, B);
+less_erl({A}, _) when is_list(A) -> -1;
+less_erl(_, {B}) when is_list(B) -> 1.
atom_sort(null) -> 1;
atom_sort(false) -> 2;
@@ -105,33 +92,33 @@ atom_sort(true) -> 3.
less_props([], []) ->
0;
-less_props([], [_|_]) ->
+less_props([], [_ | _]) ->
-1;
less_props(_, []) ->
1;
-less_props([{AKey, AValue}|RestA], [{BKey, BValue}|RestB]) ->
+less_props([{AKey, AValue} | RestA], [{BKey, BValue} | RestB]) ->
case compare_strings_nif(AKey, BKey) of
- 0 ->
- case less_erl(AValue, BValue) of
0 ->
- less_props(RestA, RestB);
+ case less_erl(AValue, BValue) of
+ 0 ->
+ less_props(RestA, RestB);
+ Result ->
+ Result
+ end;
Result ->
Result
- end;
- Result ->
- Result
end.
less_list([], []) ->
0;
-less_list([], [_|_]) ->
+less_list([], [_ | _]) ->
-1;
less_list(_, []) ->
1;
-less_list([A|RestA], [B|RestB]) ->
- case less_erl(A,B) of
- 0 ->
- less_list(RestA, RestB);
- Result ->
- Result
+less_list([A | RestA], [B | RestB]) ->
+ case less_erl(A, B) of
+ 0 ->
+ less_list(RestA, RestB);
+ Result ->
+ Result
end.
diff --git a/src/couch/src/couch_ejson_size.erl b/src/couch/src/couch_ejson_size.erl
index f5505680f..54a7094ff 100644
--- a/src/couch/src/couch_ejson_size.erl
+++ b/src/couch/src/couch_ejson_size.erl
@@ -14,85 +14,78 @@
-export([encoded_size/1]).
-
%% Compound objects
encoded_size({[]}) ->
- 2; % opening { and closing }
-
+ % opening { and closing }
+ 2;
encoded_size({KVs}) ->
% Would add 2 because opening { and closing }, but then inside the LC
% would accumulate an extra , at the end so subtract 2 - 1
- 1 + lists:sum([encoded_size(K) + encoded_size(V) + 2 || {K,V} <- KVs]);
-
+ 1 + lists:sum([encoded_size(K) + encoded_size(V) + 2 || {K, V} <- KVs]);
encoded_size([]) ->
- 2; % opening [ and closing ]
-
+ % opening [ and closing ]
+ 2;
encoded_size(List) when is_list(List) ->
% 2 is for [ and ] but inside LC would accumulate an extra , so subtract
% 2 - 1
1 + lists:sum([encoded_size(V) + 1 || V <- List]);
-
%% Floats.
encoded_size(0.0) ->
3;
-
encoded_size(1.0) ->
3;
-
encoded_size(Float) when is_float(Float), Float < 0.0 ->
encoded_size(-Float) + 1;
-
encoded_size(Float) when is_float(Float), Float < 1.0 ->
if
- Float =< 1.0e-300 -> 3; % close enough to 0.0
- Float =< 1.0e-100 -> 6; % Xe-YYY
- Float =< 1.0e-10 -> 5; % Xe-YY
- Float =< 0.01 -> 4; % Xe-Y, 0.0X
- true -> 3 % 0.X
+ % close enough to 0.0
+ Float =< 1.0e-300 -> 3;
+ % Xe-YYY
+ Float =< 1.0e-100 -> 6;
+ % Xe-YY
+ Float =< 1.0e-10 -> 5;
+ % Xe-Y, 0.0X
+ Float =< 0.01 -> 4;
+ % 0.X
+ true -> 3
end;
-
encoded_size(Float) when is_float(Float) ->
if
- Float >= 1.0e100 -> 5; % XeYYY
- Float >= 1.0e10 -> 4; % XeYY
- true -> 3 % XeY, X.Y
+ % XeYYY
+ Float >= 1.0e100 -> 5;
+ % XeYY
+ Float >= 1.0e10 -> 4;
+ % XeY, X.Y
+ true -> 3
end;
-
%% Integers
encoded_size(0) ->
1;
-
encoded_size(Integer) when is_integer(Integer), Integer < 0 ->
encoded_size(-Integer) + 1;
-
encoded_size(Integer) when is_integer(Integer) ->
if
- Integer < 10 -> 1;
- Integer < 100 -> 2;
- Integer < 1000 -> 3;
+ Integer < 10 -> 1;
+ Integer < 100 -> 2;
+ Integer < 1000 -> 3;
Integer < 10000 -> 4;
- true -> trunc(math:log10(Integer)) + 1
+ true -> trunc(math:log10(Integer)) + 1
end;
-
%% Strings
encoded_size(Binary) when is_binary(Binary) ->
2 + byte_size(Binary);
-
%% Special terminal symbols as atoms
encoded_size(null) ->
4;
-
encoded_size(true) ->
4;
-
encoded_size(false) ->
5;
-
%% Other atoms
encoded_size(Atom) when is_atom(Atom) ->
diff --git a/src/couch/src/couch_emsort.erl b/src/couch/src/couch_emsort.erl
index 430d94e01..9dcc08d67 100644
--- a/src/couch/src/couch_emsort.erl
+++ b/src/couch/src/couch_emsort.erl
@@ -142,36 +142,30 @@
num_bb = 0
}).
-
-define(REPORT_INTERVAL, 1000).
-
open(Fd) ->
- {ok, #ems{fd=Fd}}.
-
+ {ok, #ems{fd = Fd}}.
open(Fd, Options) ->
- {ok, set_options(#ems{fd=Fd}, Options)}.
-
+ {ok, set_options(#ems{fd = Fd}, Options)}.
set_options(Ems, []) ->
Ems;
set_options(Ems, [{root, Root} | Rest]) ->
- set_options(Ems#ems{root=Root}, Rest);
+ set_options(Ems#ems{root = Root}, Rest);
set_options(Ems, [{chain_chunk, Count} | Rest]) when is_integer(Count) ->
- set_options(Ems#ems{chain_chunk=Count}, Rest);
+ set_options(Ems#ems{chain_chunk = Count}, Rest);
set_options(Ems, [{back_bone_chunk, Count} | Rest]) when is_integer(Count) ->
- set_options(Ems#ems{bb_chunk=Count}, Rest);
+ set_options(Ems#ems{bb_chunk = Count}, Rest);
set_options(Ems, [{num_kvs, NumKVs} | Rest]) when is_integer(NumKVs) ->
- set_options(Ems#ems{num_kvs=NumKVs}, Rest);
+ set_options(Ems#ems{num_kvs = NumKVs}, Rest);
set_options(Ems, [{num_bb, NumBB} | Rest]) when is_integer(NumBB) ->
- set_options(Ems#ems{num_bb=NumBB}, Rest).
+ set_options(Ems#ems{num_bb = NumBB}, Rest).
-
-get_fd(#ems{fd=Fd}) ->
+get_fd(#ems{fd = Fd}) ->
Fd.
-
get_state(#ems{} = Ems) ->
#ems{
root = Root,
@@ -184,7 +178,6 @@ get_state(#ems{} = Ems) ->
{num_bb, NumBB}
].
-
add(Ems, []) ->
{ok, Ems};
add(Ems, KVs) ->
@@ -195,69 +188,64 @@ add(Ems, KVs) ->
num_bb = Ems#ems.num_bb + 1
}}.
-
-sort(#ems{}=Ems) ->
+sort(#ems{} = Ems) ->
{ok, Ems1} = merge(Ems),
iter(Ems1).
-
merge(Ems) ->
merge(Ems, fun(_) -> ok end).
-
-merge(#ems{root=undefined}=Ems, _Reporter) ->
+merge(#ems{root = undefined} = Ems, _Reporter) ->
{ok, Ems};
-merge(#ems{}=Ems, Reporter) ->
+merge(#ems{} = Ems, Reporter) ->
{ok, decimate(Ems, Reporter)}.
-
-iter(#ems{root=undefined}=Ems) ->
+iter(#ems{root = undefined} = Ems) ->
{ok, {Ems, []}};
-iter(#ems{root={BB, nil}}=Ems) ->
+iter(#ems{root = {BB, nil}} = Ems) ->
Chains = init_chains(Ems, small, BB),
{ok, {Ems, Chains}};
-iter(#ems{root={_, _}}) ->
+iter(#ems{root = {_, _}}) ->
{error, not_merged}.
-
next({_Ems, []}) ->
finished;
next({Ems, Chains}) ->
{KV, RestChains} = choose_kv(small, Ems, Chains),
{ok, KV, {Ems, RestChains}}.
-
-num_kvs(#ems{num_kvs=NumKVs}) ->
+num_kvs(#ems{num_kvs = NumKVs}) ->
NumKVs.
-num_merges(#ems{bb_chunk=BBChunk, num_bb=NumBB}) ->
+num_merges(#ems{bb_chunk = BBChunk, num_bb = NumBB}) ->
num_merges(BBChunk, NumBB).
-
-add_bb_pos(#ems{root=undefined}=Ems, Pos) ->
- Ems#ems{root={[Pos], nil}};
-add_bb_pos(#ems{root={BB, Prev}}=Ems, Pos) ->
+add_bb_pos(#ems{root = undefined} = Ems, Pos) ->
+ Ems#ems{root = {[Pos], nil}};
+add_bb_pos(#ems{root = {BB, Prev}} = Ems, Pos) ->
{NewBB, NewPrev} = append_item(Ems, {BB, Prev}, Pos, Ems#ems.bb_chunk),
- Ems#ems{root={NewBB, NewPrev}}.
-
+ Ems#ems{root = {NewBB, NewPrev}}.
write_kvs(Ems, KVs) ->
% Write the list of KV's to disk in sorted order in chunks
% of 100. Also make sure that the order is so that they
% can be streamed in asscending order.
{LastKVs, LastPos} =
- lists:foldr(fun(KV, Acc) ->
- append_item(Ems, Acc, KV, Ems#ems.chain_chunk)
- end, {[], nil}, lists:sort(KVs)),
+ lists:foldr(
+ fun(KV, Acc) ->
+ append_item(Ems, Acc, KV, Ems#ems.chain_chunk)
+ end,
+ {[], nil},
+ lists:sort(KVs)
+ ),
{ok, Final, _} = couch_file:append_term(Ems#ems.fd, {LastKVs, LastPos}),
Final.
-
-decimate(#ems{root={_BB, nil}}=Ems, _Reporter) ->
+decimate(#ems{root = {_BB, nil}} = Ems, _Reporter) ->
% We have less than bb_chunk backbone pointers so we're
% good to start streaming KV's back to the client.
Ems;
-decimate(#ems{root={BB, NextBB}}=Ems, Reporter) ->
+decimate(#ems{root = {BB, NextBB}} = Ems, Reporter) ->
% To make sure we have a bounded amount of data in RAM
% at any given point we first need to decimate the data
% by performing the first couple iterations of a merge
@@ -273,15 +261,13 @@ decimate(#ems{root={BB, NextBB}}=Ems, Reporter) ->
% Continue deicmating until we have an acceptable bound on
% the number of keys to use.
- decimate(Ems#ems{root={FwdBB, FwdNextBB}}, Reporter).
-
+ decimate(Ems#ems{root = {FwdBB, FwdNextBB}}, Reporter).
merge_back_bone(Ems, Choose, BB, NextBB, Reporter) ->
BBPos = merge_chains(Ems, Choose, BB, Reporter),
Reporter(length(BB)),
merge_rest_back_bone(Ems, Choose, NextBB, {[BBPos], nil}, Reporter).
-
merge_rest_back_bone(_Ems, _Choose, nil, Acc, _Reporter) ->
Acc;
merge_rest_back_bone(Ems, Choose, BBPos, Acc, Reporter) ->
@@ -290,40 +276,39 @@ merge_rest_back_bone(Ems, Choose, BBPos, Acc, Reporter) ->
{NewBB, NewPrev} = append_item(Ems, Acc, NewPos, Ems#ems.bb_chunk),
merge_rest_back_bone(Ems, Choose, NextBB, {NewBB, NewPrev}, Reporter).
-
merge_chains(Ems, Choose, BB, Reporter) ->
Chains = init_chains(Ems, Choose, BB),
merge_chains(Ems, Choose, Chains, {[], nil}, Reporter, 0).
-
merge_chains(Ems, _Choose, [], ChainAcc, _Reporter, _Count) ->
{ok, CPos, _} = couch_file:append_term(Ems#ems.fd, ChainAcc),
CPos;
-merge_chains(#ems{chain_chunk=CC}=Ems, Choose, Chains, Acc, Reporter, Count0) ->
+merge_chains(#ems{chain_chunk = CC} = Ems, Choose, Chains, Acc, Reporter, Count0) ->
{KV, RestChains} = choose_kv(Choose, Ems, Chains),
{NewKVs, NewPrev} = append_item(Ems, Acc, KV, CC),
- Count1 = case (Count0 + 1) rem ?REPORT_INTERVAL of
- 0 ->
- Reporter(Count0),
- 0;
- _ ->
- Count0 + 1
- end,
+ Count1 =
+ case (Count0 + 1) rem ?REPORT_INTERVAL of
+ 0 ->
+ Reporter(Count0),
+ 0;
+ _ ->
+ Count0 + 1
+ end,
merge_chains(Ems, Choose, RestChains, {NewKVs, NewPrev}, Reporter, Count1).
-
init_chains(Ems, Choose, BB) ->
- Chains = lists:map(fun(CPos) ->
- {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos),
- {KVs, NextKVs}
- end, BB),
+ Chains = lists:map(
+ fun(CPos) ->
+ {ok, {KVs, NextKVs}} = couch_file:pread_term(Ems#ems.fd, CPos),
+ {KVs, NextKVs}
+ end,
+ BB
+ ),
order_chains(Choose, Chains).
-
order_chains(small, Chains) -> lists:sort(Chains);
order_chains(big, Chains) -> lists:reverse(lists:sort(Chains)).
-
choose_kv(_Choose, _Ems, [{[KV], nil} | Rest]) ->
{KV, Rest};
choose_kv(Choose, Ems, [{[KV], Pos} | RestChains]) ->
@@ -338,26 +323,22 @@ choose_kv(Choose, _Ems, [{[KV | RestKVs], Prev} | RestChains]) ->
big -> {KV, ins_big_chain(RestChains, {RestKVs, Prev}, [])}
end.
-
-ins_small_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1<K2 ->
+ins_small_chain([{[{K1, _} | _], _} = C1 | Rest], {[{K2, _} | _], _} = C2, Acc) when K1 < K2 ->
ins_small_chain(Rest, C2, [C1 | Acc]);
ins_small_chain(Rest, Chain, Acc) ->
lists:reverse(Acc, [Chain | Rest]).
-
-ins_big_chain([{[{K1,_}|_],_}=C1|Rest], {[{K2,_}|_],_}=C2, Acc) when K1>K2 ->
+ins_big_chain([{[{K1, _} | _], _} = C1 | Rest], {[{K2, _} | _], _} = C2, Acc) when K1 > K2 ->
ins_big_chain(Rest, C2, [C1 | Acc]);
ins_big_chain(Rest, Chain, Acc) ->
lists:reverse(Acc, [Chain | Rest]).
-
append_item(Ems, {List, Prev}, Pos, Size) when length(List) >= Size ->
{ok, PrevList, _} = couch_file:append_term(Ems#ems.fd, {List, Prev}),
{[Pos], PrevList};
append_item(_Ems, {List, Prev}, Pos, _Size) ->
{[Pos | List], Prev}.
-
num_merges(BBChunk, NumBB) when NumBB =< BBChunk ->
0;
num_merges(BBChunk, NumBB) when NumBB > BBChunk ->
diff --git a/src/couch/src/couch_event_sup.erl b/src/couch/src/couch_event_sup.erl
index 32f1b9b68..e9fc2e5db 100644
--- a/src/couch/src/couch_event_sup.erl
+++ b/src/couch/src/couch_event_sup.erl
@@ -20,8 +20,8 @@
-include_lib("couch/include/couch_db.hrl").
--export([start_link/3,start_link/4, stop/1]).
--export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2,code_change/3]).
+-export([start_link/3, start_link/4, stop/1]).
+-export([init/1, terminate/2, handle_call/3, handle_cast/2, handle_info/2, code_change/3]).
%
% Instead calling the
@@ -52,10 +52,10 @@ stop(Pid) ->
init({EventMgr, EventHandler, Args}) ->
case gen_event:add_sup_handler(EventMgr, EventHandler, Args) of
- ok ->
- {ok, {EventMgr, EventHandler}};
- {stop, Error} ->
- {stop, Error}
+ ok ->
+ {ok, {EventMgr, EventHandler}};
+ {stop, Error} ->
+ {stop, Error}
end.
terminate(_Reason, _State) ->
diff --git a/src/couch/src/couch_file.erl b/src/couch/src/couch_file.erl
index 2948d685b..0e786525f 100644
--- a/src/couch/src/couch_file.erl
+++ b/src/couch/src/couch_file.erl
@@ -16,10 +16,10 @@
-include_lib("couch/include/couch_db.hrl").
-
-define(INITIAL_WAIT, 60000).
-define(MONITOR_CHECK, 10000).
--define(SIZE_BLOCK, 16#1000). % 4 KiB
+% 4 KiB
+-define(SIZE_BLOCK, 16#1000).
-define(IS_OLD_STATE(S), is_pid(S#file.db_monitor)).
-define(PREFIX_SIZE, 5).
-define(DEFAULT_READ_COUNT, 1024).
@@ -66,38 +66,48 @@ open(Filepath) ->
open(Filepath, []).
open(Filepath, Options) ->
- case gen_server:start_link(couch_file,
- {Filepath, Options, self(), Ref = make_ref()}, []) of
- {ok, Fd} ->
- {ok, Fd};
- ignore ->
- % get the error
- receive
- {Ref, Pid, {error, Reason} = Error} ->
- case process_info(self(), trap_exit) of
- {trap_exit, true} -> receive {'EXIT', Pid, _} -> ok end;
- {trap_exit, false} -> ok
- end,
- case {lists:member(nologifmissing, Options), Reason} of
- {true, enoent} -> ok;
- _ ->
- couch_log:error("Could not open file ~s: ~s",
- [Filepath, file:format_error(Reason)])
- end,
+ case
+ gen_server:start_link(
+ couch_file,
+ {Filepath, Options, self(), Ref = make_ref()},
+ []
+ )
+ of
+ {ok, Fd} ->
+ {ok, Fd};
+ ignore ->
+ % get the error
+ receive
+ {Ref, Pid, {error, Reason} = Error} ->
+ case process_info(self(), trap_exit) of
+ {trap_exit, true} ->
+ receive
+ {'EXIT', Pid, _} -> ok
+ end;
+ {trap_exit, false} ->
+ ok
+ end,
+ case {lists:member(nologifmissing, Options), Reason} of
+ {true, enoent} ->
+ ok;
+ _ ->
+ couch_log:error(
+ "Could not open file ~s: ~s",
+ [Filepath, file:format_error(Reason)]
+ )
+ end,
+ Error
+ end;
+ Error ->
+ % We can't say much here, because it could be any kind of error.
+ % Just let it bubble and an encapsulating subcomponent can perhaps
+ % be more informative. It will likely appear in the SASL log, anyway.
Error
- end;
- Error ->
- % We can't say much here, because it could be any kind of error.
- % Just let it bubble and an encapsulating subcomponent can perhaps
- % be more informative. It will likely appear in the SASL log, anyway.
- Error
end.
-
set_db_pid(Fd, Pid) ->
gen_server:call(Fd, {set_db_pid, Pid}).
-
%%----------------------------------------------------------------------
%% Purpose: To append an Erlang term to the end of the file.
%% Args: Erlang term to serialize and append to the file.
@@ -121,7 +131,6 @@ append_term_md5(Fd, Term, Options) ->
Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
append_binary_md5(Fd, couch_compress:compress(Term, Comp)).
-
%%----------------------------------------------------------------------
%% Purpose: To append an Erlang binary to the end of the file.
%% Args: Erlang term to serialize and append to the file.
@@ -134,14 +143,15 @@ append_binary(Fd, Bin) ->
ioq:call(Fd, {append_bin, assemble_file_chunk(Bin)}, erlang:get(io_priority)).
append_binary_md5(Fd, Bin) ->
- ioq:call(Fd,
+ ioq:call(
+ Fd,
{append_bin, assemble_file_chunk(Bin, couch_hash:md5_hash(Bin))},
- erlang:get(io_priority)).
+ erlang:get(io_priority)
+ ).
append_raw_chunk(Fd, Chunk) ->
ioq:call(Fd, {append_bin, Chunk}, erlang:get(io_priority)).
-
assemble_file_chunk(Bin) ->
[<<0:1/integer, (iolist_size(Bin)):31/integer>>, Bin].
@@ -155,12 +165,10 @@ assemble_file_chunk(Bin, Md5) ->
%% or {error, Reason}.
%%----------------------------------------------------------------------
-
pread_term(Fd, Pos) ->
{ok, Bin} = pread_binary(Fd, Pos),
{ok, couch_compress:decompress(Bin)}.
-
%%----------------------------------------------------------------------
%% Purpose: Reads a binrary from a file that was written with append_binary
%% Args: Pos, the offset into the file where the term is serialized.
@@ -172,7 +180,6 @@ pread_binary(Fd, Pos) ->
{ok, L} = pread_iolist(Fd, Pos),
{ok, iolist_to_binary(L)}.
-
pread_iolist(Fd, Pos) ->
case ioq:call(Fd, {pread_iolist, Pos}, erlang:get(io_priority)) of
{ok, IoList, Md5} ->
@@ -181,49 +188,52 @@ pread_iolist(Fd, Pos) ->
Error
end.
-
pread_terms(Fd, PosList) ->
{ok, Bins} = pread_binaries(Fd, PosList),
- Terms = lists:map(fun(Bin) ->
- couch_compress:decompress(Bin)
- end, Bins),
+ Terms = lists:map(
+ fun(Bin) ->
+ couch_compress:decompress(Bin)
+ end,
+ Bins
+ ),
{ok, Terms}.
-
pread_binaries(Fd, PosList) ->
{ok, Data} = pread_iolists(Fd, PosList),
{ok, lists:map(fun erlang:iolist_to_binary/1, Data)}.
-
pread_iolists(Fd, PosList) ->
case ioq:call(Fd, {pread_iolists, PosList}, erlang:get(io_priority)) of
{ok, DataMd5s} ->
- Data = lists:zipwith(fun(Pos, {IoList, Md5}) ->
- verify_md5(Fd, Pos, IoList, Md5)
- end, PosList, DataMd5s),
+ Data = lists:zipwith(
+ fun(Pos, {IoList, Md5}) ->
+ verify_md5(Fd, Pos, IoList, Md5)
+ end,
+ PosList,
+ DataMd5s
+ ),
{ok, Data};
Error ->
Error
end.
-
append_terms(Fd, Terms) ->
append_terms(Fd, Terms, []).
-
append_terms(Fd, Terms, Options) ->
Comp = couch_util:get_value(compression, Options, ?DEFAULT_COMPRESSION),
- Bins = lists:map(fun(Term) ->
- couch_compress:compress(Term, Comp)
- end, Terms),
+ Bins = lists:map(
+ fun(Term) ->
+ couch_compress:compress(Term, Comp)
+ end,
+ Terms
+ ),
append_binaries(Fd, Bins).
-
append_binaries(Fd, Bins) ->
WriteBins = lists:map(fun assemble_file_chunk/1, Bins),
ioq:call(Fd, {append_bins, WriteBins}, erlang:get(io_priority)).
-
%%----------------------------------------------------------------------
%% Purpose: The length of a file, in bytes.
%% Returns: {ok, Bytes}
@@ -280,36 +290,42 @@ sync(Fd) ->
close(Fd) ->
gen_server:call(Fd, close, infinity).
-
delete(RootDir, Filepath) ->
delete(RootDir, Filepath, []).
delete(RootDir, FullFilePath, Options) ->
- EnableRecovery = config:get_boolean("couchdb",
- "enable_database_recovery", false),
+ EnableRecovery = config:get_boolean(
+ "couchdb",
+ "enable_database_recovery",
+ false
+ ),
Async = not lists:member(sync, Options),
Context = couch_util:get_value(context, Options, compaction),
case Context =:= delete andalso EnableRecovery of
true ->
rename_file(FullFilePath);
false ->
- DeleteAfterRename = config:get_boolean("couchdb",
- "delete_after_rename", true),
+ DeleteAfterRename = config:get_boolean(
+ "couchdb",
+ "delete_after_rename",
+ true
+ ),
delete_file(RootDir, FullFilePath, Async, DeleteAfterRename)
end.
delete_file(RootDir, Filepath, Async, DeleteAfterRename) ->
- DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
+ DelFile = filename:join([RootDir, ".delete", ?b2l(couch_uuids:random())]),
case file:rename(Filepath, DelFile) of
- ok when DeleteAfterRename ->
- if (Async) ->
- spawn(file, delete, [DelFile]),
- ok;
- true ->
- file:delete(DelFile)
- end;
- Else ->
- Else
+ ok when DeleteAfterRename ->
+ if
+ (Async) ->
+ spawn(file, delete, [DelFile]),
+ ok;
+ true ->
+ file:delete(DelFile)
+ end;
+ Else ->
+ Else
end.
rename_file(Original) ->
@@ -323,14 +339,21 @@ rename_file(Original) ->
deleted_filename(Original) ->
{{Y, Mon, D}, {H, Min, S}} = calendar:universal_time(),
Suffix = lists:flatten(
- io_lib:format(".~w~2.10.0B~2.10.0B."
- ++ "~2.10.0B~2.10.0B~2.10.0B.deleted"
- ++ filename:extension(Original), [Y, Mon, D, H, Min, S])),
+ io_lib:format(
+ ".~w~2.10.0B~2.10.0B." ++
+ "~2.10.0B~2.10.0B~2.10.0B.deleted" ++
+ filename:extension(Original),
+ [Y, Mon, D, H, Min, S]
+ )
+ ),
filename:rootname(Original) ++ Suffix.
nuke_dir(RootDelDir, Dir) ->
- EnableRecovery = config:get_boolean("couchdb",
- "enable_database_recovery", false),
+ EnableRecovery = config:get_boolean(
+ "couchdb",
+ "enable_database_recovery",
+ false
+ ),
case EnableRecovery of
true ->
rename_file(Dir);
@@ -339,8 +362,11 @@ nuke_dir(RootDelDir, Dir) ->
end.
delete_dir(RootDelDir, Dir) ->
- DeleteAfterRename = config:get_boolean("couchdb",
- "delete_after_rename", true),
+ DeleteAfterRename = config:get_boolean(
+ "couchdb",
+ "delete_after_rename",
+ true
+ ),
FoldFun = fun(File) ->
Path = Dir ++ "/" ++ File,
case filelib:is_dir(Path) of
@@ -359,27 +385,30 @@ delete_dir(RootDelDir, Dir) ->
ok
end.
-
init_delete_dir(RootDir) ->
- Dir = filename:join(RootDir,".delete"),
+ Dir = filename:join(RootDir, ".delete"),
% note: ensure_dir requires an actual filename component, which is the
% reason for "foo".
- filelib:ensure_dir(filename:join(Dir,"foo")),
+ filelib:ensure_dir(filename:join(Dir, "foo")),
spawn(fun() ->
- filelib:fold_files(Dir, ".*", true,
+ filelib:fold_files(
+ Dir,
+ ".*",
+ true,
fun(Filename, _) ->
ok = file:delete(Filename)
- end, ok)
+ end,
+ ok
+ )
end),
ok.
-
read_header(Fd) ->
case ioq:call(Fd, find_header, erlang:get(io_priority)) of
- {ok, Bin} ->
- {ok, binary_to_term(Bin)};
- Else ->
- Else
+ {ok, Bin} ->
+ {ok, binary_to_term(Bin)};
+ Else ->
+ Else
end.
write_header(Fd, Data) ->
@@ -389,17 +418,14 @@ write_header(Fd, Data) ->
FinalBin = <<Md5/binary, Bin/binary>>,
ioq:call(Fd, {write_header, FinalBin}, erlang:get(io_priority)).
-
init_status_error(ReturnPid, Ref, Error) ->
ReturnPid ! {Ref, self(), Error},
ignore.
-
last_read(Fd) when is_pid(Fd) ->
Now = os:timestamp(),
couch_util:process_dict_get(Fd, read_timestamp, Now).
-
% server functions
init({Filepath, Options, ReturnPid, Ref}) ->
@@ -408,66 +434,67 @@ init({Filepath, Options, ReturnPid, Ref}) ->
IsSys = lists:member(sys_db, Options),
update_read_timestamp(),
case lists:member(create, Options) of
- true ->
- filelib:ensure_dir(Filepath),
- case file:open(Filepath, OpenOptions) of
- {ok, Fd} ->
- %% Save Fd in process dictionary for debugging purposes
- put(couch_file_fd, {Fd, Filepath}),
- {ok, Length} = file:position(Fd, eof),
- case Length > 0 of
- true ->
- % this means the file already exists and has data.
- % FYI: We don't differentiate between empty files and non-existant
- % files here.
- case lists:member(overwrite, Options) of
- true ->
- {ok, 0} = file:position(Fd, 0),
- ok = file:truncate(Fd),
- ok = file:sync(Fd),
- maybe_track_open_os_files(Options),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd=Fd, is_sys=IsSys, pread_limit=Limit}};
- false ->
- ok = file:close(Fd),
- init_status_error(ReturnPid, Ref, {error, eexist})
- end;
- false ->
- maybe_track_open_os_files(Options),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd=Fd, is_sys=IsSys, pread_limit=Limit}}
- end;
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end;
- false ->
- % open in read mode first, so we don't create the file if it doesn't exist.
- case file:open(Filepath, [read, raw]) of
- {ok, Fd_Read} ->
+ true ->
+ filelib:ensure_dir(Filepath),
case file:open(Filepath, OpenOptions) of
{ok, Fd} ->
- %% Save Fd in process dictionary for debugging purposes
- put(couch_file_fd, {Fd, Filepath}),
- ok = file:close(Fd_Read),
- maybe_track_open_os_files(Options),
- {ok, Eof} = file:position(Fd, eof),
- erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
- {ok, #file{fd=Fd, eof=Eof, is_sys=IsSys, pread_limit=Limit}};
- Error ->
- init_status_error(ReturnPid, Ref, Error)
+ %% Save Fd in process dictionary for debugging purposes
+ put(couch_file_fd, {Fd, Filepath}),
+ {ok, Length} = file:position(Fd, eof),
+ case Length > 0 of
+ true ->
+ % this means the file already exists and has data.
+ % FYI: We don't differentiate between empty files and non-existent
+ % files here.
+ case lists:member(overwrite, Options) of
+ true ->
+ {ok, 0} = file:position(Fd, 0),
+ ok = file:truncate(Fd),
+ ok = file:sync(Fd),
+ maybe_track_open_os_files(Options),
+ erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
+ {ok, #file{fd = Fd, is_sys = IsSys, pread_limit = Limit}};
+ false ->
+ ok = file:close(Fd),
+ init_status_error(ReturnPid, Ref, {error, eexist})
+ end;
+ false ->
+ maybe_track_open_os_files(Options),
+ erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
+ {ok, #file{fd = Fd, is_sys = IsSys, pread_limit = Limit}}
+ end;
+ Error ->
+ init_status_error(ReturnPid, Ref, Error)
end;
- Error ->
- init_status_error(ReturnPid, Ref, Error)
- end
+ false ->
+ % open in read mode first, so we don't create the file if it doesn't exist.
+ case file:open(Filepath, [read, raw]) of
+ {ok, Fd_Read} ->
+ case file:open(Filepath, OpenOptions) of
+ {ok, Fd} ->
+ %% Save Fd in process dictionary for debugging purposes
+ put(couch_file_fd, {Fd, Filepath}),
+ ok = file:close(Fd_Read),
+ maybe_track_open_os_files(Options),
+ {ok, Eof} = file:position(Fd, eof),
+ erlang:send_after(?INITIAL_WAIT, self(), maybe_close),
+ {ok, #file{fd = Fd, eof = Eof, is_sys = IsSys, pread_limit = Limit}};
+ Error ->
+ init_status_error(ReturnPid, Ref, Error)
+ end;
+ Error ->
+ init_status_error(ReturnPid, Ref, Error)
+ end
end.
file_open_options(Options) ->
- [read, raw, binary] ++ case lists:member(read_only, Options) of
- true ->
- [];
- false ->
- [append]
- end.
+ [read, raw, binary] ++
+ case lists:member(read_only, Options) of
+ true ->
+ [];
+ false ->
+ [append]
+ end.
maybe_track_open_os_files(Options) ->
case not lists:member(sys_db, Options) of
@@ -484,59 +511,62 @@ terminate(_Reason, #file{fd = Fd}) ->
handle_call(Msg, From, File) when ?IS_OLD_STATE(File) ->
handle_call(Msg, From, upgrade_state(File));
-
-handle_call(close, _From, #file{fd=Fd}=File) ->
+handle_call(close, _From, #file{fd = Fd} = File) ->
{stop, normal, file:close(Fd), File#file{fd = nil}};
-
handle_call({pread_iolist, Pos}, _From, File) ->
update_read_timestamp(),
{LenIolist, NextPos} = read_raw_iolist_int(File, Pos, 4),
case iolist_to_binary(LenIolist) of
- <<1:1/integer,Len:31/integer>> -> % an MD5-prefixed term
- {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len+16),
- {Md5, IoList} = extract_md5(Md5AndIoList),
- {reply, {ok, IoList, Md5}, File};
- <<0:1/integer,Len:31/integer>> ->
- {Iolist, _} = read_raw_iolist_int(File, NextPos, Len),
- {reply, {ok, Iolist, <<>>}, File}
+ % an MD5-prefixed term
+ <<1:1/integer, Len:31/integer>> ->
+ {Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len + 16),
+ {Md5, IoList} = extract_md5(Md5AndIoList),
+ {reply, {ok, IoList, Md5}, File};
+ <<0:1/integer, Len:31/integer>> ->
+ {Iolist, _} = read_raw_iolist_int(File, NextPos, Len),
+ {reply, {ok, Iolist, <<>>}, File}
end;
-
handle_call({pread_iolists, PosL}, _From, File) ->
update_read_timestamp(),
LocNums1 = [{Pos, 4} || Pos <- PosL],
DataSizes = read_multi_raw_iolists_int(File, LocNums1),
- LocNums2 = lists:map(fun({LenIoList, NextPos}) ->
- case iolist_to_binary(LenIoList) of
- <<1:1/integer, Len:31/integer>> -> % an MD5-prefixed term
- {NextPos, Len + 16};
- <<0:1/integer, Len:31/integer>> ->
- {NextPos, Len}
- end
- end, DataSizes),
+ LocNums2 = lists:map(
+ fun({LenIoList, NextPos}) ->
+ case iolist_to_binary(LenIoList) of
+ % an MD5-prefixed term
+ <<1:1/integer, Len:31/integer>> ->
+ {NextPos, Len + 16};
+ <<0:1/integer, Len:31/integer>> ->
+ {NextPos, Len}
+ end
+ end,
+ DataSizes
+ ),
Resps = read_multi_raw_iolists_int(File, LocNums2),
- Extracted = lists:zipwith(fun({LenIoList, _}, {IoList, _}) ->
- case iolist_to_binary(LenIoList) of
- <<1:1/integer, _:31/integer>> ->
- {Md5, IoList} = extract_md5(IoList),
- {IoList, Md5};
- <<0:1/integer, _:31/integer>> ->
- {IoList, <<>>}
- end
- end, DataSizes, Resps),
+ Extracted = lists:zipwith(
+ fun({LenIoList, _}, {IoList, _}) ->
+ case iolist_to_binary(LenIoList) of
+ <<1:1/integer, _:31/integer>> ->
+ {Md5, IoList} = extract_md5(IoList),
+ {IoList, Md5};
+ <<0:1/integer, _:31/integer>> ->
+ {IoList, <<>>}
+ end
+ end,
+ DataSizes,
+ Resps
+ ),
{reply, {ok, Extracted}, File};
-
handle_call(bytes, _From, #file{fd = Fd} = File) ->
{reply, file:position(Fd, eof), File};
-
-handle_call({set_db_pid, Pid}, _From, #file{db_monitor=OldRef}=File) ->
+handle_call({set_db_pid, Pid}, _From, #file{db_monitor = OldRef} = File) ->
case is_reference(OldRef) of
true -> demonitor(OldRef, [flush]);
false -> ok
end,
Ref = monitor(process, Pid),
- {reply, ok, File#file{db_monitor=Ref}};
-
-handle_call(sync, _From, #file{fd=Fd}=File) ->
+ {reply, ok, File#file{db_monitor = Ref}};
+handle_call(sync, _From, #file{fd = Fd} = File) ->
case file:sync(Fd) of
ok ->
{reply, ok, File};
@@ -547,68 +577,66 @@ handle_call(sync, _From, #file{fd=Fd}=File) ->
% can't fathom.
{stop, Error, Error, #file{fd = nil}}
end;
-
-handle_call({truncate, Pos}, _From, #file{fd=Fd}=File) ->
+handle_call({truncate, Pos}, _From, #file{fd = Fd} = File) ->
{ok, Pos} = file:position(Fd, Pos),
case file:truncate(Fd) of
- ok ->
- {reply, ok, File#file{eof = Pos}};
- Error ->
- {reply, Error, File}
+ ok ->
+ {reply, ok, File#file{eof = Pos}};
+ Error ->
+ {reply, Error, File}
end;
-
handle_call({append_bin, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
Blocks = make_blocks(Pos rem ?SIZE_BLOCK, Bin),
Size = iolist_size(Blocks),
case file:write(Fd, Blocks) of
- ok ->
- {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}};
- Error ->
- {reply, Error, reset_eof(File)}
+ ok ->
+ {reply, {ok, Pos, Size}, File#file{eof = Pos + Size}};
+ Error ->
+ {reply, Error, reset_eof(File)}
end;
-
handle_call({append_bins, Bins}, _From, #file{fd = Fd, eof = Pos} = File) ->
- {BlockResps, FinalPos} = lists:mapfoldl(fun(Bin, PosAcc) ->
- Blocks = make_blocks(PosAcc rem ?SIZE_BLOCK, Bin),
- Size = iolist_size(Blocks),
- {{Blocks, {PosAcc, Size}}, PosAcc + Size}
- end, Pos, Bins),
+ {BlockResps, FinalPos} = lists:mapfoldl(
+ fun(Bin, PosAcc) ->
+ Blocks = make_blocks(PosAcc rem ?SIZE_BLOCK, Bin),
+ Size = iolist_size(Blocks),
+ {{Blocks, {PosAcc, Size}}, PosAcc + Size}
+ end,
+ Pos,
+ Bins
+ ),
{AllBlocks, Resps} = lists:unzip(BlockResps),
case file:write(Fd, AllBlocks) of
- ok ->
- {reply, {ok, Resps}, File#file{eof = FinalPos}};
- Error ->
- {reply, Error, reset_eof(File)}
+ ok ->
+ {reply, {ok, Resps}, File#file{eof = FinalPos}};
+ Error ->
+ {reply, Error, reset_eof(File)}
end;
-
handle_call({write_header, Bin}, _From, #file{fd = Fd, eof = Pos} = File) ->
BinSize = byte_size(Bin),
case Pos rem ?SIZE_BLOCK of
- 0 ->
- Padding = <<>>;
- BlockOffset ->
- Padding = <<0:(8*(?SIZE_BLOCK-BlockOffset))>>
+ 0 ->
+ Padding = <<>>;
+ BlockOffset ->
+ Padding = <<0:(8 * (?SIZE_BLOCK - BlockOffset))>>
end,
FinalBin = [Padding, <<1, BinSize:32/integer>> | make_blocks(5, [Bin])],
case file:write(Fd, FinalBin) of
- ok ->
- {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}};
- Error ->
- {reply, Error, reset_eof(File)}
+ ok ->
+ {reply, ok, File#file{eof = Pos + iolist_size(FinalBin)}};
+ Error ->
+ {reply, Error, reset_eof(File)}
end;
-
handle_call(find_header, _From, #file{fd = Fd, eof = Pos} = File) ->
{reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
handle_cast(close, Fd) ->
- {stop,normal,Fd}.
+ {stop, normal, Fd}.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
handle_info(Msg, File) when ?IS_OLD_STATE(File) ->
handle_info(Msg, upgrade_state(File));
-
handle_info(maybe_close, File) ->
case is_idle(File) of
true ->
@@ -617,8 +645,7 @@ handle_info(maybe_close, File) ->
erlang:send_after(?MONITOR_CHECK, self(), maybe_close),
{noreply, File}
end;
-
-handle_info({'DOWN', Ref, process, _Pid, _Info}, #file{db_monitor=Ref}=File) ->
+handle_info({'DOWN', Ref, process, _Pid, _Info}, #file{db_monitor = Ref} = File) ->
case is_idle(File) of
true -> {stop, normal, File};
false -> {noreply, File}
@@ -630,12 +657,13 @@ format_status(_Opt, [PDict, #file{} = File]) ->
find_header(Fd, Block) ->
case (catch load_header(Fd, Block)) of
- {ok, Bin} ->
- {ok, Bin};
- _Error ->
- ReadCount = config:get_integer(
- "couchdb", "find_header_read_count", ?DEFAULT_READ_COUNT),
- find_header(Fd, Block -1, ReadCount)
+ {ok, Bin} ->
+ {ok, Bin};
+ _Error ->
+ ReadCount = config:get_integer(
+ "couchdb", "find_header_read_count", ?DEFAULT_READ_COUNT
+ ),
+ find_header(Fd, Block - 1, ReadCount)
end.
load_header(Fd, Block) ->
@@ -648,22 +676,22 @@ load_header(Fd, Pos, HeaderLen) ->
load_header(Fd, Pos, HeaderLen, RestBlock) ->
TotalBytes = calculate_total_read_len(?PREFIX_SIZE, HeaderLen),
- RawBin = case TotalBytes =< byte_size(RestBlock) of
- true ->
- <<RawBin0:TotalBytes/binary, _/binary>> = RestBlock,
- RawBin0;
- false ->
- ReadStart = Pos + ?PREFIX_SIZE + byte_size(RestBlock),
- ReadLen = TotalBytes - byte_size(RestBlock),
- {ok, Missing} = file:pread(Fd, ReadStart, ReadLen),
- <<RestBlock/binary, Missing/binary>>
- end,
+ RawBin =
+ case TotalBytes =< byte_size(RestBlock) of
+ true ->
+ <<RawBin0:TotalBytes/binary, _/binary>> = RestBlock,
+ RawBin0;
+ false ->
+ ReadStart = Pos + ?PREFIX_SIZE + byte_size(RestBlock),
+ ReadLen = TotalBytes - byte_size(RestBlock),
+ {ok, Missing} = file:pread(Fd, ReadStart, ReadLen),
+ <<RestBlock/binary, Missing/binary>>
+ end,
<<Md5Sig:16/binary, HeaderBin/binary>> =
iolist_to_binary(remove_block_prefixes(?PREFIX_SIZE, RawBin)),
Md5Sig = couch_hash:md5_hash(HeaderBin),
{ok, HeaderBin}.
-
%% Read multiple block locations using a single file:pread/2.
-spec find_header(file:fd(), block_id(), non_neg_integer()) ->
{ok, binary()} | no_valid_header.
@@ -671,23 +699,28 @@ find_header(_Fd, Block, _ReadCount) when Block < 0 ->
no_valid_header;
find_header(Fd, Block, ReadCount) ->
FirstBlock = max(0, Block - ReadCount + 1),
- BlockLocations = [?SIZE_BLOCK*B || B <- lists:seq(FirstBlock, Block)],
+ BlockLocations = [?SIZE_BLOCK * B || B <- lists:seq(FirstBlock, Block)],
{ok, DataL} = file:pread(Fd, [{L, ?PREFIX_SIZE} || L <- BlockLocations]),
%% Since BlockLocations are ordered from oldest to newest, we rely
%% on lists:foldl/3 to reverse the order, making HeaderLocations
%% correctly ordered from newest to oldest.
- HeaderLocations = lists:foldl(fun
- ({Loc, <<1, HeaderSize:32/integer>>}, Acc) ->
- [{Loc, HeaderSize} | Acc];
- (_, Acc) ->
- Acc
- end, [], lists:zip(BlockLocations, DataL)),
+ HeaderLocations = lists:foldl(
+ fun
+ ({Loc, <<1, HeaderSize:32/integer>>}, Acc) ->
+ [{Loc, HeaderSize} | Acc];
+ (_, Acc) ->
+ Acc
+ end,
+ [],
+ lists:zip(BlockLocations, DataL)
+ ),
case find_newest_header(Fd, HeaderLocations) of
{ok, _Location, HeaderBin} ->
{ok, HeaderBin};
_ ->
ok = file:advise(
- Fd, hd(BlockLocations), ReadCount * ?SIZE_BLOCK, dont_need),
+ Fd, hd(BlockLocations), ReadCount * ?SIZE_BLOCK, dont_need
+ ),
NextBlock = hd(BlockLocations) div ?SIZE_BLOCK - 1,
find_header(Fd, NextBlock, ReadCount)
end.
@@ -704,10 +737,10 @@ find_newest_header(Fd, [{Location, Size} | LocationSizes]) ->
find_newest_header(Fd, LocationSizes)
end.
-
--spec read_raw_iolist_int(#file{}, Pos::non_neg_integer(), Len::non_neg_integer()) ->
- {Data::iolist(), CurPos::non_neg_integer()}.
-read_raw_iolist_int(Fd, {Pos, _Size}, Len) -> % 0110 UPGRADE CODE
+-spec read_raw_iolist_int(#file{}, Pos :: non_neg_integer(), Len :: non_neg_integer()) ->
+ {Data :: iolist(), CurPos :: non_neg_integer()}.
+% 0110 UPGRADE CODE
+read_raw_iolist_int(Fd, {Pos, _Size}, Len) ->
read_raw_iolist_int(Fd, Pos, Len);
read_raw_iolist_int(#file{fd = Fd} = File, Pos, Len) ->
{Pos, TotalBytes} = get_pread_locnum(File, Pos, Len),
@@ -727,33 +760,38 @@ read_raw_iolist_int(#file{fd = Fd} = File, Pos, Len) ->
% TODO: check if this is really unused
read_multi_raw_iolists_int(#file{fd = Fd} = File, PosLens) ->
- LocNums = lists:map(fun({Pos, Len}) ->
- get_pread_locnum(File, Pos, Len)
- end, PosLens),
+ LocNums = lists:map(
+ fun({Pos, Len}) ->
+ get_pread_locnum(File, Pos, Len)
+ end,
+ PosLens
+ ),
{ok, Bins} = file:pread(Fd, LocNums),
- lists:zipwith(fun({Pos, TotalBytes}, Bin) ->
- <<RawBin:TotalBytes/binary>> = Bin,
- {remove_block_prefixes(Pos rem ?SIZE_BLOCK, RawBin), Pos + TotalBytes}
- end, LocNums, Bins).
-
+ lists:zipwith(
+ fun({Pos, TotalBytes}, Bin) ->
+ <<RawBin:TotalBytes/binary>> = Bin,
+ {remove_block_prefixes(Pos rem ?SIZE_BLOCK, RawBin), Pos + TotalBytes}
+ end,
+ LocNums,
+ Bins
+ ).
get_pread_locnum(File, Pos, Len) ->
BlockOffset = Pos rem ?SIZE_BLOCK,
TotalBytes = calculate_total_read_len(BlockOffset, Len),
case Pos + TotalBytes of
- Size when Size > File#file.eof ->
- couch_stats:increment_counter([pread, exceed_eof]),
- {_Fd, Filepath} = get(couch_file_fd),
- throw({read_beyond_eof, Filepath});
- Size when Size > File#file.pread_limit ->
- couch_stats:increment_counter([pread, exceed_limit]),
- {_Fd, Filepath} = get(couch_file_fd),
- throw({exceed_pread_limit, Filepath, File#file.pread_limit});
- _ ->
- {Pos, TotalBytes}
+ Size when Size > File#file.eof ->
+ couch_stats:increment_counter([pread, exceed_eof]),
+ {_Fd, Filepath} = get(couch_file_fd),
+ throw({read_beyond_eof, Filepath});
+ Size when Size > File#file.pread_limit ->
+ couch_stats:increment_counter([pread, exceed_limit]),
+ {_Fd, Filepath} = get(couch_file_fd),
+ throw({exceed_pread_limit, Filepath, File#file.pread_limit});
+ _ ->
+ {Pos, TotalBytes}
end.
-
-spec extract_md5(iolist()) -> {binary(), iolist()}.
extract_md5(FullIoList) ->
{Md5List, IoList} = split_iolist(FullIoList, 16, []),
@@ -763,26 +801,28 @@ calculate_total_read_len(0, FinalLen) ->
calculate_total_read_len(1, FinalLen) + 1;
calculate_total_read_len(BlockOffset, FinalLen) ->
case ?SIZE_BLOCK - BlockOffset of
- BlockLeft when BlockLeft >= FinalLen ->
- FinalLen;
- BlockLeft ->
- FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK -1)) +
- if ((FinalLen - BlockLeft) rem (?SIZE_BLOCK -1)) =:= 0 -> 0;
- true -> 1 end
+ BlockLeft when BlockLeft >= FinalLen ->
+ FinalLen;
+ BlockLeft ->
+ FinalLen + ((FinalLen - BlockLeft) div (?SIZE_BLOCK - 1)) +
+ if
+ ((FinalLen - BlockLeft) rem (?SIZE_BLOCK - 1)) =:= 0 -> 0;
+ true -> 1
+ end
end.
remove_block_prefixes(_BlockOffset, <<>>) ->
[];
-remove_block_prefixes(0, <<_BlockPrefix,Rest/binary>>) ->
+remove_block_prefixes(0, <<_BlockPrefix, Rest/binary>>) ->
remove_block_prefixes(1, Rest);
remove_block_prefixes(BlockOffset, Bin) ->
BlockBytesAvailable = ?SIZE_BLOCK - BlockOffset,
case size(Bin) of
- Size when Size > BlockBytesAvailable ->
- <<DataBlock:BlockBytesAvailable/binary,Rest/binary>> = Bin,
- [DataBlock | remove_block_prefixes(0, Rest)];
- _Size ->
- [Bin]
+ Size when Size > BlockBytesAvailable ->
+ <<DataBlock:BlockBytesAvailable/binary, Rest/binary>> = Bin,
+ [DataBlock | remove_block_prefixes(0, Rest)];
+ _Size ->
+ [Bin]
end.
make_blocks(_BlockOffset, []) ->
@@ -791,16 +831,16 @@ make_blocks(0, IoList) ->
[<<0>> | make_blocks(1, IoList)];
make_blocks(BlockOffset, IoList) ->
case split_iolist(IoList, (?SIZE_BLOCK - BlockOffset), []) of
- {Begin, End} ->
- [Begin | make_blocks(0, End)];
- _SplitRemaining ->
- IoList
+ {Begin, End} ->
+ [Begin | make_blocks(0, End)];
+ _SplitRemaining ->
+ IoList
end.
%% @doc Returns a tuple where the first element contains the leading SplitAt
%% bytes of the original iolist, and the 2nd element is the tail. If SplitAt
%% is larger than byte_size(IoList), return the difference.
--spec split_iolist(IoList::iolist(), SplitAt::non_neg_integer(), Acc::list()) ->
+-spec split_iolist(IoList :: iolist(), SplitAt :: non_neg_integer(), Acc :: list()) ->
{iolist(), iolist()} | non_neg_integer().
split_iolist(List, 0, BeginAcc) ->
{lists:reverse(BeginAcc), List};
@@ -809,14 +849,14 @@ split_iolist([], SplitAt, _BeginAcc) ->
split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) when SplitAt > byte_size(Bin) ->
split_iolist(Rest, SplitAt - byte_size(Bin), [Bin | BeginAcc]);
split_iolist([<<Bin/binary>> | Rest], SplitAt, BeginAcc) ->
- <<Begin:SplitAt/binary,End/binary>> = Bin,
+ <<Begin:SplitAt/binary, End/binary>> = Bin,
split_iolist([End | Rest], 0, [Begin | BeginAcc]);
-split_iolist([Sublist| Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
+split_iolist([Sublist | Rest], SplitAt, BeginAcc) when is_list(Sublist) ->
case split_iolist(Sublist, SplitAt, BeginAcc) of
- {Begin, End} ->
- {Begin, [End | Rest]};
- SplitRemaining ->
- split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
+ {Begin, End} ->
+ {Begin, [End | Rest]};
+ SplitRemaining ->
+ split_iolist(Rest, SplitAt - (SplitAt - SplitRemaining), [Sublist | BeginAcc])
end;
split_iolist([Byte | Rest], SplitAt, BeginAcc) when is_integer(Byte) ->
split_iolist(Rest, SplitAt - 1, [Byte | BeginAcc]).
@@ -825,29 +865,25 @@ monitored_by_pids() ->
{monitored_by, PidsAndRefs} = process_info(self(), monitored_by),
lists:filter(fun is_pid/1, PidsAndRefs).
-
verify_md5(_Fd, _Pos, IoList, <<>>) ->
IoList;
-
verify_md5(Fd, Pos, IoList, Md5) ->
case couch_hash:md5_hash(IoList) of
Md5 -> IoList;
_ -> report_md5_error(Fd, Pos)
end.
-
report_md5_error(Fd, Pos) ->
couch_log:emergency("File corruption in ~p at position ~B", [Fd, Pos]),
exit({file_corruption, <<"file corruption">>}).
-
% System dbs aren't monitored by couch_stats_process_tracker
-is_idle(#file{is_sys=true}) ->
+is_idle(#file{is_sys = true}) ->
case monitored_by_pids() of
[] -> true;
_ -> false
end;
-is_idle(#file{is_sys=false}) ->
+is_idle(#file{is_sys = false}) ->
Tracker = whereis(couch_stats_process_tracker),
case monitored_by_pids() of
[] -> true;
@@ -865,10 +901,10 @@ process_info(Pid) ->
update_read_timestamp() ->
put(read_timestamp, os:timestamp()).
-upgrade_state(#file{db_monitor=DbPid}=File) when is_pid(DbPid) ->
+upgrade_state(#file{db_monitor = DbPid} = File) when is_pid(DbPid) ->
unlink(DbPid),
Ref = monitor(process, DbPid),
- File#file{db_monitor=Ref};
+ File#file{db_monitor = Ref};
upgrade_state(State) ->
State.
@@ -889,21 +925,26 @@ reset_eof(#file{} = File) ->
deleted_filename_test_() ->
DbNames = ["dbname", "db.name", "user/dbname"],
Fixtures = make_filename_fixtures(DbNames),
- lists:map(fun(Fixture) ->
- should_create_proper_deleted_filename(Fixture)
- end, Fixtures).
+ lists:map(
+ fun(Fixture) ->
+ should_create_proper_deleted_filename(Fixture)
+ end,
+ Fixtures
+ ).
should_create_proper_deleted_filename(Before) ->
{Before,
- ?_test(begin
- BeforeExtension = filename:extension(Before),
- BeforeBasename = filename:basename(Before, BeforeExtension),
- Re = "^" ++ BeforeBasename ++ "\.[0-9]{8}\.[0-9]{6}\.deleted\..*$",
- After = deleted_filename(Before),
- ?assertEqual(match,
- re:run(filename:basename(After), Re, [{capture, none}])),
- ?assertEqual(BeforeExtension, filename:extension(After))
- end)}.
+ ?_test(begin
+ BeforeExtension = filename:extension(Before),
+ BeforeBasename = filename:basename(Before, BeforeExtension),
+ Re = "^" ++ BeforeBasename ++ "\.[0-9]{8}\.[0-9]{6}\.deleted\..*$",
+ After = deleted_filename(Before),
+ ?assertEqual(
+ match,
+ re:run(filename:basename(After), Re, [{capture, none}])
+ ),
+ ?assertEqual(BeforeExtension, filename:extension(After))
+ end)}.
make_filename_fixtures(DbNames) ->
Formats = [
@@ -912,12 +953,18 @@ make_filename_fixtures(DbNames) ->
"shards/00000000-1fffffff/~s.1458336317.couch",
".shards/00000000-1fffffff/~s.1458336317_design",
".shards/00000000-1fffffff/~s.1458336317_design"
- "/mrview/3133e28517e89a3e11435dd5ac4ad85a.view"
+ "/mrview/3133e28517e89a3e11435dd5ac4ad85a.view"
],
- lists:flatmap(fun(DbName) ->
- lists:map(fun(Format) ->
- filename:join("/srv/data", io_lib:format(Format, [DbName]))
- end, Formats)
- end, DbNames).
+ lists:flatmap(
+ fun(DbName) ->
+ lists:map(
+ fun(Format) ->
+ filename:join("/srv/data", io_lib:format(Format, [DbName]))
+ end,
+ Formats
+ )
+ end,
+ DbNames
+ ).
-endif.
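
The couch_file.erl hunks above repeatedly apply one erlfmt convention: when a call such as lists:map/2 or lists:zipwith/3 takes a multi-line fun, every argument moves onto its own line and the closing parenthesis is dedented back to the call's indentation. A minimal, self-contained sketch of that layout, using hypothetical module and function names rather than anything from the CouchDB tree:

```
%% Hypothetical example of the argument layout erlfmt enforces for calls
%% that no longer fit on one line: one argument per line, trailing ")"
%% aligned with the start of the call.
-module(erlfmt_layout_example).
-export([decompress_all/1]).

decompress_all(Bins) ->
    lists:map(
        fun(Bin) ->
            %% zlib:uncompress/1 stands in for couch_compress:decompress/1
            zlib:uncompress(Bin)
        end,
        Bins
    ).
```
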
diff --git a/src/couch/src/couch_flags.erl b/src/couch/src/couch_flags.erl
index 5cfe7f6d1..42d585f2e 100644
--- a/src/couch/src/couch_flags.erl
+++ b/src/couch/src/couch_flags.erl
@@ -64,13 +64,13 @@
-include_lib("mem3/include/mem3.hrl").
-include("couch_db_int.hrl").
--type subject()
- :: #db{}
- | #httpd{}
- | #shard{}
- | #ordered_shard{}
- | string()
- | binary().
+-type subject() ::
+ #db{}
+ | #httpd{}
+ | #shard{}
+ | #ordered_shard{}
+ | string()
+ | binary().
-define(SERVICE_ID, feature_flags).
@@ -79,8 +79,10 @@
enabled(Subject) ->
Key = maybe_handle(subject_key, [Subject], fun subject_key/1),
Handle = couch_epi:get_handle({flags, config}),
- lists:usort(enabled(Handle, {<<"/", Key/binary>>})
- ++ enabled(Handle, {couch_db:normalize_dbname(Key)})).
+ lists:usort(
+ enabled(Handle, {<<"/", Key/binary>>}) ++
+ enabled(Handle, {couch_db:normalize_dbname(Key)})
+ ).
-spec is_enabled(FlagId :: atom(), subject()) -> boolean().
@@ -106,9 +108,9 @@ enabled(Handle, Key) ->
subject_key(#db{name = Name}) ->
subject_key(Name);
-subject_key(#httpd{path_parts=[Name | _Rest]}) ->
+subject_key(#httpd{path_parts = [Name | _Rest]}) ->
subject_key(Name);
-subject_key(#httpd{path_parts=[]}) ->
+subject_key(#httpd{path_parts = []}) ->
<<>>;
subject_key(#shard{name = Name}) ->
subject_key(Name);
@@ -120,9 +122,10 @@ subject_key(Name) when is_binary(Name) ->
Name.
-spec maybe_handle(
- Function :: atom(),
- Args :: [term()],
- Default :: fun((Args :: [term()]) -> term())) ->
+ Function :: atom(),
+ Args :: [term()],
+ Default :: fun((Args :: [term()]) -> term())
+) ->
term().
maybe_handle(Func, Args, Default) ->
diff --git a/src/couch/src/couch_flags_config.erl b/src/couch/src/couch_flags_config.erl
index 104a48257..a50f4411f 100644
--- a/src/couch/src/couch_flags_config.erl
+++ b/src/couch/src/couch_flags_config.erl
@@ -28,23 +28,26 @@
-define(DATA_INTERVAL, 1000).
-define(MAX_FLAG_NAME_LENGTH, 256).
--type pattern()
- :: binary(). %% non empty binary which optionally can end with *
+-type pattern() ::
+ %% non empty binary which optionally can end with *
+ binary().
-type flag_id() :: atom().
-type flags() :: list(flag_id()).
--type parse_pattern()
- :: {
- binary(), %% pattern without trainig * if it is present
- pattern(),
- IsWildCard :: boolean(), %% true if the pattern has training *
- PatternSize :: pos_integer()
- }.
+-type parse_pattern() ::
+ {
+ %% pattern without trailing * if it is present
+ binary(),
+ pattern(),
+ %% true if the pattern has trailing *
+ IsWildCard :: boolean(),
+ PatternSize :: pos_integer()
+ }.
--type rule()
- :: {
+-type rule() ::
+ {
parse_pattern(),
EnabledFlags :: flags(),
DisabledFlags :: flags()
@@ -75,26 +78,29 @@ data() ->
data(Config) ->
ByPattern = collect_rules(Config),
- lists:reverse([{{P}, {P, size(P), E -- D}} || {P, {_, E, D}} <- ByPattern]).
+ lists:reverse([{{P}, {P, size(P), E -- D}} || {P, {_, E, D}} <- ByPattern]).
-spec parse_rules([{Key :: string(), Value :: string()}]) -> [rule()].
parse_rules(Config) ->
- lists:filtermap(fun({K, V}) ->
- case parse_rule(K, V) of
- {error, {Format, Args}} ->
- couch_log:error(Format, Args),
- false;
- Rule ->
- {true, Rule}
- end
- end, Config).
+ lists:filtermap(
+ fun({K, V}) ->
+ case parse_rule(K, V) of
+ {error, {Format, Args}} ->
+ couch_log:error(Format, Args),
+ false;
+ Rule ->
+ {true, Rule}
+ end
+ end,
+ Config
+ ).
-spec parse_rule(Key :: string(), Value :: string()) ->
rule()
| {error, Reason :: term()}.
-parse_rule(Key, "true") ->
+parse_rule(Key, "true") ->
parse_flags(binary:split(list_to_binary(Key), <<"||">>), true);
parse_rule(Key, "false") ->
parse_flags(binary:split(list_to_binary(Key), <<"||">>), false);
@@ -119,29 +125,32 @@ parse_flags([FlagsBin, PatternBin], Value) ->
end;
parse_flags(_Tokens, _) ->
couch_log:error(
- "Key should be in the form of `[flags]||pattern` (got ~s)", []),
+ "Key should be in the form of `[flags]||pattern` (got ~s)", []
+ ),
false.
-spec parse_flags_term(Flags :: binary()) ->
[flag_id()] | {error, Reason :: term()}.
parse_flags_term(FlagsBin) ->
- {Flags, Errors} = lists:splitwith(fun erlang:is_atom/1,
- [parse_flag(F) || F <- split_by_comma(FlagsBin)]),
+ {Flags, Errors} = lists:splitwith(
+ fun erlang:is_atom/1,
+ [parse_flag(F) || F <- split_by_comma(FlagsBin)]
+ ),
case Errors of
- [] ->
- lists:usort(Flags);
- _ ->
- {error, {
- "Cannot parse list of tags: ~n~p",
- Errors
- }}
+ [] ->
+ lists:usort(Flags);
+ _ ->
+ {error, {
+ "Cannot parse list of tags: ~n~p",
+ Errors
+ }}
end.
split_by_comma(Binary) ->
case binary:split(Binary, <<",">>, [global]) of
- [<<>>] -> [];
- Tokens -> Tokens
+ [<<>>] -> [];
+ Tokens -> Tokens
end.
parse_flag(FlagName) when size(FlagName) > ?MAX_FLAG_NAME_LENGTH ->
@@ -149,7 +158,7 @@ parse_flag(FlagName) when size(FlagName) > ?MAX_FLAG_NAME_LENGTH ->
parse_flag(FlagName) ->
FlagNameS = string:strip(binary_to_list(FlagName)),
try
- list_to_existing_atom(FlagNameS)
+ list_to_existing_atom(FlagNameS)
catch
_:_ -> {invalid_flag, FlagName}
end.
@@ -172,8 +181,10 @@ parse_pattern(PatternBin) ->
collect_rules(ConfigData) ->
ByKey = by_key(parse_rules(ConfigData)),
Keys = lists:sort(fun sort_by_length/2, gb_trees:keys(ByKey)),
- FuzzyKeys = lists:sort(fun sort_by_length/2,
- [K || {K, {{_, _, true, _}, _, _}} <- gb_trees:to_list(ByKey)]),
+ FuzzyKeys = lists:sort(
+ fun sort_by_length/2,
+ [K || {K, {{_, _, true, _}, _, _}} <- gb_trees:to_list(ByKey)]
+ ),
Rules = collect_rules(lists:reverse(Keys), FuzzyKeys, ByKey),
gb_trees:to_list(Rules).
@@ -185,17 +196,22 @@ sort_by_length(A, B) ->
-spec by_key(Items :: [rule()]) -> Dictionary :: gb_trees:tree().
by_key(Items) ->
- lists:foldl(fun({{_, K, _, _}, _, _} = Item, Acc) ->
- update_element(Acc, K, Item, fun(Value) ->
- update_flags(Value, Item)
- end)
- end, gb_trees:empty(), Items).
+ lists:foldl(
+ fun({{_, K, _, _}, _, _} = Item, Acc) ->
+ update_element(Acc, K, Item, fun(Value) ->
+ update_flags(Value, Item)
+ end)
+ end,
+ gb_trees:empty(),
+ Items
+ ).
-spec update_element(
- Tree :: gb_trees:tree(),
- Key :: pattern(),
- Default :: rule(),
- Fun :: fun((Item :: rule()) -> rule())) ->
+ Tree :: gb_trees:tree(),
+ Key :: pattern(),
+ Default :: rule(),
+ Fun :: fun((Item :: rule()) -> rule())
+) ->
gb_trees:tree().
update_element(Tree, Key, Default, Fun) ->
@@ -207,9 +223,10 @@ update_element(Tree, Key, Default, Fun) ->
end.
-spec collect_rules(
- Keys :: [pattern()],
- FuzzyKeys :: [pattern()],
- ByKey :: gb_trees:tree()) ->
+ Keys :: [pattern()],
+ FuzzyKeys :: [pattern()],
+ ByKey :: gb_trees:tree()
+) ->
gb_trees:tree().
collect_rules([], _, Acc) ->
@@ -218,9 +235,10 @@ collect_rules([Current | Rest], Items, Acc) ->
collect_rules(Rest, Items -- [Current], inherit_flags(Current, Items, Acc)).
-spec inherit_flags(
- Current :: pattern(),
- FuzzyKeys :: [pattern()],
- ByKey :: gb_trees:tree()) ->
+ Current :: pattern(),
+ FuzzyKeys :: [pattern()],
+ ByKey :: gb_trees:tree()
+) ->
gb_trees:tree().
inherit_flags(_Current, [], Acc) ->
@@ -234,9 +252,10 @@ inherit_flags(Current, [Item | Items], Acc) ->
end.
-spec match_prefix(
- AKey :: pattern(),
- BKey :: pattern(),
- ByKey :: gb_trees:tree()) ->
+ AKey :: pattern(),
+ BKey :: pattern(),
+ ByKey :: gb_trees:tree()
+) ->
boolean().
match_prefix(AKey, BKey, Acc) ->
@@ -257,9 +276,10 @@ match_prefix({{Key0, _, _, _}, _, _}, {{Key1, _, true, S1}, _, _}) ->
end.
-spec update_flags(
- AKey :: pattern(),
- BKey :: pattern(),
- ByKey :: gb_trees:tree()) ->
+ AKey :: pattern(),
+ BKey :: pattern(),
+ ByKey :: gb_trees:tree()
+) ->
gb_trees:tree().
update_flags(AKey, BKey, Acc) ->
@@ -283,6 +303,7 @@ update_flags({Pattern, E0, D0}, {_, E1, D1}) ->
get_config_section(Section) ->
try
config:get(Section)
- catch error:badarg ->
+ catch
+ error:badarg ->
[]
end.
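
The couch_flags_config.erl changes above also show how erlfmt lays out multi-line -spec attributes: each named argument sits on its own line, the closing ") ->" drops to its own line, and the return type follows, indented. A small illustrative sketch of the same shape, with invented names that are not taken from the module itself:

```
%% Hypothetical spec/function pair illustrating erlfmt's multi-line spec
%% layout: arguments one per line, ") ->" dedented, result type indented
%% on the following line.
-module(erlfmt_spec_example).
-export([merge_flags/2]).

-spec merge_flags(
    Enabled :: [atom()],
    Disabled :: [atom()]
) ->
    [atom()].
merge_flags(Enabled, Disabled) ->
    lists:usort(Enabled) -- Disabled.
```
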
diff --git a/src/couch/src/couch_hotp.erl b/src/couch/src/couch_hotp.erl
index 4ba81c9bf..cdb8291f3 100644
--- a/src/couch/src/couch_hotp.erl
+++ b/src/couch/src/couch_hotp.erl
@@ -14,15 +14,16 @@
-export([generate/4]).
-generate(Alg, Key, Counter, OutputLen)
- when is_atom(Alg), is_binary(Key), is_integer(Counter), is_integer(OutputLen) ->
+generate(Alg, Key, Counter, OutputLen) when
+ is_atom(Alg), is_binary(Key), is_integer(Counter), is_integer(OutputLen)
+->
Hmac = couch_util:hmac(Alg, Key, <<Counter:64>>),
Offset = binary:last(Hmac) band 16#f,
Code =
((binary:at(Hmac, Offset) band 16#7f) bsl 24) +
- ((binary:at(Hmac, Offset + 1) band 16#ff) bsl 16) +
- ((binary:at(Hmac, Offset + 2) band 16#ff) bsl 8) +
- ((binary:at(Hmac, Offset + 3) band 16#ff)),
+ ((binary:at(Hmac, Offset + 1) band 16#ff) bsl 16) +
+ ((binary:at(Hmac, Offset + 2) band 16#ff) bsl 8) +
+ (binary:at(Hmac, Offset + 3) band 16#ff),
case OutputLen of
6 -> Code rem 1000000;
7 -> Code rem 10000000;
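
The couch_hotp.erl hunk directly above demonstrates erlfmt's treatment of clause heads with long guard sequences: `when` stays on the head line, the guards are indented on the next line, and `->` gets a line of its own. A brief sketch under the same convention; the function and variable names are made up for the example:

```
%% Hypothetical clause showing erlfmt's guard layout when a head plus its
%% guards overflow one line: guards indented under "when", "->" alone on
%% the next line.
-module(erlfmt_guard_example).
-export([clamp/3]).

clamp(Value, Min, Max) when
    is_integer(Value), is_integer(Min), is_integer(Max), Min =< Max
->
    max(Min, min(Max, Value)).
```
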
diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index 535fc9245..64b68ce3f 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -18,21 +18,27 @@
-export([start_link/0, start_link/1, stop/0, handle_request/5]).
--export([header_value/2,header_value/3,qs_value/2,qs_value/3,qs/1,qs_json_value/3]).
--export([path/1,absolute_uri/2,body_length/1]).
--export([verify_is_server_admin/1,unquote/1,quote/1,recv/2,recv_chunked/4,error_info/1]).
+-export([header_value/2, header_value/3, qs_value/2, qs_value/3, qs/1, qs_json_value/3]).
+-export([path/1, absolute_uri/2, body_length/1]).
+-export([verify_is_server_admin/1, unquote/1, quote/1, recv/2, recv_chunked/4, error_info/1]).
-export([make_fun_spec_strs/1]).
-export([make_arity_1_fun/1, make_arity_2_fun/1, make_arity_3_fun/1]).
--export([parse_form/1,json_body/1,json_body_obj/1,body/1]).
+-export([parse_form/1, json_body/1, json_body_obj/1, body/1]).
-export([doc_etag/1, doc_etag/3, make_etag/1, etag_match/2, etag_respond/3, etag_maybe/2]).
--export([primary_header_value/2,partition/1,serve_file/3,serve_file/4, server_header/0]).
--export([start_chunked_response/3,send_chunk/2,log_request/2]).
+-export([primary_header_value/2, partition/1, serve_file/3, serve_file/4, server_header/0]).
+-export([start_chunked_response/3, send_chunk/2, log_request/2]).
-export([start_response_length/4, start_response/3, send/2]).
-export([start_json_response/2, start_json_response/3, end_json_response/1]).
--export([send_response/4,send_response_no_cors/4,send_method_not_allowed/2,
- send_error/2,send_error/4, send_redirect/2,send_chunked_error/2]).
--export([send_json/2,send_json/3,send_json/4,last_chunk/1,parse_multipart_request/3]).
--export([accepted_encodings/1,handle_request_int/5,validate_referer/1,validate_ctype/2]).
+-export([
+ send_response/4,
+ send_response_no_cors/4,
+ send_method_not_allowed/2,
+ send_error/2, send_error/4,
+ send_redirect/2,
+ send_chunked_error/2
+]).
+-export([send_json/2, send_json/3, send_json/4, last_chunk/1, parse_multipart_request/3]).
+-export([accepted_encodings/1, handle_request_int/5, validate_referer/1, validate_ctype/2]).
-export([http_1_0_keep_alive/2]).
-export([validate_host/1]).
-export([validate_bind_address/1]).
@@ -47,7 +53,8 @@
-define(DEFAULT_SOCKET_OPTIONS, "[{sndbuf, 262144}]").
-define(DEFAULT_AUTHENTICATION_HANDLERS,
"{couch_httpd_auth, cookie_authentication_handler}, "
- "{couch_httpd_auth, default_authentication_handler}").
+ "{couch_httpd_auth, default_authentication_handler}"
+).
start_link() ->
start_link(http).
@@ -58,18 +65,24 @@ start_link(https) ->
Port = config:get("ssl", "port", "6984"),
{ok, Ciphers} = couch_util:parse_term(config:get("ssl", "ciphers", undefined)),
{ok, Versions} = couch_util:parse_term(config:get("ssl", "tls_versions", undefined)),
- {ok, SecureRenegotiate} = couch_util:parse_term(config:get("ssl", "secure_renegotiate", undefined)),
+ {ok, SecureRenegotiate} = couch_util:parse_term(
+ config:get("ssl", "secure_renegotiate", undefined)
+ ),
ServerOpts0 =
- [{cacertfile, config:get("ssl", "cacert_file", undefined)},
- {keyfile, config:get("ssl", "key_file", undefined)},
- {certfile, config:get("ssl", "cert_file", undefined)},
- {password, config:get("ssl", "password", undefined)},
- {secure_renegotiate, SecureRenegotiate},
- {versions, Versions},
- {ciphers, Ciphers}],
-
- case (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
- couch_util:get_value(certfile, ServerOpts0) == undefined) of
+ [
+ {cacertfile, config:get("ssl", "cacert_file", undefined)},
+ {keyfile, config:get("ssl", "key_file", undefined)},
+ {certfile, config:get("ssl", "cert_file", undefined)},
+ {password, config:get("ssl", "password", undefined)},
+ {secure_renegotiate, SecureRenegotiate},
+ {versions, Versions},
+ {ciphers, Ciphers}
+ ],
+
+ case
+ (couch_util:get_value(keyfile, ServerOpts0) == undefined orelse
+ couch_util:get_value(certfile, ServerOpts0) == undefined)
+ of
true ->
couch_log:error("SSL enabled but PEM certificates are missing", []),
throw({error, missing_certs});
@@ -77,44 +90,58 @@ start_link(https) ->
ok
end,
- ServerOpts = [Opt || {_, V}=Opt <- ServerOpts0, V /= undefined],
-
- ClientOpts = case config:get("ssl", "verify_ssl_certificates", "false") of
- "false" ->
- [];
- "true" ->
- FailIfNoPeerCert = case config:get("ssl", "fail_if_no_peer_cert", "false") of
- "false" -> false;
- "true" -> true
- end,
- [{depth, list_to_integer(config:get("ssl",
- "ssl_certificate_max_depth", "1"))},
- {fail_if_no_peer_cert, FailIfNoPeerCert},
- {verify, verify_peer}] ++
- case config:get("ssl", "verify_fun", undefined) of
- undefined -> [];
- SpecStr ->
- [{verify_fun, make_arity_3_fun(SpecStr)}]
- end
- end,
+ ServerOpts = [Opt || {_, V} = Opt <- ServerOpts0, V /= undefined],
+
+ ClientOpts =
+ case config:get("ssl", "verify_ssl_certificates", "false") of
+ "false" ->
+ [];
+ "true" ->
+ FailIfNoPeerCert =
+ case config:get("ssl", "fail_if_no_peer_cert", "false") of
+ "false" -> false;
+ "true" -> true
+ end,
+ [
+ {depth,
+ list_to_integer(
+ config:get(
+ "ssl",
+ "ssl_certificate_max_depth",
+ "1"
+ )
+ )},
+ {fail_if_no_peer_cert, FailIfNoPeerCert},
+ {verify, verify_peer}
+ ] ++
+ case config:get("ssl", "verify_fun", undefined) of
+ undefined -> [];
+ SpecStr -> [{verify_fun, make_arity_3_fun(SpecStr)}]
+ end
+ end,
SslOpts = ServerOpts ++ ClientOpts,
Options =
- [{port, Port},
- {ssl, true},
- {ssl_opts, SslOpts}],
+ [
+ {port, Port},
+ {ssl, true},
+ {ssl_opts, SslOpts}
+ ],
start_link(https, Options).
start_link(Name, Options) ->
- BindAddress = case config:get("httpd", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
+ BindAddress =
+ case config:get("httpd", "bind_address", "any") of
+ "any" -> any;
+ Else -> Else
+ end,
ok = validate_bind_address(BindAddress),
{ok, ServerOptions} = couch_util:parse_term(
- config:get("httpd", "server_options", "[]")),
+ config:get("httpd", "server_options", "[]")
+ ),
{ok, SocketOptions} = couch_util:parse_term(
- config:get("httpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS)),
+ config:get("httpd", "socket_options", ?DEFAULT_SOCKET_OPTIONS)
+ ),
set_auth_handlers(),
Handlers = get_httpd_handlers(),
@@ -123,21 +150,26 @@ start_link(Name, Options) ->
% get the same value.
couch_server:get_uuid(),
- Loop = fun(Req)->
+ Loop = fun(Req) ->
case SocketOptions of
- [] ->
- ok;
- _ ->
- ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
+ [] ->
+ ok;
+ _ ->
+ ok = mochiweb_socket:setopts(Req:get(socket), SocketOptions)
end,
apply(?MODULE, handle_request, [Req | Handlers])
end,
% set mochiweb options
- FinalOptions = lists:append([Options, ServerOptions, [
+ FinalOptions = lists:append([
+ Options,
+ ServerOptions,
+ [
{loop, Loop},
{name, Name},
- {ip, BindAddress}]]),
+ {ip, BindAddress}
+ ]
+ ]),
% launch mochiweb
case mochiweb_http:start(FinalOptions) of
@@ -148,21 +180,27 @@ start_link(Name, Options) ->
throw({error, Reason})
end.
-
stop() ->
mochiweb_http:stop(couch_httpd),
catch mochiweb_http:stop(https).
-
set_auth_handlers() ->
AuthenticationSrcs = make_fun_spec_strs(
- config:get("httpd", "authentication_handlers",
- ?DEFAULT_AUTHENTICATION_HANDLERS)),
+ config:get(
+ "httpd",
+ "authentication_handlers",
+ ?DEFAULT_AUTHENTICATION_HANDLERS
+ )
+ ),
AuthHandlers = lists:map(
- fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs),
- AuthenticationFuns = AuthHandlers ++ [
- fun couch_httpd_auth:party_mode_handler/1 %% must be last
- ],
+ fun(A) -> {auth_handler_name(A), make_arity_1_fun(A)} end, AuthenticationSrcs
+ ),
+ AuthenticationFuns =
+ AuthHandlers ++
+ [
+ %% must be last
+ fun couch_httpd_auth:party_mode_handler/1
+ ],
ok = application:set_env(couch, auth_handlers, AuthenticationFuns).
auth_handler_name(SpecStr) ->
@@ -174,21 +212,27 @@ get_httpd_handlers() ->
UrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_1_fun(SpecStr)}
- end, HttpdGlobalHandlers),
+ end,
+ HttpdGlobalHandlers
+ ),
{ok, HttpdDbHandlers} = application:get_env(couch, httpd_db_handlers),
DbUrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_2_fun(SpecStr)}
- end, HttpdDbHandlers),
+ end,
+ HttpdDbHandlers
+ ),
{ok, HttpdDesignHandlers} = application:get_env(couch, httpd_design_handlers),
DesignUrlHandlersList = lists:map(
fun({UrlKey, SpecStr}) ->
{?l2b(UrlKey), make_arity_3_fun(SpecStr)}
- end, HttpdDesignHandlers),
+ end,
+ HttpdDesignHandlers
+ ),
UrlHandlers = dict:from_list(UrlHandlersList),
DbUrlHandlers = dict:from_list(DbUrlHandlersList),
@@ -200,26 +244,26 @@ get_httpd_handlers() ->
% or "{my_module, my_fun, <<"my_arg">>}"
make_arity_1_fun(SpecStr) ->
case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg) -> Mod:Fun(Arg) end
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg) -> Mod:Fun(Arg, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg) -> Mod:Fun(Arg) end
end.
make_arity_2_fun(SpecStr) ->
case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg1, Arg2) -> Mod:Fun(Arg1, Arg2) end
end.
make_arity_3_fun(SpecStr) ->
case couch_util:parse_term(SpecStr) of
- {ok, {Mod, Fun, SpecArg}} ->
- fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
- {ok, {Mod, Fun}} ->
- fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
+ {ok, {Mod, Fun, SpecArg}} ->
+ fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3, SpecArg) end;
+ {ok, {Mod, Fun}} ->
+ fun(Arg1, Arg2, Arg3) -> Mod:Fun(Arg1, Arg2, Arg3) end
end.
% SpecStr is "{my_module, my_fun}, {my_module2, my_fun2}"
@@ -231,15 +275,25 @@ handle_request(MochiReq) ->
erlang:put(mochiweb_request_body, Body),
apply(?MODULE, handle_request, [MochiReq | get_httpd_handlers()]).
-handle_request(MochiReq, DefaultFun, UrlHandlers, DbUrlHandlers,
- DesignUrlHandlers) ->
+handle_request(
+ MochiReq,
+ DefaultFun,
+ UrlHandlers,
+ DbUrlHandlers,
+ DesignUrlHandlers
+) ->
%% reset rewrite count for new request
erlang:put(?REWRITE_COUNT, 0),
MochiReq1 = couch_httpd_vhost:dispatch_host(MochiReq),
- handle_request_int(MochiReq1, DefaultFun,
- UrlHandlers, DbUrlHandlers, DesignUrlHandlers).
+ handle_request_int(
+ MochiReq1,
+ DefaultFun,
+ UrlHandlers,
+ DbUrlHandlers,
+ DesignUrlHandlers
+ ).
handle_request_int(MochiReq, DefaultFun,
UrlHandlers, DbUrlHandlers, DesignUrlHandlers) ->
@@ -420,8 +474,10 @@ valid_hosts() ->
re:split(List, ",", [{return, list}]).
check_request_uri_length(Uri) ->
- check_request_uri_length(Uri,
- chttpd_util:get_chttpd_config("max_uri_length")).
+ check_request_uri_length(
+ Uri,
+ chttpd_util:get_chttpd_config("max_uri_length")
+ ).
check_request_uri_length(_Uri, undefined) ->
ok;
@@ -444,34 +500,33 @@ validate_referer(Req) ->
Host = host_for_request(Req),
Referer = header_value(Req, "Referer", fail),
case Referer of
- fail ->
- throw({bad_request, <<"Referer header required.">>});
- Referer ->
- {_,RefererHost,_,_,_} = mochiweb_util:urlsplit(Referer),
- if
- RefererHost =:= Host -> ok;
- true -> throw({bad_request, <<"Referer header must match host.">>})
- end
+ fail ->
+ throw({bad_request, <<"Referer header required.">>});
+ Referer ->
+ {_, RefererHost, _, _, _} = mochiweb_util:urlsplit(Referer),
+ if
+ RefererHost =:= Host -> ok;
+ true -> throw({bad_request, <<"Referer header must match host.">>})
+ end
end.
validate_ctype(Req, Ctype) ->
case header_value(Req, "Content-Type") of
- undefined ->
- throw({bad_ctype, "Content-Type must be "++Ctype});
- ReqCtype ->
- case string:tokens(ReqCtype, ";") of
- [Ctype] -> ok;
- [Ctype | _Rest] -> ok;
- _Else ->
- throw({bad_ctype, "Content-Type must be "++Ctype})
- end
+ undefined ->
+ throw({bad_ctype, "Content-Type must be " ++ Ctype});
+ ReqCtype ->
+ case string:tokens(ReqCtype, ";") of
+ [Ctype] -> ok;
+ [Ctype | _Rest] -> ok;
+ _Else -> throw({bad_ctype, "Content-Type must be " ++ Ctype})
+ end
end.
-
check_max_request_length(Req) ->
Len = list_to_integer(header_value(Req, "Content-Length", "0")),
MaxLen = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296),
+ "max_http_request_size", 4294967296
+ ),
case Len > MaxLen of
true ->
exit({body_too_large, Len});
@@ -479,32 +534,31 @@ check_max_request_length(Req) ->
ok
end.
-
% Utilities
partition(Path) ->
mochiweb_util:partition(Path, "/").
-header_value(#httpd{mochi_req=MochiReq}, Key) ->
+header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_header_value(Key).
-header_value(#httpd{mochi_req=MochiReq}, Key, Default) ->
+header_value(#httpd{mochi_req = MochiReq}, Key, Default) ->
case MochiReq:get_header_value(Key) of
- undefined -> Default;
- Value -> Value
+ undefined -> Default;
+ Value -> Value
end.
-primary_header_value(#httpd{mochi_req=MochiReq}, Key) ->
+primary_header_value(#httpd{mochi_req = MochiReq}, Key) ->
MochiReq:get_primary_header_value(Key).
-accepted_encodings(#httpd{mochi_req=MochiReq}) ->
+accepted_encodings(#httpd{mochi_req = MochiReq}) ->
case MochiReq:accepted_encodings(["gzip", "identity"]) of
- bad_accept_encoding_value ->
- throw(bad_accept_encoding_value);
- [] ->
- throw(unacceptable_encoding);
- EncList ->
- EncList
+ bad_accept_encoding_value ->
+ throw(bad_accept_encoding_value);
+ [] ->
+ throw(unacceptable_encoding);
+ EncList ->
+ EncList
end.
serve_file(Req, RelativePath, DocumentRoot) ->
@@ -514,7 +568,8 @@ serve_file(Req0, RelativePath0, DocumentRoot0, ExtraHeaders) ->
Headers0 = basic_headers(Req0, ExtraHeaders),
{ok, {Req1, Code1, Headers1, RelativePath1, DocumentRoot1}} =
chttpd_plugin:before_serve_file(
- Req0, 200, Headers0, RelativePath0, DocumentRoot0),
+ Req0, 200, Headers0, RelativePath0, DocumentRoot0
+ ),
log_request(Req1, Code1),
#httpd{mochi_req = MochiReq} = Req1,
{ok, MochiReq:serve_file(RelativePath1, DocumentRoot1, Headers1)}.
@@ -527,53 +582,61 @@ qs_value(Req, Key, Default) ->
qs_json_value(Req, Key, Default) ->
case qs_value(Req, Key, Default) of
- Default ->
- Default;
- Result ->
- ?JSON_DECODE(Result)
+ Default ->
+ Default;
+ Result ->
+ ?JSON_DECODE(Result)
end.
-qs(#httpd{mochi_req=MochiReq}) ->
+qs(#httpd{mochi_req = MochiReq}) ->
MochiReq:parse_qs().
-path(#httpd{mochi_req=MochiReq}) ->
+path(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(path).
-host_for_request(#httpd{mochi_req=MochiReq}) ->
+host_for_request(#httpd{mochi_req = MochiReq}) ->
XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"),
+ "x_forwarded_host", "X-Forwarded-Host"
+ ),
case MochiReq:get_header_value(XHost) of
undefined ->
case MochiReq:get_header_value("Host") of
undefined ->
- {ok, {Address, Port}} = case MochiReq:get(socket) of
- {ssl, SslSocket} -> ssl:sockname(SslSocket);
- Socket -> inet:sockname(Socket)
- end,
+ {ok, {Address, Port}} =
+ case MochiReq:get(socket) of
+ {ssl, SslSocket} -> ssl:sockname(SslSocket);
+ Socket -> inet:sockname(Socket)
+ end,
inet_parse:ntoa(Address) ++ ":" ++ integer_to_list(Port);
Value1 ->
Value1
end;
- Value -> Value
+ Value ->
+ Value
end.
-absolute_uri(#httpd{mochi_req=MochiReq}=Req, [$/ | _] = Path) ->
+absolute_uri(#httpd{mochi_req = MochiReq} = Req, [$/ | _] = Path) ->
Host = host_for_request(Req),
XSsl = chttpd_util:get_chttpd_config("x_forwarded_ssl", "X-Forwarded-Ssl"),
- Scheme = case MochiReq:get_header_value(XSsl) of
- "on" -> "https";
- _ ->
- XProto = chttpd_util:get_chttpd_config(
- "x_forwarded_proto", "X-Forwarded-Proto"),
- case MochiReq:get_header_value(XProto) of
- %% Restrict to "https" and "http" schemes only
- "https" -> "https";
- _ -> case MochiReq:get(scheme) of
- https -> "https";
- http -> "http"
- end
- end
- end,
+ Scheme =
+ case MochiReq:get_header_value(XSsl) of
+ "on" ->
+ "https";
+ _ ->
+ XProto = chttpd_util:get_chttpd_config(
+ "x_forwarded_proto", "X-Forwarded-Proto"
+ ),
+ case MochiReq:get_header_value(XProto) of
+ %% Restrict to "https" and "http" schemes only
+ "https" ->
+ "https";
+ _ ->
+ case MochiReq:get(scheme) of
+ https -> "https";
+ http -> "http"
+ end
+ end
+ end,
Scheme ++ "://" ++ Host ++ Path;
absolute_uri(_Req, _Path) ->
throw({bad_request, "path must begin with a /."}).
@@ -584,60 +647,63 @@ unquote(UrlEncodedString) ->
quote(UrlDecodedString) ->
mochiweb_util:quote_plus(UrlDecodedString).
-parse_form(#httpd{mochi_req=MochiReq}) ->
+parse_form(#httpd{mochi_req = MochiReq}) ->
mochiweb_multipart:parse_form(MochiReq).
-recv(#httpd{mochi_req=MochiReq}, Len) ->
+recv(#httpd{mochi_req = MochiReq}, Len) ->
MochiReq:recv(Len).
-recv_chunked(#httpd{mochi_req=MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
+recv_chunked(#httpd{mochi_req = MochiReq}, MaxChunkSize, ChunkFun, InitState) ->
% Fun is called once with each chunk
% Fun({Length, Binary}, State)
% called with Length == 0 on the last time.
- MochiReq:stream_body(MaxChunkSize, ChunkFun, InitState,
+ MochiReq:stream_body(
+ MaxChunkSize,
+ ChunkFun,
+ InitState,
chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296)).
+ "max_http_request_size", 4294967296
+ )
+ ).
-body_length(#httpd{mochi_req=MochiReq}) ->
+body_length(#httpd{mochi_req = MochiReq}) ->
MochiReq:get(body_length).
-body(#httpd{mochi_req=MochiReq, req_body=undefined}) ->
+body(#httpd{mochi_req = MochiReq, req_body = undefined}) ->
MaxSize = chttpd_util:get_chttpd_config_integer(
- "max_http_request_size", 4294967296),
+ "max_http_request_size", 4294967296
+ ),
MochiReq:recv_body(MaxSize);
-body(#httpd{req_body=ReqBody}) ->
+body(#httpd{req_body = ReqBody}) ->
ReqBody.
-json_body(#httpd{req_body=undefined} = Httpd) ->
+json_body(#httpd{req_body = undefined} = Httpd) ->
case body(Httpd) of
undefined ->
throw({bad_request, "Missing request body"});
Body ->
?JSON_DECODE(maybe_decompress(Httpd, Body))
end;
-
-json_body(#httpd{req_body=ReqBody}) ->
+json_body(#httpd{req_body = ReqBody}) ->
ReqBody.
json_body_obj(Httpd) ->
case json_body(Httpd) of
{Props} -> {Props};
- _Else ->
- throw({bad_request, "Request body must be a JSON object"})
+ _Else -> throw({bad_request, "Request body must be a JSON object"})
end.
-
maybe_decompress(Httpd, Body) ->
case header_value(Httpd, "Content-Encoding", "identity") of
- "gzip" ->
- zlib:gunzip(Body);
- "identity" ->
- Body;
- Else ->
- throw({bad_ctype, [Else, " is not a supported content encoding."]})
+ "gzip" ->
+ zlib:gunzip(Body);
+ "identity" ->
+ Body;
+ Else ->
+ throw({bad_ctype, [Else, " is not a supported content encoding."]})
end.
-doc_etag(#doc{id=Id, body=Body, revs={Start, [DiskRev|_]}}) ->
+doc_etag(#doc{id = Id, body = Body, revs = {Start, [DiskRev | _]}}) ->
doc_etag(Id, Body, {Start, DiskRev}).
doc_etag(<<"_local/", _/binary>>, Body, {Start, DiskRev}) ->
@@ -647,7 +713,7 @@ doc_etag(_Id, _Body, {Start, DiskRev}) ->
rev_etag({Start, DiskRev}) ->
Rev = couch_doc:rev_to_str({Start, DiskRev}),
- <<$", Rev/binary, $">>.
+ <<$", Rev/binary, $">>.
make_etag(Term) ->
<<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
@@ -655,20 +721,20 @@ make_etag(Term) ->
etag_match(Req, CurrentEtag) when is_binary(CurrentEtag) ->
etag_match(Req, binary_to_list(CurrentEtag));
-
etag_match(Req, CurrentEtag) ->
EtagsToMatch = string:tokens(
- header_value(Req, "If-None-Match", ""), ", "),
+ header_value(Req, "If-None-Match", ""), ", "
+ ),
lists:member(CurrentEtag, EtagsToMatch).
etag_respond(Req, CurrentEtag, RespFun) ->
case etag_match(Req, CurrentEtag) of
- true ->
- % the client has this in their cache.
- send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
- false ->
- % Run the function.
- RespFun()
+ true ->
+ % the client has this in their cache.
+ send_response(Req, 304, [{"ETag", CurrentEtag}], <<>>);
+ false ->
+ % Run the function.
+ RespFun()
end.
etag_maybe(Req, RespFun) ->
@@ -679,15 +745,15 @@ etag_maybe(Req, RespFun) ->
send_response(Req, 304, [{"ETag", ETag}], <<>>)
end.
-verify_is_server_admin(#httpd{user_ctx=UserCtx}) ->
+verify_is_server_admin(#httpd{user_ctx = UserCtx}) ->
verify_is_server_admin(UserCtx);
-verify_is_server_admin(#user_ctx{roles=Roles}) ->
+verify_is_server_admin(#user_ctx{roles = Roles}) ->
case lists:member(<<"_admin">>, Roles) of
- true -> ok;
- false -> throw({unauthorized, <<"You are not a server admin.">>})
+ true -> ok;
+ false -> throw({unauthorized, <<"You are not a server admin.">>})
end.
-log_request(#httpd{mochi_req=MochiReq,peer=Peer}=Req, Code) ->
+log_request(#httpd{mochi_req = MochiReq, peer = Peer} = Req, Code) ->
case erlang:get(dont_log_request) of
true ->
ok;
@@ -714,16 +780,16 @@ log_response(Code, Body) ->
couch_log:error("httpd ~p error response:~n ~s", [Code, Body])
end.
-start_response_length(#httpd{mochi_req=MochiReq}=Req, Code, Headers0, Length) ->
+start_response_length(#httpd{mochi_req = MochiReq} = Req, Code, Headers0, Length) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, Length, start_response_length),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
-start_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
+start_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
Headers1 = basic_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, undefined, start_response),
case MochiReq:get(method) of
@@ -741,9 +807,9 @@ send(Resp, Data) ->
no_resp_conn_header([]) ->
true;
-no_resp_conn_header([{Hdr, V}|Rest]) when is_binary(Hdr)->
- no_resp_conn_header([{?b2l(Hdr), V}|Rest]);
-no_resp_conn_header([{Hdr, _}|Rest]) when is_list(Hdr)->
+no_resp_conn_header([{Hdr, V} | Rest]) when is_binary(Hdr) ->
+ no_resp_conn_header([{?b2l(Hdr), V} | Rest]);
+no_resp_conn_header([{Hdr, _} | Rest]) when is_list(Hdr) ->
case string:to_lower(Hdr) of
"connection" -> false;
_ -> no_resp_conn_header(Rest)
@@ -760,12 +826,12 @@ http_1_0_keep_alive(Req, Headers) ->
false -> Headers
end.
-start_chunked_response(#httpd{mochi_req=MochiReq}=Req, Code, Headers0) ->
+start_chunked_response(#httpd{mochi_req = MochiReq} = Req, Code, Headers0) ->
Headers1 = add_headers(Req, Headers0),
Resp = handle_response(Req, Code, Headers1, chunked, respond),
case MochiReq:get(method) of
- 'HEAD' -> throw({http_head_abort, Resp});
- _ -> ok
+ 'HEAD' -> throw({http_head_abort, Resp});
+ _ -> ok
end,
{ok, Resp}.
@@ -774,8 +840,9 @@ send_chunk({remote, Pid, Ref} = Resp, Data) ->
{ok, Resp};
send_chunk(Resp, Data) ->
case iolist_size(Data) of
- 0 -> ok; % do nothing
- _ -> Resp:write_chunk(Data)
+ % do nothing
+ 0 -> ok;
+ _ -> Resp:write_chunk(Data)
end,
{ok, Resp}.
@@ -790,17 +857,23 @@ send_response(Req, Code, Headers0, Body) ->
Headers1 = chttpd_cors:headers(Req, Headers0),
send_response_no_cors(Req, Code, Headers1, Body).
-send_response_no_cors(#httpd{mochi_req=MochiReq}=Req, Code, Headers, Body) ->
+send_response_no_cors(#httpd{mochi_req = MochiReq} = Req, Code, Headers, Body) ->
Headers1 = http_1_0_keep_alive(MochiReq, Headers),
Headers2 = basic_headers_no_cors(Req, Headers1),
Headers3 = chttpd_xframe_options:header(Req, Headers2),
- Headers4 = chttpd_prefer_header:maybe_return_minimal(Req, Headers3),
+ Headers4 = chttpd_prefer_header:maybe_return_minimal(Req, Headers3),
Resp = handle_response(Req, Code, Headers4, Body, respond),
log_response(Code, Body),
{ok, Resp}.
send_method_not_allowed(Req, Methods) ->
- send_error(Req, 405, [{"Allow", Methods}], <<"method_not_allowed">>, ?l2b("Only " ++ Methods ++ " allowed")).
+ send_error(
+ Req,
+ 405,
+ [{"Allow", Methods}],
+ <<"method_not_allowed">>,
+ ?l2b("Only " ++ Methods ++ " allowed")
+ ).
send_json(Req, Value) ->
send_json(Req, 200, Value).
@@ -843,13 +916,18 @@ initialize_jsonp(Req) ->
_ -> ok
end,
case get(jsonp) of
- no_jsonp -> [];
- [] -> [];
+ no_jsonp ->
+ [];
+ [] ->
+ [];
CallBack ->
try
% make sure jsonp is configured on (default off)
- case chttpd_util:get_chttpd_config_boolean(
- "allow_jsonp", false) of
+ case
+ chttpd_util:get_chttpd_config_boolean(
+ "allow_jsonp", false
+ )
+ of
true ->
validate_callback(CallBack);
false ->
@@ -889,12 +967,10 @@ validate_callback([Char | Rest]) ->
_ when Char == $_ -> ok;
_ when Char == $[ -> ok;
_ when Char == $] -> ok;
- _ ->
- throw({bad_request, invalid_callback})
+ _ -> throw({bad_request, invalid_callback})
end,
validate_callback(Rest).
-
error_info({Error, Reason}) when is_list(Reason) ->
error_info({Error, ?l2b(Reason)});
error_info(bad_request) ->
@@ -923,8 +999,10 @@ error_info({forbidden, Msg}) ->
error_info({unauthorized, Msg}) ->
{401, <<"unauthorized">>, Msg};
error_info(file_exists) ->
- {412, <<"file_exists">>, <<"The database could not be "
- "created, the file already exists.">>};
+ {412, <<"file_exists">>, <<
+ "The database could not be "
+ "created, the file already exists."
+ >>};
error_info(request_entity_too_large) ->
{413, <<"too_large">>, <<"the request entity is too large">>};
error_info({request_entity_too_large, {attachment, AttName}}) ->
@@ -938,9 +1016,10 @@ error_info({bad_ctype, Reason}) ->
error_info(requested_range_not_satisfiable) ->
{416, <<"requested_range_not_satisfiable">>, <<"Requested range not satisfiable">>};
error_info({error, {illegal_database_name, Name}}) ->
- Message = <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
- "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
- "are allowed. Must begin with a letter.">>,
+ Message =
+ <<"Name: '", Name/binary, "'. Only lowercase characters (a-z), ",
+ "digits (0-9), and any of the characters _, $, (, ), +, -, and / ",
+ "are allowed. Must begin with a letter.">>,
{400, <<"illegal_database_name">>, Message};
error_info({missing_stub, Reason}) ->
{412, <<"missing_stub">>, Reason};
@@ -951,64 +1030,102 @@ error_info({Error, Reason}) ->
error_info(Error) ->
{500, <<"unknown_error">>, couch_util:to_binary(Error)}.
-error_headers(#httpd{mochi_req=MochiReq}=Req, Code, ErrorStr, ReasonStr) ->
- if Code == 401 ->
- % this is where the basic auth popup is triggered
- case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
- undefined ->
- case chttpd_util:get_chttpd_config("WWW-Authenticate") of
- undefined ->
- % If the client is a browser and the basic auth popup isn't turned on
- % redirect to the session page.
- case ErrorStr of
- <<"unauthorized">> ->
- case chttpd_util:get_chttpd_auth_config(
- "authentication_redirect", "/_utils/session.html") of
- undefined -> {Code, []};
- AuthRedirect ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true ->
- % send the browser popup header no matter what if we are require_valid_user
- {Code, [{"WWW-Authenticate", "Basic realm=\"server\""}]};
- false ->
- case MochiReq:accepts_content_type("application/json") of
- true ->
- {Code, []};
- false ->
- case MochiReq:accepts_content_type("text/html") of
- true ->
- % Redirect to the path the user requested, not
- % the one that is used internally.
- UrlReturnRaw = case MochiReq:get_header_value("x-couchdb-vhost-path") of
- undefined ->
- MochiReq:get(path);
- VHostPath ->
- VHostPath
- end,
- RedirectLocation = lists:flatten([
- AuthRedirect,
- "?return=", couch_util:url_encode(UrlReturnRaw),
- "&reason=", couch_util:url_encode(ReasonStr)
- ]),
- {302, [{"Location", absolute_uri(Req, RedirectLocation)}]};
- false ->
+error_headers(#httpd{mochi_req = MochiReq} = Req, Code, ErrorStr, ReasonStr) ->
+ if
+ Code == 401 ->
+ % this is where the basic auth popup is triggered
+ case MochiReq:get_header_value("X-CouchDB-WWW-Authenticate") of
+ undefined ->
+ case chttpd_util:get_chttpd_config("WWW-Authenticate") of
+ undefined ->
+ % If the client is a browser and the basic auth popup isn't turned on
+ % redirect to the session page.
+ case ErrorStr of
+ <<"unauthorized">> ->
+ case
+ chttpd_util:get_chttpd_auth_config(
+ "authentication_redirect", "/_utils/session.html"
+ )
+ of
+ undefined ->
+ {Code, []};
+ AuthRedirect ->
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true ->
+ % send the browser popup header no matter what if we are require_valid_user
+ {Code, [
+ {"WWW-Authenticate",
+ "Basic realm=\"server\""}
+ ]};
+ false ->
+ case
+ MochiReq:accepts_content_type(
+ "application/json"
+ )
+ of
+ true ->
+ {Code, []};
+ false ->
+ case
+ MochiReq:accepts_content_type(
+ "text/html"
+ )
+ of
+ true ->
+ % Redirect to the path the user requested, not
+ % the one that is used internally.
+ UrlReturnRaw =
+ case
+ MochiReq:get_header_value(
+ "x-couchdb-vhost-path"
+ )
+ of
+ undefined ->
+ MochiReq:get(path);
+ VHostPath ->
+ VHostPath
+ end,
+ RedirectLocation = lists:flatten(
+ [
+ AuthRedirect,
+ "?return=",
+ couch_util:url_encode(
+ UrlReturnRaw
+ ),
+ "&reason=",
+ couch_util:url_encode(
+ ReasonStr
+ )
+ ]
+ ),
+ {302, [
+ {"Location",
+ absolute_uri(
+ Req,
+ RedirectLocation
+ )}
+ ]};
+ false ->
+ {Code, []}
+ end
+ end
+ end
+ end;
+ _Else ->
{Code, []}
- end
- end
- end
+ end;
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
end;
- _Else ->
- {Code, []}
- end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
+ Type ->
+ {Code, [{"WWW-Authenticate", Type}]}
end;
- Type ->
- {Code, [{"WWW-Authenticate", Type}]}
- end;
- true ->
- {Code, []}
+ true ->
+ {Code, []}
end.
send_error(Req, Error) ->
@@ -1020,25 +1137,33 @@ send_error(Req, Code, ErrorStr, ReasonStr) ->
send_error(Req, Code, [], ErrorStr, ReasonStr).
send_error(Req, Code, Headers, ErrorStr, ReasonStr) ->
- send_json(Req, Code, Headers,
- {[{<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]}).
+ send_json(
+ Req,
+ Code,
+ Headers,
+ {[
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ ]}
+ ).
% give the option for list functions to output html or other raw errors
send_chunked_error(Resp, {_Error, {[{<<"body">>, Reason}]}}) ->
send_chunk(Resp, Reason),
last_chunk(Resp);
-
send_chunked_error(Resp, Error) ->
{Code, ErrorStr, ReasonStr} = error_info(Error),
- JsonError = {[{<<"code">>, Code},
- {<<"error">>, ErrorStr},
- {<<"reason">>, ReasonStr}]},
- send_chunk(Resp, ?l2b([$\n,?JSON_ENCODE(JsonError),$\n])),
+ JsonError =
+ {[
+ {<<"code">>, Code},
+ {<<"error">>, ErrorStr},
+ {<<"reason">>, ReasonStr}
+ ]},
+ send_chunk(Resp, ?l2b([$\n, ?JSON_ENCODE(JsonError), $\n])),
last_chunk(Resp).
send_redirect(Req, Path) ->
- send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
+ send_response(Req, 301, [{"Location", absolute_uri(Req, Path)}], <<>>).
negotiate_content_type(_Req) ->
case get(jsonp) of
@@ -1048,27 +1173,33 @@ negotiate_content_type(_Req) ->
end.
server_header() ->
- [{"Server", "CouchDB/" ++ couch_server:get_version() ++
- " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}].
-
+ [
+ {"Server",
+ "CouchDB/" ++ couch_server:get_version() ++
+ " (Erlang OTP/" ++ erlang:system_info(otp_release) ++ ")"}
+ ].
-record(mp, {boundary, buffer, data_fun, callback}).
-
parse_multipart_request(ContentType, DataFun, Callback) ->
Boundary0 = iolist_to_binary(get_boundary(ContentType)),
Boundary = <<"\r\n--", Boundary0/binary>>,
- Mp = #mp{boundary= Boundary,
- buffer= <<>>,
- data_fun=DataFun,
- callback=Callback},
- {Mp2, _NilCallback} = read_until(Mp, <<"--", Boundary0/binary>>,
- fun nil_callback/1),
- #mp{buffer=Buffer, data_fun=DataFun2, callback=Callback2} =
- parse_part_header(Mp2),
+ Mp = #mp{
+ boundary = Boundary,
+ buffer = <<>>,
+ data_fun = DataFun,
+ callback = Callback
+ },
+ {Mp2, _NilCallback} = read_until(
+ Mp,
+ <<"--", Boundary0/binary>>,
+ fun nil_callback/1
+ ),
+ #mp{buffer = Buffer, data_fun = DataFun2, callback = Callback2} =
+ parse_part_header(Mp2),
{Buffer, DataFun2, Callback2}.
-nil_callback(_Data)->
+nil_callback(_Data) ->
fun nil_callback/1.
get_boundary({"multipart/" ++ _, Opts}) ->
@@ -1077,83 +1208,102 @@ get_boundary({"multipart/" ++ _, Opts}) ->
S
end;
get_boundary(ContentType) ->
- {"multipart/" ++ _ , Opts} = mochiweb_util:parse_header(ContentType),
+ {"multipart/" ++ _, Opts} = mochiweb_util:parse_header(ContentType),
get_boundary({"multipart/", Opts}).
-
-
split_header(<<>>) ->
[];
split_header(Line) ->
- {Name, Rest} = lists:splitwith(fun (C) -> C =/= $: end,
- binary_to_list(Line)),
- [$: | Value] = case Rest of
- [] ->
- throw({bad_request, <<"bad part header">>});
- Res ->
- Res
- end,
- [{string:to_lower(string:strip(Name)),
- mochiweb_util:parse_header(Value)}].
+ {Name, Rest} = lists:splitwith(
+ fun(C) -> C =/= $: end,
+ binary_to_list(Line)
+ ),
+ [$: | Value] =
+ case Rest of
+ [] ->
+ throw({bad_request, <<"bad part header">>});
+ Res ->
+ Res
+ end,
+ [{string:to_lower(string:strip(Name)), mochiweb_util:parse_header(Value)}].
-read_until(#mp{data_fun=DataFun, buffer=Buffer}=Mp, Pattern, Callback) ->
+read_until(#mp{data_fun = DataFun, buffer = Buffer} = Mp, Pattern, Callback) ->
case couch_util:find_in_binary(Pattern, Buffer) of
- not_found ->
- Callback2 = Callback(Buffer),
- {Buffer2, DataFun2} = DataFun(),
- Buffer3 = iolist_to_binary(Buffer2),
- read_until(Mp#mp{data_fun=DataFun2,buffer=Buffer3}, Pattern, Callback2);
- {partial, 0} ->
- {NewData, DataFun2} = DataFun(),
- read_until(Mp#mp{data_fun=DataFun2,
- buffer= iolist_to_binary([Buffer,NewData])},
- Pattern, Callback);
- {partial, Skip} ->
- <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
- Callback2 = Callback(DataChunk),
- {NewData, DataFun2} = DataFun(),
- read_until(Mp#mp{data_fun=DataFun2,
- buffer= iolist_to_binary([Rest | NewData])},
- Pattern, Callback2);
- {exact, 0} ->
- PatternLen = size(Pattern),
- <<_:PatternLen/binary, Rest/binary>> = Buffer,
- {Mp#mp{buffer= Rest}, Callback};
- {exact, Skip} ->
- PatternLen = size(Pattern),
- <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
- Callback2 = Callback(DataChunk),
- {Mp#mp{buffer= Rest}, Callback2}
+ not_found ->
+ Callback2 = Callback(Buffer),
+ {Buffer2, DataFun2} = DataFun(),
+ Buffer3 = iolist_to_binary(Buffer2),
+ read_until(Mp#mp{data_fun = DataFun2, buffer = Buffer3}, Pattern, Callback2);
+ {partial, 0} ->
+ {NewData, DataFun2} = DataFun(),
+ read_until(
+ Mp#mp{
+ data_fun = DataFun2,
+ buffer = iolist_to_binary([Buffer, NewData])
+ },
+ Pattern,
+ Callback
+ );
+ {partial, Skip} ->
+ <<DataChunk:Skip/binary, Rest/binary>> = Buffer,
+ Callback2 = Callback(DataChunk),
+ {NewData, DataFun2} = DataFun(),
+ read_until(
+ Mp#mp{
+ data_fun = DataFun2,
+ buffer = iolist_to_binary([Rest | NewData])
+ },
+ Pattern,
+ Callback2
+ );
+ {exact, 0} ->
+ PatternLen = size(Pattern),
+ <<_:PatternLen/binary, Rest/binary>> = Buffer,
+ {Mp#mp{buffer = Rest}, Callback};
+ {exact, Skip} ->
+ PatternLen = size(Pattern),
+ <<DataChunk:Skip/binary, _:PatternLen/binary, Rest/binary>> = Buffer,
+ Callback2 = Callback(DataChunk),
+ {Mp#mp{buffer = Rest}, Callback2}
end.
-
-parse_part_header(#mp{callback=UserCallBack}=Mp) ->
- {Mp2, AccCallback} = read_until(Mp, <<"\r\n\r\n">>,
- fun(Next) -> acc_callback(Next, []) end),
+parse_part_header(#mp{callback = UserCallBack} = Mp) ->
+ {Mp2, AccCallback} = read_until(
+ Mp,
+ <<"\r\n\r\n">>,
+ fun(Next) -> acc_callback(Next, []) end
+ ),
HeaderData = AccCallback(get_data),
Headers =
- lists:foldl(fun(Line, Acc) ->
- split_header(Line) ++ Acc
- end, [], re:split(HeaderData,<<"\r\n">>, [])),
+ lists:foldl(
+ fun(Line, Acc) ->
+ split_header(Line) ++ Acc
+ end,
+ [],
+ re:split(HeaderData, <<"\r\n">>, [])
+ ),
NextCallback = UserCallBack({headers, Headers}),
- parse_part_body(Mp2#mp{callback=NextCallback}).
-
-parse_part_body(#mp{boundary=Prefix, callback=Callback}=Mp) ->
- {Mp2, WrappedCallback} = read_until(Mp, Prefix,
- fun(Data) -> body_callback_wrapper(Data, Callback) end),
+ parse_part_body(Mp2#mp{callback = NextCallback}).
+
+parse_part_body(#mp{boundary = Prefix, callback = Callback} = Mp) ->
+ {Mp2, WrappedCallback} = read_until(
+ Mp,
+ Prefix,
+ fun(Data) -> body_callback_wrapper(Data, Callback) end
+ ),
Callback2 = WrappedCallback(get_callback),
Callback3 = Callback2(body_end),
- case check_for_last(Mp2#mp{callback=Callback3}) of
- {last, #mp{callback=Callback3}=Mp3} ->
- Mp3#mp{callback=Callback3(eof)};
- {more, Mp3} ->
- parse_part_header(Mp3)
+ case check_for_last(Mp2#mp{callback = Callback3}) of
+ {last, #mp{callback = Callback3} = Mp3} ->
+ Mp3#mp{callback = Callback3(eof)};
+ {more, Mp3} ->
+ parse_part_header(Mp3)
end.
-acc_callback(get_data, Acc)->
+acc_callback(get_data, Acc) ->
iolist_to_binary(lists:reverse(Acc));
-acc_callback(Data, Acc)->
+acc_callback(Data, Acc) ->
fun(Next) -> acc_callback(Next, [Data | Acc]) end.
body_callback_wrapper(get_callback, Callback) ->
@@ -1162,18 +1312,23 @@ body_callback_wrapper(Data, Callback) ->
Callback2 = Callback({body, Data}),
fun(Next) -> body_callback_wrapper(Next, Callback2) end.
-
-check_for_last(#mp{buffer=Buffer, data_fun=DataFun}=Mp) ->
+check_for_last(#mp{buffer = Buffer, data_fun = DataFun} = Mp) ->
case Buffer of
- <<"--",_/binary>> -> {last, Mp};
- <<_, _, _/binary>> -> {more, Mp};
- _ -> % not long enough
- {Data, DataFun2} = DataFun(),
- check_for_last(Mp#mp{buffer= <<Buffer/binary, Data/binary>>,
- data_fun = DataFun2})
+ <<"--", _/binary>> ->
+ {last, Mp};
+ <<_, _, _/binary>> ->
+ {more, Mp};
+ % not long enough
+ _ ->
+ {Data, DataFun2} = DataFun(),
+ check_for_last(Mp#mp{
+ buffer = <<Buffer/binary, Data/binary>>,
+ data_fun = DataFun2
+ })
end.
-validate_bind_address(any) -> ok;
+validate_bind_address(any) ->
+ ok;
validate_bind_address(Address) ->
case inet_parse:address(Address) of
{ok, _} -> ok;
@@ -1191,9 +1346,9 @@ basic_headers(Req, Headers0) ->
chttpd_cors:headers(Req, Headers2).
basic_headers_no_cors(Req, Headers) ->
- Headers
- ++ server_header()
- ++ couch_httpd_auth:cookie_auth_header(Req, Headers).
+ Headers ++
+ server_header() ++
+ couch_httpd_auth:cookie_auth_header(Req, Headers).
handle_response(Req0, Code0, Headers0, Args0, Type) ->
{ok, {Req1, Code1, Headers1, Args1}} = before_response(Req0, Code0, Headers0, Args0),
@@ -1259,27 +1414,40 @@ maybe_add_default_headers_test_() ->
MustRevalidate = {"Cache-Control", "must-revalidate"},
ApplicationJavascript = {"Content-Type", "application/javascript"},
Cases = [
- {[],
- [MustRevalidate, ApplicationJavascript],
- "Should add Content-Type and Cache-Control to empty heaeders"},
-
- {[NoCache],
- [NoCache, ApplicationJavascript],
- "Should add Content-Type only if Cache-Control is present"},
-
- {[ApplicationJson],
- [MustRevalidate, ApplicationJson],
- "Should add Cache-Control if Content-Type is present"},
-
- {[NoCache, ApplicationJson],
- [NoCache, ApplicationJson],
- "Should not add headers if Cache-Control and Content-Type are there"}
+ {
+ [],
+ [MustRevalidate, ApplicationJavascript],
+ "Should add Content-Type and Cache-Control to empty heaeders"
+ },
+
+ {
+ [NoCache],
+ [NoCache, ApplicationJavascript],
+ "Should add Content-Type only if Cache-Control is present"
+ },
+
+ {
+ [ApplicationJson],
+ [MustRevalidate, ApplicationJson],
+ "Should add Cache-Control if Content-Type is present"
+ },
+
+ {
+ [NoCache, ApplicationJson],
+ [NoCache, ApplicationJson],
+ "Should not add headers if Cache-Control and Content-Type are there"
+ }
],
- Tests = lists:map(fun({InitialHeaders, ProperResult, Desc}) ->
- {Desc,
- ?_assertEqual(ProperResult,
- maybe_add_default_headers(DummyRequest, InitialHeaders))}
- end, Cases),
+ Tests = lists:map(
+ fun({InitialHeaders, ProperResult, Desc}) ->
+ {Desc,
+ ?_assertEqual(
+ ProperResult,
+ maybe_add_default_headers(DummyRequest, InitialHeaders)
+ )}
+ end,
+ Cases
+ ),
{"Tests adding default headers", Tests}.
log_request_test_() ->
@@ -1299,27 +1467,24 @@ log_request_test_() ->
[
fun() -> should_accept_code_and_message(true) end,
fun() -> should_accept_code_and_message(false) end
- ]
- }.
+ ]}.
should_accept_code_and_message(DontLogFlag) ->
erlang:put(dont_log_response, DontLogFlag),
- {"with dont_log_response = " ++ atom_to_list(DontLogFlag),
- [
- {"Should accept code 200 and string message",
- ?_assertEqual(ok, log_response(200, "OK"))},
- {"Should accept code 200 and JSON message",
+ {"with dont_log_response = " ++ atom_to_list(DontLogFlag), [
+ {"Should accept code 200 and string message", ?_assertEqual(ok, log_response(200, "OK"))},
+ {"Should accept code 200 and JSON message",
?_assertEqual(ok, log_response(200, {json, {[{ok, true}]}}))},
- {"Should accept code >= 400 and string error",
+ {"Should accept code >= 400 and string error",
?_assertEqual(ok, log_response(405, method_not_allowed))},
- {"Should accept code >= 400 and JSON error",
- ?_assertEqual(ok,
- log_response(405, {json, {[{error, method_not_allowed}]}}))},
- {"Should accept code >= 500 and string error",
- ?_assertEqual(ok, log_response(500, undef))},
- {"Should accept code >= 500 and JSON error",
+ {"Should accept code >= 400 and JSON error",
+ ?_assertEqual(
+ ok,
+ log_response(405, {json, {[{error, method_not_allowed}]}})
+ )},
+ {"Should accept code >= 500 and string error", ?_assertEqual(ok, log_response(500, undef))},
+ {"Should accept code >= 500 and JSON error",
?_assertEqual(ok, log_response(500, {json, {[{error, undef}]}}))}
- ]
- }.
+ ]}.
-endif.
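The chttpd.erl hunks end here. The style they converge on can be summarized in one self-contained sketch; the module below is hypothetical (it is not part of this commit or of CouchDB) and only illustrates the conventions erlfmt enforces throughout: spaces around `=` in record patterns and around `|` in cons patterns, `case` clauses broken onto their own lines when any branch needs more than one, and long scrutinees wrapped between `case` and `of`.

```
%% Hypothetical module written in the post-erlfmt style shown above; the
%% record, functions, and data here are illustrative only.
-module(erlfmt_style_example).
-export([connection_header/1, lookup/3]).

-record(req, {headers = [], peer}).

%% Record patterns get spaces around `=`; cons patterns get spaces around `|`.
connection_header(#req{headers = Headers}) ->
    find_connection(Headers).

find_connection([]) ->
    undefined;
find_connection([{"connection", Value} | _Rest]) ->
    Value;
find_connection([{_Other, _} | Rest]) ->
    find_connection(Rest).

%% The multi-line `case` shape used throughout the diff: the scrutinee sits on
%% its own lines between `case` and `of` (shown on a short call purely for
%% illustration), and each clause of the body starts on its own line.
lookup(Key, Default, Props) ->
    case
        proplists:get_value(
            Key, Props, Default
        )
    of
        undefined -> Default;
        Value -> Value
    end.
```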
diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl
index 01a210d05..7bcb85fba 100644
--- a/src/couch/src/couch_httpd_auth.erl
+++ b/src/couch/src/couch_httpd_auth.erl
@@ -18,8 +18,10 @@
-export([party_mode_handler/1]).
--export([default_authentication_handler/1, default_authentication_handler/2,
- special_test_authentication_handler/1]).
+-export([
+ default_authentication_handler/1, default_authentication_handler/2,
+ special_test_authentication_handler/1
+]).
-export([cookie_authentication_handler/1, cookie_authentication_handler/2]).
-export([null_authentication_handler/1]).
-export([proxy_authentication_handler/1, proxy_authentification_handler/1]).
@@ -33,59 +35,68 @@
-export([jwt_authentication_handler/1]).
--import(couch_httpd, [header_value/2, send_json/2, send_json/4, send_method_not_allowed/2, maybe_decompress/2]).
+-import(couch_httpd, [
+ header_value/2, send_json/2, send_json/4, send_method_not_allowed/2, maybe_decompress/2
+]).
--compile({no_auto_import,[integer_to_binary/1, integer_to_binary/2]}).
+-compile({no_auto_import, [integer_to_binary/1, integer_to_binary/2]}).
party_mode_handler(Req) ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true ->
- throw({unauthorized, <<"Authentication required.">>});
- false ->
- Req#httpd{user_ctx=#user_ctx{}}
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true ->
+ throw({unauthorized, <<"Authentication required.">>});
+ false ->
+ Req#httpd{user_ctx = #user_ctx{}}
end.
special_test_authentication_handler(Req) ->
case header_value(Req, "WWW-Authenticate") of
- "X-Couch-Test-Auth " ++ NamePass ->
- % NamePass is a colon separated string: "joe schmoe:a password".
- [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
- case {Name, Pass} of
- {"Jan Lehnardt", "apple"} -> ok;
- {"Christopher Lenz", "dog food"} -> ok;
- {"Noah Slater", "biggiesmalls endian"} -> ok;
- {"Chris Anderson", "mp3"} -> ok;
- {"Damien Katz", "pecan pie"} -> ok;
- {_, _} ->
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end,
- Req#httpd{user_ctx=#user_ctx{name=?l2b(Name)}};
- _ ->
- % No X-Couch-Test-Auth credentials sent, give admin access so the
- % previous authentication can be restored after the test
- Req#httpd{user_ctx=?ADMIN_USER}
+ "X-Couch-Test-Auth " ++ NamePass ->
+ % NamePass is a colon separated string: "joe schmoe:a password".
+ [Name, Pass] = re:split(NamePass, ":", [{return, list}, {parts, 2}]),
+ case {Name, Pass} of
+ {"Jan Lehnardt", "apple"} -> ok;
+ {"Christopher Lenz", "dog food"} -> ok;
+ {"Noah Slater", "biggiesmalls endian"} -> ok;
+ {"Chris Anderson", "mp3"} -> ok;
+ {"Damien Katz", "pecan pie"} -> ok;
+ {_, _} -> throw({unauthorized, <<"Name or password is incorrect.">>})
+ end,
+ Req#httpd{user_ctx = #user_ctx{name = ?l2b(Name)}};
+ _ ->
+ % No X-Couch-Test-Auth credentials sent, give admin access so the
+ % previous authentication can be restored after the test
+ Req#httpd{user_ctx = ?ADMIN_USER}
end.
basic_name_pw(Req) ->
AuthorizationHeader = header_value(Req, "Authorization"),
case AuthorizationHeader of
- "Basic " ++ Base64Value ->
- try re:split(base64:decode(Base64Value), ":",
- [{return, list}, {parts, 2}]) of
- ["_", "_"] ->
- % special name and pass to be logged out
- nil;
- [User, Pass] ->
- {User, Pass};
+ "Basic " ++ Base64Value ->
+ try
+ re:split(
+ base64:decode(Base64Value),
+ ":",
+ [{return, list}, {parts, 2}]
+ )
+ of
+ ["_", "_"] ->
+ % special name and pass to be logged out
+ nil;
+ [User, Pass] ->
+ {User, Pass};
+ _ ->
+ nil
+ catch
+ error:function_clause ->
+ throw({bad_request, "Authorization header has invalid base64 value"})
+ end;
_ ->
nil
- catch
- error:function_clause ->
- throw({bad_request, "Authorization header has invalid base64 value"})
- end;
- _ ->
- nil
end.
default_authentication_handler(Req) ->
@@ -93,42 +104,47 @@ default_authentication_handler(Req) ->
default_authentication_handler(Req, AuthModule) ->
case basic_name_pw(Req) of
- {User, Pass} ->
- case AuthModule:get_user_creds(Req, User) of
- nil ->
- throw({unauthorized, <<"Name or password is incorrect.">>});
- {ok, UserProps, _AuthCtx} ->
- reject_if_totp(UserProps),
- UserName = ?l2b(User),
- Password = ?l2b(Pass),
- case authenticate(Password, UserProps) of
- true ->
- Req#httpd{user_ctx=#user_ctx{
- name=UserName,
- roles=couch_util:get_value(<<"roles">>, UserProps, [])
- }};
- false ->
- authentication_warning(Req, UserName),
- throw({unauthorized, <<"Name or password is incorrect.">>})
- end
- end;
- nil ->
- case couch_server:has_admins() of
- true ->
- Req;
- false ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "require_valid_user", false) of
- true -> Req;
- % If no admins, and no user required, then everyone is admin!
- % Yay, admin party!
- false -> Req#httpd{user_ctx=?ADMIN_USER}
+ {User, Pass} ->
+ case AuthModule:get_user_creds(Req, User) of
+ nil ->
+ throw({unauthorized, <<"Name or password is incorrect.">>});
+ {ok, UserProps, _AuthCtx} ->
+ reject_if_totp(UserProps),
+ UserName = ?l2b(User),
+ Password = ?l2b(Pass),
+ case authenticate(Password, UserProps) of
+ true ->
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = UserName,
+ roles = couch_util:get_value(<<"roles">>, UserProps, [])
+ }
+ };
+ false ->
+ authentication_warning(Req, UserName),
+ throw({unauthorized, <<"Name or password is incorrect.">>})
+ end
+ end;
+ nil ->
+ case couch_server:has_admins() of
+ true ->
+ Req;
+ false ->
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "require_valid_user", false
+ )
+ of
+ true -> Req;
+ % If no admins, and no user required, then everyone is admin!
+ % Yay, admin party!
+ false -> Req#httpd{user_ctx = ?ADMIN_USER}
+ end
end
- end
end.
null_authentication_handler(Req) ->
- Req#httpd{user_ctx=?ADMIN_USER}.
+ Req#httpd{user_ctx = ?ADMIN_USER}.
%% @doc proxy auth handler.
%
@@ -155,39 +171,53 @@ proxy_authentication_handler(Req) ->
%% @deprecated
proxy_authentification_handler(Req) ->
proxy_authentication_handler(Req).
-
+
proxy_auth_user(Req) ->
XHeaderUserName = chttpd_util:get_chttpd_auth_config(
- "x_auth_username", "X-Auth-CouchDB-UserName"),
+ "x_auth_username", "X-Auth-CouchDB-UserName"
+ ),
XHeaderRoles = chttpd_util:get_chttpd_auth_config(
- "x_auth_roles", "X-Auth-CouchDB-Roles"),
+ "x_auth_roles", "X-Auth-CouchDB-Roles"
+ ),
XHeaderToken = chttpd_util:get_chttpd_auth_config(
- "x_auth_token", "X-Auth-CouchDB-Token"),
+ "x_auth_token", "X-Auth-CouchDB-Token"
+ ),
case header_value(Req, XHeaderUserName) of
- undefined -> nil;
+ undefined ->
+ nil;
UserName ->
- Roles = case header_value(Req, XHeaderRoles) of
- undefined -> [];
- Else ->
- [?l2b(R) || R <- string:tokens(Else, ",")]
- end,
- case chttpd_util:get_chttpd_auth_config_boolean(
- "proxy_use_secret", false) of
+ Roles =
+ case header_value(Req, XHeaderRoles) of
+ undefined -> [];
+ Else -> [?l2b(R) || R <- string:tokens(Else, ",")]
+ end,
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "proxy_use_secret", false
+ )
+ of
true ->
case chttpd_util:get_chttpd_auth_config("secret") of
undefined ->
- Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}};
+ Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}};
Secret ->
- ExpectedToken = couch_util:to_hex(couch_util:hmac(sha, Secret, UserName)),
+ ExpectedToken = couch_util:to_hex(
+ couch_util:hmac(sha, Secret, UserName)
+ ),
case header_value(Req, XHeaderToken) of
Token when Token == ExpectedToken ->
- Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName),
- roles=Roles}};
- _ -> nil
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = ?l2b(UserName),
+ roles = Roles
+ }
+ };
+ _ ->
+ nil
end
end;
false ->
- Req#httpd{user_ctx=#user_ctx{name=?l2b(UserName), roles=Roles}}
+ Req#httpd{user_ctx = #user_ctx{name = ?l2b(UserName), roles = Roles}}
end
end.
@@ -198,22 +228,35 @@ jwt_authentication_handler(Req) ->
case jwtf:decode(?l2b(Jwt), [alg | RequiredClaims], fun jwtf_keystore:get/2) of
{ok, {Claims}} ->
case lists:keyfind(<<"sub">>, 1, Claims) of
- false -> throw({unauthorized, <<"Token missing sub claim.">>});
- {_, User} -> Req#httpd{user_ctx=#user_ctx{
- name = User,
- roles = couch_util:get_value(?l2b(config:get("jwt_auth", "roles_claim_name", "_couchdb.roles")), Claims, [])
- }}
+ false ->
+ throw({unauthorized, <<"Token missing sub claim.">>});
+ {_, User} ->
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = User,
+ roles = couch_util:get_value(
+ ?l2b(
+ config:get(
+ "jwt_auth", "roles_claim_name", "_couchdb.roles"
+ )
+ ),
+ Claims,
+ []
+ )
+ }
+ }
end;
{error, Reason} ->
throw(Reason)
end;
- _ -> Req
+ _ ->
+ Req
end.
get_configured_claims() ->
Claims = config:get("jwt_auth", "required_claims", ""),
Re = "((?<key1>[a-z]+)|{(?<key2>[a-z]+)\s*,\s*\"(?<val>[^\"]+)\"})",
- case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of
+ case re:run(Claims, Re, [global, {capture, [key1, key2, val], binary}]) of
nomatch when Claims /= "" ->
couch_log:error("[jwt_auth] required_claims is set to an invalid value.", []),
throw({misconfigured_server, <<"JWT is not configured correctly">>});
@@ -231,61 +274,77 @@ to_claim([<<>>, Key, Value]) ->
cookie_authentication_handler(Req) ->
cookie_authentication_handler(Req, couch_auth_cache).
-cookie_authentication_handler(#httpd{mochi_req=MochiReq}=Req, AuthModule) ->
+cookie_authentication_handler(#httpd{mochi_req = MochiReq} = Req, AuthModule) ->
case MochiReq:get_cookie_value("AuthSession") of
- undefined -> Req;
- [] -> Req;
- Cookie ->
- [User, TimeStr, HashStr] = try
- AuthSession = couch_util:decodeBase64Url(Cookie),
- [_A, _B, _Cs] = re:split(?b2l(AuthSession), ":",
- [{return, list}, {parts, 3}])
- catch
- _:_Error ->
- Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
- throw({bad_request, Reason})
- end,
- % Verify expiry and hash
- CurrentTime = make_cookie_time(),
- case chttpd_util:get_chttpd_auth_config("secret") of
undefined ->
- couch_log:debug("cookie auth secret is not set",[]),
Req;
- SecretStr ->
- Secret = ?l2b(SecretStr),
- case AuthModule:get_user_creds(Req, User) of
- nil -> Req;
- {ok, UserProps, _AuthCtx} ->
- UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
- FullSecret = <<Secret/binary, UserSalt/binary>>,
- ExpectedHash = couch_util:hmac(sha, FullSecret, User ++ ":" ++ TimeStr),
- Hash = ?l2b(HashStr),
- Timeout = chttpd_util:get_chttpd_auth_config_integer(
- "timeout", 600),
- couch_log:debug("timeout ~p", [Timeout]),
- case (catch erlang:list_to_integer(TimeStr, 16)) of
- TimeStamp when CurrentTime < TimeStamp + Timeout ->
- case couch_passwords:verify(ExpectedHash, Hash) of
- true ->
- TimeLeft = TimeStamp + Timeout - CurrentTime,
- couch_log:debug("Successful cookie auth as: ~p",
- [User]),
- Req#httpd{user_ctx=#user_ctx{
- name=?l2b(User),
- roles=couch_util:get_value(<<"roles">>, UserProps, [])
- }, auth={FullSecret, TimeLeft < Timeout*0.9}};
- _Else ->
- Req
- end;
- _Else ->
- Req
- end
+ [] ->
+ Req;
+ Cookie ->
+ [User, TimeStr, HashStr] =
+ try
+ AuthSession = couch_util:decodeBase64Url(Cookie),
+ [_A, _B, _Cs] = re:split(
+ ?b2l(AuthSession),
+ ":",
+ [{return, list}, {parts, 3}]
+ )
+ catch
+ _:_Error ->
+ Reason = <<"Malformed AuthSession cookie. Please clear your cookies.">>,
+ throw({bad_request, Reason})
+ end,
+ % Verify expiry and hash
+ CurrentTime = make_cookie_time(),
+ case chttpd_util:get_chttpd_auth_config("secret") of
+ undefined ->
+ couch_log:debug("cookie auth secret is not set", []),
+ Req;
+ SecretStr ->
+ Secret = ?l2b(SecretStr),
+ case AuthModule:get_user_creds(Req, User) of
+ nil ->
+ Req;
+ {ok, UserProps, _AuthCtx} ->
+ UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<"">>),
+ FullSecret = <<Secret/binary, UserSalt/binary>>,
+ ExpectedHash = couch_util:hmac(sha, FullSecret, User ++ ":" ++ TimeStr),
+ Hash = ?l2b(HashStr),
+ Timeout = chttpd_util:get_chttpd_auth_config_integer(
+ "timeout", 600
+ ),
+ couch_log:debug("timeout ~p", [Timeout]),
+ case (catch erlang:list_to_integer(TimeStr, 16)) of
+ TimeStamp when CurrentTime < TimeStamp + Timeout ->
+ case couch_passwords:verify(ExpectedHash, Hash) of
+ true ->
+ TimeLeft = TimeStamp + Timeout - CurrentTime,
+ couch_log:debug(
+ "Successful cookie auth as: ~p",
+ [User]
+ ),
+ Req#httpd{
+ user_ctx = #user_ctx{
+ name = ?l2b(User),
+ roles = couch_util:get_value(
+ <<"roles">>, UserProps, []
+ )
+ },
+ auth = {FullSecret, TimeLeft < Timeout * 0.9}
+ };
+ _Else ->
+ Req
+ end;
+ _Else ->
+ Req
+ end
+ end
end
- end
end.
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=null}}, _Headers) -> [];
-cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Req, Headers) ->
+cookie_auth_header(#httpd{user_ctx = #user_ctx{name = null}}, _Headers) ->
+ [];
+cookie_auth_header(#httpd{user_ctx = #user_ctx{name = User}, auth = {Secret, true}} = Req, Headers) ->
% Note: we only set the AuthSession cookie if:
% * a valid AuthSession cookie has been received
% * we are outside a 10% timeout window
@@ -296,20 +355,24 @@ cookie_auth_header(#httpd{user_ctx=#user_ctx{name=User}, auth={Secret, true}}=Re
CookieHeader = couch_util:get_value("Set-Cookie", Headers, ""),
Cookies = mochiweb_cookies:parse_cookie(CookieHeader),
AuthSession = couch_util:get_value("AuthSession", Cookies),
- if AuthSession == undefined ->
- TimeStamp = make_cookie_time(),
- [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
- true ->
- []
+ if
+ AuthSession == undefined ->
+ TimeStamp = make_cookie_time(),
+ [cookie_auth_cookie(Req, ?b2l(User), Secret, TimeStamp)];
+ true ->
+ []
end;
-cookie_auth_header(_Req, _Headers) -> [].
+cookie_auth_header(_Req, _Headers) ->
+ [].
cookie_auth_cookie(Req, User, Secret, TimeStamp) ->
SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
Hash = couch_util:hmac(sha, Secret, SessionData),
- mochiweb_cookies:cookie("AuthSession",
+ mochiweb_cookies:cookie(
+ "AuthSession",
couch_util:encodeBase64Url(SessionData ++ ":" ++ ?b2l(Hash)),
- [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain() ++ same_site()).
+ [{path, "/"}] ++ cookie_scheme(Req) ++ max_age() ++ cookie_domain() ++ same_site()
+ ).
ensure_cookie_auth_secret() ->
case chttpd_util:get_chttpd_auth_config("secret") of
@@ -317,7 +380,8 @@ ensure_cookie_auth_secret() ->
NewSecret = ?b2l(couch_uuids:random()),
config:set("chttpd_auth", "secret", NewSecret),
NewSecret;
- Secret -> Secret
+ Secret ->
+ Secret
end.
% session handlers
@@ -325,27 +389,32 @@ ensure_cookie_auth_secret() ->
handle_session_req(Req) ->
handle_session_req(Req, couch_auth_cache).
-handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
+handle_session_req(#httpd{method = 'POST', mochi_req = MochiReq} = Req, AuthModule) ->
ReqBody = MochiReq:recv_body(),
- Form = case MochiReq:get_primary_header_value("content-type") of
- % content type should be json
- "application/x-www-form-urlencoded" ++ _ ->
- mochiweb_util:parse_qs(ReqBody);
- "application/json" ++ _ ->
- {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)),
- lists:map(fun({Key, Value}) ->
- {?b2l(Key), ?b2l(Value)}
- end, Pairs);
- _ ->
- []
- end,
+ Form =
+ case MochiReq:get_primary_header_value("content-type") of
+ % content type should be json
+ "application/x-www-form-urlencoded" ++ _ ->
+ mochiweb_util:parse_qs(ReqBody);
+ "application/json" ++ _ ->
+ {Pairs} = ?JSON_DECODE(maybe_decompress(Req, ReqBody)),
+ lists:map(
+ fun({Key, Value}) ->
+ {?b2l(Key), ?b2l(Value)}
+ end,
+ Pairs
+ );
+ _ ->
+ []
+ end,
UserName = ?l2b(extract_username(Form)),
Password = ?l2b(couch_util:get_value("password", Form, "")),
- couch_log:debug("Attempt Login: ~s",[UserName]),
- {ok, UserProps, _AuthCtx} = case AuthModule:get_user_creds(Req, UserName) of
- nil -> {ok, [], nil};
- Result -> Result
- end,
+ couch_log:debug("Attempt Login: ~s", [UserName]),
+ {ok, UserProps, _AuthCtx} =
+ case AuthModule:get_user_creds(Req, UserName) of
+ nil -> {ok, [], nil};
+ Result -> Result
+ end,
case authenticate(Password, UserProps) of
true ->
verify_totp(UserProps, Form),
@@ -353,68 +422,102 @@ handle_session_req(#httpd{method='POST', mochi_req=MochiReq}=Req, AuthModule) ->
Secret = ?l2b(ensure_cookie_auth_secret()),
UserSalt = couch_util:get_value(<<"salt">>, UserProps),
CurrentTime = make_cookie_time(),
- Cookie = cookie_auth_cookie(Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime),
+ Cookie = cookie_auth_cookie(
+ Req, ?b2l(UserName), <<Secret/binary, UserSalt/binary>>, CurrentTime
+ ),
% TODO document the "next" feature in Futon
- {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
- nil ->
- {200, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(Req#httpd{req_body=ReqBody}, Code, Headers,
+ {Code, Headers} =
+ case couch_httpd:qs_value(Req, "next", nil) of
+ nil ->
+ {200, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
+ send_json(
+ Req#httpd{req_body = ReqBody},
+ Code,
+ Headers,
{[
{ok, true},
{name, UserName},
{roles, couch_util:get_value(<<"roles">>, UserProps, [])}
- ]});
+ ]}
+ );
false ->
authentication_warning(Req, UserName),
% clear the session
- Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)),
- {Code, Headers} = case couch_httpd:qs_value(Req, "fail", nil) of
- nil ->
- {401, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
- send_json(Req, Code, Headers, {[{error, <<"unauthorized">>},{reason, <<"Name or password is incorrect.">>}]})
+ Cookie = mochiweb_cookies:cookie(
+ "AuthSession", "", [{path, "/"}] ++ cookie_scheme(Req)
+ ),
+ {Code, Headers} =
+ case couch_httpd:qs_value(Req, "fail", nil) of
+ nil ->
+ {401, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
+ send_json(
+ Req,
+ Code,
+ Headers,
+ {[{error, <<"unauthorized">>}, {reason, <<"Name or password is incorrect.">>}]}
+ )
end;
% get user info
% GET /_session
-handle_session_req(#httpd{method='GET', user_ctx=UserCtx}=Req, _AuthModule) ->
+handle_session_req(#httpd{method = 'GET', user_ctx = UserCtx} = Req, _AuthModule) ->
Name = UserCtx#user_ctx.name,
ForceLogin = couch_httpd:qs_value(Req, "basic", "false"),
case {Name, ForceLogin} of
{null, "true"} ->
throw({unauthorized, <<"Please login.">>});
{Name, _} ->
- send_json(Req, {[
- % remove this ok
- {ok, true},
- {<<"userCtx">>, {[
- {name, Name},
- {roles, UserCtx#user_ctx.roles}
- ]}},
- {info, {[
- {authentication_handlers, [
- N || {N, _Fun} <- Req#httpd.authentication_handlers]}
- ] ++ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
- Handler
- end) ++ maybe_value(authentication_db, config:get("chttpd_auth", "authentication_db"), fun(Val) ->
- ?l2b(Val)
- end)}}
- ]})
+ send_json(
+ Req,
+ {[
+ % remove this ok
+ {ok, true},
+ {<<"userCtx">>,
+ {[
+ {name, Name},
+ {roles, UserCtx#user_ctx.roles}
+ ]}},
+ {info, {
+ [
+ {authentication_handlers, [
+ N
+ || {N, _Fun} <- Req#httpd.authentication_handlers
+ ]}
+ ] ++
+ maybe_value(authenticated, UserCtx#user_ctx.handler, fun(Handler) ->
+ Handler
+ end) ++
+ maybe_value(
+ authentication_db,
+ config:get("chttpd_auth", "authentication_db"),
+ fun(Val) ->
+ ?l2b(Val)
+ end
+ )
+ }}
+ ]}
+ )
end;
% logout by deleting the session
-handle_session_req(#httpd{method='DELETE'}=Req, _AuthModule) ->
- Cookie = mochiweb_cookies:cookie("AuthSession", "", [{path, "/"}] ++
- cookie_domain() ++ cookie_scheme(Req)),
- {Code, Headers} = case couch_httpd:qs_value(Req, "next", nil) of
- nil ->
- {200, [Cookie]};
- Redirect ->
- {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
- end,
+handle_session_req(#httpd{method = 'DELETE'} = Req, _AuthModule) ->
+ Cookie = mochiweb_cookies:cookie(
+ "AuthSession",
+ "",
+ [{path, "/"}] ++
+ cookie_domain() ++ cookie_scheme(Req)
+ ),
+ {Code, Headers} =
+ case couch_httpd:qs_value(Req, "next", nil) of
+ nil ->
+ {200, [Cookie]};
+ Redirect ->
+ {302, [Cookie, {"Location", couch_httpd:absolute_uri(Req, Redirect)}]}
+ end,
send_json(Req, Code, Headers, {[{ok, true}]});
handle_session_req(Req, _AuthModule) ->
send_method_not_allowed(Req, "GET,HEAD,POST,DELETE").
@@ -433,22 +536,25 @@ extract_username(Form) ->
end.
maybe_value(_Key, undefined, _Fun) -> [];
-maybe_value(Key, Else, Fun) ->
- [{Key, Fun(Else)}].
+maybe_value(Key, Else, Fun) -> [{Key, Fun(Else)}].
authenticate(Pass, UserProps) ->
UserSalt = couch_util:get_value(<<"salt">>, UserProps, <<>>),
{PasswordHash, ExpectedHash} =
case couch_util:get_value(<<"password_scheme">>, UserProps, <<"simple">>) of
- <<"simple">> ->
- {couch_passwords:simple(Pass, UserSalt),
- couch_util:get_value(<<"password_sha">>, UserProps, nil)};
- <<"pbkdf2">> ->
- Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
- verify_iterations(Iterations),
- {couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
- couch_util:get_value(<<"derived_key">>, UserProps, nil)}
- end,
+ <<"simple">> ->
+ {
+ couch_passwords:simple(Pass, UserSalt),
+ couch_util:get_value(<<"password_sha">>, UserProps, nil)
+ };
+ <<"pbkdf2">> ->
+ Iterations = couch_util:get_value(<<"iterations">>, UserProps, 10000),
+ verify_iterations(Iterations),
+ {
+ couch_passwords:pbkdf2(Pass, UserSalt, Iterations),
+ couch_util:get_value(<<"derived_key">>, UserProps, nil)
+ }
+ end,
couch_passwords:verify(PasswordHash, ExpectedHash).
verify_iterations(Iterations) when is_integer(Iterations) ->
@@ -471,21 +577,25 @@ make_cookie_time() ->
{NowMS, NowS, _} = os:timestamp(),
NowMS * 1000000 + NowS.
-cookie_scheme(#httpd{mochi_req=MochiReq}) ->
+cookie_scheme(#httpd{mochi_req = MochiReq}) ->
[{http_only, true}] ++
- case MochiReq:get(scheme) of
- http -> [];
- https -> [{secure, true}]
- end.
+ case MochiReq:get(scheme) of
+ http -> [];
+ https -> [{secure, true}]
+ end.
max_age() ->
- case chttpd_util:get_chttpd_auth_config_boolean(
- "allow_persistent_cookies", true) of
+ case
+ chttpd_util:get_chttpd_auth_config_boolean(
+ "allow_persistent_cookies", true
+ )
+ of
false ->
[];
true ->
Timeout = chttpd_util:get_chttpd_auth_config_integer(
- "timeout", 600),
+ "timeout", 600
+ ),
[{max_age, Timeout}]
end.
@@ -496,20 +606,22 @@ cookie_domain() ->
_ -> [{domain, Domain}]
end.
-
same_site() ->
SameSite = chttpd_util:get_chttpd_auth_config("same_site", ""),
case string:to_lower(SameSite) of
- "" -> [];
- "none" -> [{same_site, none}];
- "lax" -> [{same_site, lax}];
- "strict" -> [{same_site, strict}];
+ "" ->
+ [];
+ "none" ->
+ [{same_site, none}];
+ "lax" ->
+ [{same_site, lax}];
+ "strict" ->
+ [{same_site, strict}];
_ ->
- couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ",[SameSite]),
+ couch_log:error("invalid config value couch_httpd_auth.same_site: ~p ", [SameSite]),
[]
end.
-
reject_if_totp(User) ->
case get_totp_config(User) of
undefined ->
@@ -525,7 +637,8 @@ verify_totp(User, Form) ->
{Props} ->
Key = couch_base32:decode(couch_util:get_value(<<"key">>, Props)),
Alg = couch_util:to_existing_atom(
- couch_util:get_value(<<"algorithm">>, Props, <<"sha">>)),
+ couch_util:get_value(<<"algorithm">>, Props, <<"sha">>)
+ ),
Len = couch_util:get_value(<<"length">>, Props, 6),
Token = ?l2b(couch_util:get_value("token", Form, "")),
verify_token(Alg, Key, Len, Token)
@@ -536,12 +649,17 @@ get_totp_config(User) ->
verify_token(Alg, Key, Len, Token) ->
Now = make_cookie_time(),
- Tokens = [generate_token(Alg, Key, Len, Now - 30),
- generate_token(Alg, Key, Len, Now),
- generate_token(Alg, Key, Len, Now + 30)],
+ Tokens = [
+ generate_token(Alg, Key, Len, Now - 30),
+ generate_token(Alg, Key, Len, Now),
+ generate_token(Alg, Key, Len, Now + 30)
+ ],
%% evaluate all tokens in constant time
- Match = lists:foldl(fun(T, Acc) -> couch_util:verify(T, Token) or Acc end,
- false, Tokens),
+ Match = lists:foldl(
+ fun(T, Acc) -> couch_util:verify(T, Token) or Acc end,
+ false,
+ Tokens
+ ),
case Match of
true ->
ok;
@@ -553,17 +671,20 @@ generate_token(Alg, Key, Len, Timestamp) ->
integer_to_binary(couch_totp:generate(Alg, Key, Timestamp, 30, Len), Len).
integer_to_binary(Int, Len) when is_integer(Int), is_integer(Len) ->
- Unpadded = case erlang:function_exported(erlang, integer_to_binary, 1) of
- true ->
- erlang:integer_to_binary(Int);
- false ->
- ?l2b(integer_to_list(Int))
- end,
+ Unpadded =
+ case erlang:function_exported(erlang, integer_to_binary, 1) of
+ true ->
+ erlang:integer_to_binary(Int);
+ false ->
+ ?l2b(integer_to_list(Int))
+ end,
Padding = binary:copy(<<"0">>, Len),
Padded = <<Padding/binary, Unpadded/binary>>,
binary:part(Padded, byte_size(Padded), -Len).
authentication_warning(#httpd{mochi_req = Req}, User) ->
Peer = Req:get(peer),
- couch_log:warning("~p: Authentication failed for user ~s from ~s",
- [?MODULE, User, Peer]).
+ couch_log:warning(
+ "~p: Authentication failed for user ~s from ~s",
+ [?MODULE, User, Peer]
+ ).
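The couch_httpd_auth.erl hunk above is likewise pure reformatting, but it runs through the whole AuthSession cookie path, so a compact sketch of that scheme may help when reading it. The module below is hypothetical and simplified: couch_util:encodeBase64Url/1, couch_util:hmac/3, and couch_passwords:verify/2 are replaced here by plain base64, crypto:mac/4, and an ordinary (not constant-time) comparison; it only mirrors the shape of what cookie_auth_cookie/4 produces and cookie_authentication_handler/2 verifies.

```
%% Hypothetical, simplified sketch of the AuthSession cookie scheme; not part
%% of this commit. Real CouchDB uses a URL-safe base64 variant and a
%% constant-time hash comparison.
-module(auth_cookie_sketch).
-export([make_cookie_value/3, check_cookie_value/3]).

%% The cookie payload is "User:HexTimestamp", signed with HMAC-SHA keyed by
%% the server secret concatenated with the user's salt (FullSecret).
make_cookie_value(User, FullSecret, TimeStamp) ->
    SessionData = User ++ ":" ++ erlang:integer_to_list(TimeStamp, 16),
    Hash = crypto:mac(hmac, sha, FullSecret, SessionData),
    base64:encode_to_string(SessionData ++ ":" ++ binary_to_list(Hash)).

%% Decode, split into at most three parts (the raw hash may itself contain a
%% colon byte), recompute the HMAC over "User:HexTimestamp", then check the
%% hash and the expiry window.
check_cookie_value(CookieValue, FullSecret, Timeout) ->
    Decoded = base64:decode_to_string(CookieValue),
    [User, TimeStr, HashStr] =
        re:split(Decoded, ":", [{return, list}, {parts, 3}]),
    Expected = crypto:mac(hmac, sha, FullSecret, User ++ ":" ++ TimeStr),
    NotExpired = now_seconds() < erlang:list_to_integer(TimeStr, 16) + Timeout,
    NotExpired andalso binary_to_list(Expected) =:= HashStr.

now_seconds() ->
    {MegaSecs, Secs, _} = os:timestamp(),
    MegaSecs * 1000000 + Secs.
```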
diff --git a/src/couch/src/couch_httpd_db.erl b/src/couch/src/couch_httpd_db.erl
index 2418c1a4c..e82eea7f7 100644
--- a/src/couch/src/couch_httpd_db.erl
+++ b/src/couch/src/couch_httpd_db.erl
@@ -16,17 +16,36 @@
-include_lib("couch/include/couch_db.hrl").
--export([handle_request/1, handle_compact_req/2, handle_design_req/2,
- db_req/2, couch_doc_open/4, handle_db_changes_req/2,
+-export([
+ handle_request/1,
+ handle_compact_req/2,
+ handle_design_req/2,
+ db_req/2,
+ couch_doc_open/4,
+ handle_db_changes_req/2,
update_doc_result_to_json/1, update_doc_result_to_json/2,
- handle_design_info_req/3, parse_copy_destination_header/1,
- parse_changes_query/2, handle_changes_req/4]).
-
--import(couch_httpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
- start_chunked_response/3, absolute_uri/2, send/2,
- start_response_length/4, send_error/4]).
+ handle_design_info_req/3,
+ parse_copy_destination_header/1,
+ parse_changes_query/2,
+ handle_changes_req/4
+]).
+
+-import(
+ couch_httpd,
+ [
+ send_json/2, send_json/3, send_json/4,
+ send_method_not_allowed/2,
+ start_json_response/2,
+ send_chunk/2,
+ last_chunk/1,
+ end_json_response/1,
+ start_chunked_response/3,
+ absolute_uri/2,
+ send/2,
+ start_response_length/4,
+ send_error/4
+ ]
+).
-record(doc_query_args, {
options = [],
@@ -37,134 +56,148 @@
}).
% Database request handlers
-handle_request(#httpd{path_parts=[DbName|RestParts],method=Method,
- db_url_handlers=DbUrlHandlers}=Req)->
+handle_request(
+ #httpd{
+ path_parts = [DbName | RestParts],
+ method = Method,
+ db_url_handlers = DbUrlHandlers
+ } = Req
+) ->
case {Method, RestParts} of
- {'PUT', []} ->
- create_db_req(Req, DbName);
- {'DELETE', []} ->
- % if we get ?rev=... the user is using a faulty script where the
- % document id is empty by accident. Let them recover safely.
- case couch_httpd:qs_value(Req, "rev", false) of
- false -> delete_db_req(Req, DbName);
- _Rev -> throw({bad_request,
- "You tried to DELETE a database with a ?rev= parameter. "
- ++ "Did you mean to DELETE a document instead?"})
- end;
- {_, []} ->
- do_db_req(Req, fun db_req/2);
- {_, [SecondPart|_]} ->
- Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
- do_db_req(Req, Handler)
+ {'PUT', []} ->
+ create_db_req(Req, DbName);
+ {'DELETE', []} ->
+ % if we get ?rev=... the user is using a faulty script where the
+ % document id is empty by accident. Let them recover safely.
+ case couch_httpd:qs_value(Req, "rev", false) of
+ false ->
+ delete_db_req(Req, DbName);
+ _Rev ->
+ throw(
+ {bad_request,
+ "You tried to DELETE a database with a ?rev= parameter. " ++
+ "Did you mean to DELETE a document instead?"}
+ )
+ end;
+ {_, []} ->
+ do_db_req(Req, fun db_req/2);
+ {_, [SecondPart | _]} ->
+ Handler = couch_util:dict_find(SecondPart, DbUrlHandlers, fun db_req/2),
+ do_db_req(Req, Handler)
end.
-
handle_db_changes_req(Req, Db) ->
ChangesArgs = parse_changes_query(Req, Db),
ChangesFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db),
handle_changes_req(Req, Db, ChangesArgs, ChangesFun).
-
-handle_changes_req(#httpd{method='POST'}=Req, Db, ChangesArgs, ChangesFun) ->
+handle_changes_req(#httpd{method = 'POST'} = Req, Db, ChangesArgs, ChangesFun) ->
couch_httpd:validate_ctype(Req, "application/json"),
handle_changes_req1(Req, Db, ChangesArgs, ChangesFun);
-handle_changes_req(#httpd{method='GET'}=Req, Db, ChangesArgs, ChangesFun) ->
+handle_changes_req(#httpd{method = 'GET'} = Req, Db, ChangesArgs, ChangesFun) ->
handle_changes_req1(Req, Db, ChangesArgs, ChangesFun);
-handle_changes_req(#httpd{}=Req, _Db, _ChangesArgs, _ChangesFun) ->
+handle_changes_req(#httpd{} = Req, _Db, _ChangesArgs, _ChangesFun) ->
couch_httpd:send_method_not_allowed(Req, "GET,HEAD,POST").
handle_changes_req1(Req, Db, ChangesArgs, ChangesFun) ->
DbName = couch_db:name(Db),
AuthDbName = ?l2b(config:get("couch_httpd_auth", "authentication_db")),
case AuthDbName of
- DbName ->
- % in the authentication database, _changes is admin-only.
- ok = couch_db:check_is_admin(Db);
- _Else ->
- % on other databases, _changes is free for all.
- ok
+ DbName ->
+ % in the authentication database, _changes is admin-only.
+ ok = couch_db:check_is_admin(Db);
+ _Else ->
+ % on other databases, _changes is free for all.
+ ok
end,
MakeCallback = fun(Resp) ->
- fun({change, {ChangeProp}=Change, _}, "eventsource") ->
- Seq = proplists:get_value(<<"seq">>, ChangeProp),
- couch_httpd:send_chunk(Resp, ["data: ", ?JSON_ENCODE(Change),
- "\n", "id: ", ?JSON_ENCODE(Seq),
- "\n\n"]);
- ({change, Change, _}, "continuous") ->
- couch_httpd:send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
- ({change, Change, Prepend}, _) ->
- couch_httpd:send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
- (start, "eventsource") ->
- ok;
- (start, "continuous") ->
- ok;
- (start, _) ->
- couch_httpd:send_chunk(Resp, "{\"results\":[\n");
- ({stop, _EndSeq}, "eventsource") ->
- couch_httpd:end_json_response(Resp);
- ({stop, EndSeq}, "continuous") ->
- couch_httpd:send_chunk(
- Resp,
- [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
- ),
- couch_httpd:end_json_response(Resp);
- ({stop, EndSeq}, _) ->
- couch_httpd:send_chunk(
- Resp,
- io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
- ),
- couch_httpd:end_json_response(Resp);
- (timeout, "eventsource") ->
- couch_httpd:send_chunk(Resp, "event: heartbeat\ndata: \n\n");
- (timeout, _) ->
- couch_httpd:send_chunk(Resp, "\n")
+ fun
+ ({change, {ChangeProp} = Change, _}, "eventsource") ->
+ Seq = proplists:get_value(<<"seq">>, ChangeProp),
+ couch_httpd:send_chunk(Resp, [
+ "data: ",
+ ?JSON_ENCODE(Change),
+ "\n",
+ "id: ",
+ ?JSON_ENCODE(Seq),
+ "\n\n"
+ ]);
+ ({change, Change, _}, "continuous") ->
+ couch_httpd:send_chunk(Resp, [?JSON_ENCODE(Change) | "\n"]);
+ ({change, Change, Prepend}, _) ->
+ couch_httpd:send_chunk(Resp, [Prepend, ?JSON_ENCODE(Change)]);
+ (start, "eventsource") ->
+ ok;
+ (start, "continuous") ->
+ ok;
+ (start, _) ->
+ couch_httpd:send_chunk(Resp, "{\"results\":[\n");
+ ({stop, _EndSeq}, "eventsource") ->
+ couch_httpd:end_json_response(Resp);
+ ({stop, EndSeq}, "continuous") ->
+ couch_httpd:send_chunk(
+ Resp,
+ [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
+ ),
+ couch_httpd:end_json_response(Resp);
+ ({stop, EndSeq}, _) ->
+ couch_httpd:send_chunk(
+ Resp,
+ io_lib:format("\n],\n\"last_seq\":~w}\n", [EndSeq])
+ ),
+ couch_httpd:end_json_response(Resp);
+ (timeout, "eventsource") ->
+ couch_httpd:send_chunk(Resp, "event: heartbeat\ndata: \n\n");
+ (timeout, _) ->
+ couch_httpd:send_chunk(Resp, "\n")
end
end,
- WrapperFun = case ChangesArgs#changes_args.feed of
- "normal" ->
- {ok, Info} = couch_db:get_db_info(Db),
- CurrentEtag = couch_httpd:make_etag(Info),
- fun(FeedChangesFun) ->
- couch_httpd:etag_respond(
- Req,
- CurrentEtag,
- fun() ->
- {ok, Resp} = couch_httpd:start_json_response(
- Req, 200, [{"ETag", CurrentEtag}]
- ),
+ WrapperFun =
+ case ChangesArgs#changes_args.feed of
+ "normal" ->
+ {ok, Info} = couch_db:get_db_info(Db),
+ CurrentEtag = couch_httpd:make_etag(Info),
+ fun(FeedChangesFun) ->
+ couch_httpd:etag_respond(
+ Req,
+ CurrentEtag,
+ fun() ->
+ {ok, Resp} = couch_httpd:start_json_response(
+ Req, 200, [{"ETag", CurrentEtag}]
+ ),
+ FeedChangesFun(MakeCallback(Resp))
+ end
+ )
+ end;
+ "eventsource" ->
+ Headers = [
+ {"Content-Type", "text/event-stream"},
+ {"Cache-Control", "no-cache"}
+ ],
+ {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
+ fun(FeedChangesFun) ->
+ FeedChangesFun(MakeCallback(Resp))
+ end;
+ _ ->
+ % "longpoll" or "continuous"
+ {ok, Resp} = couch_httpd:start_json_response(Req, 200),
+ fun(FeedChangesFun) ->
FeedChangesFun(MakeCallback(Resp))
end
- )
- end;
- "eventsource" ->
- Headers = [
- {"Content-Type", "text/event-stream"},
- {"Cache-Control", "no-cache"}
- ],
- {ok, Resp} = couch_httpd:start_chunked_response(Req, 200, Headers),
- fun(FeedChangesFun) ->
- FeedChangesFun(MakeCallback(Resp))
- end;
- _ ->
- % "longpoll" or "continuous"
- {ok, Resp} = couch_httpd:start_json_response(Req, 200),
- fun(FeedChangesFun) ->
- FeedChangesFun(MakeCallback(Resp))
- end
- end,
+ end,
couch_stats:increment_counter(
- [couchdb, httpd, clients_requesting_changes]),
+ [couchdb, httpd, clients_requesting_changes]
+ ),
try
WrapperFun(ChangesFun)
after
couch_stats:decrement_counter(
- [couchdb, httpd, clients_requesting_changes])
+ [couchdb, httpd, clients_requesting_changes]
+ )
end.
-
-
-handle_compact_req(#httpd{method='POST'}=Req, Db) ->
+handle_compact_req(#httpd{method = 'POST'} = Req, Db) ->
case Req#httpd.path_parts of
[_DbName, <<"_compact">>] ->
ok = couch_db:check_is_admin(Db),
@@ -179,24 +212,30 @@ handle_compact_req(#httpd{method='POST'}=Req, Db) ->
),
couch_mrview_http:handle_compact_req(Req, Db, DDoc)
end;
-
handle_compact_req(Req, _Db) ->
send_method_not_allowed(Req, "POST").
-
-handle_design_req(#httpd{
- path_parts=[_DbName, _Design, DesignName, <<"_",_/binary>> = Action | _Rest],
+handle_design_req(
+ #httpd{
+ path_parts = [_DbName, _Design, DesignName, <<"_", _/binary>> = Action | _Rest],
design_url_handlers = DesignUrlHandlers
- }=Req, Db) ->
+ } = Req,
+ Db
+) ->
case couch_db:is_system_db(Db) of
- true ->
- case (catch couch_db:check_is_admin(Db)) of
- ok -> ok;
- _ ->
- throw({forbidden, <<"Only admins can access design document",
- " actions for system databases.">>})
- end;
- false -> ok
+ true ->
+ case (catch couch_db:check_is_admin(Db)) of
+ ok ->
+ ok;
+ _ ->
+ throw(
+ {forbidden,
+ <<"Only admins can access design document",
+ " actions for system databases.">>}
+ )
+ end;
+ false ->
+ ok
end,
% maybe load ddoc through fabric
@@ -212,295 +251,315 @@ handle_design_req(#httpd{
throw({not_found, <<"missing handler: ", Action/binary>>})
end),
Handler(Req, Db, DDoc);
-
handle_design_req(Req, Db) ->
db_req(Req, Db).
-handle_design_info_req(#httpd{
- method='GET',
- path_parts=[_DbName, _Design, DesignName, _]
- }=Req, Db, _DDoc) ->
+handle_design_info_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_DbName, _Design, DesignName, _]
+ } = Req,
+ Db,
+ _DDoc
+) ->
DesignId = <<"_design/", DesignName/binary>>,
DDoc = couch_httpd_db:couch_doc_open(Db, DesignId, nil, [ejson_body]),
couch_mrview_http:handle_info_req(Req, Db, DDoc).
-create_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
+create_db_req(#httpd{user_ctx = UserCtx} = Req, DbName) ->
ok = couch_httpd:verify_is_server_admin(Req),
- Engine = case couch_httpd:qs_value(Req, "engine") of
- EngineStr when is_list(EngineStr) ->
- [{engine, iolist_to_binary(EngineStr)}];
- _ ->
- []
- end,
+ Engine =
+ case couch_httpd:qs_value(Req, "engine") of
+ EngineStr when is_list(EngineStr) ->
+ [{engine, iolist_to_binary(EngineStr)}];
+ _ ->
+ []
+ end,
case couch_server:create(DbName, [{user_ctx, UserCtx}] ++ Engine) of
- {ok, Db} ->
- couch_db:close(Db),
- DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
- send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
- Error ->
- throw(Error)
+ {ok, Db} ->
+ couch_db:close(Db),
+ DbUrl = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName)),
+ send_json(Req, 201, [{"Location", DbUrl}], {[{ok, true}]});
+ Error ->
+ throw(Error)
end.
-delete_db_req(#httpd{user_ctx=UserCtx}=Req, DbName) ->
+delete_db_req(#httpd{user_ctx = UserCtx} = Req, DbName) ->
ok = couch_httpd:verify_is_server_admin(Req),
- Options = case couch_httpd:qs_value(Req, "sync") of
- "true" -> [sync, {user_ctx, UserCtx}];
- _ -> [{user_ctx, UserCtx}]
- end,
+ Options =
+ case couch_httpd:qs_value(Req, "sync") of
+ "true" -> [sync, {user_ctx, UserCtx}];
+ _ -> [{user_ctx, UserCtx}]
+ end,
case couch_server:delete(DbName, Options) of
- ok ->
- send_json(Req, 200, {[{ok, true}]});
- Error ->
- throw(Error)
+ ok ->
+ send_json(Req, 200, {[{ok, true}]});
+ Error ->
+ throw(Error)
end.
-do_db_req(#httpd{user_ctx=UserCtx,path_parts=[DbName|_]}=Req, Fun) ->
+do_db_req(#httpd{user_ctx = UserCtx, path_parts = [DbName | _]} = Req, Fun) ->
case couch_db:open(DbName, [{user_ctx, UserCtx}]) of
- {ok, Db} ->
- try
- Fun(Req, Db)
- after
- catch couch_db:close(Db)
- end;
- Error ->
- throw(Error)
+ {ok, Db} ->
+ try
+ Fun(Req, Db)
+ after
+ catch couch_db:close(Db)
+ end;
+ Error ->
+ throw(Error)
end.
-db_req(#httpd{method='GET',path_parts=[_DbName]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_DbName]} = Req, Db) ->
{ok, DbInfo} = couch_db:get_db_info(Db),
send_json(Req, {DbInfo});
-
-db_req(#httpd{method='POST',path_parts=[_DbName]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_DbName]} = Req, Db) ->
couch_httpd:validate_ctype(Req, "application/json"),
Doc = couch_db:doc_from_json_obj_validate(Db, couch_httpd:json_body(Req)),
validate_attachment_names(Doc),
- Doc2 = case Doc#doc.id of
- <<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
- _ ->
- Doc
- end,
+ Doc2 =
+ case Doc#doc.id of
+ <<"">> ->
+ Doc#doc{id = couch_uuids:new(), revs = {0, []}};
+ _ ->
+ Doc
+ end,
DocId = Doc2#doc.id,
update_doc(Req, Db, DocId, Doc2);
-
-db_req(#httpd{path_parts=[_DbName]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_DbName]} = Req, _Db) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_ensure_full_commit">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_ensure_full_commit">>]} = Req, Db) ->
couch_httpd:validate_ctype(Req, "application/json"),
_ = couch_httpd:body(Req),
StartTime = couch_db:get_instance_start_time(Db),
- send_json(Req, 201, {[
- {ok, true},
- {instance_start_time, StartTime}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_ensure_full_commit">>]}=Req, _Db) ->
+ send_json(
+ Req,
+ 201,
+ {[
+ {ok, true},
+ {instance_start_time, StartTime}
+ ]}
+ );
+db_req(#httpd{path_parts = [_, <<"_ensure_full_commit">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_bulk_docs">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_bulk_docs">>]} = Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, bulk_requests]),
couch_httpd:validate_ctype(Req, "application/json"),
{JsonProps} = couch_httpd:json_body_obj(Req),
case couch_util:get_value(<<"docs">>, JsonProps) of
- undefined ->
- send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
- DocsArray ->
- couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
- case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit];
- "false" ->
- Options = [delay_commit];
- _ ->
- Options = []
- end,
- case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
- true ->
- Docs = lists:map(
- fun({ObjProps} = JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_attachment_names(Doc),
- Id = case Doc#doc.id of
- <<>> -> couch_uuids:new();
- Id0 -> Id0
- end,
- case couch_util:get_value(<<"_rev">>, ObjProps) of
- undefined ->
- Revs = {0, []};
- Rev ->
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- Revs = {Pos, [RevId]}
- end,
- Doc#doc{id=Id,revs=Revs}
- end,
- DocsArray),
- Options2 =
- case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
- true -> [all_or_nothing|Options];
- _ -> Options
+ undefined ->
+ send_error(Req, 400, <<"bad_request">>, <<"Missing JSON list of 'docs'">>);
+ DocsArray ->
+ couch_stats:update_histogram([couchdb, httpd, bulk_docs], length(DocsArray)),
+ case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
+ "true" ->
+ Options = [full_commit];
+ "false" ->
+ Options = [delay_commit];
+ _ ->
+ Options = []
end,
- case couch_db:update_docs(Db, Docs, Options2) of
- {ok, Results} ->
- % output the results
- DocResults = lists:zipwith(fun update_doc_result_to_json/2,
- Docs, Results),
- send_json(Req, 201, DocResults);
- {aborted, Errors} ->
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 417, ErrorsJson)
- end;
- false ->
- Docs = lists:map(fun(JsonObj) ->
- Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
- validate_attachment_names(Doc),
- Doc
- end, DocsArray),
- {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
- ErrorsJson =
- lists:map(fun update_doc_result_to_json/1, Errors),
- send_json(Req, 201, ErrorsJson)
- end
+ case couch_util:get_value(<<"new_edits">>, JsonProps, true) of
+ true ->
+ Docs = lists:map(
+ fun({ObjProps} = JsonObj) ->
+ Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
+ validate_attachment_names(Doc),
+ Id =
+ case Doc#doc.id of
+ <<>> -> couch_uuids:new();
+ Id0 -> Id0
+ end,
+ case couch_util:get_value(<<"_rev">>, ObjProps) of
+ undefined ->
+ Revs = {0, []};
+ Rev ->
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ Revs = {Pos, [RevId]}
+ end,
+ Doc#doc{id = Id, revs = Revs}
+ end,
+ DocsArray
+ ),
+ Options2 =
+ case couch_util:get_value(<<"all_or_nothing">>, JsonProps) of
+ true -> [all_or_nothing | Options];
+ _ -> Options
+ end,
+ case couch_db:update_docs(Db, Docs, Options2) of
+ {ok, Results} ->
+ % output the results
+ DocResults = lists:zipwith(
+ fun update_doc_result_to_json/2,
+ Docs,
+ Results
+ ),
+ send_json(Req, 201, DocResults);
+ {aborted, Errors} ->
+ ErrorsJson =
+ lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 417, ErrorsJson)
+ end;
+ false ->
+ Docs = lists:map(
+ fun(JsonObj) ->
+ Doc = couch_db:doc_from_json_obj_validate(Db, JsonObj),
+ validate_attachment_names(Doc),
+ Doc
+ end,
+ DocsArray
+ ),
+ {ok, Errors} = couch_db:update_docs(Db, Docs, Options, replicated_changes),
+ ErrorsJson =
+ lists:map(fun update_doc_result_to_json/1, Errors),
+ send_json(Req, 201, ErrorsJson)
+ end
end;
-db_req(#httpd{path_parts=[_,<<"_bulk_docs">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_bulk_docs">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_purge">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_purge">>]} = Req, Db) ->
couch_stats:increment_counter([couchdb, httpd, purge_requests]),
couch_httpd:validate_ctype(Req, "application/json"),
{IdRevs} = couch_httpd:json_body_obj(Req),
- PurgeReqs = lists:map(fun({Id, JsonRevs}) ->
- {couch_uuids:new(), Id, couch_doc:parse_revs(JsonRevs)}
- end, IdRevs),
+ PurgeReqs = lists:map(
+ fun({Id, JsonRevs}) ->
+ {couch_uuids:new(), Id, couch_doc:parse_revs(JsonRevs)}
+ end,
+ IdRevs
+ ),
{ok, Replies} = couch_db:purge_docs(Db, PurgeReqs),
- Results = lists:zipwith(fun({Id, _}, {ok, Reply}) ->
- {Id, couch_doc:revs_to_strs(Reply)}
- end, IdRevs, Replies),
+ Results = lists:zipwith(
+ fun({Id, _}, {ok, Reply}) ->
+ {Id, couch_doc:revs_to_strs(Reply)}
+ end,
+ IdRevs,
+ Replies
+ ),
{ok, Db2} = couch_db:reopen(Db),
PurgeSeq = couch_db:get_purge_seq(Db2),
send_json(Req, 200, {[{purge_seq, PurgeSeq}, {purged, {Results}}]});
-
-db_req(#httpd{path_parts=[_,<<"_purge">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_purge">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_missing_revs">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_missing_revs">>]} = Req, Db) ->
couch_httpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
- JsonDocIdRevs2 = [{Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]} || {Id, RevStrs} <- JsonDocIdRevs],
+ JsonDocIdRevs2 = [
+ {Id, [couch_doc:parse_rev(RevStr) || RevStr <- RevStrs]}
+ || {Id, RevStrs} <- JsonDocIdRevs
+ ],
{ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
Results2 = [{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs, _} <- Results],
- send_json(Req, {[
- {missing_revs, {Results2}}
- ]});
-
-db_req(#httpd{path_parts=[_,<<"_missing_revs">>]}=Req, _Db) ->
+ send_json(
+ Req,
+ {[
+ {missing_revs, {Results2}}
+ ]}
+ );
+db_req(#httpd{path_parts = [_, <<"_missing_revs">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='POST',path_parts=[_,<<"_revs_diff">>]}=Req, Db) ->
+db_req(#httpd{method = 'POST', path_parts = [_, <<"_revs_diff">>]} = Req, Db) ->
couch_httpd:validate_ctype(Req, "application/json"),
{JsonDocIdRevs} = couch_httpd:json_body_obj(Req),
JsonDocIdRevs2 =
[{Id, couch_doc:parse_revs(RevStrs)} || {Id, RevStrs} <- JsonDocIdRevs],
{ok, Results} = couch_db:get_missing_revs(Db, JsonDocIdRevs2),
Results2 =
- lists:map(fun({Id, MissingRevs, PossibleAncestors}) ->
- {Id,
- {[{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
- if PossibleAncestors == [] ->
- [];
- true ->
- [{possible_ancestors,
- couch_doc:revs_to_strs(PossibleAncestors)}]
- end}}
- end, Results),
+ lists:map(
+ fun({Id, MissingRevs, PossibleAncestors}) ->
+ {Id, {
+ [{missing, couch_doc:revs_to_strs(MissingRevs)}] ++
+ if
+ PossibleAncestors == [] ->
+ [];
+ true ->
+ [{possible_ancestors, couch_doc:revs_to_strs(PossibleAncestors)}]
+ end
+ }}
+ end,
+ Results
+ ),
send_json(Req, {Results2});
-
-db_req(#httpd{path_parts=[_,<<"_revs_diff">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_revs_diff">>]} = Req, _Db) ->
send_method_not_allowed(Req, "POST");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_security">>]}=Req, Db) ->
+db_req(#httpd{method = 'PUT', path_parts = [_, <<"_security">>]} = Req, Db) ->
SecObj = couch_httpd:json_body(Req),
ok = couch_db:set_security(Db, SecObj),
send_json(Req, {[{<<"ok">>, true}]});
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_security">>]}=Req, Db) ->
+db_req(#httpd{method = 'GET', path_parts = [_, <<"_security">>]} = Req, Db) ->
send_json(Req, couch_db:get_security(Db));
-
-db_req(#httpd{path_parts=[_,<<"_security">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_security">>]} = Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-
-db_req(#httpd{method='PUT',path_parts=[_,<<"_revs_limit">>]}=Req,
- Db) ->
+db_req(
+ #httpd{method = 'PUT', path_parts = [_, <<"_revs_limit">>]} = Req,
+ Db
+) ->
Limit = couch_httpd:json_body(Req),
- case is_integer(Limit) of
- true ->
- ok = couch_db:set_revs_limit(Db, Limit),
- send_json(Req, {[{<<"ok">>, true}]});
- false ->
- throw({bad_request, <<"Rev limit has to be an integer">>})
- end;
-
-db_req(#httpd{method='GET',path_parts=[_,<<"_revs_limit">>]}=Req, Db) ->
+ case is_integer(Limit) of
+ true ->
+ ok = couch_db:set_revs_limit(Db, Limit),
+ send_json(Req, {[{<<"ok">>, true}]});
+ false ->
+ throw({bad_request, <<"Rev limit has to be an integer">>})
+ end;
+db_req(#httpd{method = 'GET', path_parts = [_, <<"_revs_limit">>]} = Req, Db) ->
send_json(Req, couch_db:get_revs_limit(Db));
-
-db_req(#httpd{path_parts=[_,<<"_revs_limit">>]}=Req, _Db) ->
+db_req(#httpd{path_parts = [_, <<"_revs_limit">>]} = Req, _Db) ->
send_method_not_allowed(Req, "PUT,GET");
-
% Special case to enable using an unencoded slash in the URL of design docs,
% as slashes in document IDs must otherwise be URL encoded.
-db_req(#httpd{method='GET',mochi_req=MochiReq, path_parts=[DbName,<<"_design/",_/binary>>|_]}=Req, _Db) ->
+db_req(
+ #httpd{
+ method = 'GET', mochi_req = MochiReq, path_parts = [DbName, <<"_design/", _/binary>> | _]
+ } = Req,
+ _Db
+) ->
PathFront = "/" ++ couch_httpd:quote(binary_to_list(DbName)) ++ "/",
- [_|PathTail] = re:split(MochiReq:get(raw_path), "_design%2F",
- [{return, list}]),
- couch_httpd:send_redirect(Req, PathFront ++ "_design/" ++
- mochiweb_util:join(PathTail, "_design%2F"));
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name]}=Req, Db) ->
- db_doc_req(Req, Db, <<"_design/",Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName,<<"_design">>,Name|FileNameParts]}=Req, Db) ->
- db_attachment_req(Req, Db, <<"_design/",Name/binary>>, FileNameParts);
-
-
+ [_ | PathTail] = re:split(
+ MochiReq:get(raw_path),
+ "_design%2F",
+ [{return, list}]
+ ),
+ couch_httpd:send_redirect(
+ Req,
+ PathFront ++ "_design/" ++
+ mochiweb_util:join(PathTail, "_design%2F")
+ );
+db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name]} = Req, Db) ->
+ db_doc_req(Req, Db, <<"_design/", Name/binary>>);
+db_req(#httpd{path_parts = [_DbName, <<"_design">>, Name | FileNameParts]} = Req, Db) ->
+ db_attachment_req(Req, Db, <<"_design/", Name/binary>>, FileNameParts);
% Special case to allow for accessing local documents without %2F
% encoding the docid. Throws out requests that don't have the second
% path part or that specify an attachment name.
-db_req(#httpd{path_parts=[_DbName, <<"_local">>]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">>]}, _Db) ->
throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local/">>]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local/">>]}, _Db) ->
throw({bad_request, <<"Invalid _local document id.">>});
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">>, Name]}=Req, Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">>, Name]} = Req, Db) ->
db_doc_req(Req, Db, <<"_local/", Name/binary>>);
-
-db_req(#httpd{path_parts=[_DbName, <<"_local">> | _Rest]}, _Db) ->
+db_req(#httpd{path_parts = [_DbName, <<"_local">> | _Rest]}, _Db) ->
throw({bad_request, <<"_local documents do not accept attachments.">>});
-
-db_req(#httpd{path_parts=[_, DocId]}=Req, Db) ->
+db_req(#httpd{path_parts = [_, DocId]} = Req, Db) ->
db_doc_req(Req, Db, DocId);
-
-db_req(#httpd{path_parts=[_, DocId | FileNameParts]}=Req, Db) ->
+db_req(#httpd{path_parts = [_, DocId | FileNameParts]} = Req, Db) ->
db_attachment_req(Req, Db, DocId, FileNameParts).
-db_doc_req(#httpd{method='DELETE'}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'DELETE'} = Req, Db, DocId) ->
% check for the existence of the doc to handle the 404 case.
couch_doc_open(Db, DocId, nil, []),
case couch_httpd:qs_value(Req, "rev") of
- undefined ->
- JsonObj = {[{<<"_deleted">>,true}]},
- Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
- update_doc(Req, Db, DocId, Doc);
- Rev ->
- JsonObj = {[{<<"_rev">>, ?l2b(Rev)},{<<"_deleted">>,true}]},
- Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
- update_doc(Req, Db, DocId, Doc)
+ undefined ->
+ JsonObj = {[{<<"_deleted">>, true}]},
+ Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
+ update_doc(Req, Db, DocId, Doc);
+ Rev ->
+ JsonObj = {[{<<"_rev">>, ?l2b(Rev)}, {<<"_deleted">>, true}]},
+ Doc = couch_doc_from_req(Req, Db, DocId, JsonObj),
+ update_doc(Req, Db, DocId, Doc)
end;
-
db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
#doc_query_args{
rev = Rev,
@@ -508,206 +567,243 @@ db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) ->
options = Options1,
atts_since = AttsSince
} = parse_doc_query(Req),
- Options = case AttsSince of
- nil ->
- Options1;
- RevList when is_list(RevList) ->
- [{atts_since, RevList}, attachments | Options1]
- end,
+ Options =
+ case AttsSince of
+ nil ->
+ Options1;
+ RevList when is_list(RevList) ->
+ [{atts_since, RevList}, attachments | Options1]
+ end,
case Revs of
- [] ->
- Doc = couch_doc_open(Db, DocId, Rev, Options),
- send_doc(Req, Doc, Options);
- _ ->
- {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
- case MochiReq:accepts_content_type("multipart/mixed") of
- false ->
- {ok, Resp} = start_json_response(Req, 200),
- send_chunk(Resp, "["),
- % We loop through the docs. The first time through the separator
- % is whitespace, then a comma on subsequent iterations.
- lists:foldl(
- fun(Result, AccSeparator) ->
- case Result of
- {ok, Doc} ->
- JsonDoc = couch_doc:to_json_obj(Doc, Options),
- Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
- send_chunk(Resp, AccSeparator ++ Json);
- {{not_found, missing}, RevId} ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- send_chunk(Resp, AccSeparator ++ Json)
- end,
- "," % AccSeparator now has a comma
- end,
- "", Results),
- send_chunk(Resp, "]"),
- end_json_response(Resp);
- true ->
- send_docs_multipart(Req, Results, Options)
- end
+ [] ->
+ Doc = couch_doc_open(Db, DocId, Rev, Options),
+ send_doc(Req, Doc, Options);
+ _ ->
+ {ok, Results} = couch_db:open_doc_revs(Db, DocId, Revs, Options),
+ case MochiReq:accepts_content_type("multipart/mixed") of
+ false ->
+ {ok, Resp} = start_json_response(Req, 200),
+ send_chunk(Resp, "["),
+ % We loop through the docs. The first time through the separator
+ % is whitespace, then a comma on subsequent iterations.
+ lists:foldl(
+ fun(Result, AccSeparator) ->
+ case Result of
+ {ok, Doc} ->
+ JsonDoc = couch_doc:to_json_obj(Doc, Options),
+ Json = ?JSON_ENCODE({[{ok, JsonDoc}]}),
+ send_chunk(Resp, AccSeparator ++ Json);
+ {{not_found, missing}, RevId} ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
+ send_chunk(Resp, AccSeparator ++ Json)
+ end,
+ % AccSeparator now has a comma
+ ","
+ end,
+ "",
+ Results
+ ),
+ send_chunk(Resp, "]"),
+ end_json_response(Resp);
+ true ->
+ send_docs_multipart(Req, Results, Options)
+ end
end;
-
-
-db_doc_req(#httpd{method='POST'}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'POST'} = Req, Db, DocId) ->
couch_httpd:validate_referer(Req),
couch_db:validate_docid(Db, DocId),
couch_httpd:validate_ctype(Req, "multipart/form-data"),
Form = couch_httpd:parse_form(Req),
case couch_util:get_value("_doc", Form) of
- undefined ->
- Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
- {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
- Json ->
- Doc = couch_doc_from_req(Req, Db, DocId, ?JSON_DECODE(Json))
+ undefined ->
+ Rev = couch_doc:parse_rev(couch_util:get_value("_rev", Form)),
+ {ok, [{ok, Doc}]} = couch_db:open_doc_revs(Db, DocId, [Rev], []);
+ Json ->
+ Doc = couch_doc_from_req(Req, Db, DocId, ?JSON_DECODE(Json))
end,
UpdatedAtts = [
couch_att:new([
{name, validate_attachment_name(Name)},
{type, list_to_binary(ContentType)},
{data, Content}
- ]) ||
- {Name, {ContentType, _}, Content} <-
- proplists:get_all_values("_attachments", Form)
+ ])
+ || {Name, {ContentType, _}, Content} <-
+ proplists:get_all_values("_attachments", Form)
],
- #doc{atts=OldAtts} = Doc,
+ #doc{atts = OldAtts} = Doc,
OldAtts2 = lists:flatmap(
fun(Att) ->
OldName = couch_att:fetch(name, Att),
case [1 || A <- UpdatedAtts, couch_att:fetch(name, A) == OldName] of
- [] -> [Att]; % the attachment wasn't in the UpdatedAtts, return it
- _ -> [] % the attachment was in the UpdatedAtts, drop it
+ % the attachment wasn't in the UpdatedAtts, return it
+ [] -> [Att];
+ % the attachment was in the UpdatedAtts, drop it
+ _ -> []
end
- end, OldAtts),
+ end,
+ OldAtts
+ ),
NewDoc = Doc#doc{
atts = UpdatedAtts ++ OldAtts2
},
update_doc(Req, Db, DocId, NewDoc);
-
-db_doc_req(#httpd{method='PUT'}=Req, Db, DocId) ->
+db_doc_req(#httpd{method = 'PUT'} = Req, Db, DocId) ->
couch_db:validate_docid(Db, DocId),
case couch_util:to_list(couch_httpd:header_value(Req, "Content-Type")) of
- ("multipart/related;" ++ _) = ContentType ->
- couch_httpd:check_max_request_length(Req),
- {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
- ContentType, fun() -> receive_request_data(Req) end),
- Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
- try
- Result = update_doc(Req, Db, DocId, Doc),
- WaitFun(),
- Result
- catch throw:Err ->
- % Document rejected by a validate_doc_update function.
- couch_httpd_multipart:abort_multipart_stream(Parser),
- throw(Err)
- end;
- _Else ->
- Body = couch_httpd:json_body(Req),
- Doc = couch_doc_from_req(Req, Db, DocId, Body),
- update_doc(Req, Db, DocId, Doc)
+ ("multipart/related;" ++ _) = ContentType ->
+ couch_httpd:check_max_request_length(Req),
+ {ok, Doc0, WaitFun, Parser} = couch_doc:doc_from_multi_part_stream(
+ ContentType, fun() -> receive_request_data(Req) end
+ ),
+ Doc = couch_doc_from_req(Req, Db, DocId, Doc0),
+ try
+ Result = update_doc(Req, Db, DocId, Doc),
+ WaitFun(),
+ Result
+ catch
+ throw:Err ->
+ % Document rejected by a validate_doc_update function.
+ couch_httpd_multipart:abort_multipart_stream(Parser),
+ throw(Err)
+ end;
+ _Else ->
+ Body = couch_httpd:json_body(Req),
+ Doc = couch_doc_from_req(Req, Db, DocId, Body),
+ update_doc(Req, Db, DocId, Doc)
end;
-
-db_doc_req(#httpd{method='COPY'}=Req, Db, SourceDocId) ->
+db_doc_req(#httpd{method = 'COPY'} = Req, Db, SourceDocId) ->
SourceRev =
- case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- missing_rev -> nil;
- Rev -> Rev
- end,
+ case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+ missing_rev -> nil;
+ Rev -> Rev
+ end,
{TargetDocId0, TargetRevs} = parse_copy_destination_header(Req),
TargetDocId = list_to_binary(mochiweb_util:unquote(TargetDocId0)),
% open old doc
Doc = couch_doc_open(Db, SourceDocId, SourceRev, []),
% save new doc
- update_doc(Req, Db, TargetDocId, Doc#doc{id=TargetDocId, revs=TargetRevs});
-
+ update_doc(Req, Db, TargetDocId, Doc#doc{id = TargetDocId, revs = TargetRevs});
db_doc_req(Req, _Db, _DocId) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,POST,PUT,COPY").
-
send_doc(Req, Doc, Options) ->
case Doc#doc.meta of
- [] ->
- DiskEtag = couch_httpd:doc_etag(Doc),
- % output etag only when we have no meta
- couch_httpd:etag_respond(Req, DiskEtag, fun() ->
- send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
- end);
- _ ->
- send_doc_efficiently(Req, Doc, [], Options)
+ [] ->
+ DiskEtag = couch_httpd:doc_etag(Doc),
+ % output etag only when we have no meta
+ couch_httpd:etag_respond(Req, DiskEtag, fun() ->
+ send_doc_efficiently(Req, Doc, [{"ETag", DiskEtag}], Options)
+ end);
+ _ ->
+ send_doc_efficiently(Req, Doc, [], Options)
end.
-
-send_doc_efficiently(Req, #doc{atts=[]}=Doc, Headers, Options) ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
-send_doc_efficiently(#httpd{mochi_req = MochiReq} = Req,
- #doc{atts = Atts} = Doc, Headers, Options) ->
+send_doc_efficiently(Req, #doc{atts = []} = Doc, Headers, Options) ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+send_doc_efficiently(
+ #httpd{mochi_req = MochiReq} = Req,
+ #doc{atts = Atts} = Doc,
+ Headers,
+ Options
+) ->
case lists:member(attachments, Options) of
- true ->
- case MochiReq:accepts_content_type("multipart/related") of
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
true ->
- Boundary = couch_uuids:random(),
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc,
- [attachments, follows, att_encoding_info | Options])),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
- Boundary,JsonBytes, Atts, true),
- CType = {"Content-Type", ?b2l(ContentType)},
- {ok, Resp} = start_response_length(Req, 200, [CType|Headers], Len),
- couch_doc:doc_to_multi_part_stream(Boundary,JsonBytes,Atts,
- fun(Data) -> couch_httpd:send(Resp, Data) end, true)
- end;
- false ->
- send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
+ case MochiReq:accepts_content_type("multipart/related") of
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options));
+ true ->
+ Boundary = couch_uuids:random(),
+ JsonBytes = ?JSON_ENCODE(
+ couch_doc:to_json_obj(
+ Doc,
+ [attachments, follows, att_encoding_info | Options]
+ )
+ ),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary, JsonBytes, Atts, true
+ ),
+ CType = {"Content-Type", ?b2l(ContentType)},
+ {ok, Resp} = start_response_length(Req, 200, [CType | Headers], Len),
+ couch_doc:doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> couch_httpd:send(Resp, Data) end,
+ true
+ )
+ end;
+ false ->
+ send_json(Req, 200, Headers, couch_doc:to_json_obj(Doc, Options))
end.
send_docs_multipart(Req, Results, Options1) ->
OuterBoundary = couch_uuids:random(),
InnerBoundary = couch_uuids:random(),
Options = [attachments, follows, att_encoding_info | Options1],
- CType = {"Content-Type",
- "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
+ CType = {"Content-Type", "multipart/mixed; boundary=\"" ++ ?b2l(OuterBoundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 200, [CType]),
couch_httpd:send_chunk(Resp, <<"--", OuterBoundary/binary>>),
lists:foreach(
- fun({ok, #doc{atts=Atts}=Doc}) ->
- JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
- {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
- InnerBoundary, JsonBytes, Atts, true),
- couch_httpd:send_chunk(Resp, <<"\r\nContent-Type: ",
- ContentType/binary, "\r\n\r\n">>),
- couch_doc:doc_to_multi_part_stream(InnerBoundary, JsonBytes, Atts,
- fun(Data) -> couch_httpd:send_chunk(Resp, Data)
- end, true),
- couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
- ({{not_found, missing}, RevId}) ->
- RevStr = couch_doc:rev_to_str(RevId),
- Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
- couch_httpd:send_chunk(Resp,
- [<<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
- Json,
- <<"\r\n--", OuterBoundary/binary>>])
- end, Results),
+ fun
+ ({ok, #doc{atts = Atts} = Doc}) ->
+ JsonBytes = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, Options)),
+ {ContentType, _Len} = couch_doc:len_doc_to_multi_part_stream(
+ InnerBoundary, JsonBytes, Atts, true
+ ),
+ couch_httpd:send_chunk(
+ Resp, <<"\r\nContent-Type: ", ContentType/binary, "\r\n\r\n">>
+ ),
+ couch_doc:doc_to_multi_part_stream(
+ InnerBoundary,
+ JsonBytes,
+ Atts,
+ fun(Data) -> couch_httpd:send_chunk(Resp, Data) end,
+ true
+ ),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", OuterBoundary/binary>>);
+ ({{not_found, missing}, RevId}) ->
+ RevStr = couch_doc:rev_to_str(RevId),
+ Json = ?JSON_ENCODE({[{<<"missing">>, RevStr}]}),
+ couch_httpd:send_chunk(
+ Resp,
+ [
+ <<"\r\nContent-Type: application/json; error=\"true\"\r\n\r\n">>,
+ Json,
+ <<"\r\n--", OuterBoundary/binary>>
+ ]
+ )
+ end,
+ Results
+ ),
couch_httpd:send_chunk(Resp, <<"--">>),
couch_httpd:last_chunk(Resp).
send_ranges_multipart(Req, ContentType, Len, Att, Ranges) ->
Boundary = couch_uuids:random(),
- CType = {"Content-Type",
- "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
+ CType = {"Content-Type", "multipart/byteranges; boundary=\"" ++ ?b2l(Boundary) ++ "\""},
{ok, Resp} = start_chunked_response(Req, 206, [CType]),
couch_httpd:send_chunk(Resp, <<"--", Boundary/binary>>),
- lists:foreach(fun({From, To}) ->
- ContentRange = ?l2b(make_content_range(From, To, Len)),
- couch_httpd:send_chunk(Resp,
- <<"\r\nContent-Type: ", ContentType/binary, "\r\n",
- "Content-Range: ", ContentRange/binary, "\r\n",
- "\r\n">>),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
- end, Ranges),
+ lists:foreach(
+ fun({From, To}) ->
+ ContentRange = ?l2b(make_content_range(From, To, Len)),
+ couch_httpd:send_chunk(
+ Resp,
+ <<"\r\nContent-Type: ", ContentType/binary, "\r\n", "Content-Range: ",
+ ContentRange/binary, "\r\n", "\r\n">>
+ ),
+ couch_att:range_foldl(
+ Att,
+ From,
+ To + 1,
+ fun(Seg, _) -> send_chunk(Resp, Seg) end,
+ {ok, Resp}
+ ),
+ couch_httpd:send_chunk(Resp, <<"\r\n--", Boundary/binary>>)
+ end,
+ Ranges
+ ),
couch_httpd:send_chunk(Resp, <<"--">>),
couch_httpd:last_chunk(Resp),
{ok, Resp}.
@@ -726,11 +822,15 @@ make_content_range(From, To, Len) ->
io_lib:format("bytes ~B-~B/~B", [From, To, Len]).
update_doc_result_to_json({{Id, Rev}, Error}) ->
- {_Code, Err, Msg} = couch_httpd:error_info(Error),
- {[{id, Id}, {rev, couch_doc:rev_to_str(Rev)},
- {error, Err}, {reason, Msg}]}.
-
-update_doc_result_to_json(#doc{id=DocId}, Result) ->
+ {_Code, Err, Msg} = couch_httpd:error_info(Error),
+ {[
+ {id, Id},
+ {rev, couch_doc:rev_to_str(Rev)},
+ {error, Err},
+ {reason, Msg}
+ ]}.
+
+update_doc_result_to_json(#doc{id = DocId}, Result) ->
update_doc_result_to_json(DocId, Result);
update_doc_result_to_json(DocId, {ok, NewRev}) ->
{[{ok, true}, {id, DocId}, {rev, couch_doc:rev_to_str(NewRev)}]};
@@ -738,10 +838,11 @@ update_doc_result_to_json(DocId, Error) ->
{_Code, ErrorStr, Reason} = couch_httpd:error_info(Error),
{[{id, DocId}, {error, ErrorStr}, {reason, Reason}]}.
-
-update_doc(Req, Db, DocId, #doc{deleted=false}=Doc) ->
+update_doc(Req, Db, DocId, #doc{deleted = false} = Doc) ->
DbName = couch_db:name(Db),
- Loc = absolute_uri(Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)),
+ Loc = absolute_uri(
+ Req, "/" ++ couch_util:url_encode(DbName) ++ "/" ++ couch_util:url_encode(DocId)
+ ),
update_doc(Req, Db, DocId, Doc, [{"Location", Loc}]);
update_doc(Req, Db, DocId, Doc) ->
update_doc(Req, Db, DocId, Doc, []).
@@ -752,70 +853,85 @@ update_doc(Req, Db, DocId, Doc, Headers) ->
} = parse_doc_query(Req),
update_doc(Req, Db, DocId, Doc, Headers, UpdateType).
-update_doc(Req, Db, DocId, #doc{deleted=Deleted}=Doc, Headers, UpdateType) ->
+update_doc(Req, Db, DocId, #doc{deleted = Deleted} = Doc, Headers, UpdateType) ->
case couch_httpd:header_value(Req, "X-Couch-Full-Commit") of
- "true" ->
- Options = [full_commit];
- "false" ->
- Options = [delay_commit];
- _ ->
- Options = []
+ "true" ->
+ Options = [full_commit];
+ "false" ->
+ Options = [delay_commit];
+ _ ->
+ Options = []
end,
case couch_httpd:qs_value(Req, "batch") of
- "ok" ->
- % async batching
- spawn(fun() ->
- case catch(couch_db:update_doc(Db, Doc, Options, UpdateType)) of
- {ok, _} -> ok;
- Error ->
- couch_log:info("Batch doc error (~s): ~p",[DocId, Error])
+ "ok" ->
+ % async batching
+ spawn(fun() ->
+ case catch (couch_db:update_doc(Db, Doc, Options, UpdateType)) of
+ {ok, _} -> ok;
+ Error -> couch_log:info("Batch doc error (~s): ~p", [DocId, Error])
end
end),
- send_json(Req, 202, Headers, {[
- {ok, true},
- {id, DocId}
- ]});
- _Normal ->
- % normal
- {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
- send_json(Req,
- if Deleted orelse Req#httpd.method == 'DELETE' -> 200;
- true -> 201 end,
- ResponseHeaders, {[
- {ok, true},
- {id, DocId},
- {rev, NewRevStr}]})
+ send_json(
+ Req,
+ 202,
+ Headers,
+ {[
+ {ok, true},
+ {id, DocId}
+ ]}
+ );
+ _Normal ->
+ % normal
+ {ok, NewRev} = couch_db:update_doc(Db, Doc, Options, UpdateType),
+ NewRevStr = couch_doc:rev_to_str(NewRev),
+ ResponseHeaders = [{"ETag", <<"\"", NewRevStr/binary, "\"">>}] ++ Headers,
+ send_json(
+ Req,
+ if
+ Deleted orelse Req#httpd.method == 'DELETE' -> 200;
+ true -> 201
+ end,
+ ResponseHeaders,
+ {[
+ {ok, true},
+ {id, DocId},
+ {rev, NewRevStr}
+ ]}
+ )
end.
-couch_doc_from_req(Req, _Db, DocId, #doc{revs=Revs}=Doc) ->
+couch_doc_from_req(Req, _Db, DocId, #doc{revs = Revs} = Doc) ->
validate_attachment_names(Doc),
- Rev = case couch_httpd:qs_value(Req, "rev") of
- undefined ->
- undefined;
- QSRev ->
- couch_doc:parse_rev(QSRev)
- end,
+ Rev =
+ case couch_httpd:qs_value(Req, "rev") of
+ undefined ->
+ undefined;
+ QSRev ->
+ couch_doc:parse_rev(QSRev)
+ end,
Revs2 =
- case Revs of
- {Start, [RevId|_]} ->
- if Rev /= undefined andalso Rev /= {Start, RevId} ->
- throw({bad_request, "Document rev from request body and query "
- "string have different values"});
- true ->
- case extract_header_rev(Req, {Start, RevId}) of
- missing_rev -> {0, []};
- _ -> Revs
- end
- end;
- _ ->
- case extract_header_rev(Req, Rev) of
- missing_rev -> {0, []};
- {Pos, RevId2} -> {Pos, [RevId2]}
- end
- end,
- Doc#doc{id=DocId, revs=Revs2};
+ case Revs of
+ {Start, [RevId | _]} ->
+ if
+ Rev /= undefined andalso Rev /= {Start, RevId} ->
+ throw(
+ {bad_request,
+ "Document rev from request body and query "
+ "string have different values"}
+ );
+ true ->
+ case extract_header_rev(Req, {Start, RevId}) of
+ missing_rev -> {0, []};
+ _ -> Revs
+ end
+ end;
+ _ ->
+ case extract_header_rev(Req, Rev) of
+ missing_rev -> {0, []};
+ {Pos, RevId2} -> {Pos, [RevId2]}
+ end
+ end,
+ Doc#doc{id = DocId, revs = Revs2};
couch_doc_from_req(Req, Db, DocId, Json) ->
Doc = couch_db:doc_from_json_obj_validate(Db, Json),
couch_doc_from_req(Req, Db, DocId, Doc).
@@ -826,233 +942,283 @@ couch_doc_from_req(Req, Db, DocId, Json) ->
couch_doc_open(Db, DocId, Rev, Options) ->
case Rev of
- nil -> % open most recent rev
- case couch_db:open_doc(Db, DocId, Options) of
- {ok, Doc} ->
- Doc;
- Error ->
- throw(Error)
- end;
- _ -> % open a specific rev (deletions come back as stubs)
- case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
- {ok, [{ok, Doc}]} ->
- Doc;
- {ok, [{{not_found, missing}, Rev}]} ->
- throw(not_found);
- {ok, [Else]} ->
- throw(Else)
- end
- end.
+ % open most recent rev
+ nil ->
+ case couch_db:open_doc(Db, DocId, Options) of
+ {ok, Doc} ->
+ Doc;
+ Error ->
+ throw(Error)
+ end;
+ % open a specific rev (deletions come back as stubs)
+ _ ->
+ case couch_db:open_doc_revs(Db, DocId, [Rev], Options) of
+ {ok, [{ok, Doc}]} ->
+ Doc;
+ {ok, [{{not_found, missing}, Rev}]} ->
+ throw(not_found);
+ {ok, [Else]} ->
+ throw(Else)
+ end
+ end.
% Attachment request handlers
-db_attachment_req(#httpd{method='GET',mochi_req=MochiReq}=Req, Db, DocId, FileNameParts) ->
- FileName = list_to_binary(mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts),"/")),
+db_attachment_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId, FileNameParts) ->
+ FileName = list_to_binary(
+ mochiweb_util:join(lists:map(fun binary_to_list/1, FileNameParts), "/")
+ ),
#doc_query_args{
- rev=Rev,
- options=Options
+ rev = Rev,
+ options = Options
} = parse_doc_query(Req),
#doc{
- atts=Atts
+ atts = Atts
} = Doc = couch_doc_open(Db, DocId, Rev, Options),
case [A || A <- Atts, couch_att:fetch(name, A) == FileName] of
- [] ->
- throw({not_found, "Document is missing attachment"});
- [Att] ->
- [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch([type, encoding, disk_len, att_len, md5], Att),
- Etag = case Md5 of
- <<>> -> couch_httpd:doc_etag(Doc);
- _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
- end,
- ReqAcceptsAttEnc = lists:member(
- atom_to_list(Enc),
- couch_httpd:accepted_encodings(Req)
- ),
- Len = case {Enc, ReqAcceptsAttEnc} of
- {identity, _} ->
- % stored and served in identity form
- DiskLen;
- {_, false} when DiskLen =/= AttLen ->
- % Stored encoded, but client doesn't accept the encoding we used,
- % so we need to decode on the fly. DiskLen is the identity length
- % of the attachment.
- DiskLen;
- {_, true} ->
- % Stored and served encoded. AttLen is the encoded length.
- AttLen;
- _ ->
- % We received an encoded attachment and stored it as such, so we
- % don't know the identity length. The client doesn't accept the
- % encoding, and since we cannot serve a correct Content-Length
- % header we'll fall back to a chunked response.
- undefined
- end,
- Headers = [
- {"ETag", Etag},
- {"Cache-Control", "must-revalidate"},
- {"Content-Type", binary_to_list(Type)}
- ] ++ case ReqAcceptsAttEnc of
- true when Enc =/= identity ->
-            % RFC 2616 says that the 'identity' encoding should not be used in
- % the Content-Encoding header
- [{"Content-Encoding", atom_to_list(Enc)}];
- _ ->
- []
- end ++ case Enc of
- identity ->
- [{"Accept-Ranges", "bytes"}];
- _ ->
- [{"Accept-Ranges", "none"}]
- end,
- AttFun = case ReqAcceptsAttEnc of
- false ->
- fun couch_att:foldl_decode/3;
- true ->
- fun couch_att:foldl/3
- end,
- couch_httpd:etag_respond(
- Req,
- Etag,
- fun() ->
- case Len of
- undefined ->
- {ok, Resp} = start_chunked_response(Req, 200, Headers),
- AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
- last_chunk(Resp);
- _ ->
- Ranges = parse_ranges(MochiReq:get(range), Len),
- case {Enc, Ranges} of
- {identity, [{From, To}]} ->
- Headers1 = [{"Content-Range", make_content_range(From, To, Len)}]
- ++ Headers,
- {ok, Resp} = start_response_length(Req, 206, Headers1, To - From + 1),
- couch_att:range_foldl(Att, From, To + 1,
- fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp});
- {identity, Ranges} when is_list(Ranges) andalso length(Ranges) < 10 ->
- send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ [] ->
+ throw({not_found, "Document is missing attachment"});
+ [Att] ->
+ [Type, Enc, DiskLen, AttLen, Md5] = couch_att:fetch(
+ [type, encoding, disk_len, att_len, md5], Att
+ ),
+ Etag =
+ case Md5 of
+ <<>> -> couch_httpd:doc_etag(Doc);
+ _ -> "\"" ++ ?b2l(base64:encode(Md5)) ++ "\""
+ end,
+ ReqAcceptsAttEnc = lists:member(
+ atom_to_list(Enc),
+ couch_httpd:accepted_encodings(Req)
+ ),
+ Len =
+ case {Enc, ReqAcceptsAttEnc} of
+ {identity, _} ->
+ % stored and served in identity form
+ DiskLen;
+ {_, false} when DiskLen =/= AttLen ->
+ % Stored encoded, but client doesn't accept the encoding we used,
+ % so we need to decode on the fly. DiskLen is the identity length
+ % of the attachment.
+ DiskLen;
+ {_, true} ->
+ % Stored and served encoded. AttLen is the encoded length.
+ AttLen;
+ _ ->
+ % We received an encoded attachment and stored it as such, so we
+ % don't know the identity length. The client doesn't accept the
+ % encoding, and since we cannot serve a correct Content-Length
+ % header we'll fall back to a chunked response.
+ undefined
+ end,
+ Headers =
+ [
+ {"ETag", Etag},
+ {"Cache-Control", "must-revalidate"},
+ {"Content-Type", binary_to_list(Type)}
+ ] ++
+ case ReqAcceptsAttEnc of
+ true when Enc =/= identity ->
+                        % RFC 2616 says that the 'identity' encoding should not be used in
+ % the Content-Encoding header
+ [{"Content-Encoding", atom_to_list(Enc)}];
_ ->
- Headers1 = Headers ++
- if Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
- [{"Content-MD5", base64:encode(Md5)}];
- true ->
- []
- end,
- {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
- AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ []
+ end ++
+ case Enc of
+ identity ->
+ [{"Accept-Ranges", "bytes"}];
+ _ ->
+ [{"Accept-Ranges", "none"}]
+ end,
+ AttFun =
+ case ReqAcceptsAttEnc of
+ false ->
+ fun couch_att:foldl_decode/3;
+ true ->
+ fun couch_att:foldl/3
+ end,
+ couch_httpd:etag_respond(
+ Req,
+ Etag,
+ fun() ->
+ case Len of
+ undefined ->
+ {ok, Resp} = start_chunked_response(Req, 200, Headers),
+ AttFun(Att, fun(Seg, _) -> send_chunk(Resp, Seg) end, {ok, Resp}),
+ last_chunk(Resp);
+ _ ->
+ Ranges = parse_ranges(MochiReq:get(range), Len),
+ case {Enc, Ranges} of
+ {identity, [{From, To}]} ->
+ Headers1 =
+ [{"Content-Range", make_content_range(From, To, Len)}] ++
+ Headers,
+ {ok, Resp} = start_response_length(
+ Req, 206, Headers1, To - From + 1
+ ),
+ couch_att:range_foldl(
+ Att,
+ From,
+ To + 1,
+ fun(Seg, _) -> send(Resp, Seg) end,
+ {ok, Resp}
+ );
+ {identity, Ranges} when
+ is_list(Ranges) andalso length(Ranges) < 10
+ ->
+ send_ranges_multipart(Req, Type, Len, Att, Ranges);
+ _ ->
+ Headers1 =
+ Headers ++
+ if
+ Enc =:= identity orelse ReqAcceptsAttEnc =:= true ->
+ [{"Content-MD5", base64:encode(Md5)}];
+ true ->
+ []
+ end,
+ {ok, Resp} = start_response_length(Req, 200, Headers1, Len),
+ AttFun(Att, fun(Seg, _) -> send(Resp, Seg) end, {ok, Resp})
+ end
end
end
- end
- )
+ )
end;
-
-
-db_attachment_req(#httpd{method=Method,mochi_req=MochiReq}=Req, Db, DocId, FileNameParts)
- when (Method == 'PUT') or (Method == 'DELETE') ->
+db_attachment_req(
+ #httpd{method = Method, mochi_req = MochiReq} = Req, Db, DocId, FileNameParts
+) when
+ (Method == 'PUT') or (Method == 'DELETE')
+->
FileName = validate_attachment_name(
- mochiweb_util:join(
- lists:map(fun binary_to_list/1,
- FileNameParts),"/")),
- NewAtt = case Method of
- 'DELETE' ->
- [];
- _ ->
- MimeType = case couch_httpd:header_value(Req,"Content-Type") of
- % We could throw an error here or guess by the FileName.
- % Currently, just giving it a default.
- undefined -> <<"application/octet-stream">>;
- CType -> list_to_binary(CType)
- end,
- Data = case couch_httpd:body_length(Req) of
- undefined ->
- <<"">>;
- {unknown_transfer_encoding, Unknown} ->
- exit({unknown_transfer_encoding, Unknown});
- chunked ->
- fun(MaxChunkSize, ChunkFun, InitState) ->
- couch_httpd:recv_chunked(
- Req, MaxChunkSize, ChunkFun, InitState
- )
- end;
- 0 ->
- <<"">>;
- Length when is_integer(Length) ->
- Expect = case couch_httpd:header_value(Req, "expect") of
+ mochiweb_util:join(
+ lists:map(
+ fun binary_to_list/1,
+ FileNameParts
+ ),
+ "/"
+ )
+ ),
+ NewAtt =
+ case Method of
+ 'DELETE' ->
+ [];
+ _ ->
+ MimeType =
+ case couch_httpd:header_value(Req, "Content-Type") of
+ % We could throw an error here or guess by the FileName.
+ % Currently, just giving it a default.
+ undefined -> <<"application/octet-stream">>;
+ CType -> list_to_binary(CType)
+ end,
+ Data =
+ case couch_httpd:body_length(Req) of
undefined ->
- undefined;
- Value when is_list(Value) ->
- string:to_lower(Value)
+ <<"">>;
+ {unknown_transfer_encoding, Unknown} ->
+ exit({unknown_transfer_encoding, Unknown});
+ chunked ->
+ fun(MaxChunkSize, ChunkFun, InitState) ->
+ couch_httpd:recv_chunked(
+ Req, MaxChunkSize, ChunkFun, InitState
+ )
+ end;
+ 0 ->
+ <<"">>;
+ Length when is_integer(Length) ->
+ Expect =
+ case couch_httpd:header_value(Req, "expect") of
+ undefined ->
+ undefined;
+ Value when is_list(Value) ->
+ string:to_lower(Value)
+ end,
+ case Expect of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _Else ->
+ ok
+ end,
+ fun() -> couch_httpd:recv(Req, 0) end;
+ Length ->
+ exit({length_not_integer, Length})
end,
- case Expect of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _Else ->
- ok
+ AttLen =
+ case couch_httpd:header_value(Req, "Content-Length") of
+ undefined -> undefined;
+ Len -> list_to_integer(Len)
end,
- fun() -> couch_httpd:recv(Req, 0) end;
- Length ->
- exit({length_not_integer, Length})
- end,
- AttLen = case couch_httpd:header_value(Req,"Content-Length") of
- undefined -> undefined;
- Len -> list_to_integer(Len)
- end,
- ContentEnc = string:to_lower(string:strip(
- couch_httpd:header_value(Req,"Content-Encoding","identity")
- )),
- Encoding = case ContentEnc of
- "identity" ->
- identity;
- "gzip" ->
- gzip;
- _ ->
- throw({
- bad_ctype,
- "Only gzip and identity content-encodings are supported"
- })
- end,
- [couch_att:new([
- {name, FileName},
- {type, MimeType},
- {data, Data},
- {att_len, AttLen},
- {md5, get_md5_header(Req)},
- {encoding, Encoding}
- ])]
- end,
+ ContentEnc = string:to_lower(
+ string:strip(
+ couch_httpd:header_value(Req, "Content-Encoding", "identity")
+ )
+ ),
+ Encoding =
+ case ContentEnc of
+ "identity" ->
+ identity;
+ "gzip" ->
+ gzip;
+ _ ->
+ throw({
+ bad_ctype,
+ "Only gzip and identity content-encodings are supported"
+ })
+ end,
+ [
+ couch_att:new([
+ {name, FileName},
+ {type, MimeType},
+ {data, Data},
+ {att_len, AttLen},
+ {md5, get_md5_header(Req)},
+ {encoding, Encoding}
+ ])
+ ]
+ end,
- Doc = case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
- missing_rev -> % make the new doc
- if Method =/= 'DELETE' -> ok; true ->
- % check for the existence of the doc to handle the 404 case.
- couch_doc_open(Db, DocId, nil, [])
- end,
- couch_db:validate_docid(Db, DocId),
- #doc{id=DocId};
- Rev ->
- case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
- {ok, [{ok, Doc0}]} -> Doc0;
- {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
- {ok, [Error]} -> throw(Error)
- end
- end,
+ Doc =
+ case extract_header_rev(Req, couch_httpd:qs_value(Req, "rev")) of
+ % make the new doc
+ missing_rev ->
+ if
+ Method =/= 'DELETE' ->
+ ok;
+ true ->
+ % check for the existence of the doc to handle the 404 case.
+ couch_doc_open(Db, DocId, nil, [])
+ end,
+ couch_db:validate_docid(Db, DocId),
+ #doc{id = DocId};
+ Rev ->
+ case couch_db:open_doc_revs(Db, DocId, [Rev], []) of
+ {ok, [{ok, Doc0}]} -> Doc0;
+ {ok, [{{not_found, missing}, Rev}]} -> throw(conflict);
+ {ok, [Error]} -> throw(Error)
+ end
+ end,
- #doc{atts=Atts} = Doc,
+ #doc{atts = Atts} = Doc,
DocEdited = Doc#doc{
atts = NewAtt ++ [A || A <- Atts, couch_att:fetch(name, A) /= FileName]
},
- Headers = case Method of
- 'DELETE' ->
- [];
- _ ->
- [{"Location", absolute_uri(Req, "/" ++
- couch_util:url_encode(couch_db:name(Db)) ++ "/" ++
- couch_util:url_encode(DocId) ++ "/" ++
- couch_util:url_encode(FileName)
- )}]
- end,
+ Headers =
+ case Method of
+ 'DELETE' ->
+ [];
+ _ ->
+ [
+ {"Location",
+ absolute_uri(
+ Req,
+ "/" ++
+ couch_util:url_encode(couch_db:name(Db)) ++ "/" ++
+ couch_util:url_encode(DocId) ++ "/" ++
+ couch_util:url_encode(FileName)
+ )}
+ ]
+ end,
update_doc(Req, Db, DocId, DocEdited, Headers);
-
db_attachment_req(Req, _Db, _DocId, _FileNameParts) ->
send_method_not_allowed(Req, "DELETE,GET,HEAD,PUT").
@@ -1065,17 +1231,19 @@ parse_ranges(Ranges, Len) ->
parse_ranges([], _Len, Acc) ->
lists:reverse(Acc);
-parse_ranges([{0, none}|_], _Len, _Acc) ->
+parse_ranges([{0, none} | _], _Len, _Acc) ->
undefined;
-parse_ranges([{From, To}|_], _Len, _Acc) when is_integer(From) andalso is_integer(To) andalso To < From ->
+parse_ranges([{From, To} | _], _Len, _Acc) when
+ is_integer(From) andalso is_integer(To) andalso To < From
+->
throw(requested_range_not_satisfiable);
-parse_ranges([{From, To}|Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
- parse_ranges([{From, Len-1}] ++ Rest, Len, Acc);
-parse_ranges([{none, To}|Rest], Len, Acc) ->
+parse_ranges([{From, To} | Rest], Len, Acc) when is_integer(To) andalso To >= Len ->
+ parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
+parse_ranges([{none, To} | Rest], Len, Acc) ->
parse_ranges([{Len - To, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From, none}|Rest], Len, Acc) ->
+parse_ranges([{From, none} | Rest], Len, Acc) ->
parse_ranges([{From, Len - 1}] ++ Rest, Len, Acc);
-parse_ranges([{From,To}|Rest], Len, Acc) ->
+parse_ranges([{From, To} | Rest], Len, Acc) ->
parse_ranges(Rest, Len, [{From, To}] ++ Acc).
get_md5_header(Req) ->
@@ -1099,99 +1267,111 @@ get_md5_header(Req) ->
end.
parse_doc_query(Req) ->
- lists:foldl(fun({Key,Value}, Args) ->
- case {Key, Value} of
- {"attachments", "true"} ->
- Options = [attachments | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"meta", "true"} ->
- Options = [revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"revs", "true"} ->
- Options = [revs | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"local_seq", "true"} ->
- Options = [local_seq | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"revs_info", "true"} ->
- Options = [revs_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"conflicts", "true"} ->
- Options = [conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"deleted_conflicts", "true"} ->
- Options = [deleted_conflicts | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"rev", Rev} ->
- Args#doc_query_args{rev=couch_doc:parse_rev(Rev)};
- {"open_revs", "all"} ->
- Args#doc_query_args{open_revs=all};
- {"open_revs", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{open_revs=couch_doc:parse_revs(JsonArray)};
- {"latest", "true"} ->
- Options = [latest | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- {"atts_since", RevsJsonStr} ->
- JsonArray = ?JSON_DECODE(RevsJsonStr),
- Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
- {"new_edits", "false"} ->
- Args#doc_query_args{update_type=replicated_changes};
- {"new_edits", "true"} ->
- Args#doc_query_args{update_type=interactive_edit};
- {"att_encoding_info", "true"} ->
- Options = [att_encoding_info | Args#doc_query_args.options],
- Args#doc_query_args{options=Options};
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #doc_query_args{}, couch_httpd:qs(Req)).
+ lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {Key, Value} of
+ {"attachments", "true"} ->
+ Options = [attachments | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ {"meta", "true"} ->
+ Options = [
+ revs_info, conflicts, deleted_conflicts | Args#doc_query_args.options
+ ],
+ Args#doc_query_args{options = Options};
+ {"revs", "true"} ->
+ Options = [revs | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ {"local_seq", "true"} ->
+ Options = [local_seq | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ {"revs_info", "true"} ->
+ Options = [revs_info | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ {"conflicts", "true"} ->
+ Options = [conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ {"deleted_conflicts", "true"} ->
+ Options = [deleted_conflicts | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ {"rev", Rev} ->
+ Args#doc_query_args{rev = couch_doc:parse_rev(Rev)};
+ {"open_revs", "all"} ->
+ Args#doc_query_args{open_revs = all};
+ {"open_revs", RevsJsonStr} ->
+ JsonArray = ?JSON_DECODE(RevsJsonStr),
+ Args#doc_query_args{open_revs = couch_doc:parse_revs(JsonArray)};
+ {"latest", "true"} ->
+ Options = [latest | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ {"atts_since", RevsJsonStr} ->
+ JsonArray = ?JSON_DECODE(RevsJsonStr),
+ Args#doc_query_args{atts_since = couch_doc:parse_revs(JsonArray)};
+ {"new_edits", "false"} ->
+ Args#doc_query_args{update_type = replicated_changes};
+ {"new_edits", "true"} ->
+ Args#doc_query_args{update_type = interactive_edit};
+ {"att_encoding_info", "true"} ->
+ Options = [att_encoding_info | Args#doc_query_args.options],
+ Args#doc_query_args{options = Options};
+ % unknown key value pair, ignore.
+ _Else ->
+ Args
+ end
+ end,
+ #doc_query_args{},
+ couch_httpd:qs(Req)
+ ).
parse_changes_query(Req, Db) ->
- ChangesArgs = lists:foldl(fun({Key, Value}, Args) ->
- case {string:to_lower(Key), Value} of
- {"feed", "live"} ->
- %% sugar for continuous
- Args#changes_args{feed="continuous"};
- {"feed", _} ->
- Args#changes_args{feed=Value};
- {"descending", "true"} ->
- Args#changes_args{dir=rev};
- {"since", "now"} ->
- UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
- Args#changes_args{since=UpdateSeq};
- {"since", _} ->
- Args#changes_args{since=list_to_integer(Value)};
- {"last-event-id", _} ->
- Args#changes_args{since=list_to_integer(Value)};
- {"limit", _} ->
- Args#changes_args{limit=list_to_integer(Value)};
- {"style", _} ->
- Args#changes_args{style=list_to_existing_atom(Value)};
- {"heartbeat", "true"} ->
- Args#changes_args{heartbeat=true};
- {"heartbeat", _} ->
- Args#changes_args{heartbeat=list_to_integer(Value)};
- {"timeout", _} ->
- Args#changes_args{timeout=list_to_integer(Value)};
- {"include_docs", "true"} ->
- Args#changes_args{include_docs=true};
- {"attachments", "true"} ->
- Opts = Args#changes_args.doc_options,
- Args#changes_args{doc_options=[attachments|Opts]};
- {"att_encoding_info", "true"} ->
- Opts = Args#changes_args.doc_options,
- Args#changes_args{doc_options=[att_encoding_info|Opts]};
- {"conflicts", "true"} ->
- Args#changes_args{conflicts=true};
- {"filter", _} ->
- Args#changes_args{filter=Value};
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, #changes_args{}, couch_httpd:qs(Req)),
+ ChangesArgs = lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {string:to_lower(Key), Value} of
+ {"feed", "live"} ->
+ %% sugar for continuous
+ Args#changes_args{feed = "continuous"};
+ {"feed", _} ->
+ Args#changes_args{feed = Value};
+ {"descending", "true"} ->
+ Args#changes_args{dir = rev};
+ {"since", "now"} ->
+ UpdateSeq = couch_util:with_db(couch_db:name(Db), fun(WDb) ->
+ couch_db:get_update_seq(WDb)
+ end),
+ Args#changes_args{since = UpdateSeq};
+ {"since", _} ->
+ Args#changes_args{since = list_to_integer(Value)};
+ {"last-event-id", _} ->
+ Args#changes_args{since = list_to_integer(Value)};
+ {"limit", _} ->
+ Args#changes_args{limit = list_to_integer(Value)};
+ {"style", _} ->
+ Args#changes_args{style = list_to_existing_atom(Value)};
+ {"heartbeat", "true"} ->
+ Args#changes_args{heartbeat = true};
+ {"heartbeat", _} ->
+ Args#changes_args{heartbeat = list_to_integer(Value)};
+ {"timeout", _} ->
+ Args#changes_args{timeout = list_to_integer(Value)};
+ {"include_docs", "true"} ->
+ Args#changes_args{include_docs = true};
+ {"attachments", "true"} ->
+ Opts = Args#changes_args.doc_options,
+ Args#changes_args{doc_options = [attachments | Opts]};
+ {"att_encoding_info", "true"} ->
+ Opts = Args#changes_args.doc_options,
+ Args#changes_args{doc_options = [att_encoding_info | Opts]};
+ {"conflicts", "true"} ->
+ Args#changes_args{conflicts = true};
+ {"filter", _} ->
+ Args#changes_args{filter = Value};
+ % unknown key value pair, ignore.
+ _Else ->
+ Args
+ end
+ end,
+ #changes_args{},
+ couch_httpd:qs(Req)
+ ),
%% if it's an EventSource request with a Last-event-ID header
%% that should override the `since` query string, since it's
%% probably the browser reconnecting.
@@ -1201,60 +1381,62 @@ parse_changes_query(Req, Db) ->
undefined ->
ChangesArgs;
Value ->
- ChangesArgs#changes_args{since=list_to_integer(Value)}
+ ChangesArgs#changes_args{since = list_to_integer(Value)}
end;
_ ->
ChangesArgs
end.
-extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev)->
+extract_header_rev(Req, ExplicitRev) when is_binary(ExplicitRev) or is_list(ExplicitRev) ->
extract_header_rev(Req, couch_doc:parse_rev(ExplicitRev));
extract_header_rev(Req, ExplicitRev) ->
- Etag = case couch_httpd:header_value(Req, "If-Match") of
- undefined -> undefined;
- Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
- end,
+ Etag =
+ case couch_httpd:header_value(Req, "If-Match") of
+ undefined -> undefined;
+ Value -> couch_doc:parse_rev(string:strip(Value, both, $"))
+ end,
case {ExplicitRev, Etag} of
- {undefined, undefined} -> missing_rev;
- {_, undefined} -> ExplicitRev;
- {undefined, _} -> Etag;
- _ when ExplicitRev == Etag -> Etag;
- _ ->
- throw({bad_request, "Document rev and etag have different values"})
+ {undefined, undefined} -> missing_rev;
+ {_, undefined} -> ExplicitRev;
+ {undefined, _} -> Etag;
+ _ when ExplicitRev == Etag -> Etag;
+ _ -> throw({bad_request, "Document rev and etag have different values"})
end.
-
parse_copy_destination_header(Req) ->
case couch_httpd:header_value(Req, "Destination") of
- undefined ->
- throw({bad_request, "Destination header is mandatory for COPY."});
- Destination ->
- case re:run(Destination, "^https?://", [{capture, none}]) of
- match ->
- throw({bad_request, "Destination URL must be relative."});
- nomatch ->
- % see if ?rev=revid got appended to the Destination header
- case re:run(Destination, "\\?", [{capture, none}]) of
- nomatch ->
- {list_to_binary(Destination), {0, []}};
- match ->
- [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
- [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
- {Pos, RevId} = couch_doc:parse_rev(Rev),
- {list_to_binary(DocId), {Pos, [RevId]}}
+ undefined ->
+ throw({bad_request, "Destination header is mandatory for COPY."});
+ Destination ->
+ case re:run(Destination, "^https?://", [{capture, none}]) of
+ match ->
+ throw({bad_request, "Destination URL must be relative."});
+ nomatch ->
+ % see if ?rev=revid got appended to the Destination header
+ case re:run(Destination, "\\?", [{capture, none}]) of
+ nomatch ->
+ {list_to_binary(Destination), {0, []}};
+ match ->
+ [DocId, RevQs] = re:split(Destination, "\\?", [{return, list}]),
+ [_RevQueryKey, Rev] = re:split(RevQs, "=", [{return, list}]),
+ {Pos, RevId} = couch_doc:parse_rev(Rev),
+ {list_to_binary(DocId), {Pos, [RevId]}}
+ end
end
- end
end.
validate_attachment_names(Doc) ->
- lists:foreach(fun(Att) ->
- Name = couch_att:fetch(name, Att),
- validate_attachment_name(Name)
- end, Doc#doc.atts).
+ lists:foreach(
+ fun(Att) ->
+ Name = couch_att:fetch(name, Att),
+ validate_attachment_name(Name)
+ end,
+ Doc#doc.atts
+ ).
validate_attachment_name(Name) when is_list(Name) ->
validate_attachment_name(list_to_binary(Name));
-validate_attachment_name(<<"_",_/binary>>) ->
+validate_attachment_name(<<"_", _/binary>>) ->
throw({bad_request, <<"Attachment name can't start with '_'">>});
validate_attachment_name(Name) ->
case couch_util:validate_utf8(Name) of
diff --git a/src/couch/src/couch_httpd_misc_handlers.erl b/src/couch/src/couch_httpd_misc_handlers.erl
index ea9c1cb84..d9c591875 100644
--- a/src/couch/src/couch_httpd_misc_handlers.erl
+++ b/src/couch/src/couch_httpd_misc_handlers.erl
@@ -12,87 +12,104 @@
-module(couch_httpd_misc_handlers).
--export([handle_welcome_req/2,handle_favicon_req/2,handle_utils_dir_req/2,
+-export([
+ handle_welcome_req/2,
+ handle_favicon_req/2,
+ handle_utils_dir_req/2,
handle_all_dbs_req/1,
- handle_uuids_req/1,handle_config_req/1,
- handle_task_status_req/1, handle_file_req/2]).
-
+ handle_uuids_req/1,
+ handle_config_req/1,
+ handle_task_status_req/1,
+ handle_file_req/2
+]).
-include_lib("couch/include/couch_db.hrl").
--import(couch_httpd,
- [send_json/2,send_json/3,send_json/4,send_method_not_allowed/2,
- start_json_response/2,send_chunk/2,last_chunk/1,end_json_response/1,
- start_chunked_response/3, send_error/4]).
+-import(
+ couch_httpd,
+ [
+ send_json/2, send_json/3, send_json/4,
+ send_method_not_allowed/2,
+ start_json_response/2,
+ send_chunk/2,
+ last_chunk/1,
+ end_json_response/1,
+ start_chunked_response/3,
+ send_error/4
+ ]
+).
% httpd global handlers
-handle_welcome_req(#httpd{method='GET'}=Req, WelcomeMessage) ->
- send_json(Req, {[
- {couchdb, WelcomeMessage},
- {uuid, couch_server:get_uuid()},
- {version, list_to_binary(couch_server:get_version())}
- ] ++ case config:get("vendor") of
- [] ->
- [];
- Properties ->
- [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
- end
+handle_welcome_req(#httpd{method = 'GET'} = Req, WelcomeMessage) ->
+ send_json(Req, {
+ [
+ {couchdb, WelcomeMessage},
+ {uuid, couch_server:get_uuid()},
+ {version, list_to_binary(couch_server:get_version())}
+ ] ++
+ case config:get("vendor") of
+ [] ->
+ [];
+ Properties ->
+ [{vendor, {[{?l2b(K), ?l2b(V)} || {K, V} <- Properties]}}]
+ end
});
handle_welcome_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
-handle_favicon_req(#httpd{method='GET'}=Req, DocumentRoot) ->
- {{Year,Month,Day},Time} = erlang:universaltime(),
- OneYearFromNow = {{Year+1,Month,Day},Time},
+handle_favicon_req(#httpd{method = 'GET'} = Req, DocumentRoot) ->
+ {{Year, Month, Day}, Time} = erlang:universaltime(),
+ OneYearFromNow = {{Year + 1, Month, Day}, Time},
CachingHeaders = [
%favicon should expire a year from now
{"Cache-Control", "public, max-age=31536000"},
{"Expires", couch_util:rfc1123_date(OneYearFromNow)}
],
couch_httpd:serve_file(Req, "favicon.ico", DocumentRoot, CachingHeaders);
-
handle_favicon_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
-handle_file_req(#httpd{method='GET'}=Req, Document) ->
+handle_file_req(#httpd{method = 'GET'} = Req, Document) ->
couch_httpd:serve_file(Req, filename:basename(Document), filename:dirname(Document));
-
handle_file_req(Req, _) ->
send_method_not_allowed(Req, "GET,HEAD").
handle_utils_dir_req(Req, _) ->
- send_error(Req, 410, <<"no_node_local_fauxton">>,
- ?l2b("The web interface is no longer available on the node-local port.")).
-
+ send_error(
+ Req,
+ 410,
+ <<"no_node_local_fauxton">>,
+ ?l2b("The web interface is no longer available on the node-local port.")
+ ).
-handle_all_dbs_req(#httpd{method='GET'}=Req) ->
+handle_all_dbs_req(#httpd{method = 'GET'} = Req) ->
{ok, DbNames} = couch_server:all_databases(),
send_json(Req, DbNames);
handle_all_dbs_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-
-handle_task_status_req(#httpd{method='GET'}=Req) ->
+handle_task_status_req(#httpd{method = 'GET'} = Req) ->
ok = couch_httpd:verify_is_server_admin(Req),
% convert the list of prop lists to a list of json objects
send_json(Req, [{Props} || Props <- couch_task_status:all()]);
handle_task_status_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-
-handle_uuids_req(#httpd{method='GET'}=Req) ->
- Max = config:get_integer("uuids","max_count", 1000),
- Count = try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
- N when N > Max ->
- throw({bad_request, <<"count parameter too large">>});
- N when N < 0 ->
- throw({bad_request, <<"count must be a positive integer">>});
- N -> N
- catch
- error:badarg ->
- throw({bad_request, <<"count must be a positive integer">>})
- end,
+handle_uuids_req(#httpd{method = 'GET'} = Req) ->
+ Max = config:get_integer("uuids", "max_count", 1000),
+ Count =
+ try list_to_integer(couch_httpd:qs_value(Req, "count", "1")) of
+ N when N > Max ->
+ throw({bad_request, <<"count parameter too large">>});
+ N when N < 0 ->
+ throw({bad_request, <<"count must be a positive integer">>});
+ N ->
+ N
+ catch
+ error:badarg ->
+ throw({bad_request, <<"count must be a positive integer">>})
+ end,
UUIDs = [couch_uuids:new() || _ <- lists:seq(1, Count)],
Etag = couch_httpd:make_etag(UUIDs),
couch_httpd:etag_respond(Req, Etag, fun() ->
@@ -109,51 +126,60 @@ handle_uuids_req(#httpd{method='GET'}=Req) ->
handle_uuids_req(Req) ->
send_method_not_allowed(Req, "GET").
-
% Config request handler
-
% GET /_config/
% GET /_config
-handle_config_req(#httpd{method='GET', path_parts=[_]}=Req) ->
+handle_config_req(#httpd{method = 'GET', path_parts = [_]} = Req) ->
ok = couch_httpd:verify_is_server_admin(Req),
- Grouped = lists:foldl(fun({{Section, Key}, Value}, Acc) ->
- case dict:is_key(Section, Acc) of
- true ->
- dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
- false ->
- dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
- end
- end, dict:new(), config:all()),
- KVs = dict:fold(fun(Section, Values, Acc) ->
- [{list_to_binary(Section), {Values}} | Acc]
- end, [], Grouped),
+ Grouped = lists:foldl(
+ fun({{Section, Key}, Value}, Acc) ->
+ case dict:is_key(Section, Acc) of
+ true ->
+ dict:append(Section, {list_to_binary(Key), list_to_binary(Value)}, Acc);
+ false ->
+ dict:store(Section, [{list_to_binary(Key), list_to_binary(Value)}], Acc)
+ end
+ end,
+ dict:new(),
+ config:all()
+ ),
+ KVs = dict:fold(
+ fun(Section, Values, Acc) ->
+ [{list_to_binary(Section), {Values}} | Acc]
+ end,
+ [],
+ Grouped
+ ),
send_json(Req, 200, {KVs});
% GET /_config/Section
-handle_config_req(#httpd{method='GET', path_parts=[_,Section]}=Req) ->
+handle_config_req(#httpd{method = 'GET', path_parts = [_, Section]} = Req) ->
ok = couch_httpd:verify_is_server_admin(Req),
- KVs = [{list_to_binary(Key), list_to_binary(Value)}
- || {Key, Value} <- config:get(Section)],
+ KVs = [
+ {list_to_binary(Key), list_to_binary(Value)}
+ || {Key, Value} <- config:get(Section)
+ ],
send_json(Req, 200, {KVs});
% GET /_config/Section/Key
-handle_config_req(#httpd{method='GET', path_parts=[_, Section, Key]}=Req) ->
+handle_config_req(#httpd{method = 'GET', path_parts = [_, Section, Key]} = Req) ->
ok = couch_httpd:verify_is_server_admin(Req),
case config:get(Section, Key, undefined) of
- undefined ->
- throw({not_found, unknown_config_value});
- Value ->
- send_json(Req, 200, list_to_binary(Value))
+ undefined ->
+ throw({not_found, unknown_config_value});
+ Value ->
+ send_json(Req, 200, list_to_binary(Value))
end;
% POST /_config/_reload - Flushes unpersisted config values from RAM
-handle_config_req(#httpd{method='POST', path_parts=[_, <<"_reload">>]}=Req) ->
+handle_config_req(#httpd{method = 'POST', path_parts = [_, <<"_reload">>]} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
_ = couch_httpd:body(Req),
ok = couch_httpd:verify_is_server_admin(Req),
ok = config:reload(),
send_json(Req, 200, {[{ok, true}]});
% PUT or DELETE /_config/Section/Key
-handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
- when (Method == 'PUT') or (Method == 'DELETE') ->
+handle_config_req(#httpd{method = Method, path_parts = [_, Section, Key]} = Req) when
+ (Method == 'PUT') or (Method == 'DELETE')
+->
ok = couch_httpd:verify_is_server_admin(Req),
couch_util:check_config_blacklist(Section),
Persist = couch_httpd:header_value(Req, "X-Couch-Persist") /= "false",
@@ -169,19 +195,25 @@ handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
% variable itself.
FallbackWhitelist = [{<<"chttpd">>, <<"config_whitelist">>}],
- Whitelist = case couch_util:parse_term(WhitelistValue) of
- {ok, Value} when is_list(Value) ->
- Value;
- {ok, _NonListValue} ->
- FallbackWhitelist;
- {error, _} ->
- [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
- couch_log:error("Only whitelisting ~s/~s due to error"
- " parsing: ~p",
- [WhitelistSection, WhitelistKey,
- WhitelistValue]),
- FallbackWhitelist
- end,
+ Whitelist =
+ case couch_util:parse_term(WhitelistValue) of
+ {ok, Value} when is_list(Value) ->
+ Value;
+ {ok, _NonListValue} ->
+ FallbackWhitelist;
+ {error, _} ->
+ [{WhitelistSection, WhitelistKey}] = FallbackWhitelist,
+ couch_log:error(
+ "Only whitelisting ~s/~s due to error"
+ " parsing: ~p",
+ [
+ WhitelistSection,
+ WhitelistKey,
+ WhitelistValue
+ ]
+ ),
+ FallbackWhitelist
+ end,
IsRequestedKeyVal = fun(Element) ->
case Element of
@@ -207,8 +239,12 @@ handle_config_req(#httpd{method=Method, path_parts=[_, Section, Key]}=Req)
handle_approved_config_req(Req, Persist);
_NotWhitelisted ->
% Disallow modifying this non-whitelisted variable.
- send_error(Req, 400, <<"modification_not_allowed">>,
- ?l2b("This config variable is read-only"))
+ send_error(
+ Req,
+ 400,
+ <<"modification_not_allowed">>,
+ ?l2b("This config variable is read-only")
+ )
end
end;
handle_config_req(Req) ->
@@ -218,52 +254,60 @@ handle_config_req(Req) ->
% "value"
handle_approved_config_req(Req, Persist) ->
Query = couch_httpd:qs(Req),
- UseRawValue = case lists:keyfind("raw", 1, Query) of
- false -> false; % Not specified
- {"raw", ""} -> false; % Specified with no value, i.e. "?raw" and "?raw="
- {"raw", "false"} -> false;
- {"raw", "true"} -> true;
- {"raw", InvalidValue} -> InvalidValue
- end,
+ UseRawValue =
+ case lists:keyfind("raw", 1, Query) of
+ % Not specified
+ false -> false;
+ % Specified with no value, i.e. "?raw" and "?raw="
+ {"raw", ""} -> false;
+ {"raw", "false"} -> false;
+ {"raw", "true"} -> true;
+ {"raw", InvalidValue} -> InvalidValue
+ end,
handle_approved_config_req(Req, Persist, UseRawValue).
-handle_approved_config_req(#httpd{method='PUT', path_parts=[_, Section, Key]}=Req,
- Persist, UseRawValue)
- when UseRawValue =:= false orelse UseRawValue =:= true ->
+handle_approved_config_req(
+ #httpd{method = 'PUT', path_parts = [_, Section, Key]} = Req,
+ Persist,
+ UseRawValue
+) when
+ UseRawValue =:= false orelse UseRawValue =:= true
+->
RawValue = couch_httpd:json_body(Req),
- Value = case UseRawValue of
- true ->
- % Client requests no change to the provided value.
- RawValue;
- false ->
- % Pre-process the value as necessary.
- case Section of
- <<"admins">> ->
- couch_passwords:hash_admin_password(RawValue);
- _ ->
- couch_util:trim(RawValue)
- end
- end,
+ Value =
+ case UseRawValue of
+ true ->
+ % Client requests no change to the provided value.
+ RawValue;
+ false ->
+ % Pre-process the value as necessary.
+ case Section of
+ <<"admins">> ->
+ couch_passwords:hash_admin_password(RawValue);
+ _ ->
+ couch_util:trim(RawValue)
+ end
+ end,
OldValue = config:get(Section, Key, ""),
case config:set(Section, Key, ?b2l(Value), Persist) of
- ok ->
- send_json(Req, 200, list_to_binary(OldValue));
- Error ->
- throw(Error)
+ ok ->
+ send_json(Req, 200, list_to_binary(OldValue));
+ Error ->
+ throw(Error)
end;
-
-handle_approved_config_req(#httpd{method='PUT'}=Req, _Persist, UseRawValue) ->
+handle_approved_config_req(#httpd{method = 'PUT'} = Req, _Persist, UseRawValue) ->
Err = io_lib:format("Bad value for 'raw' option: ~s", [UseRawValue]),
send_json(Req, 400, {[{error, ?l2b(Err)}]});
-
% DELETE /_config/Section/Key
-handle_approved_config_req(#httpd{method='DELETE',path_parts=[_,Section,Key]}=Req,
- Persist, _UseRawValue) ->
+handle_approved_config_req(
+ #httpd{method = 'DELETE', path_parts = [_, Section, Key]} = Req,
+ Persist,
+ _UseRawValue
+) ->
case config:get(Section, Key, undefined) of
- undefined ->
- throw({not_found, unknown_config_value});
- OldValue ->
- config:delete(Section, Key, Persist),
- send_json(Req, 200, list_to_binary(OldValue))
+ undefined ->
+ throw({not_found, unknown_config_value});
+ OldValue ->
+ config:delete(Section, Key, Persist),
+ send_json(Req, 200, list_to_binary(OldValue))
end.
-
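The hunks above show the core erlfmt conventions applied throughout this commit: a `case` expression whose result is bound to a variable is moved onto its own indented block under `Variable =`, record patterns such as `#httpd{method = 'GET'} = Req` gain spaces around `=`, and long call argument lists are split one argument per line. A minimal sketch of the same layout, assuming an invented module and function (`fmt_example:describe/1`) rather than anything in this tree:

```
%% Sketch only -- fmt_example is not part of CouchDB; it just mirrors
%% the erlfmt layout seen in the hunks above.
-module(fmt_example).
-export([describe/1]).

describe(Count) ->
    %% erlfmt pushes a case bound to a variable onto its own block
    %% instead of keeping `Label = case Count of ... end,` on one line.
    Label =
        case Count of
            0 -> "none";
            _ -> "some"
        end,
    %% Long argument lists are broken one argument per line.
    io_lib:format(
        "~s (~B)",
        [Label, Count]
    ).
```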
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
index 33795a3a1..ecdf10562 100644
--- a/src/couch/src/couch_httpd_multipart.erl
+++ b/src/couch/src/couch_httpd_multipart.erl
@@ -30,47 +30,53 @@ decode_multipart_stream(ContentType, DataFun, Ref) ->
ParentRef = erlang:monitor(process, Parent),
put(mp_parent_ref, ParentRef),
num_mp_writers(NumMpWriters),
- {<<"--",_/binary>>, _, _} = couch_httpd:parse_multipart_request(
- ContentType, DataFun,
- fun(Next) -> mp_parse_doc(Next, []) end),
+ {<<"--", _/binary>>, _, _} = couch_httpd:parse_multipart_request(
+ ContentType,
+ DataFun,
+ fun(Next) -> mp_parse_doc(Next, []) end
+ ),
unlink(Parent)
- end),
+ end),
Parser ! {get_doc_bytes, Ref, self()},
receive
- {started_open_doc_revs, NewRef} ->
- %% FIXME: How to remove the knowledge about this message?
- {{started_open_doc_revs, NewRef}, Parser, ParserRef};
- {doc_bytes, Ref, DocBytes} ->
- {{doc_bytes, Ref, DocBytes}, Parser, ParserRef};
- {'DOWN', ParserRef, _, _, normal} ->
- ok;
- {'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} ->
- couch_log:error("Multipart streamer ~p died with reason ~p",
- [ParserRef, Msg]),
- throw({Error, Msg});
- {'DOWN', ParserRef, _, _, Reason} ->
- couch_log:error("Multipart streamer ~p died with reason ~p",
- [ParserRef, Reason]),
- throw({error, Reason})
+ {started_open_doc_revs, NewRef} ->
+ %% FIXME: How to remove the knowledge about this message?
+ {{started_open_doc_revs, NewRef}, Parser, ParserRef};
+ {doc_bytes, Ref, DocBytes} ->
+ {{doc_bytes, Ref, DocBytes}, Parser, ParserRef};
+ {'DOWN', ParserRef, _, _, normal} ->
+ ok;
+ {'DOWN', ParserRef, process, Parser, {{nocatch, {Error, Msg}}, _}} ->
+ couch_log:error(
+ "Multipart streamer ~p died with reason ~p",
+ [ParserRef, Msg]
+ ),
+ throw({Error, Msg});
+ {'DOWN', ParserRef, _, _, Reason} ->
+ couch_log:error(
+ "Multipart streamer ~p died with reason ~p",
+ [ParserRef, Reason]
+ ),
+ throw({error, Reason})
end.
-
mp_parse_doc({headers, H}, []) ->
case couch_util:get_value("content-type", H) of
- {"application/json", _} ->
- fun (Next) ->
- mp_parse_doc(Next, [])
- end;
- _ ->
- throw({bad_ctype, <<"Content-Type must be application/json">>})
+ {"application/json", _} ->
+ fun(Next) ->
+ mp_parse_doc(Next, [])
+ end;
+ _ ->
+ throw({bad_ctype, <<"Content-Type must be application/json">>})
end;
mp_parse_doc({body, Bytes}, AccBytes) ->
- fun (Next) ->
+ fun(Next) ->
mp_parse_doc(Next, [Bytes | AccBytes])
end;
mp_parse_doc(body_end, AccBytes) ->
- receive {get_doc_bytes, Ref, From} ->
- From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
+ receive
+ {get_doc_bytes, Ref, From} ->
+ From ! {doc_bytes, Ref, lists:reverse(AccBytes)}
end,
fun(Next) ->
mp_parse_atts(Next, {Ref, [], 0, orddict:new(), []})
@@ -81,7 +87,7 @@ mp_parse_atts({headers, _}, Acc) ->
mp_parse_atts(body_end, Acc) ->
fun(Next) -> mp_parse_atts(Next, Acc) end;
mp_parse_atts({body, Bytes}, {Ref, Chunks, Offset, Counters, Waiting}) ->
- case maybe_send_data({Ref, Chunks++[Bytes], Offset, Counters, Waiting}) of
+ case maybe_send_data({Ref, Chunks ++ [Bytes], Offset, Counters, Waiting}) of
abort_parsing ->
fun(Next) -> mp_abort_parse_atts(Next, nil) end;
NewAcc ->
@@ -91,34 +97,34 @@ mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) ->
N = num_mp_writers(),
M = length(Counters),
case (M == N) andalso Chunks == [] of
- true ->
- ok;
- false ->
- ParentRef = get(mp_parent_ref),
- receive
- abort_parsing ->
+ true ->
ok;
- {get_bytes, Ref, From} ->
- C2 = update_writer(From, Counters),
- case maybe_send_data({Ref, Chunks, Offset, C2, [From|Waiting]}) of
- abort_parsing ->
- ok;
- NewAcc ->
- mp_parse_atts(eof, NewAcc)
- end;
- {'DOWN', ParentRef, _, _, _} ->
- exit(mp_reader_coordinator_died);
- {'DOWN', WriterRef, _, WriterPid, _} ->
- case remove_writer(WriterPid, WriterRef, Counters) of
+ false ->
+ ParentRef = get(mp_parent_ref),
+ receive
abort_parsing ->
ok;
- C2 ->
- NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
- mp_parse_atts(eof, NewAcc)
+ {get_bytes, Ref, From} ->
+ C2 = update_writer(From, Counters),
+ case maybe_send_data({Ref, Chunks, Offset, C2, [From | Waiting]}) of
+ abort_parsing ->
+ ok;
+ NewAcc ->
+ mp_parse_atts(eof, NewAcc)
+ end;
+ {'DOWN', ParentRef, _, _, _} ->
+ exit(mp_reader_coordinator_died);
+ {'DOWN', WriterRef, _, WriterPid, _} ->
+ case remove_writer(WriterPid, WriterRef, Counters) of
+ abort_parsing ->
+ ok;
+ C2 ->
+ NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
+ mp_parse_atts(eof, NewAcc)
+ end
+ after 300000 ->
+ ok
end
- after 300000 ->
- ok
- end
end.
mp_abort_parse_atts(eof, _) ->
@@ -127,82 +133,89 @@ mp_abort_parse_atts(_, _) ->
fun(Next) -> mp_abort_parse_atts(Next, nil) end.
maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
- receive {get_bytes, Ref, From} ->
- NewCounters = update_writer(From, Counters),
- maybe_send_data({Ref, Chunks, Offset, NewCounters, [From|Waiting]})
+ receive
+ {get_bytes, Ref, From} ->
+ NewCounters = update_writer(From, Counters),
+ maybe_send_data({Ref, Chunks, Offset, NewCounters, [From | Waiting]})
after 0 ->
% reply to as many writers as possible
- NewWaiting = lists:filter(fun(Writer) ->
- {_, WhichChunk} = orddict:fetch(Writer, Counters),
- ListIndex = WhichChunk - Offset,
- if ListIndex =< length(Chunks) ->
- Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)},
- false;
- true ->
- true
- end
- end, Waiting),
+ NewWaiting = lists:filter(
+ fun(Writer) ->
+ {_, WhichChunk} = orddict:fetch(Writer, Counters),
+ ListIndex = WhichChunk - Offset,
+ if
+ ListIndex =< length(Chunks) ->
+ Writer ! {bytes, Ref, lists:nth(ListIndex, Chunks)},
+ false;
+ true ->
+ true
+ end
+ end,
+ Waiting
+ ),
% check if we can drop a chunk from the head of the list
- SmallestIndex = case Counters of
- [] ->
- 0;
- _ ->
- lists:min([C || {_WPid, {_WRef, C}} <- Counters])
- end,
+ SmallestIndex =
+ case Counters of
+ [] ->
+ 0;
+ _ ->
+ lists:min([C || {_WPid, {_WRef, C}} <- Counters])
+ end,
Size = length(Counters),
N = num_mp_writers(),
- if Size == N andalso SmallestIndex == (Offset+1) ->
- NewChunks = tl(Chunks),
- NewOffset = Offset+1;
- true ->
- NewChunks = Chunks,
- NewOffset = Offset
+ if
+ Size == N andalso SmallestIndex == (Offset + 1) ->
+ NewChunks = tl(Chunks),
+ NewOffset = Offset + 1;
+ true ->
+ NewChunks = Chunks,
+ NewOffset = Offset
end,
% we should wait for a writer if no one has written the last chunk
LargestIndex = lists:max([0] ++ [C || {_WPid, {_WRef, C}} <- Counters]),
- if LargestIndex >= (Offset + length(Chunks)) ->
- % someone has written all possible chunks, keep moving
- {Ref, NewChunks, NewOffset, Counters, NewWaiting};
- true ->
- ParentRef = get(mp_parent_ref),
- receive
- abort_parsing ->
- abort_parsing;
- {'DOWN', ParentRef, _, _, _} ->
- exit(mp_reader_coordinator_died);
- {'DOWN', WriterRef, _, WriterPid, _} ->
- case remove_writer(WriterPid, WriterRef, Counters) of
+ if
+ LargestIndex >= (Offset + length(Chunks)) ->
+ % someone has written all possible chunks, keep moving
+ {Ref, NewChunks, NewOffset, Counters, NewWaiting};
+ true ->
+ ParentRef = get(mp_parent_ref),
+ receive
abort_parsing ->
abort_parsing;
- C2 ->
- RestWaiting = NewWaiting -- [WriterPid],
- NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting},
- maybe_send_data(NewAcc)
- end;
- {get_bytes, Ref, X} ->
- C2 = update_writer(X, Counters),
- maybe_send_data({Ref, NewChunks, NewOffset, C2, [X|NewWaiting]})
- after 300000 ->
- abort_parsing
- end
+ {'DOWN', ParentRef, _, _, _} ->
+ exit(mp_reader_coordinator_died);
+ {'DOWN', WriterRef, _, WriterPid, _} ->
+ case remove_writer(WriterPid, WriterRef, Counters) of
+ abort_parsing ->
+ abort_parsing;
+ C2 ->
+ RestWaiting = NewWaiting -- [WriterPid],
+ NewAcc = {Ref, NewChunks, NewOffset, C2, RestWaiting},
+ maybe_send_data(NewAcc)
+ end;
+ {get_bytes, Ref, X} ->
+ C2 = update_writer(X, Counters),
+ maybe_send_data({Ref, NewChunks, NewOffset, C2, [X | NewWaiting]})
+ after 300000 ->
+ abort_parsing
+ end
end
end.
-
update_writer(WriterPid, Counters) ->
UpdateFun = fun({WriterRef, Count}) -> {WriterRef, Count + 1} end,
- InitialValue = case orddict:find(WriterPid, Counters) of
- {ok, IV} ->
- IV;
- error ->
- WriterRef = erlang:monitor(process, WriterPid),
- {WriterRef, 1}
- end,
+ InitialValue =
+ case orddict:find(WriterPid, Counters) of
+ {ok, IV} ->
+ IV;
+ error ->
+ WriterRef = erlang:monitor(process, WriterPid),
+ {WriterRef, 1}
+ end,
orddict:update(WriterPid, UpdateFun, InitialValue, Counters).
-
remove_writer(WriterPid, WriterRef, Counters) ->
case orddict:find(WriterPid, Counters) of
{ok, {WriterRef, _}} ->
@@ -221,11 +234,9 @@ remove_writer(WriterPid, WriterRef, Counters) ->
abort_parsing
end.
-
num_mp_writers(N) ->
erlang:put(mp_att_writers, N).
-
num_mp_writers() ->
case erlang:get(mp_att_writers) of
undefined -> 1;
@@ -235,15 +246,21 @@ num_mp_writers() ->
encode_multipart_stream(_Boundary, JsonBytes, [], WriteFun, _AttFun) ->
WriteFun(JsonBytes);
encode_multipart_stream(Boundary, JsonBytes, Atts, WriteFun, AttFun) ->
- WriteFun([<<"--", Boundary/binary,
- "\r\nContent-Type: application/json\r\n\r\n">>,
- JsonBytes, <<"\r\n--", Boundary/binary>>]),
+ WriteFun([
+ <<"--", Boundary/binary, "\r\nContent-Type: application/json\r\n\r\n">>,
+ JsonBytes,
+ <<"\r\n--", Boundary/binary>>
+ ]),
atts_to_mp(Atts, Boundary, WriteFun, AttFun).
atts_to_mp([], _Boundary, WriteFun, _AttFun) ->
WriteFun(<<"--">>);
-atts_to_mp([{Att, Name, Len, Type, Encoding} | RestAtts], Boundary, WriteFun,
- AttFun) ->
+atts_to_mp(
+ [{Att, Name, Len, Type, Encoding} | RestAtts],
+ Boundary,
+ WriteFun,
+ AttFun
+) ->
LengthBin = list_to_binary(integer_to_list(Len)),
% write headers
WriteFun(<<"\r\nContent-Disposition: attachment; filename=\"", Name/binary, "\"">>),
@@ -264,40 +281,52 @@ atts_to_mp([{Att, Name, Len, Type, Encoding} | RestAtts], Boundary, WriteFun,
atts_to_mp(RestAtts, Boundary, WriteFun, AttFun).
length_multipart_stream(Boundary, JsonBytes, Atts) ->
- AttsSize = lists:foldl(fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) ->
- AccAttsSize +
- 4 + % "\r\n\r\n"
- length(integer_to_list(Len)) +
- Len +
- 4 + % "\r\n--"
- size(Boundary) +
- % attachment headers
- % (the length of the Content-Length has already been set)
- size(Name) +
- size(Type) +
- length("\r\nContent-Disposition: attachment; filename=\"\"") +
- length("\r\nContent-Type: ") +
- length("\r\nContent-Length: ") +
- case Encoding of
- identity ->
- 0;
- _ ->
- length(atom_to_list(Encoding)) +
- length("\r\nContent-Encoding: ")
- end
- end, 0, Atts),
- if AttsSize == 0 ->
- {<<"application/json">>, iolist_size(JsonBytes)};
- true ->
- {<<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
- 2 + % "--"
- size(Boundary) +
- 36 + % "\r\ncontent-type: application/json\r\n\r\n"
- iolist_size(JsonBytes) +
- 4 + % "\r\n--"
- size(Boundary) +
- + AttsSize +
- 2 % "--"
+ AttsSize = lists:foldl(
+ fun({_Att, Name, Len, Type, Encoding}, AccAttsSize) ->
+ AccAttsSize +
+ % "\r\n\r\n"
+ 4 +
+ length(integer_to_list(Len)) +
+ Len +
+ % "\r\n--"
+ 4 +
+ size(Boundary) +
+ % attachment headers
+ % (the length of the Content-Length has already been set)
+ size(Name) +
+ size(Type) +
+ length("\r\nContent-Disposition: attachment; filename=\"\"") +
+ length("\r\nContent-Type: ") +
+ length("\r\nContent-Length: ") +
+ case Encoding of
+ identity ->
+ 0;
+ _ ->
+ length(atom_to_list(Encoding)) +
+ length("\r\nContent-Encoding: ")
+ end
+ end,
+ 0,
+ Atts
+ ),
+ if
+ AttsSize == 0 ->
+ {<<"application/json">>, iolist_size(JsonBytes)};
+ true ->
+ {
+ <<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
+ % "--"
+ 2 +
+ size(Boundary) +
+ % "\r\ncontent-type: application/json\r\n\r\n"
+ 36 +
+ iolist_size(JsonBytes) +
+ % "\r\n--"
+ 4 +
+ size(Boundary) +
+ +AttsSize +
+ % "--"
+ 2
}
end.
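In length_multipart_stream/3 above, erlfmt moves the trailing byte-count comments (`% "\r\n\r\n"`, `% "--"`) onto their own lines above the numbers they describe, which keeps the arithmetic readable as a single column. A hedged sketch of the same pattern on an invented helper (`mp_len_example:framed_length/2`); the framing and byte counts below are illustrative, not taken from the multipart code:

```
%% Sketch only; not part of the commit. Length of Body framed as
%% "--Boundary\r\n" ++ Body ++ "\r\n--Boundary--", with each literal's
%% size annotated on the line above it, erlfmt style.
-module(mp_len_example).
-export([framed_length/2]).

framed_length(Boundary, Body) when is_binary(Boundary) ->
    % "--"
    2 +
        byte_size(Boundary) +
        % "\r\n"
        2 +
        iolist_size(Body) +
        % "\r\n--"
        4 +
        byte_size(Boundary) +
        % "--"
        2.
```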
diff --git a/src/couch/src/couch_httpd_rewrite.erl b/src/couch/src/couch_httpd_rewrite.erl
index 40e5c9e3c..97f48a4c0 100644
--- a/src/couch/src/couch_httpd_rewrite.erl
+++ b/src/couch/src/couch_httpd_rewrite.erl
@@ -12,7 +12,6 @@
%
% bind_path is based on bind method from Webmachine
-
%% @doc Module for URL rewriting by pattern matching.
-module(couch_httpd_rewrite).
@@ -25,7 +24,6 @@
-define(SEPARATOR, $\/).
-define(MATCH_ALL, {bind, <<"*">>}).
-
%% doc The http rewrite handler. All rewriting is done from
%% /dbname/_design/ddocname/_rewrite by default.
%%
@@ -110,13 +108,15 @@
%% "to": "/some/:foo",
%% }}
-
-
-handle_rewrite_req(#httpd{
- path_parts=[DbName, <<"_design">>, DesignName, _Rewrite|PathParts],
- method=Method,
- mochi_req=MochiReq}=Req, _Db, DDoc) ->
-
+handle_rewrite_req(
+ #httpd{
+ path_parts = [DbName, <<"_design">>, DesignName, _Rewrite | PathParts],
+ method = Method,
+ mochi_req = MochiReq
+ } = Req,
+ _Db,
+ DDoc
+) ->
% we are in a design handler
DesignId = <<"_design/", DesignName/binary>>,
Prefix = <<"/", (?l2b(couch_util:url_encode(DbName)))/binary, "/", DesignId/binary>>,
@@ -131,19 +131,27 @@ handle_rewrite_req(#httpd{
erlang:put(?REWRITE_COUNT, RewritesSoFar + 1)
end,
- #doc{body={Props}} = DDoc,
+ #doc{body = {Props}} = DDoc,
% get rules from ddoc
case couch_util:get_value(<<"rewrites">>, Props) of
undefined ->
- couch_httpd:send_error(Req, 404, <<"rewrite_error">>,
- <<"Invalid path.">>);
+ couch_httpd:send_error(
+ Req,
+ 404,
+ <<"rewrite_error">>,
+ <<"Invalid path.">>
+ );
Bin when is_binary(Bin) ->
- couch_httpd:send_error(Req, 400, <<"rewrite_error">>,
- <<"Rewrite rules are a String. They must be a JSON Array.">>);
+ couch_httpd:send_error(
+ Req,
+ 400,
+ <<"rewrite_error">>,
+ <<"Rewrite rules are a String. They must be a JSON Array.">>
+ );
Rules ->
% create dispatch list from rules
- DispatchList = [make_rule(Rule) || {Rule} <- Rules],
+ DispatchList = [make_rule(Rule) || {Rule} <- Rules],
Method1 = couch_util:to_binary(Method),
% get raw path by matching url to a rule. Throws not_found.
@@ -155,39 +163,45 @@ handle_rewrite_req(#httpd{
Path0 = string:join(NewPathParts, [?SEPARATOR]),
% if path is relative detect it and rewrite path
- Path1 = case mochiweb_util:safe_relative_path(Path0) of
- undefined ->
- ?b2l(Prefix) ++ "/" ++ Path0;
- P1 ->
- ?b2l(Prefix) ++ "/" ++ P1
- end,
+ Path1 =
+ case mochiweb_util:safe_relative_path(Path0) of
+ undefined ->
+ ?b2l(Prefix) ++ "/" ++ Path0;
+ P1 ->
+ ?b2l(Prefix) ++ "/" ++ P1
+ end,
Path2 = normalize_path(Path1),
- Path3 = case Bindings of
- [] ->
- Path2;
- _ ->
- [Path2, "?", mochiweb_util:urlencode(Bindings)]
- end,
+ Path3 =
+ case Bindings of
+ [] ->
+ Path2;
+ _ ->
+ [Path2, "?", mochiweb_util:urlencode(Bindings)]
+ end,
RawPath1 = ?b2l(iolist_to_binary(Path3)),
% In order to do OAuth correctly, we have to save the
% requested path. We use default so chained rewriting
            % won't replace the original header.
- Headers = mochiweb_headers:default("x-couchdb-requested-path",
- MochiReq:get(raw_path),
- MochiReq:get(headers)),
+ Headers = mochiweb_headers:default(
+ "x-couchdb-requested-path",
+ MochiReq:get(raw_path),
+ MochiReq:get(headers)
+ ),
couch_log:debug("rewrite to ~p ~n", [RawPath1]),
% build a new mochiweb request
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- RawPath1,
- MochiReq:get(version),
- Headers),
+ MochiReq1 = mochiweb_request:new(
+ MochiReq:get(socket),
+ MochiReq:get(method),
+ RawPath1,
+ MochiReq:get(version),
+ Headers
+ ),
            % cleanup, it forces mochiweb to reparse the raw uri.
MochiReq1:cleanup(),
@@ -198,14 +212,19 @@ handle_rewrite_req(#httpd{
default_fun = DefaultFun,
url_handlers = UrlHandlers,
user_ctx = UserCtx,
- auth = Auth
+ auth = Auth
} = Req,
erlang:put(pre_rewrite_auth, Auth),
erlang:put(pre_rewrite_user_ctx, UserCtx),
- couch_httpd:handle_request_int(MochiReq1, DefaultFun,
- UrlHandlers, DbUrlHandlers, DesignUrlHandlers)
- end.
+ couch_httpd:handle_request_int(
+ MochiReq1,
+ DefaultFun,
+ UrlHandlers,
+ DbUrlHandlers,
+ DesignUrlHandlers
+ )
+ end.
quote_plus({bind, X}) ->
mochiweb_util:quote_plus(X);
@@ -216,7 +235,7 @@ quote_plus(X) ->
%% 404 error not_found is raised
try_bind_path([], _Method, _PathParts, _QueryList) ->
throw(not_found);
-try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
+try_bind_path([Dispatch | Rest], Method, PathParts, QueryList) ->
[{PathParts1, Method1}, RedirectPath, QueryArgs, Formats] = Dispatch,
case bind_method(Method1, Method) of
true ->
@@ -225,22 +244,35 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
Bindings1 = Bindings ++ QueryList,
% we parse query args from the rule and fill
% it eventually with bindings vars
- QueryArgs1 = make_query_list(QueryArgs, Bindings1,
- Formats, []),
+ QueryArgs1 = make_query_list(
+ QueryArgs,
+ Bindings1,
+ Formats,
+ []
+ ),
% remove params in QueryLists1 that are already in
% QueryArgs1
- Bindings2 = lists:foldl(fun({K, V}, Acc) ->
- K1 = to_binding(K),
- KV = case couch_util:get_value(K1, QueryArgs1) of
- undefined -> [{K1, V}];
- _V1 -> []
+ Bindings2 = lists:foldl(
+ fun({K, V}, Acc) ->
+ K1 = to_binding(K),
+ KV =
+ case couch_util:get_value(K1, QueryArgs1) of
+ undefined -> [{K1, V}];
+ _V1 -> []
+ end,
+ Acc ++ KV
end,
- Acc ++ KV
- end, [], Bindings1),
+ [],
+ Bindings1
+ ),
FinalBindings = Bindings2 ++ QueryArgs1,
- NewPathParts = make_new_path(RedirectPath, FinalBindings,
- Remaining, []),
+ NewPathParts = make_new_path(
+ RedirectPath,
+ FinalBindings,
+ Remaining,
+ []
+ ),
{NewPathParts, FinalBindings};
fail ->
try_bind_path(Rest, Method, PathParts, QueryList)
@@ -254,37 +286,51 @@ try_bind_path([Dispatch|Rest], Method, PathParts, QueryList) ->
%% passed in url.
make_query_list([], _Bindings, _Formats, Acc) ->
Acc;
-make_query_list([{Key, {Value}}|Rest], Bindings, Formats, Acc) ->
+make_query_list([{Key, {Value}} | Rest], Bindings, Formats, Acc) ->
Value1 = {Value},
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_binary(Value) ->
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
+make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_binary(Value) ->
Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) when is_list(Value) ->
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
+make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) when is_list(Value) ->
Value1 = replace_var(Value, Bindings, Formats),
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1}|Acc]);
-make_query_list([{Key, Value}|Rest], Bindings, Formats, Acc) ->
- make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value}|Acc]).
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value1} | Acc]);
+make_query_list([{Key, Value} | Rest], Bindings, Formats, Acc) ->
+ make_query_list(Rest, Bindings, Formats, [{to_binding(Key), Value} | Acc]).
-replace_var(<<"*">>=Value, Bindings, Formats) ->
+replace_var(<<"*">> = Value, Bindings, Formats) ->
get_var(Value, Bindings, Value, Formats);
replace_var(<<":", Var/binary>> = Value, Bindings, Formats) ->
get_var(Var, Bindings, Value, Formats);
replace_var(Value, _Bindings, _Formats) when is_binary(Value) ->
Value;
replace_var(Value, Bindings, Formats) when is_list(Value) ->
- lists:reverse(lists:foldl(fun
- (<<":", Var/binary>>=Value1, Acc) ->
- [get_var(Var, Bindings, Value1, Formats)|Acc];
+ lists:reverse(
+ lists:foldl(
+ fun
+ (<<":", Var/binary>> = Value1, Acc) ->
+ [get_var(Var, Bindings, Value1, Formats) | Acc];
(Value1, Acc) ->
- [Value1|Acc]
- end, [], Value));
+ [Value1 | Acc]
+ end,
+ [],
+ Value
+ )
+ );
replace_var(Value, _Bindings, _Formats) ->
Value.
-
+
maybe_json(Key, Value) ->
- case lists:member(Key, [<<"key">>, <<"startkey">>, <<"start_key">>,
- <<"endkey">>, <<"end_key">>, <<"keys">>]) of
+ case
+ lists:member(Key, [
+ <<"key">>,
+ <<"startkey">>,
+ <<"start_key">>,
+ <<"endkey">>,
+ <<"end_key">>,
+ <<"keys">>
+ ])
+ of
true ->
?JSON_ENCODE(Value);
false ->
@@ -299,7 +345,7 @@ get_var(VarName, Props, Default, Formats) ->
maybe_format(VarName, Value, Formats) ->
case couch_util:get_value(VarName, Formats) of
undefined ->
- Value;
+ Value;
Format ->
format(Format, Value)
end.
@@ -324,7 +370,7 @@ format(<<"bool">>, Value) when is_list(Value) ->
_ -> Value
end;
format(_Format, Value) ->
- Value.
+ Value.
%% doc: build new path from bindings. bindings are query args
%% (+ dynamic query rewritten if needed) and bindings found in
@@ -334,94 +380,103 @@ make_new_path([], _Bindings, _Remaining, Acc) ->
make_new_path([?MATCH_ALL], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_new_path([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+make_new_path([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_new_path([{bind, P}|Rest], Bindings, Remaining, Acc) ->
- P2 = case couch_util:get_value({bind, P}, Bindings) of
- undefined -> << "undefined">>;
- P1 ->
- iolist_to_binary(P1)
- end,
- make_new_path(Rest, Bindings, Remaining, [P2|Acc]);
-make_new_path([P|Rest], Bindings, Remaining, Acc) ->
- make_new_path(Rest, Bindings, Remaining, [P|Acc]).
-
+make_new_path([{bind, P} | Rest], Bindings, Remaining, Acc) ->
+ P2 =
+ case couch_util:get_value({bind, P}, Bindings) of
+ undefined -> <<"undefined">>;
+ P1 -> iolist_to_binary(P1)
+ end,
+ make_new_path(Rest, Bindings, Remaining, [P2 | Acc]);
+make_new_path([P | Rest], Bindings, Remaining, Acc) ->
+ make_new_path(Rest, Bindings, Remaining, [P | Acc]).
%% @doc Check whether the request method fits the rule method. If the
%% method rule is '*', which is the default, all
%% request methods will bind. It allows us to make rules
%% depending on the HTTP method.
-bind_method(?MATCH_ALL, _Method ) ->
+bind_method(?MATCH_ALL, _Method) ->
true;
bind_method({bind, Method}, Method) ->
true;
bind_method(_, _) ->
false.
-
%% @doc Bind a path. Using the rule's "from" part we try to bind variables given
%% to the current url by pattern matching.
bind_path([], [], Bindings) ->
{ok, [], Bindings};
-bind_path([?MATCH_ALL], [Match|_RestMatch]=Rest, Bindings) ->
- {ok, Rest, [{?MATCH_ALL, Match}|Bindings]};
+bind_path([?MATCH_ALL], [Match | _RestMatch] = Rest, Bindings) ->
+ {ok, Rest, [{?MATCH_ALL, Match} | Bindings]};
bind_path(_, [], _) ->
fail;
-bind_path([{bind, Token}|RestToken],[Match|RestMatch],Bindings) ->
- bind_path(RestToken, RestMatch, [{{bind, Token}, Match}|Bindings]);
-bind_path([Token|RestToken], [Token|RestMatch], Bindings) ->
+bind_path([{bind, Token} | RestToken], [Match | RestMatch], Bindings) ->
+ bind_path(RestToken, RestMatch, [{{bind, Token}, Match} | Bindings]);
+bind_path([Token | RestToken], [Token | RestMatch], Bindings) ->
bind_path(RestToken, RestMatch, Bindings);
bind_path(_, _, _) ->
fail.
-
%% normalize path.
-normalize_path(Path) ->
- "/" ++ string:join(normalize_path1(string:tokens(Path,
- "/"), []), [?SEPARATOR]).
-
+normalize_path(Path) ->
+ "/" ++
+ string:join(
+ normalize_path1(
+ string:tokens(
+ Path,
+ "/"
+ ),
+ []
+ ),
+ [?SEPARATOR]
+ ).
normalize_path1([], Acc) ->
lists:reverse(Acc);
-normalize_path1([".."|Rest], Acc) ->
- Acc1 = case Acc of
- [] -> [".."|Acc];
- [T|_] when T =:= ".." -> [".."|Acc];
- [_|R] -> R
- end,
+normalize_path1([".." | Rest], Acc) ->
+ Acc1 =
+ case Acc of
+ [] -> [".." | Acc];
+ [T | _] when T =:= ".." -> [".." | Acc];
+ [_ | R] -> R
+ end,
normalize_path1(Rest, Acc1);
-normalize_path1(["."|Rest], Acc) ->
+normalize_path1(["." | Rest], Acc) ->
normalize_path1(Rest, Acc);
-normalize_path1([Path|Rest], Acc) ->
- normalize_path1(Rest, [Path|Acc]).
-
+normalize_path1([Path | Rest], Acc) ->
+ normalize_path1(Rest, [Path | Acc]).
%% @doc transform a json rule into erlang terms for pattern matching
make_rule(Rule) ->
- Method = case couch_util:get_value(<<"method">>, Rule) of
- undefined -> ?MATCH_ALL;
- M -> to_binding(M)
- end,
- QueryArgs = case couch_util:get_value(<<"query">>, Rule) of
- undefined -> [];
- {Args} -> Args
+ Method =
+ case couch_util:get_value(<<"method">>, Rule) of
+ undefined -> ?MATCH_ALL;
+ M -> to_binding(M)
end,
- FromParts = case couch_util:get_value(<<"from">>, Rule) of
- undefined -> [?MATCH_ALL];
- From ->
- parse_path(From)
+ QueryArgs =
+ case couch_util:get_value(<<"query">>, Rule) of
+ undefined -> [];
+ {Args} -> Args
end,
- ToParts = case couch_util:get_value(<<"to">>, Rule) of
- undefined ->
- throw({error, invalid_rewrite_target});
- To ->
- parse_path(To)
+ FromParts =
+ case couch_util:get_value(<<"from">>, Rule) of
+ undefined -> [?MATCH_ALL];
+ From -> parse_path(From)
+ end,
+ ToParts =
+ case couch_util:get_value(<<"to">>, Rule) of
+ undefined ->
+ throw({error, invalid_rewrite_target});
+ To ->
+ parse_path(To)
+ end,
+ Formats =
+ case couch_util:get_value(<<"formats">>, Rule) of
+ undefined -> [];
+ {Fmts} -> Fmts
end,
- Formats = case couch_util:get_value(<<"formats">>, Rule) of
- undefined -> [];
- {Fmts} -> Fmts
- end,
[{FromParts, Method}, ToParts, QueryArgs, Formats].
parse_path(Path) ->
@@ -433,43 +488,59 @@ parse_path(Path) ->
%% in erlang atom.
path_to_list([], Acc, _DotDotCount) ->
lists:reverse(Acc);
-path_to_list([<<>>|R], Acc, DotDotCount) ->
+path_to_list([<<>> | R], Acc, DotDotCount) ->
path_to_list(R, Acc, DotDotCount);
-path_to_list([<<"*">>|R], Acc, DotDotCount) ->
- path_to_list(R, [?MATCH_ALL|Acc], DotDotCount);
-path_to_list([<<"..">>|R], Acc, DotDotCount) when DotDotCount == 2 ->
+path_to_list([<<"*">> | R], Acc, DotDotCount) ->
+ path_to_list(R, [?MATCH_ALL | Acc], DotDotCount);
+path_to_list([<<"..">> | R], Acc, DotDotCount) when DotDotCount == 2 ->
case chttpd_util:get_chttpd_config_boolean("secure_rewrites", true) of
false ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
+ path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
true ->
- couch_log:info("insecure_rewrite_rule ~p blocked",
- [lists:reverse(Acc) ++ [<<"..">>] ++ R]),
+ couch_log:info(
+ "insecure_rewrite_rule ~p blocked",
+ [lists:reverse(Acc) ++ [<<"..">>] ++ R]
+ ),
throw({insecure_rewrite_rule, "too many ../.. segments"})
end;
-path_to_list([<<"..">>|R], Acc, DotDotCount) ->
- path_to_list(R, [<<"..">>|Acc], DotDotCount+1);
-path_to_list([P|R], Acc, DotDotCount) ->
- P1 = case P of
- <<":", Var/binary>> ->
- to_binding(Var);
- _ -> P
- end,
- path_to_list(R, [P1|Acc], DotDotCount).
+path_to_list([<<"..">> | R], Acc, DotDotCount) ->
+ path_to_list(R, [<<"..">> | Acc], DotDotCount + 1);
+path_to_list([P | R], Acc, DotDotCount) ->
+ P1 =
+ case P of
+ <<":", Var/binary>> ->
+ to_binding(Var);
+ _ ->
+ P
+ end,
+ path_to_list(R, [P1 | Acc], DotDotCount).
maybe_encode_bindings([]) ->
[];
-maybe_encode_bindings(Props) ->
- lists:foldl(fun
+maybe_encode_bindings(Props) ->
+ lists:foldl(
+ fun
({{bind, <<"*">>}, _V}, Acc) ->
Acc;
({{bind, K}, V}, Acc) ->
V1 = iolist_to_binary(maybe_json(K, V)),
- [{K, V1}|Acc]
- end, [], Props).
-
-decode_query_value({K,V}) ->
- case lists:member(K, ["key", "startkey", "start_key",
- "endkey", "end_key", "keys"]) of
+ [{K, V1} | Acc]
+ end,
+ [],
+ Props
+ ).
+
+decode_query_value({K, V}) ->
+ case
+ lists:member(K, [
+ "key",
+ "startkey",
+ "start_key",
+ "endkey",
+ "end_key",
+ "keys"
+ ])
+ of
true ->
{to_binding(K), ?JSON_DECODE(V)};
false ->
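The couch_httpd_rewrite.erl changes above also show how erlfmt handles a `case` whose scrutinee no longer fits on one line, as in maybe_json/2 and decode_query_value/1: the expression is stacked between `case` and `of`. A minimal sketch under the same convention; the module name and the key list below are invented, not the rewrite handler's actual list:

```
%% Sketch only (rewrite_fmt_example is not a CouchDB module).
-module(rewrite_fmt_example).
-export([maybe_tag/2]).

%% Tag values for keys that would need JSON decoding; the scrutinee of
%% the case is too wide for one line, so it sits between `case` and `of`.
maybe_tag(Key, Value) ->
    case
        lists:member(Key, [
            "key",
            "startkey",
            "endkey",
            "keys"
        ])
    of
        true -> {json, Value};
        false -> Value
    end.
```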
diff --git a/src/couch/src/couch_httpd_vhost.erl b/src/couch/src/couch_httpd_vhost.erl
index 409631d25..0bff6a36d 100644
--- a/src/couch/src/couch_httpd_vhost.erl
+++ b/src/couch/src/couch_httpd_vhost.erl
@@ -33,9 +33,10 @@
-define(RELISTEN_DELAY, 5000).
-record(vhosts_state, {
- vhosts,
- vhost_globals,
- vhosts_fun}).
+ vhosts,
+ vhost_globals,
+ vhosts_fun
+}).
%% doc The vhost manager.
%% This gen_server keeps state of vhosts added to the ini and tries to
@@ -109,34 +110,44 @@ dispatch_host_int(MochiReq) ->
#vhosts_state{
vhost_globals = VHostGlobals,
vhosts = VHosts,
- vhosts_fun=Fun} = get_state(),
+ vhosts_fun = Fun
+ } = get_state(),
{"/" ++ VPath, Query, Fragment} = mochiweb_util:urlsplit_path(MochiReq:get(raw_path)),
- VPathParts = string:tokens(VPath, "/"),
+ VPathParts = string:tokens(VPath, "/"),
VHost = host(MochiReq),
{VHostParts, VhostPort} = split_host_port(VHost),
- FinalMochiReq = case try_bind_vhost(VHosts, lists:reverse(VHostParts),
- VhostPort, VPathParts) of
- no_vhost_matched -> MochiReq;
- {VhostTarget, NewPath} ->
- case vhost_global(VHostGlobals, MochiReq) of
- true ->
- MochiReq;
- _Else ->
- NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query,
- Fragment}),
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- NewPath1,
- MochiReq:get(version),
- MochiReq:get(headers)),
- Fun(MochiReq1, VhostTarget)
- end
- end,
+ FinalMochiReq =
+ case
+ try_bind_vhost(
+ VHosts,
+ lists:reverse(VHostParts),
+ VhostPort,
+ VPathParts
+ )
+ of
+ no_vhost_matched ->
+ MochiReq;
+ {VhostTarget, NewPath} ->
+ case vhost_global(VHostGlobals, MochiReq) of
+ true ->
+ MochiReq;
+ _Else ->
+ NewPath1 = mochiweb_util:urlunsplit_path({NewPath, Query, Fragment}),
+ MochiReq1 = mochiweb_request:new(
+ MochiReq:get(socket),
+ MochiReq:get(method),
+ NewPath1,
+ MochiReq:get(version),
+ MochiReq:get(headers)
+ ),
+ Fun(MochiReq1, VhostTarget)
+ end
+ end,
FinalMochiReq.
-append_path("/"=_Target, "/"=_Path) ->
+append_path("/" = _Target, "/" = _Path) ->
"/";
append_path(Target, Path) ->
Target ++ Path.
@@ -148,15 +159,20 @@ redirect_to_vhost(MochiReq, VhostTarget) ->
couch_log:debug("Vhost Target: '~p'~n", [Target]),
- Headers = mochiweb_headers:enter("x-couchdb-vhost-path", Path,
- MochiReq:get(headers)),
+ Headers = mochiweb_headers:enter(
+ "x-couchdb-vhost-path",
+ Path,
+ MochiReq:get(headers)
+ ),
% build a new mochiweb request
- MochiReq1 = mochiweb_request:new(MochiReq:get(socket),
- MochiReq:get(method),
- Target,
- MochiReq:get(version),
- Headers),
+ MochiReq1 = mochiweb_request:new(
+ MochiReq:get(socket),
+ MochiReq:get(method),
+ Target,
+ MochiReq:get(version),
+ Headers
+ ),
    % cleanup, it forces mochiweb to reparse the raw uri.
MochiReq1:cleanup(),
MochiReq1.
@@ -164,23 +180,25 @@ redirect_to_vhost(MochiReq, VhostTarget) ->
%% if so, then it will not be rewritten, but will run as a normal couchdb request.
%% normally you'd use this for _uuids, _utils and a few of the others you want to
%% keep available on vhosts. You can also use it to make databases 'global'.
-vhost_global( VhostGlobals, MochiReq) ->
+vhost_global(VhostGlobals, MochiReq) ->
RawUri = MochiReq:get(raw_path),
{"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
- Front = case couch_httpd:partition(Path) of
- {"", "", ""} ->
- "/"; % Special case the root url handler
- {FirstPart, _, _} ->
- FirstPart
- end,
- [true] == [true||V <- VhostGlobals, V == Front].
+ Front =
+ case couch_httpd:partition(Path) of
+ {"", "", ""} ->
+ % Special case the root url handler
+ "/";
+ {FirstPart, _, _} ->
+ FirstPart
+ end,
+ [true] == [true || V <- VhostGlobals, V == Front].
%% bind host
%% first it tries to bind the port, then the hostname.
try_bind_vhost([], _HostParts, _Port, _PathParts) ->
no_vhost_matched;
-try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
+try_bind_vhost([VhostSpec | Rest], HostParts, Port, PathParts) ->
{{VHostParts, VPort, VPath}, Path} = VhostSpec,
case bind_port(VPort, Port) of
ok ->
@@ -191,12 +209,18 @@ try_bind_vhost([VhostSpec|Rest], HostParts, Port, PathParts) ->
Path1 = make_target(Path, Bindings, Remainings, []),
{make_path(Path1), make_path(PathParts1)};
fail ->
- try_bind_vhost(Rest, HostParts, Port,
- PathParts)
+ try_bind_vhost(
+ Rest,
+ HostParts,
+ Port,
+ PathParts
+ )
end;
- fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
+ fail ->
+ try_bind_vhost(Rest, HostParts, Port, PathParts)
end;
- fail -> try_bind_vhost(Rest, HostParts, Port, PathParts)
+ fail ->
+ try_bind_vhost(Rest, HostParts, Port, PathParts)
end.
%% doc: build new path from bindings. bindings are query args
@@ -209,72 +233,82 @@ make_target([], _Bindings, _Remaining, Acc) ->
make_target([?MATCH_ALL], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_target([?MATCH_ALL|_Rest], _Bindings, Remaining, Acc) ->
+make_target([?MATCH_ALL | _Rest], _Bindings, Remaining, Acc) ->
Acc1 = lists:reverse(Acc) ++ Remaining,
Acc1;
-make_target([{bind, P}|Rest], Bindings, Remaining, Acc) ->
- P2 = case couch_util:get_value({bind, P}, Bindings) of
- undefined -> "undefined";
- P1 -> P1
- end,
- make_target(Rest, Bindings, Remaining, [P2|Acc]);
-make_target([P|Rest], Bindings, Remaining, Acc) ->
- make_target(Rest, Bindings, Remaining, [P|Acc]).
+make_target([{bind, P} | Rest], Bindings, Remaining, Acc) ->
+ P2 =
+ case couch_util:get_value({bind, P}, Bindings) of
+ undefined -> "undefined";
+ P1 -> P1
+ end,
+ make_target(Rest, Bindings, Remaining, [P2 | Acc]);
+make_target([P | Rest], Bindings, Remaining, Acc) ->
+ make_target(Rest, Bindings, Remaining, [P | Acc]).
%% bind port
bind_port(Port, Port) -> ok;
bind_port('*', _) -> ok;
-bind_port(_,_) -> fail.
+bind_port(_, _) -> fail.
%% bind vhost
-bind_vhost([],[], Bindings) -> {ok, Bindings, []};
-bind_vhost([?MATCH_ALL], [], _Bindings) -> fail;
-bind_vhost([?MATCH_ALL], Rest, Bindings) -> {ok, Bindings, Rest};
-bind_vhost([], _HostParts, _Bindings) -> fail;
-bind_vhost([{bind, Token}|Rest], [Match|RestHost], Bindings) ->
- bind_vhost(Rest, RestHost, [{{bind, Token}, Match}|Bindings]);
-bind_vhost([Cname|Rest], [Cname|RestHost], Bindings) ->
+bind_vhost([], [], Bindings) ->
+ {ok, Bindings, []};
+bind_vhost([?MATCH_ALL], [], _Bindings) ->
+ fail;
+bind_vhost([?MATCH_ALL], Rest, Bindings) ->
+ {ok, Bindings, Rest};
+bind_vhost([], _HostParts, _Bindings) ->
+ fail;
+bind_vhost([{bind, Token} | Rest], [Match | RestHost], Bindings) ->
+ bind_vhost(Rest, RestHost, [{{bind, Token}, Match} | Bindings]);
+bind_vhost([Cname | Rest], [Cname | RestHost], Bindings) ->
bind_vhost(Rest, RestHost, Bindings);
-bind_vhost(_, _, _) -> fail.
+bind_vhost(_, _, _) ->
+ fail.
%% bind path
bind_path([], PathParts) ->
{ok, PathParts};
bind_path(_VPathParts, []) ->
fail;
-bind_path([Path|VRest],[Path|Rest]) ->
- bind_path(VRest, Rest);
+bind_path([Path | VRest], [Path | Rest]) ->
+ bind_path(VRest, Rest);
bind_path(_, _) ->
fail.
% utilities
-
%% create vhost list from ini
host(MochiReq) ->
XHost = chttpd_util:get_chttpd_config(
- "x_forwarded_host", "X-Forwarded-Host"),
+ "x_forwarded_host", "X-Forwarded-Host"
+ ),
case MochiReq:get_header_value(XHost) of
undefined ->
case MochiReq:get_header_value("Host") of
undefined -> [];
Value1 -> Value1
end;
- Value -> Value
+ Value ->
+ Value
end.
make_vhosts() ->
- Vhosts = lists:foldl(fun
- ({_, ""}, Acc) ->
- Acc;
- ({Vhost, Path}, Acc) ->
- [{parse_vhost(Vhost), split_path(Path)}|Acc]
- end, [], config:get("vhosts")),
+ Vhosts = lists:foldl(
+ fun
+ ({_, ""}, Acc) ->
+ Acc;
+ ({Vhost, Path}, Acc) ->
+ [{parse_vhost(Vhost), split_path(Path)} | Acc]
+ end,
+ [],
+ config:get("vhosts")
+ ),
lists:reverse(lists:usort(Vhosts)).
-
parse_vhost(Vhost) ->
case urlsplit_netloc(Vhost, []) of
{[], Path} ->
@@ -289,15 +323,21 @@ parse_vhost(Vhost) ->
{H1, P, string:tokens(Path, "/")}
end.
-
split_host_port(HostAsString) ->
case string:rchr(HostAsString, $:) of
0 ->
{split_host(HostAsString), '*'};
N ->
- HostPart = string:substr(HostAsString, 1, N-1),
- case (catch erlang:list_to_integer(string:substr(HostAsString,
- N+1, length(HostAsString)))) of
+ HostPart = string:substr(HostAsString, 1, N - 1),
+ case
+ (catch erlang:list_to_integer(
+ string:substr(
+ HostAsString,
+ N + 1,
+ length(HostAsString)
+ )
+ ))
+ of
{'EXIT', _} ->
{split_host(HostAsString), '*'};
Port ->
@@ -311,36 +351,34 @@ split_host(HostAsString) ->
split_path(Path) ->
make_spec(string:tokens(Path, "/"), []).
-
make_spec([], Acc) ->
lists:reverse(Acc);
-make_spec([""|R], Acc) ->
+make_spec(["" | R], Acc) ->
make_spec(R, Acc);
-make_spec(["*"|R], Acc) ->
- make_spec(R, [?MATCH_ALL|Acc]);
-make_spec([P|R], Acc) ->
+make_spec(["*" | R], Acc) ->
+ make_spec(R, [?MATCH_ALL | Acc]);
+make_spec([P | R], Acc) ->
P1 = parse_var(P),
- make_spec(R, [P1|Acc]).
-
+ make_spec(R, [P1 | Acc]).
parse_var(P) ->
case P of
":" ++ Var ->
{bind, Var};
- _ -> P
+ _ ->
+ P
end.
-
% mochiweb doesn't export it.
urlsplit_netloc("", Acc) ->
{lists:reverse(Acc), ""};
-urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+urlsplit_netloc(Rest = [C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
{lists:reverse(Acc), Rest};
urlsplit_netloc([C | Rest], Acc) ->
urlsplit_netloc(Rest, [C | Acc]).
make_path(Parts) ->
- "/" ++ string:join(Parts,[?SEPARATOR]).
+ "/" ++ string:join(Parts, [?SEPARATOR]).
init(_) ->
ok = config:listen_for_changes(?MODULE, nil),
@@ -348,17 +386,19 @@ init(_) ->
%% load configuration
{VHostGlobals, VHosts, Fun} = load_conf(),
State = #vhosts_state{
- vhost_globals=VHostGlobals,
- vhosts=VHosts,
- vhosts_fun=Fun},
+ vhost_globals = VHostGlobals,
+ vhosts = VHosts,
+ vhosts_fun = Fun
+ },
{ok, State}.
handle_call(reload, _From, _State) ->
{VHostGlobals, VHosts, Fun} = load_conf(),
{reply, ok, #vhosts_state{
- vhost_globals=VHostGlobals,
- vhosts=VHosts,
- vhosts_fun=Fun}};
+ vhost_globals = VHostGlobals,
+ vhosts = VHosts,
+ vhosts_fun = Fun
+ }};
handle_call(get_state, _From, State) ->
{reply, State, State};
handle_call(_Msg, _From, State) ->
@@ -379,7 +419,6 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
handle_config_change("vhosts", _, _, _, _) ->
{ok, ?MODULE:reload()};
handle_config_change(_, _, _, _, _) ->
@@ -392,8 +431,11 @@ handle_config_terminate(_Server, _Reason, _State) ->
load_conf() ->
%% get vhost globals
- VHostGlobals = re:split("_utils, _uuids, _session, _users", "\\s*,\\s*",
- [{return, list}]),
+ VHostGlobals = re:split(
+ "_utils, _uuids, _session, _users",
+ "\\s*,\\s*",
+ [{return, list}]
+ ),
%% build vhosts matching rules
VHosts = make_vhosts(),
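make_vhosts/0 above picks up the erlfmt layout for a multi-clause anonymous fun inside lists:foldl/3: the clause heads drop to their own lines under `fun`, and the foldl arguments are split one per line. A hedged sketch of the same shape with an invented module (vhost_fold_example) and data:

```
%% Sketch only; mirrors the fun/foldl layout of make_vhosts/0 above.
-module(vhost_fold_example).
-export([drop_empty/1]).

%% Keep only {Key, Value} pairs whose value is non-empty, preserving order.
drop_empty(Pairs) ->
    lists:reverse(
        lists:foldl(
            fun
                ({_, ""}, Acc) ->
                    Acc;
                ({Key, Value}, Acc) ->
                    [{Key, Value} | Acc]
            end,
            [],
            Pairs
        )
    ).
```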
diff --git a/src/couch/src/couch_io_logger.erl b/src/couch/src/couch_io_logger.erl
index 188e031cb..f859874b6 100644
--- a/src/couch/src/couch_io_logger.erl
+++ b/src/couch/src/couch_io_logger.erl
@@ -20,7 +20,6 @@
stop_error/1
]).
-
start(undefined) ->
ok;
start(Dir) ->
@@ -42,7 +41,6 @@ start(Dir) ->
ok
end.
-
stop_noerror() ->
case get(logger_path) of
undefined ->
@@ -51,7 +49,6 @@ stop_noerror() ->
close_logs()
end.
-
stop_error(Err) ->
case get(logger_path) of
undefined ->
@@ -61,21 +58,17 @@ stop_error(Err) ->
close_logs()
end.
-
log_output(Data) ->
log(get(logger_out_fd), Data).
-
log_input(Data) ->
log(get(logger_in_fd), Data).
-
unix_time() ->
{Mega, Sec, USec} = os:timestamp(),
UnixTs = (Mega * 1000000 + Sec) * 1000000 + USec,
integer_to_list(UnixTs).
-
log_name() ->
Ts = unix_time(),
Pid0 = erlang:pid_to_list(self()),
@@ -83,12 +76,10 @@ log_name() ->
Pid2 = string:strip(Pid1, right, $>),
lists:flatten(io_lib:format("~s_~s", [Ts, Pid2])).
-
close_logs() ->
file:close(get(logger_out_fd)),
file:close(get(logger_in_fd)).
-
save_error_logs(Path, Err) ->
Otp = erlang:system_info(otp_release),
Msg = io_lib:format("Error: ~p~nNode: ~p~nOTP: ~p~n", [Err, node(), Otp]),
@@ -97,10 +88,9 @@ save_error_logs(Path, Err) ->
OFd = get(logger_in_fd),
file:position(IFd, 0),
file:position(OFd, 0),
- file:copy(IFd, Path ++ ".out.log"),
+ file:copy(IFd, Path ++ ".out.log"),
file:copy(OFd, Path ++ ".in.log").
-
log(undefined, _Data) ->
ok;
log(Fd, Data) ->
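The couch_key_tree.erl hunk that follows reformats the export list (one entry per line with a four-space indent) and the typespecs (spaces around `::` in named arguments). A small sketch of those two conventions on an invented module, not the real couch_key_tree API:

```
%% Sketch only; tree_spec_example is invented to show the export and
%% typespec layout erlfmt produces.
-module(tree_spec_example).

-export([
    count_nodes/1,
    leaf/1
]).

-type node_t() :: {Key :: term(), Value :: term(), [node_t()]}.

%% Count every node in a forest of trees.
-spec count_nodes([node_t()]) -> non_neg_integer().
count_nodes(Nodes) ->
    lists:sum([1 + count_nodes(Sub) || {_K, _V, Sub} <- Nodes]).

%% Build a childless node.
-spec leaf(term()) -> node_t().
leaf(Key) ->
    {Key, undefined, []}.
```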
diff --git a/src/couch/src/couch_key_tree.erl b/src/couch/src/couch_key_tree.erl
index 94150418e..84c786148 100644
--- a/src/couch/src/couch_key_tree.erl
+++ b/src/couch/src/couch_key_tree.erl
@@ -48,41 +48,43 @@
-module(couch_key_tree).
-export([
-count_leafs/1,
-find_missing/2,
-fold/3,
-get/2,
-get_all_leafs/1,
-get_all_leafs_full/1,
-get_full_key_paths/2,
-get_key_leafs/2,
-map/2,
-map_leafs/2,
-mapfold/3,
-multi_merge/2,
-merge/2,
-remove_leafs/2,
-stem/2
+ count_leafs/1,
+ find_missing/2,
+ fold/3,
+ get/2,
+ get_all_leafs/1,
+ get_all_leafs_full/1,
+ get_full_key_paths/2,
+ get_key_leafs/2,
+ map/2,
+ map_leafs/2,
+ mapfold/3,
+ multi_merge/2,
+ merge/2,
+ remove_leafs/2,
+ stem/2
]).
-include_lib("couch/include/couch_db.hrl").
--type treenode() :: {Key::term(), Value::term(), [Node::treenode()]}.
--type tree() :: {Depth::pos_integer(), [treenode()]}.
+-type treenode() :: {Key :: term(), Value :: term(), [Node :: treenode()]}.
+-type tree() :: {Depth :: pos_integer(), [treenode()]}.
-type revtree() :: [tree()].
-
%% @doc Merge multiple paths into the given tree.
-spec multi_merge(revtree(), tree()) -> revtree().
multi_merge(RevTree, Trees) ->
- lists:foldl(fun(Tree, RevTreeAcc) ->
- {NewRevTree, _} = merge(RevTreeAcc, Tree),
- NewRevTree
- end, RevTree, lists:sort(Trees)).
-
+ lists:foldl(
+ fun(Tree, RevTreeAcc) ->
+ {NewRevTree, _} = merge(RevTreeAcc, Tree),
+ NewRevTree
+ end,
+ RevTree,
+ lists:sort(Trees)
+ ).
%% @doc Merge a path into a tree.
-spec merge(revtree(), tree() | path()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
+ {revtree(), new_leaf | new_branch | internal_node}.
merge(RevTree, Tree) ->
{Merged, Result} = merge_tree(RevTree, Tree, []),
{lists:sort(Merged), Result}.
@@ -92,12 +94,12 @@ merge(RevTree, Tree) ->
%% If it can't find a branch that the new tree merges into, add it as a
%% new branch in the RevTree.
-spec merge_tree(revtree(), tree() | path(), revtree()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
+ {revtree(), new_leaf | new_branch | internal_node}.
merge_tree([], Tree, []) ->
{[Tree], new_leaf};
merge_tree([], Tree, MergeAcc) ->
- {[Tree|MergeAcc], new_branch};
-merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes}=Tree, MergeAcc) ->
+ {[Tree | MergeAcc], new_branch};
+merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes} = Tree, MergeAcc) ->
% For the intrepid observer following along at home, notice what we're
% doing here with (Depth - IDepth). This tells us which of the two
% branches (Nodes or INodes) we need to seek into. If Depth > IDepth
@@ -125,7 +127,7 @@ merge_tree([{Depth, Nodes} | Rest], {IDepth, INodes}=Tree, MergeAcc) ->
%% ends up running out of nodes we know that these two branches can
%% not be merged.
-spec merge_at([node()], integer(), [node()]) ->
- {revtree(), new_leaf | new_branch | internal_node} | fail.
+ {revtree(), new_leaf | new_branch | internal_node} | fail.
merge_at(_Nodes, _Pos, []) ->
fail;
merge_at([], _Pos, _INodes) ->
@@ -172,7 +174,7 @@ merge_at([Tree | Sibs], 0, INodes) ->
end.
-spec merge_extend(revtree(), revtree()) ->
- {revtree(), new_leaf | new_branch | internal_node}.
+ {revtree(), new_leaf | new_branch | internal_node}.
merge_extend([], B) when B =/= [] ->
% Most likely the insert branch simply extends this one, so the new
    % branch is exactly B. It's also possible that B is a branch because
@@ -189,7 +191,7 @@ merge_extend([{K, V1, SubA} | NextA], [{K, V2, SubB}]) ->
% level in the two branches.
{Merged, Result} = merge_extend(SubA, SubB),
{[{K, value_pref(V1, V2), Merged} | NextA], Result};
-merge_extend([{K1, _, _}=NodeA | Rest], [{K2, _, _}=NodeB]) when K1 > K2 ->
+merge_extend([{K1, _, _} = NodeA | Rest], [{K2, _, _} = NodeB]) when K1 > K2 ->
% Keys are ordered so we know this is where the insert branch needs
% to be inserted into the tree. We also know that this creates a new
% branch so we have a new leaf to report.
@@ -200,10 +202,11 @@ merge_extend([Tree | RestA], NextB) ->
% key in NextB might be larger than the largest key in RestA which
% means we've created a new branch.
{Merged, Result0} = merge_extend(RestA, NextB),
- Result = case length(Merged) == length(RestA) of
- true -> Result0;
- false -> new_branch
- end,
+ Result =
+ case length(Merged) == length(RestA) of
+ true -> Result0;
+ false -> new_branch
+ end,
{[Tree | Merged], Result}.
find_missing(_Tree, []) ->
@@ -228,17 +231,17 @@ find_missing_simple(Pos, [{Key, _, SubTree} | RestTree], SeachKeys) ->
SrcKeys3 = find_missing_simple(Pos + 1, SubTree, SrcKeys2),
ImpossibleKeys ++ find_missing_simple(Pos, RestTree, SrcKeys3).
-
filter_leafs([], _Keys, FilteredAcc, RemovedKeysAcc) ->
{FilteredAcc, RemovedKeysAcc};
-filter_leafs([{Pos, [{LeafKey, _}|_]} = Path |Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
+filter_leafs([{Pos, [{LeafKey, _} | _]} = Path | Rest], Keys, FilteredAcc, RemovedKeysAcc) ->
FilteredKeys = lists:delete({Pos, LeafKey}, Keys),
- if FilteredKeys == Keys ->
- % this leaf is not a key we are looking to remove
- filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
- true ->
- % this did match a key, remove both the node and the input key
- filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
+ if
+ FilteredKeys == Keys ->
+ % this leaf is not a key we are looking to remove
+ filter_leafs(Rest, Keys, [Path | FilteredAcc], RemovedKeysAcc);
+ true ->
+ % this did match a key, remove both the node and the input key
+ filter_leafs(Rest, FilteredKeys, FilteredAcc, [{Pos, LeafKey} | RemovedKeysAcc])
end.
% Removes any branches from the tree whose leaf node(s) are in the Keys
@@ -255,15 +258,18 @@ remove_leafs(Trees, Keys) ->
% convert paths back to trees
NewTree = lists:foldl(
- fun({StartPos, Path},TreeAcc) ->
+ fun({StartPos, Path}, TreeAcc) ->
[SingleTree] = lists:foldl(
- fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+ fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path
+ ),
{NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
NewTrees
- end, [], SortedPaths),
+ end,
+ [],
+ SortedPaths
+ ),
{NewTree, RemovedKeys}.
-
% get the leafs in the tree matching the keys. The matching key nodes can be
% leafs or inner nodes. If an inner node, then the leafs for that node
% are returned.
@@ -274,7 +280,7 @@ get_key_leafs(_, [], Acc) ->
{Acc, []};
get_key_leafs([], Keys, Acc) ->
{Acc, Keys};
-get_key_leafs([{Pos, Tree}|Rest], Keys, Acc) ->
+get_key_leafs([{Pos, Tree} | Rest], Keys, Acc) ->
{Gotten, RemainingKeys} = get_key_leafs_simple(Pos, [Tree], Keys, []),
get_key_leafs(Rest, RemainingKeys, Gotten ++ Acc).
@@ -282,7 +288,7 @@ get_key_leafs_simple(_Pos, _Tree, [], _PathAcc) ->
{[], []};
get_key_leafs_simple(_Pos, [], Keys, _PathAcc) ->
{[], Keys};
-get_key_leafs_simple(Pos, [{Key, _, SubTree}=Tree | RestTree], Keys, PathAcc) ->
+get_key_leafs_simple(Pos, [{Key, _, SubTree} = Tree | RestTree], Keys, PathAcc) ->
case lists:delete({Pos, Key}, Keys) of
Keys ->
% Same list, key not found
@@ -300,7 +306,6 @@ get_key_leafs_simple(Pos, [{Key, _, SubTree}=Tree | RestTree], Keys, PathAcc) ->
{ChildLeafs ++ SiblingLeafs, Keys4}
end.
-
get_key_leafs_simple2(_Pos, [], Keys, _PathAcc) ->
% No more tree to deal with so no more keys to return.
{[], Keys};
@@ -320,10 +325,12 @@ get_key_leafs_simple2(Pos, [{Key, _Value, SubTree} | RestTree], Keys, PathAcc) -
{SiblingLeafs, Keys4} = get_key_leafs_simple2(Pos, RestTree, Keys3, PathAcc),
{ChildLeafs ++ SiblingLeafs, Keys4}.
-
get(Tree, KeysToGet) ->
{KeyPaths, KeysNotFound} = get_full_key_paths(Tree, KeysToGet),
- FixedResults = [ {Value, {Pos, [Key0 || {Key0, _} <- Path]}} || {Pos, [{_Key, Value}|_]=Path} <- KeyPaths],
+ FixedResults = [
+ {Value, {Pos, [Key0 || {Key0, _} <- Path]}}
+ || {Pos, [{_Key, Value} | _] = Path} <- KeyPaths
+ ],
{FixedResults, KeysNotFound}.
get_full_key_paths(Tree, Keys) ->
@@ -333,11 +340,10 @@ get_full_key_paths(_, [], Acc) ->
{Acc, []};
get_full_key_paths([], Keys, Acc) ->
{Acc, Keys};
-get_full_key_paths([{Pos, Tree}|Rest], Keys, Acc) ->
+get_full_key_paths([{Pos, Tree} | Rest], Keys, Acc) ->
{Gotten, RemainingKeys} = get_full_key_paths(Pos, [Tree], Keys, []),
get_full_key_paths(Rest, RemainingKeys, Gotten ++ Acc).
-
get_full_key_paths(_Pos, _Tree, [], _KeyPathAcc) ->
{[], []};
get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
@@ -345,13 +351,17 @@ get_full_key_paths(_Pos, [], KeysToGet, _KeyPathAcc) ->
get_full_key_paths(Pos, [{KeyId, Value, SubTree} | RestTree], KeysToGet, KeyPathAcc) ->
KeysToGet2 = KeysToGet -- [{Pos, KeyId}],
CurrentNodeResult =
- case length(KeysToGet2) =:= length(KeysToGet) of
- true -> % not in the key list.
- [];
- false -> % this node is the key list. return it
- [{Pos, [{KeyId, Value} | KeyPathAcc]}]
- end,
- {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [{KeyId, Value} | KeyPathAcc]),
+ case length(KeysToGet2) =:= length(KeysToGet) of
+ % not in the key list.
+ true ->
+ [];
+ % this node is the key list. return it
+ false ->
+ [{Pos, [{KeyId, Value} | KeyPathAcc]}]
+ end,
+ {KeysGotten, KeysRemaining} = get_full_key_paths(Pos + 1, SubTree, KeysToGet2, [
+ {KeyId, Value} | KeyPathAcc
+ ]),
{KeysGotten2, KeysRemaining2} = get_full_key_paths(Pos, RestTree, KeysRemaining, KeyPathAcc),
{CurrentNodeResult ++ KeysGotten ++ KeysGotten2, KeysRemaining2}.
@@ -368,14 +378,15 @@ get_all_leafs_full_simple(_Pos, [], _KeyPathAcc) ->
get_all_leafs_full_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
[{Pos, [{KeyId, Value} | KeyPathAcc]} | get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc)];
get_all_leafs_full_simple(Pos, [{KeyId, Value, SubTree} | RestTree], KeyPathAcc) ->
- get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
+ get_all_leafs_full_simple(Pos + 1, SubTree, [{KeyId, Value} | KeyPathAcc]) ++
+ get_all_leafs_full_simple(Pos, RestTree, KeyPathAcc).
get_all_leafs(Trees) ->
get_all_leafs(Trees, []).
get_all_leafs([], Acc) ->
Acc;
-get_all_leafs([{Pos, Tree}|Rest], Acc) ->
+get_all_leafs([{Pos, Tree} | Rest], Acc) ->
get_all_leafs(Rest, get_all_leafs_simple(Pos, [Tree], []) ++ Acc).
get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
@@ -383,12 +394,12 @@ get_all_leafs_simple(_Pos, [], _KeyPathAcc) ->
get_all_leafs_simple(Pos, [{KeyId, Value, []} | RestTree], KeyPathAcc) ->
[{Value, {Pos, [KeyId | KeyPathAcc]}} | get_all_leafs_simple(Pos, RestTree, KeyPathAcc)];
get_all_leafs_simple(Pos, [{KeyId, _Value, SubTree} | RestTree], KeyPathAcc) ->
- get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
-
+ get_all_leafs_simple(Pos + 1, SubTree, [KeyId | KeyPathAcc]) ++
+ get_all_leafs_simple(Pos, RestTree, KeyPathAcc).
count_leafs([]) ->
0;
-count_leafs([{_Pos,Tree}|Rest]) ->
+count_leafs([{_Pos, Tree} | Rest]) ->
count_leafs_simple([Tree]) + count_leafs(Rest).
count_leafs_simple([]) ->
@@ -398,42 +409,49 @@ count_leafs_simple([{_Key, _Value, []} | RestTree]) ->
count_leafs_simple([{_Key, _Value, SubTree} | RestTree]) ->
count_leafs_simple(SubTree) + count_leafs_simple(RestTree).
-
fold(_Fun, Acc, []) ->
Acc;
-fold(Fun, Acc0, [{Pos, Tree}|Rest]) ->
+fold(Fun, Acc0, [{Pos, Tree} | Rest]) ->
Acc1 = fold_simple(Fun, Acc0, Pos, [Tree]),
fold(Fun, Acc1, Rest).
fold_simple(_Fun, Acc, _Pos, []) ->
Acc;
fold_simple(Fun, Acc0, Pos, [{Key, Value, SubTree} | RestTree]) ->
- Type = if SubTree == [] -> leaf; true -> branch end,
+ Type =
+ if
+ SubTree == [] -> leaf;
+ true -> branch
+ end,
Acc1 = Fun({Pos, Key}, Value, Type, Acc0),
- Acc2 = fold_simple(Fun, Acc1, Pos+1, SubTree),
+ Acc2 = fold_simple(Fun, Acc1, Pos + 1, SubTree),
fold_simple(Fun, Acc2, Pos, RestTree).
-
map(_Fun, []) ->
[];
-map(Fun, [{Pos, Tree}|Rest]) ->
+map(Fun, [{Pos, Tree} | Rest]) ->
case erlang:fun_info(Fun, arity) of
- {arity, 2} ->
- [NewTree] = map_simple(fun(A,B,_C) -> Fun(A,B) end, Pos, [Tree]),
- [{Pos, NewTree} | map(Fun, Rest)];
- {arity, 3} ->
- [NewTree] = map_simple(Fun, Pos, [Tree]),
- [{Pos, NewTree} | map(Fun, Rest)]
+ {arity, 2} ->
+ [NewTree] = map_simple(fun(A, B, _C) -> Fun(A, B) end, Pos, [Tree]),
+ [{Pos, NewTree} | map(Fun, Rest)];
+ {arity, 3} ->
+ [NewTree] = map_simple(Fun, Pos, [Tree]),
+ [{Pos, NewTree} | map(Fun, Rest)]
end.
map_simple(_Fun, _Pos, []) ->
[];
map_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
- Value2 = Fun({Pos, Key}, Value,
- if SubTree == [] -> leaf; true -> branch end),
+ Value2 = Fun(
+ {Pos, Key},
+ Value,
+ if
+ SubTree == [] -> leaf;
+ true -> branch
+ end
+ ),
[{Key, Value2, map_simple(Fun, Pos + 1, SubTree)} | map_simple(Fun, Pos, RestTree)].
-
mapfold(_Fun, Acc, []) ->
{[], Acc};
mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
@@ -444,16 +462,22 @@ mapfold(Fun, Acc, [{Pos, Tree} | Rest]) ->
mapfold_simple(_Fun, Acc, _Pos, []) ->
{[], Acc};
mapfold_simple(Fun, Acc, Pos, [{Key, Value, SubTree} | RestTree]) ->
- {Value2, Acc2} = Fun({Pos, Key}, Value,
- if SubTree == [] -> leaf; true -> branch end, Acc),
+ {Value2, Acc2} = Fun(
+ {Pos, Key},
+ Value,
+ if
+ SubTree == [] -> leaf;
+ true -> branch
+ end,
+ Acc
+ ),
{SubTree2, Acc3} = mapfold_simple(Fun, Acc2, Pos + 1, SubTree),
{RestTree2, Acc4} = mapfold_simple(Fun, Acc3, Pos, RestTree),
{[{Key, Value2, SubTree2} | RestTree2], Acc4}.
-
map_leafs(_Fun, []) ->
[];
-map_leafs(Fun, [{Pos, Tree}|Rest]) ->
+map_leafs(Fun, [{Pos, Tree} | Rest]) ->
[NewTree] = map_leafs_simple(Fun, Pos, [Tree]),
[{Pos, NewTree} | map_leafs(Fun, Rest)].
@@ -465,19 +489,22 @@ map_leafs_simple(Fun, Pos, [{Key, Value, []} | RestTree]) ->
map_leafs_simple(Fun, Pos, [{Key, Value, SubTree} | RestTree]) ->
[{Key, Value, map_leafs_simple(Fun, Pos + 1, SubTree)} | map_leafs_simple(Fun, Pos, RestTree)].
-
stem(Trees, Limit) ->
try
- {_, Branches} = lists:foldl(fun(Tree, {Seen, TreeAcc}) ->
- {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen),
- {NewSeen, NewBranches ++ TreeAcc}
- end, {sets:new(), []}, Trees),
+ {_, Branches} = lists:foldl(
+ fun(Tree, {Seen, TreeAcc}) ->
+ {NewSeen, NewBranches} = stem_tree(Tree, Limit, Seen),
+ {NewSeen, NewBranches ++ TreeAcc}
+ end,
+ {sets:new(), []},
+ Trees
+ ),
lists:sort(Branches)
- catch throw:dupe_keys ->
- repair_tree(Trees, Limit)
+ catch
+ throw:dupe_keys ->
+ repair_tree(Trees, Limit)
end.
-
stem_tree({Depth, Child}, Limit, Seen) ->
case stem_tree(Depth, Child, Limit, Seen) of
{NewSeen, _, NewChild, NewBranches} ->
@@ -486,41 +513,45 @@ stem_tree({Depth, Child}, Limit, Seen) ->
{NewSeen, NewBranches}
end.
-
stem_tree(_Depth, {Key, _Val, []} = Leaf, Limit, Seen) ->
{check_key(Key, Seen), Limit - 1, Leaf, []};
-
stem_tree(Depth, {Key, Val, Children}, Limit, Seen0) ->
Seen1 = check_key(Key, Seen0),
- FinalAcc = lists:foldl(fun(Child, Acc) ->
- {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc,
- case stem_tree(Depth + 1, Child, Limit, SeenAcc) of
- {NewSeenAcc, LimitPos, NewChild, NewBranches} ->
- NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
- NewChildAcc = [NewChild | ChildAcc],
- NewBranchAcc = NewBranches ++ BranchAcc,
- {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc};
- {NewSeenAcc, LimitPos, NewBranches} ->
- NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
- NewBranchAcc = NewBranches ++ BranchAcc,
- {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc}
- end
- end, {Seen1, -1, [], []}, Children),
+ FinalAcc = lists:foldl(
+ fun(Child, Acc) ->
+ {SeenAcc, LimitPosAcc, ChildAcc, BranchAcc} = Acc,
+ case stem_tree(Depth + 1, Child, Limit, SeenAcc) of
+ {NewSeenAcc, LimitPos, NewChild, NewBranches} ->
+ NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+ NewChildAcc = [NewChild | ChildAcc],
+ NewBranchAcc = NewBranches ++ BranchAcc,
+ {NewSeenAcc, NewLimitPosAcc, NewChildAcc, NewBranchAcc};
+ {NewSeenAcc, LimitPos, NewBranches} ->
+ NewLimitPosAcc = erlang:max(LimitPos, LimitPosAcc),
+ NewBranchAcc = NewBranches ++ BranchAcc,
+ {NewSeenAcc, NewLimitPosAcc, ChildAcc, NewBranchAcc}
+ end
+ end,
+ {Seen1, -1, [], []},
+ Children
+ ),
{FinalSeen, FinalLimitPos, FinalChildren, FinalBranches} = FinalAcc,
case FinalLimitPos of
N when N > 0, length(FinalChildren) > 0 ->
FinalNode = {Key, Val, lists:reverse(FinalChildren)},
{FinalSeen, FinalLimitPos - 1, FinalNode, FinalBranches};
0 when length(FinalChildren) > 0 ->
- NewBranches = lists:map(fun(Child) ->
- {Depth + 1, Child}
- end, lists:reverse(FinalChildren)),
+ NewBranches = lists:map(
+ fun(Child) ->
+ {Depth + 1, Child}
+ end,
+ lists:reverse(FinalChildren)
+ ),
{FinalSeen, -1, NewBranches ++ FinalBranches};
N when N < 0, length(FinalChildren) == 0 ->
{FinalSeen, FinalLimitPos - 1, FinalBranches}
end.
-
check_key(Key, Seen) ->
case sets:is_element(Key, Seen) of
true ->
@@ -529,29 +560,40 @@ check_key(Key, Seen) ->
sets:add_element(Key, Seen)
end.
-
repair_tree(Trees, Limit) ->
% flatten each branch in a tree into a tree path, sort by starting rev #
- Paths = lists:sort(lists:map(fun({Pos, Path}) ->
- StemmedPath = lists:sublist(Path, Limit),
- {Pos + 1 - length(StemmedPath), StemmedPath}
- end, get_all_leafs_full(Trees))),
+ Paths = lists:sort(
+ lists:map(
+ fun({Pos, Path}) ->
+ StemmedPath = lists:sublist(Path, Limit),
+ {Pos + 1 - length(StemmedPath), StemmedPath}
+ end,
+ get_all_leafs_full(Trees)
+ )
+ ),
% convert paths back to trees
lists:foldl(
- fun({StartPos, Path},TreeAcc) ->
+ fun({StartPos, Path}, TreeAcc) ->
[SingleTree] = lists:foldl(
- fun({K,V},NewTreeAcc) -> [{K,V,NewTreeAcc}] end, [], Path),
+ fun({K, V}, NewTreeAcc) -> [{K, V, NewTreeAcc}] end, [], Path
+ ),
{NewTrees, _} = merge(TreeAcc, {StartPos, SingleTree}),
NewTrees
- end, [], Paths).
-
-
-value_pref(Tuple, _) when is_tuple(Tuple),
- (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
+ end,
+ [],
+ Paths
+ ).
+
+value_pref(Tuple, _) when
+ is_tuple(Tuple),
+ (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4)
+->
Tuple;
-value_pref(_, Tuple) when is_tuple(Tuple),
- (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4) ->
+value_pref(_, Tuple) when
+ is_tuple(Tuple),
+ (tuple_size(Tuple) == 3 orelse tuple_size(Tuple) == 4)
+->
Tuple;
value_pref(?REV_MISSING, Other) ->
Other;
diff --git a/src/couch/src/couch_lru.erl b/src/couch/src/couch_lru.erl
index 618a0144f..1fad20280 100644
--- a/src/couch/src/couch_lru.erl
+++ b/src/couch/src/couch_lru.erl
@@ -24,14 +24,14 @@ insert(DbName, {Tree0, Dict0}) ->
update(DbName, {Tree0, Dict0}) ->
case dict:find(DbName, Dict0) of
- {ok, Old} ->
- New = couch_util:unique_monotonic_integer(),
- Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)),
- Dict = dict:store(DbName, New, Dict0),
- {Tree, Dict};
- error ->
- % We closed this database before processing the update. Ignore
- {Tree0, Dict0}
+ {ok, Old} ->
+ New = couch_util:unique_monotonic_integer(),
+ Tree = gb_trees:insert(New, DbName, gb_trees:delete(Old, Tree0)),
+ Dict = dict:store(DbName, New, Dict0),
+ {Tree, Dict};
+ error ->
+ % We closed this database before processing the update. Ignore
+ {Tree0, Dict0}
end.
%% Attempt to close the oldest idle database.
@@ -47,21 +47,22 @@ close_int({Lru, DbName, Iter}, {Tree, Dict} = Cache) ->
CouchDbsPidToName = couch_server:couch_dbs_pid_to_name(DbName),
case ets:update_element(CouchDbs, DbName, {#entry.lock, locked}) of
- true ->
- [#entry{db = Db, pid = Pid}] = ets:lookup(CouchDbs, DbName),
- case couch_db:is_idle(Db) of true ->
- true = ets:delete(CouchDbs, DbName),
- true = ets:delete(CouchDbsPidToName, Pid),
- exit(Pid, kill),
- {true, {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)}};
+ true ->
+ [#entry{db = Db, pid = Pid}] = ets:lookup(CouchDbs, DbName),
+ case couch_db:is_idle(Db) of
+ true ->
+ true = ets:delete(CouchDbs, DbName),
+ true = ets:delete(CouchDbsPidToName, Pid),
+ exit(Pid, kill),
+ {true, {gb_trees:delete(Lru, Tree), dict:erase(DbName, Dict)}};
+ false ->
+ ElemSpec = {#entry.lock, unlocked},
+ true = ets:update_element(CouchDbs, DbName, ElemSpec),
+ couch_stats:increment_counter([couchdb, couch_server, lru_skip]),
+ close_int(gb_trees:next(Iter), update(DbName, Cache))
+ end;
false ->
- ElemSpec = {#entry.lock, unlocked},
- true = ets:update_element(CouchDbs, DbName, ElemSpec),
- couch_stats:increment_counter([couchdb, couch_server, lru_skip]),
- close_int(gb_trees:next(Iter), update(DbName, Cache))
- end;
- false ->
- NewTree = gb_trees:delete(Lru, Tree),
- NewIter = gb_trees:iterator(NewTree),
- close_int(gb_trees:next(NewIter), {NewTree, dict:erase(DbName, Dict)})
-end.
+ NewTree = gb_trees:delete(Lru, Tree),
+ NewIter = gb_trees:iterator(NewTree),
+ close_int(gb_trees:next(NewIter), {NewTree, dict:erase(DbName, Dict)})
+ end.
diff --git a/src/couch/src/couch_multidb_changes.erl b/src/couch/src/couch_multidb_changes.erl
index e2bbda3e3..adb1b740f 100644
--- a/src/couch/src/couch_multidb_changes.erl
+++ b/src/couch/src/couch_multidb_changes.erl
@@ -15,27 +15,27 @@
-behaviour(gen_server).
-export([
- start_link/4
+ start_link/4
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3
]).
-export([
- changes_reader/3,
- changes_reader_cb/3
+ changes_reader/3,
+ changes_reader_cb/3
]).
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
--define(CTX, {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}).
+-define(CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]}}).
-define(AVG_DELAY_MSEC, 10).
-define(MAX_DELAY_MSEC, 120000).
@@ -68,20 +68,18 @@
-callback db_change(DbName :: binary(), Change :: term(), Context :: term()) ->
Context :: term().
-
% External API
-
% Opts list can contain:
% - `skip_ddocs` : Skip design docs
-spec start_link(binary(), module(), term(), list()) ->
{ok, pid()} | ignore | {error, term()}.
start_link(DbSuffix, Module, Context, Opts) when
- is_binary(DbSuffix), is_atom(Module), is_list(Opts) ->
+ is_binary(DbSuffix), is_atom(Module), is_list(Opts)
+->
gen_server:start_link(?MODULE, [DbSuffix, Module, Context, Opts], []).
-
% gen_server callbacks
init([DbSuffix, Module, Context, Opts]) ->
@@ -98,21 +96,21 @@ init([DbSuffix, Module, Context, Opts]) ->
skip_ddocs = proplists:is_defined(skip_ddocs, Opts)
}}.
-
terminate(_Reason, _State) ->
ok.
-
-handle_call({change, DbName, Change}, _From,
- #state{skip_ddocs=SkipDDocs, mod=Mod, ctx=Ctx} = State) ->
+handle_call(
+ {change, DbName, Change},
+ _From,
+ #state{skip_ddocs = SkipDDocs, mod = Mod, ctx = Ctx} = State
+) ->
case {SkipDDocs, is_design_doc(Change)} of
{true, true} ->
{reply, ok, State};
{_, _} ->
- {reply, ok, State#state{ctx=Mod:db_change(DbName, Change, Ctx)}}
+ {reply, ok, State#state{ctx = Mod:db_change(DbName, Change, Ctx)}}
end;
-
-handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid=Ets} = State) ->
+handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid = Ets} = State) ->
case ets:lookup(Ets, DbName) of
[] ->
true = ets:insert(Ets, {DbName, EndSeq, false});
@@ -121,11 +119,9 @@ handle_call({checkpoint, DbName, EndSeq}, _From, #state{tid=Ets} = State) ->
end,
{reply, ok, State}.
-
handle_cast({resume_scan, DbName}, State) ->
{noreply, resume_scan(DbName, State)}.
-
handle_info({'$couch_event', DbName, Event}, #state{suffix = Suf} = State) ->
case Suf =:= couch_db:dbname_suffix(DbName) of
true ->
@@ -133,23 +129,22 @@ handle_info({'$couch_event', DbName, Event}, #state{suffix = Suf} = State) ->
_ ->
{noreply, State}
end;
-
handle_info({'DOWN', Ref, _, _, Info}, #state{event_server = Ref} = State) ->
{stop, {couch_event_server_died, Info}, State};
-
handle_info({'EXIT', From, normal}, #state{scanner = From} = State) ->
- {noreply, State#state{scanner=nil}};
-
+ {noreply, State#state{scanner = nil}};
handle_info({'EXIT', From, Reason}, #state{scanner = From} = State) ->
{stop, {scanner_died, Reason}, State};
-
handle_info({'EXIT', From, Reason}, #state{pids = Pids} = State) ->
couch_log:debug("~p change feed exited ~p", [State#state.suffix, From]),
case lists:keytake(From, 2, Pids) of
{value, {DbName, From}, NewPids} ->
- if Reason == normal -> ok; true ->
- Fmt = "~s : Known change feed ~w died :: ~w",
- couch_log:error(Fmt, [?MODULE, From, Reason])
+ if
+ Reason == normal ->
+ ok;
+ true ->
+ Fmt = "~s : Known change feed ~w died :: ~w",
+ couch_log:error(Fmt, [?MODULE, From, Reason])
end,
NewState = State#state{pids = NewPids},
case ets:lookup(State#state.tid, DbName) of
@@ -165,15 +160,12 @@ handle_info({'EXIT', From, Reason}, #state{pids = Pids} = State) ->
couch_log:error(Fmt, [?MODULE, State#state.suffix, From, Reason]),
{stop, {unexpected_exit, From, Reason}, State}
end;
-
handle_info(_Msg, State) ->
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
% Private functions
-spec register_with_event_server(pid()) -> reference().
@@ -182,7 +174,6 @@ register_with_event_server(Server) ->
couch_event:register_all(Server),
Ref.
-
-spec db_callback(created | deleted | updated, binary(), #state{}) -> #state{}.
db_callback(created, DbName, #state{mod = Mod, ctx = Ctx} = State) ->
NewState = State#state{ctx = Mod:db_created(DbName, Ctx)},
@@ -194,9 +185,8 @@ db_callback(updated, DbName, State) ->
db_callback(_Other, _DbName, State) ->
State.
-
-spec resume_scan(binary(), #state{}) -> #state{}.
-resume_scan(DbName, #state{pids=Pids, tid=Ets} = State) ->
+resume_scan(DbName, #state{pids = Pids, tid = Ets} = State) ->
case {lists:keyfind(DbName, 1, Pids), ets:lookup(Ets, DbName)} of
{{DbName, _}, []} ->
% Found existing change feed, but not entry in ETS
@@ -217,20 +207,18 @@ resume_scan(DbName, #state{pids=Pids, tid=Ets} = State) ->
Mod = State#state.mod,
Ctx = Mod:db_found(DbName, State#state.ctx),
Pid = start_changes_reader(DbName, 0),
- State#state{ctx=Ctx, pids=[{DbName, Pid} | Pids]};
+ State#state{ctx = Ctx, pids = [{DbName, Pid} | Pids]};
{false, [{DbName, EndSeq, _}]} ->
% No existing change feed running. Found existing checkpoint.
% Start a new change reader from last checkpoint.
true = ets:insert(Ets, {DbName, EndSeq, false}),
Pid = start_changes_reader(DbName, EndSeq),
- State#state{pids=[{DbName, Pid} | Pids]}
- end.
-
+ State#state{pids = [{DbName, Pid} | Pids]}
+ end.
start_changes_reader(DbName, Since) ->
spawn_link(?MODULE, changes_reader, [self(), DbName, Since]).
-
changes_reader(Server, DbName, Since) ->
{ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
ChangesArgs = #changes_args{
@@ -242,7 +230,6 @@ changes_reader(Server, DbName, Since) ->
ChFun = couch_changes:handle_db_changes(ChangesArgs, {json_req, null}, Db),
ChFun({fun ?MODULE:changes_reader_cb/3, {Server, DbName}}).
-
changes_reader_cb({change, Change, _}, _, {Server, DbName}) ->
ok = gen_server:call(Server, {change, DbName, Change}, infinity),
{Server, DbName};
@@ -252,34 +239,35 @@ changes_reader_cb({stop, EndSeq}, _, {Server, DbName}) ->
changes_reader_cb(_, _, Acc) ->
Acc.
-
scan_all_dbs(Server, DbSuffix) when is_pid(Server) ->
ok = scan_local_db(Server, DbSuffix),
{ok, Db} = mem3_util:ensure_exists(
- config:get("mem3", "shards_db", "_dbs")),
+ config:get("mem3", "shards_db", "_dbs")
+ ),
ChangesFun = couch_changes:handle_db_changes(#changes_args{}, nil, Db),
ChangesFun({fun scan_changes_cb/3, {Server, DbSuffix, 1}}),
couch_db:close(Db).
-
scan_changes_cb({change, {Change}, _}, _, {_Server, DbSuffix, _Count} = Acc) ->
DbName = couch_util:get_value(<<"id">>, Change),
- case DbName of <<"_design/", _/binary>> -> Acc; _Else ->
- NameMatch = DbSuffix =:= couch_db:dbname_suffix(DbName),
- case {NameMatch, couch_replicator_utils:is_deleted(Change)} of
- {false, _} ->
- Acc;
- {true, true} ->
- Acc;
- {true, false} ->
- Shards = local_shards(DbName),
- lists:foldl(fun notify_fold/2, Acc, Shards)
- end
+ case DbName of
+ <<"_design/", _/binary>> ->
+ Acc;
+ _Else ->
+ NameMatch = DbSuffix =:= couch_db:dbname_suffix(DbName),
+ case {NameMatch, couch_replicator_utils:is_deleted(Change)} of
+ {false, _} ->
+ Acc;
+ {true, true} ->
+ Acc;
+ {true, false} ->
+ Shards = local_shards(DbName),
+ lists:foldl(fun notify_fold/2, Acc, Shards)
+ end
end;
scan_changes_cb(_, _, Acc) ->
Acc.
-
local_shards(DbName) ->
try
[ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)]
@@ -288,7 +276,6 @@ local_shards(DbName) ->
[]
end.
-
notify_fold(DbName, {Server, DbSuffix, Count}) ->
Jitter = jitter(Count),
spawn_link(fun() ->
@@ -297,7 +284,6 @@ notify_fold(DbName, {Server, DbSuffix, Count}) ->
end),
{Server, DbSuffix, Count + 1}.
-
% Jitter is proportional to the number of shards found so far. This is done to
% avoid a stampede and notifying the callback function with potentially a large
% number of shards back to back during startup.
@@ -305,7 +291,6 @@ jitter(N) ->
Range = min(2 * N * ?AVG_DELAY_MSEC, ?MAX_DELAY_MSEC),
couch_rand:uniform(Range).
-
scan_local_db(Server, DbSuffix) when is_pid(Server) ->
case couch_db:open_int(DbSuffix, [?CTX, sys_db, nologifmissing]) of
{ok, Db} ->
@@ -315,7 +300,6 @@ scan_local_db(Server, DbSuffix) when is_pid(Server) ->
ok
end.
-
is_design_doc({Change}) ->
case lists:keyfind(<<"id">>, 1, Change) of
false ->
@@ -324,13 +308,11 @@ is_design_doc({Change}) ->
is_design_doc_id(Id)
end.
-
is_design_doc_id(<<?DESIGN_DOC_PREFIX, _/binary>>) ->
true;
is_design_doc_id(_) ->
false.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -380,7 +362,6 @@ couch_multidb_changes_test_() ->
}
}.
-
setup_all() ->
mock_logs(),
mock_callback_mod(),
@@ -389,25 +370,31 @@ setup_all() ->
meck:expect(mem3_util, ensure_exists, 1, {ok, dbs}),
ChangesFun = meck:val(fun(_) -> ok end),
meck:expect(couch_changes, handle_db_changes, 3, ChangesFun),
- meck:expect(couch_db, open_int,
- fun(?DBNAME, [?CTX, sys_db]) -> {ok, db};
+ meck:expect(
+ couch_db,
+ open_int,
+ fun
+ (?DBNAME, [?CTX, sys_db]) -> {ok, db};
(_, _) -> {not_found, no_db_file}
- end),
+ end
+ ),
meck:expect(couch_db, close, 1, ok),
mock_changes_reader(),
% create process to stand in for couch_event_server
% mocking erlang:monitor doesn't work, so give it real process to monitor
- EvtPid = spawn_link(fun() -> receive looper -> ok end end),
+ EvtPid = spawn_link(fun() ->
+ receive
+ looper -> ok
+ end
+ end),
true = register(couch_event_server, EvtPid),
EvtPid.
-
teardown_all(EvtPid) ->
unlink(EvtPid),
exit(EvtPid, kill),
meck:unload().
-
setup() ->
meck:reset([
?MOD,
@@ -417,11 +404,9 @@ setup() ->
couch_log
]).
-
teardown(_) ->
ok.
-
t_handle_call_change() ->
?_test(begin
State = mock_state(),
@@ -431,7 +416,6 @@ t_handle_call_change() ->
?assert(meck:called(?MOD, db_change, [?DBNAME, Change, zig]))
end).
-
t_handle_call_change_filter_design_docs() ->
?_test(begin
State0 = mock_state(),
@@ -442,7 +426,6 @@ t_handle_call_change_filter_design_docs() ->
?assertNot(meck:called(?MOD, db_change, [?DBNAME, Change, zig]))
end).
-
t_handle_call_checkpoint_new() ->
?_test(begin
Tid = mock_ets(),
@@ -452,7 +435,6 @@ t_handle_call_checkpoint_new() ->
ets:delete(Tid)
end).
-
t_handle_call_checkpoint_existing() ->
?_test(begin
Tid = mock_ets(),
@@ -463,7 +445,6 @@ t_handle_call_checkpoint_existing() ->
ets:delete(Tid)
end).
-
t_handle_info_created() ->
?_test(begin
Tid = mock_ets(),
@@ -473,18 +454,16 @@ t_handle_info_created() ->
?assert(meck:called(?MOD, db_created, [?DBNAME, zig]))
end).
-
t_handle_info_deleted() ->
- ?_test(begin
+ ?_test(begin
State = mock_state(),
handle_info_check({'$couch_event', ?DBNAME, deleted}, State),
?assert(meck:validate(?MOD)),
?assert(meck:called(?MOD, db_deleted, [?DBNAME, zig]))
end).
-
t_handle_info_updated() ->
- ?_test(begin
+ ?_test(begin
Tid = mock_ets(),
State = mock_state(Tid),
handle_info_check({'$couch_event', ?DBNAME, updated}, State),
@@ -492,9 +471,8 @@ t_handle_info_updated() ->
?assert(meck:called(?MOD, db_found, [?DBNAME, zig]))
end).
-
t_handle_info_other_event() ->
- ?_test(begin
+ ?_test(begin
State = mock_state(),
handle_info_check({'$couch_event', ?DBNAME, somethingelse}, State),
?assertNot(meck:called(?MOD, db_created, [?DBNAME, somethingelse])),
@@ -502,15 +480,13 @@ t_handle_info_other_event() ->
?assertNot(meck:called(?MOD, db_found, [?DBNAME, somethingelse]))
end).
-
t_handle_info_created_other_db() ->
- ?_test(begin
+ ?_test(begin
State = mock_state(),
handle_info_check({'$couch_event', <<"otherdb">>, created}, State),
?assertNot(meck:called(?MOD, db_created, [?DBNAME, zig]))
end).
-
t_handle_info_scanner_exit_normal() ->
?_test(begin
Res = handle_info({'EXIT', spid, normal}, mock_state()),
@@ -519,32 +495,28 @@ t_handle_info_scanner_exit_normal() ->
?assertEqual(nil, RState#state.scanner)
end).
-
t_handle_info_scanner_crashed() ->
?_test(begin
Res = handle_info({'EXIT', spid, oops}, mock_state()),
?assertMatch({stop, {scanner_died, oops}, _State}, Res)
end).
-
t_handle_info_event_server_exited() ->
?_test(begin
Res = handle_info({'DOWN', esref, type, espid, reason}, mock_state()),
?assertMatch({stop, {couch_event_server_died, reason}, _}, Res)
end).
-
t_handle_info_unknown_pid_exited() ->
?_test(begin
State0 = mock_state(),
- Res0 = handle_info({'EXIT', somepid, normal}, State0),
+ Res0 = handle_info({'EXIT', somepid, normal}, State0),
?assertMatch({noreply, State0}, Res0),
State1 = mock_state(),
Res1 = handle_info({'EXIT', somepid, oops}, State1),
?assertMatch({stop, {unexpected_exit, somepid, oops}, State1}, Res1)
end).
-
t_handle_info_change_feed_exited() ->
?_test(begin
Tid0 = mock_ets(),
@@ -563,7 +535,6 @@ t_handle_info_change_feed_exited() ->
ets:delete(Tid1)
end).
-
t_handle_info_change_feed_exited_and_need_rescan() ->
?_test(begin
Tid = mock_ets(),
@@ -582,7 +553,6 @@ t_handle_info_change_feed_exited_and_need_rescan() ->
ets:delete(Tid)
end).
-
t_spawn_changes_reader() ->
?_test(begin
Pid = start_changes_reader(?DBNAME, 3),
@@ -592,16 +562,20 @@ t_spawn_changes_reader() ->
?assert(meck:validate(couch_db)),
?assert(meck:validate(couch_changes)),
?assert(meck:called(couch_db, open_int, [?DBNAME, [?CTX, sys_db]])),
- ?assert(meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 3,
- feed = "normal",
- timeout = infinity
- }, {json_req, null}, db]))
+ ?assert(
+ meck:called(couch_changes, handle_db_changes, [
+ #changes_args{
+ include_docs = true,
+ since = 3,
+ feed = "normal",
+ timeout = infinity
+ },
+ {json_req, null},
+ db
+ ])
+ )
end).
-
t_changes_reader_cb_change() ->
?_test(begin
{ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []),
@@ -613,7 +587,6 @@ t_changes_reader_cb_change() ->
exit(Pid, kill)
end).
-
t_changes_reader_cb_stop() ->
?_test(begin
{ok, Pid} = start_link(?SUFFIX, ?MOD, zig, []),
@@ -626,11 +599,9 @@ t_changes_reader_cb_stop() ->
exit(Pid, kill)
end).
-
t_changes_reader_cb_other() ->
?_assertEqual(acc, changes_reader_cb(other, chtype, acc)).
-
t_handle_call_resume_scan_no_chfeed_no_ets_entry() ->
?_test(begin
Tid = mock_ets(),
@@ -644,17 +615,21 @@ t_handle_call_resume_scan_no_chfeed_no_ets_entry() ->
[{?DBNAME, Pid}] = RState#state.pids,
ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 0,
- feed = "normal",
- timeout = infinity
- }, {json_req, null}, db])),
+ ?assert(
+ meck:called(couch_changes, handle_db_changes, [
+ #changes_args{
+ include_docs = true,
+ since = 0,
+ feed = "normal",
+ timeout = infinity
+ },
+ {json_req, null},
+ db
+ ])
+ ),
ets:delete(Tid)
end).
-
t_handle_call_resume_scan_chfeed_no_ets_entry() ->
?_test(begin
Tid = mock_ets(),
@@ -667,7 +642,6 @@ t_handle_call_resume_scan_chfeed_no_ets_entry() ->
kill_mock_changes_reader_and_get_its_args(Pid)
end).
-
t_handle_call_resume_scan_chfeed_ets_entry() ->
?_test(begin
Tid = mock_ets(),
@@ -681,7 +655,6 @@ t_handle_call_resume_scan_chfeed_ets_entry() ->
kill_mock_changes_reader_and_get_its_args(Pid)
end).
-
t_handle_call_resume_scan_no_chfeed_ets_entry() ->
?_test(begin
Tid = mock_ets(),
@@ -694,92 +667,96 @@ t_handle_call_resume_scan_no_chfeed_ets_entry() ->
[{?DBNAME, Pid}] = RState#state.pids,
ChArgs = kill_mock_changes_reader_and_get_its_args(Pid),
?assertEqual({self(), ?DBNAME}, ChArgs),
- ?assert(meck:called(couch_changes, handle_db_changes, [
- #changes_args{
- include_docs = true,
- since = 1,
- feed = "normal",
- timeout = infinity
- }, {json_req, null}, db])),
+ ?assert(
+ meck:called(couch_changes, handle_db_changes, [
+ #changes_args{
+ include_docs = true,
+ since = 1,
+ feed = "normal",
+ timeout = infinity
+ },
+ {json_req, null},
+ db
+ ])
+ ),
ets:delete(Tid)
end).
-
t_start_link() ->
?_test(begin
{ok, Pid} = start_link(?SUFFIX, ?MOD, nil, []),
?assert(is_pid(Pid)),
- ?assertMatch(#state{
- mod = ?MOD,
- suffix = ?SUFFIX,
- ctx = nil,
- pids = [],
- skip_ddocs = false
- }, sys:get_state(Pid)),
+ ?assertMatch(
+ #state{
+ mod = ?MOD,
+ suffix = ?SUFFIX,
+ ctx = nil,
+ pids = [],
+ skip_ddocs = false
+ },
+ sys:get_state(Pid)
+ ),
unlink(Pid),
exit(Pid, kill),
?assert(meck:called(couch_event, register_all, [Pid]))
end).
-
t_start_link_no_ddocs() ->
?_test(begin
{ok, Pid} = start_link(?SUFFIX, ?MOD, nil, [skip_ddocs]),
?assert(is_pid(Pid)),
- ?assertMatch(#state{
- mod = ?MOD,
- suffix = ?SUFFIX,
- ctx = nil,
- pids = [],
- skip_ddocs = true
- }, sys:get_state(Pid)),
+ ?assertMatch(
+ #state{
+ mod = ?MOD,
+ suffix = ?SUFFIX,
+ ctx = nil,
+ pids = [],
+ skip_ddocs = true
+ },
+ sys:get_state(Pid)
+ ),
unlink(Pid),
exit(Pid, kill)
end).
-
t_misc_gen_server_callbacks() ->
?_test(begin
?assertEqual(ok, terminate(reason, state)),
?assertEqual({ok, state}, code_change(old, state, extra))
end).
-
scan_dbs_test_() ->
-{
- setup,
- fun() ->
- Ctx = test_util:start_couch([mem3, fabric]),
- GlobalDb = ?tempdb(),
- ok = fabric:create_db(GlobalDb, [?CTX]),
- #shard{name = LocalDb} = hd(mem3:local_shards(GlobalDb)),
- {Ctx, GlobalDb, LocalDb}
- end,
- fun({Ctx, GlobalDb, _LocalDb}) ->
- fabric:delete_db(GlobalDb, [?CTX]),
- test_util:stop_couch(Ctx)
- end,
- {with, [
- fun t_find_shard/1,
- fun t_shard_not_found/1,
- fun t_pass_local/1,
- fun t_fail_local/1
- ]}
-}.
-
+ {
+ setup,
+ fun() ->
+ Ctx = test_util:start_couch([mem3, fabric]),
+ GlobalDb = ?tempdb(),
+ ok = fabric:create_db(GlobalDb, [?CTX]),
+ #shard{name = LocalDb} = hd(mem3:local_shards(GlobalDb)),
+ {Ctx, GlobalDb, LocalDb}
+ end,
+ fun({Ctx, GlobalDb, _LocalDb}) ->
+ fabric:delete_db(GlobalDb, [?CTX]),
+ test_util:stop_couch(Ctx)
+ end,
+ {with, [
+ fun t_find_shard/1,
+ fun t_shard_not_found/1,
+ fun t_pass_local/1,
+ fun t_fail_local/1
+ ]}
+ }.
t_find_shard({_, DbName, _}) ->
?_test(begin
?assertEqual(2, length(local_shards(DbName)))
end).
-
t_shard_not_found(_) ->
?_test(begin
?assertEqual([], local_shards(?tempdb()))
end).
-
t_pass_local({_, _, LocalDb}) ->
?_test(begin
scan_local_db(self(), LocalDb),
@@ -787,11 +764,10 @@ t_pass_local({_, _, LocalDb}) ->
{'$gen_cast', Msg} ->
?assertEqual(Msg, {resume_scan, LocalDb})
after 0 ->
- ?assert(false)
+ ?assert(false)
end
end).
-
t_fail_local({_, _, LocalDb}) ->
?_test(begin
scan_local_db(self(), <<"some_other_db">>),
@@ -799,11 +775,10 @@ t_fail_local({_, _, LocalDb}) ->
{'$gen_cast', Msg} ->
?assertNotEqual(Msg, {resume_scan, LocalDb})
after 0 ->
- ?assert(true)
+ ?assert(true)
end
end).
-
% Test helper functions
mock_logs() ->
@@ -812,7 +787,6 @@ mock_logs() ->
meck:expect(couch_log, info, 2, ok),
meck:expect(couch_log, debug, 2, ok).
-
mock_callback_mod() ->
meck:new(?MOD, [non_strict]),
meck:expect(?MOD, db_created, fun(_DbName, Ctx) -> Ctx end),
@@ -820,7 +794,6 @@ mock_callback_mod() ->
meck:expect(?MOD, db_found, fun(_DbName, Ctx) -> Ctx end),
meck:expect(?MOD, db_change, fun(_DbName, _Change, Ctx) -> Ctx end).
-
mock_changes_reader_loop({_CbFun, {Server, DbName}}) ->
receive
die ->
@@ -834,23 +807,23 @@ kill_mock_changes_reader_and_get_its_args(Pid) ->
receive
{'DOWN', Ref, _, Pid, {Server, DbName}} ->
{Server, DbName}
- after 1000 ->
- erlang:error(spawn_change_reader_timeout)
+ after 1000 ->
+ erlang:error(spawn_change_reader_timeout)
end.
-
mock_changes_reader() ->
- meck:expect(couch_changes, handle_db_changes,
+ meck:expect(
+ couch_changes,
+ handle_db_changes,
fun
(_ChArgs, _Req, db) -> fun mock_changes_reader_loop/1;
(_ChArgs, _Req, dbs) -> fun(_) -> ok end
- end).
-
+ end
+ ).
mock_ets() ->
ets:new(multidb_test_ets, [set, public]).
-
mock_state() ->
#state{
mod = ?MOD,
@@ -858,19 +831,17 @@ mock_state() ->
suffix = ?SUFFIX,
event_server = esref,
scanner = spid,
- pids = []}.
-
+ pids = []
+ }.
mock_state(Ets) ->
State = mock_state(),
State#state{tid = Ets}.
-
mock_state(Ets, Pid) ->
State = mock_state(Ets),
State#state{pids = [{?DBNAME, Pid}]}.
-
change_row(Id) when is_binary(Id) ->
{[
{<<"seq">>, 1},
@@ -879,13 +850,10 @@ change_row(Id) when is_binary(Id) ->
{doc, {[{<<"_id">>, Id}, {<<"_rev">>, <<"1-f00">>}]}}
]}.
-
handle_call_ok(Msg, State) ->
?assertMatch({reply, ok, _}, handle_call(Msg, from, State)).
-
handle_info_check(Msg, State) ->
?assertMatch({noreply, _}, handle_info(Msg, State)).
-
-endif.
diff --git a/src/couch/src/couch_native_process.erl b/src/couch/src/couch_native_process.erl
index eee8b2860..feea00c3a 100644
--- a/src/couch/src/couch_native_process.erl
+++ b/src/couch/src/couch_native_process.erl
@@ -41,8 +41,15 @@
-behaviour(gen_server).
-vsn(1).
--export([start_link/0,init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,
- handle_info/2]).
+-export([
+ start_link/0,
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ code_change/3,
+ handle_info/2
+]).
-export([set_timeout/2, prompt/2]).
-define(STATE, native_proc_state).
@@ -74,15 +81,15 @@ prompt(Pid, Data) when is_list(Data) ->
init([]) ->
V = config:get("query_server_config", "os_process_idle_limit", "300"),
Idle = list_to_integer(V) * 1000,
- {ok, #evstate{ddocs=dict:new(), idle=Idle}, Idle}.
+ {ok, #evstate{ddocs = dict:new(), idle = Idle}, Idle}.
handle_call({set_timeout, TimeOut}, _From, State) ->
- {reply, ok, State#evstate{timeout=TimeOut}, State#evstate.idle};
-
+ {reply, ok, State#evstate{timeout = TimeOut}, State#evstate.idle};
handle_call({prompt, Data}, _From, State) ->
- couch_log:debug("Prompt native qs: ~s",[?JSON_ENCODE(Data)]),
- {NewState, Resp} = try run(State, to_binary(Data)) of
- {S, R} -> {S, R}
+ couch_log:debug("Prompt native qs: ~s", [?JSON_ENCODE(Data)]),
+ {NewState, Resp} =
+ try run(State, to_binary(Data)) of
+ {S, R} -> {S, R}
catch
throw:{error, Why} ->
{State, [<<"error">>, Why, Why]}
@@ -118,14 +125,14 @@ handle_info(timeout, State) ->
gen_server:cast(couch_proc_manager, {os_proc_idle, self()}),
erlang:garbage_collect(),
{noreply, State, State#evstate.idle};
-handle_info({'EXIT',_,normal}, State) ->
+handle_info({'EXIT', _, normal}, State) ->
{noreply, State, State#evstate.idle};
-handle_info({'EXIT',_,Reason}, State) ->
+handle_info({'EXIT', _, Reason}, State) ->
{stop, Reason, State}.
terminate(_Reason, _State) -> ok.
code_change(_OldVersion, State, _Extra) -> {ok, State}.
-run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
+run(#evstate{list_pid = Pid} = State, [<<"list_row">>, Row]) when is_pid(Pid) ->
Pid ! {self(), list_row, Row},
receive
{Pid, chunks, Data} ->
@@ -137,124 +144,137 @@ run(#evstate{list_pid=Pid}=State, [<<"list_row">>, Row]) when is_pid(Pid) ->
throw({timeout, list_cleanup})
end,
process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid=nil}, [<<"end">>, Data]}
+ {State#evstate{list_pid = nil}, [<<"end">>, Data]}
after State#evstate.timeout ->
throw({timeout, list_row})
end;
-run(#evstate{list_pid=Pid}=State, [<<"list_end">>]) when is_pid(Pid) ->
+run(#evstate{list_pid = Pid} = State, [<<"list_end">>]) when is_pid(Pid) ->
Pid ! {self(), list_end},
Resp =
- receive
- {Pid, list_end, Data} ->
- receive
- {'EXIT', Pid, normal} -> ok
- after State#evstate.timeout ->
- throw({timeout, list_cleanup})
- end,
- [<<"end">>, Data]
- after State#evstate.timeout ->
- throw({timeout, list_end})
- end,
+ receive
+ {Pid, list_end, Data} ->
+ receive
+ {'EXIT', Pid, normal} -> ok
+ after State#evstate.timeout ->
+ throw({timeout, list_cleanup})
+ end,
+ [<<"end">>, Data]
+ after State#evstate.timeout ->
+ throw({timeout, list_end})
+ end,
process_flag(trap_exit, erlang:get(do_trap)),
- {State#evstate{list_pid=nil}, Resp};
-run(#evstate{list_pid=Pid}=State, _Command) when is_pid(Pid) ->
+ {State#evstate{list_pid = nil}, Resp};
+run(#evstate{list_pid = Pid} = State, _Command) when is_pid(Pid) ->
{State, [<<"error">>, list_error, list_error]};
-run(#evstate{ddocs=DDocs}, [<<"reset">>]) ->
- {#evstate{ddocs=DDocs}, true};
-run(#evstate{ddocs=DDocs, idle=Idle}, [<<"reset">>, QueryConfig]) ->
+run(#evstate{ddocs = DDocs}, [<<"reset">>]) ->
+ {#evstate{ddocs = DDocs}, true};
+run(#evstate{ddocs = DDocs, idle = Idle}, [<<"reset">>, QueryConfig]) ->
NewState = #evstate{
ddocs = DDocs,
query_config = QueryConfig,
idle = Idle
},
{NewState, true};
-run(#evstate{funs=Funs}=State, [<<"add_fun">> , BinFunc]) ->
+run(#evstate{funs = Funs} = State, [<<"add_fun">>, BinFunc]) ->
FunInfo = makefun(State, BinFunc),
- {State#evstate{funs=Funs ++ [FunInfo]}, true};
-run(State, [<<"map_doc">> , Doc]) ->
- Resp = lists:map(fun({Sig, Fun}) ->
- erlang:put(Sig, []),
- Fun(Doc),
- lists:reverse(erlang:get(Sig))
- end, State#evstate.funs),
+ {State#evstate{funs = Funs ++ [FunInfo]}, true};
+run(State, [<<"map_doc">>, Doc]) ->
+ Resp = lists:map(
+ fun({Sig, Fun}) ->
+ erlang:put(Sig, []),
+ Fun(Doc),
+ lists:reverse(erlang:get(Sig))
+ end,
+ State#evstate.funs
+ ),
{State, Resp};
run(State, [<<"reduce">>, Funs, KVs]) ->
{Keys, Vals} =
- lists:foldl(fun([K, V], {KAcc, VAcc}) ->
- {[K | KAcc], [V | VAcc]}
- end, {[], []}, KVs),
+ lists:foldl(
+ fun([K, V], {KAcc, VAcc}) ->
+ {[K | KAcc], [V | VAcc]}
+ end,
+ {[], []},
+ KVs
+ ),
Keys2 = lists:reverse(Keys),
Vals2 = lists:reverse(Vals),
{State, catch reduce(State, Funs, Keys2, Vals2, false)};
run(State, [<<"rereduce">>, Funs, Vals]) ->
{State, catch reduce(State, Funs, null, Vals, true)};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
+run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, <<"new">>, DDocId, DDoc]) ->
DDocs2 = store_ddoc(DDocs, DDocId, DDoc),
- {State#evstate{ddocs=DDocs2}, true};
-run(#evstate{ddocs=DDocs}=State, [<<"ddoc">>, DDocId | Rest]) ->
+ {State#evstate{ddocs = DDocs2}, true};
+run(#evstate{ddocs = DDocs} = State, [<<"ddoc">>, DDocId | Rest]) ->
DDoc = load_ddoc(DDocs, DDocId),
ddoc(State, DDoc, Rest);
run(_, Unknown) ->
couch_log:error("Native Process: Unknown command: ~p~n", [Unknown]),
throw({error, unknown_command}).
-
+
ddoc(State, {DDoc}, [FunPath, Args]) ->
% load fun from the FunPath
- BFun = lists:foldl(fun
- (Key, {Props}) when is_list(Props) ->
- couch_util:get_value(Key, Props, nil);
- (_Key, Fun) when is_binary(Fun) ->
- Fun;
- (_Key, nil) ->
- throw({error, not_found});
- (_Key, _Fun) ->
- throw({error, malformed_ddoc})
- end, {DDoc}, FunPath),
+ BFun = lists:foldl(
+ fun
+ (Key, {Props}) when is_list(Props) ->
+ couch_util:get_value(Key, Props, nil);
+ (_Key, Fun) when is_binary(Fun) ->
+ Fun;
+ (_Key, nil) ->
+ throw({error, not_found});
+ (_Key, _Fun) ->
+ throw({error, malformed_ddoc})
+ end,
+ {DDoc},
+ FunPath
+ ),
ddoc(State, makefun(State, BFun, {DDoc}), FunPath, Args).
ddoc(State, {_, Fun}, [<<"validate_doc_update">>], Args) ->
{State, (catch apply(Fun, Args))};
ddoc(State, {_, Fun}, [<<"rewrites">>], Args) ->
{State, (catch apply(Fun, Args))};
-ddoc(State, {_, Fun}, [<<"filters">>|_], [Docs, Req]) ->
+ddoc(State, {_, Fun}, [<<"filters">> | _], [Docs, Req]) ->
FilterFunWrapper = fun(Doc) ->
case catch Fun(Doc, Req) of
- true -> true;
- false -> false;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
+ true -> true;
+ false -> false;
+ {'EXIT', Error} -> couch_log:error("~p", [Error])
end
end,
Resp = lists:map(FilterFunWrapper, Docs),
{State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"views">>|_], [Docs]) ->
+ddoc(State, {_, Fun}, [<<"views">> | _], [Docs]) ->
MapFunWrapper = fun(Doc) ->
case catch Fun(Doc) of
- undefined -> true;
- ok -> false;
- false -> false;
- [_|_] -> true;
- {'EXIT', Error} -> couch_log:error("~p", [Error])
+ undefined -> true;
+ ok -> false;
+ false -> false;
+ [_ | _] -> true;
+ {'EXIT', Error} -> couch_log:error("~p", [Error])
end
end,
Resp = lists:map(MapFunWrapper, Docs),
{State, [true, Resp]};
-ddoc(State, {_, Fun}, [<<"shows">>|_], Args) ->
- Resp = case (catch apply(Fun, Args)) of
- FunResp when is_list(FunResp) ->
- FunResp;
- {FunResp} ->
- [<<"resp">>, {FunResp}];
- FunResp ->
- FunResp
- end,
+ddoc(State, {_, Fun}, [<<"shows">> | _], Args) ->
+ Resp =
+ case (catch apply(Fun, Args)) of
+ FunResp when is_list(FunResp) ->
+ FunResp;
+ {FunResp} ->
+ [<<"resp">>, {FunResp}];
+ FunResp ->
+ FunResp
+ end,
{State, Resp};
-ddoc(State, {_, Fun}, [<<"updates">>|_], Args) ->
- Resp = case (catch apply(Fun, Args)) of
- [JsonDoc, JsonResp] ->
- [<<"up">>, JsonDoc, JsonResp]
- end,
+ddoc(State, {_, Fun}, [<<"updates">> | _], Args) ->
+ Resp =
+ case (catch apply(Fun, Args)) of
+ [JsonDoc, JsonResp] ->
+ [<<"up">>, JsonDoc, JsonResp]
+ end,
{State, Resp};
-ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
+ddoc(State, {Sig, Fun}, [<<"lists">> | _], Args) ->
Self = self(),
SpawnFun = fun() ->
LastChunk = (catch apply(Fun, Args)),
@@ -270,22 +290,22 @@ ddoc(State, {Sig, Fun}, [<<"lists">>|_], Args) ->
ok
end,
LastChunks =
- case erlang:get(Sig) of
- undefined -> [LastChunk];
- OtherChunks -> [LastChunk | OtherChunks]
- end,
+ case erlang:get(Sig) of
+ undefined -> [LastChunk];
+ OtherChunks -> [LastChunk | OtherChunks]
+ end,
Self ! {self(), list_end, lists:reverse(LastChunks)}
end,
erlang:put(do_trap, process_flag(trap_exit, true)),
Pid = spawn_link(SpawnFun),
Resp =
- receive
- {Pid, start, Chunks, JsonResp} ->
- [<<"start">>, Chunks, JsonResp]
- after State#evstate.timeout ->
- throw({timeout, list_start})
- end,
- {State#evstate{list_pid=Pid}, Resp}.
+ receive
+ {Pid, start, Chunks, JsonResp} ->
+ [<<"start">>, Chunks, JsonResp]
+ after State#evstate.timeout ->
+ throw({timeout, list_start})
+ end,
+ {State#evstate{list_pid = Pid}, Resp}.
store_ddoc(DDocs, DDocId, DDoc) ->
dict:store(DDocId, DDoc, DDocs).
@@ -293,7 +313,11 @@ load_ddoc(DDocs, DDocId) ->
try dict:fetch(DDocId, DDocs) of
{DDoc} -> {DDoc}
catch
- _:_Else -> throw({error, ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s",[DDocId]))})
+ _:_Else ->
+ throw(
+ {error,
+ ?l2b(io_lib:format("Native Query Server missing DDoc with Id: ~s", [DDocId]))}
+ )
end.
bindings(State, Sig) ->
@@ -316,10 +340,10 @@ bindings(State, Sig, DDoc) ->
Send = fun(Chunk) ->
Curr =
- case erlang:get(Sig) of
- undefined -> [];
- Else -> Else
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ Else -> Else
+ end,
erlang:put(Sig, [Chunk | Curr])
end,
@@ -329,10 +353,10 @@ bindings(State, Sig, DDoc) ->
ok;
_ ->
Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
Self ! {self(), chunks, lists:reverse(Chunks)}
end,
erlang:put(Sig, []),
@@ -343,7 +367,7 @@ bindings(State, Sig, DDoc) ->
throw({timeout, list_pid_getrow})
end
end,
-
+
FoldRows = fun(Fun, Acc) -> foldrows(GetRow, Fun, Acc) end,
Bindings = [
@@ -357,7 +381,8 @@ bindings(State, Sig, DDoc) ->
case DDoc of
{_Props} ->
Bindings ++ [{'DDoc', DDoc}];
- _Else -> Bindings
+ _Else ->
+ Bindings
end.
% thanks to erlview, via:
@@ -373,30 +398,41 @@ makefun(State, Source, {DDoc}) ->
makefun(_State, Source, BindFuns) when is_list(BindFuns) ->
FunStr = binary_to_list(Source),
{ok, Tokens, _} = erl_scan:string(FunStr),
- Form = case (catch erl_parse:parse_exprs(Tokens)) of
- {ok, [ParsedForm]} ->
- ParsedForm;
- {error, {LineNum, _Mod, [Mesg, Params]}}=Error ->
- couch_log:error("Syntax error on line: ~p~n~s~p~n",
- [LineNum, Mesg, Params]),
- throw(Error)
- end,
- Bindings = lists:foldl(fun({Name, Fun}, Acc) ->
- erl_eval:add_binding(Name, Fun, Acc)
- end, erl_eval:new_bindings(), BindFuns),
+ Form =
+ case (catch erl_parse:parse_exprs(Tokens)) of
+ {ok, [ParsedForm]} ->
+ ParsedForm;
+ {error, {LineNum, _Mod, [Mesg, Params]}} = Error ->
+ couch_log:error(
+ "Syntax error on line: ~p~n~s~p~n",
+ [LineNum, Mesg, Params]
+ ),
+ throw(Error)
+ end,
+ Bindings = lists:foldl(
+ fun({Name, Fun}, Acc) ->
+ erl_eval:add_binding(Name, Fun, Acc)
+ end,
+ erl_eval:new_bindings(),
+ BindFuns
+ ),
{value, Fun, _} = erl_eval:expr(Form, Bindings),
Fun.
reduce(State, BinFuns, Keys, Vals, ReReduce) ->
- Funs = case is_list(BinFuns) of
- true ->
- lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
- _ ->
- [makefun(State, BinFuns)]
- end,
- Reds = lists:map(fun({_Sig, Fun}) ->
- Fun(Keys, Vals, ReReduce)
- end, Funs),
+ Funs =
+ case is_list(BinFuns) of
+ true ->
+ lists:map(fun(BF) -> makefun(State, BF) end, BinFuns);
+ _ ->
+ [makefun(State, BinFuns)]
+ end,
+ Reds = lists:map(
+ fun({_Sig, Fun}) ->
+ Fun(Keys, Vals, ReReduce)
+ end,
+ Funs
+ ),
[true, Reds].
foldrows(GetRow, ProcRow, Acc) ->
@@ -416,15 +452,15 @@ start_list_resp(Self, Sig) ->
case erlang:get(list_started) of
undefined ->
Headers =
- case erlang:get(list_headers) of
- undefined -> {[{<<"headers">>, {[]}}]};
- CurrHdrs -> CurrHdrs
- end,
+ case erlang:get(list_headers) of
+ undefined -> {[{<<"headers">>, {[]}}]};
+ CurrHdrs -> CurrHdrs
+ end,
Chunks =
- case erlang:get(Sig) of
- undefined -> [];
- CurrChunks -> CurrChunks
- end,
+ case erlang:get(Sig) of
+ undefined -> [];
+ CurrChunks -> CurrChunks
+ end,
Self ! {self(), start, lists:reverse(Chunks), Headers},
erlang:put(list_started, true),
erlang:put(Sig, []),
diff --git a/src/couch/src/couch_os_process.erl b/src/couch/src/couch_os_process.erl
index 63a241433..da5df5134 100644
--- a/src/couch/src/couch_os_process.erl
+++ b/src/couch/src/couch_os_process.erl
@@ -23,14 +23,14 @@
-define(PORT_OPTIONS, [stream, {line, 4096}, binary, exit_status, hide]).
--record(os_proc,
- {command,
- port,
- writer,
- reader,
- timeout=5000,
- idle
- }).
+-record(os_proc, {
+ command,
+ port,
+ writer,
+ reader,
+ timeout = 5000,
+ idle
+}).
start_link(Command) ->
start_link(Command, []).
@@ -55,7 +55,7 @@ prompt(Pid, Data) ->
{ok, Result} ->
Result;
Error ->
- couch_log:error("OS Process Error ~p :: ~p",[Pid,Error]),
+ couch_log:error("OS Process Error ~p :: ~p", [Pid, Error]),
throw(Error)
end.
@@ -72,21 +72,21 @@ readline(#os_proc{} = OsProc) ->
Res.
readline(#os_proc{port = Port} = OsProc, Acc) ->
receive
- {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
- readline(OsProc, <<Acc/binary,Data/binary>>);
- {Port, {data, {noeol, Data}}} when is_binary(Data) ->
- readline(OsProc, Data);
- {Port, {data, {noeol, Data}}} ->
- readline(OsProc, [Data|Acc]);
- {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
- [<<Acc/binary,Data/binary>>];
- {Port, {data, {eol, Data}}} when is_binary(Data) ->
- [Data];
- {Port, {data, {eol, Data}}} ->
- lists:reverse(Acc, Data);
- {Port, Err} ->
- catch port_close(Port),
- throw({os_process_error, Err})
+ {Port, {data, {noeol, Data}}} when is_binary(Acc) ->
+ readline(OsProc, <<Acc/binary, Data/binary>>);
+ {Port, {data, {noeol, Data}}} when is_binary(Data) ->
+ readline(OsProc, Data);
+ {Port, {data, {noeol, Data}}} ->
+ readline(OsProc, [Data | Acc]);
+ {Port, {data, {eol, <<Data/binary>>}}} when is_binary(Acc) ->
+ [<<Acc/binary, Data/binary>>];
+ {Port, {data, {eol, Data}}} when is_binary(Data) ->
+ [Data];
+ {Port, {data, {eol, Data}}} ->
+ lists:reverse(Acc, Data);
+ {Port, Err} ->
+ catch port_close(Port),
+ throw({os_process_error, Err})
after OsProc#os_proc.timeout ->
catch port_close(Port),
throw({os_process_error, "OS process timed out."})
@@ -95,8 +95,10 @@ readline(#os_proc{port = Port} = OsProc, Acc) ->
% Standard JSON functions
writejson(OsProc, Data) when is_record(OsProc, os_proc) ->
JsonData = ?JSON_ENCODE(Data),
- couch_log:debug("OS Process ~p Input :: ~s",
- [OsProc#os_proc.port, JsonData]),
+ couch_log:debug(
+ "OS Process ~p Input :: ~s",
+ [OsProc#os_proc.port, JsonData]
+ ),
true = writeline(OsProc, JsonData).
readjson(OsProc) when is_record(OsProc, os_proc) ->
@@ -109,24 +111,28 @@ readjson(OsProc) when is_record(OsProc, os_proc) ->
% command, otherwise return the raw JSON line to the caller.
pick_command(Line)
catch
- throw:abort ->
- {json, Line};
- throw:{cmd, _Cmd} ->
- case ?JSON_DECODE(Line) of
- [<<"log">>, Msg] when is_binary(Msg) ->
- % we got a message to log. Log it and continue
- couch_log:info("OS Process ~p Log :: ~s",
- [OsProc#os_proc.port, Msg]),
- readjson(OsProc);
- [<<"error">>, Id, Reason] ->
- throw({error, {couch_util:to_existing_atom(Id),Reason}});
- [<<"fatal">>, Id, Reason] ->
- couch_log:info("OS Process ~p Fatal Error :: ~s ~p",
- [OsProc#os_proc.port, Id, Reason]),
- throw({couch_util:to_existing_atom(Id),Reason});
- _Result ->
- {json, Line}
- end
+ throw:abort ->
+ {json, Line};
+ throw:{cmd, _Cmd} ->
+ case ?JSON_DECODE(Line) of
+ [<<"log">>, Msg] when is_binary(Msg) ->
+ % we got a message to log. Log it and continue
+ couch_log:info(
+ "OS Process ~p Log :: ~s",
+ [OsProc#os_proc.port, Msg]
+ ),
+ readjson(OsProc);
+ [<<"error">>, Id, Reason] ->
+ throw({error, {couch_util:to_existing_atom(Id), Reason}});
+ [<<"fatal">>, Id, Reason] ->
+ couch_log:info(
+ "OS Process ~p Fatal Error :: ~s ~p",
+ [OsProc#os_proc.port, Id, Reason]
+ ),
+ throw({couch_util:to_existing_atom(Id), Reason});
+ _Result ->
+ {json, Line}
+ end
end.
pick_command(Line) ->
@@ -146,7 +152,6 @@ pick_command1(<<"fatal">> = Cmd) ->
pick_command1(_) ->
throw(abort).
-
% gen_server API
init([Command, Options, PortOptions]) ->
couch_io_logger:start(os:getenv("COUCHDB_IO_LOG_DIR")),
@@ -155,34 +160,38 @@ init([Command, Options, PortOptions]) ->
V = config:get("query_server_config", "os_process_idle_limit", "300"),
IdleLimit = list_to_integer(V) * 1000,
BaseProc = #os_proc{
- command=Command,
- port=open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
- writer=fun ?MODULE:writejson/2,
- reader=fun ?MODULE:readjson/1,
- idle=IdleLimit
+ command = Command,
+ port = open_port({spawn, Spawnkiller ++ " " ++ Command}, PortOptions),
+ writer = fun ?MODULE:writejson/2,
+ reader = fun ?MODULE:readjson/1,
+ idle = IdleLimit
},
KillCmd = iolist_to_binary(readline(BaseProc)),
Pid = self(),
couch_log:debug("OS Process Start :: ~p", [BaseProc#os_proc.port]),
spawn(fun() ->
- % this ensure the real os process is killed when this process dies.
- erlang:monitor(process, Pid),
- killer(?b2l(KillCmd))
- end),
+ % this ensure the real os process is killed when this process dies.
+ erlang:monitor(process, Pid),
+ killer(?b2l(KillCmd))
+ end),
OsProc =
- lists:foldl(fun(Opt, Proc) ->
- case Opt of
- {writer, Writer} when is_function(Writer) ->
- Proc#os_proc{writer=Writer};
- {reader, Reader} when is_function(Reader) ->
- Proc#os_proc{reader=Reader};
- {timeout, TimeOut} when is_integer(TimeOut) ->
- Proc#os_proc{timeout=TimeOut}
- end
- end, BaseProc, Options),
+ lists:foldl(
+ fun(Opt, Proc) ->
+ case Opt of
+ {writer, Writer} when is_function(Writer) ->
+ Proc#os_proc{writer = Writer};
+ {reader, Reader} when is_function(Reader) ->
+ Proc#os_proc{reader = Reader};
+ {timeout, TimeOut} when is_integer(TimeOut) ->
+ Proc#os_proc{timeout = TimeOut}
+ end
+ end,
+ BaseProc,
+ Options
+ ),
{ok, OsProc, IdleLimit}.
-terminate(Reason, #os_proc{port=Port}) ->
+terminate(Reason, #os_proc{port = Port}) ->
catch port_close(Port),
case Reason of
normal ->
@@ -192,10 +201,10 @@ terminate(Reason, #os_proc{port=Port}) ->
end,
ok.
-handle_call({set_timeout, TimeOut}, _From, #os_proc{idle=Idle}=OsProc) ->
- {reply, ok, OsProc#os_proc{timeout=TimeOut}, Idle};
-handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
- #os_proc{writer=Writer, reader=Reader} = OsProc,
+handle_call({set_timeout, TimeOut}, _From, #os_proc{idle = Idle} = OsProc) ->
+ {reply, ok, OsProc#os_proc{timeout = TimeOut}, Idle};
+handle_call({prompt, Data}, _From, #os_proc{idle = Idle} = OsProc) ->
+ #os_proc{writer = Writer, reader = Reader} = OsProc,
try
Writer(OsProc, Data),
{reply, {ok, Reader(OsProc)}, OsProc, Idle}
@@ -210,7 +219,7 @@ handle_call({prompt, Data}, _From, #os_proc{idle=Idle}=OsProc) ->
garbage_collect()
end.
-handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
+handle_cast({send, Data}, #os_proc{writer = Writer, idle = Idle} = OsProc) ->
try
Writer(OsProc, Data),
{noreply, OsProc, Idle}
@@ -219,31 +228,31 @@ handle_cast({send, Data}, #os_proc{writer=Writer, idle=Idle}=OsProc) ->
couch_log:error("Failed sending data: ~p -> ~p", [Data, OsError]),
{stop, normal, OsProc}
end;
-handle_cast(garbage_collect, #os_proc{idle=Idle}=OsProc) ->
+handle_cast(garbage_collect, #os_proc{idle = Idle} = OsProc) ->
erlang:garbage_collect(),
{noreply, OsProc, Idle};
handle_cast(stop, OsProc) ->
{stop, normal, OsProc};
-handle_cast(Msg, #os_proc{idle=Idle}=OsProc) ->
+handle_cast(Msg, #os_proc{idle = Idle} = OsProc) ->
couch_log:debug("OS Proc: Unknown cast: ~p", [Msg]),
{noreply, OsProc, Idle}.
-handle_info(timeout, #os_proc{idle=Idle}=OsProc) ->
+handle_info(timeout, #os_proc{idle = Idle} = OsProc) ->
gen_server:cast(couch_proc_manager, {os_proc_idle, self()}),
erlang:garbage_collect(),
{noreply, OsProc, Idle};
-handle_info({Port, {exit_status, 0}}, #os_proc{port=Port}=OsProc) ->
+handle_info({Port, {exit_status, 0}}, #os_proc{port = Port} = OsProc) ->
couch_log:info("OS Process terminated normally", []),
{stop, normal, OsProc};
-handle_info({Port, {exit_status, Status}}, #os_proc{port=Port}=OsProc) ->
+handle_info({Port, {exit_status, Status}}, #os_proc{port = Port} = OsProc) ->
couch_log:error("OS Process died with status: ~p", [Status]),
{stop, {exit_status, Status}, OsProc};
-handle_info(Msg, #os_proc{idle=Idle}=OsProc) ->
+handle_info(Msg, #os_proc{idle = Idle} = OsProc) ->
couch_log:debug("OS Proc: Unknown info: ~p", [Msg]),
{noreply, OsProc, Idle}.
-code_change(_, {os_proc, Cmd, Port, W, R, Timeout} , _) ->
- V = config:get("query_server_config","os_process_idle_limit","300"),
+code_change(_, {os_proc, Cmd, Port, W, R, Timeout}, _) ->
+ V = config:get("query_server_config", "os_process_idle_limit", "300"),
State = #os_proc{
command = Cmd,
port = Port,
@@ -257,9 +266,9 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}.
killer(KillCmd) ->
- receive _ ->
- os:cmd(KillCmd)
+ receive
+ _ ->
+ os:cmd(KillCmd)
after 1000 ->
?MODULE:killer(KillCmd)
end.
-
diff --git a/src/couch/src/couch_partition.erl b/src/couch/src/couch_partition.erl
index f2efcaa5e..101b5b324 100644
--- a/src/couch/src/couch_partition.erl
+++ b/src/couch/src/couch_partition.erl
@@ -12,7 +12,6 @@
-module(couch_partition).
-
-export([
extract/1,
from_docid/1,
@@ -29,10 +28,8 @@
hash/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-
extract(Value) when is_binary(Value) ->
case binary:split(Value, <<":">>) of
[Partition, Rest] ->
@@ -40,11 +37,9 @@ extract(Value) when is_binary(Value) ->
_ ->
undefined
end;
-
extract(_) ->
undefined.
-
from_docid(DocId) ->
case extract(DocId) of
undefined ->
@@ -53,7 +48,6 @@ from_docid(DocId) ->
Partition
end.
-
is_member(DocId, Partition) ->
case extract(DocId) of
{Partition, _} ->
@@ -62,53 +56,52 @@ is_member(DocId, Partition) ->
false
end.
-
start_key(Partition) ->
<<Partition/binary, ":">>.
-
end_key(Partition) ->
<<Partition/binary, ";">>.
-
shard_key(Partition) ->
<<Partition/binary, ":foo">>.
-
validate_dbname(DbName, Options) when is_list(DbName) ->
validate_dbname(?l2b(DbName), Options);
validate_dbname(DbName, Options) when is_binary(DbName) ->
Props = couch_util:get_value(props, Options, []),
IsPartitioned = couch_util:get_value(partitioned, Props, false),
- if not IsPartitioned -> ok; true ->
-
- DbsDbName = config:get("mem3", "shards_db", "_dbs"),
- NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
- UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
- Suffix = couch_db:dbname_suffix(DbName),
+ if
+ not IsPartitioned ->
+ ok;
+ true ->
+ DbsDbName = config:get("mem3", "shards_db", "_dbs"),
+ NodesDbName = config:get("mem3", "nodes_db", "_nodes"),
+ UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
+ Suffix = couch_db:dbname_suffix(DbName),
- SysDbNames = [
+ SysDbNames = [
iolist_to_binary(DbsDbName),
iolist_to_binary(NodesDbName)
| ?SYSTEM_DATABASES
],
- Suffices = [
+ Suffices = [
<<"_replicator">>,
<<"_users">>,
iolist_to_binary(UsersDbSuffix)
],
- IsSysDb = lists:member(DbName, SysDbNames)
- orelse lists:member(Suffix, Suffices),
+ IsSysDb =
+ lists:member(DbName, SysDbNames) orelse
+ lists:member(Suffix, Suffices),
- if not IsSysDb -> ok; true ->
- throw({bad_request, <<"Cannot partition a system database">>})
- end
+ if
+ not IsSysDb -> ok;
+ true -> throw({bad_request, <<"Cannot partition a system database">>})
+ end
end.
-
validate_docid(<<"_design/", _/binary>>) ->
ok;
validate_docid(<<"_local/", _/binary>>) ->
@@ -125,7 +118,6 @@ validate_docid(DocId) when is_binary(DocId) ->
couch_doc:validate_docid(PartitionedDocId)
end.
-
validate_partition(<<>>) ->
throw({illegal_partition, <<"Partition must not be empty">>});
validate_partition(Partition) when is_binary(Partition) ->
@@ -153,7 +145,6 @@ validate_partition(Partition) when is_binary(Partition) ->
validate_partition(_) ->
throw({illegal_partition, <<"Partition must be a string">>}).
-
% Document ids that start with an underscore
% (i.e., _local and _design) do not contain a
% partition and thus do not use the partition
diff --git a/src/couch/src/couch_passwords.erl b/src/couch/src/couch_passwords.erl
index 55ffb359f..828d2f68b 100644
--- a/src/couch/src/couch_passwords.erl
+++ b/src/couch/src/couch_passwords.erl
@@ -40,98 +40,144 @@ hash_admin_password(ClearPassword) when is_binary(ClearPassword) ->
Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"),
hash_admin_password(Scheme, ClearPassword).
-hash_admin_password("simple", ClearPassword) -> % deprecated
+% deprecated
+hash_admin_password("simple", ClearPassword) ->
Salt = couch_uuids:random(),
Hash = crypto:hash(sha, <<ClearPassword/binary, Salt/binary>>),
?l2b("-hashed-" ++ couch_util:to_hex(Hash) ++ "," ++ ?b2l(Salt));
hash_admin_password("pbkdf2", ClearPassword) ->
Iterations = chttpd_util:get_chttpd_auth_config("iterations", "10"),
Salt = couch_uuids:random(),
- DerivedKey = couch_passwords:pbkdf2(couch_util:to_binary(ClearPassword),
- Salt, list_to_integer(Iterations)),
- ?l2b("-pbkdf2-" ++ ?b2l(DerivedKey) ++ ","
- ++ ?b2l(Salt) ++ ","
- ++ Iterations).
+ DerivedKey = couch_passwords:pbkdf2(
+ couch_util:to_binary(ClearPassword),
+ Salt,
+ list_to_integer(Iterations)
+ ),
+ ?l2b(
+ "-pbkdf2-" ++ ?b2l(DerivedKey) ++ "," ++
+ ?b2l(Salt) ++ "," ++
+ Iterations
+ ).
-spec get_unhashed_admins() -> list().
get_unhashed_admins() ->
lists:filter(
- fun({_User, "-hashed-" ++ _}) ->
- false; % already hashed
- ({_User, "-pbkdf2-" ++ _}) ->
- false; % already hashed
- ({_User, _ClearPassword}) ->
- true
+ fun
+ ({_User, "-hashed-" ++ _}) ->
+ % already hashed
+ false;
+ ({_User, "-pbkdf2-" ++ _}) ->
+ % already hashed
+ false;
+ ({_User, _ClearPassword}) ->
+ true
end,
- config:get("admins")).
+ config:get("admins")
+ ).
%% Current scheme, much stronger.
-spec pbkdf2(binary(), binary(), integer()) -> binary().
-pbkdf2(Password, Salt, Iterations) when is_binary(Password),
- is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0 ->
+pbkdf2(Password, Salt, Iterations) when
+ is_binary(Password),
+ is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0
+->
{ok, Result} = pbkdf2(Password, Salt, Iterations, ?SHA1_OUTPUT_LENGTH),
Result;
-pbkdf2(Password, Salt, Iterations) when is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0 ->
+pbkdf2(Password, Salt, Iterations) when
+ is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0
+->
Msg = io_lib:format("Password value of '~p' is invalid.", [Password]),
throw({forbidden, Msg});
-pbkdf2(Password, Salt, Iterations) when is_binary(Password),
- is_integer(Iterations),
- Iterations > 0 ->
+pbkdf2(Password, Salt, Iterations) when
+ is_binary(Password),
+ is_integer(Iterations),
+ Iterations > 0
+->
Msg = io_lib:format("Salt value of '~p' is invalid.", [Salt]),
throw({forbidden, Msg}).
--spec pbkdf2(binary(), binary(), integer(), integer())
- -> {ok, binary()} | {error, derived_key_too_long}.
-pbkdf2(_Password, _Salt, _Iterations, DerivedLength)
- when DerivedLength > ?MAX_DERIVED_KEY_LENGTH ->
+-spec pbkdf2(binary(), binary(), integer(), integer()) ->
+ {ok, binary()} | {error, derived_key_too_long}.
+pbkdf2(_Password, _Salt, _Iterations, DerivedLength) when
+ DerivedLength > ?MAX_DERIVED_KEY_LENGTH
+->
{error, derived_key_too_long};
-pbkdf2(Password, Salt, Iterations, DerivedLength) when is_binary(Password),
- is_binary(Salt),
- is_integer(Iterations),
- Iterations > 0,
- is_integer(DerivedLength) ->
+pbkdf2(Password, Salt, Iterations, DerivedLength) when
+ is_binary(Password),
+ is_binary(Salt),
+ is_integer(Iterations),
+ Iterations > 0,
+ is_integer(DerivedLength)
+->
L = ceiling(DerivedLength / ?SHA1_OUTPUT_LENGTH),
- <<Bin:DerivedLength/binary,_/binary>> =
+ <<Bin:DerivedLength/binary, _/binary>> =
iolist_to_binary(pbkdf2(Password, Salt, Iterations, L, 1, [])),
{ok, ?l2b(couch_util:to_hex(Bin))}.
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist())
- -> iolist().
-pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc)
- when BlockIndex > BlockCount ->
+-spec pbkdf2(binary(), binary(), integer(), integer(), integer(), iolist()) ->
+ iolist().
+pbkdf2(_Password, _Salt, _Iterations, BlockCount, BlockIndex, Acc) when
+ BlockIndex > BlockCount
+->
lists:reverse(Acc);
pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex, Acc) ->
Block = pbkdf2(Password, Salt, Iterations, BlockIndex, 1, <<>>, <<>>),
- pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block|Acc]).
+ pbkdf2(Password, Salt, Iterations, BlockCount, BlockIndex + 1, [Block | Acc]).
--spec pbkdf2(binary(), binary(), integer(), integer(), integer(),
- binary(), binary()) -> binary().
-pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc)
- when Iteration > Iterations ->
+-spec pbkdf2(
+ binary(),
+ binary(),
+ integer(),
+ integer(),
+ integer(),
+ binary(),
+ binary()
+) -> binary().
+pbkdf2(_Password, _Salt, Iterations, _BlockIndex, Iteration, _Prev, Acc) when
+ Iteration > Iterations
+->
Acc;
pbkdf2(Password, Salt, Iterations, BlockIndex, 1, _Prev, _Acc) ->
- InitialBlock = couch_util:hmac(sha, Password,
- <<Salt/binary,BlockIndex:32/integer>>),
- pbkdf2(Password, Salt, Iterations, BlockIndex, 2,
- InitialBlock, InitialBlock);
+ InitialBlock = couch_util:hmac(
+ sha,
+ Password,
+ <<Salt/binary, BlockIndex:32/integer>>
+ ),
+ pbkdf2(
+ Password,
+ Salt,
+ Iterations,
+ BlockIndex,
+ 2,
+ InitialBlock,
+ InitialBlock
+ );
pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration, Prev, Acc) ->
Next = couch_util:hmac(sha, Password, Prev),
- pbkdf2(Password, Salt, Iterations, BlockIndex, Iteration + 1,
- Next, crypto:exor(Next, Acc)).
+ pbkdf2(
+ Password,
+ Salt,
+ Iterations,
+ BlockIndex,
+ Iteration + 1,
+ Next,
+ crypto:exor(Next, Acc)
+ ).
%% verify two lists for equality without short-circuits to avoid timing attacks.
-spec verify(string(), string(), integer()) -> boolean().
-verify([X|RestX], [Y|RestY], Result) ->
+verify([X | RestX], [Y | RestY], Result) ->
verify(RestX, RestY, (X bxor Y) bor Result);
verify([], [], Result) ->
Result == 0.
--spec verify(binary(), binary()) -> boolean();
- (list(), list()) -> boolean().
+-spec verify
+ (binary(), binary()) -> boolean();
+ (list(), list()) -> boolean().
verify(<<X/binary>>, <<Y/binary>>) ->
verify(?b2l(X), ?b2l(Y));
verify(X, Y) when is_list(X) and is_list(Y) ->
@@ -141,7 +187,8 @@ verify(X, Y) when is_list(X) and is_list(Y) ->
false ->
false
end;
-verify(_X, _Y) -> false.
+verify(_X, _Y) ->
+ false.
-spec ceiling(number()) -> integer().
ceiling(X) ->
diff --git a/src/couch/src/couch_primary_sup.erl b/src/couch/src/couch_primary_sup.erl
index 73c3de710..4f2917f98 100644
--- a/src/couch/src/couch_primary_sup.erl
+++ b/src/couch/src/couch_primary_sup.erl
@@ -15,30 +15,20 @@
-export([init/1, start_link/0]).
start_link() ->
- supervisor:start_link({local,couch_primary_services}, ?MODULE, []).
+ supervisor:start_link({local, couch_primary_services}, ?MODULE, []).
init([]) ->
- Children = [
- {couch_task_status,
- {couch_task_status, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_task_status]}
- ] ++ couch_servers(),
+ Children =
+ [
+ {couch_task_status, {couch_task_status, start_link, []}, permanent, brutal_kill, worker,
+ [couch_task_status]}
+ ] ++ couch_servers(),
{ok, {{one_for_one, 10, 3600}, Children}}.
-
couch_servers() ->
N = couch_server:num_servers(),
[couch_server(I) || I <- lists:seq(1, N)].
couch_server(N) ->
Name = couch_server:couch_server(N),
- {Name,
- {couch_server, sup_start_link, [N]},
- permanent,
- brutal_kill,
- worker,
- [couch_server]
- }.
+ {Name, {couch_server, sup_start_link, [N]}, permanent, brutal_kill, worker, [couch_server]}.
diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl
index e7a25a6d2..6d86c16a7 100644
--- a/src/couch/src/couch_proc_manager.erl
+++ b/src/couch/src/couch_proc_manager.erl
@@ -60,7 +60,7 @@
-record(client, {
timestamp :: os:timestamp() | '_',
- from :: undefined | {pid(), reference()} | '_',
+ from :: undefined | {pid(), reference()} | '_',
lang :: binary() | '_',
ddoc :: #doc{} | '_',
ddoc_key :: undefined | {DDocId :: docid(), Rev :: revision()} | '_'
@@ -77,27 +77,21 @@
t0 = os:timestamp()
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
get_proc_count() ->
gen_server:call(?MODULE, get_proc_count).
-
get_stale_proc_count() ->
gen_server:call(?MODULE, get_stale_proc_count).
-
reload() ->
gen_server:call(?MODULE, set_threshold_ts).
-
terminate_stale_procs() ->
gen_server:call(?MODULE, terminate_stale_procs).
-
init([]) ->
process_flag(trap_exit, true),
ok = config:listen_for_changes(?MODULE, undefined),
@@ -120,50 +114,48 @@ init([]) ->
soft_limit = get_soft_limit()
}}.
-
terminate(_Reason, _State) ->
- ets:foldl(fun(#proc_int{pid=P}, _) ->
- couch_util:shutdown_sync(P)
- end, 0, ?PROCS),
+ ets:foldl(
+ fun(#proc_int{pid = P}, _) ->
+ couch_util:shutdown_sync(P)
+ end,
+ 0,
+ ?PROCS
+ ),
ok.
-
handle_call(get_proc_count, _From, State) ->
NumProcs = ets:info(?PROCS, size),
NumOpening = ets:info(?OPENING, size),
{reply, NumProcs + NumOpening, State};
-
handle_call(get_stale_proc_count, _From, State) ->
#state{threshold_ts = T0} = State,
- MatchSpec = [{#proc_int{t0='$1', _='_'}, [{'<', '$1', {T0}}], [true]}],
+ MatchSpec = [{#proc_int{t0 = '$1', _ = '_'}, [{'<', '$1', {T0}}], [true]}],
{reply, ets:select_count(?PROCS, MatchSpec), State};
-
-handle_call({get_proc, #doc{body={Props}}=DDoc, DDocKey}, From, State) ->
+handle_call({get_proc, #doc{body = {Props}} = DDoc, DDocKey}, From, State) ->
LangStr = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
Lang = couch_util:to_binary(LangStr),
- Client = #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey},
+ Client = #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey},
add_waiting_client(Client),
{noreply, flush_waiters(State, Lang)};
-
handle_call({get_proc, LangStr}, From, State) ->
Lang = couch_util:to_binary(LangStr),
- Client = #client{from=From, lang=Lang},
+ Client = #client{from = From, lang = Lang},
add_waiting_client(Client),
{noreply, flush_waiters(State, Lang)};
-
-handle_call({ret_proc, #proc{client=Ref} = Proc}, _From, State) ->
+handle_call({ret_proc, #proc{client = Ref} = Proc}, _From, State) ->
erlang:demonitor(Ref, [flush]),
- NewState = case ets:lookup(?PROCS, Proc#proc.pid) of
- [#proc_int{}=ProcInt] ->
- return_proc(State, ProcInt);
- [] ->
- % Proc must've died and we already
- % cleared it out of the table in
- % the handle_info clause.
- State
- end,
+ NewState =
+ case ets:lookup(?PROCS, Proc#proc.pid) of
+ [#proc_int{} = ProcInt] ->
+ return_proc(State, ProcInt);
+ [] ->
+ % Proc must've died and we already
+ % cleared it out of the table in
+ % the handle_info clause.
+ State
+ end,
{reply, true, NewState};
-
handle_call(set_threshold_ts, _From, State) ->
FoldFun = fun
(#proc_int{client = undefined} = Proc, StateAcc) ->
@@ -173,7 +165,6 @@ handle_call(set_threshold_ts, _From, State) ->
end,
NewState = ets:foldl(FoldFun, State, ?PROCS),
{reply, ok, NewState#state{threshold_ts = os:timestamp()}};
-
handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
FoldFun = fun
(#proc_int{client = undefined, t0 = Ts2} = Proc, StateAcc) ->
@@ -188,26 +179,24 @@ handle_call(terminate_stale_procs, _From, #state{threshold_ts = Ts1} = State) ->
end,
NewState = ets:foldl(FoldFun, State, ?PROCS),
{reply, ok, NewState};
-
handle_call(_Call, _From, State) ->
{reply, ignored, State}.
-
-handle_cast({os_proc_idle, Pid}, #state{counts=Counts}=State) ->
- NewState = case ets:lookup(?PROCS, Pid) of
- [#proc_int{client=undefined, lang=Lang}=Proc] ->
- case dict:find(Lang, Counts) of
- {ok, Count} when Count >= State#state.soft_limit ->
- couch_log:info("Closing idle OS Process: ~p", [Pid]),
- remove_proc(State, Proc);
- {ok, _} ->
- State
- end;
- _ ->
- State
- end,
+handle_cast({os_proc_idle, Pid}, #state{counts = Counts} = State) ->
+ NewState =
+ case ets:lookup(?PROCS, Pid) of
+ [#proc_int{client = undefined, lang = Lang} = Proc] ->
+ case dict:find(Lang, Counts) of
+ {ok, Count} when Count >= State#state.soft_limit ->
+ couch_log:info("Closing idle OS Process: ~p", [Pid]),
+ remove_proc(State, Proc);
+ {ok, _} ->
+ State
+ end;
+ _ ->
+ State
+ end,
{noreply, NewState};
-
handle_cast(reload_config, State) ->
NewState = State#state{
config = get_proc_config(),
@@ -216,29 +205,24 @@ handle_cast(reload_config, State) ->
},
maybe_configure_erlang_native_servers(),
{noreply, flush_waiters(NewState)};
-
handle_cast(_Msg, State) ->
{noreply, State}.
-
handle_info(shutdown, State) ->
{stop, shutdown, State};
-
-handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid,_} = From}}, State) ->
+handle_info({'EXIT', Pid, {spawn_ok, Proc0, {ClientPid, _} = From}}, State) ->
ets:delete(?OPENING, Pid),
link(Proc0#proc_int.pid),
Proc = assign_proc(ClientPid, Proc0),
gen_server:reply(From, {ok, Proc, State#state.config}),
{noreply, State};
-
handle_info({'EXIT', Pid, spawn_error}, State) ->
- [{Pid, #client{lang=Lang}}] = ets:lookup(?OPENING, Pid),
+ [{Pid, #client{lang = Lang}}] = ets:lookup(?OPENING, Pid),
ets:delete(?OPENING, Pid),
NewState = State#state{
counts = dict:update_counter(Lang, -1, State#state.counts)
},
{noreply, flush_waiters(NewState, Lang)};
-
handle_info({'EXIT', Pid, Reason}, State) ->
couch_log:info("~p ~p died ~p", [?MODULE, Pid, Reason]),
case ets:lookup(?PROCS, Pid) of
@@ -248,25 +232,20 @@ handle_info({'EXIT', Pid, Reason}, State) ->
[] ->
{noreply, State}
end;
-
handle_info({'DOWN', Ref, _, _, _Reason}, State0) ->
- case ets:match_object(?PROCS, #proc_int{client=Ref, _='_'}) of
+ case ets:match_object(?PROCS, #proc_int{client = Ref, _ = '_'}) of
[#proc_int{} = Proc] ->
{noreply, return_proc(State0, Proc)};
[] ->
{noreply, State0}
end;
-
-
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State};
-
handle_info(_Msg, State) ->
{noreply, State}.
-
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
handle_config_terminate(_, stop, _) ->
@@ -284,7 +263,6 @@ handle_config_change("query_server_config", _, _, _, _) ->
handle_config_change(_, _, _, _, _) ->
{ok, undefined}.
-
find_proc(#client{lang = Lang, ddoc_key = undefined}) ->
Pred = fun(_) ->
true
@@ -296,7 +274,7 @@ find_proc(#client{lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client) ->
end,
case find_proc(Lang, Pred) of
not_found ->
- case find_proc(Client#client{ddoc_key=undefined}) of
+ case find_proc(Client#client{ddoc_key = undefined}) of
{ok, Proc} ->
teach_ddoc(DDoc, DDocKey, Proc);
Else ->
@@ -313,9 +291,8 @@ find_proc(Lang, Fun) ->
{error, Reason}
end.
-
iter_procs(Lang, Fun) when is_binary(Lang) ->
- Pattern = #proc_int{lang=Lang, client=undefined, _='_'},
+ Pattern = #proc_int{lang = Lang, client = undefined, _ = '_'},
MSpec = [{Pattern, [], ['$_']}],
case ets:select_reverse(?PROCS, MSpec, 25) of
'$end_of_table' ->
@@ -324,7 +301,6 @@ iter_procs(Lang, Fun) when is_binary(Lang) ->
iter_procs_int(Continuation, Fun)
end.
-
iter_procs_int({[], Continuation0}, Fun) ->
case ets:select_reverse(Continuation0) of
'$end_of_table' ->
@@ -340,7 +316,6 @@ iter_procs_int({[Proc | Rest], Continuation}, Fun) ->
iter_procs_int({Rest, Continuation}, Fun)
end.
-
spawn_proc(State, Client) ->
Pid = spawn_link(?MODULE, new_proc, [Client]),
ets:insert(?OPENING, {Pid, Client}),
@@ -350,36 +325,38 @@ spawn_proc(State, Client) ->
counts = dict:update_counter(Lang, 1, Counts)
}.
-
-new_proc(#client{ddoc=undefined, ddoc_key=undefined}=Client) ->
- #client{from=From, lang=Lang} = Client,
- Resp = try
- case new_proc_int(From, Lang) of
- {ok, Proc} ->
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
+new_proc(#client{ddoc = undefined, ddoc_key = undefined} = Client) ->
+ #client{from = From, lang = Lang} = Client,
+ Resp =
+ try
+ case new_proc_int(From, Lang) of
+ {ok, Proc} ->
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch
+ _:_ ->
spawn_error
- end
- catch _:_ ->
- spawn_error
- end,
+ end,
exit(Resp);
-
new_proc(Client) ->
- #client{from=From, lang=Lang, ddoc=DDoc, ddoc_key=DDocKey} = Client,
- Resp = try
- case new_proc_int(From, Lang) of
- {ok, NewProc} ->
- {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
- {spawn_ok, Proc, From};
- Error ->
- gen_server:reply(From, {error, Error}),
- spawn_error
- end
- catch _:_ ->
- spawn_error
- end,
+ #client{from = From, lang = Lang, ddoc = DDoc, ddoc_key = DDocKey} = Client,
+ Resp =
+ try
+ case new_proc_int(From, Lang) of
+ {ok, NewProc} ->
+ {ok, Proc} = teach_ddoc(DDoc, DDocKey, NewProc),
+ {spawn_ok, Proc, From};
+ Error ->
+ gen_server:reply(From, {error, Error}),
+ spawn_error
+ end
+ catch
+ _:_ ->
+ spawn_error
+ end,
exit(Resp).
split_string_if_longer(String, Pos) ->
@@ -399,14 +376,17 @@ split_by_char(String, Char) ->
get_servers_from_env(Spec) ->
SpecLen = length(Spec),
% loop over os:getenv(), match SPEC_
- lists:filtermap(fun(EnvStr) ->
- case split_string_if_longer(EnvStr, SpecLen) of
- {Spec, Rest} ->
- {true, split_by_char(Rest, $=)};
- _ ->
- false
- end
- end, os:getenv()).
+ lists:filtermap(
+ fun(EnvStr) ->
+ case split_string_if_longer(EnvStr, SpecLen) of
+ {Spec, Rest} ->
+ {true, split_by_char(Rest, $=)};
+ _ ->
+ false
+ end
+ end,
+ os:getenv()
+ ).
get_query_server(LangStr) ->
case ets:lookup(?SERVERS, string:to_upper(LangStr)) of
@@ -425,39 +405,39 @@ native_query_server_enabled() ->
maybe_configure_erlang_native_servers() ->
case native_query_server_enabled() of
true ->
- ets:insert(?SERVERS, [
- {"ERLANG", {couch_native_process, start_link, []}}]);
+ ets:insert(?SERVERS, [
+ {"ERLANG", {couch_native_process, start_link, []}}
+ ]);
_Else ->
- ok
+ ok
end.
new_proc_int(From, Lang) when is_binary(Lang) ->
LangStr = binary_to_list(Lang),
case get_query_server(LangStr) of
- undefined ->
- gen_server:reply(From, {unknown_query_language, Lang});
- {M, F, A} ->
- {ok, Pid} = apply(M, F, A),
- make_proc(Pid, Lang, M);
- Command ->
- {ok, Pid} = couch_os_process:start_link(Command),
- make_proc(Pid, Lang, couch_os_process)
+ undefined ->
+ gen_server:reply(From, {unknown_query_language, Lang});
+ {M, F, A} ->
+ {ok, Pid} = apply(M, F, A),
+ make_proc(Pid, Lang, M);
+ Command ->
+ {ok, Pid} = couch_os_process:start_link(Command),
+ make_proc(Pid, Lang, couch_os_process)
end.
-
-teach_ddoc(DDoc, {DDocId, _Rev}=DDocKey, #proc_int{ddoc_keys=Keys}=Proc) ->
+teach_ddoc(DDoc, {DDocId, _Rev} = DDocKey, #proc_int{ddoc_keys = Keys} = Proc) ->
% send ddoc over the wire
% we only share the rev with the client we know to update code
% but it only keeps the latest copy, per each ddoc, around.
true = couch_query_servers:proc_prompt(
export_proc(Proc),
- [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]),
+ [<<"ddoc">>, <<"new">>, DDocId, couch_doc:to_json_obj(DDoc, [])]
+ ),
% we should remove any other ddocs keys for this docid
% because the query server overwrites without the rev
- Keys2 = [{D,R} || {D,R} <- Keys, D /= DDocId],
+ Keys2 = [{D, R} || {D, R} <- Keys, D /= DDocId],
% add ddoc to the proc
- {ok, Proc#proc_int{ddoc_keys=[DDocKey|Keys2]}}.
-
+ {ok, Proc#proc_int{ddoc_keys = [DDocKey | Keys2]}}.
make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
Proc = #proc_int{
@@ -470,42 +450,42 @@ make_proc(Pid, Lang, Mod) when is_binary(Lang) ->
unlink(Pid),
{ok, Proc}.
-
-assign_proc(Pid, #proc_int{client=undefined}=Proc0) when is_pid(Pid) ->
+assign_proc(Pid, #proc_int{client = undefined} = Proc0) when is_pid(Pid) ->
Proc = Proc0#proc_int{client = erlang:monitor(process, Pid)},
ets:insert(?PROCS, Proc),
export_proc(Proc);
-assign_proc(#client{}=Client, #proc_int{client=undefined}=Proc) ->
+assign_proc(#client{} = Client, #proc_int{client = undefined} = Proc) ->
{Pid, _} = Client#client.from,
assign_proc(Pid, Proc).
-
return_proc(#state{} = State, #proc_int{} = ProcInt) ->
#proc_int{pid = Pid, lang = Lang} = ProcInt,
- NewState = case is_process_alive(Pid) of true ->
- case ProcInt#proc_int.t0 < State#state.threshold_ts of
+ NewState =
+ case is_process_alive(Pid) of
true ->
- remove_proc(State, ProcInt);
+ case ProcInt#proc_int.t0 < State#state.threshold_ts of
+ true ->
+ remove_proc(State, ProcInt);
+ false ->
+ gen_server:cast(Pid, garbage_collect),
+ true = ets:update_element(?PROCS, Pid, [
+ {#proc_int.client, undefined}
+ ]),
+ State
+ end;
false ->
- gen_server:cast(Pid, garbage_collect),
- true = ets:update_element(?PROCS, Pid, [
- {#proc_int.client, undefined}
- ]),
- State
- end;
- false ->
- remove_proc(State, ProcInt)
- end,
+ remove_proc(State, ProcInt)
+ end,
flush_waiters(NewState, Lang).
-
-remove_proc(State, #proc_int{}=Proc) ->
+remove_proc(State, #proc_int{} = Proc) ->
ets:delete(?PROCS, Proc#proc_int.pid),
- case is_process_alive(Proc#proc_int.pid) of true ->
- unlink(Proc#proc_int.pid),
- gen_server:cast(Proc#proc_int.pid, stop);
- false ->
- ok
+ case is_process_alive(Proc#proc_int.pid) of
+ true ->
+ unlink(Proc#proc_int.pid),
+ gen_server:cast(Proc#proc_int.pid, stop);
+ false ->
+ ok
end,
Counts = State#state.counts,
Lang = Proc#proc_int.lang,
@@ -513,7 +493,6 @@ remove_proc(State, #proc_int{}=Proc) ->
counts = dict:update_counter(Lang, -1, Counts)
}.
-
-spec export_proc(#proc_int{}) -> #proc{}.
export_proc(#proc_int{} = ProcInt) ->
ProcIntList = tuple_to_list(ProcInt),
@@ -521,17 +500,19 @@ export_proc(#proc_int{} = ProcInt) ->
[_ | Data] = lists:sublist(ProcIntList, ProcLen),
list_to_tuple([proc | Data]).
-
flush_waiters(State) ->
- dict:fold(fun(Lang, Count, StateAcc) ->
- case Count < State#state.hard_limit of
- true ->
- flush_waiters(StateAcc, Lang);
- false ->
- StateAcc
- end
- end, State, State#state.counts).
-
+ dict:fold(
+ fun(Lang, Count, StateAcc) ->
+ case Count < State#state.hard_limit of
+ true ->
+ flush_waiters(StateAcc, Lang);
+ false ->
+ StateAcc
+ end
+ end,
+ State,
+ State#state.counts
+ ).
flush_waiters(State, Lang) ->
CanSpawn = can_spawn(State, Lang),
@@ -558,31 +539,27 @@ flush_waiters(State, Lang) ->
State
end.
-
add_waiting_client(Client) ->
- ets:insert(?WAITERS, Client#client{timestamp=os:timestamp()}).
+ ets:insert(?WAITERS, Client#client{timestamp = os:timestamp()}).
-spec get_waiting_client(Lang :: binary()) -> undefined | #client{}.
get_waiting_client(Lang) ->
- case ets:match_object(?WAITERS, #client{lang=Lang, _='_'}, 1) of
+ case ets:match_object(?WAITERS, #client{lang = Lang, _ = '_'}, 1) of
'$end_of_table' ->
undefined;
- {[#client{}=Client], _} ->
+ {[#client{} = Client], _} ->
Client
end.
-
remove_waiting_client(#client{timestamp = Timestamp}) ->
ets:delete(?WAITERS, Timestamp).
-
can_spawn(#state{hard_limit = HardLimit, counts = Counts}, Lang) ->
case dict:find(Lang, Counts) of
{ok, Count} -> Count < HardLimit;
error -> true
end.
-
get_proc_config() ->
Limit = config:get_boolean("query_server_config", "reduce_limit", true),
Timeout = config:get_integer("couchdb", "os_process_timeout", 5000),
@@ -591,11 +568,9 @@ get_proc_config() ->
{<<"timeout">>, Timeout}
]}.
-
get_hard_limit() ->
LimStr = config:get("query_server_config", "os_process_limit", "100"),
list_to_integer(LimStr).
-
get_soft_limit() ->
config:get_integer("query_server_config", "os_process_soft_limit", 100).
diff --git a/src/couch/src/couch_query_servers.erl b/src/couch/src/couch_query_servers.erl
index 10b8048dd..5dd7c4a4b 100644
--- a/src/couch/src/couch_query_servers.erl
+++ b/src/couch/src/couch_query_servers.erl
@@ -14,7 +14,7 @@
-export([try_compile/4]).
-export([start_doc_map/3, map_doc_raw/2, stop_doc_map/1, raw_to_ejson/1]).
--export([reduce/3, rereduce/3,validate_doc_update/5]).
+-export([reduce/3, rereduce/3, validate_doc_update/5]).
-export([filter_docs/5]).
-export([filter_view/3]).
-export([finalize/2]).
@@ -27,14 +27,17 @@
-include_lib("couch/include/couch_db.hrl").
--define(SUMERROR, <<"The _sum function requires that map values be numbers, "
+-define(SUMERROR, <<
+ "The _sum function requires that map values be numbers, "
"arrays of numbers, or objects. Objects cannot be mixed with other "
"data structures. Objects can be arbitrarily nested, provided that the values "
- "for all fields are themselves numbers, arrays of numbers, or objects.">>).
-
--define(STATERROR, <<"The _stats function requires that map values be numbers "
- "or arrays of numbers, not '~p'">>).
+ "for all fields are themselves numbers, arrays of numbers, or objects."
+>>).
+-define(STATERROR, <<
+ "The _stats function requires that map values be numbers "
+ "or arrays of numbers, not '~p'"
+>>).
try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
try
@@ -54,20 +57,21 @@ try_compile(Proc, FunctionType, FunctionName, FunctionSource) ->
start_doc_map(Lang, Functions, Lib) ->
Proc = get_os_process(Lang),
case Lib of
- {[]} -> ok;
- Lib ->
- true = proc_prompt(Proc, [<<"add_lib">>, Lib])
+ {[]} -> ok;
+ Lib -> true = proc_prompt(Proc, [<<"add_lib">>, Lib])
end,
- lists:foreach(fun(FunctionSource) ->
- true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
- end, Functions),
+ lists:foreach(
+ fun(FunctionSource) ->
+ true = proc_prompt(Proc, [<<"add_fun">>, FunctionSource])
+ end,
+ Functions
+ ),
{ok, Proc}.
map_doc_raw(Proc, Doc) ->
Json = couch_doc:to_json_obj(Doc, []),
{ok, proc_prompt_raw(Proc, [<<"map_doc">>, Json])}.
-
stop_doc_map(nil) ->
ok;
stop_doc_map(Proc) ->
@@ -77,20 +81,24 @@ group_reductions_results([]) ->
[];
group_reductions_results(List) ->
{Heads, Tails} = lists:foldl(
- fun([H|T], {HAcc,TAcc}) ->
- {[H|HAcc], [T|TAcc]}
- end, {[], []}, List),
+ fun([H | T], {HAcc, TAcc}) ->
+ {[H | HAcc], [T | TAcc]}
+ end,
+ {[], []},
+ List
+ ),
case Tails of
- [[]|_] -> % no tails left
- [Heads];
- _ ->
- [Heads | group_reductions_results(Tails)]
+ % no tails left
+ [[] | _] ->
+ [Heads];
+ _ ->
+ [Heads | group_reductions_results(Tails)]
end.
-finalize(<<"_approx_count_distinct",_/binary>>, Reduction) ->
+finalize(<<"_approx_count_distinct", _/binary>>, Reduction) ->
true = hyper:is_hyper(Reduction),
{ok, round(hyper:card(Reduction))};
-finalize(<<"_stats",_/binary>>, Unpacked) ->
+finalize(<<"_stats", _/binary>>, Unpacked) ->
{ok, pack_stats(Unpacked)};
finalize(_RedSrc, Reduction) ->
{ok, Reduction}.
@@ -101,45 +109,51 @@ rereduce(Lang, RedSrcs, ReducedValues) ->
Grouped = group_reductions_results(ReducedValues),
Results = lists:zipwith(
fun
- (<<"_", _/binary>> = FunSrc, Values) ->
- {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
- Result;
- (FunSrc, Values) ->
- os_rereduce(Lang, [FunSrc], Values)
- end, RedSrcs, Grouped),
+ (<<"_", _/binary>> = FunSrc, Values) ->
+ {ok, [Result]} = builtin_reduce(rereduce, [FunSrc], [[[], V] || V <- Values], []),
+ Result;
+ (FunSrc, Values) ->
+ os_rereduce(Lang, [FunSrc], Values)
+ end,
+ RedSrcs,
+ Grouped
+ ),
{ok, Results}.
reduce(_Lang, [], _KVs) ->
{ok, []};
reduce(Lang, RedSrcs, KVs) ->
- {OsRedSrcs, BuiltinReds} = lists:partition(fun
- (<<"_", _/binary>>) -> false;
- (_OsFun) -> true
- end, RedSrcs),
+ {OsRedSrcs, BuiltinReds} = lists:partition(
+ fun
+ (<<"_", _/binary>>) -> false;
+ (_OsFun) -> true
+ end,
+ RedSrcs
+ ),
{ok, OsResults} = os_reduce(Lang, OsRedSrcs, KVs),
{ok, BuiltinResults} = builtin_reduce(reduce, BuiltinReds, KVs, []),
recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, []).
-
recombine_reduce_results([], [], [], Acc) ->
{ok, lists:reverse(Acc)};
-recombine_reduce_results([<<"_", _/binary>>|RedSrcs], OsResults, [BRes|BuiltinResults], Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes|Acc]);
-recombine_reduce_results([_OsFun|RedSrcs], [OsR|OsResults], BuiltinResults, Acc) ->
- recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR|Acc]).
+recombine_reduce_results([<<"_", _/binary>> | RedSrcs], OsResults, [BRes | BuiltinResults], Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [BRes | Acc]);
+recombine_reduce_results([_OsFun | RedSrcs], [OsR | OsResults], BuiltinResults, Acc) ->
+ recombine_reduce_results(RedSrcs, OsResults, BuiltinResults, [OsR | Acc]).
os_reduce(_Lang, [], _KVs) ->
{ok, []};
os_reduce(Lang, OsRedSrcs, KVs) ->
Proc = get_os_process(Lang),
- OsResults = try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
- [true, Reductions] -> Reductions
- catch
- throw:{reduce_overflow_error, Msg} ->
- [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
- after
- ok = ret_os_process(Proc)
- end,
+ OsResults =
+ try proc_prompt(Proc, [<<"reduce">>, OsRedSrcs, KVs]) of
+ [true, Reductions] -> Reductions
+ catch
+ throw:{reduce_overflow_error, Msg} ->
+ [{[{reduce_overflow_error, Msg}]} || _ <- OsRedSrcs]
+ after
+ ok = ret_os_process(Proc)
+ end,
{ok, OsResults}.
os_rereduce(Lang, OsRedSrcs, KVs) ->
@@ -158,7 +172,6 @@ os_rereduce(Lang, OsRedSrcs, KVs) ->
Error
end.
-
get_overflow_error([]) ->
undefined;
get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
@@ -166,26 +179,24 @@ get_overflow_error([{[{reduce_overflow_error, _}]} = Error | _]) ->
get_overflow_error([_ | Rest]) ->
get_overflow_error(Rest).
-
builtin_reduce(_Re, [], _KVs, Acc) ->
{ok, lists:reverse(Acc)};
-builtin_reduce(Re, [<<"_sum",_/binary>>|BuiltinReds], KVs, Acc) ->
+builtin_reduce(Re, [<<"_sum", _/binary>> | BuiltinReds], KVs, Acc) ->
Sum = builtin_sum_rows(KVs, 0),
Red = check_sum_overflow(?term_size(KVs), ?term_size(Sum), Sum),
- builtin_reduce(Re, BuiltinReds, KVs, [Red|Acc]);
-builtin_reduce(reduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(Re, BuiltinReds, KVs, [Red | Acc]);
+builtin_reduce(reduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
Count = length(KVs),
- builtin_reduce(reduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(rereduce, [<<"_count",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(reduce, BuiltinReds, KVs, [Count | Acc]);
+builtin_reduce(rereduce, [<<"_count", _/binary>> | BuiltinReds], KVs, Acc) ->
Count = builtin_sum_rows(KVs, 0),
- builtin_reduce(rereduce, BuiltinReds, KVs, [Count|Acc]);
-builtin_reduce(Re, [<<"_stats",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(rereduce, BuiltinReds, KVs, [Count | Acc]);
+builtin_reduce(Re, [<<"_stats", _/binary>> | BuiltinReds], KVs, Acc) ->
Stats = builtin_stats(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Stats|Acc]);
-builtin_reduce(Re, [<<"_approx_count_distinct",_/binary>>|BuiltinReds], KVs, Acc) ->
+ builtin_reduce(Re, BuiltinReds, KVs, [Stats | Acc]);
+builtin_reduce(Re, [<<"_approx_count_distinct", _/binary>> | BuiltinReds], KVs, Acc) ->
Distinct = approx_count_distinct(Re, KVs),
- builtin_reduce(Re, BuiltinReds, KVs, [Distinct|Acc]).
-
+ builtin_reduce(Re, BuiltinReds, KVs, [Distinct | Acc]).
builtin_sum_rows([], Acc) ->
Acc;
@@ -197,11 +208,13 @@ builtin_sum_rows([[_Key, Value] | RestKVs], Acc) ->
throw:{builtin_reduce_error, Obj} ->
Obj;
throw:{invalid_value, Reason, Cause} ->
- {[{<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, Reason}, {<<"caused_by">>, Cause}]}
+ {[
+ {<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, Reason},
+ {<<"caused_by">>, Cause}
+ ]}
end.
-
sum_values(Value, Acc) when is_number(Value), is_number(Acc) ->
Acc + Value;
sum_values(Value, Acc) when is_list(Value), is_list(Acc) ->
@@ -239,12 +252,12 @@ sum_objects(Rest, []) ->
sum_arrays([], []) ->
[];
-sum_arrays([_|_]=Xs, []) ->
+sum_arrays([_ | _] = Xs, []) ->
Xs;
-sum_arrays([], [_|_]=Ys) ->
+sum_arrays([], [_ | _] = Ys) ->
Ys;
-sum_arrays([X|Xs], [Y|Ys]) when is_number(X), is_number(Y) ->
- [X+Y | sum_arrays(Xs,Ys)];
+sum_arrays([X | Xs], [Y | Ys]) when is_number(X), is_number(Y) ->
+ [X + Y | sum_arrays(Xs, Ys)];
sum_arrays(Else, _) ->
throw_sum_error(Else).
@@ -265,37 +278,42 @@ check_sum_overflow(InSize, OutSize, Sum) ->
end.
log_sum_overflow(InSize, OutSize) ->
- Fmt = "Reduce output must shrink more rapidly: "
- "input size: ~b "
- "output size: ~b",
+ Fmt =
+ "Reduce output must shrink more rapidly: "
+ "input size: ~b "
+ "output size: ~b",
Msg = iolist_to_binary(io_lib:format(Fmt, [InSize, OutSize])),
couch_log:error(Msg, []),
Msg.
builtin_stats(_, []) ->
{0, 0, 0, 0, 0};
-builtin_stats(_, [[_,First]|Rest]) ->
- lists:foldl(fun([_Key, Value], Acc) ->
- stat_values(Value, Acc)
- end, build_initial_accumulator(First), Rest).
+builtin_stats(_, [[_, First] | Rest]) ->
+ lists:foldl(
+ fun([_Key, Value], Acc) ->
+ stat_values(Value, Acc)
+ end,
+ build_initial_accumulator(First),
+ Rest
+ ).
stat_values(Value, Acc) when is_list(Value), is_list(Acc) ->
lists:zipwith(fun stat_values/2, Value, Acc);
stat_values({PreRed}, Acc) when is_list(PreRed) ->
stat_values(unpack_stats({PreRed}), Acc);
stat_values(Value, Acc) when is_number(Value) ->
- stat_values({Value, 1, Value, Value, Value*Value}, Acc);
+ stat_values({Value, 1, Value, Value, Value * Value}, Acc);
stat_values(Value, Acc) when is_number(Acc) ->
- stat_values(Value, {Acc, 1, Acc, Acc, Acc*Acc});
+ stat_values(Value, {Acc, 1, Acc, Acc, Acc * Acc});
stat_values(Value, Acc) when is_tuple(Value), is_tuple(Acc) ->
{Sum0, Cnt0, Min0, Max0, Sqr0} = Value,
{Sum1, Cnt1, Min1, Max1, Sqr1} = Acc,
{
- Sum0 + Sum1,
- Cnt0 + Cnt1,
- erlang:min(Min0, Min1),
- erlang:max(Max0, Max1),
- Sqr0 + Sqr1
+ Sum0 + Sum1,
+ Cnt0 + Cnt1,
+ erlang:min(Min0, Min1),
+ erlang:max(Max0, Max1),
+ Sqr0 + Sqr1
};
stat_values(Else, _Acc) ->
throw_stat_error(Else).
@@ -303,7 +321,7 @@ stat_values(Else, _Acc) ->
build_initial_accumulator(L) when is_list(L) ->
[build_initial_accumulator(X) || X <- L];
build_initial_accumulator(X) when is_number(X) ->
- {X, 1, X, X, X*X};
+ {X, 1, X, X, X * X};
build_initial_accumulator({_, _, _, _, _} = AlreadyUnpacked) ->
AlreadyUnpacked;
build_initial_accumulator({Props}) ->
@@ -314,16 +332,21 @@ build_initial_accumulator(Else) ->
unpack_stats({PreRed}) when is_list(PreRed) ->
{
- get_number(<<"sum">>, PreRed),
- get_number(<<"count">>, PreRed),
- get_number(<<"min">>, PreRed),
- get_number(<<"max">>, PreRed),
- get_number(<<"sumsqr">>, PreRed)
+ get_number(<<"sum">>, PreRed),
+ get_number(<<"count">>, PreRed),
+ get_number(<<"min">>, PreRed),
+ get_number(<<"max">>, PreRed),
+ get_number(<<"sumsqr">>, PreRed)
}.
-
pack_stats({Sum, Cnt, Min, Max, Sqr}) ->
- {[{<<"sum">>,Sum}, {<<"count">>,Cnt}, {<<"min">>,Min}, {<<"max">>,Max}, {<<"sumsqr">>,Sqr}]};
+ {[
+ {<<"sum">>, Sum},
+ {<<"count">>, Cnt},
+ {<<"min">>, Min},
+ {<<"max">>, Max},
+ {<<"sumsqr">>, Sqr}
+ ]};
pack_stats({Packed}) ->
% Legacy code path before we had the finalize operation
{Packed};
@@ -332,35 +355,43 @@ pack_stats(Stats) when is_list(Stats) ->
get_number(Key, Props) ->
case couch_util:get_value(Key, Props) of
- X when is_number(X) ->
- X;
- undefined when is_binary(Key) ->
- get_number(binary_to_atom(Key, latin1), Props);
- undefined ->
- Msg = io_lib:format("user _stats input missing required field ~s (~p)",
- [Key, Props]),
- throw({invalid_value, iolist_to_binary(Msg)});
- Else ->
- Msg = io_lib:format("non-numeric _stats input received for ~s: ~w",
- [Key, Else]),
- throw({invalid_value, iolist_to_binary(Msg)})
+ X when is_number(X) ->
+ X;
+ undefined when is_binary(Key) ->
+ get_number(binary_to_atom(Key, latin1), Props);
+ undefined ->
+ Msg = io_lib:format(
+ "user _stats input missing required field ~s (~p)",
+ [Key, Props]
+ ),
+ throw({invalid_value, iolist_to_binary(Msg)});
+ Else ->
+ Msg = io_lib:format(
+ "non-numeric _stats input received for ~s: ~w",
+ [Key, Else]
+ ),
+ throw({invalid_value, iolist_to_binary(Msg)})
end.
% TODO allow customization of precision in the ddoc.
approx_count_distinct(reduce, KVs) ->
- lists:foldl(fun([[Key, _Id], _Value], Filter) ->
- hyper:insert(term_to_binary(Key), Filter)
- end, hyper:new(11), KVs);
+ lists:foldl(
+ fun([[Key, _Id], _Value], Filter) ->
+ hyper:insert(term_to_binary(Key), Filter)
+ end,
+ hyper:new(11),
+ KVs
+ );
approx_count_distinct(rereduce, Reds) ->
hyper:union([Filter || [_, Filter] <- Reds]).
% use the function stored in ddoc.validate_doc_update to test an update.
-spec validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) -> ok when
- DDoc :: ddoc(),
+ DDoc :: ddoc(),
EditDoc :: doc(),
DiskDoc :: doc() | nil,
- Ctx :: user_ctx(),
- SecObj :: sec_obj().
+ Ctx :: user_ctx(),
+ SecObj :: sec_obj().
validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
JsonEditDoc = couch_doc:to_json_obj(EditDoc, [revs]),
@@ -370,8 +401,9 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
[<<"validate_doc_update">>],
[JsonEditDoc, JsonDiskDoc, Ctx, SecObj]
),
- if Resp == 1 -> ok; true ->
- couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
+ if
+ Resp == 1 -> ok;
+ true -> couch_stats:increment_counter([couchdb, query_server, vdu_rejects], 1)
end,
case Resp of
RespCode when RespCode =:= 1; RespCode =:= ok; RespCode =:= true ->
@@ -386,11 +418,15 @@ validate_doc_update(DDoc, EditDoc, DiskDoc, Ctx, SecObj) ->
throw({unknown_error, Message})
end.
-
rewrite(Req, Db, DDoc) ->
- Fields = [F || F <- chttpd_external:json_req_obj_fields(),
- F =/= <<"info">>, F =/= <<"form">>,
- F =/= <<"uuid">>, F =/= <<"id">>],
+ Fields = [
+ F
+ || F <- chttpd_external:json_req_obj_fields(),
+ F =/= <<"info">>,
+ F =/= <<"form">>,
+ F =/= <<"uuid">>,
+ F =/= <<"id">>
+ ],
JsonReq = chttpd_external:json_req_obj(Req, Db, null, Fields),
case couch_query_servers:ddoc_prompt(DDoc, [<<"rewrites">>], [JsonReq]) of
{[{<<"forbidden">>, Message}]} ->
@@ -399,10 +435,10 @@ rewrite(Req, Db, DDoc) ->
throw({unauthorized, Message});
[<<"no_dispatch_rule">>] ->
undefined;
- [<<"ok">>, {V}=Rewrite] when is_list(V) ->
+ [<<"ok">>, {V} = Rewrite] when is_list(V) ->
ok = validate_rewrite_response(Rewrite),
Rewrite;
- [<<"ok">>, _] ->
+ [<<"ok">>, _] ->
throw_rewrite_error(<<"bad rewrite">>);
V ->
couch_log:error("bad rewrite return ~p", [V]),
@@ -430,15 +466,17 @@ validate_rewrite_response_field(<<"body">>, Body) when is_binary(Body) ->
ok;
validate_rewrite_response_field(<<"body">>, _) ->
throw_rewrite_error(<<"bad body">>);
-validate_rewrite_response_field(<<"headers">>, {Props}=Headers) when is_list(Props) ->
+validate_rewrite_response_field(<<"headers">>, {Props} = Headers) when is_list(Props) ->
validate_object_fields(Headers);
validate_rewrite_response_field(<<"headers">>, _) ->
throw_rewrite_error(<<"bad headers">>);
-validate_rewrite_response_field(<<"query">>, {Props}=Query) when is_list(Props) ->
+validate_rewrite_response_field(<<"query">>, {Props} = Query) when is_list(Props) ->
validate_object_fields(Query);
validate_rewrite_response_field(<<"query">>, _) ->
throw_rewrite_error(<<"bad query">>);
-validate_rewrite_response_field(<<"code">>, Code) when is_integer(Code) andalso Code >= 200 andalso Code < 600 ->
+validate_rewrite_response_field(<<"code">>, Code) when
+ is_integer(Code) andalso Code >= 200 andalso Code < 600
+->
ok;
validate_rewrite_response_field(<<"code">>, _) ->
throw_rewrite_error(<<"bad code">>);
@@ -447,24 +485,26 @@ validate_rewrite_response_field(K, V) ->
ok.
validate_object_fields({Props}) when is_list(Props) ->
- lists:foreach(fun
- ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
- ok;
- ({Key, Value}) ->
- Reason = io_lib:format(
- "object key/value must be strings ~p=~p", [Key, Value]),
- throw_rewrite_error(Reason);
- (Value) ->
- throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
- end, Props).
-
+ lists:foreach(
+ fun
+ ({Key, Value}) when is_binary(Key) andalso is_binary(Value) ->
+ ok;
+ ({Key, Value}) ->
+ Reason = io_lib:format(
+ "object key/value must be strings ~p=~p", [Key, Value]
+ ),
+ throw_rewrite_error(Reason);
+ (Value) ->
+ throw_rewrite_error(io_lib:format("bad value ~p", [Value]))
+ end,
+ Props
+ ).
-throw_rewrite_error(Reason) when is_list(Reason)->
+throw_rewrite_error(Reason) when is_list(Reason) ->
throw_rewrite_error(iolist_to_binary(Reason));
throw_rewrite_error(Reason) when is_binary(Reason) ->
throw({rewrite_error, Reason}).
-
json_doc_options() ->
json_doc_options([]).
@@ -487,18 +527,19 @@ filter_view(DDoc, VName, Docs) ->
{ok, Passes}.
filter_docs(Req, Db, DDoc, FName, Docs) ->
- JsonReq = case Req of
- {json_req, JsonObj} ->
- JsonObj;
- #httpd{} = HttpReq ->
- chttpd_external:json_req_obj(HttpReq, Db)
- end,
+ JsonReq =
+ case Req of
+ {json_req, JsonObj} ->
+ JsonObj;
+ #httpd{} = HttpReq ->
+ chttpd_external:json_req_obj(HttpReq, Db)
+ end,
Options = json_doc_options(),
JsonDocs = [json_doc(Doc, Options) || Doc <- Docs],
try
{ok, filter_docs_int(DDoc, FName, JsonReq, JsonDocs)}
catch
- throw:{os_process_error,{exit_status,1}} ->
+ throw:{os_process_error, {exit_status, 1}} ->
%% batch used too much memory, retry sequentially.
Fun = fun(JsonDoc) ->
filter_docs_int(DDoc, FName, JsonReq, [JsonDoc])
@@ -507,8 +548,11 @@ filter_docs(Req, Db, DDoc, FName, Docs) ->
end.
filter_docs_int(DDoc, FName, JsonReq, JsonDocs) ->
- [true, Passes] = ddoc_prompt(DDoc, [<<"filters">>, FName],
- [JsonDocs, JsonReq]),
+ [true, Passes] = ddoc_prompt(
+ DDoc,
+ [<<"filters">>, FName],
+ [JsonDocs, JsonReq]
+ ),
Passes.
ddoc_proc_prompt({Proc, DDocId}, FunPath, Args) ->
@@ -533,12 +577,12 @@ with_ddoc_proc(#doc{id=DDocId,revs={Start, [DiskRev|_]}}=DDoc, Fun) ->
end.
proc_prompt(Proc, Args) ->
- case proc_prompt_raw(Proc, Args) of
- {json, Json} ->
- raw_to_ejson({json, Json});
- EJson ->
- EJson
- end.
+ case proc_prompt_raw(Proc, Args) of
+ {json, Json} ->
+ raw_to_ejson({json, Json});
+ EJson ->
+ EJson
+ end.
proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
apply(Mod, Func, [Proc#proc.pid, Args]).
@@ -546,13 +590,16 @@ proc_prompt_raw(#proc{prompt_fun = {Mod, Func}} = Proc, Args) ->
raw_to_ejson({json, Json}) ->
try
?JSON_DECODE(Json)
- catch throw:{invalid_json, {_, invalid_string}} ->
- Forced = try
- force_utf8(Json)
- catch _:_ ->
- Json
- end,
- ?JSON_DECODE(Forced)
+ catch
+ throw:{invalid_json, {_, invalid_string}} ->
+ Forced =
+ try
+ force_utf8(Json)
+ catch
+ _:_ ->
+ Json
+ end,
+ ?JSON_DECODE(Forced)
end;
raw_to_ejson(EJson) ->
EJson.
@@ -561,14 +608,15 @@ force_utf8(Bin) ->
case binary:match(Bin, <<"\\u">>) of
{Start, 2} ->
<<Prefix:Start/binary, Rest1/binary>> = Bin,
- {Insert, Rest3} = case check_uescape(Rest1) of
- {ok, Skip} ->
- <<Skipped:Skip/binary, Rest2/binary>> = Rest1,
- {Skipped, Rest2};
- {error, Skip} ->
- <<_:Skip/binary, Rest2/binary>> = Rest1,
- {<<16#EF, 16#BF, 16#BD>>, Rest2}
- end,
+ {Insert, Rest3} =
+ case check_uescape(Rest1) of
+ {ok, Skip} ->
+ <<Skipped:Skip/binary, Rest2/binary>> = Rest1,
+ {Skipped, Rest2};
+ {error, Skip} ->
+ <<_:Skip/binary, Rest2/binary>> = Rest1,
+ {<<16#EF, 16#BF, 16#BD>>, Rest2}
+ end,
RestForced = force_utf8(Rest3),
<<Prefix/binary, Insert/binary, RestForced/binary>>;
nomatch ->
@@ -588,8 +636,9 @@ check_uescape(Data) ->
try
[_] = xmerl_ucs:from_utf16be(UTF16),
{ok, 12}
- catch _:_ ->
- {error, 6}
+ catch
+ _:_ ->
+ {error, 6}
end;
{_, _} ->
% Found a uescape that's not a low half
@@ -628,33 +677,33 @@ get_os_process_timeout() ->
get_ddoc_process(#doc{} = DDoc, DDocKey) ->
% remove this case statement
case gen_server:call(couch_proc_manager, {get_proc, DDoc, DDocKey}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- % process knows the ddoc
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_ddoc_process(DDoc, DDocKey)
- end;
- Error ->
- throw(Error)
+ {ok, Proc, {QueryConfig}} ->
+ % process knows the ddoc
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_ddoc_process(DDoc, DDocKey)
+ end;
+ Error ->
+ throw(Error)
end.
get_os_process(Lang) ->
case gen_server:call(couch_proc_manager, {get_proc, Lang}, get_os_process_timeout()) of
- {ok, Proc, {QueryConfig}} ->
- case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
- true ->
- proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
- Proc;
- _ ->
- catch proc_stop(Proc),
- get_os_process(Lang)
- end;
- Error ->
- throw(Error)
+ {ok, Proc, {QueryConfig}} ->
+ case (catch proc_prompt(Proc, [<<"reset">>, {QueryConfig}])) of
+ true ->
+ proc_set_timeout(Proc, couch_util:get_value(<<"timeout">>, QueryConfig)),
+ Proc;
+ _ ->
+ catch proc_stop(Proc),
+ get_os_process(Lang)
+ end;
+ Error ->
+ throw(Error)
end.
ret_os_process(Proc) ->
@@ -668,7 +717,6 @@ throw_sum_error(Else) ->
throw_stat_error(Else) ->
throw({invalid_value, iolist_to_binary(io_lib:format(?STATERROR, [Else]))}).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -680,19 +728,38 @@ builtin_sum_rows_negative_test() ->
% it's only one document.
?assertEqual(A, builtin_sum_rows([["K", A]], [])),
{Result} = builtin_sum_rows([["K", A]], [1, 2, 3]),
- ?assertEqual({<<"error">>, <<"builtin_reduce_error">>},
- lists:keyfind(<<"error">>, 1, Result)).
+ ?assertEqual(
+ {<<"error">>, <<"builtin_reduce_error">>},
+ lists:keyfind(<<"error">>, 1, Result)
+ ).
sum_values_test() ->
?assertEqual(3, sum_values(1, 2)),
- ?assertEqual([2,4,6], sum_values(1, [1,4,6])),
- ?assertEqual([3,5,7], sum_values([3,2,4], [0,3,3])),
- X = {[{<<"a">>,1}, {<<"b">>,[1,2]}, {<<"c">>, {[{<<"d">>,3}]}},
- {<<"g">>,1}]},
- Y = {[{<<"a">>,2}, {<<"b">>,3}, {<<"c">>, {[{<<"e">>, 5}]}},
- {<<"f">>,1}, {<<"g">>,1}]},
- Z = {[{<<"a">>,3}, {<<"b">>,[4,2]}, {<<"c">>, {[{<<"d">>,3},{<<"e">>,5}]}},
- {<<"f">>,1}, {<<"g">>,2}]},
+ ?assertEqual([2, 4, 6], sum_values(1, [1, 4, 6])),
+ ?assertEqual([3, 5, 7], sum_values([3, 2, 4], [0, 3, 3])),
+ X =
+ {[
+ {<<"a">>, 1},
+ {<<"b">>, [1, 2]},
+ {<<"c">>, {[{<<"d">>, 3}]}},
+ {<<"g">>, 1}
+ ]},
+ Y =
+ {[
+ {<<"a">>, 2},
+ {<<"b">>, 3},
+ {<<"c">>, {[{<<"e">>, 5}]}},
+ {<<"f">>, 1},
+ {<<"g">>, 1}
+ ]},
+ Z =
+ {[
+ {<<"a">>, 3},
+ {<<"b">>, [4, 2]},
+ {<<"c">>, {[{<<"d">>, 3}, {<<"e">>, 5}]}},
+ {<<"f">>, 1},
+ {<<"g">>, 2}
+ ]},
?assertEqual(Z, sum_values(X, Y)),
?assertEqual(Z, sum_values(Y, X)).
@@ -701,8 +768,12 @@ sum_values_negative_test() ->
A = [{[{<<"a">>, 1}]}, {[{<<"a">>, 2}]}, {[{<<"a">>, 3}]}],
B = ["error 1", "error 2"],
C = [<<"error 3">>, <<"error 4">>],
- KV = {[{<<"error">>, <<"builtin_reduce_error">>},
- {<<"reason">>, ?SUMERROR}, {<<"caused_by">>, <<"some cause">>}]},
+ KV =
+ {[
+ {<<"error">>, <<"builtin_reduce_error">>},
+ {<<"reason">>, ?SUMERROR},
+ {<<"caused_by">>, <<"some cause">>}
+ ]},
?assertThrow({invalid_value, _, _}, sum_values(A, [1, 2, 3])),
?assertThrow({invalid_value, _, _}, sum_values(A, 0)),
?assertThrow({invalid_value, _, _}, sum_values(B, [1, 2])),
@@ -712,48 +783,103 @@ sum_values_negative_test() ->
stat_values_test() ->
?assertEqual({1, 2, 0, 1, 1}, stat_values(1, 0)),
?assertEqual({11, 2, 1, 10, 101}, stat_values(1, 10)),
- ?assertEqual([{9, 2, 2, 7, 53},
- {14, 2, 3, 11, 130},
- {18, 2, 5, 13, 194}
- ], stat_values([2,3,5], [7,11,13])).
+ ?assertEqual(
+ [
+ {9, 2, 2, 7, 53},
+ {14, 2, 3, 11, 130},
+ {18, 2, 5, 13, 194}
+ ],
+ stat_values([2, 3, 5], [7, 11, 13])
+ ).
reduce_stats_test() ->
- ?assertEqual([
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], test_reduce(<<"_stats">>, [[[null, key], 2]])),
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ test_reduce(<<"_stats">>, [[[null, key], 2]])
+ ),
- ?assertEqual([[
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ]], test_reduce(<<"_stats">>, [[[null, key],[1,2]]])),
+ ?assertEqual(
+ [
+ [
+ {[
+ {<<"sum">>, 1},
+ {<<"count">>, 1},
+ {<<"min">>, 1},
+ {<<"max">>, 1},
+ {<<"sumsqr">>, 1}
+ ]},
+ {[
+ {<<"sum">>, 2},
+ {<<"count">>, 1},
+ {<<"min">>, 2},
+ {<<"max">>, 2},
+ {<<"sumsqr">>, 4}
+ ]}
+ ]
+ ],
+ test_reduce(<<"_stats">>, [[[null, key], [1, 2]]])
+ ),
+
+ ?assertEqual(
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]},
+ element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))
+ ),
?assertEqual(
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- , element(2, finalize(<<"_stats">>, {2, 1, 2, 2, 4}))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {2, 1, 2, 2, 4}
- ]))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {1, 1, 1, 1, 1},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ]))),
-
- ?assertEqual([
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {[{<<"sum">>,2},{<<"count">>,1},{<<"min">>,2},{<<"max">>,2},{<<"sumsqr">>,4}]}
- ], element(2, finalize(<<"_stats">>, [
- {[{<<"sum">>,1},{<<"count">>,1},{<<"min">>,1},{<<"max">>,1},{<<"sumsqr">>,1}]},
- {2, 1, 2, 2, 4}
- ]))),
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {2, 1, 2, 2, 4}
+ ])
+ )
+ ),
+
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {1, 1, 1, 1, 1},
+ {[
+ {<<"sum">>, 2},
+ {<<"count">>, 1},
+ {<<"min">>, 2},
+ {<<"max">>, 2},
+ {<<"sumsqr">>, 4}
+ ]}
+ ])
+ )
+ ),
+
+ ?assertEqual(
+ [
+ {[{<<"sum">>, 1}, {<<"count">>, 1}, {<<"min">>, 1}, {<<"max">>, 1}, {<<"sumsqr">>, 1}]},
+ {[{<<"sum">>, 2}, {<<"count">>, 1}, {<<"min">>, 2}, {<<"max">>, 2}, {<<"sumsqr">>, 4}]}
+ ],
+ element(
+ 2,
+ finalize(<<"_stats">>, [
+ {[
+ {<<"sum">>, 1},
+ {<<"count">>, 1},
+ {<<"min">>, 1},
+ {<<"max">>, 1},
+ {<<"sumsqr">>, 1}
+ ]},
+ {2, 1, 2, 2, 4}
+ ])
+ )
+ ),
ok.
test_reduce(Reducer, KVs) ->
@@ -773,9 +899,12 @@ force_utf8_test() ->
% Truncated but we don't break replacements
<<"\\u0FA">>
],
- lists:foreach(fun(Case) ->
- ?assertEqual(Case, force_utf8(Case))
- end, Ok),
+ lists:foreach(
+ fun(Case) ->
+ ?assertEqual(Case, force_utf8(Case))
+ end,
+ Ok
+ ),
NotOk = [
<<"\\uDCA5">>,
@@ -788,15 +917,18 @@ force_utf8_test() ->
<<"\\uD83D\\u00A0">>
],
ToJSON = fun(Bin) -> <<34, Bin/binary, 34>> end,
- lists:foreach(fun(Case) ->
- try
- ?assertNotEqual(Case, force_utf8(Case)),
- ?assertThrow(_, ?JSON_DECODE(ToJSON(Case))),
- ?assertMatch(<<_/binary>>, ?JSON_DECODE(ToJSON(force_utf8(Case))))
- catch
- T:R ->
- io:format(standard_error, "~p~n~p~n", [T, R])
- end
- end, NotOk).
+ lists:foreach(
+ fun(Case) ->
+ try
+ ?assertNotEqual(Case, force_utf8(Case)),
+ ?assertThrow(_, ?JSON_DECODE(ToJSON(Case))),
+ ?assertMatch(<<_/binary>>, ?JSON_DECODE(ToJSON(force_utf8(Case))))
+ catch
+ T:R ->
+ io:format(standard_error, "~p~n~p~n", [T, R])
+ end
+ end,
+ NotOk
+ ).
-endif.
diff --git a/src/couch/src/couch_rand.erl b/src/couch/src/couch_rand.erl
index 67454b8ad..bc30956a4 100644
--- a/src/couch/src/couch_rand.erl
+++ b/src/couch/src/couch_rand.erl
@@ -12,16 +12,13 @@
-module(couch_rand).
-
-export([
uniform/0,
uniform/1
]).
-
uniform() ->
rand:uniform().
-
uniform(N) ->
rand:uniform(N).
diff --git a/src/couch/src/couch_secondary_sup.erl b/src/couch/src/couch_secondary_sup.erl
index bb7821555..a328c170e 100644
--- a/src/couch/src/couch_secondary_sup.erl
+++ b/src/couch/src/couch_secondary_sup.erl
@@ -15,16 +15,12 @@
-export([init/1, start_link/0]).
start_link() ->
- supervisor:start_link({local,couch_secondary_services}, ?MODULE, []).
+ supervisor:start_link({local, couch_secondary_services}, ?MODULE, []).
init([]) ->
SecondarySupervisors = [
- {couch_plugin_event,
- {gen_event, start_link, [{local, couch_plugin}]},
- permanent,
- brutal_kill,
- worker,
- dynamic}
+ {couch_plugin_event, {gen_event, start_link, [{local, couch_plugin}]}, permanent,
+ brutal_kill, worker, dynamic}
],
Daemons = [
{index_server, {couch_index_server, start_link, []}},
@@ -33,31 +29,34 @@ init([]) ->
{uuids, {couch_uuids, start, []}}
],
- MaybeHttp = case http_enabled() of
- true -> [{httpd, {couch_httpd, start_link, []}}];
- false -> couch_httpd:set_auth_handlers(), []
- end,
+ MaybeHttp =
+ case http_enabled() of
+ true ->
+ [{httpd, {couch_httpd, start_link, []}}];
+ false ->
+ couch_httpd:set_auth_handlers(),
+ []
+ end,
- MaybeHttps = case https_enabled() of
- true -> [{httpsd, {chttpd, start_link, [https]}}];
- false -> []
- end,
+ MaybeHttps =
+ case https_enabled() of
+ true -> [{httpsd, {chttpd, start_link, [https]}}];
+ false -> []
+ end,
- Children = SecondarySupervisors ++ [
- begin
- {Module, Fun, Args} = Spec,
+ Children =
+ SecondarySupervisors ++
+ [
+ begin
+ {Module, Fun, Args} = Spec,
- {Name,
- {Module, Fun, Args},
- permanent,
- brutal_kill,
- worker,
- [Module]}
- end
- || {Name, Spec}
- <- Daemons ++ MaybeHttp ++ MaybeHttps, Spec /= ""],
- {ok, {{one_for_one, 50, 3600},
- couch_epi:register_service(couch_db_epi, Children)}}.
+ {Name, {Module, Fun, Args}, permanent, brutal_kill, worker, [Module]}
+ end
+ || {Name, Spec} <-
+ Daemons ++ MaybeHttp ++ MaybeHttps,
+ Spec /= ""
+ ],
+ {ok, {{one_for_one, 50, 3600}, couch_epi:register_service(couch_db_epi, Children)}}.
http_enabled() ->
config:get_boolean("httpd", "enable", false).
diff --git a/src/couch/src/couch_server.erl b/src/couch/src/couch_server.erl
index 3c72e3357..06be86786 100644
--- a/src/couch/src/couch_server.erl
+++ b/src/couch/src/couch_server.erl
@@ -15,11 +15,11 @@
-behaviour(config_listener).
-vsn(3).
--export([open/2,create/2,delete/2,get_version/0,get_version/1,get_git_sha/0,get_uuid/0]).
+-export([open/2, create/2, delete/2, get_version/0, get_version/1, get_git_sha/0, get_uuid/0]).
-export([all_databases/0, all_databases/2]).
--export([init/1, handle_call/3,sup_start_link/1]).
--export([handle_cast/2,code_change/3,handle_info/2,terminate/2]).
--export([dev_start/0,is_admin/2,has_admins/0,get_stats/0]).
+-export([init/1, handle_call/3, sup_start_link/1]).
+-export([handle_cast/2, code_change/3, handle_info/2, terminate/2]).
+-export([dev_start/0, is_admin/2, has_admins/0, get_stats/0]).
-export([close_db_if_idle/1]).
-export([delete_compaction_files/1]).
-export([exists/1]).
@@ -28,7 +28,7 @@
-export([lock/2, unlock/1]).
-export([db_updated/1]).
-export([num_servers/0, couch_server/1, couch_dbs_pid_to_name/1, couch_dbs/1]).
--export([aggregate_queue_len/0,get_spidermonkey_version/0]).
+-export([aggregate_queue_len/0, get_spidermonkey_version/0]).
% config_listener api
-export([handle_config_change/5, handle_config_terminate/3]).
@@ -40,18 +40,18 @@
-define(RELISTEN_DELAY, 5000).
-define(DEFAULT_ENGINE, "couch").
--record(server,{
+-record(server, {
root_dir = [],
engines = [],
- max_dbs_open=?MAX_DBS_OPEN,
- dbs_open=0,
- start_time="",
- update_lru_on_read=true,
+ max_dbs_open = ?MAX_DBS_OPEN,
+ dbs_open = 0,
+ start_time = "",
+ update_lru_on_read = true,
lru = couch_lru:new(),
couch_dbs,
couch_dbs_pid_to_name,
couch_dbs_locks
- }).
+}).
dev_start() ->
couch:stop(),
@@ -59,11 +59,12 @@ dev_start() ->
couch:start().
get_version() ->
- ?COUCHDB_VERSION. %% Defined in rebar.config.script
+ %% Defined in rebar.config.script
+ ?COUCHDB_VERSION.
get_version(short) ->
- %% strip git hash from version string
- [Version|_Rest] = string:tokens(get_version(), "+"),
- Version.
+ %% strip git hash from version string
+ [Version | _Rest] = string:tokens(get_version(), "+"),
+ Version.
get_git_sha() -> ?COUCHDB_GIT_SHA.
@@ -73,16 +74,18 @@ get_uuid() ->
UUID = couch_uuids:random(),
config:set("couchdb", "uuid", ?b2l(UUID)),
UUID;
- UUID -> ?l2b(UUID)
+ UUID ->
+ ?l2b(UUID)
end.
get_stats() ->
Fun = fun(N, {TimeAcc, OpenAcc}) ->
- {ok, #server{start_time=Time,dbs_open=Open}} =
+ {ok, #server{start_time = Time, dbs_open = Open}} =
gen_server:call(couch_server(N), get_server),
- {max(Time, TimeAcc), Open + OpenAcc} end,
+ {max(Time, TimeAcc), Open + OpenAcc}
+ end,
{Time, Open} =
- lists:foldl(Fun, {0, 0}, lists:seq(1, num_servers())),
+ lists:foldl(Fun, {0, 0}, lists:seq(1, num_servers())),
[{start_time, ?l2b(Time)}, {dbs_open, Open}].
get_spidermonkey_version() -> list_to_binary(?COUCHDB_SPIDERMONKEY_VERSION).
@@ -94,31 +97,32 @@ open(DbName, Options) ->
try
validate_open_or_create(DbName, Options),
open_int(DbName, Options)
- catch throw:{?MODULE, Error} ->
- Error
+ catch
+ throw:{?MODULE, Error} ->
+ Error
end.
open_int(DbName, Options0) ->
Ctx = couch_util:get_value(user_ctx, Options0, #user_ctx{}),
case ets:lookup(couch_dbs(DbName), DbName) of
- [#entry{db = Db0, lock = Lock} = Entry] when Lock =/= locked ->
- update_lru(DbName, Entry#entry.db_options),
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- _ ->
- Options = maybe_add_sys_db_callbacks(DbName, Options0),
- Timeout = couch_util:get_value(timeout, Options, infinity),
- Create = couch_util:get_value(create_if_missing, Options, false),
- case gen_server:call(couch_server(DbName), {open, DbName, Options}, Timeout) of
- {ok, Db0} ->
+ [#entry{db = Db0, lock = Lock} = Entry] when Lock =/= locked ->
+ update_lru(DbName, Entry#entry.db_options),
{ok, Db1} = couch_db:incref(Db0),
couch_db:set_user_ctx(Db1, Ctx);
- {not_found, no_db_file} when Create ->
- couch_log:warning("creating missing database: ~s", [DbName]),
- couch_server:create(DbName, Options);
- Error ->
- Error
- end
+ _ ->
+ Options = maybe_add_sys_db_callbacks(DbName, Options0),
+ Timeout = couch_util:get_value(timeout, Options, infinity),
+ Create = couch_util:get_value(create_if_missing, Options, false),
+ case gen_server:call(couch_server(DbName), {open, DbName, Options}, Timeout) of
+ {ok, Db0} ->
+ {ok, Db1} = couch_db:incref(Db0),
+ couch_db:set_user_ctx(Db1, Ctx);
+ {not_found, no_db_file} when Create ->
+ couch_log:warning("creating missing database: ~s", [DbName]),
+ couch_server:create(DbName, Options);
+ Error ->
+ Error
+ end
end.
update_lru(DbName, Options) ->
@@ -132,47 +136,48 @@ update_lru(DbName, Options) ->
ok
end.
-
create(DbName, Options) ->
try
validate_open_or_create(DbName, Options),
create_int(DbName, Options)
- catch throw:{?MODULE, Error} ->
- Error
+ catch
+ throw:{?MODULE, Error} ->
+ Error
end.
create_int(DbName, Options0) ->
Options = maybe_add_sys_db_callbacks(DbName, Options0),
couch_partition:validate_dbname(DbName, Options),
case gen_server:call(couch_server(DbName), {create, DbName, Options}, infinity) of
- {ok, Db0} ->
- Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
- {ok, Db1} = couch_db:incref(Db0),
- couch_db:set_user_ctx(Db1, Ctx);
- Error ->
- Error
+ {ok, Db0} ->
+ Ctx = couch_util:get_value(user_ctx, Options, #user_ctx{}),
+ {ok, Db1} = couch_db:incref(Db0),
+ couch_db:set_user_ctx(Db1, Ctx);
+ Error ->
+ Error
end.
delete(DbName, Options) ->
gen_server:call(couch_server(DbName), {delete, DbName, Options}, infinity).
-
exists(DbName) ->
RootDir = config:get("couchdb", "database_dir", "."),
Engines = get_configured_engines(),
Possible = get_possible_engines(DbName, RootDir, Engines),
Possible /= [].
-
delete_compaction_files(DbName) ->
delete_compaction_files(DbName, []).
delete_compaction_files(DbName, DelOpts) when is_list(DbName) ->
RootDir = config:get("couchdb", "database_dir", "."),
- lists:foreach(fun({Ext, Engine}) ->
- FPath = make_filepath(RootDir, DbName, Ext),
- couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts)
- end, get_configured_engines()),
+ lists:foreach(
+ fun({Ext, Engine}) ->
+ FPath = make_filepath(RootDir, DbName, Ext),
+ couch_db_engine:delete_compaction_files(Engine, RootDir, FPath, DelOpts)
+ end,
+ get_configured_engines()
+ ),
ok;
delete_compaction_files(DbName, DelOpts) when is_binary(DbName) ->
delete_compaction_files(?b2l(DbName), DelOpts).
@@ -185,22 +190,32 @@ maybe_add_sys_db_callbacks(DbName, Options) ->
IsReplicatorDb = path_ends_with(DbName, "_replicator"),
UsersDbSuffix = config:get("couchdb", "users_db_suffix", "_users"),
- IsUsersDb = path_ends_with(DbName, "_users")
- orelse path_ends_with(DbName, UsersDbSuffix),
+ IsUsersDb =
+ path_ends_with(DbName, "_users") orelse
+ path_ends_with(DbName, UsersDbSuffix),
if
DbName == DbsDbName ->
- [{before_doc_update, fun mem3_bdu:before_doc_update/3},
- sys_db | Options];
+ [
+ {before_doc_update, fun mem3_bdu:before_doc_update/3},
+ sys_db
+ | Options
+ ];
DbName == NodesDbName ->
[sys_db | Options];
IsReplicatorDb ->
- [{before_doc_update, fun couch_replicator_docs:before_doc_update/3},
- {after_doc_read, fun couch_replicator_docs:after_doc_read/2},
- sys_db | Options];
+ [
+ {before_doc_update, fun couch_replicator_docs:before_doc_update/3},
+ {after_doc_read, fun couch_replicator_docs:after_doc_read/2},
+ sys_db
+ | Options
+ ];
IsUsersDb ->
- [{before_doc_update, fun couch_users_db:before_doc_update/3},
- {after_doc_read, fun couch_users_db:after_doc_read/2},
- sys_db | Options];
+ [
+ {before_doc_update, fun couch_users_db:before_doc_update/3},
+ {after_doc_read, fun couch_users_db:after_doc_read/2},
+ sys_db
+ | Options
+ ];
true ->
Options
end.
@@ -215,11 +230,11 @@ check_dbname(DbName) ->
is_admin(User, ClearPwd) ->
case config:get("admins", User) of
- "-hashed-" ++ HashedPwdAndSalt ->
- [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
- couch_util:to_hex(crypto:hash(sha, ClearPwd ++ Salt)) == HashedPwd;
- _Else ->
- false
+ "-hashed-" ++ HashedPwdAndSalt ->
+ [HashedPwd, Salt] = string:tokens(HashedPwdAndSalt, ","),
+ couch_util:to_hex(crypto:hash(sha, ClearPwd ++ Salt)) == HashedPwd;
+ _Else ->
+ false
end.
has_admins() ->
@@ -233,7 +248,9 @@ hash_admin_passwords(Persist) ->
fun({User, ClearPassword}) ->
HashedPassword = couch_passwords:hash_admin_password(ClearPassword),
config:set("admins", User, ?b2l(HashedPassword), Persist)
- end, couch_passwords:get_unhashed_admins()).
+ end,
+ couch_passwords:get_unhashed_admins()
+ ).
close_db_if_idle(DbName) ->
case ets:lookup(couch_dbs(DbName), DbName) of
@@ -243,7 +260,6 @@ close_db_if_idle(DbName) ->
ok
end.
-
init([N]) ->
couch_util:set_mqd_off_heap(?MODULE),
couch_util:set_process_priority(?MODULE, high),
@@ -260,7 +276,8 @@ init([N]) ->
% Mark if fips is enabled
case
erlang:function_exported(crypto, info_fips, 0) andalso
- crypto:info_fips() == enabled of
+ crypto:info_fips() == enabled
+ of
true ->
config:enable_feature('fips');
false ->
@@ -276,7 +293,8 @@ init([N]) ->
Engines = get_configured_engines(),
MaxDbsOpen = config:get_integer("couchdb", "max_dbs_open", ?MAX_DBS_OPEN),
UpdateLruOnRead = config:get_boolean(
- "couchdb", "update_lru_on_read", false),
+ "couchdb", "update_lru_on_read", false
+ ),
ok = config:listen_for_changes(?MODULE, N),
ok = couch_file:init_delete_dir(RootDir),
hash_admin_passwords(),
@@ -295,44 +313,55 @@ init([N]) ->
{read_concurrency, true}
]),
process_flag(trap_exit, true),
- {ok, #server{root_dir=RootDir,
- engines = Engines,
- max_dbs_open=per_couch_server(MaxDbsOpen),
- update_lru_on_read=UpdateLruOnRead,
- start_time=couch_util:rfc1123_date(),
- couch_dbs=couch_dbs(N),
- couch_dbs_pid_to_name=couch_dbs_pid_to_name(N),
- couch_dbs_locks=couch_dbs_locks(N)}}.
+ {ok, #server{
+ root_dir = RootDir,
+ engines = Engines,
+ max_dbs_open = per_couch_server(MaxDbsOpen),
+ update_lru_on_read = UpdateLruOnRead,
+ start_time = couch_util:rfc1123_date(),
+ couch_dbs = couch_dbs(N),
+ couch_dbs_pid_to_name = couch_dbs_pid_to_name(N),
+ couch_dbs_locks = couch_dbs_locks(N)
+ }}.
terminate(Reason, Srv) ->
- couch_log:error("couch_server terminating with ~p, state ~2048p",
- [Reason,
- Srv#server{lru = redacted}]),
- ets:foldl(fun(#entry{db = Db}, _) ->
- % Filter out any entry records for open_async
- % processes that haven't finished.
- if Db == undefined -> ok; true ->
- couch_util:shutdown_sync(couch_db:get_pid(Db))
- end
- end, nil, couch_dbs(Srv)),
+ couch_log:error(
+ "couch_server terminating with ~p, state ~2048p",
+ [
+ Reason,
+ Srv#server{lru = redacted}
+ ]
+ ),
+ ets:foldl(
+ fun(#entry{db = Db}, _) ->
+ % Filter out any entry records for open_async
+ % processes that haven't finished.
+ if
+ Db == undefined -> ok;
+ true -> couch_util:shutdown_sync(couch_db:get_pid(Db))
+ end
+ end,
+ nil,
+ couch_dbs(Srv)
+ ),
ok.
handle_config_change("couchdb", "database_dir", _, _, _) ->
exit(whereis(couch_server), config_change),
remove_handler;
handle_config_change("couchdb", "update_lru_on_read", "true", _, N) ->
- gen_server:call(couch_server(N),{set_update_lru_on_read,true}),
+ gen_server:call(couch_server(N), {set_update_lru_on_read, true}),
{ok, N};
handle_config_change("couchdb", "update_lru_on_read", _, _, N) ->
- gen_server:call(couch_server(N),{set_update_lru_on_read,false}),
+ gen_server:call(couch_server(N), {set_update_lru_on_read, false}),
{ok, N};
handle_config_change("couchdb", "max_dbs_open", Max0, _, N) when is_list(Max0) ->
Max1 = per_couch_server(list_to_integer(Max0)),
- gen_server:call(couch_server(N),{set_max_dbs_open,Max1}),
+ gen_server:call(couch_server(N), {set_max_dbs_open, Max1}),
{ok, N};
handle_config_change("couchdb", "max_dbs_open", _, _, N) ->
Max = per_couch_server(?MAX_DBS_OPEN),
- gen_server:call(couch_server(N),{set_max_dbs_open,Max}),
+ gen_server:call(couch_server(N), {set_max_dbs_open, Max}),
{ok, N};
handle_config_change("couchdb_engines", _, _, _, N) ->
gen_server:call(couch_server(N), reload_engines),
@@ -361,57 +390,64 @@ handle_config_terminate(_, stop, _) ->
handle_config_terminate(_Server, _Reason, N) ->
erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), {restart_config_listener, N}).
-
per_couch_server(X) ->
erlang:max(1, X div num_servers()).
-
all_databases() ->
{ok, DbList} = all_databases(
- fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []),
+ fun(DbName, Acc) -> {ok, [DbName | Acc]} end, []
+ ),
{ok, lists:usort(DbList)}.
all_databases(Fun, Acc0) ->
- {ok, #server{root_dir=Root}} = gen_server:call(couch_server_1, get_server),
+ {ok, #server{root_dir = Root}} = gen_server:call(couch_server_1, get_server),
NormRoot = couch_util:normpath(Root),
Extensions = get_engine_extensions(),
ExtRegExp = "(" ++ string:join(Extensions, "|") ++ ")",
RegExp =
- "^[a-z0-9\\_\\$()\\+\\-]*" % stock CouchDB name regex
- "(\\.[0-9]{10,})?" % optional shard timestamp
- "\\." ++ ExtRegExp ++ "$", % filename extension
- FinalAcc = try
- couch_util:fold_files(Root,
- RegExp,
- true,
- fun(Filename, AccIn) ->
- NormFilename = couch_util:normpath(Filename),
- case NormFilename -- NormRoot of
- [$/ | RelativeFilename] -> ok;
- RelativeFilename -> ok
+ % stock CouchDB name regex
+ "^[a-z0-9\\_\\$()\\+\\-]*"
+ % optional shard timestamp
+ "(\\.[0-9]{10,})?"
+ % filename extension
+ "\\." ++ ExtRegExp ++ "$",
+ FinalAcc =
+ try
+ couch_util:fold_files(
+ Root,
+ RegExp,
+ true,
+ fun(Filename, AccIn) ->
+ NormFilename = couch_util:normpath(Filename),
+ case NormFilename -- NormRoot of
+ [$/ | RelativeFilename] -> ok;
+ RelativeFilename -> ok
+ end,
+ Ext = filename:extension(RelativeFilename),
+ case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of
+ {ok, NewAcc} -> NewAcc;
+ {stop, NewAcc} -> throw({stop, Fun, NewAcc})
+ end
end,
- Ext = filename:extension(RelativeFilename),
- case Fun(?l2b(filename:rootname(RelativeFilename, Ext)), AccIn) of
- {ok, NewAcc} -> NewAcc;
- {stop, NewAcc} -> throw({stop, Fun, NewAcc})
- end
- end, Acc0)
- catch throw:{stop, Fun, Acc1} ->
- Acc1
- end,
+ Acc0
+ )
+ catch
+ throw:{stop, Fun, Acc1} ->
+ Acc1
+ end,
{ok, FinalAcc}.
-
make_room(Server, Options) ->
case lists:member(sys_db, Options) of
false -> maybe_close_lru_db(Server);
true -> {ok, Server}
end.
-maybe_close_lru_db(#server{dbs_open=NumOpen, max_dbs_open=MaxOpen}=Server)
- when NumOpen < MaxOpen ->
+maybe_close_lru_db(#server{dbs_open = NumOpen, max_dbs_open = MaxOpen} = Server) when
+ NumOpen < MaxOpen
+->
{ok, Server};
-maybe_close_lru_db(#server{lru=Lru}=Server) ->
+maybe_close_lru_db(#server{lru = Lru} = Server) ->
case couch_lru:close(Lru) of
{true, NewLru} ->
{ok, db_closed(Server#server{lru = NewLru}, [])};
@@ -427,10 +463,11 @@ open_async(Server, From, DbName, Options) ->
T0 = os:timestamp(),
Opener = spawn_link(fun() ->
Res = open_async_int(NoLRUServer, DbName, Options),
- IsSuccess = case Res of
- {ok, _} -> true;
- _ -> false
- end,
+ IsSuccess =
+ case Res of
+ {ok, _} -> true;
+ _ -> false
+ end,
case IsSuccess andalso lists:member(create, Options) of
true ->
couch_event:notify(DbName, created);
@@ -449,10 +486,11 @@ open_async(Server, From, DbName, Options) ->
couch_log:info("open_result error ~p for ~s", [Res, DbName])
end
end),
- ReqType = case lists:member(create, Options) of
- true -> create;
- false -> open
- end,
+ ReqType =
+ case lists:member(create, Options) of
+ true -> create;
+ false -> open
+ end,
true = ets:insert(couch_dbs(Server), #entry{
name = DbName,
pid = Opener,
@@ -478,7 +516,7 @@ open_async_int(Server, DbName, Options) ->
Error1
end.
-handle_call(close_lru, _From, #server{lru=Lru} = Server) ->
+handle_call(close_lru, _From, #server{lru = Lru} = Server) ->
case couch_lru:close(Lru) of
{true, NewLru} ->
{reply, ok, db_closed(Server#server{lru = NewLru}, [])};
@@ -488,9 +526,9 @@ handle_call(close_lru, _From, #server{lru=Lru} = Server) ->
handle_call(open_dbs_count, _From, Server) ->
{reply, Server#server.dbs_open, Server};
handle_call({set_update_lru_on_read, UpdateOnRead}, _From, Server) ->
- {reply, ok, Server#server{update_lru_on_read=UpdateOnRead}};
+ {reply, ok, Server#server{update_lru_on_read = UpdateOnRead}};
handle_call({set_max_dbs_open, Max}, _From, Server) ->
- {reply, ok, Server#server{max_dbs_open=Max}};
+ {reply, ok, Server#server{max_dbs_open = Max}};
handle_call(reload_engines, _From, Server) ->
{reply, ok, Server#server{engines = get_configured_engines()}};
handle_call(get_server, _From, Server) ->
@@ -522,12 +560,13 @@ handle_call({open_result, DbName, {ok, Db}}, {Opener, _}, Server) ->
start_time = couch_db:get_instance_start_time(Db)
}),
true = ets:insert(couch_dbs_pid_to_name(Server), {DbPid, DbName}),
- Lru = case couch_db:is_system_db(Db) of
- false ->
- couch_lru:insert(DbName, Server#server.lru);
- true ->
- Server#server.lru
- end,
+ Lru =
+ case couch_db:is_system_db(Db) of
+ false ->
+ couch_lru:insert(DbName, Server#server.lru);
+ true ->
+ Server#server.lru
+ end,
{reply, ok, Server#server{lru = Lru}};
[#entry{}] ->
% A mismatched opener pid means that this open_result message
@@ -547,12 +586,13 @@ handle_call({open_result, DbName, Error}, {Opener, _}, Server) ->
[gen_server:reply(Waiter, Error) || Waiter <- Waiters],
true = ets:delete(couch_dbs(Server), DbName),
true = ets:delete(couch_dbs_pid_to_name(Server), Opener),
- NewServer = case ReqType of
- {create, DbName, Options, CrFrom} ->
- open_async(Server, CrFrom, DbName, Options);
- _ ->
- Server
- end,
+ NewServer =
+ case ReqType of
+ {create, DbName, Options, CrFrom} ->
+ open_async(Server, CrFrom, DbName, Options);
+ _ ->
+ Server
+ end,
{reply, ok, db_closed(NewServer, Entry#entry.db_options)};
[#entry{}] ->
% A mismatched pid means that this open_result message
@@ -561,159 +601,174 @@ handle_call({open_result, DbName, Error}, {Opener, _}, Server) ->
end;
handle_call({open, DbName, Options}, From, Server) ->
case ets:lookup(couch_dbs(Server), DbName) of
- [] ->
- case make_room(Server, Options) of
- {ok, Server2} ->
- {noreply, open_async(Server2, From, DbName, Options)};
- CloseError ->
- {reply, CloseError, Server}
- end;
- [#entry{waiters = Waiters} = Entry] when is_list(Waiters) ->
- true = ets:insert(couch_dbs(Server), Entry#entry{waiters = [From | Waiters]}),
- NumWaiters = length(Waiters),
- if NumWaiters =< 10 orelse NumWaiters rem 10 /= 0 -> ok; true ->
- Fmt = "~b clients waiting to open db ~s",
- couch_log:info(Fmt, [length(Waiters), DbName])
- end,
- {noreply, Server};
- [#entry{db = Db}] ->
- {reply, {ok, Db}, Server}
+ [] ->
+ case make_room(Server, Options) of
+ {ok, Server2} ->
+ {noreply, open_async(Server2, From, DbName, Options)};
+ CloseError ->
+ {reply, CloseError, Server}
+ end;
+ [#entry{waiters = Waiters} = Entry] when is_list(Waiters) ->
+ true = ets:insert(couch_dbs(Server), Entry#entry{waiters = [From | Waiters]}),
+ NumWaiters = length(Waiters),
+ if
+ NumWaiters =< 10 orelse NumWaiters rem 10 /= 0 ->
+ ok;
+ true ->
+ Fmt = "~b clients waiting to open db ~s",
+ couch_log:info(Fmt, [length(Waiters), DbName])
+ end,
+ {noreply, Server};
+ [#entry{db = Db}] ->
+ {reply, {ok, Db}, Server}
end;
handle_call({create, DbName, Options}, From, Server) ->
case ets:lookup(couch_dbs(Server), DbName) of
- [] ->
- case make_room(Server, Options) of
- {ok, Server2} ->
+ [] ->
+ case make_room(Server, Options) of
+ {ok, Server2} ->
+ CrOptions = [create | Options],
+ {noreply, open_async(Server2, From, DbName, CrOptions)};
+ CloseError ->
+ {reply, CloseError, Server}
+ end;
+ [#entry{req_type = open} = Entry] ->
+ % We're trying to create a database while someone is in
+ % the middle of trying to open it. We allow one creator
+ % to wait while we figure out if it'll succeed.
CrOptions = [create | Options],
- {noreply, open_async(Server2, From, DbName, CrOptions)};
- CloseError ->
- {reply, CloseError, Server}
- end;
- [#entry{req_type = open} = Entry] ->
- % We're trying to create a database while someone is in
- % the middle of trying to open it. We allow one creator
- % to wait while we figure out if it'll succeed.
- CrOptions = [create | Options],
- Req = {create, DbName, CrOptions, From},
- true = ets:insert(couch_dbs(Server), Entry#entry{req_type = Req}),
- {noreply, Server};
- [_AlreadyRunningDb] ->
- {reply, file_exists, Server}
+ Req = {create, DbName, CrOptions, From},
+ true = ets:insert(couch_dbs(Server), Entry#entry{req_type = Req}),
+ {noreply, Server};
+ [_AlreadyRunningDb] ->
+ {reply, file_exists, Server}
end;
handle_call({delete, DbName, Options}, _From, Server) ->
DbNameList = binary_to_list(DbName),
case check_dbname(DbNameList) of
- ok ->
- Server2 =
- case ets:lookup(couch_dbs(Server), DbName) of
- [] -> Server;
- [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) ->
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
- exit(Pid, kill),
- [gen_server:reply(Waiter, not_found) || Waiter <- Waiters],
- db_closed(Server, Entry#entry.db_options);
- [#entry{pid = Pid} = Entry] ->
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
- exit(Pid, kill),
- db_closed(Server, Entry#entry.db_options)
- end,
+ ok ->
+ Server2 =
+ case ets:lookup(couch_dbs(Server), DbName) of
+ [] ->
+ Server;
+ [#entry{pid = Pid, waiters = Waiters} = Entry] when is_list(Waiters) ->
+ true = ets:delete(couch_dbs(Server), DbName),
+ true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
+ exit(Pid, kill),
+ [gen_server:reply(Waiter, not_found) || Waiter <- Waiters],
+ db_closed(Server, Entry#entry.db_options);
+ [#entry{pid = Pid} = Entry] ->
+ true = ets:delete(couch_dbs(Server), DbName),
+ true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
+ exit(Pid, kill),
+ db_closed(Server, Entry#entry.db_options)
+ end,
- couch_db_plugin:on_delete(DbName, Options),
+ couch_db_plugin:on_delete(DbName, Options),
- DelOpt = [{context, delete} | Options],
+ DelOpt = [{context, delete} | Options],
- % Make sure and remove all compaction data
- delete_compaction_files(DbNameList, Options),
+ % Make sure and remove all compaction data
+ delete_compaction_files(DbNameList, Options),
- {ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
- RootDir = Server#server.root_dir,
- case couch_db_engine:delete(Engine, RootDir, FilePath, DelOpt) of
- ok ->
- couch_event:notify(DbName, deleted),
- {reply, ok, Server2};
- {error, enoent} ->
- {reply, not_found, Server2};
- Else ->
- {reply, Else, Server2}
- end;
- Error ->
- {reply, Error, Server}
+ {ok, {Engine, FilePath}} = get_engine(Server, DbNameList),
+ RootDir = Server#server.root_dir,
+ case couch_db_engine:delete(Engine, RootDir, FilePath, DelOpt) of
+ ok ->
+ couch_event:notify(DbName, deleted),
+ {reply, ok, Server2};
+ {error, enoent} ->
+ {reply, not_found, Server2};
+ Else ->
+ {reply, Else, Server2}
+ end;
+ Error ->
+ {reply, Error, Server}
end;
handle_call({db_updated, Db}, _From, Server0) ->
DbName = couch_db:name(Db),
StartTime = couch_db:get_instance_start_time(Db),
- Server = try ets:lookup_element(couch_dbs(Server0), DbName, #entry.start_time) of
- StartTime ->
- true = ets:update_element(couch_dbs(Server0), DbName, {#entry.db, Db}),
- Lru = case couch_db:is_system_db(Db) of
- false -> couch_lru:update(DbName, Server0#server.lru);
- true -> Server0#server.lru
- end,
- Server0#server{lru = Lru};
- _ ->
- Server0
- catch _:_ ->
- Server0
- end,
+ Server =
+ try ets:lookup_element(couch_dbs(Server0), DbName, #entry.start_time) of
+ StartTime ->
+ true = ets:update_element(couch_dbs(Server0), DbName, {#entry.db, Db}),
+ Lru =
+ case couch_db:is_system_db(Db) of
+ false -> couch_lru:update(DbName, Server0#server.lru);
+ true -> Server0#server.lru
+ end,
+ Server0#server{lru = Lru};
+ _ ->
+ Server0
+ catch
+ _:_ ->
+ Server0
+ end,
{reply, ok, Server}.
-handle_cast({update_lru, DbName}, #server{lru = Lru, update_lru_on_read=true} = Server) ->
+handle_cast({update_lru, DbName}, #server{lru = Lru, update_lru_on_read = true} = Server) ->
{noreply, Server#server{lru = couch_lru:update(DbName, Lru)}};
handle_cast({update_lru, _DbName}, Server) ->
{noreply, Server};
handle_cast({close_db_if_idle, DbName}, Server) ->
case ets:update_element(couch_dbs(Server), DbName, {#entry.lock, locked}) of
- true ->
- [#entry{db = Db, db_options = DbOpts}] = ets:lookup(couch_dbs(Server), DbName),
- case couch_db:is_idle(Db) of
true ->
- DbPid = couch_db:get_pid(Db),
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), DbPid),
- exit(DbPid, kill),
- {noreply, db_closed(Server, DbOpts)};
+ [#entry{db = Db, db_options = DbOpts}] = ets:lookup(couch_dbs(Server), DbName),
+ case couch_db:is_idle(Db) of
+ true ->
+ DbPid = couch_db:get_pid(Db),
+ true = ets:delete(couch_dbs(Server), DbName),
+ true = ets:delete(couch_dbs_pid_to_name(Server), DbPid),
+ exit(DbPid, kill),
+ {noreply, db_closed(Server, DbOpts)};
+ false ->
+ true = ets:update_element(
+ couch_dbs(Server), DbName, {#entry.lock, unlocked}
+ ),
+ {noreply, Server}
+ end;
false ->
- true = ets:update_element(
- couch_dbs(Server), DbName, {#entry.lock, unlocked}),
{noreply, Server}
- end;
- false ->
- {noreply, Server}
end;
-
handle_cast(Msg, Server) ->
{stop, {unknown_cast_message, Msg}, Server}.
-code_change(_OldVsn, #server{}=State, _Extra) ->
+code_change(_OldVsn, #server{} = State, _Extra) ->
{ok, State}.
handle_info({'EXIT', _Pid, config_change}, Server) ->
{stop, config_change, Server};
handle_info({'EXIT', Pid, Reason}, Server) ->
case ets:lookup(couch_dbs_pid_to_name(Server), Pid) of
- [{Pid, DbName}] ->
- [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs(Server), DbName),
- if Reason /= snappy_nif_not_loaded -> ok; true ->
- Msg = io_lib:format("To open the database `~s`, Apache CouchDB "
- "must be built with Erlang OTP R13B04 or higher.", [DbName]),
- couch_log:error(Msg, [])
- end,
- % We kill databases on purpose so there's no reason
- % to log that fact. So we restrict logging to "interesting"
- % reasons.
- if Reason == normal orelse Reason == killed -> ok; true ->
- couch_log:info("db ~s died with reason ~p", [DbName, Reason])
- end,
- if not is_list(Waiters) -> ok; true ->
- [gen_server:reply(Waiter, Reason) || Waiter <- Waiters]
- end,
- true = ets:delete(couch_dbs(Server), DbName),
- true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
- {noreply, db_closed(Server, Entry#entry.db_options)};
- [] ->
- {noreply, Server}
+ [{Pid, DbName}] ->
+ [#entry{waiters = Waiters} = Entry] = ets:lookup(couch_dbs(Server), DbName),
+ if
+ Reason /= snappy_nif_not_loaded ->
+ ok;
+ true ->
+ Msg = io_lib:format(
+ "To open the database `~s`, Apache CouchDB "
+ "must be built with Erlang OTP R13B04 or higher.",
+ [DbName]
+ ),
+ couch_log:error(Msg, [])
+ end,
+ % We kill databases on purpose so there's no reason
+ % to log that fact. So we restrict logging to "interesting"
+ % reasons.
+ if
+ Reason == normal orelse Reason == killed -> ok;
+ true -> couch_log:info("db ~s died with reason ~p", [DbName, Reason])
+ end,
+ if
+ not is_list(Waiters) -> ok;
+ true -> [gen_server:reply(Waiter, Reason) || Waiter <- Waiters]
+ end,
+ true = ets:delete(couch_dbs(Server), DbName),
+ true = ets:delete(couch_dbs_pid_to_name(Server), Pid),
+ {noreply, db_closed(Server, Entry#entry.db_options)};
+ [] ->
+ {noreply, Server}
end;
handle_info({restart_config_listener, N}, State) ->
ok = config:listen_for_changes(?MODULE, N),
@@ -723,13 +778,13 @@ handle_info(Info, Server) ->
db_opened(Server, Options) ->
case lists:member(sys_db, Options) of
- false -> Server#server{dbs_open=Server#server.dbs_open + 1};
+ false -> Server#server{dbs_open = Server#server.dbs_open + 1};
true -> Server
end.
db_closed(Server, Options) ->
case lists:member(sys_db, Options) of
- false -> Server#server{dbs_open=Server#server.dbs_open - 1};
+ false -> Server#server{dbs_open = Server#server.dbs_open - 1};
true -> Server
end.
@@ -757,13 +812,17 @@ validate_open_or_create(DbName, Options) ->
get_configured_engines() ->
ConfigEntries = config:get("couchdb_engines"),
- Engines = lists:flatmap(fun({Extension, ModuleStr}) ->
- try
- [{Extension, list_to_atom(ModuleStr)}]
- catch _T:_R ->
- []
- end
- end, ConfigEntries),
+ Engines = lists:flatmap(
+ fun({Extension, ModuleStr}) ->
+ try
+ [{Extension, list_to_atom(ModuleStr)}]
+ catch
+ _T:_R ->
+ []
+ end
+ end,
+ ConfigEntries
+ ),
case Engines of
[] ->
[{"couch", couch_bt_engine}];
@@ -771,7 +830,6 @@ get_configured_engines() ->
Else
end.
-
get_engine(Server, DbName, Options) ->
#server{
root_dir = RootDir,
@@ -791,7 +849,6 @@ get_engine(Server, DbName, Options) ->
get_engine(Server, DbName)
end.
-
get_engine(Server, DbName) ->
#server{
root_dir = RootDir,
@@ -807,18 +864,20 @@ get_engine(Server, DbName) ->
erlang:error(engine_conflict)
end.
-
get_possible_engines(DbName, RootDir, Engines) ->
- lists:foldl(fun({Extension, Engine}, Acc) ->
- Path = make_filepath(RootDir, DbName, Extension),
- case couch_db_engine:exists(Engine, Path) of
- true ->
- [{Engine, Path} | Acc];
- false ->
- Acc
- end
- end, [], Engines).
-
+ lists:foldl(
+ fun({Extension, Engine}, Acc) ->
+ Path = make_filepath(RootDir, DbName, Extension),
+ case couch_db_engine:exists(Engine, Path) of
+ true ->
+ [{Engine, Path} | Acc];
+ false ->
+ Acc
+ end
+ end,
+ [],
+ Engines
+ ).
get_default_engine(Server, DbName) ->
#server{
@@ -831,15 +890,15 @@ get_default_engine(Server, DbName) ->
{Extension, Module} ->
{ok, {Module, make_filepath(RootDir, DbName, Extension)}};
false ->
- Fmt = "Invalid storage engine extension ~s,"
- " configured engine extensions are: ~s",
+ Fmt =
+ "Invalid storage engine extension ~s,"
+ " configured engine extensions are: ~s",
Exts = [E || {E, _} <- Engines],
Args = [Extension, string:join(Exts, ", ")],
couch_log:error(Fmt, Args),
{ok, Default}
end.
-
make_filepath(RootDir, DbName, Extension) when is_binary(RootDir) ->
make_filepath(binary_to_list(RootDir), DbName, Extension);
make_filepath(RootDir, DbName, Extension) when is_binary(DbName) ->
@@ -849,7 +908,6 @@ make_filepath(RootDir, DbName, Extension) when is_binary(Extension) ->
make_filepath(RootDir, DbName, Extension) ->
filename:join([RootDir, "./" ++ DbName ++ "." ++ Extension]).
-
get_engine_extensions() ->
case config:get("couchdb_engines") of
[] ->
@@ -858,7 +916,6 @@ get_engine_extensions() ->
[Ext || {Ext, _Mod} <- Entries]
end.
-
check_engine(Options) ->
case couch_util:get_value(engine, Options) of
Ext when is_binary(Ext) ->
@@ -874,7 +931,6 @@ check_engine(Options) ->
ok
end.
-
get_engine_path(DbName, Engine) when is_binary(DbName), is_atom(Engine) ->
RootDir = config:get("couchdb", "database_dir", "."),
case lists:keyfind(Engine, 2, get_configured_engines()) of
@@ -897,60 +953,48 @@ unlock(DbName) when is_binary(DbName) ->
true = ets:delete(couch_dbs_locks(DbName), DbName),
ok.
-
db_updated(Db) ->
DbName = couch_db:name(Db),
gen_server:call(couch_server(DbName), {db_updated, Db}, infinity).
-
couch_server(Arg) ->
name("couch_server", Arg).
-
couch_dbs(Arg) ->
name("couch_dbs", Arg).
-
couch_dbs_pid_to_name(Arg) ->
name("couch_dbs_pid_to_name", Arg).
-
couch_dbs_locks(Arg) ->
name("couch_dbs_locks", Arg).
-
name("couch_dbs", #server{} = Server) ->
Server#server.couch_dbs;
-
name("couch_dbs_pid_to_name", #server{} = Server) ->
Server#server.couch_dbs_pid_to_name;
-
name("couch_dbs_locks", #server{} = Server) ->
Server#server.couch_dbs_locks;
-
name(BaseName, DbName) when is_list(DbName) ->
name(BaseName, ?l2b(DbName));
-
name(BaseName, DbName) when is_binary(DbName) ->
N = 1 + erlang:phash2(DbName, num_servers()),
name(BaseName, N);
-
name(BaseName, N) when is_integer(N), N > 0 ->
list_to_atom(BaseName ++ "_" ++ integer_to_list(N)).
-
num_servers() ->
erlang:system_info(schedulers).
-
aggregate_queue_len() ->
N = num_servers(),
Names = [couch_server(I) || I <- lists:seq(1, N)],
- MQs = [process_info(whereis(Name), message_queue_len) ||
- Name <- Names],
+ MQs = [
+ process_info(whereis(Name), message_queue_len)
+ || Name <- Names
+ ],
lists:sum([X || {_, X} <- MQs]).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -1006,10 +1050,13 @@ should_add_sys_db_callbacks() ->
"_replicator.couch",
"_replicator"
],
- lists:foreach(fun(DbName) ->
- check_case(DbName, true),
- check_case(?l2b(DbName), true)
- end, Cases).
+ lists:foreach(
+ fun(DbName) ->
+ check_case(DbName, true),
+ check_case(?l2b(DbName), true)
+ end,
+ Cases
+ ).
should_not_add_sys_db_callbacks() ->
Cases = [
@@ -1021,10 +1068,13 @@ should_not_add_sys_db_callbacks() ->
"mydb.couch",
"mydb"
],
- lists:foreach(fun(DbName) ->
- check_case(DbName, false),
- check_case(?l2b(DbName), false)
- end, Cases).
+ lists:foreach(
+ fun(DbName) ->
+ check_case(DbName, false),
+ check_case(?l2b(DbName), false)
+ end,
+ Cases
+ ).
check_case(DbName, IsAdded) ->
Options = maybe_add_sys_db_callbacks(DbName, [other_options]),
diff --git a/src/couch/src/couch_stream.erl b/src/couch/src/couch_stream.erl
index 2ab46d7e7..12b290820 100644
--- a/src/couch/src/couch_stream.erl
+++ b/src/couch/src/couch_stream.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
open/1,
open/2,
@@ -39,16 +38,14 @@
code_change/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-define(DEFAULT_BUFFER_SIZE, 4096).
-
-record(stream, {
engine,
opener_monitor,
- written_pointers=[],
+ written_pointers = [],
buffer_list = [],
buffer_len = 0,
max_buffer,
@@ -62,39 +59,35 @@
end_encoding_fun
}).
-
open({_StreamEngine, _StreamEngineState} = Engine) ->
open(Engine, []).
-
open({_StreamEngine, _StreamEngineState} = Engine, Options) ->
gen_server:start_link(?MODULE, {Engine, self(), erlang:get(io_priority), Options}, []).
-
close(Pid) ->
gen_server:call(Pid, close, infinity).
-
copy(Src, Dst) ->
- foldl(Src, fun(Bin, _) ->
- ok = write(Dst, Bin)
- end, ok).
-
+ foldl(
+ Src,
+ fun(Bin, _) ->
+ ok = write(Dst, Bin)
+ end,
+ ok
+ ).
write(_Pid, <<>>) ->
ok;
write(Pid, Bin) ->
gen_server:call(Pid, {write, Bin}, infinity).
-
to_disk_term({Engine, EngineState}) ->
Engine:to_disk_term(EngineState).
-
foldl({Engine, EngineState}, Fun, Acc) ->
Engine:foldl(EngineState, Fun, Acc).
-
foldl(Engine, <<>>, Fun, Acc) ->
foldl(Engine, Fun, Acc);
foldl(Engine, Md5, UserFun, UserAcc) ->
@@ -103,18 +96,17 @@ foldl(Engine, Md5, UserFun, UserAcc) ->
Md5 = couch_hash:md5_hash_final(Md5Acc),
OutAcc.
-
foldl_decode(Engine, Md5, Enc, UserFun, UserAcc1) ->
- {DecDataFun, DecEndFun} = case Enc of
- gzip -> ungzip_init();
- identity -> identity_enc_dec_funs()
- end,
+ {DecDataFun, DecEndFun} =
+ case Enc of
+ gzip -> ungzip_init();
+ identity -> identity_enc_dec_funs()
+ end,
InitAcc = {DecDataFun, UserFun, UserAcc1},
{_, _, UserAcc2} = foldl(Engine, Md5, fun foldl_decode/2, InitAcc),
DecEndFun(),
UserAcc2.
-
range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From ->
NewEngine = do_seek(Engine, From),
InitAcc = {To - From, UserFun, UserAcc},
@@ -126,19 +118,16 @@ range_foldl(Engine, From, To, UserFun, UserAcc) when To >= From ->
UserAcc3
end.
-
foldl_md5(Bin, {Md5Acc, UserFun, UserAcc}) ->
NewMd5Acc = couch_hash:md5_hash_update(Md5Acc, Bin),
{NewMd5Acc, UserFun, UserFun(Bin, UserAcc)}.
-
foldl_decode(EncBin, {DecFun, UserFun, UserAcc}) ->
case DecFun(EncBin) of
<<>> -> {DecFun, UserFun, UserAcc};
Dec -> {DecFun, UserFun, UserFun(Dec, UserAcc)}
end.
-
foldl_length(Bin, {Length, UserFun, UserAcc}) ->
BinSize = size(Bin),
case BinSize =< Length of
@@ -151,24 +140,24 @@ foldl_length(Bin, {Length, UserFun, UserAcc}) ->
gzip_init(Options) ->
case couch_util:get_value(compression_level, Options, 0) of
- Lvl when Lvl >= 1 andalso Lvl =< 9 ->
- Z = zlib:open(),
- % 15 = ?MAX_WBITS (defined in the zlib module)
- % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
- ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
- {
- fun(Data) ->
- zlib:deflate(Z, Data)
- end,
- fun() ->
- Last = zlib:deflate(Z, [], finish),
- ok = zlib:deflateEnd(Z),
- ok = zlib:close(Z),
- Last
- end
- };
- _ ->
- identity_enc_dec_funs()
+ Lvl when Lvl >= 1 andalso Lvl =< 9 ->
+ Z = zlib:open(),
+ % 15 = ?MAX_WBITS (defined in the zlib module)
+ % the 16 + ?MAX_WBITS formula was obtained by inspecting zlib:gzip/1
+ ok = zlib:deflateInit(Z, Lvl, deflated, 16 + 15, 8, default),
+ {
+ fun(Data) ->
+ zlib:deflate(Z, Data)
+ end,
+ fun() ->
+ Last = zlib:deflate(Z, [], finish),
+ ok = zlib:deflateEnd(Z),
+ ok = zlib:close(Z),
+ Last
+ end
+ };
+ _ ->
+ identity_enc_dec_funs()
end.
ungzip_init() ->
@@ -190,25 +179,24 @@ identity_enc_dec_funs() ->
fun() -> [] end
}.
-
init({Engine, OpenerPid, OpenerPriority, Options}) ->
erlang:put(io_priority, OpenerPriority),
{EncodingFun, EndEncodingFun} =
- case couch_util:get_value(encoding, Options, identity) of
- identity -> identity_enc_dec_funs();
- gzip -> gzip_init(Options)
- end,
+ case couch_util:get_value(encoding, Options, identity) of
+ identity -> identity_enc_dec_funs();
+ gzip -> gzip_init(Options)
+ end,
{ok, #stream{
- engine=Engine,
- opener_monitor=erlang:monitor(process, OpenerPid),
- md5=couch_hash:md5_hash_init(),
- identity_md5=couch_hash:md5_hash_init(),
- encoding_fun=EncodingFun,
- end_encoding_fun=EndEncodingFun,
- max_buffer=couch_util:get_value(
- buffer_size, Options, ?DEFAULT_BUFFER_SIZE)
- }
- }.
+ engine = Engine,
+ opener_monitor = erlang:monitor(process, OpenerPid),
+ md5 = couch_hash:md5_hash_init(),
+ identity_md5 = couch_hash:md5_hash_init(),
+ encoding_fun = EncodingFun,
+ end_encoding_fun = EndEncodingFun,
+ max_buffer = couch_util:get_value(
+ buffer_size, Options, ?DEFAULT_BUFFER_SIZE
+ )
+ }}.
terminate(_Reason, _Stream) ->
ok.
@@ -224,36 +212,42 @@ handle_call({write, Bin}, _From, Stream) ->
md5 = Md5,
identity_md5 = IdenMd5,
identity_len = IdenLen,
- encoding_fun = EncodingFun} = Stream,
- if BinSize + BufferLen > Max ->
- WriteBin = lists:reverse(Buffer, [Bin]),
- IdenMd5_2 = couch_hash:md5_hash_update(IdenMd5, WriteBin),
- case EncodingFun(WriteBin) of
- [] ->
- % case where the encoder did some internal buffering
- % (zlib does it for example)
- NewEngine = Engine,
- WrittenLen2 = WrittenLen,
- Md5_2 = Md5;
- WriteBin2 ->
- NewEngine = do_write(Engine, WriteBin2),
- WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
- Md5_2 = couch_hash:md5_hash_update(Md5, WriteBin2)
- end,
+ encoding_fun = EncodingFun
+ } = Stream,
+ if
+ BinSize + BufferLen > Max ->
+ WriteBin = lists:reverse(Buffer, [Bin]),
+ IdenMd5_2 = couch_hash:md5_hash_update(IdenMd5, WriteBin),
+ case EncodingFun(WriteBin) of
+ [] ->
+ % case where the encoder did some internal buffering
+ % (zlib does it for example)
+ NewEngine = Engine,
+ WrittenLen2 = WrittenLen,
+ Md5_2 = Md5;
+ WriteBin2 ->
+ NewEngine = do_write(Engine, WriteBin2),
+ WrittenLen2 = WrittenLen + iolist_size(WriteBin2),
+ Md5_2 = couch_hash:md5_hash_update(Md5, WriteBin2)
+ end,
- {reply, ok, Stream#stream{
- engine = NewEngine,
- written_len=WrittenLen2,
- buffer_list=[],
- buffer_len=0,
- md5=Md5_2,
- identity_md5=IdenMd5_2,
- identity_len=IdenLen + BinSize}, hibernate};
- true ->
- {reply, ok, Stream#stream{
- buffer_list=[Bin|Buffer],
- buffer_len=BufferLen + BinSize,
- identity_len=IdenLen + BinSize}}
+ {reply, ok,
+ Stream#stream{
+ engine = NewEngine,
+ written_len = WrittenLen2,
+ buffer_list = [],
+ buffer_len = 0,
+ md5 = Md5_2,
+ identity_md5 = IdenMd5_2,
+ identity_len = IdenLen + BinSize
+ },
+ hibernate};
+ true ->
+ {reply, ok, Stream#stream{
+ buffer_list = [Bin | Buffer],
+ buffer_len = BufferLen + BinSize,
+ identity_len = IdenLen + BinSize
+ }}
end;
handle_call(close, _From, Stream) ->
#stream{
@@ -265,35 +259,36 @@ handle_call(close, _From, Stream) ->
identity_md5 = IdenMd5,
identity_len = IdenLen,
encoding_fun = EncodingFun,
- end_encoding_fun = EndEncodingFun} = Stream,
+ end_encoding_fun = EndEncodingFun
+ } = Stream,
WriteBin = lists:reverse(Buffer),
IdenMd5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(IdenMd5, WriteBin)),
WriteBin2 = EncodingFun(WriteBin) ++ EndEncodingFun(),
Md5Final = couch_hash:md5_hash_final(couch_hash:md5_hash_update(Md5, WriteBin2)),
- Result = case WriteBin2 of
- [] ->
- {do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
- _ ->
- NewEngine = do_write(Engine, WriteBin2),
- StreamLen = WrittenLen + iolist_size(WriteBin2),
- {do_finalize(NewEngine), StreamLen, IdenLen, Md5Final, IdenMd5Final}
- end,
+ Result =
+ case WriteBin2 of
+ [] ->
+ {do_finalize(Engine), WrittenLen, IdenLen, Md5Final, IdenMd5Final};
+ _ ->
+ NewEngine = do_write(Engine, WriteBin2),
+ StreamLen = WrittenLen + iolist_size(WriteBin2),
+ {do_finalize(NewEngine), StreamLen, IdenLen, Md5Final, IdenMd5Final}
+ end,
erlang:demonitor(MonRef),
{stop, normal, Result, Stream}.
handle_cast(_Msg, State) ->
- {noreply,State}.
+ {noreply, State}.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-handle_info({'DOWN', Ref, _, _, _}, #stream{opener_monitor=Ref} = State) ->
+handle_info({'DOWN', Ref, _, _, _}, #stream{opener_monitor = Ref} = State) ->
{stop, normal, State};
handle_info(_Info, State) ->
{noreply, State}.
-
do_seek({Engine, EngineState}, Offset) ->
{ok, NewState} = Engine:seek(EngineState, Offset),
{Engine, NewState}.
@@ -305,4 +300,3 @@ do_write({Engine, EngineState}, Data) ->
do_finalize({Engine, EngineState}) ->
{ok, NewState} = Engine:finalize(EngineState),
{Engine, NewState}.
-
diff --git a/src/couch/src/couch_sup.erl b/src/couch/src/couch_sup.erl
index b936c1e5d..033f7115f 100644
--- a/src/couch/src/couch_sup.erl
+++ b/src/couch/src/couch_sup.erl
@@ -15,7 +15,6 @@
-vsn(1).
-behaviour(config_listener).
-
-export([
start_link/0,
init/1,
@@ -23,10 +22,8 @@
handle_config_terminate/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-
start_link() ->
assert_admins(),
maybe_launch_admin_annoyance_reporter(),
@@ -42,43 +39,45 @@ start_link() ->
Else
end.
-
init(_Args) ->
couch_log:info("Starting ~s", [?MODULE]),
- {ok, {{one_for_one,10, 60}, [
- {
- config_listener_mon,
- {config_listener_mon, start_link, [?MODULE, nil]},
- permanent,
- 5000,
- worker,
- [config_listener_mon]
- },
- {
- couch_primary_services,
- {couch_primary_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_primary_sup]
- },
- {
- couch_secondary_services,
- {couch_secondary_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_secondary_sup]
- }
- ]}}.
-
+ {ok,
+ {{one_for_one, 10, 60}, [
+ {
+ config_listener_mon,
+ {config_listener_mon, start_link, [?MODULE, nil]},
+ permanent,
+ 5000,
+ worker,
+ [config_listener_mon]
+ },
+ {
+ couch_primary_services,
+ {couch_primary_sup, start_link, []},
+ permanent,
+ infinity,
+ supervisor,
+ [couch_primary_sup]
+ },
+ {
+ couch_secondary_services,
+ {couch_secondary_sup, start_link, []},
+ permanent,
+ infinity,
+ supervisor,
+ [couch_secondary_sup]
+ }
+ ]}}.
handle_config_change("daemons", _, _, _, _) ->
exit(whereis(?MODULE), shutdown),
remove_handler;
handle_config_change("couchdb", "util_driver_dir", _, _, _) ->
- [Pid] = [P || {collation_driver, P, _, _}
- <- supervisor:which_children(couch_primary_services)],
+ [Pid] = [
+ P
+ || {collation_driver, P, _, _} <-
+ supervisor:which_children(couch_primary_services)
+ ],
Pid ! reload_driver,
{ok, nil};
handle_config_change(_, _, _, _, _) ->
@@ -91,44 +90,47 @@ assert_admins() ->
couch_log:info("Preflight check: Asserting Admin Account~n", []),
case {config:get("admins"), os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE")} of
{[], false} ->
- couch_log:info("~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n"
- ++ " No Admin Account Found, aborting startup. ~n"
- ++ " Please configure an admin account in your local.ini file. ~n"
- ++ "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n", []),
+ couch_log:info(
+ "~n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n" ++
+ " No Admin Account Found, aborting startup. ~n" ++
+ " Please configure an admin account in your local.ini file. ~n" ++
+ "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%~n",
+ []
+ ),
% Wait a second so the log message can make it to the log
timer:sleep(500),
erlang:halt(1);
- _ -> ok
+ _ ->
+ ok
end.
send_no_admin_account_error_message() ->
- couch_log:error("No Admin Account configured."
- ++ " Please configure an Admin Account in your local.ini file and restart CouchDB.~n", []),
+ couch_log:error(
+ "No Admin Account configured." ++
+ " Please configure an Admin Account in your local.ini file and restart CouchDB.~n",
+ []
+ ),
FiveMinutes = 5 * 1000 * 60,
timer:sleep(FiveMinutes),
send_no_admin_account_error_message().
-
+
maybe_launch_admin_annoyance_reporter() ->
case os:getenv("COUCHDB_TEST_ADMIN_PARTY_OVERRIDE") of
false -> ok;
_ -> spawn_link(fun send_no_admin_account_error_message/0)
end.
-
notify_starting() ->
couch_log:info("Apache CouchDB ~s is starting.~n", [
couch_server:get_version()
]).
-
notify_started() ->
couch_log:info("Apache CouchDB has started. Time to relax.~n", []).
-
notify_error(Error) ->
couch_log:error("Error starting Apache CouchDB:~n~n ~p~n~n", [Error]).
-
write_pidfile() ->
case init:get_argument(pidfile) of
{ok, [PidFile]} ->
diff --git a/src/couch/src/couch_task_status.erl b/src/couch/src/couch_task_status.erl
index 74247d63d..42d7c4f62 100644
--- a/src/couch/src/couch_task_status.erl
+++ b/src/couch/src/couch_task_status.erl
@@ -36,36 +36,30 @@
-define(set(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
stop() ->
gen_server:cast(?MODULE, stop).
-
all() ->
gen_server:call(?MODULE, all).
-
add_task(Props) ->
put(task_status_update, {{0, 0, 0}, 0}),
Ts = timestamp(),
TaskProps = lists:ukeysort(
- 1, [{started_on, Ts}, {updated_on, Ts} | Props]),
+ 1, [{started_on, Ts}, {updated_on, Ts} | Props]
+ ),
put(task_status_props, TaskProps),
gen_server:call(?MODULE, {add_task, TaskProps}).
-
is_task_added() ->
is_list(erlang:get(task_status_props)).
-
set_update_frequency(Msecs) ->
put(task_status_update, {{0, 0, 0}, Msecs * 1000}).
-
update(Props) ->
MergeProps = lists:ukeysort(1, Props),
CurrProps = erlang:get(task_status_props),
@@ -77,7 +71,6 @@ update(Props) ->
persist(TaskProps)
end.
-
get(Props) when is_list(Props) ->
TaskProps = erlang:get(task_status_props),
[couch_util:get_value(P, TaskProps) || P <- Props];
@@ -85,61 +78,54 @@ get(Prop) ->
TaskProps = erlang:get(task_status_props),
couch_util:get_value(Prop, TaskProps).
-
maybe_persist(TaskProps) ->
{LastUpdateTime, Frequency} = erlang:get(task_status_update),
case timer:now_diff(Now = os:timestamp(), LastUpdateTime) >= Frequency of
- true ->
- put(task_status_update, {Now, Frequency}),
- persist(TaskProps);
- false ->
- ok
+ true ->
+ put(task_status_update, {Now, Frequency}),
+ persist(TaskProps);
+ false ->
+ ok
end.
-
persist(TaskProps0) ->
TaskProps = ?set(TaskProps0, updated_on, timestamp(os:timestamp())),
put(task_status_props, TaskProps),
gen_server:cast(?MODULE, {update_status, self(), TaskProps}).
-
init([]) ->
% read configuration settings and register for configuration changes
ets:new(?MODULE, [ordered_set, protected, named_table]),
{ok, nil}.
-
-terminate(_Reason,_State) ->
+terminate(_Reason, _State) ->
ok.
-
handle_call({add_task, TaskProps}, {From, _}, Server) ->
case ets:lookup(?MODULE, From) of
- [] ->
- true = ets:insert(?MODULE, {From, TaskProps}),
- erlang:monitor(process, From),
- {reply, ok, Server};
- [_] ->
- {reply, {add_task_error, already_registered}, Server}
+ [] ->
+ true = ets:insert(?MODULE, {From, TaskProps}),
+ erlang:monitor(process, From),
+ {reply, ok, Server};
+ [_] ->
+ {reply, {add_task_error, already_registered}, Server}
end;
handle_call(all, _, Server) ->
All = [
[{pid, ?l2b(pid_to_list(Pid))}, process_status(Pid) | TaskProps]
- ||
- {Pid, TaskProps} <- ets:tab2list(?MODULE)
+ || {Pid, TaskProps} <- ets:tab2list(?MODULE)
],
{reply, All, Server}.
-
handle_cast({update_status, Pid, NewProps}, Server) ->
case ets:lookup(?MODULE, Pid) of
- [{Pid, _CurProps}] ->
- couch_log:debug("New task status for ~p: ~p", [Pid, NewProps]),
- true = ets:insert(?MODULE, {Pid, NewProps});
- _ ->
- % Task finished/died in the meanwhile and we must have received
- % a monitor message before this call - ignore.
- ok
+ [{Pid, _CurProps}] ->
+ couch_log:debug("New task status for ~p: ~p", [Pid, NewProps]),
+ true = ets:insert(?MODULE, {Pid, NewProps});
+ _ ->
+ % Task finished/died in the meanwhile and we must have received
+ % a monitor message before this call - ignore.
+ ok
end,
{noreply, Server};
handle_cast(stop, State) ->
@@ -150,18 +136,15 @@ handle_info({'DOWN', _MonitorRef, _Type, Pid, _Info}, Server) ->
ets:delete(?MODULE, Pid),
{noreply, Server}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
timestamp() ->
timestamp(os:timestamp()).
timestamp({Mega, Secs, _}) ->
Mega * 1000000 + Secs.
-
process_status(Pid) ->
case process_info(Pid, status) of
undefined ->
diff --git a/src/couch/src/couch_totp.erl b/src/couch/src/couch_totp.erl
index 56e70d81a..3eff9a583 100644
--- a/src/couch/src/couch_totp.erl
+++ b/src/couch/src/couch_totp.erl
@@ -14,10 +14,11 @@
-export([generate/5]).
-generate(Alg, Key, CounterSecs, StepSecs, OutputLen)
- when is_atom(Alg),
- is_binary(Key),
- is_integer(CounterSecs),
- is_integer(StepSecs),
- is_integer(OutputLen) ->
+generate(Alg, Key, CounterSecs, StepSecs, OutputLen) when
+ is_atom(Alg),
+ is_binary(Key),
+ is_integer(CounterSecs),
+ is_integer(StepSecs),
+ is_integer(OutputLen)
+->
couch_hotp:generate(Alg, Key, CounterSecs div StepSecs, OutputLen).
diff --git a/src/couch/src/couch_users_db.erl b/src/couch/src/couch_users_db.erl
index 0c169d3ed..7ef3aee78 100644
--- a/src/couch/src/couch_users_db.erl
+++ b/src/couch/src/couch_users_db.erl
@@ -42,15 +42,15 @@
% Else
% -> save_doc
before_doc_update(Doc, Db, _UpdateType) ->
- #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
+ #user_ctx{name = Name} = couch_db:get_user_ctx(Db),
DocName = get_doc_name(Doc),
case (catch couch_db:check_is_admin(Db)) of
- ok ->
- save_doc(Doc);
- _ when Name =:= DocName orelse Name =:= null ->
- save_doc(Doc);
- _ ->
- throw(not_found)
+ ok ->
+ save_doc(Doc);
+ _ when Name =:= DocName orelse Name =:= null ->
+ save_doc(Doc);
+ _ ->
+ throw(not_found)
end.
% If newDoc.password == null || newDoc.password == undefined:
@@ -60,38 +60,41 @@ before_doc_update(Doc, Db, _UpdateType) ->
% newDoc.password_sha = hash_pw(newDoc.password + salt)
% newDoc.salt = salt
% newDoc.password = null
-save_doc(#doc{body={Body}} = Doc) ->
+save_doc(#doc{body = {Body}} = Doc) ->
%% Support both schemes to smooth migration from legacy scheme
Scheme = chttpd_util:get_chttpd_auth_config("password_scheme", "pbkdf2"),
case {couch_util:get_value(?PASSWORD, Body), Scheme} of
- {null, _} -> % server admins don't have a user-db password entry
- Doc;
- {undefined, _} ->
- Doc;
- {ClearPassword, "simple"} -> % deprecated
- ok = validate_password(ClearPassword),
- Salt = couch_uuids:random(),
- PasswordSha = couch_passwords:simple(ClearPassword, Salt),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
- Body1 = ?replace(Body0, ?SALT, Salt),
- Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
- Body3 = proplists:delete(?PASSWORD, Body2),
- Doc#doc{body={Body3}};
- {ClearPassword, "pbkdf2"} ->
- ok = validate_password(ClearPassword),
- Iterations = chttpd_util:get_chttpd_auth_config_integer(
- "iterations", 10),
- Salt = couch_uuids:random(),
- DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
- Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
- Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
- Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
- Body3 = ?replace(Body2, ?SALT, Salt),
- Body4 = proplists:delete(?PASSWORD, Body3),
- Doc#doc{body={Body4}};
- {_ClearPassword, Scheme} ->
- couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
- throw({forbidden, ?PASSWORD_SERVER_ERROR})
+ % server admins don't have a user-db password entry
+ {null, _} ->
+ Doc;
+ {undefined, _} ->
+ Doc;
+ % deprecated
+ {ClearPassword, "simple"} ->
+ ok = validate_password(ClearPassword),
+ Salt = couch_uuids:random(),
+ PasswordSha = couch_passwords:simple(ClearPassword, Salt),
+ Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?SIMPLE),
+ Body1 = ?replace(Body0, ?SALT, Salt),
+ Body2 = ?replace(Body1, ?PASSWORD_SHA, PasswordSha),
+ Body3 = proplists:delete(?PASSWORD, Body2),
+ Doc#doc{body = {Body3}};
+ {ClearPassword, "pbkdf2"} ->
+ ok = validate_password(ClearPassword),
+ Iterations = chttpd_util:get_chttpd_auth_config_integer(
+ "iterations", 10
+ ),
+ Salt = couch_uuids:random(),
+ DerivedKey = couch_passwords:pbkdf2(ClearPassword, Salt, Iterations),
+ Body0 = ?replace(Body, ?PASSWORD_SCHEME, ?PBKDF2),
+ Body1 = ?replace(Body0, ?ITERATIONS, Iterations),
+ Body2 = ?replace(Body1, ?DERIVED_KEY, DerivedKey),
+ Body3 = ?replace(Body2, ?SALT, Salt),
+ Body4 = proplists:delete(?PASSWORD, Body3),
+ Doc#doc{body = {Body4}};
+ {_ClearPassword, Scheme} ->
+ couch_log:error("[couch_httpd_auth] password_scheme value of '~p' is invalid.", [Scheme]),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR})
end.
% Validate if a new password matches all RegExp in the password_regexp setting.
@@ -104,47 +107,52 @@ validate_password(ClearPassword) ->
"[]" ->
ok;
ValidateConfig ->
- RequirementList = case couch_util:parse_term(ValidateConfig) of
- {ok, RegExpList} when is_list(RegExpList) ->
- RegExpList;
- {ok, NonListValue} ->
- couch_log:error(
- "[couch_httpd_auth] password_regexp value of '~p'"
- " is not a list.",
- [NonListValue]
- ),
- throw({forbidden, ?PASSWORD_SERVER_ERROR});
- {error, ErrorInfo} ->
- couch_log:error(
- "[couch_httpd_auth] password_regexp value of '~p'"
- " could not get parsed. ~p",
- [ValidateConfig, ErrorInfo]
- ),
- throw({forbidden, ?PASSWORD_SERVER_ERROR})
- end,
- % Check the password on every RegExp.
- lists:foreach(fun(RegExpTuple) ->
- case get_password_regexp_and_error_msg(RegExpTuple) of
- {ok, RegExp, PasswordErrorMsg} ->
- check_password(ClearPassword, RegExp, PasswordErrorMsg);
- {error} ->
+ RequirementList =
+ case couch_util:parse_term(ValidateConfig) of
+ {ok, RegExpList} when is_list(RegExpList) ->
+ RegExpList;
+ {ok, NonListValue} ->
+ couch_log:error(
+ "[couch_httpd_auth] password_regexp value of '~p'"
+ " is not a list.",
+ [NonListValue]
+ ),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR});
+ {error, ErrorInfo} ->
couch_log:error(
- "[couch_httpd_auth] password_regexp part of '~p' "
- "is not a RegExp string or "
- "a RegExp and Reason tuple.",
- [RegExpTuple]
+ "[couch_httpd_auth] password_regexp value of '~p'"
+ " could not get parsed. ~p",
+ [ValidateConfig, ErrorInfo]
),
throw({forbidden, ?PASSWORD_SERVER_ERROR})
- end
- end, RequirementList),
+ end,
+ % Check the password on every RegExp.
+ lists:foreach(
+ fun(RegExpTuple) ->
+ case get_password_regexp_and_error_msg(RegExpTuple) of
+ {ok, RegExp, PasswordErrorMsg} ->
+ check_password(ClearPassword, RegExp, PasswordErrorMsg);
+ {error} ->
+ couch_log:error(
+ "[couch_httpd_auth] password_regexp part of '~p' "
+ "is not a RegExp string or "
+ "a RegExp and Reason tuple.",
+ [RegExpTuple]
+ ),
+ throw({forbidden, ?PASSWORD_SERVER_ERROR})
+ end
+ end,
+ RequirementList
+ ),
ok
end.
% Get the RegExp out of the tuple and combine it with the error message.
% First is with a Reason string.
-get_password_regexp_and_error_msg({RegExp, Reason})
- when is_list(RegExp) andalso is_list(Reason)
- andalso length(Reason) > 0 ->
+get_password_regexp_and_error_msg({RegExp, Reason}) when
+ is_list(RegExp) andalso is_list(Reason) andalso
+ length(Reason) > 0
+->
{ok, RegExp, lists:concat([?REQUIREMENT_ERROR, " ", Reason])};
% With a not correct Reason string.
get_password_regexp_and_error_msg({RegExp, _Reason}) when is_list(RegExp) ->
@@ -181,36 +189,40 @@ check_password(Password, RegExp, ErrorMsg) ->
% -> return doc
after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, Db) ->
case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ ->
- throw({forbidden,
- <<"Only administrators can view design docs in the users database.">>})
+ ok ->
+ Doc;
+ _ ->
+ throw(
+ {forbidden, <<"Only administrators can view design docs in the users database.">>}
+ )
end;
after_doc_read(Doc, Db) ->
- #user_ctx{name=Name} = couch_db:get_user_ctx(Db),
+ #user_ctx{name = Name} = couch_db:get_user_ctx(Db),
DocName = get_doc_name(Doc),
case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ when Name =:= DocName ->
- Doc;
- _ ->
- Doc1 = strip_non_public_fields(Doc),
- case Doc1 of
- #doc{body={[]}} ->
- throw(not_found);
- _ ->
- Doc1
- end
+ ok ->
+ Doc;
+ _ when Name =:= DocName ->
+ Doc;
+ _ ->
+ Doc1 = strip_non_public_fields(Doc),
+ case Doc1 of
+ #doc{body = {[]}} ->
+ throw(not_found);
+ _ ->
+ Doc1
+ end
end.
-get_doc_name(#doc{id= <<"org.couchdb.user:", Name/binary>>}) ->
+get_doc_name(#doc{id = <<"org.couchdb.user:", Name/binary>>}) ->
Name;
get_doc_name(_) ->
undefined.
-strip_non_public_fields(#doc{body={Props}}=Doc) ->
- Public = re:split(chttpd_util:get_chttpd_auth_config("public_fields", ""),
- "\\s*,\\s*", [{return, binary}]),
- Doc#doc{body={[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
+strip_non_public_fields(#doc{body = {Props}} = Doc) ->
+ Public = re:split(
+ chttpd_util:get_chttpd_auth_config("public_fields", ""),
+ "\\s*,\\s*",
+ [{return, binary}]
+ ),
+ Doc#doc{body = {[{K, V} || {K, V} <- Props, lists:member(K, Public)]}}.
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index c7dc894d7..b7a6ad39a 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -15,14 +15,14 @@
-export([priv_dir/0, normpath/1, fold_files/5]).
-export([should_flush/0, should_flush/1, to_existing_atom/1]).
-export([rand32/0, implode/2]).
--export([abs_pathname/1,abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
+-export([abs_pathname/1, abs_pathname/2, trim/1, drop_dot_couch_ext/1]).
-export([encodeBase64Url/1, decodeBase64Url/1]).
-export([validate_utf8/1, to_hex/1, parse_term/1, dict_find/3]).
-export([get_nested_json_value/2, json_user_ctx/1]).
-export([proplist_apply_field/2, json_apply_field/2]).
-export([to_binary/1, to_integer/1, to_list/1, url_encode/1]).
-export([json_encode/1, json_decode/1, json_decode/2]).
--export([verify/2,simple_call/2,shutdown_sync/1]).
+-export([verify/2, simple_call/2, shutdown_sync/1]).
-export([get_value/2, get_value/3]).
-export([reorder_results/2]).
-export([url_strip_password/1]).
@@ -60,7 +60,6 @@
<<"feature_flags">>
]).
-
priv_dir() ->
case code:priv_dir(couch) of
{error, bad_name} ->
@@ -68,7 +67,8 @@ priv_dir() ->
% renaming src/couch to src/couchdb. Not really worth the hassle.
% -Damien
code:priv_dir(couchdb);
- Dir -> Dir
+ Dir ->
+ Dir
end.
% Normalize a pathname by removing .. and . components.
@@ -84,7 +84,6 @@ normparts(["." | RestParts], Acc) ->
normparts([Part | RestParts], Acc) ->
normparts(RestParts, [Part | Acc]).
-
% This implementation is similar to the builtin filelib:fold_files/5
% except that this version will run the user supplied function
% on directories that match the regular expression as well.
@@ -125,13 +124,21 @@ fold_files2([File | Rest], Dir, RegExp, Recursive, Fun, Acc0) ->
% works like list_to_existing_atom, except can be list or binary and it
% gives you the original value instead of an error if no existing atom.
to_existing_atom(V) when is_list(V) ->
- try list_to_existing_atom(V) catch _:_ -> V end;
+ try
+ list_to_existing_atom(V)
+ catch
+ _:_ -> V
+ end;
to_existing_atom(V) when is_binary(V) ->
- try list_to_existing_atom(?b2l(V)) catch _:_ -> V end;
+ try
+ list_to_existing_atom(?b2l(V))
+ catch
+ _:_ -> V
+ end;
to_existing_atom(V) when is_atom(V) ->
V.
-shutdown_sync(Pid) when not is_pid(Pid)->
+shutdown_sync(Pid) when not is_pid(Pid) ->
ok;
shutdown_sync(Pid) ->
MRef = erlang:monitor(process, Pid),
@@ -139,23 +146,22 @@ shutdown_sync(Pid) ->
catch unlink(Pid),
catch exit(Pid, shutdown),
receive
- {'DOWN', MRef, _, _, _} ->
- ok
+ {'DOWN', MRef, _, _, _} ->
+ ok
end
after
erlang:demonitor(MRef, [flush])
end.
-
simple_call(Pid, Message) ->
MRef = erlang:monitor(process, Pid),
try
Pid ! {self(), Message},
receive
- {Pid, Result} ->
- Result;
- {'DOWN', MRef, _, _, Reason} ->
- exit(Reason)
+ {Pid, Result} ->
+ Result;
+ {'DOWN', MRef, _, _, Reason} ->
+ exit(Reason)
end
after
erlang:demonitor(MRef, [flush])
@@ -171,28 +177,40 @@ validate_utf8_fast(B, O) ->
<<_:O/binary>> ->
true;
<<_:O/binary, C1, _/binary>> when
- C1 < 128 ->
+ C1 < 128
+ ->
validate_utf8_fast(B, 1 + O);
<<_:O/binary, C1, C2, _/binary>> when
- C1 >= 194, C1 =< 223,
- C2 >= 128, C2 =< 191 ->
+ C1 >= 194,
+ C1 =< 223,
+ C2 >= 128,
+ C2 =< 191
+ ->
validate_utf8_fast(B, 2 + O);
<<_:O/binary, C1, C2, C3, _/binary>> when
- C1 >= 224, C1 =< 239,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191 ->
+ C1 >= 224,
+ C1 =< 239,
+ C2 >= 128,
+ C2 =< 191,
+ C3 >= 128,
+ C3 =< 191
+ ->
validate_utf8_fast(B, 3 + O);
<<_:O/binary, C1, C2, C3, C4, _/binary>> when
- C1 >= 240, C1 =< 244,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191,
- C4 >= 128, C4 =< 191 ->
+ C1 >= 240,
+ C1 =< 244,
+ C2 >= 128,
+ C2 =< 191,
+ C3 >= 128,
+ C3 =< 191,
+ C4 >= 128,
+ C4 =< 191
+ ->
validate_utf8_fast(B, 4 + O);
_ ->
false
end.
-
to_hex(<<Hi:4, Lo:4, Rest/binary>>) ->
[nibble_to_hex(Hi), nibble_to_hex(Lo) | to_hex(Rest)];
to_hex(<<>>) ->
@@ -217,7 +235,6 @@ nibble_to_hex(13) -> $d;
nibble_to_hex(14) -> $e;
nibble_to_hex(15) -> $f.
-
parse_term(Bin) when is_binary(Bin) ->
parse_term(binary_to_list(Bin));
parse_term(List) ->
@@ -229,16 +246,16 @@ get_value(Key, List) ->
get_value(Key, List, Default) ->
case lists:keysearch(Key, 1, List) of
- {value, {Key,Value}} ->
- Value;
- false ->
- Default
+ {value, {Key, Value}} ->
+ Value;
+ false ->
+ Default
end.
-get_nested_json_value({Props}, [Key|Keys]) ->
+get_nested_json_value({Props}, [Key | Keys]) ->
case couch_util:get_value(Key, Props, nil) of
- nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
- Value -> get_nested_json_value(Value, Keys)
+ nil -> throw({not_found, <<"missing json key: ", Key/binary>>});
+ Value -> get_nested_json_value(Value, Keys)
end;
get_nested_json_value(Value, []) ->
Value;
@@ -256,15 +273,16 @@ json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
json_apply_field({Key, NewValue}, [], Acc) ->
- {[{Key, NewValue}|Acc]}.
+ {[{Key, NewValue} | Acc]}.
json_user_ctx(Db) ->
ShardName = couch_db:name(Db),
Ctx = couch_db:get_user_ctx(Db),
- {[{<<"db">>, mem3:dbname(ShardName)},
- {<<"name">>,Ctx#user_ctx.name},
- {<<"roles">>,Ctx#user_ctx.roles}]}.
-
+ {[
+ {<<"db">>, mem3:dbname(ShardName)},
+ {<<"name">>, Ctx#user_ctx.name},
+ {<<"roles">>, Ctx#user_ctx.roles}
+ ]}.
% returns a random integer
rand32() ->
@@ -276,7 +294,7 @@ rand32() ->
abs_pathname(" " ++ Filename) ->
% strip leading whitespace
abs_pathname(Filename);
-abs_pathname([$/ |_]=Filename) ->
+abs_pathname([$/ | _] = Filename) ->
Filename;
abs_pathname(Filename) ->
{ok, Cwd} = file:get_cwd(),
@@ -287,24 +305,25 @@ abs_pathname(Filename, Dir) ->
Name = filename:absname(Filename, Dir ++ "/"),
OutFilename = filename:join(fix_path_list(filename:split(Name), [])),
% If the filename is a dir (last char is a slash), put back the end slash
- case string:right(Filename,1) of
- "/" ->
- OutFilename ++ "/";
- "\\" ->
- OutFilename ++ "/";
- _Else->
- OutFilename
+ case string:right(Filename, 1) of
+ "/" ->
+ OutFilename ++ "/";
+ "\\" ->
+ OutFilename ++ "/";
+ _Else ->
+ OutFilename
end.
% if this is an executable with arguments, separate out the arguments
% ""./foo\ bar.sh -baz=blah" -> {"./foo\ bar.sh", " -baz=blah"}
separate_cmd_args("", CmdAcc) ->
{lists:reverse(CmdAcc), ""};
-separate_cmd_args("\\ " ++ Rest, CmdAcc) -> % handle skipped value
+% handle skipped value
+separate_cmd_args("\\ " ++ Rest, CmdAcc) ->
separate_cmd_args(Rest, " \\" ++ CmdAcc);
separate_cmd_args(" " ++ Rest, CmdAcc) ->
{lists:reverse(CmdAcc), " " ++ Rest};
-separate_cmd_args([Char|Rest], CmdAcc) ->
+separate_cmd_args([Char | Rest], CmdAcc) ->
separate_cmd_args(Rest, [Char | CmdAcc]).
% Is a character whitespace (from https://en.wikipedia.org/wiki/Whitespace_character#Unicode)?
@@ -341,7 +360,6 @@ is_whitespace(8288) -> true;
is_whitespace(65279) -> true;
is_whitespace(_Else) -> false.
-
% removes leading and trailing whitespace from a string
trim(String) when is_binary(String) ->
% mirror string:trim() behaviour of returning a binary when a binary is passed in
@@ -350,7 +368,6 @@ trim(String) ->
String2 = lists:dropwhile(fun is_whitespace/1, String),
lists:reverse(lists:dropwhile(fun is_whitespace/1, lists:reverse(String2))).
-
drop_dot_couch_ext(DbName) when is_binary(DbName) ->
PrefixLen = size(DbName) - 6,
case DbName of
@@ -359,48 +376,53 @@ drop_dot_couch_ext(DbName) when is_binary(DbName) ->
Else ->
Else
end;
-
drop_dot_couch_ext(DbName) when is_list(DbName) ->
binary_to_list(drop_dot_couch_ext(iolist_to_binary(DbName))).
-
% takes a hierarchical list of dirs and removes the dots ".", double dots
% ".." and the corresponding parent dirs.
fix_path_list([], Acc) ->
lists:reverse(Acc);
-fix_path_list([".."|Rest], [_PrevAcc|RestAcc]) ->
+fix_path_list([".." | Rest], [_PrevAcc | RestAcc]) ->
fix_path_list(Rest, RestAcc);
-fix_path_list(["."|Rest], Acc) ->
+fix_path_list(["." | Rest], Acc) ->
fix_path_list(Rest, Acc);
fix_path_list([Dir | Rest], Acc) ->
fix_path_list(Rest, [Dir | Acc]).
-
implode(List, Sep) ->
implode(List, Sep, []).
implode([], _Sep, Acc) ->
lists:flatten(lists:reverse(Acc));
implode([H], Sep, Acc) ->
- implode([], Sep, [H|Acc]);
-implode([H|T], Sep, Acc) ->
- implode(T, Sep, [Sep,H|Acc]).
-
+ implode([], Sep, [H | Acc]);
+implode([H | T], Sep, Acc) ->
+ implode(T, Sep, [Sep, H | Acc]).
should_flush() ->
should_flush(?FLUSH_MAX_MEM).
should_flush(MemThreshHold) ->
{memory, ProcMem} = process_info(self(), memory),
- BinMem = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
- 0, element(2,process_info(self(), binary))),
- if ProcMem+BinMem > 2*MemThreshHold ->
- garbage_collect(),
- {memory, ProcMem2} = process_info(self(), memory),
- BinMem2 = lists:foldl(fun({_Id, Size, _NRefs}, Acc) -> Size+Acc end,
- 0, element(2,process_info(self(), binary))),
- ProcMem2+BinMem2 > MemThreshHold;
- true -> false end.
+ BinMem = lists:foldl(
+ fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end,
+ 0,
+ element(2, process_info(self(), binary))
+ ),
+ if
+ ProcMem + BinMem > 2 * MemThreshHold ->
+ garbage_collect(),
+ {memory, ProcMem2} = process_info(self(), memory),
+ BinMem2 = lists:foldl(
+ fun({_Id, Size, _NRefs}, Acc) -> Size + Acc end,
+ 0,
+ element(2, process_info(self(), binary))
+ ),
+ ProcMem2 + BinMem2 > MemThreshHold;
+ true ->
+ false
+ end.
encodeBase64Url(Url) ->
b64url:encode(Url).
@@ -410,10 +432,10 @@ decodeBase64Url(Url64) ->
dict_find(Key, Dict, DefaultValue) ->
case dict:find(Key, Dict) of
- {ok, Value} ->
- Value;
- error ->
- DefaultValue
+ {ok, Value} ->
+ Value;
+ error ->
+ DefaultValue
end.
to_binary(V) when is_binary(V) ->
@@ -448,23 +470,23 @@ to_list(V) ->
url_encode(Bin) when is_binary(Bin) ->
url_encode(binary_to_list(Bin));
-url_encode([H|T]) ->
+url_encode([H | T]) ->
if
- H >= $a, $z >= H ->
- [H|url_encode(T)];
- H >= $A, $Z >= H ->
- [H|url_encode(T)];
- H >= $0, $9 >= H ->
- [H|url_encode(T)];
- H == $_; H == $.; H == $-; H == $: ->
- [H|url_encode(T)];
- true ->
- case lists:flatten(io_lib:format("~.16.0B", [H])) of
- [X, Y] ->
- [$%, X, Y | url_encode(T)];
- [X] ->
- [$%, $0, X | url_encode(T)]
- end
+ H >= $a, $z >= H ->
+ [H | url_encode(T)];
+ H >= $A, $Z >= H ->
+ [H | url_encode(T)];
+ H >= $0, $9 >= H ->
+ [H | url_encode(T)];
+ H == $_; H == $.; H == $-; H == $: ->
+ [H | url_encode(T)];
+ true ->
+ case lists:flatten(io_lib:format("~.16.0B", [H])) of
+ [X, Y] ->
+ [$%, X, Y | url_encode(T)];
+ [X] ->
+ [$%, $0, X | url_encode(T)]
+ end
end;
url_encode([]) ->
[].
@@ -483,7 +505,7 @@ json_decode(V, Opts) ->
throw({invalid_json, Error})
end.
-verify([X|RestX], [Y|RestY], Result) ->
+verify([X | RestX], [Y | RestY], Result) ->
verify(RestX, RestY, (X bxor Y) bor Result);
verify([], [], Result) ->
Result == 0.
@@ -497,7 +519,8 @@ verify(X, Y) when is_list(X) and is_list(Y) ->
false ->
false
end;
-verify(_X, _Y) -> false.
+verify(_X, _Y) ->
+ false.
% linear search is faster for small lists, length() is 0.5 ms for 100k list
reorder_results(Keys, SortedResults) when length(Keys) < 100 ->
@@ -507,10 +530,12 @@ reorder_results(Keys, SortedResults) ->
[dict:fetch(Key, KeyDict) || Key <- Keys].
url_strip_password(Url) ->
- re:replace(Url,
+ re:replace(
+ Url,
"(http|https|socks5)://([^:]+):[^@]+@(.*)$",
"\\1://\\2:*****@\\3",
- [{return, list}]).
+ [{return, list}]
+ ).
encode_doc_id(#doc{id = Id}) ->
encode_doc_id(Id);
@@ -528,7 +553,7 @@ normalize_ddoc_id(<<"_design/", _/binary>> = DDocId) ->
normalize_ddoc_id(DDocId) when is_binary(DDocId) ->
<<"_design/", DDocId/binary>>.
-with_db(DbName, Fun) when is_binary(DbName) ->
+with_db(DbName, Fun) when is_binary(DbName) ->
case couch_db:open_int(DbName, [?ADMIN_CTX]) of
{ok, Db} ->
try
@@ -548,20 +573,26 @@ with_db(Db, Fun) ->
end.
rfc1123_date() ->
- {{YYYY,MM,DD},{Hour,Min,Sec}} = calendar:universal_time(),
- DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+ {{YYYY, MM, DD}, {Hour, Min, Sec}} = calendar:universal_time(),
+ DayNumber = calendar:day_of_the_week({YYYY, MM, DD}),
lists:flatten(
- io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
- [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+ io_lib:format(
+ "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+ [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec]
+ )
+ ).
rfc1123_date(undefined) ->
undefined;
rfc1123_date(UniversalTime) ->
- {{YYYY,MM,DD},{Hour,Min,Sec}} = UniversalTime,
- DayNumber = calendar:day_of_the_week({YYYY,MM,DD}),
+ {{YYYY, MM, DD}, {Hour, Min, Sec}} = UniversalTime,
+ DayNumber = calendar:day_of_the_week({YYYY, MM, DD}),
lists:flatten(
- io_lib:format("~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
- [day(DayNumber),DD,month(MM),YYYY,Hour,Min,Sec])).
+ io_lib:format(
+ "~s, ~2.2.0w ~3.s ~4.4.0w ~2.2.0w:~2.2.0w:~2.2.0w GMT",
+ [day(DayNumber), DD, month(MM), YYYY, Hour, Min, Sec]
+ )
+ ).
%% day
@@ -598,30 +629,32 @@ boolean_to_integer(true) ->
boolean_to_integer(false) ->
0.
-
validate_positive_int(N) when is_list(N) ->
try
I = list_to_integer(N),
validate_positive_int(I)
- catch error:badarg ->
- false
+ catch
+ error:badarg ->
+ false
end;
validate_positive_int(N) when is_integer(N), N > 0 -> true;
-validate_positive_int(_) -> false.
-
+validate_positive_int(_) ->
+ false.
find_in_binary(_B, <<>>) ->
not_found;
-
find_in_binary(B, Data) ->
case binary:match(Data, [B], []) of
- nomatch ->
- MatchLength = erlang:min(byte_size(B), byte_size(Data)),
- match_prefix_at_end(binary:part(B, {0, MatchLength}),
- binary:part(Data, {byte_size(Data), -MatchLength}),
- MatchLength, byte_size(Data) - MatchLength);
- {Pos, _Len} ->
- {exact, Pos}
+ nomatch ->
+ MatchLength = erlang:min(byte_size(B), byte_size(Data)),
+ match_prefix_at_end(
+ binary:part(B, {0, MatchLength}),
+ binary:part(Data, {byte_size(Data), -MatchLength}),
+ MatchLength,
+ byte_size(Data) - MatchLength
+ );
+ {Pos, _Len} ->
+ {exact, Pos}
end.
match_prefix_at_end(Prefix, Data, PrefixLength, N) ->
@@ -630,10 +663,14 @@ match_prefix_at_end(Prefix, Data, PrefixLength, N) ->
match_rest_of_prefix([], _Prefix, _Data, _PrefixLength, _N) ->
not_found;
-
match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) ->
- case binary:match(binary:part(Data, {PrefixLength, Pos - PrefixLength}),
- [binary:part(Prefix, {0, PrefixLength - Pos})], []) of
+ case
+ binary:match(
+ binary:part(Data, {PrefixLength, Pos - PrefixLength}),
+ [binary:part(Prefix, {0, PrefixLength - Pos})],
+ []
+ )
+ of
nomatch ->
match_rest_of_prefix(Rest, Prefix, Data, PrefixLength, N);
{_Pos, _Len1} ->
@@ -642,44 +679,42 @@ match_rest_of_prefix([{Pos, _Len} | Rest], Prefix, Data, PrefixLength, N) ->
callback_exists(Module, Function, Arity) ->
case ensure_loaded(Module) of
- true ->
- InfoList = Module:module_info(exports),
- lists:member({Function, Arity}, InfoList);
- false ->
- false
+ true ->
+ InfoList = Module:module_info(exports),
+ lists:member({Function, Arity}, InfoList);
+ false ->
+ false
end.
validate_callback_exists(Module, Function, Arity) ->
case callback_exists(Module, Function, Arity) of
- true ->
- ok;
- false ->
- CallbackStr = lists:flatten(
- io_lib:format("~w:~w/~w", [Module, Function, Arity])),
- throw({error,
- {undefined_callback, CallbackStr, {Module, Function, Arity}}})
+ true ->
+ ok;
+ false ->
+ CallbackStr = lists:flatten(
+ io_lib:format("~w:~w/~w", [Module, Function, Arity])
+ ),
+ throw({error, {undefined_callback, CallbackStr, {Module, Function, Arity}}})
end.
-
check_md5(_NewSig, <<>>) -> ok;
check_md5(Sig, Sig) -> ok;
check_md5(_, _) -> throw(md5_mismatch).
-
set_mqd_off_heap(Module) ->
case config:get_boolean("off_heap_mqd", atom_to_list(Module), true) of
true ->
try
erlang:process_flag(message_queue_data, off_heap),
ok
- catch error:badarg ->
+ catch
+ error:badarg ->
ok
end;
false ->
ok
end.
-
set_process_priority(Module, Level) ->
case config:get_boolean("process_priority", atom_to_list(Module), false) of
true ->
@@ -689,18 +724,17 @@ set_process_priority(Module, Level) ->
ok
end.
-
ensure_loaded(Module) when is_atom(Module) ->
case code:ensure_loaded(Module) of
- {module, Module} ->
- true;
- {error, embedded} ->
- true;
- {error, _} ->
- false
+ {module, Module} ->
+ true;
+ {error, embedded} ->
+ true;
+ {error, _} ->
+ false
end;
-ensure_loaded(_Module) -> false.
-
+ensure_loaded(_Module) ->
+ false.
%% This is especially useful in gen_servers when you need to call
%% a function that does a receive as it would hijack incoming messages.
@@ -718,11 +752,9 @@ with_proc(M, F, A, Timeout) ->
{error, timeout}
end.
-
process_dict_get(Pid, Key) ->
process_dict_get(Pid, Key, undefined).
-
process_dict_get(Pid, Key, DefaultValue) ->
case process_info(Pid, dictionary) of
{dictionary, Dict} ->
@@ -736,21 +768,18 @@ process_dict_get(Pid, Key, DefaultValue) ->
DefaultValue
end.
-
unique_monotonic_integer() ->
erlang:unique_integer([monotonic, positive]).
-
check_config_blacklist(Section) ->
case lists:member(Section, ?BLACKLIST_CONFIG_SECTIONS) of
- true ->
- Msg = <<"Config section blacklisted for modification over HTTP API.">>,
- throw({forbidden, Msg});
- _ ->
- ok
+ true ->
+ Msg = <<"Config section blacklisted for modification over HTTP API.">>,
+ throw({forbidden, Msg});
+ _ ->
+ ok
end.
-
-ifdef(OTP_RELEASE).
-if(?OTP_RELEASE >= 22).
@@ -765,7 +794,8 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -if(?OTP_RELEASE >= 22)
+% -if(?OTP_RELEASE >= 22)
+-endif.
-else.
@@ -773,4 +803,5 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -ifdef(OTP_RELEASE)
+% -ifdef(OTP_RELEASE)
+-endif.
diff --git a/src/couch/src/couch_uuids.erl b/src/couch/src/couch_uuids.erl
index 3fffd04b3..be6089dff 100644
--- a/src/couch/src/couch_uuids.erl
+++ b/src/couch/src/couch_uuids.erl
@@ -127,23 +127,22 @@ utc_random(ClockSeq) ->
utc_suffix(Suffix, ClockSeq, Now) ->
OsMicros = micros_since_epoch(Now),
- NewClockSeq = if
- OsMicros =< ClockSeq ->
- % Timestamp is lagging, use ClockSeq as Timestamp
- ClockSeq + 1;
- OsMicros > ClockSeq ->
- % Timestamp advanced, use it, and reset ClockSeq with it
- OsMicros
- end,
+ NewClockSeq =
+ if
+ OsMicros =< ClockSeq ->
+ % Timestamp is lagging, use ClockSeq as Timestamp
+ ClockSeq + 1;
+ OsMicros > ClockSeq ->
+ % Timestamp advanced, use it, and reset ClockSeq with it
+ OsMicros
+ end,
Prefix = io_lib:format("~14.16.0b", [NewClockSeq]),
{list_to_binary(Prefix ++ Suffix), NewClockSeq}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
utc_id_time_does_not_advance_test() ->
% Timestamp didn't advance but local clock sequence should and new UUIds
% should be generated
@@ -156,7 +155,6 @@ utc_id_time_does_not_advance_test() ->
?assertNotEqual(UtcId0, UtcId1),
?assertEqual(ClockSeq1 + 1, ClockSeq2).
-
utc_id_time_advanced_test() ->
% Timestamp advanced, a new UUID generated and also the last clock sequence
% is updated to that timestamp.
@@ -187,5 +185,4 @@ utc_random_test_time_advance_test() ->
?assertEqual(32, byte_size(UtcRandom)),
?assert(NextClockSeq > micros_since_epoch({1000, 0, 0})).
-
-endif.
diff --git a/src/couch/src/couch_work_queue.erl b/src/couch/src/couch_work_queue.erl
index 5d747de82..d767a33be 100644
--- a/src/couch/src/couch_work_queue.erl
+++ b/src/couch/src/couch_work_queue.erl
@@ -35,21 +35,17 @@
multi_workers = false
}).
-
new(Options) ->
gen_server:start_link(couch_work_queue, Options, []).
-
queue(Wq, Item) when is_binary(Item) ->
gen_server:call(Wq, {queue, Item, byte_size(Item)}, infinity);
queue(Wq, Item) ->
gen_server:call(Wq, {queue, Item, ?term_size(Item)}, infinity).
-
dequeue(Wq) ->
dequeue(Wq, all).
-
dequeue(Wq, MaxItems) ->
try
gen_server:call(Wq, {dequeue, MaxItems}, infinity)
@@ -57,7 +53,6 @@ dequeue(Wq, MaxItems) ->
_:_ -> closed
end.
-
item_count(Wq) ->
try
gen_server:call(Wq, item_count, infinity)
@@ -65,7 +60,6 @@ item_count(Wq) ->
_:_ -> closed
end.
-
size(Wq) ->
try
gen_server:call(Wq, size, infinity)
@@ -73,10 +67,8 @@ size(Wq) ->
_:_ -> closed
end.
-
close(Wq) ->
gen_server:cast(Wq, close).
-
init(Options) ->
Q = #q{
@@ -86,50 +78,47 @@ init(Options) ->
},
{ok, Q, hibernate}.
-
-terminate(_Reason, #q{work_waiters=Workers}) ->
+terminate(_Reason, #q{work_waiters = Workers}) ->
lists:foreach(fun({W, _}) -> gen_server:reply(W, closed) end, Workers).
-
handle_call({queue, Item, Size}, From, #q{work_waiters = []} = Q0) ->
- Q = Q0#q{size = Q0#q.size + Size,
- items = Q0#q.items + 1,
- queue = queue:in({Item, Size}, Q0#q.queue)},
- case (Q#q.size >= Q#q.max_size) orelse
- (Q#q.items >= Q#q.max_items) of
- true ->
- {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate};
- false ->
- {reply, ok, Q, hibernate}
+ Q = Q0#q{
+ size = Q0#q.size + Size,
+ items = Q0#q.items + 1,
+ queue = queue:in({Item, Size}, Q0#q.queue)
+ },
+ case
+ (Q#q.size >= Q#q.max_size) orelse
+ (Q#q.items >= Q#q.max_items)
+ of
+ true ->
+ {noreply, Q#q{blocked = [From | Q#q.blocked]}, hibernate};
+ false ->
+ {reply, ok, Q, hibernate}
end;
-
handle_call({queue, Item, _}, _From, #q{work_waiters = [{W, _Max} | Rest]} = Q) ->
gen_server:reply(W, {ok, [Item]}),
{reply, ok, Q#q{work_waiters = Rest}, hibernate};
-
handle_call({dequeue, Max}, From, Q) ->
#q{work_waiters = Workers, multi_workers = Multi, items = Count} = Q,
case {Workers, Multi} of
- {[_ | _], false} ->
- exit("Only one caller allowed to wait for this work at a time");
- {[_ | _], true} ->
- {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
- _ ->
- case Count of
- 0 ->
- {noreply, Q#q{work_waiters=Workers ++ [{From, Max}]}};
- C when C > 0 ->
- deliver_queue_items(Max, Q)
- end
+ {[_ | _], false} ->
+ exit("Only one caller allowed to wait for this work at a time");
+ {[_ | _], true} ->
+ {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}};
+ _ ->
+ case Count of
+ 0 ->
+ {noreply, Q#q{work_waiters = Workers ++ [{From, Max}]}};
+ C when C > 0 ->
+ deliver_queue_items(Max, Q)
+ end
end;
-
handle_call(item_count, _From, Q) ->
{reply, Q#q.items, Q};
-
handle_call(size, _From, Q) ->
{reply, Q#q.size, Q}.
-
deliver_queue_items(Max, Q) ->
#q{
queue = Queue,
@@ -139,48 +128,45 @@ deliver_queue_items(Max, Q) ->
blocked = Blocked
} = Q,
case (Max =:= all) orelse (Max >= Count) of
- false ->
- {Items, Size2, Queue2, Blocked2} = dequeue_items(
- Max, Size, Queue, Blocked, []),
- Q2 = Q#q{
- items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2
- },
- {reply, {ok, Items}, Q2};
- true ->
- lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
- Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
- Items = [Item || {Item, _} <- queue:to_list(Queue)],
- case Close of
false ->
+ {Items, Size2, Queue2, Blocked2} = dequeue_items(
+ Max, Size, Queue, Blocked, []
+ ),
+ Q2 = Q#q{
+ items = Count - Max, size = Size2, blocked = Blocked2, queue = Queue2
+ },
{reply, {ok, Items}, Q2};
true ->
- {stop, normal, {ok, Items}, Q2}
- end
+ lists:foreach(fun(F) -> gen_server:reply(F, ok) end, Blocked),
+ Q2 = Q#q{items = 0, size = 0, blocked = [], queue = queue:new()},
+ Items = [Item || {Item, _} <- queue:to_list(Queue)],
+ case Close of
+ false ->
+ {reply, {ok, Items}, Q2};
+ true ->
+ {stop, normal, {ok, Items}, Q2}
+ end
end.
-
dequeue_items(0, Size, Queue, Blocked, DequeuedAcc) ->
{lists:reverse(DequeuedAcc), Size, Queue, Blocked};
-
dequeue_items(NumItems, Size, Queue, Blocked, DequeuedAcc) ->
{{value, {Item, ItemSize}}, Queue2} = queue:out(Queue),
case Blocked of
- [] ->
- Blocked2 = Blocked;
- [From | Blocked2] ->
- gen_server:reply(From, ok)
+ [] ->
+ Blocked2 = Blocked;
+ [From | Blocked2] ->
+ gen_server:reply(From, ok)
end,
dequeue_items(
- NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]).
-
+ NumItems - 1, Size - ItemSize, Queue2, Blocked2, [Item | DequeuedAcc]
+ ).
handle_cast(close, #q{items = 0} = Q) ->
{stop, normal, Q};
-
handle_cast(close, Q) ->
{noreply, Q#q{close_on_dequeue = true}}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
diff --git a/src/couch/src/test_request.erl b/src/couch/src/test_request.erl
index 48f49bda6..d7364012f 100644
--- a/src/couch/src/test_request.erl
+++ b/src/couch/src/test_request.erl
@@ -74,7 +74,6 @@ options(Url, Headers) ->
options(Url, Headers, Opts) ->
request(options, Url, Headers, [], Opts).
-
request(Method, Url, Headers) ->
request(Method, Url, Headers, []).
diff --git a/src/couch/src/test_util.erl b/src/couch/src/test_util.erl
index 125e76492..4b802bc49 100644
--- a/src/couch/src/test_util.erl
+++ b/src/couch/src/test_util.erl
@@ -40,8 +40,7 @@
-record(test_context, {mocked = [], started = [], module}).
--define(DEFAULT_APPS,
- [inets, ibrowse, ssl, config, couch_epi, couch_event, couch]).
+-define(DEFAULT_APPS, [inets, ibrowse, ssl, config, couch_epi, couch_event, couch]).
srcdir() ->
code:priv_dir(couch) ++ "/../../".
@@ -57,9 +56,12 @@ init_code_path() ->
"mochiweb",
"snappy"
],
- lists:foreach(fun(Name) ->
- code:add_patha(filename:join([builddir(), "src", Name]))
- end, Paths).
+ lists:foreach(
+ fun(Name) ->
+ code:add_patha(filename:join([builddir(), "src", Name]))
+ end,
+ Paths
+ ).
source_file(Name) ->
filename:join([srcdir(), Name]).
@@ -94,21 +96,21 @@ start_applications(Apps) ->
start_applications([], Acc) ->
lists:reverse(Acc);
-start_applications([App|Apps], Acc) when App == kernel; App == stdlib ->
+start_applications([App | Apps], Acc) when App == kernel; App == stdlib ->
start_applications(Apps, Acc);
-start_applications([App|Apps], Acc) ->
+start_applications([App | Apps], Acc) ->
case application:start(App) of
- {error, {already_started, crypto}} ->
- start_applications(Apps, [crypto | Acc]);
- {error, {already_started, App}} ->
- io:format(standard_error, "Application ~s was left running!~n", [App]),
- application:stop(App),
- start_applications([App|Apps], Acc);
- {error, Reason} ->
- io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]),
- throw({error, {cannot_start, App, Reason}});
- ok ->
- start_applications(Apps, [App|Acc])
+ {error, {already_started, crypto}} ->
+ start_applications(Apps, [crypto | Acc]);
+ {error, {already_started, App}} ->
+ io:format(standard_error, "Application ~s was left running!~n", [App]),
+ application:stop(App),
+ start_applications([App | Apps], Acc);
+ {error, Reason} ->
+ io:format(standard_error, "Cannot start application '~s', reason ~p~n", [App, Reason]),
+ throw({error, {cannot_start, App, Reason}});
+ ok ->
+ start_applications(Apps, [App | Acc])
end.
stop_applications(Apps) ->
@@ -119,12 +121,11 @@ start_config(Chain) ->
case config:start_link(Chain) of
{ok, Pid} ->
{ok, Pid};
- {error, {already_started, OldPid}} ->
+ {error, {already_started, OldPid}} ->
ok = stop_config(OldPid),
start_config(Chain)
end.
-
stop_config(Pid) ->
Timeout = 1000,
case stop_sync(Pid, fun() -> config:stop() end, Timeout) of
@@ -150,8 +151,8 @@ stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) ->
catch unlink(Pid),
Res = (catch Fun()),
receive
- {'DOWN', MRef, _, _, _} ->
- Res
+ {'DOWN', MRef, _, _, _} ->
+ Res
after Timeout ->
timeout
end
@@ -159,7 +160,8 @@ stop_sync(Pid, Fun, Timeout) when is_function(Fun) and is_pid(Pid) ->
after
erlang:demonitor(MRef, [flush])
end;
-stop_sync(_, _, _) -> error(badarg).
+stop_sync(_, _, _) ->
+ error(badarg).
stop_sync_throw(Name, Error) ->
stop_sync_throw(Name, shutdown, Error).
@@ -176,7 +178,8 @@ stop_sync_throw(Pid, Fun, Error, Timeout) ->
with_process_restart(Name) ->
{Pid, true} = with_process_restart(
- Name, fun() -> exit(whereis(Name), shutdown) end),
+ Name, fun() -> exit(whereis(Name), shutdown) end
+ ),
Pid.
with_process_restart(Name, Fun) ->
@@ -185,24 +188,26 @@ with_process_restart(Name, Fun) ->
with_process_restart(Name, Fun, Timeout) ->
Res = stop_sync(Name, Fun),
case wait_process(Name, Timeout) of
- timeout ->
- timeout;
- Pid ->
- {Pid, Res}
+ timeout ->
+ timeout;
+ Pid ->
+ {Pid, Res}
end.
-
wait_process(Name) ->
wait_process(Name, 5000).
wait_process(Name, Timeout) ->
- wait(fun() ->
- case whereis(Name) of
- undefined ->
- wait;
- Pid ->
- Pid
- end
- end, Timeout).
+ wait(
+ fun() ->
+ case whereis(Name) of
+ undefined ->
+ wait;
+ Pid ->
+ Pid
+ end
+ end,
+ Timeout
+ ).
wait(Fun) ->
wait(Fun, 5000, 50).
@@ -218,11 +223,11 @@ wait(_Fun, Timeout, _Delay, Started, Prev) when Prev - Started > Timeout ->
timeout;
wait(Fun, Timeout, Delay, Started, _Prev) ->
case Fun() of
- wait ->
- ok = timer:sleep(Delay),
- wait(Fun, Timeout, Delay, Started, now_us());
- Else ->
- Else
+ wait ->
+ ok = timer:sleep(Delay),
+ wait(Fun, Timeout, Delay, Started, now_us());
+ Else ->
+ Else
end.
wait_value(Fun, Value) ->
@@ -260,13 +265,17 @@ stop(#test_context{mocked = Mocked, started = Apps}) ->
fake_db(Fields0) ->
{ok, Db, Fields} = maybe_set_engine(Fields0),
Indexes = lists:zip(
- record_info(fields, db),
- lists:seq(2, record_info(size, db))
- ),
- lists:foldl(fun({FieldName, Value}, Acc) ->
- Idx = couch_util:get_value(FieldName, Indexes),
- setelement(Idx, Acc, Value)
- end, Db, Fields).
+ record_info(fields, db),
+ lists:seq(2, record_info(size, db))
+ ),
+ lists:foldl(
+ fun({FieldName, Value}, Acc) ->
+ Idx = couch_util:get_value(FieldName, Indexes),
+ setelement(Idx, Acc, Value)
+ end,
+ Db,
+ Fields
+ ).
maybe_set_engine(Fields0) ->
case lists:member(engine, Fields0) of
@@ -279,11 +288,24 @@ maybe_set_engine(Fields0) ->
end.
get_engine_header(Fields) ->
- Keys = [disk_version, update_seq, unused, id_tree_state,
- seq_tree_state, local_tree_state, purge_seq, purged_docs,
- security_ptr, revs_limit, uuid, epochs, compacted_seq],
+ Keys = [
+ disk_version,
+ update_seq,
+ unused,
+ id_tree_state,
+ seq_tree_state,
+ local_tree_state,
+ purge_seq,
+ purged_docs,
+ security_ptr,
+ revs_limit,
+ uuid,
+ epochs,
+ compacted_seq
+ ],
{HeadFields, RestFields} = lists:partition(
- fun({K, _}) -> lists:member(K, Keys) end, Fields),
+ fun({K, _}) -> lists:member(K, Keys) end, Fields
+ ),
Header0 = couch_bt_engine_header:new(),
Header = couch_bt_engine_header:set(Header0, HeadFields),
{ok, Header, RestFields}.
@@ -315,7 +337,7 @@ load_applications_with_stats() ->
ok.
stats_file_to_app(File) ->
- [_Desc, _Priv, App|_] = lists:reverse(filename:split(File)),
+ [_Desc, _Priv, App | _] = lists:reverse(filename:split(File)),
erlang:list_to_atom(App).
calculate_start_order(Apps) ->
@@ -345,14 +367,19 @@ load_app_deps(App, StartOrder) ->
{error, {already_loaded, App}} -> ok
end,
{ok, Apps} = application:get_key(App, applications),
- Deps = case App of
- kernel -> Apps;
- stdlib -> Apps;
- _ -> lists:usort([kernel, stdlib | Apps])
- end,
- NewStartOrder = lists:foldl(fun(Dep, Acc) ->
- load_app_deps(Dep, Acc)
- end, StartOrder, Deps),
+ Deps =
+ case App of
+ kernel -> Apps;
+ stdlib -> Apps;
+ _ -> lists:usort([kernel, stdlib | Apps])
+ end,
+ NewStartOrder = lists:foldl(
+ fun(Dep, Acc) ->
+ load_app_deps(Dep, Acc)
+ end,
+ StartOrder,
+ Deps
+ ),
[App | NewStartOrder]
end.
diff --git a/src/couch/test/eunit/chttpd_endpoints_tests.erl b/src/couch/test/eunit/chttpd_endpoints_tests.erl
index 3c8586a14..63f67c243 100644
--- a/src/couch/test/eunit/chttpd_endpoints_tests.erl
+++ b/src/couch/test/eunit/chttpd_endpoints_tests.erl
@@ -15,7 +15,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
endpoints_test_() ->
{
"Checking dynamic endpoints",
@@ -33,7 +32,6 @@ endpoints_test_() ->
}
}.
-
url_handlers() ->
Handlers = [
{<<"">>, chttpd_misc, handle_welcome_req},
@@ -53,15 +51,17 @@ url_handlers() ->
{<<"_cluster_setup">>, setup_httpd, handle_setup_req}
],
- lists:foreach(fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:url_handler(Path, undefined),
- Expect = fun Mod:Fun/1,
- ?assertEqual(Expect, Handler)
- end, Handlers),
+ lists:foreach(
+ fun({Path, Mod, Fun}) ->
+ Handler = chttpd_handlers:url_handler(Path, undefined),
+ Expect = fun Mod:Fun/1,
+ ?assertEqual(Expect, Handler)
+ end,
+ Handlers
+ ),
?assertEqual(undefined, chttpd_handlers:url_handler("foo", undefined)).
-
db_handlers() ->
Handlers = [
{<<"_view_cleanup">>, chttpd_db, handle_view_cleanup_req},
@@ -75,15 +75,17 @@ db_handlers() ->
{<<"_find">>, mango_httpd, handle_req}
],
- lists:foreach(fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:db_handler(Path, undefined),
- Expect = fun Mod:Fun/2,
- ?assertEqual(Expect, Handler)
- end, Handlers),
+ lists:foreach(
+ fun({Path, Mod, Fun}) ->
+ Handler = chttpd_handlers:db_handler(Path, undefined),
+ Expect = fun Mod:Fun/2,
+ ?assertEqual(Expect, Handler)
+ end,
+ Handlers
+ ),
?assertEqual(undefined, chttpd_handlers:db_handler("bam", undefined)).
-
design_handlers() ->
Handlers = [
{<<"_view">>, chttpd_view, handle_view_req},
@@ -94,10 +96,13 @@ design_handlers() ->
{<<"_rewrite">>, chttpd_rewrite, handle_rewrite_req}
],
- lists:foreach(fun({Path, Mod, Fun}) ->
- Handler = chttpd_handlers:design_handler(Path, undefined),
- Expect = fun Mod:Fun/3,
- ?assertEqual(Expect, Handler)
- end, Handlers),
+ lists:foreach(
+ fun({Path, Mod, Fun}) ->
+ Handler = chttpd_handlers:design_handler(Path, undefined),
+ Expect = fun Mod:Fun/3,
+ ?assertEqual(Expect, Handler)
+ end,
+ Handlers
+ ),
?assertEqual(undefined, chttpd_handlers:design_handler("baz", undefined)).
diff --git a/src/couch/test/eunit/couch_auth_cache_tests.erl b/src/couch/test/eunit/couch_auth_cache_tests.erl
index 71faf77d6..a4c31083a 100644
--- a/src/couch/test/eunit/couch_auth_cache_tests.erl
+++ b/src/couch/test/eunit/couch_auth_cache_tests.erl
@@ -21,27 +21,31 @@
start() ->
test_util:start_couch([ioq]).
-
setup() ->
DbName = ?tempdb(),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName), false),
+ config:set(
+ "couch_httpd_auth",
+ "authentication_db",
+ ?b2l(DbName),
+ false
+ ),
DbName.
teardown(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]),
ok.
-
couch_auth_cache_test_() ->
{
"CouchDB auth cache tests",
{
setup,
- fun start/0, fun test_util:stop_couch/1,
+ fun start/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_get_nil_on_missed_cache/1,
fun should_get_right_password_hash/1,
@@ -120,18 +124,18 @@ auth_vdu_test_() ->
[missing, user, other]
]),
AllPossibleCases = couch_tests_combinatorics:product(
- [AllPossibleDocs, AllPossibleDocs]),
+ [AllPossibleDocs, AllPossibleDocs]
+ ),
?assertEqual([], AllPossibleCases -- [[A, B] || {A, B, _} <- Cases]),
{
"Check User doc validation",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- [
- make_validate_test(Case) || Case <- Cases
- ]
- }
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
+ [make_validate_test(Case) || Case <- Cases]
+ }
}.
should_get_nil_on_missed_cache(_) ->
@@ -142,8 +146,10 @@ should_get_right_password_hash(DbName) ->
PasswordHash = hash_password("pass1"),
{ok, _} = update_user_doc(DbName, "joe", "pass1"),
{ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
+ ?assertEqual(
+ PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds)
+ )
end).
should_ensure_doc_hash_equals_cached_one(DbName) ->
@@ -162,8 +168,10 @@ should_update_password(DbName) ->
{ok, Rev} = update_user_doc(DbName, "joe", "pass1"),
{ok, _} = update_user_doc(DbName, "joe", "pass2", Rev),
{ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
+ ?assertEqual(
+ PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds)
+ )
end).
should_cleanup_cache_after_userdoc_deletion(DbName) ->
@@ -183,15 +191,21 @@ should_restore_cache_after_userdoc_recreation(DbName) ->
{ok, _} = update_user_doc(DbName, "joe", "pass5"),
{ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
+ ?assertEqual(
+ PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds)
+ )
end).
should_drop_cache_on_auth_db_change(DbName) ->
?_test(begin
{ok, _} = update_user_doc(DbName, "joe", "pass1"),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(?tempdb()), false),
+ config:set(
+ "couch_httpd_auth",
+ "authentication_db",
+ ?b2l(?tempdb()),
+ false
+ ),
?assertEqual(nil, couch_auth_cache:get_user_creds("joe"))
end).
@@ -202,17 +216,27 @@ should_restore_cache_on_auth_db_change(DbName) ->
{ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
DbName1 = ?tempdb(),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName1), false),
+ config:set(
+ "couch_httpd_auth",
+ "authentication_db",
+ ?b2l(DbName1),
+ false
+ ),
{ok, _} = update_user_doc(DbName1, "joe", "pass5"),
- config:set("couch_httpd_auth", "authentication_db",
- ?b2l(DbName), false),
+ config:set(
+ "couch_httpd_auth",
+ "authentication_db",
+ ?b2l(DbName),
+ false
+ ),
{ok, Creds, _} = couch_auth_cache:get_user_creds("joe"),
- ?assertEqual(PasswordHash,
- couch_util:get_value(<<"password_sha">>, Creds))
+ ?assertEqual(
+ PasswordHash,
+ couch_util:get_value(<<"password_sha">>, Creds)
+ )
end).
should_recover_cache_after_shutdown(DbName) ->
@@ -225,7 +249,6 @@ should_recover_cache_after_shutdown(DbName) ->
?assertEqual(PasswordHash, get_user_doc_password_sha(DbName, "joe"))
end).
-
should_get_admin_from_config(_DbName) ->
?_test(begin
config:set("admins", "testadmin", "password", false),
@@ -245,17 +268,19 @@ update_user_doc(DbName, UserName, Password) ->
update_user_doc(DbName, UserName, Password, Rev) ->
ok = couch_auth_cache:ensure_users_db_exists(),
User = iolist_to_binary(UserName),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
- {<<"name">>, User},
- {<<"type">>, <<"user">>},
- {<<"salt">>, ?SALT},
- {<<"password_sha">>, hash_password(Password)},
- {<<"roles">>, []}
- ] ++ case Rev of
- nil -> [];
- _ -> [{<<"_rev">>, Rev}]
- end
+ Doc = couch_doc:from_json_obj({
+ [
+ {<<"_id">>, <<"org.couchdb.user:", User/binary>>},
+ {<<"name">>, User},
+ {<<"type">>, <<"user">>},
+ {<<"salt">>, ?SALT},
+ {<<"password_sha">>, hash_password(Password)},
+ {<<"roles">>, []}
+ ] ++
+ case Rev of
+ nil -> [];
+ _ -> [{<<"_rev">>, Rev}]
+ end
}),
{ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
{ok, NewRev} = couch_db:update_doc(AuthDb, Doc, []),
@@ -275,13 +300,13 @@ get_doc_rev(DbName, UserName) ->
DocId = iolist_to_binary([<<"org.couchdb.user:">>, UserName]),
{ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
UpdateRev =
- case couch_db:open_doc(AuthDb, DocId, []) of
- {ok, Doc} ->
- {Props} = couch_doc:to_json_obj(Doc, []),
- couch_util:get_value(<<"_rev">>, Props);
- {not_found, missing} ->
- nil
- end,
+ case couch_db:open_doc(AuthDb, DocId, []) of
+ {ok, Doc} ->
+ {Props} = couch_doc:to_json_obj(Doc, []),
+ couch_util:get_value(<<"_rev">>, Props);
+ {not_found, missing} ->
+ nil
+ end,
ok = couch_db:close(AuthDb),
{ok, UpdateRev}.
@@ -298,15 +323,16 @@ delete_user_doc(DbName, UserName) ->
{ok, AuthDb} = couch_db:open_int(DbName, [?ADMIN_CTX]),
{ok, Doc} = couch_db:open_doc(AuthDb, DocId, []),
{Props} = couch_doc:to_json_obj(Doc, []),
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}),
+ DeletedDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DocId},
+ {<<"_rev">>, couch_util:get_value(<<"_rev">>, Props)},
+ {<<"_deleted">>, true}
+ ]}
+ ),
{ok, _} = couch_db:update_doc(AuthDb, DeletedDoc, []),
ok = couch_db:close(AuthDb).
-
make_validate_test({Old, New, "ok"} = Case) ->
{test_id(Case), ?_assertEqual(ok, validate(doc(Old), doc(New)))};
make_validate_test({Old, New, Reason} = Case) ->
@@ -314,19 +340,25 @@ make_validate_test({Old, New, Reason} = Case) ->
{test_id(Case), ?_assertThrow({forbidden, Failure}, validate(doc(Old), doc(New)))}.
test_id({[OldRoles, OldType], [NewRoles, NewType], Result}) ->
- lists:flatten(io_lib:format(
- "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"",
- [OldRoles, OldType, NewRoles, NewType, Result])).
+ lists:flatten(
+ io_lib:format(
+ "(roles: ~w, type: ~w) -> (roles: ~w, type: ~w) ==> \"~s\"",
+ [OldRoles, OldType, NewRoles, NewType, Result]
+ )
+ ).
doc([Roles, Type]) ->
- couch_doc:from_json_obj({[
- {<<"_id">>,<<"org.couchdb.user:foo">>},
- {<<"_rev">>,<<"1-281c81adb1bf10927a6160f246dc0468">>},
- {<<"name">>,<<"foo">>},
- {<<"password_scheme">>,<<"simple">>},
- {<<"salt">>,<<"00000000000000000000000000000000">>},
- {<<"password_sha">>, <<"111111111111111111111111111111111111">>}]
- ++ type(Type) ++ roles(Roles)}).
+ couch_doc:from_json_obj({
+ [
+ {<<"_id">>, <<"org.couchdb.user:foo">>},
+ {<<"_rev">>, <<"1-281c81adb1bf10927a6160f246dc0468">>},
+ {<<"name">>, <<"foo">>},
+ {<<"password_scheme">>, <<"simple">>},
+ {<<"salt">>, <<"00000000000000000000000000000000">>},
+ {<<"password_sha">>, <<"111111111111111111111111111111111111">>}
+ ] ++
+ type(Type) ++ roles(Roles)
+ }).
roles(custom) -> [{<<"roles">>, [<<"custom">>]}];
roles(missing) -> [].
@@ -336,11 +368,12 @@ type(other) -> [{<<"type">>, <<"other">>}];
type(missing) -> [].
validate(DiskDoc, NewDoc) ->
- JSONCtx = {[
- {<<"db">>, <<"foo/bar">>},
- {<<"name">>, <<"foo">>},
- {<<"roles">>, [<<"_admin">>]}
- ]},
+ JSONCtx =
+ {[
+ {<<"db">>, <<"foo/bar">>},
+ {<<"name">>, <<"foo">>},
+ {<<"roles">>, [<<"_admin">>]}
+ ]},
validate(DiskDoc, NewDoc, JSONCtx).
validate(DiskDoc, NewDoc, JSONCtx) ->
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl b/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl
index f50be84de..72b780a7f 100644
--- a/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl
+++ b/src/couch/test/eunit/couch_bt_engine_compactor_ev.erl
@@ -12,7 +12,6 @@
-module(couch_bt_engine_compactor_ev).
-
-export([
init/0,
terminate/0,
@@ -24,22 +23,17 @@
event/1
]).
-
-define(TAB, couch_db_updater_ev_tab).
-
init() ->
ets:new(?TAB, [set, public, named_table]).
-
terminate() ->
ets:delete(?TAB).
-
clear() ->
ets:delete_all_objects(?TAB).
-
set_wait(Event) ->
Self = self(),
WaitFun = fun(_) ->
@@ -51,48 +45,48 @@ set_wait(Event) ->
end,
ContinueFun = fun(Pid) ->
Pid ! {Self, go},
- receive {Pid, ok} -> ok end
+ receive
+ {Pid, ok} -> ok
+ end
end,
ets:insert(?TAB, {Event, WaitFun}),
{ok, ContinueFun}.
-
set_crash(Event) ->
Reason = {couch_db_updater_ev_crash, Event},
CrashFun = fun(_) -> exit(Reason) end,
ets:insert(?TAB, {Event, CrashFun}),
{ok, Reason}.
-
event(Event) ->
- NewEvent = case Event of
- seq_init ->
- put(?MODULE, 0),
- Event;
- seq_copy ->
- Count = get(?MODULE),
- put(?MODULE, Count + 1),
- {seq_copy, Count};
- id_init ->
- put(?MODULE, 0),
- Event;
- id_copy ->
- Count = get(?MODULE),
- put(?MODULE, Count + 1),
- {id_copy, Count};
- md_copy_init ->
- put(?MODULE, 0),
- Event;
- md_copy_row ->
- Count = get(?MODULE),
- put(?MODULE, Count + 1),
- {md_copy_row, Count};
- _ ->
- Event
- end,
+ NewEvent =
+ case Event of
+ seq_init ->
+ put(?MODULE, 0),
+ Event;
+ seq_copy ->
+ Count = get(?MODULE),
+ put(?MODULE, Count + 1),
+ {seq_copy, Count};
+ id_init ->
+ put(?MODULE, 0),
+ Event;
+ id_copy ->
+ Count = get(?MODULE),
+ put(?MODULE, Count + 1),
+ {id_copy, Count};
+ md_copy_init ->
+ put(?MODULE, 0),
+ Event;
+ md_copy_row ->
+ Count = get(?MODULE),
+ put(?MODULE, Count + 1),
+ {md_copy_row, Count};
+ _ ->
+ Event
+ end,
handle_event(NewEvent).
-
handle_event(Event) ->
try
case ets:lookup(?TAB, Event) of
@@ -101,6 +95,7 @@ handle_event(Event) ->
[] ->
ok
end
- catch error:badarg ->
- ok
- end.
\ No newline at end of file
+ catch
+ error:badarg ->
+ ok
+ end.
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl
index 090217b4c..007c74d06 100644
--- a/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl
+++ b/src/couch/test/eunit/couch_bt_engine_compactor_ev_tests.erl
@@ -39,21 +39,28 @@
events() ->
[
- init, % The compactor process is spawned
- files_opened, % After compaction files have opened
+ % The compactor process is spawned
+ init,
+ % After compaction files have opened
+ files_opened,
- purge_init, % Just before apply purge changes
- purge_done, % Just after finish purge updates
+ % Just before apply purge changes
+ purge_init,
+ % Just after finish purge updates
+ purge_done,
% The first phase is when we copy all document body and attachment
% data to the new database file in order of update sequence so
% that we can resume on crash.
- seq_init, % Before the first change is copied
- {seq_copy, 0}, % After change N is copied
+ % Before the first change is copied
+ seq_init,
+ % After change N is copied
+ {seq_copy, 0},
{seq_copy, ?INIT_DOCS div 2},
{seq_copy, ?INIT_DOCS - 2},
- seq_done, % After last change is copied
+ % After last change is copied
+ seq_done,
% The id copy phases come in two flavors. Before a compaction
% swap is attempted they're copied from the id_tree in the
@@ -61,19 +68,27 @@ events() ->
% stored in an emsort file on disk. Thus the two sets of
% related events here.
- md_sort_init, % Just before metadata sort starts
- md_sort_done, % Justa after metadata sort finished
- md_copy_init, % Just before metadata copy starts
- {md_copy_row, 0}, % After docid N is copied
+ % Just before metadata sort starts
+ md_sort_init,
+ % Just after metadata sort finished
+ md_sort_done,
+ % Just before metadata copy starts
+ md_copy_init,
+ % After docid N is copied
+ {md_copy_row, 0},
{md_copy_row, ?INIT_DOCS div 2},
{md_copy_row, ?INIT_DOCS - 2},
- md_copy_done, % Just after the last docid is copied
+ % Just after the last docid is copied
+ md_copy_done,
% And then the final steps before we finish
- before_final_sync, % Just before final sync
- after_final_sync, % Just after the final sync
- before_notify % Just before the final notification
+ % Just before final sync
+ before_final_sync,
+ % Just after the final sync
+ after_final_sync,
+ % Just before the final notification
+ before_notify
].
% Mark which events only happen when documents are present
@@ -86,7 +101,6 @@ requires_docs({md_copy_row, _}) -> true;
requires_docs(md_copy_done) -> true;
requires_docs(_) -> false.
-
% Mark which events only happen when there's write activity during
% a compaction.
@@ -97,25 +111,21 @@ requires_write({md_copy_row, _}) -> true;
requires_write(md_copy_done) -> true;
requires_write(_) -> false.
-
setup() ->
purge_module(),
?EV_MOD:init(),
test_util:start_couch().
-
teardown(Ctx) ->
test_util:stop_couch(Ctx),
?EV_MOD:terminate().
-
start_empty_db_test(_Event) ->
?EV_MOD:clear(),
DbName = ?tempdb(),
{ok, _} = couch_db:create(DbName, [?ADMIN_CTX]),
DbName.
-
start_populated_db_test(Event) ->
DbName = start_empty_db_test(Event),
{ok, Db} = couch_db:open_int(DbName, []),
@@ -126,11 +136,9 @@ start_populated_db_test(Event) ->
end,
DbName.
-
stop_test(_Event, DbName) ->
couch_server:delete(DbName, [?ADMIN_CTX]).
-
static_empty_db_test_() ->
FiltFun = fun(E) ->
not (requires_docs(E) or requires_write(E))
@@ -153,7 +161,6 @@ static_empty_db_test_() ->
}
}.
-
static_populated_db_test_() ->
FiltFun = fun(E) -> not requires_write(E) end,
Events = lists:filter(FiltFun, events()) -- [init],
@@ -174,7 +181,6 @@ static_populated_db_test_() ->
}
}.
-
dynamic_empty_db_test_() ->
FiltFun = fun(E) -> not requires_docs(E) end,
Events = lists:filter(FiltFun, events()) -- [init],
@@ -195,7 +201,6 @@ dynamic_empty_db_test_() ->
}
}.
-
dynamic_populated_db_test_() ->
Events = events() -- [init],
{
@@ -215,13 +220,11 @@ dynamic_populated_db_test_() ->
}
}.
-
run_static_init(Event, DbName) ->
Name = lists:flatten(io_lib:format("~p", [Event])),
Test = {timeout, ?TIMEOUT_EUNIT, ?_test(run_static(Event, DbName))},
{Name, Test}.
-
run_static(Event, DbName) ->
{ok, ContinueFun} = ?EV_MOD:set_wait(init),
{ok, Reason} = ?EV_MOD:set_crash(Event),
@@ -236,13 +239,11 @@ run_static(Event, DbName) ->
run_successful_compaction(DbName),
couch_db:close(Db).
-
run_dynamic_init(Event, DbName) ->
Name = lists:flatten(io_lib:format("~p", [Event])),
Test = {timeout, ?TIMEOUT_EUNIT, ?_test(run_dynamic(Event, DbName))},
{Name, Test}.
-
run_dynamic(Event, DbName) ->
{ok, ContinueFun} = ?EV_MOD:set_wait(init),
{ok, Reason} = ?EV_MOD:set_crash(Event),
@@ -258,7 +259,6 @@ run_dynamic(Event, DbName) ->
run_successful_compaction(DbName),
couch_db:close(Db).
-
run_successful_compaction(DbName) ->
?EV_MOD:clear(),
{ok, ContinueFun} = ?EV_MOD:set_wait(init),
@@ -274,14 +274,11 @@ run_successful_compaction(DbName) ->
validate_compaction(NewDb),
couch_db:close(Db).
-
wait_db_cleared(Db) ->
wait_db_cleared(Db, 5).
-
wait_db_cleared(Db, N) when N < 0 ->
erlang:error({db_clear_timeout, couch_db:name(Db)});
-
wait_db_cleared(Db, N) ->
Tab = couch_server:couch_dbs(couch_db:name(Db)),
case ets:lookup(Tab, couch_db:name(Db)) of
@@ -290,29 +287,33 @@ wait_db_cleared(Db, N) ->
[#entry{db = NewDb}] ->
OldPid = couch_db:get_pid(Db),
NewPid = couch_db:get_pid(NewDb),
- if NewPid /= OldPid -> ok; true ->
- timer:sleep(100),
- wait_db_cleared(Db, N - 1)
+ if
+ NewPid /= OldPid ->
+ ok;
+ true ->
+ timer:sleep(100),
+ wait_db_cleared(Db, N - 1)
end
end.
-
populate_db(_Db, NumDocs) when NumDocs =< 0 ->
ok;
populate_db(Db, NumDocs) ->
String = [$a || _ <- lists:seq(1, erlang:min(NumDocs, 500))],
Docs = lists:map(
fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:random()},
- {<<"string">>, list_to_binary(String)}
- ]})
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, couch_uuids:random()},
+ {<<"string">>, list_to_binary(String)}
+ ]}
+ )
end,
- lists:seq(1, 500)),
+ lists:seq(1, 500)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs, []),
populate_db(Db, NumDocs - 500).
-
validate_compaction(Db) ->
{ok, DocCount} = couch_db:get_doc_count(Db),
{ok, DelDocCount} = couch_db:get_del_doc_count(Db),
@@ -325,7 +326,6 @@ validate_compaction(Db) ->
?assertEqual(DocCount + DelDocCount, LastCount),
?assertEqual(NumChanges, LastCount).
-
purge_module() ->
case code:which(couch_db_updater) of
cover_compiled ->
diff --git a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
index 4c4c43958..73428b0a9 100644
--- a/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
+++ b/src/couch/test/eunit/couch_bt_engine_compactor_tests.erl
@@ -12,15 +12,12 @@
-module(couch_bt_engine_compactor_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(DELAY, 100).
-define(WAIT_DELAY_COUNT, 50).
-
setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
@@ -28,12 +25,10 @@ setup() ->
create_docs(DbName),
DbName.
-
teardown(DbName) when is_binary(DbName) ->
couch_server:delete(DbName, [?ADMIN_CTX]),
ok.
-
compaction_resume_test_() ->
{
setup,
@@ -49,7 +44,6 @@ compaction_resume_test_() ->
}
}.
-
compaction_resume(DbName) ->
?_test(begin
check_db_validity(DbName),
@@ -66,14 +60,12 @@ compaction_resume(DbName) ->
check_db_validity(DbName)
end).
-
check_db_validity(DbName) ->
couch_util:with_db(DbName, fun(Db) ->
?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
?assertEqual(3, couch_db:count_changes_since(Db, 0))
end).
-
with_mecked_emsort(Fun) ->
meck:new(couch_emsort, [passthrough]),
meck:expect(couch_emsort, iter, fun(_) -> erlang:error(kaboom) end),
@@ -83,35 +75,35 @@ with_mecked_emsort(Fun) ->
meck:unload()
end.
-
create_docs(DbName) ->
couch_util:with_db(DbName, fun(Db) ->
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
+ Doc1 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 1}
+ ]}
+ ),
+ Doc2 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc2">>},
+ {<<"value">>, 2}
+ ]}
+ ),
+ Doc3 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"value">>, 3}
+ ]}
+ ),
{ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3])
end).
-
compact_db(DbName) ->
couch_util:with_db(DbName, fun(Db) ->
{ok, _} = couch_db:start_compact(Db)
end),
wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
-
wait_db_compact_done(_DbName, 0) ->
Failure = [
{module, ?MODULE},
@@ -123,7 +115,10 @@ wait_db_compact_done(DbName, N) ->
IsDone = couch_util:with_db(DbName, fun(Db) ->
not is_pid(couch_db:get_compactor_pid(Db))
end),
- if IsDone -> ok; true ->
- timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
+ if
+ IsDone ->
+ ok;
+ true ->
+ timer:sleep(?DELAY),
+ wait_db_compact_done(DbName, N - 1)
end.
diff --git a/src/couch/test/eunit/couch_bt_engine_tests.erl b/src/couch/test/eunit/couch_bt_engine_tests.erl
index 3e3ecbf25..56d18d3a4 100644
--- a/src/couch/test/eunit/couch_bt_engine_tests.erl
+++ b/src/couch/test/eunit/couch_bt_engine_tests.erl
@@ -12,9 +12,7 @@
-module(couch_bt_engine_tests).
-
-include_lib("eunit/include/eunit.hrl").
-
-couch_bt_engine_test_()->
+couch_bt_engine_test_() ->
cpse_util:create_tests(couch, couch_bt_engine, "couch").
diff --git a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
index a2a972caf..62f128a4f 100644
--- a/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
+++ b/src/couch/test/eunit/couch_bt_engine_upgrade_tests.erl
@@ -15,7 +15,8 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
--define(TIMEOUT, 60). % seconds
+% seconds
+-define(TIMEOUT, 60).
setup(_) ->
Ctx = test_util:start_couch(),
@@ -30,23 +31,27 @@ setup(_) ->
"db_v7_with_2_purge_req.couch",
"db_v7_with_1_purge_req_for_2_docs.couch"
],
- NewPaths = lists:map(fun(DbFileName) ->
- OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
- NewDbFilePath = filename:join([DbDir, DbFileName]),
- ok = filelib:ensure_dir(NewDbFilePath),
- file:delete(NewDbFilePath),
- {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
- NewDbFilePath
- end, DbFileNames),
+ NewPaths = lists:map(
+ fun(DbFileName) ->
+ OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName]),
+ NewDbFilePath = filename:join([DbDir, DbFileName]),
+ ok = filelib:ensure_dir(NewDbFilePath),
+ file:delete(NewDbFilePath),
+ {ok, _} = file:copy(OldDbFilePath, NewDbFilePath),
+ NewDbFilePath
+ end,
+ DbFileNames
+ ),
{Ctx, NewPaths}.
-
teardown(_, {Ctx, Paths}) ->
test_util:stop_couch(Ctx),
- lists:foreach(fun(Path) ->
- file:delete(Path)
- end, Paths).
-
+ lists:foreach(
+ fun(Path) ->
+ file:delete(Path)
+ end,
+ Paths
+ ).
upgrade_test_() ->
From = [6, 7],
@@ -54,174 +59,182 @@ upgrade_test_() ->
"Couch Bt Engine Upgrade tests",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{F, fun t_upgrade_without_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++
- [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From]
+ [{F, fun t_upgrade_with_1_purge_req/2} || F <- From] ++
+ [{F, fun t_upgrade_with_N_purge_req/2} || F <- From] ++
+ [{F, fun t_upgrade_with_1_purge_req_for_2_docs/2} || F <- From]
}
}.
-
t_upgrade_without_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are three documents in the fixture
- % db with zero purge entries
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_without_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(0, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual([], UpgradedPurged),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)),
- ?assertEqual(0, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ % There are three documents in the fixture
+ % db with zero purge entries
+ DbName = ?l2b(
+ "db_v" ++ integer_to_list(VersionFrom) ++
+ "_without_purge_req"
+ ),
+
+ ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
+ {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual(0, couch_db:get_purge_seq(Db)),
+ couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
+ end),
+ ?assertEqual([], UpgradedPurged),
+ ?assertEqual(8, get_disk_version_from_header(DbName)),
+ {ok, Rev} = save_doc(
+ DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
+ ),
+ {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 5}, couch_db:get_doc_count(Db)),
+ ?assertEqual(0, couch_db:get_purge_seq(Db))
+ end),
+
+ PurgeReqs = [
+ {couch_uuids:random(), <<"doc4">>, [Rev]}
+ ],
+
+ {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
+ couch_db:purge_docs(Db, PurgeReqs)
+ end),
+ ?assertEqual(PRevs, [Rev]),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
+ ?assertEqual(1, couch_db:get_purge_seq(Db))
+ end)
+ end)}.
t_upgrade_with_1_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are two documents in the fixture database
- % with a single purge entry
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_1_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(1, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{1, <<"doc1">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(
- DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
- ),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(1, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ % There are two documents in the fixture database
+ % with a single purge entry
+ DbName = ?l2b(
+ "db_v" ++ integer_to_list(VersionFrom) ++
+ "_with_1_purge_req"
+ ),
+
+ ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
+ {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual(1, couch_db:get_purge_seq(Db)),
+ couch_db:fold_purge_infos(Db, 0, fun fold_fun/2, [])
+ end),
+ ?assertEqual(8, get_disk_version_from_header(DbName)),
+ ?assertEqual([{1, <<"doc1">>}], UpgradedPurged),
+
+ {ok, Rev} = save_doc(
+ DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}
+ ),
+ {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
+ ?assertEqual(1, couch_db:get_purge_seq(Db))
+ end),
+
+ PurgeReqs = [
+ {couch_uuids:random(), <<"doc4">>, [Rev]}
+ ],
+
+ {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
+ couch_db:purge_docs(Db, PurgeReqs)
+ end),
+ ?assertEqual(PRevs, [Rev]),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
+ ?assertEqual(2, couch_db:get_purge_seq(Db))
+ end)
+ end)}.
t_upgrade_with_N_purge_req(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There is one document in the fixture database
- % with two docs that have been purged
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_2_purge_req"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(2, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{2, <<"doc2">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(2, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc4">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ % There is one document in the fixture database
+ % with two docs that have been purged
+ DbName = ?l2b(
+ "db_v" ++ integer_to_list(VersionFrom) ++
+ "_with_2_purge_req"
+ ),
+
+ ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
+ {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual(2, couch_db:get_purge_seq(Db)),
+ couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
+ end),
+ ?assertEqual(8, get_disk_version_from_header(DbName)),
+ ?assertEqual([{2, <<"doc2">>}], UpgradedPurged),
+
+ {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc4">>}, {<<"v">>, 1}]}),
+ {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc5">>}, {<<"v">>, 2}]}),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
+ ?assertEqual(2, couch_db:get_purge_seq(Db))
+ end),
+
+ PurgeReqs = [
+ {couch_uuids:random(), <<"doc4">>, [Rev]}
+ ],
+
+ {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
+ couch_db:purge_docs(Db, PurgeReqs)
+ end),
+ ?assertEqual(PRevs, [Rev]),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 2}, couch_db:get_doc_count(Db)),
+ ?assertEqual(3, couch_db:get_purge_seq(Db))
+ end)
+ end)}.
t_upgrade_with_1_purge_req_for_2_docs(VersionFrom, {_Ctx, _NewPaths}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % There are two documents (Doc4 and Doc5) in the fixture database
- % with three docs (Doc1, Doc2 and Doc3) that have been purged, and
- % with one purge req for Doc1 and another purge req for Doc 2 and Doc3
- DbName = ?l2b("db_v" ++ integer_to_list(VersionFrom)
- ++ "_with_1_purge_req_for_2_docs"),
-
- ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
- {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual(3, couch_db:get_purge_seq(Db)),
- couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
- end),
- ?assertEqual(8, get_disk_version_from_header(DbName)),
- ?assertEqual([{3,<<"doc2">>},{2,<<"doc3">>}], UpgradedPurged),
-
- {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}),
- {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
- ?assertEqual(3, couch_db:get_purge_seq(Db))
- end),
-
- PurgeReqs = [
- {couch_uuids:random(), <<"doc6">>, [Rev]}
- ],
-
- {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
- couch_db:purge_docs(Db, PurgeReqs)
- end),
- ?assertEqual(PRevs, [Rev]),
-
- couch_util:with_db(DbName, fun(Db) ->
- ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
- ?assertEqual(4, couch_db:get_purge_seq(Db))
- end)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ % There are two documents (Doc4 and Doc5) in the fixture database
+ % with three docs (Doc1, Doc2 and Doc3) that have been purged, and
+            % with one purge req for Doc1 and another purge req for Doc2 and Doc3
+ DbName = ?l2b(
+ "db_v" ++ integer_to_list(VersionFrom) ++
+ "_with_1_purge_req_for_2_docs"
+ ),
+
+ ?assertEqual(VersionFrom, get_disk_version_from_header(DbName)),
+ {ok, UpgradedPurged} = couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual(3, couch_db:get_purge_seq(Db)),
+ couch_db:fold_purge_infos(Db, 1, fun fold_fun/2, [])
+ end),
+ ?assertEqual(8, get_disk_version_from_header(DbName)),
+ ?assertEqual([{3, <<"doc2">>}, {2, <<"doc3">>}], UpgradedPurged),
+
+ {ok, Rev} = save_doc(DbName, {[{<<"_id">>, <<"doc6">>}, {<<"v">>, 1}]}),
+ {ok, _} = save_doc(DbName, {[{<<"_id">>, <<"doc7">>}, {<<"v">>, 2}]}),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 4}, couch_db:get_doc_count(Db)),
+ ?assertEqual(3, couch_db:get_purge_seq(Db))
+ end),
+
+ PurgeReqs = [
+ {couch_uuids:random(), <<"doc6">>, [Rev]}
+ ],
+
+ {ok, [{ok, PRevs}]} = couch_util:with_db(DbName, fun(Db) ->
+ couch_db:purge_docs(Db, PurgeReqs)
+ end),
+ ?assertEqual(PRevs, [Rev]),
+
+ couch_util:with_db(DbName, fun(Db) ->
+ ?assertEqual({ok, 3}, couch_db:get_doc_count(Db)),
+ ?assertEqual(4, couch_db:get_purge_seq(Db))
+ end)
+ end)}.
save_doc(DbName, Json) ->
Doc = couch_doc:from_json_obj(Json),
@@ -229,11 +242,9 @@ save_doc(DbName, Json) ->
couch_db:update_doc(Db, Doc, [])
end).
-
fold_fun({PSeq, _UUID, Id, _Revs}, Acc) ->
{ok, [{PSeq, Id} | Acc]}.
-
get_disk_version_from_header(DbFileName) ->
DbDir = config:get("couchdb", "database_dir"),
DbFilePath = filename:join([DbDir, ?l2b(?b2l(DbFileName) ++ ".couch")]),
diff --git a/src/couch/test/eunit/couch_btree_tests.erl b/src/couch/test/eunit/couch_btree_tests.erl
index c9b791d2c..1c9ba7771 100644
--- a/src/couch/test/eunit/couch_btree_tests.erl
+++ b/src/couch/test/eunit/couch_btree_tests.erl
@@ -16,13 +16,15 @@
-include_lib("couch/include/couch_db.hrl").
-define(ROWS, 1000).
--define(TIMEOUT, 60). % seconds
-
+% seconds
+-define(TIMEOUT, 60).
setup() ->
{ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
- {ok, Btree} = couch_btree:open(nil, Fd, [{compression, none},
- {reduce, fun reduce_fun/2}]),
+ {ok, Btree} = couch_btree:open(nil, Fd, [
+ {compression, none},
+ {reduce, fun reduce_fun/2}
+ ]),
{Fd, Btree}.
setup_kvs(_) ->
@@ -35,7 +37,10 @@ setup_red() ->
"even" -> {"odd", [{{Key, Idx}, 1} | Acc]};
_ -> {"even", [{{Key, Idx}, 1} | Acc]}
end
- end, {"odd", []}, lists:seq(1, ?ROWS)),
+ end,
+ {"odd", []},
+ lists:seq(1, ?ROWS)
+ ),
{Fd, Btree} = setup(),
{ok, Btree1} = couch_btree:add_remove(Btree, EvenOddKVs, []),
{Fd, Btree1}.
@@ -49,7 +54,6 @@ teardown({Fd, _}) ->
teardown(_, {Fd, _}) ->
teardown(Fd).
-
kvs_test_funs() ->
[
fun should_set_fd_correctly/2,
@@ -72,7 +76,6 @@ red_test_funs() ->
fun should_reduce_second_half/2
].
-
btree_open_test_() ->
{ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
{ok, Btree} = couch_btree:open(nil, Fd, [{compression, none}]),
@@ -88,10 +91,12 @@ sorted_kvs_test_() ->
"BTree with sorted keys",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
{
foreachx,
- fun setup_kvs/1, fun teardown/2,
+ fun setup_kvs/1,
+ fun teardown/2,
[{Sorted, Fun} || Fun <- Funs]
}
}
@@ -105,10 +110,12 @@ rsorted_kvs_test_() ->
"BTree with backward sorted keys",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
{
foreachx,
- fun setup_kvs/1, fun teardown/2,
+ fun setup_kvs/1,
+ fun teardown/2,
[{Reversed, Fun} || Fun <- Funs]
}
}
@@ -122,10 +129,12 @@ shuffled_kvs_test_() ->
"BTree with shuffled keys",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
{
foreachx,
- fun setup_kvs/1, fun teardown/2,
+ fun setup_kvs/1,
+ fun teardown/2,
[{Shuffled, Fun} || Fun <- Funs]
}
}
@@ -136,13 +145,15 @@ reductions_test_() ->
"BTree reductions",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
[
{
"Common tests",
{
foreach,
- fun setup_red/0, fun teardown/1,
+ fun setup_red/0,
+ fun teardown/1,
[
fun should_reduce_without_specified_direction/1,
fun should_reduce_forward/1,
@@ -157,7 +168,8 @@ reductions_test_() ->
"Forward direction",
{
foreachx,
- fun setup_red/1, fun teardown/2,
+ fun setup_red/1,
+ fun teardown/2,
[{fwd, F} || F <- red_test_funs()]
}
},
@@ -165,7 +177,8 @@ reductions_test_() ->
"Backward direction",
{
foreachx,
- fun setup_red/1, fun teardown/2,
+ fun setup_red/1,
+ fun teardown/2,
[{rev, F} || F <- red_test_funs()]
}
}
@@ -175,7 +188,6 @@ reductions_test_() ->
}
}.
-
should_set_fd_correctly(_, {Fd, Btree}) ->
?_assertMatch(Fd, Btree#btree.fd).
@@ -191,7 +203,7 @@ should_set_reduce_option(_, {_, Btree}) ->
?_assertMatch(ReduceFun, Btree1#btree.reduce).
should_fold_over_empty_btree(_, {_, Btree}) ->
- {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X+1} end, 0),
+ {ok, _, EmptyRes} = couch_btree:foldl(Btree, fun(_, X) -> {ok, X + 1} end, 0),
?_assertEqual(EmptyRes, 0).
should_add_all_keys(KeyValues, {Fd, Btree}) ->
@@ -214,8 +226,10 @@ should_have_lesser_size_than_file(Fd, Btree) ->
?_assert((couch_btree:size(Btree) =< couch_file:bytes(Fd))).
should_keep_root_pointer_to_kp_node(Fd, Btree) ->
- ?_assertMatch({ok, {kp_node, _}},
- couch_file:pread_term(Fd, element(1, Btree#btree.root))).
+ ?_assertMatch(
+ {ok, {kp_node, _}},
+ couch_file:pread_term(Fd, element(1, Btree#btree.root))
+ ).
should_remove_all_keys(KeyValues, Btree) ->
Keys = keys(KeyValues),
@@ -234,7 +248,10 @@ should_continuously_add_new_kv(KeyValues, {_, Btree}) ->
{ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
?assert(couch_btree:size(BtAcc2) > PrevSize),
{BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree, couch_btree:size(Btree)}, KeyValues),
+ end,
+ {Btree, couch_btree:size(Btree)},
+ KeyValues
+ ),
{
"Should continuously add key-values to btree",
[
@@ -250,7 +267,10 @@ should_continuously_remove_keys(KeyValues, {_, Btree}) ->
{ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
?assert(couch_btree:size(BtAcc2) < PrevSize),
{BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree1, couch_btree:size(Btree1)}, KeyValues),
+ end,
+ {Btree1, couch_btree:size(Btree1)},
+ KeyValues
+ ),
{
"Should continuously remove keys from btree",
[
@@ -266,48 +286,57 @@ should_insert_keys_in_reversed_order(KeyValues, {_, Btree}) ->
{ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
?assert(couch_btree:size(BtAcc2) > PrevSize),
{BtAcc2, couch_btree:size(BtAcc2)}
- end, {Btree, couch_btree:size(Btree)}, KeyValuesRev),
+ end,
+ {Btree, couch_btree:size(Btree)},
+ KeyValuesRev
+ ),
should_produce_valid_btree(Btree1, KeyValues).
should_add_every_odd_key_remove_every_even(KeyValues, {_, Btree}) ->
{ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end, {0, [], []}, KeyValues),
- {timeout, ?TIMEOUT,
- ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1))
- }.
+ {_, Rem2Keys0, Rem2Keys1} = lists:foldl(
+ fun(X, {Count, Left, Right}) ->
+ case Count rem 2 == 0 of
+ true -> {Count + 1, [X | Left], Right};
+ false -> {Count + 1, Left, [X | Right]}
+ end
+ end,
+ {0, [], []},
+ KeyValues
+ ),
+ {timeout, ?TIMEOUT, ?_assert(test_add_remove(Btree1, Rem2Keys0, Rem2Keys1))}.
should_add_every_even_key_remove_every_old(KeyValues, {_, Btree}) ->
{ok, Btree1} = couch_btree:add_remove(Btree, KeyValues, []),
- {_, Rem2Keys0, Rem2Keys1} = lists:foldl(fun(X, {Count, Left, Right}) ->
- case Count rem 2 == 0 of
- true -> {Count + 1, [X | Left], Right};
- false -> {Count + 1, Left, [X | Right]}
- end
- end, {0, [], []}, KeyValues),
- {timeout, ?TIMEOUT,
- ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0))
- }.
-
+ {_, Rem2Keys0, Rem2Keys1} = lists:foldl(
+ fun(X, {Count, Left, Right}) ->
+ case Count rem 2 == 0 of
+ true -> {Count + 1, [X | Left], Right};
+ false -> {Count + 1, Left, [X | Right]}
+ end
+ end,
+ {0, [], []},
+ KeyValues
+ ),
+ {timeout, ?TIMEOUT, ?_assert(test_add_remove(Btree1, Rem2Keys1, Rem2Keys0))}.
should_reduce_without_specified_direction({_, Btree}) ->
?_assertMatch(
{ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [])).
+ fold_reduce(Btree, [])
+ ).
should_reduce_forward({_, Btree}) ->
?_assertMatch(
{ok, [{{"odd", _}, ?ROWS div 2}, {{"even", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd}])).
+ fold_reduce(Btree, [{dir, fwd}])
+ ).
should_reduce_backward({_, Btree}) ->
?_assertMatch(
{ok, [{{"even", _}, ?ROWS div 2}, {{"odd", _}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev}])).
+ fold_reduce(Btree, [{dir, rev}])
+ ).
should_reduce_whole_range(fwd, {_, Btree}) ->
{SK, EK} = {{"even", 0}, {"odd", ?ROWS - 1}},
@@ -315,20 +344,30 @@ should_reduce_whole_range(fwd, {_, Btree}) ->
{
"include endkey",
?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 2},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key, EK}]))
+ {ok, [
+ {{"odd", 1}, ?ROWS div 2},
+ {{"even", 2}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, fwd},
+ {start_key, SK},
+ {end_key, EK}
+ ])
+ )
},
{
"exclude endkey",
?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
+ {ok, [
+ {{"odd", 1}, (?ROWS div 2) - 1},
+ {{"even", 2}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, fwd},
+ {start_key, SK},
+ {end_key_gt, EK}
+ ])
+ )
}
];
should_reduce_whole_range(rev, {_, Btree}) ->
@@ -337,20 +376,30 @@ should_reduce_whole_range(rev, {_, Btree}) ->
{
"include endkey",
?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
+ {ok, [
+ {{"even", ?ROWS}, ?ROWS div 2},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, rev},
+ {start_key, SK},
+ {end_key, EK}
+ ])
+ )
},
{
"exclude endkey",
?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
+ {ok, [
+ {{"even", ?ROWS}, (?ROWS div 2) - 1},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, rev},
+ {start_key, SK},
+ {end_key_gt, EK}
+ ])
+ )
}
].
@@ -360,19 +409,30 @@ should_reduce_first_half(fwd, {_, Btree}) ->
{
"include endkey",
?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 4},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK}, {end_key, EK}]))
+ {ok, [
+ {{"odd", 1}, ?ROWS div 4},
+ {{"even", 2}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, fwd},
+ {start_key, SK},
+ {end_key, EK}
+ ])
+ )
},
{
"exclude endkey",
?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 4) - 1},
- {{"even", 2}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
+ {ok, [
+ {{"odd", 1}, (?ROWS div 4) - 1},
+ {{"even", 2}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, fwd},
+ {start_key, SK},
+ {end_key_gt, EK}
+ ])
+ )
}
];
should_reduce_first_half(rev, {_, Btree}) ->
@@ -381,20 +441,30 @@ should_reduce_first_half(rev, {_, Btree}) ->
{
"include endkey",
?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 4) + 1},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
+ {ok, [
+ {{"even", ?ROWS}, (?ROWS div 4) + 1},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, rev},
+ {start_key, SK},
+ {end_key, EK}
+ ])
+ )
},
{
"exclude endkey",
?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 4},
- {{"odd", ?ROWS - 1}, ?ROWS div 2}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
+ {ok, [
+ {{"even", ?ROWS}, ?ROWS div 4},
+ {{"odd", ?ROWS - 1}, ?ROWS div 2}
+ ]},
+ fold_reduce(Btree, [
+ {dir, rev},
+ {start_key, SK},
+ {end_key_gt, EK}
+ ])
+ )
}
].
@@ -404,20 +474,30 @@ should_reduce_second_half(fwd, {_, Btree}) ->
{
"include endkey",
?_assertMatch(
- {ok, [{{"odd", 1}, ?ROWS div 2},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key, EK}]))
+ {ok, [
+ {{"odd", 1}, ?ROWS div 2},
+ {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}
+ ]},
+ fold_reduce(Btree, [
+ {dir, fwd},
+ {start_key, SK},
+ {end_key, EK}
+ ])
+ )
},
{
"exclude endkey",
?_assertMatch(
- {ok, [{{"odd", 1}, (?ROWS div 2) - 1},
- {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, fwd},
- {start_key, SK},
- {end_key_gt, EK}]))
+ {ok, [
+ {{"odd", 1}, (?ROWS div 2) - 1},
+ {{"even", ?ROWS div 2}, (?ROWS div 4) + 1}
+ ]},
+ fold_reduce(Btree, [
+ {dir, fwd},
+ {start_key, SK},
+ {end_key_gt, EK}
+ ])
+ )
}
];
should_reduce_second_half(rev, {_, Btree}) ->
@@ -426,20 +506,30 @@ should_reduce_second_half(rev, {_, Btree}) ->
{
"include endkey",
?_assertMatch(
- {ok, [{{"even", ?ROWS}, ?ROWS div 2},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key, EK}]))
+ {ok, [
+ {{"even", ?ROWS}, ?ROWS div 2},
+ {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}
+ ]},
+ fold_reduce(Btree, [
+ {dir, rev},
+ {start_key, SK},
+ {end_key, EK}
+ ])
+ )
},
{
"exclude endkey",
?_assertMatch(
- {ok, [{{"even", ?ROWS}, (?ROWS div 2) - 1},
- {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}]},
- fold_reduce(Btree, [{dir, rev},
- {start_key, SK},
- {end_key_gt, EK}]))
+ {ok, [
+ {{"even", ?ROWS}, (?ROWS div 2) - 1},
+ {{"odd", (?ROWS div 2) + 1}, (?ROWS div 4) + 1}
+ ]},
+ fold_reduce(Btree, [
+ {dir, rev},
+ {start_key, SK},
+ {end_key_gt, EK}
+ ])
+ )
}
].
@@ -459,9 +549,12 @@ fold_reduce(Btree, Opts) ->
FoldFun = fun(GroupedKey, Unreduced, Acc) ->
{ok, [{GroupedKey, couch_btree:final_reduce(Btree, Unreduced)} | Acc]}
end,
- couch_btree:fold_reduce(Btree, FoldFun, [],
- [{key_group_fun, GroupFun}] ++ Opts).
-
+ couch_btree:fold_reduce(
+ Btree,
+ FoldFun,
+ [],
+ [{key_group_fun, GroupFun}] ++ Opts
+ ).
keys(KVs) ->
[K || {K, _} <- KVs].
@@ -471,7 +564,6 @@ reduce_fun(reduce, KVs) ->
reduce_fun(rereduce, Reds) ->
lists:sum(Reds).
-
shuffle(List) ->
randomize(round(math:log(length(List)) + 0.5), List).
@@ -481,7 +573,10 @@ randomize(T, List) ->
lists:foldl(
fun(_E, Acc) ->
randomize(Acc)
- end, randomize(List), lists:seq(1, (T - 1))).
+ end,
+ randomize(List),
+ lists:seq(1, (T - 1))
+ ).
randomize(List) ->
D = lists:map(fun(A) -> {couch_rand:uniform(), A} end, List),
@@ -500,18 +595,24 @@ test_add_remove(Btree, OutKeyValues, RemainingKeyValues) ->
fun({K, _}, BtAcc) ->
{ok, BtAcc2} = couch_btree:add_remove(BtAcc, [], [K]),
BtAcc2
- end, Btree, OutKeyValues),
+ end,
+ Btree,
+ OutKeyValues
+ ),
true = test_btree(Btree2, RemainingKeyValues),
Btree3 = lists:foldl(
fun(KV, BtAcc) ->
{ok, BtAcc2} = couch_btree:add_remove(BtAcc, [KV], []),
BtAcc2
- end, Btree2, OutKeyValues),
+ end,
+ Btree2,
+ OutKeyValues
+ ),
true = test_btree(Btree3, OutKeyValues ++ RemainingKeyValues).
test_key_access(Btree, List) ->
- FoldFun = fun(Element, {[HAcc|TAcc], Count}) ->
+ FoldFun = fun(Element, {[HAcc | TAcc], Count}) ->
case Element == HAcc of
true -> {ok, {TAcc, Count + 1}};
_ -> {ok, {TAcc, Count + 1}}
@@ -520,8 +621,12 @@ test_key_access(Btree, List) ->
Length = length(List),
Sorted = lists:sort(List),
{ok, _, {[], Length}} = couch_btree:foldl(Btree, FoldFun, {Sorted, 0}),
- {ok, _, {[], Length}} = couch_btree:fold(Btree, FoldFun,
- {Sorted, 0}, [{dir, rev}]),
+ {ok, _, {[], Length}} = couch_btree:fold(
+ Btree,
+ FoldFun,
+ {Sorted, 0},
+ [{dir, rev}]
+ ),
ok.
test_lookup_access(Btree, KeyValues) ->
@@ -529,9 +634,15 @@ test_lookup_access(Btree, KeyValues) ->
lists:foreach(
fun({Key, Value}) ->
[{ok, {Key, Value}}] = couch_btree:lookup(Btree, [Key]),
- {ok, _, true} = couch_btree:foldl(Btree, FoldFun,
- {Key, Value}, [{start_key, Key}])
- end, KeyValues).
+ {ok, _, true} = couch_btree:foldl(
+ Btree,
+ FoldFun,
+ {Key, Value},
+ [{start_key, Key}]
+ )
+ end,
+ KeyValues
+ ).
test_final_reductions(Btree, KeyValues) ->
KVLen = length(KeyValues),
@@ -545,18 +656,28 @@ test_final_reductions(Btree, KeyValues) ->
CountToEnd = couch_btree:final_reduce(Btree, LeadingReds),
{ok, Acc + 1}
end,
- {LStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
- end,
- {RStartKey, _} = case KVLen of
- 0 -> {nil, nil};
- _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
- end,
- {ok, _, FoldLRed} = couch_btree:foldl(Btree, FoldLFun, 0,
- [{start_key, LStartKey}]),
- {ok, _, FoldRRed} = couch_btree:fold(Btree, FoldRFun, 0,
- [{dir, rev}, {start_key, RStartKey}]),
+ {LStartKey, _} =
+ case KVLen of
+ 0 -> {nil, nil};
+ _ -> lists:nth(KVLen div 3 + 1, lists:sort(KeyValues))
+ end,
+ {RStartKey, _} =
+ case KVLen of
+ 0 -> {nil, nil};
+ _ -> lists:nth(KVLen div 3, lists:sort(KeyValues))
+ end,
+ {ok, _, FoldLRed} = couch_btree:foldl(
+ Btree,
+ FoldLFun,
+ 0,
+ [{start_key, LStartKey}]
+ ),
+ {ok, _, FoldRRed} = couch_btree:fold(
+ Btree,
+ FoldRFun,
+ 0,
+ [{dir, rev}, {start_key, RStartKey}]
+ ),
KVLen = FoldLRed + FoldRRed,
ok.
diff --git a/src/couch/test/eunit/couch_changes_tests.erl b/src/couch/test/eunit/couch_changes_tests.erl
index 848b471f9..02b69f132 100644
--- a/src/couch/test/eunit/couch_changes_tests.erl
+++ b/src/couch/test/eunit/couch_changes_tests.erl
@@ -28,39 +28,49 @@
setup() ->
DbName = ?tempdb(),
{ok, Db} = create_db(DbName),
- Revs = [R || {ok, R} <- [
- save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
- save_doc(Db, {[{<<"_id">>, <<"doc5">>}]})
- ]],
+ Revs = [
+ R
+ || {ok, R} <- [
+ save_doc(Db, {[{<<"_id">>, <<"doc1">>}]}),
+ save_doc(Db, {[{<<"_id">>, <<"doc2">>}]}),
+ save_doc(Db, {[{<<"_id">>, <<"doc3">>}]}),
+ save_doc(Db, {[{<<"_id">>, <<"doc4">>}]}),
+ save_doc(Db, {[{<<"_id">>, <<"doc5">>}]})
+ ]
+ ],
Rev = lists:nth(3, Revs),
{ok, Db1} = couch_db:reopen(Db),
{ok, Rev1} = save_doc(Db1, {[{<<"_id">>, <<"doc3">>}, {<<"_rev">>, Rev}]}),
Revs1 = Revs ++ [Rev1],
- Revs2 = Revs1 ++ [R || {ok, R} <- [
- save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}),
- save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]})
- ]],
- config:set("native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist=false),
+ Revs2 =
+ Revs1 ++
+ [
+ R
+ || {ok, R} <- [
+ save_doc(Db1, {[{<<"_id">>, <<"doc6">>}]}),
+ save_doc(Db1, {[{<<"_id">>, <<"_design/foo">>}]}),
+ save_doc(Db1, {[{<<"_id">>, <<"doc7">>}]}),
+ save_doc(Db1, {[{<<"_id">>, <<"doc8">>}]})
+ ]
+ ],
+ config:set(
+ "native_query_servers", "erlang", "{couch_native_process, start_link, []}", _Persist = false
+ ),
{DbName, list_to_tuple(Revs2)}.
teardown({DbName, _}) ->
- config:delete("native_query_servers", "erlang", _Persist=false),
+ config:delete("native_query_servers", "erlang", _Persist = false),
delete_db(DbName),
ok.
-
changes_test_() ->
{
"Changes feed",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
[
filter_by_selector(),
filter_by_doc_id(),
@@ -78,7 +88,8 @@ filter_by_doc_id() ->
"Filter _doc_id",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_filter_by_specific_doc_ids/1,
fun should_filter_by_specific_doc_ids_descending/1,
@@ -94,7 +105,8 @@ filter_by_selector() ->
"Filter _selector",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_select_basic/1,
fun should_select_with_since/1,
@@ -108,13 +120,13 @@ filter_by_selector() ->
}
}.
-
filter_by_design() ->
{
"Filter _design",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_emit_only_design_documents/1
]
@@ -138,7 +150,8 @@ filter_by_filter_function() ->
"Filter by filters",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_filter_by_doc_attribute/1,
fun should_filter_by_user_ctx/1
@@ -151,7 +164,8 @@ filter_by_view() ->
"Filter _view",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_filter_by_view/1,
fun should_filter_by_erlang_view/1
@@ -164,7 +178,8 @@ continuous_feed() ->
"Continuous Feed",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_filter_continuous_feed_by_specific_doc_ids/1,
fun should_end_changes_when_db_deleted/1
@@ -172,7 +187,6 @@ continuous_feed() ->
}
}.
-
should_filter_by_specific_doc_ids({DbName, _}) ->
?_test(
begin
@@ -190,7 +204,8 @@ should_filter_by_specific_doc_ids({DbName, _}) ->
?assertEqual(<<"doc3">>, Id2),
?assertEqual(6, Seq2),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_filter_by_specific_doc_ids_descending({DbName, _}) ->
?_test(
@@ -210,7 +225,8 @@ should_filter_by_specific_doc_ids_descending({DbName, _}) ->
?assertEqual(<<"doc4">>, Id2),
?assertEqual(4, Seq2),
?assertEqual(4, LastSeq)
- end).
+ end
+ ).
should_filter_by_specific_doc_ids_with_since({DbName, _}) ->
?_test(
@@ -228,7 +244,8 @@ should_filter_by_specific_doc_ids_with_since({DbName, _}) ->
?assertEqual(<<"doc3">>, Id1),
?assertEqual(6, Seq1),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_filter_by_specific_doc_ids_no_result({DbName, _}) ->
?_test(
@@ -243,7 +260,8 @@ should_filter_by_specific_doc_ids_no_result({DbName, _}) ->
?assertEqual(0, length(Rows)),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_handle_deleted_docs({DbName, Revs}) ->
?_test(
@@ -252,9 +270,12 @@ should_handle_deleted_docs({DbName, Revs}) ->
{ok, Db} = couch_db:open_int(DbName, []),
{ok, _} = save_doc(
Db,
- {[{<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}]}),
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"_deleted">>, true},
+ {<<"_rev">>, Rev3_2}
+ ]}
+ ),
ChArgs = #changes_args{
filter = "_doc_ids",
@@ -270,7 +291,8 @@ should_handle_deleted_docs({DbName, Revs}) ->
Rows
),
?assertEqual(11, LastSeq)
- end).
+ end
+ ).
should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
?_test(
@@ -305,14 +327,29 @@ should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
Rev4 = element(4, Revs),
Rev3_2 = element(6, Revs),
- {ok, Rev4_2} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}]}),
+ {ok, Rev4_2} = save_doc(
+ Db,
+ {[
+ {<<"_id">>, <<"doc4">>},
+ {<<"_rev">>, Rev4}
+ ]}
+ ),
{ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc11">>}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4_2}]}),
+ {ok, _} = save_doc(
+ Db,
+ {[
+ {<<"_id">>, <<"doc4">>},
+ {<<"_rev">>, Rev4_2}
+ ]}
+ ),
{ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc12">>}]}),
- {ok, Rev3_3} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_2}]}),
+ {ok, Rev3_3} = save_doc(
+ Db,
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"_rev">>, Rev3_2}
+ ]}
+ ),
reset_row_notifications(),
ok = unpause(Consumer),
?assertEqual(ok, wait_row_notifications(2)),
@@ -327,8 +364,13 @@ should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
?assertEqual(17, Row16#row.seq),
clear_rows(Consumer),
- {ok, _Rev3_4} = save_doc(Db, {[{<<"_id">>, <<"doc3">>},
- {<<"_rev">>, Rev3_3}]}),
+ {ok, _Rev3_4} = save_doc(
+ Db,
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"_rev">>, Rev3_3}
+ ]}
+ ),
reset_row_notifications(),
ok = unpause(Consumer),
?assertEqual(ok, wait_row_notifications(1)),
@@ -340,8 +382,8 @@ should_filter_continuous_feed_by_specific_doc_ids({DbName, Revs}) ->
stop_consumer(Consumer),
?assertMatch([#row{seq = 18, id = <<"doc3">>}], FinalRows)
- end).
-
+ end
+ ).
should_end_changes_when_db_deleted({DbName, _Revs}) ->
?_test(begin
@@ -361,7 +403,6 @@ should_end_changes_when_db_deleted({DbName, _Revs}) ->
ok
end).
-
should_select_basic({DbName, _}) ->
?_test(
begin
@@ -374,7 +415,8 @@ should_select_basic({DbName, _}) ->
?assertEqual(<<"doc3">>, Id),
?assertEqual(6, Seq),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_select_with_since({DbName, _}) ->
?_test(
@@ -389,7 +431,8 @@ should_select_with_since({DbName, _}) ->
?assertEqual(<<"doc8">>, Id),
?assertEqual(10, Seq),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_select_when_no_result({DbName, _}) ->
?_test(
@@ -400,7 +443,8 @@ should_select_when_no_result({DbName, _}) ->
{Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
?assertEqual(0, length(Rows)),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_select_with_deleted_docs({DbName, Revs}) ->
?_test(
@@ -409,9 +453,12 @@ should_select_with_deleted_docs({DbName, Revs}) ->
{ok, Db} = couch_db:open_int(DbName, []),
{ok, _} = save_doc(
Db,
- {[{<<"_id">>, <<"doc3">>},
- {<<"_deleted">>, true},
- {<<"_rev">>, Rev3_2}]}),
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"_deleted">>, true},
+ {<<"_rev">>, Rev3_2}
+ ]}
+ ),
ChArgs = #changes_args{filter = "_selector"},
Selector = {[{<<"_id">>, <<"doc3">>}]},
Req = {json_req, {[{<<"selector">>, Selector}]}},
@@ -421,7 +468,8 @@ should_select_with_deleted_docs({DbName, Revs}) ->
Rows
),
?assertEqual(11, LastSeq)
- end).
+ end
+ ).
should_select_with_continuous({DbName, Revs}) ->
?_test(
@@ -437,8 +485,8 @@ should_select_with_continuous({DbName, Revs}) ->
ok = pause(Consumer),
Rows = get_rows(Consumer),
?assertMatch(
- [#row{seq = 10, id = <<"doc8">>, deleted = false}],
- Rows
+ [#row{seq = 10, id = <<"doc8">>, deleted = false}],
+ Rows
),
clear_rows(Consumer),
{ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc01">>}]}),
@@ -448,45 +496,60 @@ should_select_with_continuous({DbName, Revs}) ->
?assertEqual([], get_rows(Consumer)),
Rev4 = element(4, Revs),
Rev8 = element(10, Revs),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc8">>},
- {<<"_rev">>, Rev8}]}),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"doc4">>},
- {<<"_rev">>, Rev4}]}),
+ {ok, _} = save_doc(
+ Db,
+ {[
+ {<<"_id">>, <<"doc8">>},
+ {<<"_rev">>, Rev8}
+ ]}
+ ),
+ {ok, _} = save_doc(
+ Db,
+ {[
+ {<<"_id">>, <<"doc4">>},
+ {<<"_rev">>, Rev4}
+ ]}
+ ),
reset_row_notifications(),
ok = unpause(Consumer),
?assertEqual(ok, wait_row_notifications(1)),
ok = pause(Consumer),
NewRows = get_rows(Consumer),
?assertMatch(
- [#row{seq = _, id = <<"doc8">>, deleted = false}],
- NewRows
+ [#row{seq = _, id = <<"doc8">>, deleted = false}],
+ NewRows
)
- end).
+ end
+ ).
should_stop_selector_when_db_deleted({DbName, _Revs}) ->
?_test(
- begin
- {ok, _Db} = couch_db:open_int(DbName, []),
- ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
- Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector}]}},
- Consumer = spawn_consumer(DbName, ChArgs, Req),
- ok = pause(Consumer),
- ok = couch_server:delete(DbName, [?ADMIN_CTX]),
- ok = unpause(Consumer),
- {_Rows, _LastSeq} = wait_finished(Consumer),
- stop_consumer(Consumer),
- ok
- end).
-
+ begin
+ {ok, _Db} = couch_db:open_int(DbName, []),
+ ChArgs = #changes_args{filter = "_selector", feed = "continuous"},
+ Selector = {[{<<"_id">>, <<"doc3">>}]},
+ Req = {json_req, {[{<<"selector">>, Selector}]}},
+ Consumer = spawn_consumer(DbName, ChArgs, Req),
+ ok = pause(Consumer),
+ ok = couch_server:delete(DbName, [?ADMIN_CTX]),
+ ok = unpause(Consumer),
+ {_Rows, _LastSeq} = wait_finished(Consumer),
+ stop_consumer(Consumer),
+ ok
+ end
+ ).
should_select_with_empty_fields({DbName, _}) ->
?_test(
begin
- ChArgs = #changes_args{filter = "_selector", include_docs=true},
+ ChArgs = #changes_args{filter = "_selector", include_docs = true},
Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector},
- {<<"fields">>, []}]}},
+ Req =
+ {json_req,
+ {[
+ {<<"selector">>, Selector},
+ {<<"fields">>, []}
+ ]}},
{Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
?assertEqual(1, length(Rows)),
[#row{seq = Seq, id = Id, doc = Doc}] = Rows,
@@ -494,15 +557,20 @@ should_select_with_empty_fields({DbName, _}) ->
?assertEqual(6, Seq),
?assertEqual(UpSeq, LastSeq),
?assertMatch({[{_K1, _V1}, {_K2, _V2}]}, Doc)
- end).
+ end
+ ).
should_select_with_fields({DbName, _}) ->
?_test(
begin
- ChArgs = #changes_args{filter = "_selector", include_docs=true},
+ ChArgs = #changes_args{filter = "_selector", include_docs = true},
Selector = {[{<<"_id">>, <<"doc3">>}]},
- Req = {json_req, {[{<<"selector">>, Selector},
- {<<"fields">>, [<<"_id">>, <<"nope">>]}]}},
+ Req =
+ {json_req,
+ {[
+ {<<"selector">>, Selector},
+ {<<"fields">>, [<<"_id">>, <<"nope">>]}
+ ]}},
{Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
?assertEqual(1, length(Rows)),
[#row{seq = Seq, id = Id, doc = Doc}] = Rows,
@@ -510,8 +578,8 @@ should_select_with_fields({DbName, _}) ->
?assertEqual(6, Seq),
?assertEqual(UpSeq, LastSeq),
?assertMatch(Doc, {[{<<"_id">>, <<"doc3">>}]})
- end).
-
+ end
+ ).
should_emit_only_design_documents({DbName, Revs}) ->
?_test(
@@ -526,11 +594,15 @@ should_emit_only_design_documents({DbName, Revs}) ->
?assertEqual(UpSeq, LastSeq),
?assertEqual([#row{seq = 8, id = <<"_design/foo">>}], Rows),
-
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
- {ok, _} = save_doc(Db, {[{<<"_id">>, <<"_design/foo">>},
- {<<"_rev">>, element(8, Revs)},
- {<<"_deleted">>, true}]}),
+ {ok, _} = save_doc(
+ Db,
+ {[
+ {<<"_id">>, <<"_design/foo">>},
+ {<<"_rev">>, element(8, Revs)},
+ {<<"_deleted">>, true}
+ ]}
+ ),
couch_db:close(Db),
{Rows2, LastSeq2, _} = run_changes_query(DbName, ChArgs, Req),
@@ -539,11 +611,18 @@ should_emit_only_design_documents({DbName, Revs}) ->
?assertEqual(1, length(Rows2)),
?assertEqual(UpSeq2, LastSeq2),
- ?assertEqual([#row{seq = 11,
- id = <<"_design/foo">>,
- deleted = true}],
- Rows2)
- end).
+ ?assertEqual(
+ [
+ #row{
+ seq = 11,
+ id = <<"_design/foo">>,
+ deleted = true
+ }
+ ],
+ Rows2
+ )
+ end
+ ).
%% should_receive_heartbeats(_) ->
%% {timeout, ?TEST_TIMEOUT div 1000,
@@ -616,16 +695,21 @@ should_filter_by_doc_attribute({DbName, _}) ->
?_test(
begin
DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"valid">>, <<"function(doc, req) {"
- " if (doc._id == 'doc3') {"
- " return true; "
- "} }">>}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDocId},
+ {<<"language">>, <<"javascript">>},
+ {<<"filters">>,
+ {[
+ {<<"valid">>, <<
+ "function(doc, req) {"
+ " if (doc._id == 'doc3') {"
+ " return true; "
+ "} }"
+ >>}
+ ]}}
+ ]}
+ ),
ChArgs = #changes_args{filter = "app/valid"},
Req = {json_req, null},
ok = update_ddoc(DbName, DDoc),
@@ -635,28 +719,38 @@ should_filter_by_doc_attribute({DbName, _}) ->
?assertEqual(<<"doc3">>, Id),
?assertEqual(6, Seq),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_filter_by_user_ctx({DbName, _}) ->
?_test(
begin
DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"filters">>, {[
- {<<"valid">>, <<"function(doc, req) {"
- " if (req.userCtx.name == doc._id) {"
- " return true; "
- "} }">>}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDocId},
+ {<<"language">>, <<"javascript">>},
+ {<<"filters">>,
+ {[
+ {<<"valid">>, <<
+ "function(doc, req) {"
+ " if (req.userCtx.name == doc._id) {"
+ " return true; "
+ "} }"
+ >>}
+ ]}}
+ ]}
+ ),
ChArgs = #changes_args{filter = "app/valid"},
UserCtx = #user_ctx{name = <<"doc3">>, roles = []},
{ok, DbRec} = couch_db:clustered_db(DbName, UserCtx),
- Req = {json_req, {[{
- <<"userCtx">>, couch_util:json_user_ctx(DbRec)
- }]}},
+ Req =
+ {json_req,
+ {[
+ {
+ <<"userCtx">>, couch_util:json_user_ctx(DbRec)
+ }
+ ]}},
ok = update_ddoc(DbName, DDoc),
{Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
?assertEqual(1, length(Rows)),
@@ -664,30 +758,42 @@ should_filter_by_user_ctx({DbName, _}) ->
?assertEqual(<<"doc3">>, Id),
?assertEqual(6, Seq),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_filter_by_view({DbName, _}) ->
?_test(
begin
DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"valid">>, {[
- {<<"map">>, <<"function(doc) {"
- " if (doc._id == 'doc3') {"
- " emit(doc); "
- "} }">>}
- ]}}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "_view"},
- Req = {json_req, {[{
- <<"query">>, {[
- {<<"view">>, <<"app/valid">>}
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDocId},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {<<"valid">>,
+ {[
+ {<<"map">>, <<
+ "function(doc) {"
+ " if (doc._id == 'doc3') {"
+ " emit(doc); "
+ "} }"
+ >>}
+ ]}}
+ ]}}
]}
- }]}},
+ ),
+ ChArgs = #changes_args{filter = "_view"},
+ Req =
+ {json_req,
+ {[
+ {
+ <<"query">>,
+ {[
+ {<<"view">>, <<"app/valid">>}
+ ]}
+ }
+ ]}},
ok = update_ddoc(DbName, DDoc),
{Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
?assertEqual(1, length(Rows)),
@@ -695,32 +801,44 @@ should_filter_by_view({DbName, _}) ->
?assertEqual(<<"doc3">>, Id),
?assertEqual(6, Seq),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
should_filter_by_erlang_view({DbName, _}) ->
?_test(
begin
DDocId = <<"_design/app">>,
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDocId},
- {<<"language">>, <<"erlang">>},
- {<<"views">>, {[
- {<<"valid">>, {[
- {<<"map">>, <<"fun({Doc}) ->"
- " case lists:keyfind(<<\"_id\">>, 1, Doc) of"
- " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); "
- " false -> ok"
- " end "
- "end.">>}
- ]}}
- ]}}
- ]}),
- ChArgs = #changes_args{filter = "_view"},
- Req = {json_req, {[{
- <<"query">>, {[
- {<<"view">>, <<"app/valid">>}
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDocId},
+ {<<"language">>, <<"erlang">>},
+ {<<"views">>,
+ {[
+ {<<"valid">>,
+ {[
+ {<<"map">>, <<
+ "fun({Doc}) ->"
+ " case lists:keyfind(<<\"_id\">>, 1, Doc) of"
+ " {<<\"_id\">>, <<\"doc3\">>} -> Emit(Doc, null); "
+ " false -> ok"
+ " end "
+ "end."
+ >>}
+ ]}}
+ ]}}
]}
- }]}},
+ ),
+ ChArgs = #changes_args{filter = "_view"},
+ Req =
+ {json_req,
+ {[
+ {
+ <<"query">>,
+ {[
+ {<<"view">>, <<"app/valid">>}
+ ]}
+ }
+ ]}},
ok = update_ddoc(DbName, DDoc),
{Rows, LastSeq, UpSeq} = run_changes_query(DbName, ChArgs, Req),
?assertEqual(1, length(Rows)),
@@ -728,7 +846,8 @@ should_filter_by_erlang_view({DbName, _}) ->
?assertEqual(<<"doc3">>, Id),
?assertEqual(6, Seq),
?assertEqual(UpSeq, LastSeq)
- end).
+ end
+ ).
update_ddoc(DbName, DDoc) ->
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
@@ -752,12 +871,13 @@ save_doc(Db, Json) ->
get_rows({Consumer, _}) ->
Ref = make_ref(),
Consumer ! {get_rows, Ref},
- Resp = receive
- {rows, Ref, Rows} ->
- Rows
- after ?TIMEOUT ->
- timeout
- end,
+ Resp =
+ receive
+ {rows, Ref, Rows} ->
+ Rows
+ after ?TIMEOUT ->
+ timeout
+ end,
?assertNotEqual(timeout, Resp),
Resp.
@@ -776,48 +896,52 @@ get_rows({Consumer, _}) ->
clear_rows({Consumer, _}) ->
Ref = make_ref(),
Consumer ! {reset, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
+ Resp =
+ receive
+ {ok, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end,
?assertNotEqual(timeout, Resp),
Resp.
stop_consumer({Consumer, _}) ->
Ref = make_ref(),
Consumer ! {stop, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
+ Resp =
+ receive
+ {ok, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end,
?assertNotEqual(timeout, Resp),
Resp.
pause({Consumer, _}) ->
Ref = make_ref(),
Consumer ! {pause, Ref},
- Resp = receive
- {paused, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
+ Resp =
+ receive
+ {paused, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end,
?assertNotEqual(timeout, Resp),
Resp.
unpause({Consumer, _}) ->
Ref = make_ref(),
Consumer ! {continue, Ref},
- Resp = receive
- {ok, Ref} ->
- ok
- after ?TIMEOUT ->
- timeout
- end,
+ Resp =
+ receive
+ {ok, Ref} ->
+ ok
+ after ?TIMEOUT ->
+ timeout
+ end,
?assertNotEqual(timeout, Resp),
Resp.
@@ -828,20 +952,23 @@ wait_finished({_, ConsumerRef}) ->
{'DOWN', ConsumerRef, _, _, Msg} when Msg == normal; Msg == ok ->
ok;
{'DOWN', ConsumerRef, _, _, Msg} ->
- erlang:error({consumer_died, [
+ erlang:error(
+ {consumer_died, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {value, Msg}
+ ]}
+ )
+ after ?TIMEOUT ->
+ erlang:error(
+ {consumer_died, [
{module, ?MODULE},
{line, ?LINE},
- {value, Msg}
- ]})
- after ?TIMEOUT ->
- erlang:error({consumer_died, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, timeout}
- ]})
+ {value, timeout}
+ ]}
+ )
end.
-
reset_row_notifications() ->
receive
row ->
@@ -850,7 +977,6 @@ reset_row_notifications() ->
ok
end.
-
wait_row_notifications(N) ->
receive
row when N == 1 ->
@@ -861,7 +987,6 @@ wait_row_notifications(N) ->
timeout
end.
-
spawn_consumer(DbName, ChangesArgs0, Req) ->
Parent = self(),
spawn_monitor(fun() ->
@@ -884,13 +1009,16 @@ spawn_consumer(DbName, ChangesArgs0, Req) ->
maybe_pause(Parent, Acc)
end,
{ok, Db} = couch_db:open_int(DbName, []),
- ChangesArgs = case (ChangesArgs0#changes_args.timeout =:= undefined)
- andalso (ChangesArgs0#changes_args.heartbeat =:= undefined) of
- true ->
- ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100};
- false ->
- ChangesArgs0
- end,
+ ChangesArgs =
+ case
+ (ChangesArgs0#changes_args.timeout =:= undefined) andalso
+ (ChangesArgs0#changes_args.heartbeat =:= undefined)
+ of
+ true ->
+ ChangesArgs0#changes_args{timeout = 1000, heartbeat = 100};
+ false ->
+ ChangesArgs0
+ end,
FeedFun = couch_changes:handle_db_changes(ChangesArgs, Req, Db),
try
FeedFun({Callback, []})
@@ -920,11 +1048,14 @@ maybe_pause(Parent, Acc) ->
Parent ! {ok, Ref},
throw({stop, Acc});
V when V /= updated ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {value, V},
- {reason, "Received unexpected message"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {value, V},
+ {reason, "Received unexpected message"}
+ ]}
+ )
after 0 ->
Acc
end.
diff --git a/src/couch/test/eunit/couch_db_doc_tests.erl b/src/couch/test/eunit/couch_db_doc_tests.erl
index 916b63207..dc1ac79e6 100644
--- a/src/couch/test/eunit/couch_db_doc_tests.erl
+++ b/src/couch/test/eunit/couch_db_doc_tests.erl
@@ -18,7 +18,6 @@
start() ->
test_util:start_couch([ioq]).
-
setup() ->
DbName = ?tempdb(),
config:set("couchdb", "stem_interactive_updates", "false", false),
@@ -26,21 +25,21 @@ setup() ->
couch_db:close(Db),
DbName.
-
teardown(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]),
ok.
-
couch_db_doc_test_() ->
{
"CouchDB doc tests",
{
setup,
- fun start/0, fun test_util:stop_couch/1,
+ fun start/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_truncate_number_of_revisions/1,
fun should_raise_bad_request_on_invalid_rev/1,
@@ -50,7 +49,6 @@ couch_db_doc_test_() ->
}
}.
-
should_truncate_number_of_revisions(DbName) ->
DocId = <<"foo">>,
Db = open_db(DbName),
@@ -60,7 +58,6 @@ should_truncate_number_of_revisions(DbName) ->
{ok, [{ok, #doc{revs = {11, Revs}}}]} = open_doc_rev(Db, DocId, Rev10),
?_assertEqual(5, length(Revs)).
-
should_raise_bad_request_on_invalid_rev(DbName) ->
DocId = <<"foo">>,
InvalidRev1 = <<"foo">>,
@@ -70,18 +67,15 @@ should_raise_bad_request_on_invalid_rev(DbName) ->
Db = open_db(DbName),
create_doc(Db, DocId),
[
- {InvalidRev1,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))},
- {InvalidRev2,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))},
- {InvalidRev3,
- ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))}
+ {InvalidRev1, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))},
+ {InvalidRev2, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))},
+ {InvalidRev3, ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))}
].
should_allow_access_in_doc_keys_test(_DbName) ->
Json = <<"{\"_id\":\"foo\",\"_access\":[\"test\"]}">>,
EJson = couch_util:json_decode(Json),
- Expected = {[{<<"_id">>,<<"foo">>}, {<<"_access">>, [<<"test">>]}]},
+ Expected = {[{<<"_id">>, <<"foo">>}, {<<"_access">>, [<<"test">>]}]},
EJson = Expected,
Doc = couch_doc:from_json_obj(EJson),
NewEJson = couch_doc:to_json_obj(Doc, []),
@@ -91,31 +85,33 @@ open_db(DbName) ->
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
Db.
-
create_doc(Db, DocId) ->
add_revision(Db, DocId, undefined).
-
open_doc_rev(Db0, DocId, Rev) ->
{ok, Db} = couch_db:reopen(Db0),
couch_db:open_doc_revs(Db, DocId, [couch_doc:parse_rev(Rev)], []).
-
add_revision(Db, DocId, undefined) ->
add_revision(Db, DocId, []);
add_revision(Db, DocId, Rev) when is_binary(Rev) ->
add_revision(Db, DocId, [{<<"_rev">>, Rev}]);
add_revision(Db0, DocId, Rev) ->
{ok, Db} = couch_db:reopen(Db0),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"value">>, DocId}
- ] ++ Rev}),
+ Doc = couch_doc:from_json_obj({
+ [
+ {<<"_id">>, DocId},
+ {<<"value">>, DocId}
+ ] ++ Rev
+ }),
{ok, NewRev} = couch_db:update_doc(Db, Doc, []),
couch_doc:rev_to_str(NewRev).
-
add_revisions(Db, DocId, Rev, N) ->
- lists:foldl(fun(_, OldRev) ->
- add_revision(Db, DocId, OldRev)
- end, Rev, lists:seq(1, N)).
+ lists:foldl(
+ fun(_, OldRev) ->
+ add_revision(Db, DocId, OldRev)
+ end,
+ Rev,
+ lists:seq(1, N)
+ ).
diff --git a/src/couch/test/eunit/couch_db_mpr_tests.erl b/src/couch/test/eunit/couch_db_mpr_tests.erl
index bb97c66d7..3a9577a0d 100644
--- a/src/couch/test/eunit/couch_db_mpr_tests.erl
+++ b/src/couch/test/eunit/couch_db_mpr_tests.erl
@@ -12,7 +12,6 @@
-module(couch_db_mpr_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -24,41 +23,35 @@
-define(CONTENT_JSON, {"Content-Type", "application/json"}).
-define(JSON_BODY, "{\"foo\": \"bar\"}").
-define(CONTENT_MULTI_RELATED,
- {"Content-Type", "multipart/related;boundary=\"bound\""}).
-
+ {"Content-Type", "multipart/related;boundary=\"bound\""}
+).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
TmpDb = ?tempdb(),
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
Url = lists:concat(["http://", Addr, ":", Port, "/", ?b2l(TmpDb)]),
Url.
-
teardown(Url) ->
catch delete_db(Url),
- ok = config:delete("admins", ?USER, _Persist=false).
-
+ ok = config:delete("admins", ?USER, _Persist = false).
create_db(Url) ->
{ok, Status, _, _} = test_request:put(Url, [?CONTENT_JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
-
create_doc(Url, Id, Body, Type) ->
test_request:put(Url ++ "/" ++ Id, [Type, ?AUTH], Body).
-
delete_doc(Url, Id, Rev) ->
test_request:delete(Url ++ "/" ++ Id ++ "?rev=" ++ ?b2l(Rev)).
-
couch_db_mpr_test_() ->
{
"multi-part attachment tests",
@@ -77,30 +70,29 @@ couch_db_mpr_test_() ->
}
}.
-
recreate_with_mpr(Url) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocId1 = "foo",
- DocId2 = "bar",
-
- create_db(Url),
- create_and_delete_doc(Url, DocId1),
- Rev1 = create_with_mpr(Url, DocId1),
- delete_db(Url),
-
- create_db(Url),
- create_and_delete_doc(Url, DocId1),
- % We create a second unrelated doc to change the
- % position on disk where the attachment is written
- % so that we can assert that the position on disk
- % is not included when calculating a revision.
- create_and_delete_doc(Url, DocId2),
- Rev2 = create_with_mpr(Url, DocId1),
- delete_db(Url),
-
- ?assertEqual(Rev1, Rev2)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocId1 = "foo",
+ DocId2 = "bar",
+
+ create_db(Url),
+ create_and_delete_doc(Url, DocId1),
+ Rev1 = create_with_mpr(Url, DocId1),
+ delete_db(Url),
+
+ create_db(Url),
+ create_and_delete_doc(Url, DocId1),
+ % We create a second unrelated doc to change the
+ % position on disk where the attachment is written
+ % so that we can assert that the position on disk
+ % is not included when calculating a revision.
+ create_and_delete_doc(Url, DocId2),
+ Rev2 = create_with_mpr(Url, DocId1),
+ delete_db(Url),
+
+ ?assertEqual(Rev1, Rev2)
+ end)}.
create_and_delete_doc(Url, DocId) ->
{ok, _, _, Resp} = create_doc(Url, DocId, ?JSON_BODY, ?CONTENT_JSON),
@@ -109,7 +101,6 @@ create_and_delete_doc(Url, DocId) ->
?assert(is_binary(Rev)),
{ok, _, _, _} = delete_doc(Url, DocId, Rev).
-
create_with_mpr(Url, DocId) ->
{ok, _, _, Resp} = create_doc(Url, DocId, mpr(), ?CONTENT_MULTI_RELATED),
{Props} = ?JSON_DECODE(Resp),
@@ -117,19 +108,18 @@ create_with_mpr(Url, DocId) ->
?assert(is_binary(Rev)),
Rev.
-
mpr() ->
lists:concat([
"--bound\r\n",
"Content-Type: application/json\r\n\r\n",
"{",
- "\"body\":\"stuff\","
- "\"_attachments\":",
- "{\"foo.txt\":{",
- "\"follows\":true,",
- "\"content_type\":\"text/plain\","
- "\"length\":21",
- "}}"
+ "\"body\":\"stuff\","
+ "\"_attachments\":",
+ "{\"foo.txt\":{",
+ "\"follows\":true,",
+ "\"content_type\":\"text/plain\","
+ "\"length\":21",
+ "}}"
"}",
"\r\n--bound\r\n\r\n",
"this is 21 chars long",
diff --git a/src/couch/test/eunit/couch_db_plugin_tests.erl b/src/couch/test/eunit/couch_db_plugin_tests.erl
index 93551adbc..bcfbffb05 100644
--- a/src/couch/test/eunit/couch_db_plugin_tests.erl
+++ b/src/couch/test/eunit/couch_db_plugin_tests.erl
@@ -21,7 +21,8 @@
on_delete/2
]).
--export([ %% couch_epi_plugin behaviour
+%% couch_epi_plugin behaviour
+-export([
app/0,
providers/0,
services/0,
@@ -58,12 +59,15 @@ validate_dbname({false, _Db}, _) -> {decided, false};
validate_dbname({fail, _Db}, _) -> throw(validate_dbname);
validate_dbname({pass, _Db}, _) -> no_decision.
-before_doc_update({fail, _Doc}, _Db, interactive_edit) -> throw(before_doc_update);
-before_doc_update({true, Doc}, Db, interactive_edit) -> [{true, [before_doc_update|Doc]}, Db, interactive_edit];
-before_doc_update({false, Doc}, Db, interactive_edit) -> [{false, Doc}, Db, interactive_edit].
+before_doc_update({fail, _Doc}, _Db, interactive_edit) ->
+ throw(before_doc_update);
+before_doc_update({true, Doc}, Db, interactive_edit) ->
+ [{true, [before_doc_update | Doc]}, Db, interactive_edit];
+before_doc_update({false, Doc}, Db, interactive_edit) ->
+ [{false, Doc}, Db, interactive_edit].
after_doc_read({fail, _Doc}, _Db) -> throw(after_doc_read);
-after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read|Doc]}, Db];
+after_doc_read({true, Doc}, Db) -> [{true, [after_doc_read | Doc]}, Db];
after_doc_read({false, Doc}, Db) -> [{false, Doc}, Db].
validate_docid({true, _Id}) -> true;
@@ -82,7 +86,9 @@ callback_test_() ->
{
"callback tests",
{
- setup, fun setup/0, fun teardown/1,
+ setup,
+ fun setup/0,
+ fun teardown/1,
[
{"validate_dbname_match", fun validate_dbname_match/0},
{"validate_dbname_no_match", fun validate_dbname_no_match/0},
@@ -112,59 +118,77 @@ callback_test_() ->
}
}.
-
validate_dbname_match() ->
- ?assert(couch_db_plugin:validate_dbname(
- {true, [db]}, db, fun(_, _) -> pass end)).
+ ?assert(
+ couch_db_plugin:validate_dbname(
+ {true, [db]}, db, fun(_, _) -> pass end
+ )
+ ).
validate_dbname_no_match() ->
- ?assertNot(couch_db_plugin:validate_dbname(
- {false, [db]}, db, fun(_, _) -> pass end)).
+ ?assertNot(
+ couch_db_plugin:validate_dbname(
+ {false, [db]}, db, fun(_, _) -> pass end
+ )
+ ).
validate_dbname_throw() ->
?assertThrow(
validate_dbname,
couch_db_plugin:validate_dbname(
- {fail, [db]}, db, fun(_, _) -> pass end)).
+ {fail, [db]}, db, fun(_, _) -> pass end
+ )
+ ).
validate_dbname_pass() ->
- ?assertEqual(pass, couch_db_plugin:validate_dbname(
- {pass, [db]}, db, fun(_, _) -> pass end)).
+ ?assertEqual(
+ pass,
+ couch_db_plugin:validate_dbname(
+ {pass, [db]}, db, fun(_, _) -> pass end
+ )
+ ).
before_doc_update_match() ->
?assertMatch(
{true, [before_doc_update, doc]},
couch_db_plugin:before_doc_update(
- fake_db(), {true, [doc]}, interactive_edit)).
+ fake_db(), {true, [doc]}, interactive_edit
+ )
+ ).
before_doc_update_no_match() ->
?assertMatch(
{false, [doc]},
couch_db_plugin:before_doc_update(
- fake_db(), {false, [doc]}, interactive_edit)).
+ fake_db(), {false, [doc]}, interactive_edit
+ )
+ ).
before_doc_update_throw() ->
?assertThrow(
before_doc_update,
couch_db_plugin:before_doc_update(
- fake_db(), {fail, [doc]}, interactive_edit)).
-
+ fake_db(), {fail, [doc]}, interactive_edit
+ )
+ ).
after_doc_read_match() ->
?assertMatch(
{true, [after_doc_read, doc]},
- couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})).
+ couch_db_plugin:after_doc_read(fake_db(), {true, [doc]})
+ ).
after_doc_read_no_match() ->
?assertMatch(
{false, [doc]},
- couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})).
+ couch_db_plugin:after_doc_read(fake_db(), {false, [doc]})
+ ).
after_doc_read_throw() ->
?assertThrow(
after_doc_read,
- couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})).
-
+ couch_db_plugin:after_doc_read(fake_db(), {fail, [doc]})
+ ).
validate_docid_match() ->
?assert(couch_db_plugin:validate_docid({true, [doc]})).
@@ -175,8 +199,8 @@ validate_docid_no_match() ->
validate_docid_throw() ->
?assertThrow(
validate_docid,
- couch_db_plugin:validate_docid({fail, [doc]})).
-
+ couch_db_plugin:validate_docid({fail, [doc]})
+ ).
check_is_admin_match() ->
?assert(couch_db_plugin:check_is_admin({true, [db]})).
@@ -187,19 +211,23 @@ check_is_admin_no_match() ->
check_is_admin_throw() ->
?assertThrow(
check_is_admin,
- couch_db_plugin:check_is_admin({fail, [db]})).
+ couch_db_plugin:check_is_admin({fail, [db]})
+ ).
on_delete_match() ->
?assertMatch(
- [true],
- couch_db_plugin:on_delete(true, [])).
+ [true],
+ couch_db_plugin:on_delete(true, [])
+ ).
on_delete_no_match() ->
?assertMatch(
- [false],
- couch_db_plugin:on_delete(false, [])).
+ [false],
+ couch_db_plugin:on_delete(false, [])
+ ).
on_delete_throw() ->
?assertThrow(
on_delete,
- couch_db_plugin:on_delete(fail, [])).
+ couch_db_plugin:on_delete(fail, [])
+ ).
diff --git a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
index 40ad283cf..5ca658129 100644
--- a/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
+++ b/src/couch/test/eunit/couch_db_props_upgrade_tests.erl
@@ -16,7 +16,6 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
setup() ->
DbName = <<"test">>,
DbFileName = "test.couch",
@@ -30,12 +29,10 @@ setup() ->
DbName.
-
teardown(DbName) when is_binary(DbName) ->
couch_server:delete(DbName, [?ADMIN_CTX]),
ok.
-
old_db_info_test_() ->
{
"Old database versions work",
@@ -56,14 +53,12 @@ old_db_info_test_() ->
}
}.
-
can_get_props(DbName) ->
?_test(begin
{ok, Db} = couch_db:open_int(DbName, []),
Props = couch_db_engine:get_props(Db),
?assert(is_list(Props))
- end).
-
+ end).
can_get_db_info(DbName) ->
?_test(begin
@@ -71,8 +66,7 @@ can_get_db_info(DbName) ->
{ok, Info} = couch_db:get_db_info(Db),
Props = couch_util:get_value(props, Info),
?assertEqual({[]}, Props)
- end).
-
+ end).
can_compact_db(DbName) ->
?_test(begin
diff --git a/src/couch/test/eunit/couch_db_split_tests.erl b/src/couch/test/eunit/couch_db_split_tests.erl
index b52184a8c..f0baa920b 100644
--- a/src/couch/test/eunit/couch_db_split_tests.erl
+++ b/src/couch/test/eunit/couch_db_split_tests.erl
@@ -16,8 +16,8 @@
-include_lib("couch/include/couch_db.hrl").
-define(RINGTOP, 2 bsl 31).
--define(TIMEOUT, 60). % seconds
-
+% seconds
+-define(TIMEOUT, 60).
setup() ->
DbName = ?tempdb(),
@@ -25,14 +25,12 @@ setup() ->
ok = couch_db:close(Db),
DbName.
-
teardown(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
FilePath = couch_db:get_filepath(Db),
ok = couch_db:close(Db),
ok = file:delete(FilePath).
-
split_test_() ->
Cases = [
{"Should split an empty shard", 0, 2},
@@ -42,16 +40,19 @@ split_test_() ->
],
{
setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop/1,
[
{
foreachx,
- fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
+ fun(_) -> setup() end,
+ fun(_, St) -> teardown(St) end,
[{Case, fun should_split_shard/2} || Case <- Cases]
},
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_fail_on_missing_source/1,
fun should_fail_on_existing_target/1,
@@ -63,37 +64,43 @@ split_test_() ->
]
}.
-
should_split_shard({Desc, TotalDocs, Q}, DbName) ->
{ok, ExpectSeq} = create_docs(DbName, TotalDocs),
Ranges = make_ranges(Q),
TMap = make_targets(Ranges),
DocsPerRange = TotalDocs div Q,
PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT, ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
- %% target actually exists
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- %% target's update seq is the same as source's update seq
- USeq = couch_db:get_update_seq(Db),
- ?assertEqual(ExpectSeq, USeq),
- %% target shard has all the expected in its range docs
- {ok, DocsInShard} = couch_db:fold_docs(Db, fun(FDI, Acc) ->
- DocId = FDI#full_doc_info.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end, 0),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end, TMap)
- end)}.
-
+ {Desc, timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
+ ?assertEqual(ExpectSeq, UpdateSeq),
+ maps:map(
+ fun(Range, Name) ->
+ {ok, Db} = couch_db:open_int(Name, []),
+ FilePath = couch_db:get_filepath(Db),
+ %% target actually exists
+ ?assertMatch({ok, _}, file:read_file_info(FilePath)),
+ %% target's update seq is the same as source's update seq
+ USeq = couch_db:get_update_seq(Db),
+ ?assertEqual(ExpectSeq, USeq),
+                %% target shard has all the expected docs in its range
+ {ok, DocsInShard} = couch_db:fold_docs(
+ Db,
+ fun(FDI, Acc) ->
+ DocId = FDI#full_doc_info.id,
+ ExpectedRange = PickFun(DocId, Ranges, undefined),
+ ?assertEqual(ExpectedRange, Range),
+ {ok, Acc + 1}
+ end,
+ 0
+ ),
+ ?assertEqual(DocsPerRange, DocsInShard),
+ ok = couch_db:close(Db),
+ ok = file:delete(FilePath)
+ end,
+ TMap
+ )
+ end)}.
should_fail_on_missing_source(_DbName) ->
DbName = ?tempdb(),
@@ -102,59 +109,67 @@ should_fail_on_missing_source(_DbName) ->
Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
?_assertEqual({error, missing_source}, Response).
-
should_fail_on_existing_target(DbName) ->
Ranges = make_ranges(2),
- TMap = maps:map(fun(_, TName) ->
- % We create the target but make sure to remove it from the cache so we
- % hit the eexist error instead of already_opened
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- Pid = couch_db:get_pid(Db),
- ok = couch_db:close(Db),
- exit(Pid, kill),
- test_util:wait(fun() ->
- case ets:lookup(couch_server:couch_dbs(DbName), TName) of
- [] -> ok;
- [_ | _] -> wait
- end
- end),
- TName
- end, make_targets(Ranges)),
+ TMap = maps:map(
+ fun(_, TName) ->
+ % We create the target but make sure to remove it from the cache so we
+ % hit the eexist error instead of already_opened
+ {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
+ Pid = couch_db:get_pid(Db),
+ ok = couch_db:close(Db),
+ exit(Pid, kill),
+ test_util:wait(fun() ->
+ case ets:lookup(couch_server:couch_dbs(DbName), TName) of
+ [] -> ok;
+ [_ | _] -> wait
+ end
+ end),
+ TName
+ end,
+ make_targets(Ranges)
+ ),
Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
?_assertMatch({error, {target_create_error, _, eexist}}, Response).
-
should_fail_on_invalid_target_name(DbName) ->
Ranges = make_ranges(2),
- TMap = maps:map(fun([B, _], _) ->
- iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)])
- end, make_targets(Ranges)),
- Expect = {error, {target_create_error, <<"_$00000000">>,
- {illegal_database_name, <<"_$00000000">>}}},
+ TMap = maps:map(
+ fun([B, _], _) ->
+ iolist_to_binary(["_$", couch_util:to_hex(<<B:32/integer>>)])
+ end,
+ make_targets(Ranges)
+ ),
+ Expect =
+ {error, {target_create_error, <<"_$00000000">>, {illegal_database_name, <<"_$00000000">>}}},
Response = couch_db_split:split(DbName, TMap, fun fake_pickfun/3),
?_assertMatch(Expect, Response).
-
should_crash_on_invalid_tmap(DbName) ->
Ranges = make_ranges(1),
TMap = make_targets(Ranges),
- ?_assertError(function_clause,
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
-
+ ?_assertError(
+ function_clause,
+ couch_db_split:split(DbName, TMap, fun fake_pickfun/3)
+ ).
should_fail_on_opened_target(DbName) ->
Ranges = make_ranges(2),
- TMap = maps:map(fun(_, TName) ->
- % We create and keep the target open but delete
- % its file on disk so we don't fail with eexist
- {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
- FilePath = couch_db:get_filepath(Db),
- ok = file:delete(FilePath),
- TName
- end, make_targets(Ranges)),
- ?_assertMatch({error, {target_create_error, _, already_opened}},
- couch_db_split:split(DbName, TMap, fun fake_pickfun/3)).
-
+ TMap = maps:map(
+ fun(_, TName) ->
+ % We create and keep the target open but delete
+ % its file on disk so we don't fail with eexist
+ {ok, Db} = couch_db:create(TName, [?ADMIN_CTX]),
+ FilePath = couch_db:get_filepath(Db),
+ ok = file:delete(FilePath),
+ TName
+ end,
+ make_targets(Ranges)
+ ),
+ ?_assertMatch(
+ {error, {target_create_error, _, already_opened}},
+ couch_db_split:split(DbName, TMap, fun fake_pickfun/3)
+ ).
copy_local_docs_test_() ->
Cases = [
@@ -165,46 +180,55 @@ copy_local_docs_test_() ->
],
{
setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop/1,
[
{
foreachx,
- fun(_) -> setup() end, fun(_, St) -> teardown(St) end,
+ fun(_) -> setup() end,
+ fun(_, St) -> teardown(St) end,
[{Case, fun should_copy_local_docs/2} || Case <- Cases]
},
{"Should return error on missing source",
- fun should_fail_copy_local_on_missing_source/0}
+ fun should_fail_copy_local_on_missing_source/0}
]
}.
-
should_copy_local_docs({Desc, TotalDocs, Q}, DbName) ->
{ok, ExpectSeq} = create_docs(DbName, TotalDocs),
Ranges = make_ranges(Q),
TMap = make_targets(Ranges),
DocsPerRange = TotalDocs div Q,
PickFun = make_pickfun(DocsPerRange),
- {Desc, timeout, ?TIMEOUT, ?_test(begin
- {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
- ?assertEqual(ExpectSeq, UpdateSeq),
- Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
- ?assertEqual(ok, Response),
- maps:map(fun(Range, Name) ->
- {ok, Db} = couch_db:open_int(Name, []),
- FilePath = couch_db:get_filepath(Db),
- %% target shard has all the expected in its range docs
- {ok, DocsInShard} = couch_db:fold_local_docs(Db, fun(Doc, Acc) ->
- DocId = Doc#doc.id,
- ExpectedRange = PickFun(DocId, Ranges, undefined),
- ?assertEqual(ExpectedRange, Range),
- {ok, Acc + 1}
- end, 0, []),
- ?assertEqual(DocsPerRange, DocsInShard),
- ok = couch_db:close(Db),
- ok = file:delete(FilePath)
- end, TMap)
- end)}.
-
+ {Desc, timeout, ?TIMEOUT,
+ ?_test(begin
+ {ok, UpdateSeq} = couch_db_split:split(DbName, TMap, PickFun),
+ ?assertEqual(ExpectSeq, UpdateSeq),
+ Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
+ ?assertEqual(ok, Response),
+ maps:map(
+ fun(Range, Name) ->
+ {ok, Db} = couch_db:open_int(Name, []),
+ FilePath = couch_db:get_filepath(Db),
+ %% target shard has all the expected in its range docs
+ {ok, DocsInShard} = couch_db:fold_local_docs(
+ Db,
+ fun(Doc, Acc) ->
+ DocId = Doc#doc.id,
+ ExpectedRange = PickFun(DocId, Ranges, undefined),
+ ?assertEqual(ExpectedRange, Range),
+ {ok, Acc + 1}
+ end,
+ 0,
+ []
+ ),
+ ?assertEqual(DocsPerRange, DocsInShard),
+ ok = couch_db:close(Db),
+ ok = file:delete(FilePath)
+ end,
+ TMap
+ )
+ end)}.
should_fail_copy_local_on_missing_source() ->
DbName = ?tempdb(),
@@ -214,23 +238,23 @@ should_fail_copy_local_on_missing_source() ->
Response = couch_db_split:copy_local_docs(DbName, TMap, PickFun),
?assertEqual({error, missing_source}, Response).
-
cleanup_target_test_() ->
{
setup,
- fun test_util:start_couch/0, fun test_util:stop/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop/1,
[
{
setup,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
fun should_delete_existing_targets/1
},
{"Should return error on missing source",
- fun should_fail_cleanup_target_on_missing_source/0}
+ fun should_fail_cleanup_target_on_missing_source/0}
]
}.
-
should_delete_existing_targets(SourceName) ->
{ok, ExpectSeq} = create_docs(SourceName, 100),
Ranges = make_ranges(2),
@@ -239,25 +263,26 @@ should_delete_existing_targets(SourceName) ->
?_test(begin
{ok, UpdateSeq} = couch_db_split:split(SourceName, TMap, PickFun),
?assertEqual(ExpectSeq, UpdateSeq),
- maps:map(fun(_Range, TargetName) ->
- FilePath = couch_util:with_db(TargetName, fun(Db) ->
- couch_db:get_filepath(Db)
- end),
- ?assertMatch({ok, _}, file:read_file_info(FilePath)),
- Response = couch_db_split:cleanup_target(SourceName, TargetName),
- ?assertEqual(ok, Response),
- ?assertEqual({error, enoent}, file:read_file_info(FilePath))
- end, TMap)
+ maps:map(
+ fun(_Range, TargetName) ->
+ FilePath = couch_util:with_db(TargetName, fun(Db) ->
+ couch_db:get_filepath(Db)
+ end),
+ ?assertMatch({ok, _}, file:read_file_info(FilePath)),
+ Response = couch_db_split:cleanup_target(SourceName, TargetName),
+ ?assertEqual(ok, Response),
+ ?assertEqual({error, enoent}, file:read_file_info(FilePath))
+ end,
+ TMap
+ )
end).
-
should_fail_cleanup_target_on_missing_source() ->
SourceName = ?tempdb(),
TargetName = ?tempdb(),
Response = couch_db_split:cleanup_target(SourceName, TargetName),
?assertEqual({error, missing_source}, Response).
-
make_pickfun(DocsPerRange) ->
fun(DocId, Ranges, _HashFun) ->
Id = docid_to_integer(DocId),
@@ -269,41 +294,47 @@ make_pickfun(DocsPerRange) ->
end
end.
-
fake_pickfun(_, Ranges, _) ->
hd(Ranges).
-
make_targets([]) ->
maps:new();
-make_targets(Ranges) ->
- Targets = lists:map(fun(Range) ->
- {Range, ?tempdb()}
- end, Ranges),
+make_targets(Ranges) ->
+ Targets = lists:map(
+ fun(Range) ->
+ {Range, ?tempdb()}
+ end,
+ Ranges
+ ),
maps:from_list(Targets).
-
make_ranges(Q) when Q > 0 ->
Incr = (2 bsl 31) div Q,
- lists:map(fun
- (End) when End >= ?RINGTOP - 1 ->
- [End - Incr, ?RINGTOP - 1];
- (End) ->
- [End - Incr, End - 1]
- end, lists:seq(Incr, ?RINGTOP, Incr));
+ lists:map(
+ fun
+ (End) when End >= ?RINGTOP - 1 ->
+ [End - Incr, ?RINGTOP - 1];
+ (End) ->
+ [End - Incr, End - 1]
+ end,
+ lists:seq(Incr, ?RINGTOP, Incr)
+ );
make_ranges(_) ->
[].
-
create_docs(DbName, 0) ->
couch_util:with_db(DbName, fun(Db) ->
UpdateSeq = couch_db:get_update_seq(Db),
{ok, UpdateSeq}
end);
create_docs(DbName, DocNum) ->
- Docs = lists:foldl(fun(I, Acc) ->
- [create_doc(I), create_local_doc(I) | Acc]
- end, [], lists:seq(DocNum, 1, -1)),
+ Docs = lists:foldl(
+ fun(I, Acc) ->
+ [create_doc(I), create_local_doc(I) | Acc]
+ end,
+ [],
+ lists:seq(DocNum, 1, -1)
+ ),
couch_util:with_db(DbName, fun(Db) ->
{ok, _Result} = couch_db:update_docs(Db, Docs),
{ok, Db1} = couch_db:reopen(Db),
@@ -311,20 +342,16 @@ create_docs(DbName, DocNum) ->
{ok, UpdateSeq}
end).
-
create_doc(I) ->
create_prefix_id_doc(I, "").
-
create_local_doc(I) ->
create_prefix_id_doc(I, "_local/").
-
create_prefix_id_doc(I, Prefix) ->
Id = iolist_to_binary(io_lib:format(Prefix ++ "~3..0B", [I])),
couch_doc:from_json_obj({[{<<"_id">>, Id}, {<<"value">>, I}]}).
-
docid_to_integer(<<"_local/", DocId/binary>>) ->
docid_to_integer(DocId);
docid_to_integer(DocId) ->
diff --git a/src/couch/test/eunit/couch_db_tests.erl b/src/couch/test/eunit/couch_db_tests.erl
index d52a15597..82137dc40 100644
--- a/src/couch/test/eunit/couch_db_tests.erl
+++ b/src/couch/test/eunit/couch_db_tests.erl
@@ -16,14 +16,13 @@
-define(TIMEOUT, 120).
-
-
-create_delete_db_test_()->
+create_delete_db_test_() ->
{
"Database create/delete tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
fun() -> ?tempdb() end,
@@ -35,12 +34,13 @@ create_delete_db_test_()->
}
}.
-create_delete_multiple_dbs_test_()->
+create_delete_multiple_dbs_test_() ->
{
"Multiple database create/delete tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
fun() -> [?tempdb() || _ <- lists:seq(1, 6)] end,
@@ -57,7 +57,8 @@ create_delete_database_continuously_test_() ->
"Continious database create/delete tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreachx,
fun(_) -> ?tempdb() end,
@@ -69,12 +70,13 @@ create_delete_database_continuously_test_() ->
}
}.
-open_db_test_()->
+open_db_test_() ->
{
"Database open tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
fun() -> ?tempdb() end,
@@ -87,7 +89,6 @@ open_db_test_()->
}
}.
-
should_create_db(DbName) ->
?_test(begin
{ok, Before} = couch_server:all_databases(),
@@ -109,10 +110,12 @@ should_delete_db(DbName) ->
should_create_multiple_dbs(DbNames) ->
?_test(begin
- [gen_server:call(couch_server:couch_server(N), {set_max_dbs_open, 3}) ||
- N <- lists:seq(1, couch_server:num_servers())],
+ [
+ gen_server:call(couch_server:couch_server(N), {set_max_dbs_open, 3})
+ || N <- lists:seq(1, couch_server:num_servers())
+ ],
{ok, Before} = couch_server:all_databases(),
- [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
+ [?assertNot(lists:member(DbName, Before)) || DbName <- DbNames],
[?assert(create_db(DbName)) || DbName <- DbNames],
{ok, After} = couch_server:all_databases(),
[?assert(lists:member(DbName, After)) || DbName <- DbNames]
@@ -122,21 +125,27 @@ should_delete_multiple_dbs(DbNames) ->
?_test(begin
[?assert(create_db(DbName)) || DbName <- DbNames],
{ok, Before} = couch_server:all_databases(),
- [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
+ [?assert(lists:member(DbName, Before)) || DbName <- DbNames],
[?assert(delete_db(DbName)) || DbName <- DbNames],
{ok, After} = couch_server:all_databases(),
[?assertNot(lists:member(DbName, After)) || DbName <- DbNames]
end).
should_create_delete_database_continuously(Times, DbName) ->
- {lists:flatten(io_lib:format("~b times", [Times])),
- {timeout, ?TIMEOUT, ?_test(begin
- ?assert(create_db(DbName)),
- lists:foreach(fun(_) ->
- ?assert(delete_db(DbName)),
- ?assert(create_db(DbName))
- end, lists:seq(1, Times))
- end)}}.
+ {
+ lists:flatten(io_lib:format("~b times", [Times])),
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ ?assert(create_db(DbName)),
+ lists:foreach(
+ fun(_) ->
+ ?assert(delete_db(DbName)),
+ ?assert(create_db(DbName))
+ end,
+ lists:seq(1, Times)
+ )
+ end)}
+ }.
should_create_db_if_missing(DbName) ->
?_test(begin
@@ -165,8 +174,10 @@ locking_should_work(DbName) ->
?assertEqual({error, {locked, <<"x">>}}, couch_db:create(DbName, [])),
?assertEqual(ok, couch_server:unlock(DbName)),
{ok, Db} = couch_db:create(DbName, []),
- ?assertEqual({error, already_opened},
- couch_server:lock(DbName, <<>>)),
+ ?assertEqual(
+ {error, already_opened},
+ couch_server:lock(DbName, <<>>)
+ ),
ok = couch_db:close(Db),
catch exit(couch_db:get_pid(Db), kill),
@@ -175,11 +186,13 @@ locking_should_work(DbName) ->
[] -> ok;
[_ | _] -> wait
end
- end),
+ end),
?assertEqual(ok, couch_server:lock(DbName, <<"y">>)),
- ?assertEqual({error, {locked, <<"y">>}},
- couch_db:open(DbName, [])),
+ ?assertEqual(
+ {error, {locked, <<"y">>}},
+ couch_db:open(DbName, [])
+ ),
couch_server:unlock(DbName),
{ok, Db1} = couch_db:open(DbName, [{create_if_missing, true}]),
diff --git a/src/couch/test/eunit/couch_doc_json_tests.erl b/src/couch/test/eunit/couch_doc_json_tests.erl
index a68932eca..a004ed8fd 100644
--- a/src/couch/test/eunit/couch_doc_json_tests.erl
+++ b/src/couch/test/eunit/couch_doc_json_tests.erl
@@ -15,7 +15,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup() ->
mock(couch_log),
mock(config),
@@ -38,27 +37,32 @@ mock(couch_log) ->
ok;
mock(config) ->
meck:new(config, [passthrough]),
- meck:expect(config, get_integer,
- fun("couchdb", "max_document_size", 8000000) -> 1024 end),
+ meck:expect(
+ config,
+ get_integer,
+ fun("couchdb", "max_document_size", 8000000) -> 1024 end
+ ),
meck:expect(config, get, fun(_, _) -> undefined end),
meck:expect(config, get, fun(_, _, Default) -> Default end),
ok.
-
json_doc_test_() ->
{
setup,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
fun(_) ->
- [{"Document from JSON", [
- from_json_with_dbname_error_cases(),
- from_json_with_db_name_success_cases(),
- from_json_success_cases(),
- from_json_error_cases()
- ]},
- {"Document to JSON", [
- to_json_success_cases()
- ]}]
+ [
+ {"Document from JSON", [
+ from_json_with_dbname_error_cases(),
+ from_json_with_db_name_success_cases(),
+ from_json_success_cases(),
+ from_json_error_cases()
+ ]},
+ {"Document to JSON", [
+ to_json_success_cases()
+ ]}
+ ]
end
}.
@@ -95,35 +99,42 @@ from_json_success_cases() ->
"Non underscore prefixed fields stored in body."
},
{
- {[{<<"_attachments">>, {[
- {<<"my_attachment.fu">>, {[
- {<<"stub">>, true},
- {<<"content_type">>, <<"application/awesome">>},
- {<<"length">>, 45}
- ]}},
- {<<"noahs_private_key.gpg">>, {[
- {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
- {<<"content_type">>, <<"application/pgp-signature">>}
- ]}}
- ]}}]},
- #doc{atts = [
- couch_att:new([
- {name, <<"my_attachment.fu">>},
- {data, stub},
- {type, <<"application/awesome">>},
- {att_len, 45},
- {disk_len, 45},
- {revpos, undefined}
- ]),
- couch_att:new([
- {name, <<"noahs_private_key.gpg">>},
- {data, <<"I have a pet fish!">>},
- {type, <<"application/pgp-signature">>},
- {att_len, 18},
- {disk_len, 18},
- {revpos, 0}
- ])
+ {[
+ {<<"_attachments">>,
+ {[
+ {<<"my_attachment.fu">>,
+ {[
+ {<<"stub">>, true},
+ {<<"content_type">>, <<"application/awesome">>},
+ {<<"length">>, 45}
+ ]}},
+ {<<"noahs_private_key.gpg">>,
+ {[
+ {<<"data">>, <<"SSBoYXZlIGEgcGV0IGZpc2gh">>},
+ {<<"content_type">>, <<"application/pgp-signature">>}
+ ]}}
+ ]}}
]},
+ #doc{
+ atts = [
+ couch_att:new([
+ {name, <<"my_attachment.fu">>},
+ {data, stub},
+ {type, <<"application/awesome">>},
+ {att_len, 45},
+ {disk_len, 45},
+ {revpos, undefined}
+ ]),
+ couch_att:new([
+ {name, <<"noahs_private_key.gpg">>},
+ {data, <<"I have a pet fish!">>},
+ {type, <<"application/pgp-signature">>},
+ {att_len, 18},
+ {disk_len, 18},
+ {revpos, 0}
+ ])
+ ]
+ },
"Attachments are parsed correctly."
},
{
@@ -138,11 +149,13 @@ from_json_success_cases() ->
},
{
{[
- {<<"_revisions">>,
- {[{<<"start">>, 4},
- {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}]}},
- {<<"_rev">>, <<"6-something">>}
- ]},
+ {<<"_revisions">>,
+ {[
+ {<<"start">>, 4},
+ {<<"ids">>, [<<"foo1">>, <<"phi3">>, <<"omega">>]}
+ ]}},
+ {<<"_rev">>, <<"6-something">>}
+ ]},
#doc{revs = {4, [<<"foo1">>, <<"phi3">>, <<"omega">>]}},
"_revisions attribute are preferred to _rev."
},
@@ -171,7 +184,8 @@ from_json_success_cases() ->
fun({EJson, Expect, Msg}) ->
{Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson))}
end,
- Cases).
+ Cases
+ ).
from_json_with_db_name_success_cases() ->
Cases = [
@@ -210,7 +224,8 @@ from_json_with_db_name_success_cases() ->
fun({EJson, DbName, Expect, Msg}) ->
{Msg, ?_assertMatch(Expect, couch_doc:from_json_obj_validate(EJson, DbName))}
end,
- Cases).
+ Cases
+ ).
from_json_error_cases() ->
Cases = [
@@ -236,8 +251,7 @@ from_json_error_cases() ->
},
{
{[{<<"_id">>, <<"_random">>}]},
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
+ {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
"Disallow arbitrary underscore prefixed docids."
},
{
@@ -270,8 +284,13 @@ from_json_error_cases() ->
"Revision ids must be strings."
},
{
- {[{<<"_revisions">>, {[{<<"start">>, 0},
- {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}]}}]},
+ {[
+ {<<"_revisions">>,
+ {[
+ {<<"start">>, 0},
+ {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}
+ ]}}
+ ]},
{doc_validation, "RevId isn't a valid hexadecimal"},
"Revision ids must be a valid hex."
},
@@ -284,7 +303,7 @@ from_json_error_cases() ->
fun() ->
{[
{<<"_id">>, <<"large_doc">>},
- {<<"x">> , << <<"x">> || _ <- lists:seq(1,1025) >>}
+ {<<"x">>, <<<<"x">> || _ <- lists:seq(1, 1025)>>}
]}
end,
{request_entity_too_large, <<"large_doc">>},
@@ -292,39 +311,36 @@ from_json_error_cases() ->
}
],
- lists:map(fun
- ({Fun, Expect, Msg}) when is_function(Fun, 0) ->
- {Msg,
- ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))};
- ({EJson, Expect, Msg}) ->
- {Msg,
- ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))};
- ({EJson, Msg}) ->
- {Msg,
- ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))}
- end, Cases).
+ lists:map(
+ fun
+ ({Fun, Expect, Msg}) when is_function(Fun, 0) ->
+ {Msg, ?_assertThrow(Expect, couch_doc:from_json_obj_validate(Fun()))};
+ ({EJson, Expect, Msg}) ->
+ {Msg, ?_assertThrow(Expect, couch_doc:from_json_obj_validate(EJson))};
+ ({EJson, Msg}) ->
+ {Msg, ?_assertThrow(_, couch_doc:from_json_obj_validate(EJson))}
+ end,
+ Cases
+ ).
from_json_with_dbname_error_cases() ->
Cases = [
{
{[{<<"_id">>, <<"_random">>}]},
<<"_dbs">>,
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
+ {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
"Disallow non-system-DB underscore prefixed docids in _dbs database."
},
{
{[{<<"_id">>, <<"_random">>}]},
<<"foobar">>,
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
+ {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
"Disallow arbitrary underscore prefixed docids in regular database."
},
{
{[{<<"_id">>, <<"_users">>}]},
<<"foobar">>,
- {illegal_docid,
- <<"Only reserved document ids may start with underscore.">>},
+ {illegal_docid, <<"Only reserved document ids may start with underscore.">>},
"Disallow system-DB docid _users in regular database."
}
],
@@ -334,7 +350,8 @@ from_json_with_dbname_error_cases() ->
Error = (catch couch_doc:from_json_obj_validate(EJson, DbName)),
{Msg, ?_assertMatch(Expect, Error)}
end,
- Cases).
+ Cases
+ ).
to_json_success_cases() ->
Cases = [
@@ -357,13 +374,14 @@ to_json_success_cases() ->
[revs],
#doc{revs = {5, [<<"first">>, <<"second">>]}},
{[
- {<<"_id">>, <<>>},
- {<<"_rev">>, <<"5-first">>},
- {<<"_revisions">>, {[
- {<<"start">>, 5},
- {<<"ids">>, [<<"first">>, <<"second">>]}
- ]}}
- ]},
+ {<<"_id">>, <<>>},
+ {<<"_rev">>, <<"5-first">>},
+ {<<"_revisions">>,
+ {[
+ {<<"start">>, 5},
+ {<<"ids">>, [<<"first">>, <<"second">>]}
+ ]}}
+ ]},
"_revisions include with revs option"
},
{
@@ -377,16 +395,18 @@ to_json_success_cases() ->
"Deleted docs no longer drop body members."
},
{
- #doc{meta = [
- {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
- ]},
+ #doc{
+ meta = [
+ {revs_info, 4, [{<<"fin">>, deleted}, {<<"zim">>, missing}]}
+ ]
+ },
{[
- {<<"_id">>, <<>>},
- {<<"_revs_info">>, [
- {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
- {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
- ]}
- ]},
+ {<<"_id">>, <<>>},
+ {<<"_revs_info">>, [
+ {[{<<"rev">>, <<"4-fin">>}, {<<"status">>, <<"deleted">>}]},
+ {[{<<"rev">>, <<"3-zim">>}, {<<"status">>, <<"missing">>}]}
+ ]}
+ ]},
"_revs_info field is added correctly."
},
{
@@ -405,89 +425,102 @@ to_json_success_cases() ->
{
#doc{meta = [{deleted_conflicts, [{10923, <<"big_cowboy_hat">>}]}]},
{[
- {<<"_id">>, <<>>},
- {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
- ]},
+ {<<"_id">>, <<>>},
+ {<<"_deleted_conflicts">>, [<<"10923-big_cowboy_hat">>]}
+ ]},
"_deleted_conflicsts is added as an array of strings."
},
{
- #doc{atts = [
- couch_att:new([
- {name, <<"big.xml">>},
- {type, <<"xml/sucks">>},
- {data, fun() -> ok end},
- {revpos, 1},
- {att_len, 400},
- {disk_len, 400}
- ]),
- couch_att:new([
- {name, <<"fast.json">>},
- {type, <<"json/ftw">>},
- {data, <<"{\"so\": \"there!\"}">>},
- {revpos, 1},
- {att_len, 16},
- {disk_len, 16}
- ])
- ]},
+ #doc{
+ atts = [
+ couch_att:new([
+ {name, <<"big.xml">>},
+ {type, <<"xml/sucks">>},
+ {data, fun() -> ok end},
+ {revpos, 1},
+ {att_len, 400},
+ {disk_len, 400}
+ ]),
+ couch_att:new([
+ {name, <<"fast.json">>},
+ {type, <<"json/ftw">>},
+ {data, <<"{\"so\": \"there!\"}">>},
+ {revpos, 1},
+ {att_len, 16},
+ {disk_len, 16}
+ ])
+ ]
+ },
{[
- {<<"_id">>, <<>>},
- {<<"_attachments">>, {[
- {<<"big.xml">>, {[
- {<<"content_type">>, <<"xml/sucks">>},
- {<<"revpos">>, 1},
- {<<"length">>, 400},
- {<<"stub">>, true}
- ]}},
- {<<"fast.json">>, {[
- {<<"content_type">>, <<"json/ftw">>},
- {<<"revpos">>, 1},
- {<<"length">>, 16},
- {<<"stub">>, true}
- ]}}
- ]}}
+ {<<"_id">>, <<>>},
+ {<<"_attachments">>,
+ {[
+ {<<"big.xml">>,
+ {[
+ {<<"content_type">>, <<"xml/sucks">>},
+ {<<"revpos">>, 1},
+ {<<"length">>, 400},
+ {<<"stub">>, true}
+ ]}},
+ {<<"fast.json">>,
+ {[
+ {<<"content_type">>, <<"json/ftw">>},
+ {<<"revpos">>, 1},
+ {<<"length">>, 16},
+ {<<"stub">>, true}
+ ]}}
+ ]}}
]},
"Attachments attached as stubs only include a length."
},
{
[attachments],
- #doc{atts = [
- couch_att:new([
- {name, <<"stuff.txt">>},
- {type, <<"text/plain">>},
- {data, fun() -> <<"diet pepsi">> end},
- {revpos, 1},
- {att_len, 10},
- {disk_len, 10}
- ]),
- couch_att:new([
- {name, <<"food.now">>},
- {type, <<"application/food">>},
- {revpos, 1},
- {data, <<"sammich">>}
- ])
- ]},
+ #doc{
+ atts = [
+ couch_att:new([
+ {name, <<"stuff.txt">>},
+ {type, <<"text/plain">>},
+ {data, fun() -> <<"diet pepsi">> end},
+ {revpos, 1},
+ {att_len, 10},
+ {disk_len, 10}
+ ]),
+ couch_att:new([
+ {name, <<"food.now">>},
+ {type, <<"application/food">>},
+ {revpos, 1},
+ {data, <<"sammich">>}
+ ])
+ ]
+ },
{[
{<<"_id">>, <<>>},
- {<<"_attachments">>, {[
- {<<"stuff.txt">>, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
- ]}},
- {<<"food.now">>, {[
- {<<"content_type">>, <<"application/food">>},
- {<<"revpos">>, 1},
- {<<"data">>, <<"c2FtbWljaA==">>}
- ]}}
- ]}}
+ {<<"_attachments">>,
+ {[
+ {<<"stuff.txt">>,
+ {[
+ {<<"content_type">>, <<"text/plain">>},
+ {<<"revpos">>, 1},
+ {<<"data">>, <<"ZGlldCBwZXBzaQ==">>}
+ ]}},
+ {<<"food.now">>,
+ {[
+ {<<"content_type">>, <<"application/food">>},
+ {<<"revpos">>, 1},
+ {<<"data">>, <<"c2FtbWljaA==">>}
+ ]}}
+ ]}}
]},
"Attachments included inline with attachments option."
}
],
- lists:map(fun
- ({Doc, EJson, Msg}) ->
- {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))};
- ({Options, Doc, EJson, Msg}) ->
- {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))}
- end, Cases).
+ lists:map(
+ fun
+ ({Doc, EJson, Msg}) ->
+ {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, []))};
+ ({Options, Doc, EJson, Msg}) ->
+ {Msg, ?_assertMatch(EJson, couch_doc:to_json_obj(Doc, Options))}
+ end,
+ Cases
+ ).
diff --git a/src/couch/test/eunit/couch_doc_tests.erl b/src/couch/test/eunit/couch_doc_tests.erl
index fc63d3f30..5a6e11ab2 100644
--- a/src/couch/test/eunit/couch_doc_tests.erl
+++ b/src/couch/test/eunit/couch_doc_tests.erl
@@ -15,9 +15,9 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(REQUEST_FIXTURE,
- filename:join([?FIXTURESDIR, "multipart.http"])).
+ filename:join([?FIXTURESDIR, "multipart.http"])
+).
parse_rev_test() ->
?assertEqual({1, <<"123">>}, couch_doc:parse_rev("1-123")),
@@ -40,24 +40,30 @@ doc_to_multi_part_stream_test() ->
JsonBytes = <<"{\n \"_id\": \"our document goes here\"\n}\n\n">>,
AttData = <<"Hello my important document">>,
AttLength = size(AttData),
- Atts = [couch_att:new([
- {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
- {att_len, AttLength}, {disk_len, AttLength}])],
+ Atts = [
+ couch_att:new([
+ {name, <<"test">>},
+ {data, AttData},
+ {type, <<"text/plain">>},
+ {att_len, AttLength},
+ {disk_len, AttLength}
+ ])
+ ],
couch_doc:doc_to_multi_part_stream(Boundary, JsonBytes, Atts, fun send/1, true),
AttLengthStr = integer_to_binary(AttLength),
BoundaryLen = size(Boundary),
[
- <<"--", Boundary/binary>>,
- <<"Content-Type: application/json">>,
- <<>>,
- JsonBytes,
- <<"--", Boundary/binary>>,
- <<"Content-Disposition: attachment; filename=\"test\"">>,
- <<"Content-Type: text/plain">>,
- <<"Content-Length: ", AttLengthStr/binary>>,
- <<>>,
- AttData,
- <<"--", Boundary:BoundaryLen/binary, "--">>
+ <<"--", Boundary/binary>>,
+ <<"Content-Type: application/json">>,
+ <<>>,
+ JsonBytes,
+ <<"--", Boundary/binary>>,
+ <<"Content-Disposition: attachment; filename=\"test\"">>,
+ <<"Content-Type: text/plain">>,
+ <<"Content-Length: ", AttLengthStr/binary>>,
+ <<>>,
+ AttData,
+ <<"--", Boundary:BoundaryLen/binary, "--">>
] = collected(),
ok.
@@ -67,10 +73,17 @@ len_doc_to_multi_part_stream_test() ->
ContentType = <<"multipart/related; boundary=\"", Boundary/binary, "\"">>,
AttData = <<"Hello my important document">>,
AttLength = size(AttData),
- Atts = [couch_att:new([
- {name, <<"test">>}, {data, AttData}, {type, <<"text/plain">>},
- {att_len, AttLength}, {disk_len, AttLength}])],
- {ContentType, 258} = %% 258 is expected size of the document
+ Atts = [
+ couch_att:new([
+ {name, <<"test">>},
+ {data, AttData},
+ {type, <<"text/plain">>},
+ {att_len, AttLength},
+ {disk_len, AttLength}
+ ])
+ ],
+ %% 258 is expected size of the document
+ {ContentType, 258} =
couch_doc:len_doc_to_multi_part_stream(Boundary, JsonBytes, Atts, true),
ok.
@@ -93,29 +106,46 @@ validate_docid_test_() ->
?_assertEqual(ok, couch_doc:validate_docid(<<"_users">>, <<"_dbs">>)),
?_assertEqual(ok, couch_doc:validate_docid(<<"_replicator">>, <<"_dbs">>)),
?_assertEqual(ok, couch_doc:validate_docid(<<"_global_changes">>, <<"_dbs">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<>>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<16#80>>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_idx">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_design/">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_local/">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(large_id(1025))),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_users">>, <<"foo">>)),
- ?_assertThrow({illegal_docid, _},
- couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>))
- ]
- }.
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<>>)
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<16#80>>)
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<"_idx">>)
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<"_">>)
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<"_design/">>)
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<"_local/">>)
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(large_id(1025))
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<"_users">>, <<"foo">>)
+ ),
+ ?_assertThrow(
+ {illegal_docid, _},
+ couch_doc:validate_docid(<<"_weeee">>, <<"_dbs">>)
+ )
+ ]}.
large_id(N) ->
- << <<"x">> || _ <- lists:seq(1, N) >>.
+ <<<<"x">> || _ <- lists:seq(1, N)>>.
request(start) ->
{ok, Doc} = file:read_file(?REQUEST_FIXTURE),
@@ -128,7 +158,7 @@ send(Data) ->
send(Data, undefined) ->
send(Data, []);
send(Data, Acc) ->
- put(data, [Acc|Data]).
+ put(data, [Acc | Data]).
collected() ->
B = binary:replace(iolist_to_binary(get(data)), <<"\r\n">>, <<0>>, [global]),
@@ -136,11 +166,14 @@ collected() ->
mock_config() ->
ok = meck:new(config, [passthrough]),
- meck:expect(config, get,
- fun("couchdb", "max_document_id_length", "infinity") -> "1024";
- ("couchdb", "max_attachment_size", "infinity") -> "infinity";
- ("couchdb", "max_attachment_size", 1073741824) -> 1073741824;
- ("mem3", "shards_db", "_dbs") -> "_dbs";
+ meck:expect(
+ config,
+ get,
+ fun
+ ("couchdb", "max_document_id_length", "infinity") -> "1024";
+ ("couchdb", "max_attachment_size", "infinity") -> "infinity";
+ ("couchdb", "max_attachment_size", 1073741824) -> 1073741824;
+ ("mem3", "shards_db", "_dbs") -> "_dbs";
(Key, Val, Default) -> meck:passthrough([Key, Val, Default])
end
).
diff --git a/src/couch/test/eunit/couch_ejson_compare_tests.erl b/src/couch/test/eunit/couch_ejson_compare_tests.erl
index 1dfbad4ed..f74e40b23 100644
--- a/src/couch/test/eunit/couch_ejson_compare_tests.erl
+++ b/src/couch/test/eunit/couch_ejson_compare_tests.erl
@@ -12,8 +12,7 @@
-module(couch_ejson_compare_tests).
-
--define(MAX_UNICODE_STRING, <<255,255,255,255>>).
+-define(MAX_UNICODE_STRING, <<255, 255, 255, 255>>).
% See mango_idx_view.hrl
-define(MAX_JSON_OBJ, {?MAX_UNICODE_STRING}).
@@ -52,40 +51,39 @@
{[{<<"b">>, 2}, {<<"c">>, 2}]}
]).
-
% Property tests
-ifdef(WITH_PROPER).
-include_lib("couch/include/couch_eunit_proper.hrl").
-
property_test_() ->
?EUNIT_QUICKCHECK(60, 400).
-
% Properties
% The main, nif-based comparison, sorts the test values correctly
prop_nif_sorts_correctly() ->
Positions = get_positions(?TEST_VALUES),
- ?FORALL(A, oneof(?TEST_VALUES),
+ ?FORALL(
+ A,
+ oneof(?TEST_VALUES),
?FORALL(B, oneof(?TEST_VALUES), begin
expected_less(A, B, Positions) =:= less_nif(A, B)
end)
).
-
% The erlang fallback comparison sorts the test values correctly
prop_erlang_sorts_correctly() ->
Positions = get_positions(?TEST_VALUES),
- ?FORALL(A, oneof(?TEST_VALUES),
+ ?FORALL(
+ A,
+ oneof(?TEST_VALUES),
?FORALL(B, oneof(?TEST_VALUES), begin
expected_less(A, B, Positions) =:= less_erl(A, B)
end)
).
-
% Zero width unicode chars are ignored
prop_equivalent_unicode_values() ->
?FORALL({Prefix, Suffix}, {zero_width_list(), zero_width_list()}, begin
@@ -93,36 +91,33 @@ prop_equivalent_unicode_values() ->
less(<<"a">>, Binary) =:= 0
end).
-
% Every test value sorts less than the special ?MAX_JSON_OBJ
prop_test_values_are_less_than_max_json() ->
?FORALL(V, oneof(?TEST_VALUES), begin
less(V, ?MAX_JSON_OBJ) =:= -1
end).
-
% Any json value sorts less than the special ?MAX_JSON_OBJ
prop_any_json_is_less_than_max_json() ->
?FORALL(V, json(), begin
less(V, ?MAX_JSON_OBJ) =:= -1
end).
-
% In general, for any json, the nif collator matches the erlang collator
prop_nif_matches_erlang() ->
- ?FORALL(A, json(),
+ ?FORALL(
+ A,
+ json(),
?FORALL(B, json(), begin
less_nif(A, B) =:= less_erl(A, B)
end)
).
-
% Generators
json() ->
?SIZED(Size, json(Size)).
-
json(0) ->
oneof([
null,
@@ -133,7 +128,6 @@ json(0) ->
[],
{[]}
]);
-
json(Size) ->
frequency([
{1, null},
@@ -147,40 +141,30 @@ json(Size) ->
{5, ?LAZY(json_object(Size))}
]).
-
json_number() ->
oneof([largeint(), int(), real()]).
-
json_string() ->
utf8().
-
json_array(0) ->
[];
-
json_array(Size) ->
vector(Size div 2, json(Size div 2)).
-
json_object(0) ->
{[]};
-
json_object(Size) ->
{vector(Size div 2, {json_string(), json(Size div 2)})}.
-
zero_width_list() ->
?SIZED(Size, vector(Size, zero_width_chars())).
-
zero_width_chars() ->
oneof([16#200B, 16#200C, 16#200D]).
-
-endif.
-
% Regular EUnit tests
get_icu_version_test() ->
@@ -192,7 +176,6 @@ get_icu_version_test() ->
?assert(is_integer(V3) andalso V3 >= 0),
?assert(is_integer(V4) andalso V4 >= 0).
-
get_uca_version_test() ->
Ver = couch_ejson_compare:get_uca_version(),
?assertMatch({_, _, _, _}, Ver),
@@ -202,7 +185,6 @@ get_uca_version_test() ->
?assert(is_integer(V3) andalso V3 >= 0),
?assert(is_integer(V4) andalso V4 >= 0).
-
max_depth_error_list_test() ->
% NIF can handle terms with depth <= 9
Nested9 = nest_list(<<"val">>, 9),
@@ -215,7 +197,6 @@ max_depth_error_list_test() ->
% Then it should transparently jump to erlang land
?assertEqual(0, less(Nested10, Nested10)).
-
max_depth_error_obj_test() ->
% NIF can handle terms with depth <= 9
Nested9 = nest_obj(<<"k">>, <<"v">>, 9),
@@ -228,13 +209,12 @@ max_depth_error_obj_test() ->
% Then it should transparently jump to erlang land
?assertEqual(0, less(Nested10, Nested10)).
-
compare_strings_nif_test() ->
?assertEqual(-1, compare_strings(<<"a">>, <<"b">>)),
?assertEqual(0, compare_strings(<<"a">>, <<"a">>)),
?assertEqual(1, compare_strings(<<"b">>, <<"a">>)),
- LargeBin1 = << <<"x">> || _ <- lists:seq(1, 1000000)>>,
+ LargeBin1 = <<<<"x">> || _ <- lists:seq(1, 1000000)>>,
LargeBin2 = <<LargeBin1/binary, "x">>,
?assertEqual(-1, compare_strings(LargeBin1, LargeBin2)),
?assertEqual(1, compare_strings(LargeBin2, LargeBin1)),
@@ -244,47 +224,41 @@ compare_strings_nif_test() ->
?assertError(badarg, compare_strings(<<"a">>, 42)),
?assertError(badarg, compare_strings(42, 42)).
-
% Helper functions
less(A, B) ->
cmp_norm(couch_ejson_compare:less(A, B)).
-
less_nif(A, B) ->
cmp_norm(couch_ejson_compare:less_nif(A, B)).
-
less_erl(A, B) ->
cmp_norm(couch_ejson_compare:less_erl(A, B)).
-
compare_strings(A, B) ->
couch_ejson_compare:compare_strings_nif(A, B).
-
nest_list(Val, 0) ->
Val;
-
nest_list(Val, Depth) when is_integer(Depth), Depth > 0 ->
[nest_list(Val, Depth - 1)].
-
nest_obj(K, V, 1) ->
{[{K, V}]};
-
nest_obj(K, V, Depth) when is_integer(Depth), Depth > 1 ->
{[{K, nest_obj(K, V, Depth - 1)}]}.
-
% Build a map of #{Val => PositionIndex} for the test values so that when any
% two are compared we can verify their position in the test list matches the
% compared result
get_positions(TestValues) ->
- lists:foldl(fun(Val, Acc) ->
- Acc#{Val => map_size(Acc)}
- end, #{}, TestValues).
-
+ lists:foldl(
+ fun(Val, Acc) ->
+ Acc#{Val => map_size(Acc)}
+ end,
+ #{},
+ TestValues
+ ).
% When two values are compared, check the test values positions index to ensure
% the order in the test value list matches the comparison result
diff --git a/src/couch/test/eunit/couch_ejson_size_tests.erl b/src/couch/test/eunit/couch_ejson_size_tests.erl
index df9168ed1..27803d8b7 100644
--- a/src/couch/test/eunit/couch_ejson_size_tests.erl
+++ b/src/couch/test/eunit/couch_ejson_size_tests.erl
@@ -14,59 +14,86 @@
-include_lib("eunit/include/eunit.hrl").
--define(HWAIR, $\x{10348}). % 4 byte utf8 encoding
--define(EURO, $\x{20ac}). % 3 byte utf8 encoding
--define(CENT, $\x{a2}). % 2 byte utf8 encoding
-
+% 4 byte utf8 encoding
+-define(HWAIR, $\x{10348}).
+% 3 byte utf8 encoding
+-define(EURO, $\x{20ac}).
+% 2 byte utf8 encoding
+-define(CENT, $\x{a2}).
ejson_size_test_() ->
- [?_assertEqual(R, couch_ejson_size:encoded_size(Input)) || {R, Input} <- [
- {1, 1}, {1, 1}, {2, -1}, {1, 9}, {2, 10}, {3, -10},
- {2, 11}, {2, 99}, {3, 100}, {3, 999}, {4, 1000}, {4, 9999},
- {5, 10000},
-
- {3, 0.0}, {3, 0.1}, {3, 1.0}, {4, -1.0}, {3, 1.0e9},
- {4, 1.0e10}, {5, 1.0e-10}, {5, 1.0e-99}, {6, 1.0e-100}, {3, 1.0e-323},
-
- {2, arr_nested(0)}, {22, arr_nested(10)}, {2002, arr_nested(1000)},
- {9, obj_nested(0)}, {69, obj_nested(10)}, {6009, obj_nested(1000)},
-
- {4, null}, {4, true}, {5, false},
-
- {3, str(1, $x)}, {4, str(1, ?CENT)}, {5, str(1, ?EURO)},
- {6, str(1, ?HWAIR)}, {3, str(1, $\x{1})}, {12, str(10, $x)},
- {22, str(10, ?CENT)}, {32, str(10, ?EURO)}, {42, str(10, ?HWAIR)},
- {12, str(10, $\x{1})}
- ]].
-
+ [
+ ?_assertEqual(R, couch_ejson_size:encoded_size(Input))
+ || {R, Input} <- [
+ {1, 1},
+ {1, 1},
+ {2, -1},
+ {1, 9},
+ {2, 10},
+ {3, -10},
+ {2, 11},
+ {2, 99},
+ {3, 100},
+ {3, 999},
+ {4, 1000},
+ {4, 9999},
+ {5, 10000},
+
+ {3, 0.0},
+ {3, 0.1},
+ {3, 1.0},
+ {4, -1.0},
+ {3, 1.0e9},
+ {4, 1.0e10},
+ {5, 1.0e-10},
+ {5, 1.0e-99},
+ {6, 1.0e-100},
+ {3, 1.0e-323},
+
+ {2, arr_nested(0)},
+ {22, arr_nested(10)},
+ {2002, arr_nested(1000)},
+ {9, obj_nested(0)},
+ {69, obj_nested(10)},
+ {6009, obj_nested(1000)},
+
+ {4, null},
+ {4, true},
+ {5, false},
+
+ {3, str(1, $x)},
+ {4, str(1, ?CENT)},
+ {5, str(1, ?EURO)},
+ {6, str(1, ?HWAIR)},
+ {3, str(1, $\x{1})},
+ {12, str(10, $x)},
+ {22, str(10, ?CENT)},
+ {32, str(10, ?EURO)},
+ {42, str(10, ?HWAIR)},
+ {12, str(10, $\x{1})}
+ ]
+ ].
%% Helper functions
arr_nested(MaxDepth) ->
arr_nested(MaxDepth, 0).
-
obj_nested(MaxDepth) ->
obj_nested(MaxDepth, 0).
-
obj(N, K, V) ->
{[{K, V} || _ <- lists:seq(1, N)]}.
-
str(N, C) ->
unicode:characters_to_binary([C || _ <- lists:seq(1, N)]).
-
arr_nested(MaxDepth, MaxDepth) ->
[];
-
arr_nested(MaxDepth, Depth) ->
[arr_nested(MaxDepth, Depth + 1)].
-
obj_nested(MaxDepth, MaxDepth) ->
obj(1, <<"k">>, <<"v">>);
-
obj_nested(MaxDepth, Depth) ->
{[{<<"k">>, obj_nested(MaxDepth, Depth + 1)}]}.
diff --git a/src/couch/test/eunit/couch_etag_tests.erl b/src/couch/test/eunit/couch_etag_tests.erl
index 9d15e483f..72db6008a 100644
--- a/src/couch/test/eunit/couch_etag_tests.erl
+++ b/src/couch/test/eunit/couch_etag_tests.erl
@@ -18,13 +18,14 @@ local_with_empty_body_test() ->
Etag = couch_httpd:doc_etag(<<"_local/local-and-empty">>, {[]}, {0, <<"1">>}),
?assertEqual(Etag, <<"\"5ZVXQYO7VLEOU0TL9VXDNP5PV\"">>).
-
local_with_body_test() ->
- DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]},
+ DocBody = {[{<<"hello">>, <<"world">>}, {<<"relax">>, true}]},
Etag = couch_httpd:doc_etag(<<"_local/local-with-body">>, DocBody, {0, <<"1">>}),
?assertEqual(Etag, <<"\"CEFXP6WH8OKYIWO1GLGBHKCCA\"">>).
normal_doc_uses_rev_test() ->
- DocBody = {[{<<"hello">>,<<"world">>},{<<"relax">>,true}]},
- Etag = couch_httpd:doc_etag(<<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>}),
+ DocBody = {[{<<"hello">>, <<"world">>}, {<<"relax">>, true}]},
+ Etag = couch_httpd:doc_etag(
+ <<"nomal-doc">>, DocBody, {1, <<"efda11e34e88ebe31a2f83e84a0435b6">>}
+ ),
?assertEqual(Etag, <<"\"1-efda11e34e88ebe31a2f83e84a0435b6\"">>).
diff --git a/src/couch/test/eunit/couch_file_tests.erl b/src/couch/test/eunit/couch_file_tests.erl
index 606f4bbf4..1b54cd70e 100644
--- a/src/couch/test/eunit/couch_file_tests.erl
+++ b/src/couch/test/eunit/couch_file_tests.erl
@@ -18,7 +18,6 @@
-define(setup(F), {setup, fun setup/0, fun teardown/1, F}).
-define(foreach(Fs), {foreach, fun setup/0, fun teardown/1, Fs}).
-
setup() ->
{ok, Fd} = couch_file:open(?tempfile(), [create, overwrite]),
Fd.
@@ -34,7 +33,8 @@ open_close_test_() ->
"Test for proper file open and close",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
[
should_return_enoent_if_missed(),
should_ignore_invalid_flags_with_open(),
@@ -49,8 +49,10 @@ should_return_enoent_if_missed() ->
?_assertEqual({error, enoent}, couch_file:open("not a real file")).
should_ignore_invalid_flags_with_open() ->
- ?_assertMatch({ok, _},
- couch_file:open(?tempfile(), [create, invalid_option])).
+ ?_assertMatch(
+ {ok, _},
+ couch_file:open(?tempfile(), [create, invalid_option])
+ ).
should_return_pid_on_file_open(Fd) ->
?_assert(is_pid(Fd)).
@@ -63,13 +65,13 @@ should_close_file_properly() ->
should_create_empty_new_files(Fd) ->
?_assertMatch({ok, 0}, couch_file:bytes(Fd)).
-
read_write_test_() ->
{
"Common file read/write tests",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
?foreach([
fun should_increase_file_size_on_write/1,
fun should_return_current_file_size_on_write/1,
@@ -86,7 +88,6 @@ read_write_test_() ->
}
}.
-
should_increase_file_size_on_write(Fd) ->
{ok, 0, _} = couch_file:append_term(Fd, foo),
{ok, Size} = couch_file:bytes(Fd),
@@ -111,7 +112,7 @@ should_return_term_as_binary_for_reading_binary(Fd) ->
?_assertMatch({ok, Foo}, couch_file:pread_binary(Fd, Pos)).
should_read_term_written_as_binary(Fd) ->
- {ok, Pos, _} = couch_file:append_binary(Fd, <<131,100,0,3,102,111,111>>),
+ {ok, Pos, _} = couch_file:append_binary(Fd, <<131, 100, 0, 3, 102, 111, 111>>),
?_assertMatch({ok, foo}, couch_file:pread_term(Fd, Pos)).
should_write_and_read_large_binary(Fd) ->
@@ -139,8 +140,7 @@ should_not_read_beyond_eof(Fd) ->
ok = file:pwrite(Io, Pos, <<0:1/integer, DoubleBin:31/integer>>),
file:close(Io),
unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value,
- {read_beyond_eof, Filepath}}}},
+ ExpectedError = {badmatch, {'EXIT', {bad_return_value, {read_beyond_eof, Filepath}}}},
?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
should_truncate(Fd) ->
@@ -180,17 +180,16 @@ should_not_read_more_than_pread_limit(Fd) ->
BigBin = list_to_binary(lists:duplicate(100000, 0)),
{ok, Pos, _Size} = couch_file:append_binary(Fd, BigBin),
unlink(Fd),
- ExpectedError = {badmatch, {'EXIT', {bad_return_value,
- {exceed_pread_limit, Filepath, 50000}}}},
+ ExpectedError = {badmatch, {'EXIT', {bad_return_value, {exceed_pread_limit, Filepath, 50000}}}},
?_assertError(ExpectedError, couch_file:pread_binary(Fd, Pos)).
-
header_test_() ->
{
"File header read/write tests",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
[
?foreach([
fun should_write_and_read_atom_header/1,
@@ -208,7 +207,6 @@ header_test_() ->
}
}.
-
should_write_and_read_atom_header(Fd) ->
ok = couch_file:write_header(Fd, hello),
?_assertMatch({ok, hello}, couch_file:read_header(Fd)).
@@ -243,7 +241,6 @@ should_save_headers_larger_than_block_size(Fd) ->
couch_file:write_header(Fd, Header),
{"COUCHDB-1319", ?_assertMatch({ok, Header}, couch_file:read_header(Fd))}.
-
should_recover_header_marker_corruption() ->
?_assertMatch(
ok,
@@ -252,7 +249,8 @@ should_recover_header_marker_corruption() ->
?assertNotMatch(Expect, couch_file:read_header(CouchFd)),
file:pwrite(RawFd, HeaderPos, <<0>>),
?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
+ end
+ )
).
should_recover_header_size_corruption() ->
@@ -264,7 +262,8 @@ should_recover_header_size_corruption() ->
% +1 for 0x1 byte marker
file:pwrite(RawFd, HeaderPos + 1, <<10/integer>>),
?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
+ end
+ )
).
should_recover_header_md5sig_corruption() ->
@@ -276,7 +275,8 @@ should_recover_header_md5sig_corruption() ->
% +5 = +1 for 0x1 byte and +4 for term size.
file:pwrite(RawFd, HeaderPos + 5, <<"F01034F88D320B22">>),
?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
+ end
+ )
).
should_recover_header_data_corruption() ->
@@ -288,10 +288,10 @@ should_recover_header_data_corruption() ->
% +21 = +1 for 0x1 byte, +4 for term size and +16 for MD5 sig
file:pwrite(RawFd, HeaderPos + 21, <<"some data goes here!">>),
?assertMatch(Expect, couch_file:read_header(CouchFd))
- end)
+ end
+ )
).
-
check_header_recovery(CheckFun) ->
Path = ?tempfile(),
{ok, Fd} = couch_file:open(Path, [create, overwrite]),
@@ -322,7 +322,6 @@ write_random_data(Fd, N) ->
{ok, _, _} = couch_file:append_term(Fd, Term),
write_random_data(Fd, N - 1).
-
delete_test_() ->
{
"File delete tests",
@@ -350,34 +349,33 @@ delete_test_() ->
[
fun(Cfg) ->
{"enable_database_recovery = false, context = delete",
- make_enable_recovery_test_case(Cfg, false, delete)}
+ make_enable_recovery_test_case(Cfg, false, delete)}
end,
fun(Cfg) ->
{"enable_database_recovery = true, context = delete",
- make_enable_recovery_test_case(Cfg, true, delete)}
+ make_enable_recovery_test_case(Cfg, true, delete)}
end,
fun(Cfg) ->
{"enable_database_recovery = false, context = compaction",
- make_enable_recovery_test_case(Cfg, false, compaction)}
+ make_enable_recovery_test_case(Cfg, false, compaction)}
end,
fun(Cfg) ->
{"enable_database_recovery = true, context = compaction",
- make_enable_recovery_test_case(Cfg, true, compaction)}
+ make_enable_recovery_test_case(Cfg, true, compaction)}
end,
fun(Cfg) ->
{"delete_after_rename = true",
- make_delete_after_rename_test_case(Cfg, true)}
+ make_delete_after_rename_test_case(Cfg, true)}
end,
fun(Cfg) ->
{"delete_after_rename = false",
- make_delete_after_rename_test_case(Cfg, false)}
+ make_delete_after_rename_test_case(Cfg, false)}
end
]
}
}
}.
-
make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) ->
meck:expect(config, get_boolean, fun
("couchdb", "enable_database_recovery", _) -> EnableRecovery;
@@ -388,10 +386,11 @@ make_enable_recovery_test_case({RootDir, File}, EnableRecovery, Context) ->
FileExistsAfter = filelib:is_regular(File),
RenamedFiles = filelib:wildcard(filename:rootname(File) ++ "*.deleted.*"),
DeletedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- {ExpectRenamedCount, ExpectDeletedCount} = if
- EnableRecovery andalso Context =:= delete -> {1, 0};
- true -> {0, 1}
- end,
+ {ExpectRenamedCount, ExpectDeletedCount} =
+ if
+ EnableRecovery andalso Context =:= delete -> {1, 0};
+ true -> {0, 1}
+ end,
[
?_assert(FileExistsBefore),
?_assertNot(FileExistsAfter),
@@ -408,14 +407,17 @@ make_delete_after_rename_test_case({RootDir, File}, DeleteAfterRename) ->
couch_file:delete(RootDir, File),
FileExistsAfter = filelib:is_regular(File),
RenamedFiles = filelib:wildcard(filename:join([RootDir, ".delete", "*"])),
- ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end,
+ ExpectRenamedCount =
+ if
+ DeleteAfterRename -> 0;
+ true -> 1
+ end,
[
?_assert(FileExistsBefore),
?_assertNot(FileExistsAfter),
?_assertEqual(ExpectRenamedCount, length(RenamedFiles))
].
-
nuke_dir_test_() ->
{
"Nuke directory tests",
@@ -454,27 +456,22 @@ nuke_dir_test_() ->
end,
[
fun(Cfg) ->
- {"enable_database_recovery = false",
- make_rename_dir_test_case(Cfg, false)}
+ {"enable_database_recovery = false", make_rename_dir_test_case(Cfg, false)}
end,
fun(Cfg) ->
- {"enable_database_recovery = true",
- make_rename_dir_test_case(Cfg, true)}
+ {"enable_database_recovery = true", make_rename_dir_test_case(Cfg, true)}
end,
fun(Cfg) ->
- {"delete_after_rename = true",
- make_delete_dir_test_case(Cfg, true)}
+ {"delete_after_rename = true", make_delete_dir_test_case(Cfg, true)}
end,
fun(Cfg) ->
- {"delete_after_rename = false",
- make_delete_dir_test_case(Cfg, false)}
+ {"delete_after_rename = false", make_delete_dir_test_case(Cfg, false)}
end
]
}
}
}.
-
make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) ->
meck:expect(config, get_boolean, fun
("couchdb", "enable_database_recovery", _) -> EnableRecovery;
@@ -486,7 +483,11 @@ make_rename_dir_test_case({RootDir, ViewDir}, EnableRecovery) ->
DirExistsAfter = filelib:is_dir(ViewDir),
Ext = filename:extension(ViewDir),
RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
- ExpectRenamedCount = if EnableRecovery -> 1; true -> 0 end,
+ ExpectRenamedCount =
+ if
+ EnableRecovery -> 1;
+ true -> 0
+ end,
[
?_assert(DirExistsBefore),
?_assertNot(DirExistsAfter),
@@ -505,7 +506,11 @@ make_delete_dir_test_case({RootDir, ViewDir}, DeleteAfterRename) ->
Ext = filename:extension(ViewDir),
RenamedDirs = filelib:wildcard(RootDir ++ "/*.deleted" ++ Ext),
RenamedFiles = filelib:wildcard(RootDir ++ "/.delete/*"),
- ExpectRenamedCount = if DeleteAfterRename -> 0; true -> 1 end,
+ ExpectRenamedCount =
+ if
+ DeleteAfterRename -> 0;
+ true -> 1
+ end,
[
?_assert(DirExistsBefore),
?_assertNot(DirExistsAfter),
@@ -517,7 +522,6 @@ remove_dir(Dir) ->
[file:delete(File) || File <- filelib:wildcard(filename:join([Dir, "*"]))],
file:del_dir(Dir).
-
fsync_error_test_() ->
{
"Test fsync raises errors",
@@ -535,12 +539,10 @@ fsync_error_test_() ->
}
}.
-
fsync_raises_errors() ->
Fd = spawn(fun() -> fake_fsync_fd() end),
?assertError({fsync_error, eio}, couch_file:sync(Fd)).
-
fake_fsync_fd() ->
% Mocking gen_server did not go very
% well so faking the couch_file pid
diff --git a/src/couch/test/eunit/couch_flags_config_tests.erl b/src/couch/test/eunit/couch_flags_config_tests.erl
index 6fe2d5860..05707718b 100644
--- a/src/couch/test/eunit/couch_flags_config_tests.erl
+++ b/src/couch/test/eunit/couch_flags_config_tests.erl
@@ -19,9 +19,9 @@ couch_flags_config_test_() ->
setup,
fun setup/0,
fun teardown/1,
- [fun all_combinations_return_same_result/0]
- ++ latest_overide_wins()
- ++ [
+ [fun all_combinations_return_same_result/0] ++
+ latest_overide_wins() ++
+ [
{"rules_are_sorted", fun rules_are_sorted/0}
]
}
@@ -29,50 +29,72 @@ couch_flags_config_test_() ->
all_combinations_return_same_result() ->
Config = [
- {"foo, bar||*", "true"},
- {"baz, qux||*", "false"},
- {"baz||shards/test*", "true"},
- {"baz||shards/blacklist*", "false"},
- {"bar||shards/test*", "false"},
- {"bar||shards/test/blacklist*", "true"}
+ {"foo, bar||*", "true"},
+ {"baz, qux||*", "false"},
+ {"baz||shards/test*", "true"},
+ {"baz||shards/blacklist*", "false"},
+ {"bar||shards/test*", "false"},
+ {"bar||shards/test/blacklist*", "true"}
],
Expected = [
- {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[bar, baz, foo]}},
- {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz, foo]}},
- {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, [bar, foo]}},
- {{<<"*">>},{<<"*">>, 1, [bar, foo]}}
+ {{<<"shards/test/blacklist*">>}, {<<"shards/test/blacklist*">>, 22, [bar, baz, foo]}},
+ {{<<"shards/test*">>}, {<<"shards/test*">>, 12, [baz, foo]}},
+ {{<<"shards/blacklist*">>}, {<<"shards/blacklist*">>, 17, [bar, foo]}},
+ {{<<"*">>}, {<<"*">>, 1, [bar, foo]}}
],
Combinations = couch_tests_combinatorics:permutations(Config),
- lists:foreach(fun(Items) ->
- ?assertEqual(Expected, couch_flags_config:data(Items))
- end, Combinations).
+ lists:foreach(
+ fun(Items) ->
+ ?assertEqual(Expected, couch_flags_config:data(Items))
+ end,
+ Combinations
+ ).
rules_are_sorted() ->
Expected = [
- {{<<"shards/test/exact">>},{<<"shards/test/exact">>, 17, [baz,flag_bar,flag_foo]}},
- {{<<"shards/test/blacklist*">>},{<<"shards/test/blacklist*">>,22,[flag_foo]}},
- {{<<"shards/test*">>},{<<"shards/test*">>, 12, [baz,flag_bar,flag_foo]}},
- {{<<"shards/exact">>},{<<"shards/exact">>, 12, [flag_bar,flag_foo]}},
- {{<<"shards/blacklist*">>},{<<"shards/blacklist*">>, 17, []}},
- {{<<"*">>},{<<"*">>, 1, [flag_foo]}}
+ {{<<"shards/test/exact">>}, {<<"shards/test/exact">>, 17, [baz, flag_bar, flag_foo]}},
+ {{<<"shards/test/blacklist*">>}, {<<"shards/test/blacklist*">>, 22, [flag_foo]}},
+ {{<<"shards/test*">>}, {<<"shards/test*">>, 12, [baz, flag_bar, flag_foo]}},
+ {{<<"shards/exact">>}, {<<"shards/exact">>, 12, [flag_bar, flag_foo]}},
+ {{<<"shards/blacklist*">>}, {<<"shards/blacklist*">>, 17, []}},
+ {{<<"*">>}, {<<"*">>, 1, [flag_foo]}}
],
?assertEqual(Expected, couch_flags_config:data(test_config())).
latest_overide_wins() ->
Cases = [
- {[
- {"flag||*", "false"}, {"flag||a*", "true"},
- {"flag||ab*", "true"}, {"flag||abc*", "true"}
- ], true},
- {[
- {"flag||*", "true"}, {"flag||a*", "false"},
- {"flag||ab*", "true"}, {"flag||abc*", "false"}
- ], false}
+ {
+ [
+ {"flag||*", "false"},
+ {"flag||a*", "true"},
+ {"flag||ab*", "true"},
+ {"flag||abc*", "true"}
+ ],
+ true
+ },
+ {
+ [
+ {"flag||*", "true"},
+ {"flag||a*", "false"},
+ {"flag||ab*", "true"},
+ {"flag||abc*", "false"}
+ ],
+ false
+ }
],
- [{test_id(Rules, Expected),
- ?_assertEqual(Expected, lists:member(flag,
- flags(hd(couch_flags_config:data(Rules)))))}
- || {Rules, Expected} <- Cases].
+ [
+ {
+ test_id(Rules, Expected),
+ ?_assertEqual(
+ Expected,
+ lists:member(
+ flag,
+ flags(hd(couch_flags_config:data(Rules)))
+ )
+ )
+ }
+ || {Rules, Expected} <- Cases
+ ].
flags({{_Pattern}, {_Pattern, _Size, Flags}}) ->
Flags.
@@ -80,7 +102,6 @@ flags({{_Pattern}, {_Pattern, _Size, Flags}}) ->
test_id(Items, ExpectedResult) ->
lists:flatten(io_lib:format("~p -> ~p", [[P || {P, _} <- Items], ExpectedResult])).
-
test_config() ->
[
{"flag_foo||*", "true"},
@@ -95,22 +116,32 @@ test_config() ->
parse_flags_term_test_() ->
LongBinary = binary:copy(<<"a">>, ?MAX_FLAG_NAME_LENGTH + 1),
- ExpectedError = {error, {"Cannot parse list of tags: ~n~p",
- [{too_long, LongBinary}]}},
- ExpectedUnknownError = {error,{"Cannot parse list of tags: ~n~p",
- [{invalid_flag,<<"dddddddd">>}]}},
- [
- {"empty binary", ?_assertEqual(
- [], couch_flags_config:parse_flags_term(<<>>))},
- {"single flag", ?_assertEqual(
- [fff], couch_flags_config:parse_flags_term(<<"fff">>))},
- {"sorted", ?_assertEqual(
- [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>))},
- {"whitespace", ?_assertEqual(
- [aaa,bbb,fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>))},
- {"error", ?_assertEqual(
- ExpectedError, couch_flags_config:parse_flags_term(LongBinary))},
- {"unknown_flag", ?_assertEqual(
- ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>))}
- ].
-
+ ExpectedError = {error, {"Cannot parse list of tags: ~n~p", [{too_long, LongBinary}]}},
+ ExpectedUnknownError =
+ {error, {"Cannot parse list of tags: ~n~p", [{invalid_flag, <<"dddddddd">>}]}},
+ [
+ {"empty binary",
+ ?_assertEqual(
+ [], couch_flags_config:parse_flags_term(<<>>)
+ )},
+ {"single flag",
+ ?_assertEqual(
+ [fff], couch_flags_config:parse_flags_term(<<"fff">>)
+ )},
+ {"sorted",
+ ?_assertEqual(
+ [aaa, bbb, fff], couch_flags_config:parse_flags_term(<<"fff,aaa,bbb">>)
+ )},
+ {"whitespace",
+ ?_assertEqual(
+ [aaa, bbb, fff], couch_flags_config:parse_flags_term(<<"fff , aaa, bbb ">>)
+ )},
+ {"error",
+ ?_assertEqual(
+ ExpectedError, couch_flags_config:parse_flags_term(LongBinary)
+ )},
+ {"unknown_flag",
+ ?_assertEqual(
+ ExpectedUnknownError, couch_flags_config:parse_flags_term(<<"dddddddd">>)
+ )}
+ ].
diff --git a/src/couch/test/eunit/couch_flags_tests.erl b/src/couch/test/eunit/couch_flags_tests.erl
index 32ec57b77..e3635e9f2 100644
--- a/src/couch/test/eunit/couch_flags_tests.erl
+++ b/src/couch/test/eunit/couch_flags_tests.erl
@@ -55,7 +55,9 @@ rules() ->
setup() ->
%% FIXME after we upgrade couch_epi
- application:stop(couch_epi), % in case it's already running from other tests...
+
+ % in case it's already running from other tests...
+ application:stop(couch_epi),
application:unload(couch_epi),
application:load(couch_epi),
@@ -63,8 +65,7 @@ setup() ->
meck:expect(config, get, 1, []),
Ctx = test_util:start_couch([couch_epi]),
- Ctx.
-
+ Ctx.
teardown(Ctx) ->
test_util:stop_couch(Ctx),
@@ -76,57 +77,65 @@ couch_flags_test_() ->
{
"test couch_flags",
{
- setup, fun setup/0, fun teardown/1,
- enabled_flags_tests()
- ++ is_enabled()
-%% ++ match_performance()
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ enabled_flags_tests() ++
+ is_enabled()
+ %% ++ match_performance()
}
}.
enabled_flags_tests() ->
-
- [{"enabled_flags_tests", [
- {"flags_default_rule",
- ?_assertEqual(
- [foo], couch_flags:enabled("something"))},
- {"flags_wildcard_rule",
- ?_assertEqual(
- [bar, baz, foo],
- couch_flags:enabled("shards/test/something"))},
- {"flags_exact_rule",
- ?_assertEqual(
- [bar, baz, foo],
- couch_flags:enabled("shards/test/exact"))},
- {"flags_blacklist_rule",
- ?_assertEqual(
- [],
- couch_flags:enabled("shards/blacklist/4"))}
- ]}].
+ [
+ {"enabled_flags_tests", [
+ {"flags_default_rule",
+ ?_assertEqual(
+ [foo], couch_flags:enabled("something")
+ )},
+ {"flags_wildcard_rule",
+ ?_assertEqual(
+ [bar, baz, foo],
+ couch_flags:enabled("shards/test/something")
+ )},
+ {"flags_exact_rule",
+ ?_assertEqual(
+ [bar, baz, foo],
+ couch_flags:enabled("shards/test/exact")
+ )},
+ {"flags_blacklist_rule",
+ ?_assertEqual(
+ [],
+ couch_flags:enabled("shards/blacklist/4")
+ )}
+ ]}
+ ].
is_enabled() ->
- [{"is_enabled_tests", [
- {"flags_default_rule [enabled]",
- ?_assert(couch_flags:is_enabled(foo, "something"))},
- {"flags_default_rule [disabled]",
- ?_assertNot(couch_flags:is_enabled(baz, "something"))},
- {"flags_default_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "something"))},
-
- {"flags_wildcard_rule [enabled]",
- ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))},
- {"flags_wildcard_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))},
-
- {"flags_exact_rule [overide_disbled]",
- ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))},
- {"flags_exact_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))},
-
- {"flags_blacklist_rule [overide_enabled]",
- ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))},
- {"flags_blacklist_rule [not_existent]",
- ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))}
- ]}].
+ [
+ {"is_enabled_tests", [
+ {"flags_default_rule [enabled]", ?_assert(couch_flags:is_enabled(foo, "something"))},
+ {"flags_default_rule [disabled]",
+ ?_assertNot(couch_flags:is_enabled(baz, "something"))},
+ {"flags_default_rule [not_existent]",
+ ?_assertNot(couch_flags:is_enabled(non_existent, "something"))},
+
+ {"flags_wildcard_rule [enabled]",
+ ?_assert(couch_flags:is_enabled(bar, "shards/test/something"))},
+ {"flags_wildcard_rule [not_existent]",
+ ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/something"))},
+
+ {"flags_exact_rule [overide_disbled]",
+ ?_assert(couch_flags:is_enabled(bar, "shards/test/exact"))},
+ {"flags_exact_rule [not_existent]",
+ ?_assertNot(couch_flags:is_enabled(non_existent, "shards/test/exact"))},
+
+ {"flags_blacklist_rule [overide_enabled]",
+ ?_assertNot(couch_flags:is_enabled(foo, "shards/blacklist/4"))},
+ {"flags_blacklist_rule [not_existent]",
+ ?_assertNot(couch_flags:is_enabled(non_existent, "shards/blacklist/4"))}
+ ]}
+ ].
%% match_performance() ->
%% [{"match_performance", [
@@ -137,7 +146,6 @@ is_enabled() ->
%% end)
%% ]}].
-
test_config() ->
[
{"foo||/*", "true"},
diff --git a/src/couch/test/eunit/couch_index_tests.erl b/src/couch/test/eunit/couch_index_tests.erl
index 23c857d6c..368f7a059 100644
--- a/src/couch/test/eunit/couch_index_tests.erl
+++ b/src/couch/test/eunit/couch_index_tests.erl
@@ -36,10 +36,12 @@ couch_index_ioq_priority_test_() ->
"Test ioq_priority for views",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun check_io_priority_for_updater/1,
fun check_io_priority_for_compactor/1
@@ -48,11 +50,11 @@ couch_index_ioq_priority_test_() ->
}
}.
-
check_io_priority_for_updater(DbName) ->
?_test(begin
{ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
+ couch_mrview_index, DbName, <<"_design/foo">>
+ ),
CouchIndexUpdaterPid = updater_pid(IndexerPid),
tracer_record(CouchIndexUpdaterPid),
@@ -63,15 +65,23 @@ check_io_priority_for_updater(DbName) ->
[UpdaterPid] = wait_spawn_event_for_pid(CouchIndexUpdaterPid),
[UpdaterMapProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-0-'),
+ UpdaterPid, '-start_update/4-fun-0-'
+ ),
- ?assert(wait_set_io_priority(
- UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>})),
+ ?assert(
+ wait_set_io_priority(
+ UpdaterMapProcess, {view_update, DbName, <<"_design/foo">>}
+ )
+ ),
[UpdaterWriterProcess] = wait_spawn_by_anonymous_fun(
- UpdaterPid, '-start_update/4-fun-1-'),
- ?assert(wait_set_io_priority(
- UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>})),
+ UpdaterPid, '-start_update/4-fun-1-'
+ ),
+ ?assert(
+ wait_set_io_priority(
+ UpdaterWriterProcess, {view_update, DbName, <<"_design/foo">>}
+ )
+ ),
ok
end).
@@ -79,7 +89,8 @@ check_io_priority_for_updater(DbName) ->
check_io_priority_for_compactor(DbName) ->
?_test(begin
{ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
+ couch_mrview_index, DbName, <<"_design/foo">>
+ ),
{ok, CompactorPid} = couch_index:get_compactor_pid(IndexerPid),
tracer_record(CompactorPid),
@@ -89,51 +100,65 @@ check_io_priority_for_compactor(DbName) ->
wait_spawn_event_for_pid(CompactorPid),
[CompactorProcess] = wait_spawn_by_anonymous_fun(
- CompactorPid, '-handle_call/3-fun-0-'),
- ?assert(wait_set_io_priority(
- CompactorProcess, {view_compact, DbName, <<"_design/foo">>})),
+ CompactorPid, '-handle_call/3-fun-0-'
+ ),
+ ?assert(
+ wait_set_io_priority(
+ CompactorProcess, {view_compact, DbName, <<"_design/foo">>}
+ )
+ ),
ok
end).
create_docs(DbName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
+ Doc1 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 1}
+ ]}
+ ),
+ Doc2 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc2">>},
+ {<<"value">>, 2}
+ ]}
+ ),
+ Doc3 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"value">>, 3}
+ ]}
+ ),
{ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
couch_db:close(Db).
create_design_doc(DbName, DDName, ViewName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDName},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {ViewName,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
+ ]}}
+ ]}}
+ ]}
+ ),
{ok, Rev} = couch_db:update_doc(Db, DDoc, []),
couch_db:close(Db),
Rev.
wait_set_io_priority(Pid, IOPriority) ->
- test_util:wait_value(fun() ->
- does_process_set_io_priority(Pid, IOPriority)
- end, true).
+ test_util:wait_value(
+ fun() ->
+ does_process_set_io_priority(Pid, IOPriority)
+ end,
+ true
+ ).
does_process_set_io_priority(Pid, IOPriority) ->
PutCallsArgs = find_calls_to_fun(Pid, {erlang, put, 2}),
@@ -143,36 +168,47 @@ wait_events(MatchSpec) ->
test_util:wait_other_value(fun() -> select(MatchSpec) end, []).
find_spawned_by_anonymous_fun(ParentPid, Name) ->
- AnonymousFuns = select(ets:fun2ms(fun
- ({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]})
- when is_function(Fun) andalso PPid =:= ParentPid -> {Pid, Fun}
- end)),
- lists:filtermap(fun({Pid, Fun}) ->
- case erlang:fun_info(Fun, name) of
- {name, Name} -> {true, Pid};
- _ -> false
- end
- end, AnonymousFuns).
+ AnonymousFuns = select(
+ ets:fun2ms(fun({spawned, Pid, _TS, _Name, _Dict, [PPid, {erlang, apply, [Fun, _]}]}) when
+ is_function(Fun) andalso PPid =:= ParentPid
+ ->
+ {Pid, Fun}
+ end)
+ ),
+ lists:filtermap(
+ fun({Pid, Fun}) ->
+ case erlang:fun_info(Fun, name) of
+ {name, Name} -> {true, Pid};
+ _ -> false
+ end
+ end,
+ AnonymousFuns
+ ).
find_calls_to_fun(Pid, {Module, Function, Arity}) ->
- select(ets:fun2ms(fun
- ({call, P, _TS, _Name, _Dict, [{M, F, Args}]})
- when length(Args) =:= Arity
- andalso M =:= Module
- andalso F =:= Function
- andalso P =:= Pid
- -> Args
- end)).
+ select(
+ ets:fun2ms(fun({call, P, _TS, _Name, _Dict, [{M, F, Args}]}) when
+ length(Args) =:= Arity andalso
+ M =:= Module andalso
+ F =:= Function andalso
+ P =:= Pid
+ ->
+ Args
+ end)
+ ).
wait_spawn_event_for_pid(ParentPid) ->
- wait_events(ets:fun2ms(fun
- ({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid
- end)).
+ wait_events(
+ ets:fun2ms(fun({spawned, Pid, _TS, _Name, _Dict, [P, _]}) when P =:= ParentPid -> Pid end)
+ ).
wait_spawn_by_anonymous_fun(ParentPid, Name) ->
- test_util:wait_other_value(fun() ->
- find_spawned_by_anonymous_fun(ParentPid, Name)
- end, []).
+ test_util:wait_other_value(
+ fun() ->
+ find_spawned_by_anonymous_fun(ParentPid, Name)
+ end,
+ []
+ ).
updater_pid(IndexerPid) ->
{links, Links} = process_info(IndexerPid, links),
@@ -180,20 +216,25 @@ updater_pid(IndexerPid) ->
Pid.
select_process_by_name_prefix(Pids, Name) ->
- lists:filter(fun(Pid) ->
- Key = couch_debug:process_name(Pid),
- string:str(Key, Name) =:= 1
- end, Pids).
+ lists:filter(
+ fun(Pid) ->
+ Key = couch_debug:process_name(Pid),
+ string:str(Key, Name) =:= 1
+ end,
+ Pids
+ ).
select(MatchSpec) ->
- lists:filtermap(fun(Event) ->
- case ets:test_ms(Event, MatchSpec) of
- {ok, false} -> false;
- {ok, Result} -> {true, Result};
- _ -> false
- end
- end, tracer_events()).
-
+ lists:filtermap(
+ fun(Event) ->
+ case ets:test_ms(Event, MatchSpec) of
+ {ok, false} -> false;
+ {ok, Result} -> {true, Result};
+ _ -> false
+ end
+ end,
+ tracer_events()
+ ).
%% ========================
%% Tracer related functions
@@ -225,7 +266,7 @@ tracer_collector(Msg, Seq) ->
normalize_trace_msg(TraceMsg) ->
case tuple_to_list(TraceMsg) of
[trace_ts, Pid, Type | Info] ->
- {TraceInfo, [Timestamp]} = lists:split(length(Info)-1, Info),
+ {TraceInfo, [Timestamp]} = lists:split(length(Info) - 1, Info),
{Type, Pid, Timestamp, couch_debug:process_name(Pid), process_info(Pid), TraceInfo};
[trace, Pid, Type | TraceInfo] ->
{Type, Pid, os:timestamp(), couch_debug:process_name(Pid), process_info(Pid), TraceInfo}
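
The tracer helpers reformatted above all follow the same pattern: build a match spec from a literal fun with `ets:fun2ms/1`, then run it over recorded trace events with `ets:test_ms/2`. A minimal standalone sketch of that pattern (the module name and the simplified event shape are illustrative, not part of this change):

```erlang
-module(trace_filter_sketch).
-include_lib("stdlib/include/ms_transform.hrl").
-export([calls_for/2]).

%% Keep only the argument lists of `call` events made by one process,
%% using the same fun2ms/test_ms combination as the helpers above.
calls_for(Events, Pid) ->
    MatchSpec = ets:fun2ms(fun({call, P, {_M, _F, Args}}) when P =:= Pid -> Args end),
    lists:filtermap(
        fun(Event) ->
            case ets:test_ms(Event, MatchSpec) of
                {ok, false} -> false;
                {ok, Result} -> {true, Result};
                _ -> false
            end
        end,
        Events
    ).
```
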
diff --git a/src/couch/test/eunit/couch_js_tests.erl b/src/couch/test/eunit/couch_js_tests.erl
index c68d60125..1079678da 100644
--- a/src/couch/test/eunit/couch_js_tests.erl
+++ b/src/couch/test/eunit/couch_js_tests.erl
@@ -13,7 +13,6 @@
-module(couch_js_tests).
-include_lib("eunit/include/eunit.hrl").
-
couch_js_test_() ->
{
"Test couchjs",
@@ -32,104 +31,131 @@ couch_js_test_() ->
}
}.
-
should_create_sandbox() ->
% Try and detect whether we can see out of the
% sandbox or not.
Src = <<
- "function(doc) {\n"
- " try {\n"
- " emit(false, typeof(Couch.compile_function));\n"
- " } catch (e) {\n"
- " emit(true, e.message);\n"
- " }\n"
- "}\n"
+ "function(doc) {\n"
+ " try {\n"
+ " emit(false, typeof(Couch.compile_function));\n"
+ " } catch (e) {\n"
+ " emit(true, e.message);\n"
+ " }\n"
+ "}\n"
>>,
Proc = couch_query_servers:get_os_process(<<"javascript">>),
true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]),
?assertEqual([[[true, <<"Couch is not defined">>]]], Result).
-
should_roundtrip_utf8() ->
% Try round tripping UTF-8 both directions through
% couchjs. These tests use hex encoded values of
% Ä (C384) and Ü (C39C) so as to avoid odd editor/Erlang encoding
% strangeness.
Src = <<
- "function(doc) {\n"
- " emit(doc.value, \"", 16#C3, 16#9C, "\");\n"
- "}\n"
+ "function(doc) {\n"
+ " emit(doc.value, \"",
+ 16#C3,
+ 16#9C,
+ "\");\n"
+ "}\n"
>>,
Proc = couch_query_servers:get_os_process(<<"javascript">>),
true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc = {[
- {<<"value">>, <<16#C3, 16#84>>}
- ]},
+ Doc =
+ {[
+ {<<"value">>, <<16#C3, 16#84>>}
+ ]},
Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
?assertEqual([[[<<16#C3, 16#84>>, <<16#C3, 16#9C>>]]], Result).
-
should_roundtrip_modified_utf8() ->
    % Mimicking the test case from the mailing list
Src = <<
- "function(doc) {\n"
- " emit(doc.value.toLowerCase(), \"", 16#C3, 16#9C, "\");\n"
- "}\n"
+ "function(doc) {\n"
+ " emit(doc.value.toLowerCase(), \"",
+ 16#C3,
+ 16#9C,
+ "\");\n"
+ "}\n"
>>,
Proc = couch_query_servers:get_os_process(<<"javascript">>),
true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc = {[
- {<<"value">>, <<16#C3, 16#84>>}
- ]},
+ Doc =
+ {[
+ {<<"value">>, <<16#C3, 16#84>>}
+ ]},
Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
?assertEqual([[[<<16#C3, 16#A4>>, <<16#C3, 16#9C>>]]], Result).
-
should_replace_broken_utf16() ->
    % This test reverses the surrogate pair of
% the Boom emoji U+1F4A5
Src = <<
- "function(doc) {\n"
- " emit(doc.value.split(\"\").reverse().join(\"\"), 1);\n"
- "}\n"
+ "function(doc) {\n"
+ " emit(doc.value.split(\"\").reverse().join(\"\"), 1);\n"
+ "}\n"
>>,
Proc = couch_query_servers:get_os_process(<<"javascript">>),
true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
- Doc = {[
- {<<"value">>, list_to_binary(xmerl_ucs:to_utf8([16#1F4A5]))}
- ]},
+ Doc =
+ {[
+ {<<"value">>, list_to_binary(xmerl_ucs:to_utf8([16#1F4A5]))}
+ ]},
Result = couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, Doc]),
% Invalid UTF-8 gets replaced with the 16#FFFD replacement
% marker
Markers = list_to_binary(xmerl_ucs:to_utf8([16#FFFD, 16#FFFD])),
?assertEqual([[[Markers, 1]]], Result).
-
should_allow_js_string_mutations() ->
% This binary corresponds to this string: мама мыла раму
% Which I'm told translates to: "mom was washing the frame"
MomWashedTheFrame = <<
- 16#D0, 16#BC, 16#D0, 16#B0, 16#D0, 16#BC, 16#D0, 16#B0, 16#20,
- 16#D0, 16#BC, 16#D1, 16#8B, 16#D0, 16#BB, 16#D0, 16#B0, 16#20,
- 16#D1, 16#80, 16#D0, 16#B0, 16#D0, 16#BC, 16#D1, 16#83
+ 16#D0,
+ 16#BC,
+ 16#D0,
+ 16#B0,
+ 16#D0,
+ 16#BC,
+ 16#D0,
+ 16#B0,
+ 16#20,
+ 16#D0,
+ 16#BC,
+ 16#D1,
+ 16#8B,
+ 16#D0,
+ 16#BB,
+ 16#D0,
+ 16#B0,
+ 16#20,
+ 16#D1,
+ 16#80,
+ 16#D0,
+ 16#B0,
+ 16#D0,
+ 16#BC,
+ 16#D1,
+ 16#83
>>,
Mom = <<16#D0, 16#BC, 16#D0, 16#B0, 16#D0, 16#BC, 16#D0, 16#B0>>,
Washed = <<16#D0, 16#BC, 16#D1, 16#8B, 16#D0, 16#BB, 16#D0, 16#B0>>,
Src1 = <<
- "function(doc) {\n"
- " emit(\"length\", doc.value.length);\n"
- "}\n"
+ "function(doc) {\n"
+ " emit(\"length\", doc.value.length);\n"
+ "}\n"
>>,
Src2 = <<
- "function(doc) {\n"
- " emit(\"substring\", doc.value.substring(5, 9));\n"
- "}\n"
+ "function(doc) {\n"
+ " emit(\"substring\", doc.value.substring(5, 9));\n"
+ "}\n"
>>,
Src3 = <<
- "function(doc) {\n"
- " emit(\"slice\", doc.value.slice(0, 4));\n"
- "}\n"
+ "function(doc) {\n"
+ " emit(\"slice\", doc.value.slice(0, 4));\n"
+ "}\n"
>>,
Proc = couch_query_servers:get_os_process(<<"javascript">>),
true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src1]),
@@ -144,29 +170,30 @@ should_allow_js_string_mutations() ->
],
?assertEqual(Expect, Result).
-
should_exit_on_oom() ->
Src = <<
- "var state = [];\n"
- "function(doc) {\n"
- " var val = \"0123456789ABCDEF\";\n"
- " for(var i = 0; i < 665535; i++) {\n"
- " state.push([val, val]);\n"
- " emit(null, null);\n"
- " }\n"
- "}\n"
+ "var state = [];\n"
+ "function(doc) {\n"
+ " var val = \"0123456789ABCDEF\";\n"
+ " for(var i = 0; i < 665535; i++) {\n"
+ " state.push([val, val]);\n"
+ " emit(null, null);\n"
+ " }\n"
+ "}\n"
>>,
Proc = couch_query_servers:get_os_process(<<"javascript">>),
true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
trigger_oom(Proc).
trigger_oom(Proc) ->
- Status = try
- couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]),
- continue
- catch throw:{os_process_error, {exit_status, 1}} ->
- done
- end,
+ Status =
+ try
+ couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, <<"{}">>]),
+ continue
+ catch
+ throw:{os_process_error, {exit_status, 1}} ->
+ done
+ end,
case Status of
continue -> trigger_oom(Proc);
done -> ok
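
All of the couchjs cases above drive the same three-step protocol: check out an OS process, register the map source with `add_fun`, then prompt it with `map_doc`. A condensed sketch of those calls, suitable for dropping into the test module above (the document and map source here are illustrative):

```erlang
%% The prompt returns one list of [Key, Value] rows per registered map fun;
%% for this doc and fun the expected shape is [[[1, 1]]].
map_doc_sketch() ->
    Src = <<"function(doc) { emit(doc.value, 1); }">>,
    Proc = couch_query_servers:get_os_process(<<"javascript">>),
    true = couch_query_servers:proc_prompt(Proc, [<<"add_fun">>, Src]),
    couch_query_servers:proc_prompt(Proc, [<<"map_doc">>, {[{<<"value">>, 1}]}]).
```
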
diff --git a/src/couch/test/eunit/couch_key_tree_prop_tests.erl b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
index 9c09aace5..d6ed26553 100644
--- a/src/couch/test/eunit/couch_key_tree_prop_tests.erl
+++ b/src/couch/test/eunit/couch_key_tree_prop_tests.erl
@@ -12,49 +12,53 @@
-module(couch_key_tree_prop_tests).
-
-ifdef(WITH_PROPER).
-include_lib("couch/include/couch_eunit_proper.hrl").
-
--define(SIZE_REDUCTION, 3). % How much to reduce size with tree depth.
--define(MAX_BRANCHES, 4). % Maximum number of branches.
+% How much to reduce size with tree depth.
+-define(SIZE_REDUCTION, 3).
+% Maximum number of branches.
+-define(MAX_BRANCHES, 4).
-define(RAND_SIZE, 1 bsl 64).
-
property_test_() ->
?EUNIT_QUICKCHECK(60).
-
%
% Properties
%
-
% Merge random paths from a revtree into itself. Check that no revisions have
% been lost in the process and that the result is one of the 3 expected values.
%
prop_revtree_merge_with_subset_of_own_nodes() ->
- ?FORALL(Revs, g_revs(),
- ?FORALL({RevTree, Branch}, {g_revtree(Revs), g_revtree(Revs, 1)},
- ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
+ ?FORALL(
+ Revs,
+ g_revs(),
+ ?FORALL(
+ {RevTree, Branch},
+ {g_revtree(Revs), g_revtree(Revs, 1)},
+ ?IMPLIES(
+ length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
begin
{Merged, Result} = couch_key_tree:merge(RevTree, hd(Branch)),
- lists:member(Result, [new_leaf, new_branch, internal_node])
- andalso same_keys(RevTree ++ Branch, Merged)
- andalso valid_revtree(Merged)
+ lists:member(Result, [new_leaf, new_branch, internal_node]) andalso
+ same_keys(RevTree ++ Branch, Merged) andalso
+ valid_revtree(Merged)
end
)
)
).
-
% Merge random trees into revtree.
%
prop_revtree_merge_random_nodes() ->
- ?FORALL({RevTree, Branch}, {g_revtree(), g_revtree([], 1)},
- ?IMPLIES(length(Branch) > 0,
+ ?FORALL(
+ {RevTree, Branch},
+ {g_revtree(), g_revtree([], 1)},
+ ?IMPLIES(
+ length(Branch) > 0,
begin
{Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
valid_revtree(Merged)
@@ -62,33 +66,35 @@ prop_revtree_merge_random_nodes() ->
)
).
-
-
% Merge a mix of random and existing revtree paths into a revtree
%
prop_revtree_merge_some_existing_some_new() ->
- ?FORALL(RevTree, g_revtree(),
- ?FORALL(Branch,
+ ?FORALL(
+ RevTree,
+ g_revtree(),
+ ?FORALL(
+ Branch,
begin
KeyList = keylist(RevTree),
Half = lists:sublist(KeyList, length(KeyList) div 2),
g_revtree(Half, 1)
end,
- ?IMPLIES(length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
- begin
- {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
- valid_revtree(Merged)
- end
+ ?IMPLIES(
+ length(Branch) > 0 andalso repeating_revs(levels(RevTree ++ Branch)) == [],
+ begin
+ {Merged, _} = couch_key_tree:merge(RevTree, hd(Branch)),
+ valid_revtree(Merged)
+ end
)
)
).
-
-
% Stem deeper than the current max level. Expect no changes to the revtree
%
prop_no_change_stemming_deeper_than_current_depth() ->
- ?FORALL(RevTree, g_revtree(),
+ ?FORALL(
+ RevTree,
+ g_revtree(),
begin
StemDepth = depth(RevTree) + 1,
Stemmed = couch_key_tree:stem(RevTree, StemDepth),
@@ -98,12 +104,13 @@ prop_no_change_stemming_deeper_than_current_depth() ->
end
).
-
% Stem at a random small depth, make sure that the resulting tree has
% unique revisions and the same number of revisions as the input, or fewer
%
prop_stemming_results_in_same_or_less_total_revs() ->
- ?FORALL({RevTree, StemDepth}, {g_revtree(), choose(1, 20)},
+ ?FORALL(
+ {RevTree, StemDepth},
+ {g_revtree(), choose(1, 20)},
begin
Stemmed = couch_key_tree:stem(RevTree, StemDepth),
OldRealDepth = real_depth(RevTree),
@@ -111,89 +118,95 @@ prop_stemming_results_in_same_or_less_total_revs() ->
UniqueStemmedKeys = lists:usort(StemmedKeys),
UniqueInputKeys = lists:usort(keylist(RevTree)),
NewRealDepth = real_depth(Stemmed),
- length(StemmedKeys) == length(UniqueStemmedKeys)
- andalso length(UniqueStemmedKeys) =< length(UniqueInputKeys)
- andalso OldRealDepth >= NewRealDepth
+ length(StemmedKeys) == length(UniqueStemmedKeys) andalso
+ length(UniqueStemmedKeys) =< length(UniqueInputKeys) andalso
+ OldRealDepth >= NewRealDepth
end
).
-
% Generate a longer path (a revtree with no branches), then stem it.
% Always expect it to shrink to the stemmed depth.
prop_stem_path_expect_size_to_get_smaller() ->
- ?FORALL({RevTree, StemDepth},
+ ?FORALL(
+ {RevTree, StemDepth},
{
?SIZED(Size, g_revtree(Size * 10, [], 1)),
- choose(1,3)
+ choose(1, 3)
},
- ?IMPLIES(real_depth(RevTree) > 3,
+ ?IMPLIES(
+ real_depth(RevTree) > 3,
begin
Stemmed = couch_key_tree:stem(RevTree, StemDepth),
StemmedKeys = lists:usort(keylist(Stemmed)),
InputKeys = lists:usort(keylist(RevTree)),
- length(InputKeys) > length(StemmedKeys)
- andalso real_depth(Stemmed) == StemDepth
+ length(InputKeys) > length(StemmedKeys) andalso
+ real_depth(Stemmed) == StemDepth
end
)
).
-
% After stemming all leaves are still present
prop_after_stemming_all_leaves_are_present() ->
- ?FORALL({RevTree, StemDepth},
- {g_revtree(), choose(1,20)},
+ ?FORALL(
+ {RevTree, StemDepth},
+ {g_revtree(), choose(1, 20)},
begin
OldRealDepth = real_depth(RevTree),
OldLeaves = leaves(RevTree),
Stemmed = couch_key_tree:stem(RevTree, StemDepth),
NewRealDepth = real_depth(Stemmed),
NewLeaves = leaves(Stemmed),
- valid_revtree(Stemmed)
- andalso OldRealDepth >= NewRealDepth
- andalso OldLeaves == NewLeaves
-
+ valid_revtree(Stemmed) andalso
+ OldRealDepth >= NewRealDepth andalso
+ OldLeaves == NewLeaves
end
).
-
% After stemming paths to root didn't get longer
prop_after_stemming_paths_are_shorter() ->
- ?FORALL({StemDepth, RevTree}, {choose(2,10), g_revtree()},
+ ?FORALL(
+ {StemDepth, RevTree},
+ {choose(2, 10), g_revtree()},
begin
OldPaths = paths(RevTree),
Stemmed = couch_key_tree:stem(RevTree, StemDepth),
NewPaths = paths(Stemmed),
- GrowingPaths = orddict:fold(fun(Rev, Path, Acc) ->
- OldPath = orddict:fetch(Rev, OldPaths),
- case length(Path) > length(OldPath) of
- true ->
- [{Rev, Path, OldPath}| Acc];
- false ->
- Acc
- end
- end, [], NewPaths),
+ GrowingPaths = orddict:fold(
+ fun(Rev, Path, Acc) ->
+ OldPath = orddict:fetch(Rev, OldPaths),
+ case length(Path) > length(OldPath) of
+ true ->
+ [{Rev, Path, OldPath} | Acc];
+ false ->
+ Acc
+ end
+ end,
+ [],
+ NewPaths
+ ),
valid_revtree(Stemmed) andalso GrowingPaths == []
end
).
-
% Check leaf count
prop_leaf_count() ->
- ?FORALL(RevTree, g_revtree(),
+ ?FORALL(
+ RevTree,
+ g_revtree(),
length(leaves(RevTree)) == couch_key_tree:count_leafs(RevTree)
).
-
% Check get leafs
prop_get_leafs() ->
- ?FORALL(RevTree, g_revtree(),
+ ?FORALL(
+ RevTree,
+ g_revtree(),
begin
LeafsFull = couch_key_tree:get_all_leafs(RevTree),
lists:usort([Rev || {_V, {_D, [Rev | _]}} <- LeafsFull]) == leaves(RevTree)
end
).
-
%
% Generators
%
@@ -205,39 +218,40 @@ prop_get_leafs() ->
g_revtree() ->
?SIZED(Size, g_revtree(Size)).
-
g_revtree(Size) when is_integer(Size) ->
g_revtree(Size, [], ?MAX_BRANCHES);
g_revtree(Revs) when is_list(Revs) ->
?SIZED(Size, g_revtree(Size, Revs, ?MAX_BRANCHES)).
-
g_revtree(Size, Revs) when is_integer(Size), is_list(Revs) ->
g_revtree(Size, Revs, ?MAX_BRANCHES);
g_revtree(Revs, MaxBranches) when is_list(Revs), is_integer(MaxBranches) ->
?SIZED(Size, g_revtree(Size, Revs, MaxBranches)).
-
g_revtree(0, _Revs, _MaxBranches) ->
[];
g_revtree(Size, ERevs, MaxBranches) ->
- ?LET({Depth, Revs}, {g_stem_depth(Size), g_revs(Size, ERevs)},
- [{Depth, g_treenode(Size, Revs, MaxBranches)}]
+ ?LET(
+ {Depth, Revs},
+ {g_stem_depth(Size), g_revs(Size, ERevs)},
+ [{Depth, g_treenode(Size, Revs, MaxBranches)}]
).
-
% Generate a tree node and then recursively generate its children.
%
g_treenode(0, Revs, _) ->
{elements(Revs), x, []};
g_treenode(Size, Revs, MaxBranches) ->
- ?LAZY(?LET(N, choose(0, MaxBranches),
- begin
- [Rev | ChildRevs] = Revs,
- {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)}
- end
- )).
-
+ ?LAZY(
+ ?LET(
+ N,
+ choose(0, MaxBranches),
+ begin
+ [Rev | ChildRevs] = Revs,
+ {Rev, x, g_nodes(Size div ?SIZE_REDUCTION, N, ChildRevs, MaxBranches)}
+ end
+ )
+ ).
% Generate a list of child nodes. Depending on how many children there are
% the pre-generated revision list is split into that many sublists.
@@ -256,82 +270,70 @@ g_nodes(Size, ChildCount, Revs, MaxBranches) ->
ordered_nodes(ChildNodes)
).
-
% Generate each subtree's stem depth
%
-
g_stem_depth(Size) ->
- choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2).
-
+ choose(0, expected_height(Size, ?SIZE_REDUCTION) div 2).
% Uses the shuffle/1 function to shuffle the input list. Unshuffled list is
% used as the shrink value.
%
-g_shuffle([]) -> [];
+g_shuffle([]) ->
+ [];
g_shuffle(L) when is_list(L) ->
- ?LET(X, elements(L), [X | g_shuffle(lists:delete(X,L))]).
-
+ ?LET(X, elements(L), [X | g_shuffle(lists:delete(X, L))]).
% Wrapper to make a list shuffling generator that doesn't shrink
%
g_shuffle_noshrink(L) when is_list(L) ->
proper_types:noshrink(g_shuffle(L)).
-
% Generate shuffled sublists up to N items long from a list.
%
g_shuffled_sublists(L, N) ->
?LET(Shuffled, g_shuffle_noshrink(L), lists:sublist(Shuffled, N)).
-
% Generate revision lists.
%
g_revs() ->
?SIZED(Size, g_revs(Size)).
-
g_revs(Size) when is_integer(Size) ->
g_revs(Size, []).
-
g_revs(Size, Existing) when is_integer(Size), is_list(Existing) ->
Expected = keys_needed(Size, ?SIZE_REDUCTION, ?MAX_BRANCHES),
Revs = revs(Expected, Existing),
case length(Revs) > Expected of
- true -> % have extra, try various sublists
+ % have extra, try various sublists
+ true ->
g_shuffled_sublists(Revs, Expected);
false ->
proper_types:return(Revs)
end.
-
%
% Helper functions
%
-
valid_revtree(RevTree) ->
repeating_revs(levels(RevTree)) == [] andalso children_sorted(RevTree).
-
same_keys(RevTree1, RevTree2) ->
Keys1 = lists:usort(keylist(RevTree1)),
Keys2 = lists:usort(keylist(RevTree2)),
Keys1 == Keys2.
-
all(L) ->
lists:all(fun(E) -> E end, L).
-
% Generate a list of relatively unique large random numbers
rand_list(N) when N =< 0 ->
[];
rand_list(N) ->
[rand:uniform(?RAND_SIZE) || _ <- lists:seq(1, N)].
-
% Generate a list of revisions to be used as keys in revision trees. Expected
% must be the maximum expected number of nodes in a revision tree. Existing is an
% optional list of revisions which must be included in the result. The output list
@@ -342,7 +344,6 @@ revs(Expected, Existing) when is_integer(Expected), is_list(Existing) ->
Need = Expected - length(Existing),
lists:usort(lists:append(Existing, rand_list(Need))).
-
% Get the list of all the keys in a revision tree. The input can also be
% an individual tree (tagged with the depth to the virtual root) or a node.
% Yes, this is not tail recursive but the idea is to keep it simple.
@@ -354,7 +355,6 @@ keylist({K, _V, Nodes}) ->
keylist(Nodes) ->
lists:append([keylist(Node) || Node <- Nodes]).
-
% Get the list of leaves from a revision tree.
leaves([]) ->
[];
@@ -367,7 +367,6 @@ leaves({_K, _V, Nodes}) ->
leaves(Nodes) ->
lists:usort(lists:append([leaves(N) || N <- Nodes])).
-
% Get paths from leaf to root. Result is an orddict of [{LeafRev, [Rev]}]
%
paths([]) ->
@@ -382,14 +381,20 @@ paths({K, _V, Nodes}) ->
CombinedDict = paths_merge_dicts([paths(N) || N <- Nodes]),
orddict:map(fun(_LeafKey, Path) -> Path ++ [K] end, CombinedDict).
-
paths_merge_dicts(Dicts) ->
- lists:foldl(fun(D, AccD) ->
- orddict:merge(fun(K, V1, V2) ->
- throw({found_duplicates, K, V1, V2})
- end, D, AccD)
- end, orddict:new(), Dicts).
-
+ lists:foldl(
+ fun(D, AccD) ->
+ orddict:merge(
+ fun(K, V1, V2) ->
+ throw({found_duplicates, K, V1, V2})
+ end,
+ D,
+ AccD
+ )
+ end,
+ orddict:new(),
+ Dicts
+ ).
% Get lists of all the keys at each depth level. Result is an orddict that
% looks like [{depth, [key]}]. The depth used here is the "virtual" depth as
@@ -400,32 +405,36 @@ levels([]) ->
levels(RevTree) when is_list(RevTree) ->
lists:foldl(fun(T, Dict) -> levels(T, Dict) end, orddict:new(), RevTree).
-
levels({Depth, Node}, Dict) when is_tuple(Node) ->
levels(Node, Depth, Dict).
-
levels({K, _V, Nodes}, Depth, Dict) ->
- Dict1 = case orddict:is_key(Depth, Dict) of
- true -> orddict:append(Depth, K, Dict);
- false -> orddict:store(Depth, [K], Dict)
- end,
+ Dict1 =
+ case orddict:is_key(Depth, Dict) of
+ true -> orddict:append(Depth, K, Dict);
+ false -> orddict:store(Depth, [K], Dict)
+ end,
levels(Nodes, Depth + 1, Dict1);
levels(Nodes, Depth, Dict) ->
- lists:foldl(fun(Node, AccDict) ->
- levels(Node, Depth, AccDict)
- end, Dict, Nodes).
-
+ lists:foldl(
+ fun(Node, AccDict) ->
+ levels(Node, Depth, AccDict)
+ end,
+ Dict,
+ Nodes
+ ).
% Using the output of levels/1 as input, return any repeating revisions if
% there are any at a particular level. Levels which have no revisions are
% not returned.
%
repeating_revs(Dict) ->
- orddict:filter(fun(_Depth, Revs) ->
- length(lists:usort(Revs)) =/= length(Revs)
- end, Dict).
-
+ orddict:filter(
+ fun(_Depth, Revs) ->
+ length(lists:usort(Revs)) =/= length(Revs)
+ end,
+ Dict
+ ).
% Check that children of all nodes are sorted
children_sorted([]) ->
@@ -437,7 +446,6 @@ children_sorted({_D, Node}) when is_tuple(Node) ->
children_sorted({_K, _V, Nodes}) ->
children_sorted(Nodes).
-
% Get the maximum depth of a revtree. The depth is "virtual" as it takes into
% account the distance to the now stemmed root node as indicated by the top
% level subtrees.
@@ -449,7 +457,6 @@ depth(RevTree) when is_list(RevTree) ->
depth({Depth, Node}) when is_tuple(Node) ->
depth(Node, Depth - 1).
-
depth({_K, _V, Nodes}, Depth) ->
depth(Nodes, Depth + 1);
depth([], Depth) ->
@@ -457,7 +464,6 @@ depth([], Depth) ->
depth(Nodes, Depth) ->
lists:max([depth(Node, Depth) || Node <- Nodes]).
-
% Get the "real" tree depth, not the virtual one. As revtrees gets stemmed they
% will keep their virtual depth but the actual number of nodes in the tree
% could be reduced.
@@ -467,8 +473,8 @@ real_depth([]) ->
real_depth(RevTree) when is_list(RevTree) ->
lists:max([real_depth(T) || T <- RevTree]);
real_depth({_Depth, Node}) when is_tuple(Node) ->
- depth(Node, 0). % Note from here on use the depth/3 function
-
+ % Note from here on use the depth/3 function
+ depth(Node, 0).
% Return an ordered list of revtree nodes. When sorting, only immediate keys
% (revisions) are looked at and the comparison doesn't descend into the tree.
@@ -476,7 +482,6 @@ real_depth({_Depth, Node}) when is_tuple(Node) ->
ordered_nodes(Nodes) ->
lists:sort(fun({K1, _, _}, {K2, _, _}) -> K1 =< K2 end, Nodes).
-
% Calculate a maximum number of rev tree nodes needed for a tree of a given
% height and branchiness. Height is derived from Size and LevelReductionFactor,
% that is, how big the sample should be and how quickly the size parameter would
@@ -487,20 +492,17 @@ keys_needed(0, _, _) ->
keys_needed(Size, LevelReductionFactor, 1) ->
expected_height(Size, LevelReductionFactor);
keys_needed(Size, LevelReductionFactor, Branches) ->
- Height = expected_height(Size, LevelReductionFactor),
+ Height = expected_height(Size, LevelReductionFactor),
trunc(math:pow(Branches, Height + 1)) + 1.
-
% Calculate expected tree height for a given sample size and branchiness.
% At each step the size is divided by the reduction factor.
expected_height(Size, LevelReductionFactor) ->
trunc(log(LevelReductionFactor, Size)) + 1.
-
log(B, X) ->
math:log(X) / math:log(B).
-
% Distribute items in a list into roughly equal chunks of a given size.
%
distribute(_ChunkSize, []) ->
@@ -511,7 +513,6 @@ distribute(ChunkSize, L) ->
{L1, L2} = lists:split(ChunkSize, L),
[L1 | distribute(ChunkSize, L2)].
-
% Split a single (parent) revision list into chunks (sub-lists), one for each
% child. Also, for safety, double check that at this point in the process the
% list of revisions is sufficiently large. If it isn't, something went wrong and
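
The properties reformatted above all share the same erlfmt layout: each `?FORALL` puts the bound pattern, the generator, and the body on their own lines. A tiny self-contained property in that layout, unrelated to revtrees and shown only to illustrate the shape (assuming the couch_eunit_proper header pulls in PropEr's generators, as it does for this module):

```erlang
-module(prop_layout_sketch).

-ifdef(WITH_PROPER).
-include_lib("couch/include/couch_eunit_proper.hrl").

property_test_() ->
    ?EUNIT_QUICKCHECK(60).

%% Reversing a list twice yields the original list.
prop_reverse_twice() ->
    ?FORALL(
        L,
        list(integer()),
        lists:reverse(lists:reverse(L)) == L
    ).

-endif.
```
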
diff --git a/src/couch/test/eunit/couch_key_tree_tests.erl b/src/couch/test/eunit/couch_key_tree_tests.erl
index 5d9cc8372..f571139c9 100644
--- a/src/couch/test/eunit/couch_key_tree_tests.erl
+++ b/src/couch/test/eunit/couch_key_tree_tests.erl
@@ -16,8 +16,7 @@
-define(DEPTH, 10).
-
-key_tree_merge_test_()->
+key_tree_merge_test_() ->
{
"Key tree merge",
[
@@ -40,16 +39,16 @@ key_tree_merge_test_()->
]
}.
-key_tree_missing_leaves_test_()->
+key_tree_missing_leaves_test_() ->
{
- "Missing tree leaves",
- [
- should_not_find_missing_leaves(),
- should_find_missing_leaves()
- ]
+ "Missing tree leaves",
+ [
+ should_not_find_missing_leaves(),
+ should_find_missing_leaves()
+ ]
}.
-key_tree_remove_leaves_test_()->
+key_tree_remove_leaves_test_() ->
{
"Remove tree leaves",
[
@@ -62,7 +61,7 @@ key_tree_remove_leaves_test_()->
]
}.
-key_tree_get_leaves_test_()->
+key_tree_get_leaves_test_() ->
{
"Leaves retrieving",
[
@@ -80,7 +79,7 @@ key_tree_get_leaves_test_()->
]
}.
-key_tree_leaf_counting_test_()->
+key_tree_leaf_counting_test_() ->
{
"Leaf counting",
[
@@ -91,7 +90,7 @@ key_tree_leaf_counting_test_()->
]
}.
-key_tree_stemming_test_()->
+key_tree_stemming_test_() ->
{
"Stemming",
[
@@ -101,48 +100,71 @@ key_tree_stemming_test_()->
]
}.
-
-should_merge_with_empty_tree()->
- One = {1, {"1","foo",[]}},
- ?_assertEqual({[One], new_leaf},
- merge_and_stem([], One)).
-
-should_merge_reflexive()->
- One = {1, {"1","foo",[]}},
- ?_assertEqual({[One], internal_node},
- merge_and_stem([One], One)).
-
-should_merge_prefix_of_a_tree_with_tree()->
- One = {1, {"1","foo",[]}},
- TwoSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}}],
- ?_assertEqual({TwoSibs, internal_node},
- merge_and_stem(TwoSibs, One)).
-
-should_produce_conflict_on_merge_with_unrelated_branch()->
- TwoSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}}],
- Three = {1, {"3","foo",[]}},
- ThreeSibs = [{1, {"1","foo",[]}},
- {1, {"2","foo",[]}},
- {1, {"3","foo",[]}}],
- ?_assertEqual({ThreeSibs, new_branch},
- merge_and_stem(TwoSibs, Three)).
-
-should_merge_reflexive_for_child_nodes()->
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], internal_node},
- merge_and_stem([TwoChild], TwoChild)).
-
-should_merge_tree_to_itself()->
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", []}]}},
+should_merge_with_empty_tree() ->
+ One = {1, {"1", "foo", []}},
+ ?_assertEqual(
+ {[One], new_leaf},
+ merge_and_stem([], One)
+ ).
+
+should_merge_reflexive() ->
+ One = {1, {"1", "foo", []}},
+ ?_assertEqual(
+ {[One], internal_node},
+ merge_and_stem([One], One)
+ ).
+
+should_merge_prefix_of_a_tree_with_tree() ->
+ One = {1, {"1", "foo", []}},
+ TwoSibs = [
+ {1, {"1", "foo", []}},
+ {1, {"2", "foo", []}}
+ ],
+ ?_assertEqual(
+ {TwoSibs, internal_node},
+ merge_and_stem(TwoSibs, One)
+ ).
+
+should_produce_conflict_on_merge_with_unrelated_branch() ->
+ TwoSibs = [
+ {1, {"1", "foo", []}},
+ {1, {"2", "foo", []}}
+ ],
+ Three = {1, {"3", "foo", []}},
+ ThreeSibs = [
+ {1, {"1", "foo", []}},
+ {1, {"2", "foo", []}},
+ {1, {"3", "foo", []}}
+ ],
+ ?_assertEqual(
+ {ThreeSibs, new_branch},
+ merge_and_stem(TwoSibs, Three)
+ ).
+
+should_merge_reflexive_for_child_nodes() ->
+ TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual(
+ {[TwoChild], internal_node},
+ merge_and_stem([TwoChild], TwoChild)
+ ).
+
+should_merge_tree_to_itself() ->
+ TwoChildSibs =
+ {1,
+ {"1", "foo", [
+ {"1a", "bar", []},
+ {"1b", "bar", []}
+ ]}},
Leafs = couch_key_tree:get_all_leafs([TwoChildSibs]),
Paths = lists:map(fun leaf_to_path/1, Leafs),
- FinalTree = lists:foldl(fun(Path, TreeAcc) ->
- {NewTree, internal_node} = merge_and_stem(TreeAcc, Path),
- NewTree
- end, [TwoChildSibs], Paths),
+ FinalTree = lists:foldl(
+ fun(Path, TreeAcc) ->
+ {NewTree, internal_node} = merge_and_stem(TreeAcc, Path),
+ NewTree
+ end,
+ [TwoChildSibs],
+ Paths
+ ),
?_assertEqual([TwoChildSibs], FinalTree).
leaf_to_path({Value, {Start, Keys}}) ->
@@ -154,260 +176,355 @@ to_branch(Value, [Key]) ->
to_branch(Value, [Key | RestKeys]) ->
[{Key, [], to_branch(Value, RestKeys)}].
-
-should_merge_tree_of_odd_length()->
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", []}]}},
- TwoChildPlusSibs = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]},
- {"1b", "bar", []}]}},
- ?_assertEqual({[TwoChildPlusSibs], new_leaf},
- merge_and_stem([TwoChildSibs], TwoChild)).
-
-should_merge_tree_with_stem()->
+should_merge_tree_of_odd_length() ->
+ TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ TwoChildSibs =
+ {1,
+ {"1", "foo", [
+ {"1a", "bar", []},
+ {"1b", "bar", []}
+ ]}},
+ TwoChildPlusSibs =
+ {1,
+ {"1", "foo", [
+ {"1a", "bar", [{"1aa", "bar", []}]},
+ {"1b", "bar", []}
+ ]}},
+ ?_assertEqual(
+ {[TwoChildPlusSibs], new_leaf},
+ merge_and_stem([TwoChildSibs], TwoChild)
+ ).
+
+should_merge_tree_with_stem() ->
Stemmed = {2, {"1a", "bar", []}},
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", []}]}},
-
- ?_assertEqual({[TwoChildSibs], internal_node},
- merge_and_stem([TwoChildSibs], Stemmed)).
-
-should_merge_with_stem_at_deeper_level()->
+ TwoChildSibs =
+ {1,
+ {"1", "foo", [
+ {"1a", "bar", []},
+ {"1b", "bar", []}
+ ]}},
+
+ ?_assertEqual(
+ {[TwoChildSibs], internal_node},
+ merge_and_stem([TwoChildSibs], Stemmed)
+ ).
+
+should_merge_with_stem_at_deeper_level() ->
Stemmed = {3, {"1bb", "boo", []}},
- TwoChildSibs = {1, {"1","foo", [{"1a", "bar", []},
- {"1b", "bar", [{"1bb", "boo", []}]}]}},
- ?_assertEqual({[TwoChildSibs], internal_node},
- merge_and_stem([TwoChildSibs], Stemmed)).
-
-should_merge_with_stem_at_deeper_level_with_deeper_paths()->
+ TwoChildSibs =
+ {1,
+ {"1", "foo", [
+ {"1a", "bar", []},
+ {"1b", "bar", [{"1bb", "boo", []}]}
+ ]}},
+ ?_assertEqual(
+ {[TwoChildSibs], internal_node},
+ merge_and_stem([TwoChildSibs], Stemmed)
+ ).
+
+should_merge_with_stem_at_deeper_level_with_deeper_paths() ->
Stemmed = {3, {"1bb", "boo", []}},
- StemmedTwoChildSibs = [{2,{"1a", "bar", []}},
- {2,{"1b", "bar", [{"1bb", "boo", []}]}}],
- ?_assertEqual({StemmedTwoChildSibs, internal_node},
- merge_and_stem(StemmedTwoChildSibs, Stemmed)).
-
-should_merge_single_tree_with_deeper_stem()->
+ StemmedTwoChildSibs = [
+ {2, {"1a", "bar", []}},
+ {2, {"1b", "bar", [{"1bb", "boo", []}]}}
+ ],
+ ?_assertEqual(
+ {StemmedTwoChildSibs, internal_node},
+ merge_and_stem(StemmedTwoChildSibs, Stemmed)
+ ).
+
+should_merge_single_tree_with_deeper_stem() ->
Stemmed = {3, {"1aa", "bar", []}},
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], internal_node},
- merge_and_stem([TwoChild], Stemmed)).
+ TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual(
+ {[TwoChild], internal_node},
+ merge_and_stem([TwoChild], Stemmed)
+ ).
-should_merge_tree_with_large_stem()->
+should_merge_tree_with_large_stem() ->
Stemmed = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], internal_node},
- merge_and_stem([TwoChild], Stemmed)).
+ TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual(
+ {[TwoChild], internal_node},
+ merge_and_stem([TwoChild], Stemmed)
+ ).
-should_merge_stems()->
+should_merge_stems() ->
StemmedA = {2, {"1a", "bar", [{"1aa", "bar", []}]}},
StemmedB = {3, {"1aa", "bar", []}},
- ?_assertEqual({[StemmedA], internal_node},
- merge_and_stem([StemmedA], StemmedB)).
+ ?_assertEqual(
+ {[StemmedA], internal_node},
+ merge_and_stem([StemmedA], StemmedB)
+ ).
-should_create_conflicts_on_merge()->
- OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
+should_create_conflicts_on_merge() ->
+ OneChild = {1, {"1", "foo", [{"1a", "bar", []}]}},
Stemmed = {3, {"1aa", "bar", []}},
- ?_assertEqual({[OneChild, Stemmed], new_branch},
- merge_and_stem([OneChild], Stemmed)).
+ ?_assertEqual(
+ {[OneChild, Stemmed], new_branch},
+ merge_and_stem([OneChild], Stemmed)
+ ).
-should_create_no_conflicts_on_merge()->
- OneChild = {1, {"1","foo",[{"1a", "bar", []}]}},
+should_create_no_conflicts_on_merge() ->
+ OneChild = {1, {"1", "foo", [{"1a", "bar", []}]}},
Stemmed = {3, {"1aa", "bar", []}},
- TwoChild = {1, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
- ?_assertEqual({[TwoChild], new_leaf},
- merge_and_stem([OneChild, Stemmed], TwoChild)).
+ TwoChild = {1, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}},
+ ?_assertEqual(
+ {[TwoChild], new_leaf},
+ merge_and_stem([OneChild, Stemmed], TwoChild)
+ ).
-should_ignore_conflicting_branch()->
+should_ignore_conflicting_branch() ->
%% this test is based on couch-902-test-case2.py
%% foo has conflicts from replication at depth two
%% foo3 is the current value
- Foo = {1, {"foo",
- "val1",
- [{"foo2","val2",[]},
+ Foo =
+ {1,
+ {"foo", "val1", [
+ {"foo2", "val2", []},
{"foo3", "val3", []}
- ]}},
+ ]}},
%% foo now has an attachment added, which leads to foo4 and val4
%% off foo3
- Bar = {1, {"foo",
- [],
- [{"foo3",
- [],
- [{"foo4","val4",[]}
- ]}]}},
+ Bar = {1, {"foo", [], [{"foo3", [], [{"foo4", "val4", []}]}]}},
%% this is what the merge returns
    %% note that it ignores the conflicting branch as there's no match
- FooBar = {1, {"foo",
- "val1",
- [{"foo2","val2",[]},
- {"foo3", "val3", [{"foo4","val4",[]}]}
- ]}},
+ FooBar =
+ {1,
+ {"foo", "val1", [
+ {"foo2", "val2", []},
+ {"foo3", "val3", [{"foo4", "val4", []}]}
+ ]}},
{
"COUCHDB-902",
- ?_assertEqual({[FooBar], new_leaf},
- merge_and_stem([Foo], Bar))
+ ?_assertEqual(
+ {[FooBar], new_leaf},
+ merge_and_stem([Foo], Bar)
+ )
}.
-should_not_find_missing_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual([],
- couch_key_tree:find_missing(TwoChildSibs,
- [{0,"1"}, {1,"1a"}])).
-
-should_find_missing_leaves()->
+should_not_find_missing_leaves() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ [],
+ couch_key_tree:find_missing(
+ TwoChildSibs,
+ [{0, "1"}, {1, "1a"}]
+ )
+ ).
+
+should_find_missing_leaves() ->
Stemmed1 = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
Stemmed2 = [{2, {"1aa", "bar", []}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
[
?_assertEqual(
[{0, "10"}, {100, "x"}],
couch_key_tree:find_missing(
TwoChildSibs,
- [{0,"1"}, {0, "10"}, {1,"1a"}, {100, "x"}])),
+ [{0, "1"}, {0, "10"}, {1, "1a"}, {100, "x"}]
+ )
+ ),
?_assertEqual(
[{0, "1"}, {100, "x"}],
couch_key_tree:find_missing(
Stemmed1,
- [{0,"1"}, {1,"1a"}, {100, "x"}])),
+ [{0, "1"}, {1, "1a"}, {100, "x"}]
+ )
+ ),
?_assertEqual(
- [{0, "1"}, {1,"1a"}, {100, "x"}],
+ [{0, "1"}, {1, "1a"}, {100, "x"}],
couch_key_tree:find_missing(
Stemmed2,
- [{0,"1"}, {1,"1a"}, {100, "x"}]))
+ [{0, "1"}, {1, "1a"}, {100, "x"}]
+ )
+ )
].
-should_have_no_effect_on_removing_no_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({TwoChildSibs, []},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [])).
-
-should_have_no_effect_on_removing_non_existant_branch()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({TwoChildSibs, []},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [{0, "1"}])).
-
-should_remove_leaf()->
- OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({OneChild, [{1, "1b"}]},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [{1, "1b"}])).
-
-should_produce_empty_tree_on_removing_all_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[], [{1, "1b"}, {1, "1a"}]},
- couch_key_tree:remove_leafs(TwoChildSibs,
- [{1, "1b"}, {1, "1a"}])).
-
-should_have_no_effect_on_removing_non_existant_node()->
+should_have_no_effect_on_removing_no_leaves() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {TwoChildSibs, []},
+ couch_key_tree:remove_leafs(
+ TwoChildSibs,
+ []
+ )
+ ).
+
+should_have_no_effect_on_removing_non_existant_branch() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {TwoChildSibs, []},
+ couch_key_tree:remove_leafs(
+ TwoChildSibs,
+ [{0, "1"}]
+ )
+ ).
+
+should_remove_leaf() ->
+ OneChild = [{0, {"1", "foo", [{"1a", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {OneChild, [{1, "1b"}]},
+ couch_key_tree:remove_leafs(
+ TwoChildSibs,
+ [{1, "1b"}]
+ )
+ ).
+
+should_produce_empty_tree_on_removing_all_leaves() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[], [{1, "1b"}, {1, "1a"}]},
+ couch_key_tree:remove_leafs(
+ TwoChildSibs,
+ [{1, "1b"}, {1, "1a"}]
+ )
+ ).
+
+should_have_no_effect_on_removing_non_existant_node() ->
Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual({Stemmed, []},
- couch_key_tree:remove_leafs(Stemmed,
- [{1, "1a"}])).
-
-should_produce_empty_tree_on_removing_last_leaf()->
+ ?_assertEqual(
+ {Stemmed, []},
+ couch_key_tree:remove_leafs(
+ Stemmed,
+ [{1, "1a"}]
+ )
+ ).
+
+should_produce_empty_tree_on_removing_last_leaf() ->
Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- ?_assertEqual({[], [{2, "1aa"}]},
- couch_key_tree:remove_leafs(Stemmed,
- [{2, "1aa"}])).
-
-should_extract_subtree()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"foo", {0, ["1"]}}],[]},
- couch_key_tree:get(TwoChildSibs, [{0, "1"}])).
-
-should_extract_subsubtree()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
- couch_key_tree:get(TwoChildSibs, [{1, "1a"}])).
-
-should_gather_non_existant_leaf()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[],[{0, "x"}]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])).
-
-should_gather_leaf()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"bar", {1, ["1a","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])).
-
-shoul_gather_multiple_leaves()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])).
+ ?_assertEqual(
+ {[], [{2, "1aa"}]},
+ couch_key_tree:remove_leafs(
+ Stemmed,
+ [{2, "1aa"}]
+ )
+ ).
+
+should_extract_subtree() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[{"foo", {0, ["1"]}}], []},
+ couch_key_tree:get(TwoChildSibs, [{0, "1"}])
+ ).
+
+should_extract_subsubtree() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[{"bar", {1, ["1a", "1"]}}], []},
+ couch_key_tree:get(TwoChildSibs, [{1, "1a"}])
+ ).
+
+should_gather_non_existant_leaf() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[], [{0, "x"}]},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "x"}])
+ ).
+
+should_gather_leaf() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[{"bar", {1, ["1a", "1"]}}], []},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{1, "1a"}])
+ ).
+
+shoul_gather_multiple_leaves() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}], []},
+ couch_key_tree:get_key_leafs(TwoChildSibs, [{0, "1"}])
+ ).
should_gather_single_leaf_for_multiple_revs() ->
- OneChild = [{0, {"1","foo",[{"1a", "bar", []}]}}],
+ OneChild = [{0, {"1", "foo", [{"1a", "bar", []}]}}],
ToFind = [{0, "1"}, {1, "1a"}],
- ?_assertEqual({[{"bar", {1, ["1a", "1"]}}],[]},
- couch_key_tree:get_key_leafs(OneChild, ToFind)).
+ ?_assertEqual(
+ {[{"bar", {1, ["1a", "1"]}}], []},
+ couch_key_tree:get_key_leafs(OneChild, ToFind)
+ ).
should_gather_multiple_for_multiple_revs() ->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
ToFind = [{0, "1"}, {1, "1a"}],
- ?_assertEqual({[{"bar", {1, ["1a","1"]}},{"bar",{1, ["1b","1"]}}],[]},
- couch_key_tree:get_key_leafs(TwoChildSibs, ToFind)).
-
-should_retrieve_full_key_path()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{0,[{"1", "foo"}]}],[]},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])).
-
-should_retrieve_full_key_path_for_node()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
- ?_assertEqual({[{1,[{"1a", "bar"},{"1", "foo"}]}],[]},
- couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])).
-
-should_retrieve_leaves_with_parent_node()->
+ ?_assertEqual(
+ {[{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}], []},
+ couch_key_tree:get_key_leafs(TwoChildSibs, ToFind)
+ ).
+
+should_retrieve_full_key_path() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[{0, [{"1", "foo"}]}], []},
+ couch_key_tree:get_full_key_paths(TwoChildSibs, [{0, "1"}])
+ ).
+
+should_retrieve_full_key_path_for_node() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ ?_assertEqual(
+ {[{1, [{"1a", "bar"}, {"1", "foo"}]}], []},
+ couch_key_tree:get_full_key_paths(TwoChildSibs, [{1, "1a"}])
+ ).
+
+should_retrieve_leaves_with_parent_node() ->
Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
[
- ?_assertEqual([{2, [{"1aa", "bar"},{"1a", "bar"}]}],
- couch_key_tree:get_all_leafs_full(Stemmed)),
- ?_assertEqual([{1, [{"1a", "bar"},{"1", "foo"}]},
- {1, [{"1b", "bar"},{"1", "foo"}]}],
- couch_key_tree:get_all_leafs_full(TwoChildSibs))
+ ?_assertEqual(
+ [{2, [{"1aa", "bar"}, {"1a", "bar"}]}],
+ couch_key_tree:get_all_leafs_full(Stemmed)
+ ),
+ ?_assertEqual(
+ [
+ {1, [{"1a", "bar"}, {"1", "foo"}]},
+ {1, [{"1b", "bar"}, {"1", "foo"}]}
+ ],
+ couch_key_tree:get_all_leafs_full(TwoChildSibs)
+ )
].
-should_retrieve_all_leaves()->
+should_retrieve_all_leaves() ->
Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
[
- ?_assertEqual([{"bar", {2, ["1aa","1a"]}}],
- couch_key_tree:get_all_leafs(Stemmed)),
- ?_assertEqual([{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b","1"]}}],
- couch_key_tree:get_all_leafs(TwoChildSibs))
+ ?_assertEqual(
+ [{"bar", {2, ["1aa", "1a"]}}],
+ couch_key_tree:get_all_leafs(Stemmed)
+ ),
+ ?_assertEqual(
+ [{"bar", {1, ["1a", "1"]}}, {"bar", {1, ["1b", "1"]}}],
+ couch_key_tree:get_all_leafs(TwoChildSibs)
+ )
].
-should_have_no_leaves_for_empty_tree()->
+should_have_no_leaves_for_empty_tree() ->
?_assertEqual(0, couch_key_tree:count_leafs([])).
-should_have_single_leaf_for_tree_with_single_node()->
- ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1","foo",[]}}])).
+should_have_single_leaf_for_tree_with_single_node() ->
+ ?_assertEqual(1, couch_key_tree:count_leafs([{0, {"1", "foo", []}}])).
-should_have_two_leaves_for_tree_with_chindler_siblings()->
- TwoChildSibs = [{0, {"1","foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
+should_have_two_leaves_for_tree_with_chindler_siblings() ->
+ TwoChildSibs = [{0, {"1", "foo", [{"1a", "bar", []}, {"1b", "bar", []}]}}],
?_assertEqual(2, couch_key_tree:count_leafs(TwoChildSibs)).
-should_not_affect_on_leaf_counting_for_stemmed_tree()->
+should_not_affect_on_leaf_counting_for_stemmed_tree() ->
?_assertEqual(1, couch_key_tree:count_leafs([{2, {"1bb", "boo", []}}])).
-should_have_no_effect_for_stemming_more_levels_than_exists()->
- TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+should_have_no_effect_for_stemming_more_levels_than_exists() ->
+ TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
?_assertEqual(TwoChild, couch_key_tree:stem(TwoChild, 3)).
-should_return_one_deepest_node()->
- TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+should_return_one_deepest_node() ->
+ TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
Stemmed = [{2, {"1aa", "bar", []}}],
?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 1)).
-should_return_two_deepest_nodes()->
- TwoChild = [{0, {"1","foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
+should_return_two_deepest_nodes() ->
+ TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
Stemmed = [{1, {"1a", "bar", [{"1aa", "bar", []}]}}],
?_assertEqual(Stemmed, couch_key_tree:stem(TwoChild, 2)).
-
merge_and_stem(RevTree, Tree) ->
{Merged, Result} = couch_key_tree:merge(RevTree, Tree),
{couch_key_tree:stem(Merged, ?DEPTH), Result}.
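
Taken together, the cases above pin down `couch_key_tree:merge/2` and `couch_key_tree:stem/2`. The same behaviour can be spot-checked with values lifted straight from the tests; a sketch to drop into the module above (stemming at `?DEPTH` is a no-op for trees this small, so plain `merge/2` gives the same results as `merge_and_stem/2`):

```erlang
key_tree_sketch() ->
    One = {1, {"1", "foo", []}},
    TwoChild = [{0, {"1", "foo", [{"1a", "bar", [{"1aa", "bar", []}]}]}}],
    %% merging into an empty tree adds a new leaf; re-merging is an internal node
    {[One], new_leaf} = couch_key_tree:merge([], One),
    {[One], internal_node} = couch_key_tree:merge([One], One),
    %% stemming to depth 1 keeps only the deepest node of the single path
    [{2, {"1aa", "bar", []}}] = couch_key_tree:stem(TwoChild, 1),
    ok.
```
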
diff --git a/src/couch/test/eunit/couch_passwords_tests.erl b/src/couch/test/eunit/couch_passwords_tests.erl
index 88de8530f..6b67a99e3 100644
--- a/src/couch/test/eunit/couch_passwords_tests.erl
+++ b/src/couch/test/eunit/couch_passwords_tests.erl
@@ -14,41 +14,52 @@
-include_lib("couch/include/couch_eunit.hrl").
+pbkdf2_test_() ->
+ {"PBKDF2", [
+ {"Iterations: 1, length: 20",
+ ?_assertEqual(
+ {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20)
+ )},
-pbkdf2_test_()->
- {"PBKDF2",
- [
- {"Iterations: 1, length: 20",
- ?_assertEqual(
- {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20))},
+ {"Iterations: 2, length: 20",
+ ?_assertEqual(
+ {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20)
+ )},
- {"Iterations: 2, length: 20",
- ?_assertEqual(
- {ok, <<"ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 2, 20))},
+ {"Iterations: 4096, length: 20",
+ ?_assertEqual(
+ {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20)
+ )},
- {"Iterations: 4096, length: 20",
- ?_assertEqual(
- {ok, <<"4b007901b765489abead49d926f721d065a429c1">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 4096, 20))},
+ {"Iterations: 4096, length: 25",
+ ?_assertEqual(
+ {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
+ couch_passwords:pbkdf2(
+ <<"passwordPASSWORDpassword">>,
+ <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
+ 4096,
+ 25
+ )
+ )},
+ {"Null byte",
+ ?_assertEqual(
+ {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
+ couch_passwords:pbkdf2(
+ <<"pass\0word">>,
+ <<"sa\0lt">>,
+ 4096,
+ 16
+ )
+ )},
- {"Iterations: 4096, length: 25",
- ?_assertEqual(
- {ok, <<"3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038">>},
- couch_passwords:pbkdf2(<<"passwordPASSWORDpassword">>,
- <<"saltSALTsaltSALTsaltSALTsaltSALTsalt">>,
- 4096, 25))},
- {"Null byte",
- ?_assertEqual(
- {ok, <<"56fa6aa75548099dcc37d7f03425e0c3">>},
- couch_passwords:pbkdf2(<<"pass\0word">>,
- <<"sa\0lt">>,
- 4096, 16))},
-
- {timeout, 600, %% this may runs too long on slow hosts
- {"Iterations: 16777216 - this may take some time",
- ?_assertEqual(
- {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
- couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
- )}}]}.
+        %% this may run too long on slow hosts
+ {timeout, 600,
+ {"Iterations: 16777216 - this may take some time",
+ ?_assertEqual(
+ {ok, <<"eefe3d61cd4da4e4e9945b3d6ba2158c2634e984">>},
+ couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 16777216, 20)
+ )}}
+ ]}.
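
These are the standard RFC 6070 PBKDF2-HMAC-SHA1 test vectors; the smallest one can be checked directly against `couch_passwords:pbkdf2/4` (a sketch meant for a node with couch on the code path):

```erlang
pbkdf2_sketch() ->
    %% First RFC 6070 vector, as asserted in the test above.
    {ok, <<"0c60c80f961f0e71f3a9b524af6012062fe037a6">>} =
        couch_passwords:pbkdf2(<<"password">>, <<"salt">>, 1, 20),
    ok.
```
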
diff --git a/src/couch/test/eunit/couch_query_servers_tests.erl b/src/couch/test/eunit/couch_query_servers_tests.erl
index 440fc8e1b..01631ba28 100644
--- a/src/couch/test/eunit/couch_query_servers_tests.erl
+++ b/src/couch/test/eunit/couch_query_servers_tests.erl
@@ -15,24 +15,19 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
-
setup() ->
meck:new([config, couch_log]).
-
teardown(_) ->
meck:unload().
-
setup_oom() ->
test_util:start_couch([ioq]).
-
teardown_oom(Ctx) ->
meck:unload(),
test_util:stop_couch(Ctx).
-
sum_overflow_test_() ->
{
"Test overflow detection in the _sum reduce function",
@@ -48,26 +43,27 @@ sum_overflow_test_() ->
}
}.
-
filter_oom_test_() ->
-{
- "Test recovery from oom in filters",
{
- setup,
- fun setup_oom/0,
- fun teardown_oom/1,
- [
- fun should_split_large_batches/0
- ]
- }
-}.
+ "Test recovery from oom in filters",
+ {
+ setup,
+ fun setup_oom/0,
+ fun teardown_oom/1,
+ [
+ fun should_split_large_batches/0
+ ]
+ }
+ }.
should_return_error_on_overflow() ->
meck:reset([config, couch_log]),
meck:expect(
- config, get, ["query_server_config", "reduce_limit", "true"],
- "true"
- ),
+ config,
+ get,
+ ["query_server_config", "reduce_limit", "true"],
+ "true"
+ ),
meck:expect(couch_log, error, ['_', '_'], ok),
KVs = gen_sum_kvs(),
{ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
@@ -75,13 +71,14 @@ should_return_error_on_overflow() ->
?assert(meck:called(config, get, '_')),
?assert(meck:called(couch_log, error, '_')).
-
should_return_object_on_log() ->
meck:reset([config, couch_log]),
meck:expect(
- config, get, ["query_server_config", "reduce_limit", "true"],
- "log"
- ),
+ config,
+ get,
+ ["query_server_config", "reduce_limit", "true"],
+ "log"
+ ),
meck:expect(couch_log, error, ['_', '_'], ok),
KVs = gen_sum_kvs(),
{ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
@@ -91,13 +88,14 @@ should_return_object_on_log() ->
?assert(meck:called(config, get, '_')),
?assert(meck:called(couch_log, error, '_')).
-
should_return_object_on_false() ->
meck:reset([config, couch_log]),
meck:expect(
- config, get, ["query_server_config", "reduce_limit", "true"],
- "false"
- ),
+ config,
+ get,
+ ["query_server_config", "reduce_limit", "true"],
+ "false"
+ ),
meck:expect(couch_log, error, ['_', '_'], ok),
KVs = gen_sum_kvs(),
{ok, [Result]} = couch_query_servers:reduce(<<"foo">>, [<<"_sum">>], KVs),
@@ -107,18 +105,19 @@ should_return_object_on_false() ->
?assert(meck:called(config, get, '_')),
?assertNot(meck:called(couch_log, error, '_')).
-
should_split_large_batches() ->
Req = {json_req, {[]}},
Db = undefined,
DDoc = #doc{
id = <<"_design/foo">>,
revs = {0, [<<"bork bork bork">>]},
- body = {[
- {<<"filters">>, {[
- {<<"bar">>, <<"function(req, doc) {return true;}">>}
- ]}}
- ]}
+ body =
+ {[
+ {<<"filters">>,
+ {[
+ {<<"bar">>, <<"function(req, doc) {return true;}">>}
+ ]}}
+ ]}
},
FName = <<"bar">>,
Docs = [
@@ -139,12 +138,17 @@ should_split_large_batches() ->
{ok, Ret} = couch_query_servers:filter_docs(Req, Db, DDoc, FName, Docs),
?assertEqual([split_batch, split_batch], Ret).
-
gen_sum_kvs() ->
- lists:map(fun(I) ->
- Props = lists:map(fun(_) ->
- K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)),
- {K, 1}
- end, lists:seq(1, 20)),
- [I, {Props}]
- end, lists:seq(1, 10)).
+ lists:map(
+ fun(I) ->
+ Props = lists:map(
+ fun(_) ->
+ K = couch_util:encodeBase64Url(crypto:strong_rand_bytes(16)),
+ {K, 1}
+ end,
+ lists:seq(1, 20)
+ ),
+ [I, {Props}]
+ end,
+ lists:seq(1, 10)
+ ).
diff --git a/src/couch/test/eunit/couch_server_tests.erl b/src/couch/test/eunit/couch_server_tests.erl
index 66533d48c..a43106d89 100644
--- a/src/couch/test/eunit/couch_server_tests.erl
+++ b/src/couch/test/eunit/couch_server_tests.erl
@@ -45,13 +45,13 @@ teardown(rename, Db) ->
teardown(_, Db) ->
teardown(Db).
-
delete_db_test_() ->
{
"Test for proper deletion of db file",
{
setup,
- fun start/0, fun test_util:stop/1,
+ fun start/0,
+ fun test_util:stop/1,
[
make_test_case(rename, [fun should_rename_on_delete/2]),
make_test_case(delete, [fun should_delete/2])
@@ -76,7 +76,8 @@ should_rename_on_delete(_, Db) ->
?assertMatch([_], DeletedFiles),
[Renamed] = DeletedFiles,
?assertEqual(
- filename:extension(Origin), filename:extension(Renamed)),
+ filename:extension(Origin), filename:extension(Renamed)
+ ),
?assert(filelib:is_regular(Renamed))
end).
@@ -93,7 +94,6 @@ should_delete(_, Db) ->
deleted_files(ViewFile) ->
filelib:wildcard(filename:rootname(ViewFile) ++ "*.deleted.*").
-
bad_engine_option_test_() ->
{
setup,
@@ -104,19 +104,19 @@ bad_engine_option_test_() ->
]
}.
-
t_bad_engine_option() ->
Resp = couch_server:create(?tempdb(), [{engine, <<"cowabunga!">>}]),
?assertEqual(Resp, {error, {invalid_engine_extension, <<"cowabunga!">>}}).
-
get_engine_path_test_() ->
{
setup,
- fun start/0, fun test_util:stop/1,
+ fun start/0,
+ fun test_util:stop/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_engine_path/1,
fun should_return_invalid_engine_error/1
@@ -124,7 +124,6 @@ get_engine_path_test_() ->
}
}.
-
should_return_engine_path(Db) ->
DbName = couch_db:name(Db),
Engine = couch_db_engine:get_engine(Db),
@@ -132,14 +131,12 @@ should_return_engine_path(Db) ->
FilePath = couch_db:get_filepath(Db),
?_assertMatch({ok, FilePath}, Resp).
-
should_return_invalid_engine_error(Db) ->
DbName = couch_db:name(Db),
Engine = fake_engine,
Resp = couch_server:get_engine_path(DbName, Engine),
?_assertMatch({error, {invalid_engine, Engine}}, Resp).
-
interleaved_requests_test_() ->
{
setup,
@@ -148,7 +145,6 @@ interleaved_requests_test_() ->
fun make_interleaved_requests/1
}.
-
start_interleaved() ->
TestDbName = ?tempdb(),
meck:new(couch_db, [passthrough]),
@@ -180,19 +176,16 @@ start_interleaved() ->
end),
{test_util:start_couch(), TestDbName}.
-
stop_interleaved({Ctx, TestDbName}) ->
couch_server:delete(TestDbName, [?ADMIN_CTX]),
meck:unload(),
test_util:stop_couch(Ctx).
-
make_interleaved_requests({_, TestDbName}) ->
[
fun() -> t_interleaved_create_delete_open(TestDbName) end
].
-
t_interleaved_create_delete_open(DbName) ->
{CrtRef, OpenRef} = {make_ref(), make_ref()},
CrtMsg = {'$gen_call', {self(), CrtRef}, {create, DbName, [?ADMIN_CTX]}},
@@ -247,7 +240,6 @@ t_interleaved_create_delete_open(DbName) ->
?assert(is_process_alive(CouchServer)),
check_monitor_not_triggered(CSRef).
-
get_opener_pid(DbName) ->
WaitFun = fun() ->
case ets:lookup(couch_server:couch_dbs(DbName), DbName) of
@@ -259,23 +251,28 @@ get_opener_pid(DbName) ->
end,
test_util:wait(WaitFun).
-
wait_for_open_async_result(CouchServer, Opener) ->
WaitFun = fun() ->
{_, Messages} = erlang:process_info(CouchServer, messages),
- Found = lists:foldl(fun(Msg, Acc) ->
- case Msg of
- {'$gen_call', {Opener, _}, {open_result, _, {ok, _}}} ->
- true;
- _ ->
- Acc
- end
- end, false, Messages),
- if Found -> ok; true -> wait end
+ Found = lists:foldl(
+ fun(Msg, Acc) ->
+ case Msg of
+ {'$gen_call', {Opener, _}, {open_result, _, {ok, _}}} ->
+ true;
+ _ ->
+ Acc
+ end
+ end,
+ false,
+ Messages
+ ),
+ if
+ Found -> ok;
+ true -> wait
+ end
end,
test_util:wait(WaitFun).
-
check_monitor_not_triggered(Ref) ->
receive
{'DOWN', Ref, _, _, Reason0} ->
@@ -284,7 +281,6 @@ check_monitor_not_triggered(Ref) ->
ok
end.
-
get_next_message() ->
receive
Msg ->
diff --git a/src/couch/test/eunit/couch_stream_tests.erl b/src/couch/test/eunit/couch_stream_tests.erl
index a7fedf0af..4146a9139 100644
--- a/src/couch/test/eunit/couch_stream_tests.erl
+++ b/src/couch/test/eunit/couch_stream_tests.erl
@@ -24,16 +24,17 @@ setup() ->
teardown({Fd, _}) ->
ok = couch_file:close(Fd).
-
stream_test_() ->
{
"CouchDB stream tests",
{
setup,
- fun() -> test_util:start(?MODULE, [ioq]) end, fun test_util:stop/1,
+ fun() -> test_util:start(?MODULE, [ioq]) end,
+ fun test_util:stop/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_write/1,
fun should_write_consecutive/1,
@@ -49,7 +50,6 @@ stream_test_() ->
}
}.
-
should_write({_, Stream}) ->
?_assertEqual(ok, couch_stream:write(Stream, <<"food">>)).
@@ -98,7 +98,10 @@ should_stream_more_with_4K_chunk_size({Fd, _}) ->
Data = <<"a1b2c">>,
couch_stream:write(Stream, Data),
[Data | Acc]
- end, [], lists:seq(1, 1024)),
+ end,
+ [],
+ lists:seq(1, 1024)
+ ),
{NewEngine, Length, _, _, _} = couch_stream:close(Stream),
{ok, Ptrs} = couch_stream:to_disk_term(NewEngine),
?_assertMatch({[{0, 4100}, {4106, 1020}], 5120}, {Ptrs, Length}).
@@ -109,16 +112,17 @@ should_stop_on_normal_exit_of_stream_opener({Fd, _}) ->
fun() ->
{ok, StreamPid} = couch_stream:open(?ENGINE(Fd)),
RunnerPid ! {pid, StreamPid}
- end),
- StreamPid = receive
- {pid, StreamPid0} -> StreamPid0
- end,
+ end
+ ),
+ StreamPid =
+ receive
+ {pid, StreamPid0} -> StreamPid0
+ end,
% Confirm the validity of the test by verifying the stream opener has died
?assertNot(is_process_alive(OpenerPid)),
% Verify the stream itself has also died
?_assertNot(is_process_alive(StreamPid)).
-
read_all(Engine) ->
Data = couch_stream:foldl(Engine, fun(Bin, Acc) -> [Bin, Acc] end, []),
iolist_to_binary(Data).
diff --git a/src/couch/test/eunit/couch_task_status_tests.erl b/src/couch/test/eunit/couch_task_status_tests.erl
index 0ec03563b..f888dd596 100644
--- a/src/couch/test/eunit/couch_task_status_tests.erl
+++ b/src/couch/test/eunit/couch_task_status_tests.erl
@@ -17,27 +17,30 @@
-define(TIMEOUT, 1000).
-
setup() ->
Ctx = test_util:start(?MODULE, [couch_log], [{dont_mock, [config]}]),
{ok, TaskStatusPid} = couch_task_status:start_link(),
TaskUpdaterPid = spawn(fun() -> loop() end),
{TaskStatusPid, TaskUpdaterPid, Ctx}.
-
-teardown({TaskStatusPid, _, Ctx})->
- test_util:stop_sync_throw(TaskStatusPid, fun() ->
- couch_task_status:stop()
- end, timeout_error, ?TIMEOUT),
+teardown({TaskStatusPid, _, Ctx}) ->
+ test_util:stop_sync_throw(
+ TaskStatusPid,
+ fun() ->
+ couch_task_status:stop()
+ end,
+ timeout_error,
+ ?TIMEOUT
+ ),
test_util:stop(Ctx).
-
couch_task_status_test_() ->
{
"CouchDB task status updates",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_register_task/1,
fun should_set_task_startup_time/1,
@@ -51,12 +54,10 @@ couch_task_status_test_() ->
fun should_reset_control_update_frequency/1,
fun should_track_multiple_tasks/1,
fun should_finish_task/1
-
]
}
}.
-
should_register_task({_, Pid, _Ctx}) ->
ok = call(Pid, add, [{type, replication}, {progress, 0}]),
?_assertEqual(1, length(couch_task_status:all())).
@@ -76,8 +77,10 @@ should_set_task_type({_, Pid, _Ctx}) ->
should_not_register_multiple_tasks_for_same_pid({_, Pid, _Ctx}) ->
ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ?_assertEqual({add_task_error, already_registered},
- call(Pid, add, [{type, compaction}, {progress, 0}])).
+ ?_assertEqual(
+ {add_task_error, already_registered},
+ call(Pid, add, [{type, compaction}, {progress, 0}])
+ ).
should_set_task_progress({_, Pid, _Ctx}) ->
ok = call(Pid, add, [{type, replication}, {progress, 0}]),
@@ -92,10 +95,12 @@ should_update_time_changes_on_task_progress({_, Pid, _Ctx}) ->
?_assert(
begin
ok = call(Pid, add, [{type, replication}, {progress, 0}]),
- ok = timer:sleep(1000), % sleep awhile to customize update time
+ % sleep awhile to customize update time
+ ok = timer:sleep(1000),
call(Pid, update, [{progress, 25}]),
get_task_prop(Pid, updated_on) > get_task_prop(Pid, started_on)
- end).
+ end
+ ).
%%should_control_update_frequency({_, Pid, _Ctx}) ->
%% ?_assertEqual(66,
@@ -109,7 +114,8 @@ should_update_time_changes_on_task_progress({_, Pid, _Ctx}) ->
%% end).
should_reset_control_update_frequency({_, Pid, _Ctx}) ->
- ?_assertEqual(87,
+ ?_assertEqual(
+ 87,
begin
ok = call(Pid, add, [{type, replication}, {progress, 0}]),
call(Pid, update, [{progress, 50}]),
@@ -119,7 +125,8 @@ should_reset_control_update_frequency({_, Pid, _Ctx}) ->
call(Pid, update_frequency, 0),
call(Pid, update, [{progress, 87}]),
get_task_prop(Pid, progress)
- end).
+ end
+ ).
should_track_multiple_tasks(_) ->
?_assert(run_multiple_tasks()).
@@ -130,7 +137,6 @@ should_finish_task({_, Pid, _Ctx}) ->
ok = call(Pid, done),
?_assertEqual(0, length(couch_task_status:all())).
-
run_multiple_tasks() ->
Pid1 = spawn(fun() -> loop() end),
Pid2 = spawn(fun() -> loop() end),
@@ -161,7 +167,6 @@ run_multiple_tasks() ->
true.
-
loop() ->
receive
{add, Props, From} ->
@@ -188,7 +193,7 @@ call(Pid, done) ->
{'DOWN', Ref, _Type, Pid, _Info} ->
Res
after ?TIMEOUT ->
- throw(timeout_error)
+ throw(timeout_error)
end;
call(Pid, Command) ->
Pid ! {Command, self()},
@@ -217,17 +222,22 @@ get_task_prop(Pid, Prop) ->
Acc
end
end,
- [], couch_task_status:all()
+ [],
+ couch_task_status:all()
),
case couch_util:get_value(Prop, hd(Element), nil) of
nil ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Could not get property '"
- ++ couch_util:to_list(Prop)
- ++ "' for task "
- ++ pid_to_list(Pid)}]});
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ "Could not get property '" ++
+ couch_util:to_list(Prop) ++
+ "' for task " ++
+ pid_to_list(Pid)}
+ ]}
+ );
Value ->
Value
end.
diff --git a/src/couch/test/eunit/couch_util_tests.erl b/src/couch/test/eunit/couch_util_tests.erl
index 44a5cce0a..c07ddc093 100644
--- a/src/couch/test/eunit/couch_util_tests.erl
+++ b/src/couch/test/eunit/couch_util_tests.erl
@@ -14,7 +14,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-
validate_callback_exists_test_() ->
{
"validate_callback_exists tests",
@@ -33,8 +32,10 @@ implode_test() ->
?assertEqual([1, 38, 2, 38, 3], couch_util:implode([1, 2, 3], "&")).
trim_test() ->
- lists:map(fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
- [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]).
+ lists:map(
+ fun(S) -> ?assertEqual("foo", couch_util:trim(S)) end,
+ [" foo", "foo ", "\tfoo", " foo ", "foo\t", "foo\n", "\nfoo"]
+ ).
abs_pathname_test() ->
{ok, Cwd} = file:get_cwd(),
@@ -44,8 +45,10 @@ flush_test() ->
?assertNot(couch_util:should_flush()),
AcquireMem = fun() ->
_IntsToAGazillion = lists:seq(1, 200000),
- _LotsOfData = lists:map(fun(_) -> <<"foobar">> end,
- lists:seq(1, 500000)),
+ _LotsOfData = lists:map(
+ fun(_) -> <<"foobar">> end,
+ lists:seq(1, 500000)
+ ),
_ = list_to_binary(_LotsOfData),
%% Allocation 200K tuples puts us above the memory threshold
@@ -97,11 +100,20 @@ find_in_binary_test_() ->
],
lists:map(
fun({Needle, Haystack, Result}) ->
- Msg = lists:flatten(io_lib:format("Looking for ~s in ~s",
- [Needle, Haystack])),
- {Msg, ?_assertMatch(Result,
- couch_util:find_in_binary(Needle, Haystack))}
- end, Cases).
+ Msg = lists:flatten(
+ io_lib:format(
+ "Looking for ~s in ~s",
+ [Needle, Haystack]
+ )
+ ),
+ {Msg,
+ ?_assertMatch(
+ Result,
+ couch_util:find_in_binary(Needle, Haystack)
+ )}
+ end,
+ Cases
+ ).
should_succeed_for_existent_cb() ->
?_assert(couch_util:validate_callback_exists(lists, any, 2)).
@@ -115,10 +127,14 @@ should_fail_for_missing_cb() ->
lists:map(
fun({M, F, A} = MFA) ->
Name = lists:flatten(io_lib:format("~w:~w/~w", [M, F, A])),
- {Name, ?_assertThrow(
- {error, {undefined_callback, Name, MFA}},
- couch_util:validate_callback_exists(M, F, A))}
- end, Cases).
+ {Name,
+ ?_assertThrow(
+ {error, {undefined_callback, Name, MFA}},
+ couch_util:validate_callback_exists(M, F, A)
+ )}
+ end,
+ Cases
+ ).
to_hex_test_() ->
[
diff --git a/src/couch/test/eunit/couch_uuids_tests.erl b/src/couch/test/eunit/couch_uuids_tests.erl
index 9ca2c8a84..6546779bb 100644
--- a/src/couch/test/eunit/couch_uuids_tests.erl
+++ b/src/couch/test/eunit/couch_uuids_tests.erl
@@ -16,17 +16,14 @@
-define(TIMEOUT, 20).
-
setup_all() ->
test_util:start_applications([config]),
couch_uuids:start().
-
teardown_all(_) ->
couch_uuids:stop(),
test_util:stop_applications([config]).
-
uuids_test_() ->
{
setup,
@@ -40,25 +37,21 @@ uuids_test_() ->
]
}.
-
default_algorithm() ->
config:delete("uuids", "algorithm", false),
check_unique().
-
sequential_algorithm() ->
config:set("uuids", "algorithm", "sequential", false),
check_unique(),
check_increment_monotonically(),
check_rollover().
-
utc_algorithm() ->
config:set("uuids", "algorithm", "utc_random", false),
check_unique(),
check_increment_monotonically().
-
utc_id_suffix_algorithm() ->
config:set("uuids", "algorithm", "utc_id", false),
config:set("uuids", "utc_id_suffix", "bozo", false),
@@ -66,36 +59,30 @@ utc_id_suffix_algorithm() ->
check_increment_monotonically(),
check_preserve_suffix().
-
check_unique() ->
     %% this one may really run for too long on slow hosts
?assert(test_unique(10000, [couch_uuids:new()])).
-
check_increment_monotonically() ->
?assert(couch_uuids:new() < couch_uuids:new()).
-
check_rollover() ->
UUID = binary_to_list(couch_uuids:new()),
Prefix = element(1, lists:split(26, UUID)),
N = gen_until_pref_change(Prefix, 0),
?assert(N >= 5000 andalso N =< 11000).
-
check_preserve_suffix() ->
UUID = binary_to_list(couch_uuids:new()),
Suffix = get_suffix(UUID),
?assert(test_same_suffix(10000, Suffix)).
-
test_unique(0, _) ->
true;
test_unique(N, UUIDs) ->
UUID = couch_uuids:new(),
?assertNot(lists:member(UUID, UUIDs)),
- test_unique(N - 1, [UUID| UUIDs]).
-
+ test_unique(N - 1, [UUID | UUIDs]).
gen_until_pref_change(_, Count) when Count > 8251 ->
Count;
@@ -105,7 +92,6 @@ gen_until_pref_change(Prefix, N) ->
_ -> N
end.
-
test_same_suffix(0, _) ->
true;
test_same_suffix(N, Suffix) ->
@@ -114,11 +100,9 @@ test_same_suffix(N, Suffix) ->
_ -> false
end.
-
get_prefix(UUID) ->
element(1, lists:split(26, binary_to_list(UUID))).
-
get_suffix(UUID) when is_binary(UUID) ->
get_suffix(binary_to_list(UUID));
get_suffix(UUID) ->
diff --git a/src/couch/test/eunit/couch_work_queue_tests.erl b/src/couch/test/eunit/couch_work_queue_tests.erl
index a192230ef..acf0e45dc 100644
--- a/src/couch/test/eunit/couch_work_queue_tests.erl
+++ b/src/couch/test/eunit/couch_work_queue_tests.erl
@@ -16,7 +16,6 @@
-define(TIMEOUT, 100).
-
setup(Opts) ->
{ok, Q} = couch_work_queue:new(Opts),
Producer = spawn_producer(Q),
@@ -33,9 +32,11 @@ setup_max_items_and_size() ->
setup([{max_size, 160}, {max_items, 3}]).
setup_multi_workers() ->
- {Q, Producer, Consumer1} = setup([{max_size, 160},
- {max_items, 3},
- {multi_workers, true}]),
+ {Q, Producer, Consumer1} = setup([
+ {max_size, 160},
+ {max_items, 3},
+ {multi_workers, true}
+ ]),
Consumer2 = spawn_consumer(Q),
Consumer3 = spawn_consumer(Q),
{Q, Producer, [Consumer1, Consumer2, Consumer3]}.
@@ -52,7 +53,6 @@ teardown({Q, Producer, Consumers}) when is_list(Consumers) ->
teardown({Q, Producer, Consumer}) ->
teardown({Q, Producer, [Consumer]}).
-
single_consumer_test_() ->
{
"Single producer and consumer",
@@ -61,7 +61,8 @@ single_consumer_test_() ->
"Queue with 3 max items",
{
foreach,
- fun setup_max_items/0, fun teardown/1,
+ fun setup_max_items/0,
+ fun teardown/1,
single_consumer_max_item_count() ++ common_cases()
}
},
@@ -69,7 +70,8 @@ single_consumer_test_() ->
"Queue with max size of 160 bytes",
{
foreach,
- fun setup_max_size/0, fun teardown/1,
+ fun setup_max_size/0,
+ fun teardown/1,
single_consumer_max_size() ++ common_cases()
}
},
@@ -77,7 +79,8 @@ single_consumer_test_() ->
"Queue with max size of 160 bytes and 3 max items",
{
foreach,
- fun setup_max_items_and_size/0, fun teardown/1,
+ fun setup_max_items_and_size/0,
+ fun teardown/1,
single_consumer_max_items_and_size() ++ common_cases()
}
}
@@ -92,15 +95,15 @@ multiple_consumers_test_() ->
"Queue with max size of 160 bytes and 3 max items",
{
foreach,
- fun setup_multi_workers/0, fun teardown/1,
+ fun setup_multi_workers/0,
+ fun teardown/1,
common_cases() ++ multiple_consumers()
}
-
}
]
}.
-common_cases()->
+common_cases() ->
[
fun should_block_consumer_on_dequeue_from_empty_queue/1,
fun should_consume_right_item/1,
@@ -109,7 +112,7 @@ common_cases()->
fun should_be_closed/1
].
-single_consumer_max_item_count()->
+single_consumer_max_item_count() ->
[
fun should_have_no_items_for_new_queue/1,
fun should_block_producer_on_full_queue_count/1,
@@ -118,7 +121,7 @@ single_consumer_max_item_count()->
fun should_consume_all/1
].
-single_consumer_max_size()->
+single_consumer_max_size() ->
[
fun should_have_zero_size_for_new_queue/1,
fun should_block_producer_on_full_queue_size/1,
@@ -138,7 +141,6 @@ multiple_consumers() ->
fun should_increase_queue_size_on_produce/1
].
-
should_have_no_items_for_new_queue({Q, _, _}) ->
?_assertEqual(0, couch_work_queue:item_count(Q)).
@@ -172,8 +174,10 @@ should_consume_right_item({Q, Producer, Consumers}) when is_list(Consumers) ->
?assertEqual(0, couch_work_queue:item_count(Q)),
?assertEqual(0, couch_work_queue:size(Q)),
- R = [{ping(C), Item}
- || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])],
+ R = [
+ {ping(C), Item}
+ || {C, Item} <- lists:zip(Consumers, [Item1, Item2, Item3])
+ ],
?_assertEqual([{ok, Item1}, {ok, Item2}, {ok, Item3}], R);
should_consume_right_item({Q, Producer, Consumer}) ->
@@ -284,8 +288,10 @@ should_be_closed({Q, _, Consumers}) when is_list(Consumers) ->
ItemsCount = couch_work_queue:item_count(Q),
Size = couch_work_queue:size(Q),
- ?_assertEqual({[closed, closed, closed], closed, closed},
- {LastConsumerItems, ItemsCount, Size});
+ ?_assertEqual(
+ {[closed, closed, closed], closed, closed},
+ {LastConsumerItems, ItemsCount, Size}
+ );
should_be_closed({Q, _, Consumer}) ->
ok = close_queue(Q),
@@ -295,14 +301,19 @@ should_be_closed({Q, _, Consumer}) ->
ItemsCount = couch_work_queue:item_count(Q),
Size = couch_work_queue:size(Q),
- ?_assertEqual({closed, closed, closed},
- {LastConsumerItems, ItemsCount, Size}).
-
+ ?_assertEqual(
+ {closed, closed, closed},
+ {LastConsumerItems, ItemsCount, Size}
+ ).
close_queue(Q) ->
- test_util:stop_sync(Q, fun() ->
- ok = couch_work_queue:close(Q)
- end, ?TIMEOUT).
+ test_util:stop_sync(
+ Q,
+ fun() ->
+ ok = couch_work_queue:close(Q)
+ end,
+ ?TIMEOUT
+ ).
spawn_consumer(Q) ->
Parent = self(),
@@ -365,10 +376,13 @@ produce(Q, Producer, Size, Wait) ->
{item, Ref, Item} ->
Item
after ?TIMEOUT ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout asking producer to produce an item"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout asking producer to produce an item"}
+ ]}
+ )
end.
ping(Pid) ->
@@ -393,10 +407,10 @@ stop(Pid, Name) ->
wait_increment(Q, ItemsCount) ->
test_util:wait(fun() ->
- case couch_work_queue:item_count(Q) > ItemsCount of
- true ->
- ok;
- false ->
- wait
- end
+ case couch_work_queue:item_count(Q) > ItemsCount of
+ true ->
+ ok;
+ false ->
+ wait
+ end
end).
diff --git a/src/couch/test/eunit/couchdb_attachments_tests.erl b/src/couch/test/eunit/couchdb_attachments_tests.erl
index 04859dbc9..376553985 100644
--- a/src/couch/test/eunit/couchdb_attachments_tests.erl
+++ b/src/couch/test/eunit/couchdb_attachments_tests.erl
@@ -26,12 +26,15 @@
-define(TIMEWAIT, 1000).
-define(i2l(I), integer_to_list(I)).
-
start() ->
Ctx = test_util:start_couch(),
% ensure in default compression settings for attachments_compression_tests
- config:set("attachments", "compression_level",
- ?i2l(?COMPRESSION_LEVEL), false),
+ config:set(
+ "attachments",
+ "compression_level",
+ ?i2l(?COMPRESSION_LEVEL),
+ false
+ ),
config:set("attachments", "compressible_types", "text/*", false),
Ctx.
@@ -46,7 +49,7 @@ setup() ->
setup({binary, standalone}) ->
{Host, DbName} = setup(),
- setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
+ setup_att(fun create_standalone_png_att/2, Host, DbName, ?FIXTURE_PNG);
setup({text, standalone}) ->
{Host, DbName} = setup(),
setup_att(fun create_standalone_text_att/2, Host, DbName, ?FIXTURE_TXT);
@@ -76,13 +79,13 @@ teardown(DbName) ->
ok = couch_server:delete(?l2b(DbName), []),
ok.
-
attachments_test_() ->
{
"Attachments tests",
{
setup,
- fun start/0, fun test_util:stop_couch/1,
+ fun start/0,
+ fun test_util:stop_couch/1,
[
attachments_md5_tests(),
attachments_compression_tests()
@@ -95,7 +98,8 @@ attachments_md5_tests() ->
"Attachments MD5 tests",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_upload_attachment_without_md5/1,
fun should_upload_attachment_by_chunks_without_md5/1,
@@ -111,12 +115,12 @@ attachments_md5_tests() ->
attachments_compression_tests() ->
Funs = [
- fun should_get_att_without_accept_gzip_encoding/2,
- fun should_get_att_with_accept_gzip_encoding/2,
- fun should_get_att_with_accept_deflate_encoding/2,
- fun should_return_406_response_on_unsupported_encoding/2,
- fun should_get_doc_with_att_data/2,
- fun should_get_doc_with_att_data_stub/2
+ fun should_get_att_without_accept_gzip_encoding/2,
+ fun should_get_att_with_accept_gzip_encoding/2,
+ fun should_get_att_with_accept_deflate_encoding/2,
+ fun should_return_406_response_on_unsupported_encoding/2,
+ fun should_get_doc_with_att_data/2,
+ fun should_get_doc_with_att_data_stub/2
],
{
"Attachments compression tests",
@@ -133,13 +137,15 @@ attachments_compression_tests() ->
"Created already been compressed via Attachments API",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{compressed, Fun} || Fun <- Funs]
}
},
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_not_create_compressed_att_with_deflate_encoding/1,
fun should_not_create_compressed_att_with_compress_encoding/1,
@@ -155,7 +161,8 @@ created_attachments_compression_tests(Mod, Funs) ->
"Compressiable attachments",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{{text, Mod}, Fun} || Fun <- Funs]
}
},
@@ -163,14 +170,13 @@ created_attachments_compression_tests(Mod, Funs) ->
"Uncompressiable attachments",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{{binary, Mod}, Fun} || Fun <- Funs]
}
}
].
-
-
should_upload_attachment_without_md5({Host, DbName}) ->
?_test(begin
AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
@@ -238,9 +244,12 @@ should_upload_attachment_by_chunks_with_valid_md5_trailer({Host, DbName}) ->
AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
AttData = <<"We all live in a yellow submarine!">>,
<<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]),
- "Content-MD5: ", base64:encode(couch_hash:md5_hash(AttData)),
- "\r\n\r\n"],
+ Body = [
+ chunked_body([Part1, Part2]),
+ "Content-MD5: ",
+ base64:encode(couch_hash:md5_hash(AttData)),
+ "\r\n\r\n"
+ ],
Headers = [
{"Content-Type", "text/plain"},
{"Host", Host},
@@ -264,11 +273,12 @@ should_reject_attachment_with_invalid_md5({Host, DbName}) ->
],
{ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>]))
+ ?assertEqual(
+ <<"content_md5_mismatch">>,
+ get_json(Json, [<<"error">>])
+ )
end).
-
should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
?_test(begin
AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
@@ -283,8 +293,10 @@ should_reject_chunked_attachment_with_invalid_md5({Host, DbName}) ->
],
{ok, Code, Json} = request("PUT", AttUrl, Headers, Body),
?assertEqual(400, Code),
- ?assertEqual(<<"content_md5_mismatch">>,
- get_json(Json, [<<"error">>]))
+ ?assertEqual(
+ <<"content_md5_mismatch">>,
+ get_json(Json, [<<"error">>])
+ )
end).
should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
@@ -292,9 +304,12 @@ should_reject_chunked_attachment_with_invalid_md5_trailer({Host, DbName}) ->
AttUrl = string:join(["", DbName, ?docid(), "readme.txt"], "/"),
AttData = <<"We all live in a yellow submarine!">>,
<<Part1:21/binary, Part2:13/binary>> = AttData,
- Body = [chunked_body([Part1, Part2]),
- "Content-MD5: ", base64:encode(<<"foobar!">>),
- "\r\n\r\n"],
+ Body = [
+ chunked_body([Part1, Part2]),
+ "Content-MD5: ",
+ base64:encode(<<"foobar!">>),
+ "\r\n\r\n"
+ ],
Headers = [
{"Content-Type", "text/plain"},
{"Host", Host},
@@ -317,7 +332,8 @@ should_get_att_without_accept_gzip_encoding(_, {Data, {_, _, AttUrl}}) ->
should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) ->
?_test(begin
{ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
+ AttUrl, [{"Accept-Encoding", "gzip"}]
+ ),
?assertEqual(200, Code),
?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
@@ -325,7 +341,8 @@ should_get_att_with_accept_gzip_encoding(compressed, {Data, {_, _, AttUrl}}) ->
should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) ->
?_test(begin
{ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
+ AttUrl, [{"Accept-Encoding", "gzip"}]
+ ),
?assertEqual(200, Code),
?assert(lists:member({"Content-Encoding", "gzip"}, Headers)),
?assertEqual(Data, zlib:gunzip(iolist_to_binary(Body)))
@@ -333,77 +350,98 @@ should_get_att_with_accept_gzip_encoding({text, _}, {Data, {_, _, AttUrl}}) ->
should_get_att_with_accept_gzip_encoding({binary, _}, {Data, {_, _, AttUrl}}) ->
?_test(begin
{ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "gzip"}]),
+ AttUrl, [{"Accept-Encoding", "gzip"}]
+ ),
?assertEqual(200, Code),
- ?assertEqual(undefined,
- couch_util:get_value("Content-Encoding", Headers)),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value("Content-Encoding", Headers)
+ ),
?assertEqual(Data, iolist_to_binary(Body))
end).
should_get_att_with_accept_deflate_encoding(_, {Data, {_, _, AttUrl}}) ->
?_test(begin
{ok, Code, Headers, Body} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate"}]),
+ AttUrl, [{"Accept-Encoding", "deflate"}]
+ ),
?assertEqual(200, Code),
- ?assertEqual(undefined,
- couch_util:get_value("Content-Encoding", Headers)),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value("Content-Encoding", Headers)
+ ),
?assertEqual(Data, iolist_to_binary(Body))
end).
should_return_406_response_on_unsupported_encoding(_, {_, {_, _, AttUrl}}) ->
- ?_assertEqual(406,
+ ?_assertEqual(
+ 406,
begin
{ok, Code, _, _} = test_request:get(
- AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]),
+ AttUrl, [{"Accept-Encoding", "deflate, *;q=0"}]
+ ),
Code
- end).
+ end
+ ).
should_get_doc_with_att_data(compressed, {Data, {_, DocUrl, _}}) ->
?_test(begin
Url = DocUrl ++ "?attachments=true",
{ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
+ Url, [{"Accept", "application/json"}]
+ ),
?assertEqual(200, Code),
Json = jiffy:decode(Body),
AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
+ Json, [<<"_attachments">>, ?ATT_TXT_NAME]
+ ),
AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
+ AttJson, [<<"data">>]
+ ),
?assertEqual(
<<"text/plain">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
+ couch_util:get_nested_json_value(AttJson, [<<"content_type">>])
+ ),
?assertEqual(Data, base64:decode(AttData))
end);
should_get_doc_with_att_data({text, _}, {Data, {_, DocUrl, _}}) ->
?_test(begin
Url = DocUrl ++ "?attachments=true",
{ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
+ Url, [{"Accept", "application/json"}]
+ ),
?assertEqual(200, Code),
Json = jiffy:decode(Body),
AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
+ Json, [<<"_attachments">>, ?ATT_TXT_NAME]
+ ),
AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
+ AttJson, [<<"data">>]
+ ),
?assertEqual(
<<"text/plain">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
+ couch_util:get_nested_json_value(AttJson, [<<"content_type">>])
+ ),
?assertEqual(Data, base64:decode(AttData))
end);
should_get_doc_with_att_data({binary, _}, {Data, {_, DocUrl, _}}) ->
?_test(begin
Url = DocUrl ++ "?attachments=true",
{ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
+ Url, [{"Accept", "application/json"}]
+ ),
?assertEqual(200, Code),
Json = jiffy:decode(Body),
AttJson = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
+ Json, [<<"_attachments">>, ?ATT_BIN_NAME]
+ ),
AttData = couch_util:get_nested_json_value(
- AttJson, [<<"data">>]),
+ AttJson, [<<"data">>]
+ ),
?assertEqual(
<<"image/png">>,
- couch_util:get_nested_json_value(AttJson,[<<"content_type">>])),
+ couch_util:get_nested_json_value(AttJson, [<<"content_type">>])
+ ),
?assertEqual(Data, base64:decode(AttData))
end).
@@ -411,13 +449,17 @@ should_get_doc_with_att_data_stub(compressed, {Data, {_, DocUrl, _}}) ->
?_test(begin
Url = DocUrl ++ "?att_encoding_info=true",
{ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
+ Url, [{"Accept", "application/json"}]
+ ),
?assertEqual(200, Code),
Json = jiffy:decode(Body),
{AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
+ Json, [<<"_attachments">>, ?ATT_TXT_NAME]
+ ),
+ ?assertEqual(
+ <<"gzip">>,
+ couch_util:get_value(<<"encoding">>, AttJson)
+ ),
AttLength = couch_util:get_value(<<"length">>, AttJson),
EncLength = couch_util:get_value(<<"encoded_length">>, AttJson),
?assertEqual(AttLength, EncLength),
@@ -427,38 +469,55 @@ should_get_doc_with_att_data_stub({text, _}, {Data, {_, DocUrl, _}}) ->
?_test(begin
Url = DocUrl ++ "?att_encoding_info=true",
{ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
+ Url, [{"Accept", "application/json"}]
+ ),
?assertEqual(200, Code),
Json = jiffy:decode(Body),
{AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
+ Json, [<<"_attachments">>, ?ATT_TXT_NAME]
+ ),
+ ?assertEqual(
+ <<"gzip">>,
+ couch_util:get_value(<<"encoding">>, AttJson)
+ ),
AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
+ ?assertEqual(
+ AttEncLength,
+ couch_util:get_value(<<"encoded_length">>, AttJson)
+ ),
+ ?assertEqual(
+ byte_size(Data),
+ couch_util:get_value(<<"length">>, AttJson)
+ )
end);
should_get_doc_with_att_data_stub({binary, _}, {Data, {_, DocUrl, _}}) ->
?_test(begin
Url = DocUrl ++ "?att_encoding_info=true",
{ok, Code, _, Body} = test_request:get(
- Url, [{"Accept", "application/json"}]),
+ Url, [{"Accept", "application/json"}]
+ ),
?assertEqual(200, Code),
Json = jiffy:decode(Body),
{AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_BIN_NAME]),
- ?assertEqual(undefined,
- couch_util:get_value(<<"encoding">>, AttJson)),
- ?assertEqual(undefined,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
+ Json, [<<"_attachments">>, ?ATT_BIN_NAME]
+ ),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(<<"encoding">>, AttJson)
+ ),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(<<"encoded_length">>, AttJson)
+ ),
+ ?assertEqual(
+ byte_size(Data),
+ couch_util:get_value(<<"length">>, AttJson)
+ )
end).
should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) ->
- ?_assertEqual(415,
+ ?_assertEqual(
+ 415,
begin
HttpHost = "http://" ++ Host,
AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
@@ -470,14 +529,16 @@ should_not_create_compressed_att_with_deflate_encoding({Host, DbName}) ->
],
{ok, Code, _, _} = test_request:put(AttUrl, Headers, Body),
Code
- end).
+ end
+ ).
should_not_create_compressed_att_with_compress_encoding({Host, DbName}) ->
% Note: As of OTP R13B04, it seems there's no LZW compression
% (i.e. UNIX compress utility implementation) lib in OTP.
% However there's a simple working Erlang implementation at:
% http://scienceblogs.com/goodmath/2008/01/simple_lempelziv_compression_i.php
- ?_assertEqual(415,
+ ?_assertEqual(
+ 415,
begin
HttpHost = "http://" ++ Host,
AttUrl = string:join([HttpHost, DbName, ?docid(), "file.txt"], "/"),
@@ -488,33 +549,42 @@ should_not_create_compressed_att_with_compress_encoding({Host, DbName}) ->
],
{ok, Code, _, _} = test_request:put(AttUrl, Headers, Data),
Code
- end).
+ end
+ ).
should_create_compressible_att_with_ctype_params({Host, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- HttpHost = "http://" ++ Host,
- DocUrl = string:join([HttpHost, DbName, ?docid()], "/"),
- AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"),
- {ok, Data} = file:read_file(?FIXTURE_TXT),
- Headers = [{"Content-Type", "text/plain; charset=UTF-8"}],
- {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data),
- ?assertEqual(201, Code0),
-
- {ok, Code1, _, Body} = test_request:get(
- DocUrl ++ "?att_encoding_info=true"),
- ?assertEqual(200, Code1),
- Json = jiffy:decode(Body),
- {AttJson} = couch_util:get_nested_json_value(
- Json, [<<"_attachments">>, ?ATT_TXT_NAME]),
- ?assertEqual(<<"gzip">>,
- couch_util:get_value(<<"encoding">>, AttJson)),
- AttEncLength = iolist_size(gzip(Data)),
- ?assertEqual(AttEncLength,
- couch_util:get_value(<<"encoded_length">>, AttJson)),
- ?assertEqual(byte_size(Data),
- couch_util:get_value(<<"length">>, AttJson))
- end)}.
-
+ {timeout, ?TIMEOUT_EUNIT,
+ ?_test(begin
+ HttpHost = "http://" ++ Host,
+ DocUrl = string:join([HttpHost, DbName, ?docid()], "/"),
+ AttUrl = string:join([DocUrl, ?b2l(?ATT_TXT_NAME)], "/"),
+ {ok, Data} = file:read_file(?FIXTURE_TXT),
+ Headers = [{"Content-Type", "text/plain; charset=UTF-8"}],
+ {ok, Code0, _, _} = test_request:put(AttUrl, Headers, Data),
+ ?assertEqual(201, Code0),
+
+ {ok, Code1, _, Body} = test_request:get(
+ DocUrl ++ "?att_encoding_info=true"
+ ),
+ ?assertEqual(200, Code1),
+ Json = jiffy:decode(Body),
+ {AttJson} = couch_util:get_nested_json_value(
+ Json, [<<"_attachments">>, ?ATT_TXT_NAME]
+ ),
+ ?assertEqual(
+ <<"gzip">>,
+ couch_util:get_value(<<"encoding">>, AttJson)
+ ),
+ AttEncLength = iolist_size(gzip(Data)),
+ ?assertEqual(
+ AttEncLength,
+ couch_util:get_value(<<"encoded_length">>, AttJson)
+ ),
+ ?assertEqual(
+ byte_size(Data),
+ couch_util:get_value(<<"length">>, AttJson)
+ )
+ end)}.
compact_after_lowering_attachment_size_limit_test_() ->
{
@@ -539,33 +609,33 @@ compact_after_lowering_attachment_size_limit_test_() ->
}
}.
-
should_compact_after_lowering_attachment_size_limit({_Ctx, DbName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = #doc{id = <<"doc1">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(Db1, Doc1, []),
- couch_db:close(Db1),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- compact_db(DbName),
- {ok, Db2} = couch_db:open_int(DbName, []),
- {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>),
- couch_db:close(Db2),
- [Att] = Doc2#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
+ {timeout, ?TIMEOUT_EUNIT,
+ ?_test(begin
+ {ok, Db1} = couch_db:open(DbName, [?ADMIN_CTX]),
+ Doc1 = #doc{id = <<"doc1">>, atts = att(1000)},
+ {ok, _} = couch_db:update_doc(Db1, Doc1, []),
+ couch_db:close(Db1),
+ config:set("couchdb", "max_attachment_size", "1", _Persist = false),
+ compact_db(DbName),
+ {ok, Db2} = couch_db:open_int(DbName, []),
+ {ok, Doc2} = couch_db:open_doc(Db2, <<"doc1">>),
+ couch_db:close(Db2),
+ [Att] = Doc2#doc.atts,
+ ?assertEqual(1000, couch_att:fetch(att_len, Att))
+ end)}.
att(Size) when is_integer(Size), Size >= 1 ->
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(_Bytes) ->
- << <<"x">> || _ <- lists:seq(1, Size) >>
- end}
- ])].
-
+ [
+ couch_att:new([
+ {name, <<"att">>},
+ {type, <<"app/binary">>},
+ {att_len, Size},
+ {data, fun(_Bytes) ->
+ <<<<"x">> || _ <- lists:seq(1, Size)>>
+ end}
+ ])
+ ].
compact_db(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
@@ -573,34 +643,35 @@ compact_db(DbName) ->
wait_compaction(DbName, "database", ?LINE),
ok = couch_db:close(Db).
-
wait_compaction(DbName, Kind, Line) ->
WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
+ case is_compaction_running(DbName) of
+ true -> wait;
+ false -> ok
+ end
end,
case test_util:wait(WaitFun, ?TIMEOUT) of
timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, Line},
+ {reason,
+ "Timeout waiting for " ++
+ Kind ++
+ " database compaction"}
+ ]}
+ );
_ ->
ok
end.
-
is_compaction_running(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
{ok, DbInfo} = couch_db:get_db_info(Db),
couch_db:close(Db),
couch_util:get_value(compact_running, DbInfo) =:= true.
-
internal_replication_after_lowering_attachment_size_limit_test_() ->
{
"Internal replication after lowering max attachment size",
@@ -629,23 +700,23 @@ internal_replication_after_lowering_attachment_size_limit_test_() ->
}.
should_replicate_after_lowering_attachment_size({_Ctx, SrcName, TgtName}) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]),
- SrcDoc = #doc{id = <<"doc">>, atts = att(1000)},
- {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []),
- couch_db:close(SrcDb),
- config:set("couchdb", "max_attachment_size", "1", _Persist = false),
- % Create a pair of "fake" shards
- SrcShard = #shard{name = SrcName, node = node()},
- TgtShard = #shard{name = TgtName, node = node()},
- mem3_rep:go(SrcShard, TgtShard, []),
- {ok, TgtDb} = couch_db:open_int(TgtName, []),
- {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>),
- couch_db:close(TgtDb),
- [Att] = TgtDoc#doc.atts,
- ?assertEqual(1000, couch_att:fetch(att_len, Att))
- end)}.
-
+ {timeout, ?TIMEOUT_EUNIT,
+ ?_test(begin
+ {ok, SrcDb} = couch_db:open(SrcName, [?ADMIN_CTX]),
+ SrcDoc = #doc{id = <<"doc">>, atts = att(1000)},
+ {ok, _} = couch_db:update_doc(SrcDb, SrcDoc, []),
+ couch_db:close(SrcDb),
+ config:set("couchdb", "max_attachment_size", "1", _Persist = false),
+ % Create a pair of "fake" shards
+ SrcShard = #shard{name = SrcName, node = node()},
+ TgtShard = #shard{name = TgtName, node = node()},
+ mem3_rep:go(SrcShard, TgtShard, []),
+ {ok, TgtDb} = couch_db:open_int(TgtName, []),
+ {ok, TgtDoc} = couch_db:open_doc(TgtDb, <<"doc">>),
+ couch_db:close(TgtDb),
+ [Att] = TgtDoc#doc.atts,
+ ?assertEqual(1000, couch_att:fetch(att_len, Att))
+ end)}.
get_json(Json, Path) ->
couch_util:get_nested_json_value(Json, Path).
@@ -684,12 +755,15 @@ bind_address() ->
request(Method, Url, Headers, Body) ->
RequestHead = [Method, " ", Url, " HTTP/1.1"],
- RequestHeaders = [[string:join([Key, Value], ": "), "\r\n"]
- || {Key, Value} <- Headers],
+ RequestHeaders = [
+ [string:join([Key, Value], ": "), "\r\n"]
+ || {Key, Value} <- Headers
+ ],
Request = [RequestHead, "\r\n", RequestHeaders, "\r\n", Body],
Sock = get_socket(),
gen_tcp:send(Sock, list_to_binary(lists:flatten(Request))),
- timer:sleep(?TIMEWAIT), % must wait to receive complete response
+ % must wait to receive complete response
+ timer:sleep(?TIMEWAIT),
{ok, R} = gen_tcp:recv(Sock, 0),
gen_tcp:close(Sock),
[Header, Body1] = re:split(R, "\r\n\r\n", [{return, binary}]),
@@ -702,7 +776,8 @@ create_standalone_text_att(Host, DbName) ->
{ok, Data} = file:read_file(?FIXTURE_TXT),
Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
{ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "text/plain"}], Data),
+ Url, [{"Content-Type", "text/plain"}], Data
+ ),
?assertEqual(201, Code),
Url.
@@ -710,39 +785,48 @@ create_standalone_png_att(Host, DbName) ->
{ok, Data} = file:read_file(?FIXTURE_PNG),
Url = string:join([Host, DbName, "doc", ?b2l(?ATT_BIN_NAME)], "/"),
{ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "image/png"}], Data),
+ Url, [{"Content-Type", "image/png"}], Data
+ ),
?assertEqual(201, Code),
Url.
create_inline_text_att(Host, DbName) ->
{ok, Data} = file:read_file(?FIXTURE_TXT),
Url = string:join([Host, DbName, "doc"], "/"),
- Doc = {[
- {<<"_attachments">>, {[
- {?ATT_TXT_NAME, {[
- {<<"content_type">>, <<"text/plain">>},
- {<<"data">>, base64:encode(Data)}
- ]}
- }]}}
- ]},
+ Doc =
+ {[
+ {<<"_attachments">>,
+ {[
+ {?ATT_TXT_NAME,
+ {[
+ {<<"content_type">>, <<"text/plain">>},
+ {<<"data">>, base64:encode(Data)}
+ ]}}
+ ]}}
+ ]},
{ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)),
+ Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)
+ ),
?assertEqual(201, Code),
string:join([Url, ?b2l(?ATT_TXT_NAME)], "/").
create_inline_png_att(Host, DbName) ->
{ok, Data} = file:read_file(?FIXTURE_PNG),
Url = string:join([Host, DbName, "doc"], "/"),
- Doc = {[
- {<<"_attachments">>, {[
- {?ATT_BIN_NAME, {[
- {<<"content_type">>, <<"image/png">>},
- {<<"data">>, base64:encode(Data)}
- ]}
- }]}}
- ]},
+ Doc =
+ {[
+ {<<"_attachments">>,
+ {[
+ {?ATT_BIN_NAME,
+ {[
+ {<<"content_type">>, <<"image/png">>},
+ {<<"data">>, base64:encode(Data)}
+ ]}}
+ ]}}
+ ]},
{ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)),
+ Url, [{"Content-Type", "application/json"}], jiffy:encode(Doc)
+ ),
?assertEqual(201, Code),
string:join([Url, ?b2l(?ATT_BIN_NAME)], "/").
@@ -750,8 +834,10 @@ create_already_compressed_att(Host, DbName) ->
{ok, Data} = file:read_file(?FIXTURE_TXT),
Url = string:join([Host, DbName, "doc", ?b2l(?ATT_TXT_NAME)], "/"),
{ok, Code, _Headers, _Body} = test_request:put(
- Url, [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
- zlib:gzip(Data)),
+ Url,
+ [{"Content-Type", "text/plain"}, {"Content-Encoding", "gzip"}],
+ zlib:gzip(Data)
+ ),
?assertEqual(201, Code),
Url.
diff --git a/src/couch/test/eunit/couchdb_auth_tests.erl b/src/couch/test/eunit/couchdb_auth_tests.erl
index 19d32d0c5..dfb22dc25 100644
--- a/src/couch/test/eunit/couchdb_auth_tests.erl
+++ b/src/couch/test/eunit/couchdb_auth_tests.erl
@@ -14,23 +14,21 @@
-include_lib("couch/include/couch_eunit.hrl").
-
setup(PortType) ->
Hashed = couch_passwords:hash_admin_password("artischocko"),
- ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist=false),
+ ok = config:set("admins", "rocko", binary_to_list(Hashed), _Persist = false),
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
lists:concat(["http://", Addr, ":", port(PortType), "/_session"]).
setup_require_valid_user(PortType) ->
- ok = config:set("chttpd", "require_valid_user", "true", _Persist=false),
+ ok = config:set("chttpd", "require_valid_user", "true", _Persist = false),
setup(PortType).
teardown(_, _) ->
ok.
teardown_require_valid_user(_, _) ->
- config:set("chttpd", "require_valid_user", "false", _Persist=false).
-
+ config:set("chttpd", "require_valid_user", "false", _Persist = false).
auth_test_() ->
Tests = [
@@ -46,7 +44,8 @@ auth_test_() ->
"Auth tests",
{
setup,
- fun() -> test_util:start_couch([chttpd]) end, fun test_util:stop_couch/1,
+ fun() -> test_util:start_couch([chttpd]) end,
+ fun test_util:stop_couch/1,
[
make_test_cases(clustered, Tests),
make_test_cases(backdoor, Tests),
@@ -64,49 +63,67 @@ make_test_cases(Mod, Funs) ->
make_require_valid_user_test_cases(Mod, Funs) ->
{
lists:flatten(io_lib:format("~s require_valid_user=true", [Mod])),
- {foreachx, fun setup_require_valid_user/1, fun teardown_require_valid_user/2,
- [{Mod, Fun} || Fun <- Funs]}
+ {foreachx, fun setup_require_valid_user/1, fun teardown_require_valid_user/2, [
+ {Mod, Fun}
+ || Fun <- Funs
+ ]}
}.
should_return_username_on_post_to_session(_PortType, Url) ->
- ?_assertEqual(<<"rocko">>,
+ ?_assertEqual(
+ <<"rocko">>,
begin
Hashed = couch_passwords:hash_admin_password(<<"artischocko">>),
ok = config:set("admins", "rocko", binary_to_list(Hashed), false),
- {ok, _, _, Body} = test_request:post(Url, [{"Content-Type", "application/json"}],
- "{\"name\":\"rocko\", \"password\":\"artischocko\"}"),
+ {ok, _, _, Body} = test_request:post(
+ Url,
+ [{"Content-Type", "application/json"}],
+ "{\"name\":\"rocko\", \"password\":\"artischocko\"}"
+ ),
{Json} = jiffy:decode(Body),
proplists:get_value(<<"name">>, Json)
- end).
+ end
+ ).
should_not_return_authenticated_field(_PortType, Url) ->
- ?_assertThrow({not_found, _},
+ ?_assertThrow(
+ {not_found, _},
begin
couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authenticated">>])
- end).
+ <<"info">>, <<"authenticated">>
+ ])
+ end
+ ).
should_return_list_of_handlers(backdoor, Url) ->
- ?_assertEqual([<<"cookie">>,<<"default">>],
+ ?_assertEqual(
+ [<<"cookie">>, <<"default">>],
begin
couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authentication_handlers">>])
- end);
+ <<"info">>, <<"authentication_handlers">>
+ ])
+ end
+ );
should_return_list_of_handlers(clustered, Url) ->
- ?_assertEqual([<<"cookie">>,<<"default">>],
+ ?_assertEqual(
+ [<<"cookie">>, <<"default">>],
begin
couch_util:get_nested_json_value(session(Url), [
- <<"info">>, <<"authentication_handlers">>])
- end).
-
+ <<"info">>, <<"authentication_handlers">>
+ ])
+ end
+ ).
%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
session(Url) ->
- {ok, _, _, Body} = test_request:get(Url, [{"Content-Type", "application/json"}],
- "{\"name\":\"rocko\", \"password\":\"artischocko\"}"),
+ {ok, _, _, Body} = test_request:get(
+ Url,
+ [{"Content-Type", "application/json"}],
+ "{\"name\":\"rocko\", \"password\":\"artischocko\"}"
+ ),
jiffy:decode(Body).
port(clustered) ->
diff --git a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
index c46352f35..17c41dafe 100755
--- a/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
+++ b/src/couch/test/eunit/couchdb_cookie_domain_tests.erl
@@ -21,7 +21,7 @@
setup() ->
Ctx = test_util:start_couch([chttpd]),
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
Url = ?l2b(io_lib:format("http://~s:~b/_session", [Addr, Port])),
@@ -30,7 +30,7 @@ setup() ->
{ok, ?b2l(Url), ContentType, ?b2l(Payload), Ctx}.
teardown({ok, _, _, _, Ctx}) ->
- ok = config:delete("admins", ?USER, _Persist=false),
+ ok = config:delete("admins", ?USER, _Persist = false),
test_util:stop_couch(Ctx).
cookie_test_() ->
@@ -52,8 +52,12 @@ cookie_test_() ->
should_set_cookie_domain(Url, ContentType, Payload) ->
?_test(begin
- ok = config:set("couch_httpd_auth", "cookie_domain",
- "example.com", false),
+ ok = config:set(
+ "couch_httpd_auth",
+ "cookie_domain",
+ "example.com",
+ false
+ ),
{ok, Code, Headers, _} = test_request:post(Url, ContentType, Payload),
?assertEqual(200, Code),
Cookie = proplists:get_value("Set-Cookie", Headers),
@@ -71,8 +75,12 @@ should_not_set_cookie_domain(Url, ContentType, Payload) ->
should_delete_cookie_domain(Url, ContentType, Payload) ->
?_test(begin
- ok = config:set("couch_httpd_auth", "cookie_domain",
- "example.com", false),
+ ok = config:set(
+ "couch_httpd_auth",
+ "cookie_domain",
+ "example.com",
+ false
+ ),
{ok, Code, Headers, _} = test_request:delete(Url, ContentType, Payload),
?assertEqual(200, Code),
Cookie = proplists:get_value("Set-Cookie", Headers),
diff --git a/src/couch/test/eunit/couchdb_cors_tests.erl b/src/couch/test/eunit/couchdb_cors_tests.erl
index 8ec61cc8a..dce07fd28 100644
--- a/src/couch/test/eunit/couchdb_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_cors_tests.erl
@@ -20,10 +20,12 @@
-define(TIMEOUT, 1000).
-define(_assertEqualLists(A, B),
- ?_assertEqual(lists:usort(A), lists:usort(B))).
+ ?_assertEqual(lists:usort(A), lists:usort(B))
+).
-define(assertEqualLists(A, B),
- ?assertEqual(lists:usort(A), lists:usort(B))).
+ ?assertEqual(lists:usort(A), lists:usort(B))
+).
start() ->
Ctx = test_util:start_couch([ioq]),
@@ -46,14 +48,16 @@ setup() ->
setup({Mod, VHost}) ->
{Host, DbName} = setup(),
- Url = case Mod of
- server ->
- Host;
- db ->
- Host ++ "/" ++ DbName
- end,
- DefaultHeaders = [{"Origin", "http://example.com"}]
- ++ maybe_append_vhost(VHost),
+ Url =
+ case Mod of
+ server ->
+ Host;
+ db ->
+ Host ++ "/" ++ DbName
+ end,
+ DefaultHeaders =
+ [{"Origin", "http://example.com"}] ++
+ maybe_append_vhost(VHost),
{Host, DbName, Url, DefaultHeaders}.
teardown(DbName) when is_list(DbName) ->
@@ -65,7 +69,6 @@ teardown({_, DbName}) ->
teardown(_, {_, DbName, _, _}) ->
teardown(DbName).
-
cors_test_() ->
Funs = [
fun should_not_allow_origin/2,
@@ -85,7 +88,8 @@ cors_test_() ->
"CORS (COUCHDB-431)",
{
setup,
- fun start/0, fun test_util:stop_couch/1,
+ fun start/0,
+ fun test_util:stop_couch/1,
[
cors_tests(Funs),
vhost_cors_tests(Funs),
@@ -99,7 +103,8 @@ headers_tests() ->
"Various headers tests",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_not_return_cors_headers_for_invalid_origin/1,
fun should_not_return_cors_headers_for_invalid_origin_preflight/1,
@@ -130,206 +135,288 @@ vhost_cors_tests(Funs) ->
make_test_case(Mod, UseVhost, Funs) ->
{
- case Mod of server -> "Server"; db -> "Database" end,
- {foreachx, fun setup/1, fun teardown/2, [{{Mod, UseVhost}, Fun}
- || Fun <- Funs]}
+ case Mod of
+ server -> "Server";
+ db -> "Database"
+ end,
+ {foreachx, fun setup/1, fun teardown/2, [
+ {{Mod, UseVhost}, Fun}
+ || Fun <- Funs
+ ]}
}.
-
should_not_allow_origin(_, {_, _, Url, Headers0}) ->
- ?_assertEqual(undefined,
+ ?_assertEqual(
+ undefined,
begin
config:delete("cors", "origins", false),
Headers1 = proplists:delete("Origin", Headers0),
- Headers = [{"Origin", "http://127.0.0.1"}]
- ++ Headers1,
+ Headers =
+ [{"Origin", "http://127.0.0.1"}] ++
+ Headers1,
{ok, _, Resp, _} = test_request:get(Url, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_not_allow_origin_with_port_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(undefined,
+ ?_assertEqual(
+ undefined,
begin
- Headers = [{"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
+ Headers =
+ [
+ {"Origin", "http://example.com:5984"},
+ {"Access-Control-Request-Method", "GET"}
+ ] ++
+ maybe_append_vhost(VHost),
{ok, _, Resp, _} = test_request:options(Url, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_not_allow_origin_with_scheme_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(undefined,
+ ?_assertEqual(
+ undefined,
begin
- Headers = [{"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
+ Headers =
+ [
+ {"Origin", "http://example.com:5984"},
+ {"Access-Control-Request-Method", "GET"}
+ ] ++
+ maybe_append_vhost(VHost),
{ok, _, Resp, _} = test_request:options(Url, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_not_all_origin_due_case_mismatch({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual(undefined,
+ ?_assertEqual(
+ undefined,
begin
- Headers = [{"Origin", "http://ExAmPlE.CoM"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
+ Headers =
+ [
+ {"Origin", "http://ExAmPlE.CoM"},
+ {"Access-Control-Request-Method", "GET"}
+ ] ++
+ maybe_append_vhost(VHost),
{ok, _, Resp, _} = test_request:options(Url, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_make_simple_request(_, {_, _, Url, DefaultHeaders}) ->
?_test(begin
{ok, _, Resp, _} = test_request:get(Url, DefaultHeaders),
?assertEqual(
undefined,
- proplists:get_value("Access-Control-Allow-Credentials", Resp)),
+ proplists:get_value("Access-Control-Allow-Credentials", Resp)
+ ),
?assertEqual(
"http://example.com",
- proplists:get_value("Access-Control-Allow-Origin", Resp)),
+ proplists:get_value("Access-Control-Allow-Origin", Resp)
+ ),
?assertEqualLists(
?COUCH_HEADERS ++ list_simple_headers(Resp),
- split_list(proplists:get_value("Access-Control-Expose-Headers", Resp)))
+ split_list(proplists:get_value("Access-Control-Expose-Headers", Resp))
+ )
end).
should_make_preflight_request(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqualLists(?SUPPORTED_METHODS,
+ ?_assertEqualLists(
+ ?SUPPORTED_METHODS,
begin
- Headers = DefaultHeaders
- ++ [{"Access-Control-Request-Method", "GET"}],
+ Headers =
+ DefaultHeaders ++
+ [{"Access-Control-Request-Method", "GET"}],
{ok, _, Resp, _} = test_request:options(Url, Headers),
split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
- end).
+ end
+ ).
should_make_prefligh_request_with_port({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual("http://example.com:5984",
+ ?_assertEqual(
+ "http://example.com:5984",
begin
- config:set("cors", "origins", "http://example.com:5984",
- false),
- Headers = [{"Origin", "http://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
+ config:set(
+ "cors",
+ "origins",
+ "http://example.com:5984",
+ false
+ ),
+ Headers =
+ [
+ {"Origin", "http://example.com:5984"},
+ {"Access-Control-Request-Method", "GET"}
+ ] ++
+ maybe_append_vhost(VHost),
{ok, _, Resp, _} = test_request:options(Url, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_make_prefligh_request_with_scheme({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual("https://example.com:5984",
+ ?_assertEqual(
+ "https://example.com:5984",
begin
- config:set("cors", "origins", "https://example.com:5984",
- false),
- Headers = [{"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
+ config:set(
+ "cors",
+ "origins",
+ "https://example.com:5984",
+ false
+ ),
+ Headers =
+ [
+ {"Origin", "https://example.com:5984"},
+ {"Access-Control-Request-Method", "GET"}
+ ] ++
+ maybe_append_vhost(VHost),
{ok, _, Resp, _} = test_request:options(Url, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_make_prefligh_request_with_wildcard_origin({_, VHost}, {_, _, Url, _}) ->
- ?_assertEqual("https://example.com:5984",
+ ?_assertEqual(
+ "https://example.com:5984",
begin
config:set("cors", "origins", "*", false),
- Headers = [{"Origin", "https://example.com:5984"},
- {"Access-Control-Request-Method", "GET"}]
- ++ maybe_append_vhost(VHost),
+ Headers =
+ [
+ {"Origin", "https://example.com:5984"},
+ {"Access-Control-Request-Method", "GET"}
+ ] ++
+ maybe_append_vhost(VHost),
{ok, _, Resp, _} = test_request:options(Url, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_make_request_with_credentials(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqual("true",
+ ?_assertEqual(
+ "true",
begin
ok = config:set("cors", "credentials", "true", false),
{ok, _, Resp, _} = test_request:options(Url, DefaultHeaders),
proplists:get_value("Access-Control-Allow-Credentials", Resp)
- end).
+ end
+ ).
should_make_origin_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqual("http://example.com",
+ ?_assertEqual(
+ "http://example.com",
begin
Hashed = couch_passwords:hash_admin_password(<<"test">>),
config:set("admins", "test", ?b2l(Hashed), false),
{ok, _, Resp, _} = test_request:get(
- Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]),
+ Url, DefaultHeaders, [{basic_auth, {"test", "test"}}]
+ ),
config:delete("admins", "test", false),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_make_preflight_request_with_auth(_, {_, _, Url, DefaultHeaders}) ->
- ?_assertEqualLists(?SUPPORTED_METHODS,
+ ?_assertEqualLists(
+ ?SUPPORTED_METHODS,
begin
Hashed = couch_passwords:hash_admin_password(<<"test">>),
config:set("admins", "test", ?b2l(Hashed), false),
- Headers = DefaultHeaders
- ++ [{"Access-Control-Request-Method", "GET"}],
+ Headers =
+ DefaultHeaders ++
+ [{"Access-Control-Request-Method", "GET"}],
{ok, _, Resp, _} = test_request:options(
- Url, Headers, [{basic_auth, {"test", "test"}}]),
+ Url, Headers, [{basic_auth, {"test", "test"}}]
+ ),
config:delete("admins", "test", false),
split_list(proplists:get_value("Access-Control-Allow-Methods", Resp))
- end).
+ end
+ ).
should_not_return_cors_headers_for_invalid_origin({Host, _}) ->
- ?_assertEqual(undefined,
+ ?_assertEqual(
+ undefined,
begin
Headers = [{"Origin", "http://127.0.0.1"}],
{ok, _, Resp, _} = test_request:get(Host, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_not_return_cors_headers_for_invalid_origin_preflight({Host, _}) ->
- ?_assertEqual(undefined,
+ ?_assertEqual(
+ undefined,
begin
- Headers = [{"Origin", "http://127.0.0.1"},
- {"Access-Control-Request-Method", "GET"}],
+ Headers = [
+ {"Origin", "http://127.0.0.1"},
+ {"Access-Control-Request-Method", "GET"}
+ ],
{ok, _, Resp, _} = test_request:options(Host, Headers),
proplists:get_value("Access-Control-Allow-Origin", Resp)
- end).
+ end
+ ).
should_make_request_against_attachment({Host, DbName}) ->
{"COUCHDB-1689",
- ?_assertEqual(200,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, _, _} = test_request:put(
- Url ++ "/doc/file.txt", [{"Content-Type", "text/plain"}],
- "hello, couch!"),
- ?assert(Code0 =:= 201),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc?attachments=true",
- [{"Origin", "http://example.com"}]),
- Code
- end)}.
+ ?_assertEqual(
+ 200,
+ begin
+ Url = Host ++ "/" ++ DbName,
+ {ok, Code0, _, _} = test_request:put(
+ Url ++ "/doc/file.txt",
+ [{"Content-Type", "text/plain"}],
+ "hello, couch!"
+ ),
+ ?assert(Code0 =:= 201),
+ {ok, Code, _, _} = test_request:get(
+ Url ++ "/doc?attachments=true",
+ [{"Origin", "http://example.com"}]
+ ),
+ Code
+ end
+ )}.
should_make_range_request_against_attachment({Host, DbName}) ->
{"COUCHDB-1689",
- ?_assertEqual(206,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, _, _} = test_request:put(
- Url ++ "/doc/file.txt",
- [{"Content-Type", "application/octet-stream"}],
- "hello, couch!"),
- ?assert(Code0 =:= 201),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc/file.txt", [{"Origin", "http://example.com"},
- {"Range", "bytes=0-6"}]),
- Code
- end)}.
+ ?_assertEqual(
+ 206,
+ begin
+ Url = Host ++ "/" ++ DbName,
+ {ok, Code0, _, _} = test_request:put(
+ Url ++ "/doc/file.txt",
+ [{"Content-Type", "application/octet-stream"}],
+ "hello, couch!"
+ ),
+ ?assert(Code0 =:= 201),
+ {ok, Code, _, _} = test_request:get(
+ Url ++ "/doc/file.txt", [
+ {"Origin", "http://example.com"},
+ {"Range", "bytes=0-6"}
+ ]
+ ),
+ Code
+ end
+ )}.
should_make_request_with_if_none_match_header({Host, DbName}) ->
{"COUCHDB-1697",
- ?_assertEqual(304,
- begin
- Url = Host ++ "/" ++ DbName,
- {ok, Code0, Headers0, _} = test_request:put(
- Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"),
- ?assert(Code0 =:= 201),
- ETag = proplists:get_value("ETag", Headers0),
- {ok, Code, _, _} = test_request:get(
- Url ++ "/doc", [{"Origin", "http://example.com"},
- {"If-None-Match", ETag}]),
- Code
- end)}.
-
+ ?_assertEqual(
+ 304,
+ begin
+ Url = Host ++ "/" ++ DbName,
+ {ok, Code0, Headers0, _} = test_request:put(
+ Url ++ "/doc", [{"Content-Type", "application/json"}], "{}"
+ ),
+ ?assert(Code0 =:= 201),
+ ETag = proplists:get_value("ETag", Headers0),
+ {ok, Code, _, _} = test_request:get(
+ Url ++ "/doc", [
+ {"Origin", "http://example.com"},
+ {"If-None-Match", ETag}
+ ]
+ ),
+ Code
+ end
+ )}.
maybe_append_vhost(true) ->
[{"Host", "http://example.com"}];
diff --git a/src/couch/test/eunit/couchdb_db_tests.erl b/src/couch/test/eunit/couchdb_db_tests.erl
index 338f2cd3c..2f6993576 100644
--- a/src/couch/test/eunit/couchdb_db_tests.erl
+++ b/src/couch/test/eunit/couchdb_db_tests.erl
@@ -21,12 +21,10 @@ setup() ->
fabric:create_db(DbName),
DbName.
-
teardown(DbName) ->
(catch fabric:delete_db(DbName)),
ok.
-
clustered_db_test_() ->
{
"Checking clustered db API",
@@ -39,7 +37,8 @@ clustered_db_test_() ->
"DB deletion",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_close_deleted_db/1,
fun should_kill_caller_from_load_validation_funs_for_deleted_db/1
@@ -50,7 +49,6 @@ clustered_db_test_() ->
}
}.
-
should_close_deleted_db(DbName) ->
?_test(begin
[#shard{name = ShardName} | _] = mem3:shards(DbName),
@@ -60,7 +58,7 @@ should_close_deleted_db(DbName) ->
fabric:delete_db(DbName),
receive
{'DOWN', MonitorRef, _Type, _Pid, _Info} ->
- ok
+ ok
after 2000 ->
throw(timeout_error)
end,
@@ -71,8 +69,7 @@ should_close_deleted_db(DbName) ->
end
end),
?assertEqual([], ets:lookup(couch_server:couch_dbs(DbName), DbName))
- end).
-
+ end).
should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) ->
?_test(begin
@@ -85,7 +82,7 @@ should_kill_caller_from_load_validation_funs_for_deleted_db(DbName) ->
{'DOWN', MonitorRef, _Type, _Pid, _Info} ->
ok
after 2000 ->
- throw(timeout_error)
+ throw(timeout_error)
end,
?assertError(database_does_not_exist, couch_db:load_validation_funs(Db))
end).
diff --git a/src/couch/test/eunit/couchdb_design_doc_tests.erl b/src/couch/test/eunit/couchdb_design_doc_tests.erl
index 653a6cb17..c51d56f0b 100644
--- a/src/couch/test/eunit/couchdb_design_doc_tests.erl
+++ b/src/couch/test/eunit/couchdb_design_doc_tests.erl
@@ -25,21 +25,21 @@ setup() ->
BaseUrl = "http://" ++ Addr ++ ":" ++ Port,
{?b2l(DbName), BaseUrl}.
-
teardown({DbName, _}) ->
couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
ok.
-
design_list_test_() ->
{
"Check _list functionality",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_empty_when_plain_return/1,
fun should_return_empty_when_no_docs/1
@@ -50,38 +50,50 @@ design_list_test_() ->
should_return_empty_when_plain_return({DbName, BaseUrl}) ->
?_test(begin
- ?assertEqual(<<>>,
- query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view"))
+ ?assertEqual(
+ <<>>,
+ query_text(BaseUrl, DbName, "foo", "_list/plain_return/simple_view")
+ )
end).
should_return_empty_when_no_docs({DbName, BaseUrl}) ->
?_test(begin
- ?assertEqual(<<>>,
- query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view"))
+ ?assertEqual(
+ <<>>,
+ query_text(BaseUrl, DbName, "foo", "_list/simple_render/simple_view")
+ )
end).
create_design_doc(DbName, DDName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"simple_view">>, {[
- {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">> },
- {<<"reduce">>, <<"function (key, values, rereduce) {return sum(values);}">> }
- ]}}
- ]}},
- {<<"lists">>, {[
- {<<"plain_return">>, <<"function(head, req) {return;}">>},
- {<<"simple_render">>, <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDName},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {<<"simple_view">>,
+ {[
+ {<<"map">>, <<"function(doc) {emit(doc._id, doc)}">>},
+ {<<"reduce">>,
+ <<"function (key, values, rereduce) {return sum(values);}">>}
+ ]}}
+ ]}},
+ {<<"lists">>,
+ {[
+ {<<"plain_return">>, <<"function(head, req) {return;}">>},
+ {<<"simple_render">>,
+ <<"function(head, req) {var row; while(row=getRow()) {send(JSON.stringify(row)); }}">>}
+ ]}}
+ ]}
+ ),
{ok, Rev} = couch_db:update_doc(Db, DDoc, []),
couch_db:close(Db),
Rev.
query_text(BaseUrl, DbName, DDoc, Path) ->
{ok, Code, _Headers, Body} = test_request:get(
- BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path),
+ BaseUrl ++ "/" ++ DbName ++ "/_design/" ++ DDoc ++ "/" ++ Path
+ ),
?assertEqual(200, Code),
Body.
diff --git a/src/couch/test/eunit/couchdb_file_compression_tests.erl b/src/couch/test/eunit/couchdb_file_compression_tests.erl
index 77250337c..75bf18a12 100644
--- a/src/couch/test/eunit/couchdb_file_compression_tests.erl
+++ b/src/couch/test/eunit/couchdb_file_compression_tests.erl
@@ -25,26 +25,27 @@ setup_all() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
ok = populate_db(Db, ?DOCS_COUNT),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DDOC_ID},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"by_id">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>}
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?DDOC_ID},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {<<"by_id">>,
+ {[
+ {<<"map">>, <<"function(doc){emit(doc._id, doc.string);}">>}
+ ]}}
]}}
- ]}
- }
- ]}),
+ ]}
+ ),
{ok, _} = couch_db:update_doc(Db, DDoc, []),
ok = couch_db:close(Db),
{Ctx, DbName}.
-
teardown_all({Ctx, DbName}) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]),
test_util:stop_couch(Ctx).
-
couch_file_compression_test_() ->
{
"CouchDB file compression tests",
@@ -62,13 +63,11 @@ couch_file_compression_test_() ->
}
}.
-
should_use_none({_, DbName}) -> run_test(DbName, "none").
should_use_deflate_1({_, DbName}) -> run_test(DbName, "deflate_1").
should_use_deflate_9({_, DbName}) -> run_test(DbName, "deflate_9").
should_use_snappy({_, DbName}) -> run_test(DbName, "snappy").
-
should_compare_compression_methods({_, DbName}) ->
TestDb = setup_db(DbName),
Name = "none > snappy > deflate_1 > deflate_9",
@@ -78,7 +77,6 @@ should_compare_compression_methods({_, DbName}) ->
couch_server:delete(TestDb, [?ADMIN_CTX])
end.
-
run_test(DbName, Comp) ->
config:set("couchdb", "file_compression", Comp, false),
Timeout = 5 + ?TIMEOUT,
@@ -93,7 +91,6 @@ run_test(DbName, Comp) ->
ok = couch_server:delete(TestDb, [?ADMIN_CTX])
end.
-
compare_methods(DbName) ->
config:set("couchdb", "file_compression", "none", false),
ExternalSizePreCompact = db_external_size(DbName),
@@ -140,22 +137,23 @@ compare_methods(DbName) ->
?assert(ViewExternalSizeNone =:= ViewExternalSizeSnappy),
?assert(ViewExternalSizeNone =:= ViewExternalSizeDeflate9).
-
populate_db(_Db, NumDocs) when NumDocs =< 0 ->
ok;
populate_db(Db, NumDocs) ->
Docs = lists:map(
fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:random()},
- {<<"string">>, ?l2b(lists:duplicate(1000, $X))}
- ]})
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, couch_uuids:random()},
+ {<<"string">>, ?l2b(lists:duplicate(1000, $X))}
+ ]}
+ )
end,
- lists:seq(1, 500)),
+ lists:seq(1, 500)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs, []),
populate_db(Db, NumDocs - 500).
-
setup_db(SrcDbName) ->
TgtDbName = ?tempdb(),
TgtDbFileName = binary_to_list(TgtDbName) ++ ".couch",
@@ -167,7 +165,6 @@ setup_db(SrcDbName) ->
refresh_index(TgtDbName),
TgtDbName.
-
refresh_index(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
{ok, DDoc} = couch_db:open_doc(Db, ?DDOC_ID, [ejson_body]),
@@ -224,19 +221,23 @@ external_size(Info) ->
wait_compaction(DbName, Kind, Line) ->
WaitFun = fun() ->
- case is_compaction_running(DbName) of
- true -> wait;
- false -> ok
- end
+ case is_compaction_running(DbName) of
+ true -> wait;
+ false -> ok
+ end
end,
case test_util:wait(WaitFun, ?TIMEOUT) of
timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, Line},
+ {reason,
+ "Timeout waiting for " ++
+ Kind ++
+ " database compaction"}
+ ]}
+ );
_ ->
ok
end.
@@ -246,5 +247,5 @@ is_compaction_running(DbName) ->
{ok, DbInfo} = couch_db:get_db_info(Db),
{ok, ViewInfo} = couch_mrview:get_info(Db, ?DDOC_ID),
couch_db:close(Db),
- (couch_util:get_value(compact_running, ViewInfo) =:= true)
- orelse (couch_util:get_value(compact_running, DbInfo) =:= true).
+ (couch_util:get_value(compact_running, ViewInfo) =:= true) orelse
+ (couch_util:get_value(compact_running, DbInfo) =:= true).
diff --git a/src/couch/test/eunit/couchdb_location_header_tests.erl b/src/couch/test/eunit/couchdb_location_header_tests.erl
index c6c039eb0..08870f8c8 100644
--- a/src/couch/test/eunit/couchdb_location_header_tests.erl
+++ b/src/couch/test/eunit/couchdb_location_header_tests.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 1000).
-
setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
@@ -32,16 +31,17 @@ teardown({_, DbName}) ->
ok = couch_server:delete(?l2b(DbName), [?ADMIN_CTX]),
ok.
-
header_test_() ->
{
"CouchDB Location Header Tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_work_with_newlines_in_docs/1,
fun should_work_with_newlines_in_attachments/1
@@ -56,10 +56,14 @@ should_work_with_newlines_in_docs({Host, DbName}) ->
?_assertEqual(
Url,
begin
- {ok, _, Headers, _} = test_request:put(Url,
- [{"Content-Type", "application/json"}], "{}"),
+ {ok, _, Headers, _} = test_request:put(
+ Url,
+ [{"Content-Type", "application/json"}],
+ "{}"
+ ),
proplists:get_value("Location", Headers)
- end)}.
+ end
+ )}.
should_work_with_newlines_in_attachments({Host, DbName}) ->
Url = Host ++ "/" ++ DbName,
@@ -75,4 +79,5 @@ should_work_with_newlines_in_attachments({Host, DbName}) ->
],
{ok, _, Headers, _} = test_request:put(AttUrl, Headers0, Body),
proplists:get_value("Location", Headers)
- end)}.
+ end
+ )}.
diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
index 3a560edce..9822542f3 100644
--- a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
+++ b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl
@@ -15,24 +15,24 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-
--define(DDOC, {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"shows">>, {[
- {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
- ]}}
-]}).
+-define(DDOC,
+ {[
+ {<<"_id">>, <<"_design/foo">>},
+ {<<"shows">>,
+ {[
+ {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
+ ]}}
+ ]}
+).
-define(USER, "mrview_cors_test_admin").
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
start() ->
Ctx = test_util:start_couch([chttpd]),
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
ok = config:set("chttpd", "enable_cors", "true", false),
ok = config:set("vhosts", "example.com", "/", false),
Ctx.
@@ -49,7 +49,7 @@ setup(PortType) ->
{Host, ?b2l(DbName)}.
teardown(Ctx) ->
- ok = config:delete("admins", ?USER, _Persist=false),
+ ok = config:delete("admins", ?USER, _Persist = false),
test_util:stop_couch(Ctx).
teardown(PortType, {_Host, DbName}) ->
@@ -61,7 +61,8 @@ cors_test_() ->
"CORS for mrview",
{
setup,
- fun start/0, fun teardown/1,
+ fun start/0,
+ fun teardown/1,
[show_tests()]
}
}.
@@ -83,13 +84,16 @@ make_test_case(Mod, Funs) ->
should_make_shows_request(_, {Host, DbName}) ->
?_test(begin
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar",
- Headers = [{"Origin", "http://example.com"},
- {"Access-Control-Request-Method", "GET"}, ?AUTH],
- {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers),
- Origin = proplists:get_value("Access-Control-Allow-Origin", Resp),
- ?assertEqual("http://example.com", Origin),
- ?assertEqual(<<"<h1>wosh</h1>">>, Body)
+ ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_show/bar",
+ Headers = [
+ {"Origin", "http://example.com"},
+ {"Access-Control-Request-Method", "GET"},
+ ?AUTH
+ ],
+ {ok, _, Resp, Body} = test_request:get(ReqUrl, Headers),
+ Origin = proplists:get_value("Access-Control-Allow-Origin", Resp),
+ ?assertEqual("http://example.com", Origin),
+ ?assertEqual(<<"<h1>wosh</h1>">>, Body)
end).
create_db(backdoor, DbName) ->
@@ -111,7 +115,6 @@ assert_success(create_db, Status) ->
true = lists:member(Status, [201, 202]);
assert_success(delete_db, Status) ->
true = lists:member(Status, [200, 202]).
-
host_url(PortType) ->
"http://" ++ bind_address(PortType) ++ ":" ++ port(PortType).
@@ -132,7 +135,6 @@ port(clustered) ->
port(backdoor) ->
integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
upload_ddoc(Host, DbName) ->
Url = Host ++ "/" ++ DbName ++ "/_design/foo",
Body = couch_util:json_encode(?DDOC),
diff --git a/src/couch/test/eunit/couchdb_mrview_tests.erl b/src/couch/test/eunit/couchdb_mrview_tests.erl
index ec77b190d..606c9c39a 100644
--- a/src/couch/test/eunit/couchdb_mrview_tests.erl
+++ b/src/couch/test/eunit/couchdb_mrview_tests.erl
@@ -15,41 +15,46 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-
--define(DDOC, {[
- {<<"_id">>, <<"_design/foo">>},
- {<<"shows">>, {[
- {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
- ]}},
- {<<"updates">>, {[
- {<<"report">>, <<"function(doc, req) {"
- "var data = JSON.parse(req.body); "
- "return ['test', data];"
- "}">>}
- ]}},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>}
- ]}}
- ]}}
-]}).
+-define(DDOC,
+ {[
+ {<<"_id">>, <<"_design/foo">>},
+ {<<"shows">>,
+ {[
+ {<<"bar">>, <<"function(doc, req) {return '<h1>wosh</h1>';}">>}
+ ]}},
+ {<<"updates">>,
+ {[
+ {<<"report">>, <<
+ "function(doc, req) {"
+ "var data = JSON.parse(req.body); "
+ "return ['test', data];"
+ "}"
+ >>}
+ ]}},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, <<"function(doc){emit(doc._id, doc._rev)}">>}
+ ]}}
+ ]}}
+ ]}
+).
-define(USER, "admin").
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-
setup_all() ->
Ctx = test_util:start_couch([chttpd]),
ok = meck:new(mochiweb_socket, [passthrough]),
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
Ctx.
teardown_all(Ctx) ->
meck:unload(),
- ok = config:delete("admins", ?USER, _Persist=false),
+ ok = config:delete("admins", ?USER, _Persist = false),
test_util:stop_couch(Ctx).
setup(PortType) ->
@@ -108,7 +113,6 @@ mrview_cleanup_index_files_test_() ->
}
}.
-
make_test_case(Mod, Funs) ->
{
lists:flatten(io_lib:format("~s", [Mod])),
@@ -122,33 +126,38 @@ make_test_case(Mod, Funs) ->
should_return_invalid_request_body(PortType, {Host, DbName}) ->
?_test(begin
- ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}),
- ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id",
- {ok, Status, _Headers, Body} =
- test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"bad_request">>, couch_util:get_value(<<"error">>, Props)),
- ?assertEqual(
- <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)),
- ?assertEqual(400, Status),
- ok
+ ok = create_doc(PortType, ?l2b(DbName), <<"doc_id">>, {[]}),
+ ReqUrl = Host ++ "/" ++ DbName ++ "/_design/foo/_update/report/doc_id",
+ {ok, Status, _Headers, Body} =
+ test_request:post(ReqUrl, [?AUTH], <<"{truncated}">>),
+ {Props} = jiffy:decode(Body),
+ ?assertEqual(
+ <<"bad_request">>, couch_util:get_value(<<"error">>, Props)
+ ),
+ ?assertEqual(
+ <<"Invalid request body">>, couch_util:get_value(<<"reason">>, Props)
+ ),
+ ?assertEqual(400, Status),
+ ok
end).
should_return_400_for_wrong_order_of_keys(_PortType, {Host, DbName}) ->
Args = [{start_key, "\"bbb\""}, {end_key, "\"aaa\""}],
?_test(begin
- ReqUrl = Host ++ "/" ++ DbName
- ++ "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args),
- {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]),
- {Props} = jiffy:decode(Body),
- ?assertEqual(
- <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)),
- ?assertEqual(
+ ReqUrl =
+ Host ++ "/" ++ DbName ++
+ "/_design/foo/_view/view1?" ++ mochiweb_util:urlencode(Args),
+ {ok, Status, _Headers, Body} = test_request:get(ReqUrl, [?AUTH]),
+ {Props} = jiffy:decode(Body),
+ ?assertEqual(
+ <<"query_parse_error">>, couch_util:get_value(<<"error">>, Props)
+ ),
+ ?assertEqual(
<<"No rows can match your key range, reverse your start_key and end_key or set descending=true">>,
- couch_util:get_value(<<"reason">>, Props)),
- ?assertEqual(400, Status),
- ok
+ couch_util:get_value(<<"reason">>, Props)
+ ),
+ ?assertEqual(400, Status),
+ ok
end).
should_cleanup_index_files(_PortType, {Host, DbName}) ->
@@ -167,30 +176,34 @@ should_cleanup_index_files(_PortType, {Host, DbName}) ->
% It is hard to simulate inactive view.
% Since couch_mrview:cleanup is called on view definition change.
% That's why we just create extra files in place
- ToDelete = lists:map(fun(FilePath) ->
- ViewFile = filename:join([
- filename:dirname(FilePath),
- "11111111111111111111111111111111.view"]),
- file:write_file(ViewFile, <<>>),
- ViewFile
- end, FileList0),
+ ToDelete = lists:map(
+ fun(FilePath) ->
+ ViewFile = filename:join([
+ filename:dirname(FilePath),
+ "11111111111111111111111111111111.view"
+ ]),
+ file:write_file(ViewFile, <<>>),
+ ViewFile
+ end,
+ FileList0
+ ),
FileList1 = filelib:wildcard(IndexWildCard),
?assertEqual([], lists:usort(FileList1 -- (FileList0 ++ ToDelete))),
CleanupUrl = Host ++ "/" ++ DbName ++ "/_view_cleanup",
{ok, _Status1, _Headers1, _Body1} = test_request:post(
- CleanupUrl, [], <<>>, [?AUTH]),
+ CleanupUrl, [], <<>>, [?AUTH]
+ ),
test_util:wait(fun() ->
- IndexFiles = filelib:wildcard(IndexWildCard),
- case lists:usort(FileList0) == lists:usort(IndexFiles) of
- false -> wait;
- true -> ok
- end
+ IndexFiles = filelib:wildcard(IndexWildCard),
+ case lists:usort(FileList0) == lists:usort(IndexFiles) of
+ false -> wait;
+ true -> ok
+ end
end),
ok
end).
-
create_doc(backdoor, DbName, Id, Body) ->
JsonDoc = couch_util:json_apply_field({<<"_id">>, Id}, Body),
Doc = couch_doc:from_json_obj(JsonDoc),
@@ -223,7 +236,6 @@ assert_success(create_db, Status) ->
assert_success(delete_db, Status) ->
?assert(lists:member(Status, [200, 202])).
-
host_url(PortType) ->
"http://" ++ bind_address(PortType) ++ ":" ++ port(PortType).
@@ -243,7 +255,6 @@ port(clustered) ->
port(backdoor) ->
integer_to_list(mochiweb_socket_server:get(couch_httpd, port)).
-
upload_ddoc(Host, DbName) ->
Url = Host ++ "/" ++ DbName ++ "/_design/foo",
Body = couch_util:json_encode(?DDOC),
diff --git a/src/couch/test/eunit/couchdb_os_proc_pool.erl b/src/couch/test/eunit/couchdb_os_proc_pool.erl
index b552e114a..620265b32 100644
--- a/src/couch/test/eunit/couchdb_os_proc_pool.erl
+++ b/src/couch/test/eunit/couchdb_os_proc_pool.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 1000).
-
setup() ->
ok = couch_proc_manager:reload(),
meck:new(couch_os_process, [passthrough]),
@@ -32,15 +31,17 @@ os_proc_pool_test_() ->
"OS processes pool tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
should_block_new_proc_on_full_pool(),
should_free_slot_on_proc_unexpected_exit(),
should_reuse_known_proc(),
-% should_process_waiting_queue_as_fifo(),
+ % should_process_waiting_queue_as_fifo(),
should_reduce_pool_on_idle_os_procs(),
should_not_return_broken_process_to_the_pool()
]
@@ -48,7 +49,6 @@ os_proc_pool_test_() ->
}
}.
-
should_block_new_proc_on_full_pool() ->
?_test(begin
Client1 = spawn_client(),
@@ -78,12 +78,14 @@ should_block_new_proc_on_full_pool() ->
?assertEqual(Proc1#proc.pid, Proc4#proc.pid),
?assertNotEqual(Proc1#proc.client, Proc4#proc.client),
- lists:map(fun(C) ->
- ?assertEqual(ok, stop_client(C))
- end, [Client2, Client3, Client4])
+ lists:map(
+ fun(C) ->
+ ?assertEqual(ok, stop_client(C))
+ end,
+ [Client2, Client3, Client4]
+ )
end).
-
should_free_slot_on_proc_unexpected_exit() ->
?_test(begin
Client1 = spawn_client(),
@@ -119,12 +121,14 @@ should_free_slot_on_proc_unexpected_exit() ->
?assertNotEqual(Proc3#proc.pid, Proc4#proc.pid),
?assertNotEqual(Proc3#proc.client, Proc4#proc.client),
- lists:map(fun(C) ->
- ?assertEqual(ok, stop_client(C))
- end, [Client2, Client3, Client4])
+ lists:map(
+ fun(C) ->
+ ?assertEqual(ok, stop_client(C))
+ end,
+ [Client2, Client3, Client4]
+ )
end).
-
should_reuse_known_proc() ->
?_test(begin
Client1 = spawn_client(<<"ddoc1">>),
@@ -150,7 +154,6 @@ should_reuse_known_proc() ->
?assertEqual(ok, stop_client(Client1Again))
end).
-
%should_process_waiting_queue_as_fifo() ->
% ?_test(begin
% Client1 = spawn_client(<<"ddoc1">>),
@@ -181,12 +184,15 @@ should_reuse_known_proc() ->
% ?assertEqual(ok, stop_client(Client5))
% end).
-
should_reduce_pool_on_idle_os_procs() ->
?_test(begin
%% os_process_idle_limit is in sec
- config:set("query_server_config",
- "os_process_idle_limit", "1", false),
+ config:set(
+ "query_server_config",
+ "os_process_idle_limit",
+ "1",
+ false
+ ),
ok = confirm_config("os_process_idle_limit", "1"),
Client1 = spawn_client(<<"ddoc1">>),
@@ -207,15 +213,22 @@ should_reduce_pool_on_idle_os_procs() ->
?assertEqual(1, couch_proc_manager:get_proc_count())
end).
-
should_not_return_broken_process_to_the_pool() ->
?_test(begin
- config:set("query_server_config",
- "os_process_soft_limit", "1", false),
+ config:set(
+ "query_server_config",
+ "os_process_soft_limit",
+ "1",
+ false
+ ),
ok = confirm_config("os_process_soft_limit", "1"),
- config:set("query_server_config",
- "os_process_limit", "1", false),
+ config:set(
+ "query_server_config",
+ "os_process_limit",
+ "1",
+ false
+ ),
ok = confirm_config("os_process_limit", "1"),
DDoc1 = ddoc(<<"_design/ddoc1">>),
@@ -227,9 +240,12 @@ should_not_return_broken_process_to_the_pool() ->
?assertEqual(0, meck:num_calls(couch_os_process, stop, 1)),
?assertEqual(1, couch_proc_manager:get_proc_count()),
- ?assertError(bad, couch_query_servers:with_ddoc_proc(DDoc1, fun(_) ->
- error(bad)
- end)),
+ ?assertError(
+ bad,
+ couch_query_servers:with_ddoc_proc(DDoc1, fun(_) ->
+ error(bad)
+ end)
+ ),
?assertEqual(1, meck:num_calls(couch_os_process, stop, 1)),
WaitFun = fun() ->
@@ -250,19 +266,21 @@ should_not_return_broken_process_to_the_pool() ->
?assertEqual(1, couch_proc_manager:get_proc_count())
end).
-
ddoc(DDocId) ->
#doc{
id = DDocId,
revs = {1, [<<"abc">>]},
- body = {[
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"v1">>, {[
- {<<"map">>, <<"function(doc) {emit(doc.value,1);}">>}
- ]}}
- ]}}
- ]}
+ body =
+ {[
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {<<"v1">>,
+ {[
+ {<<"map">>, <<"function(doc) {emit(doc.value,1);}">>}
+ ]}}
+ ]}}
+ ]}
}.
setup_config() ->
@@ -279,11 +297,13 @@ confirm_config(Key, Value, Count) ->
Value ->
ok;
_ when Count > 10 ->
- erlang:error({config_setup, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, timeout}
- ]});
+ erlang:error(
+ {config_setup, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {value, timeout}
+ ]}
+ );
_ ->
%% we need to wait to let gen_server:cast finish
timer:sleep(10),
@@ -304,7 +324,7 @@ spawn_client(DDocId) ->
Ref = make_ref(),
Pid = spawn(fun() ->
DDocKey = {DDocId, <<"1-abcdefgh">>},
- DDoc = #doc{body={[{<<"language">>, <<"erlang">>}]}},
+ DDoc = #doc{body = {[{<<"language">>, <<"erlang">>}]}},
Proc = couch_query_servers:get_ddoc_process(DDoc, DDocKey),
loop(Parent, Ref, Proc)
end),
@@ -324,11 +344,15 @@ get_client_proc({Pid, Ref}, ClientName) ->
receive
{proc, Ref, Proc} -> Proc
after ?TIMEOUT ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout getting client "
- ++ ClientName ++ " proc"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ "Timeout getting client " ++
+ ClientName ++ " proc"}
+ ]}
+ )
end.
stop_client({Pid, Ref}) ->
@@ -354,7 +378,7 @@ loop(Parent, Ref, Proc) ->
ping ->
Parent ! {pong, Ref},
loop(Parent, Ref, Proc);
- get_proc ->
+ get_proc ->
Parent ! {proc, Ref, Proc},
loop(Parent, Ref, Proc);
stop ->
diff --git a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
index 1329aba27..a7d449a2d 100644
--- a/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
+++ b/src/couch/test/eunit/couchdb_update_conflicts_tests.erl
@@ -24,12 +24,15 @@
start() ->
test_util:start_couch().
-
setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, overwrite]),
- Doc = couch_doc:from_json_obj({[{<<"_id">>, ?DOC_ID},
- {<<"value">>, 0}]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?DOC_ID},
+ {<<"value">>, 0}
+ ]}
+ ),
{ok, Rev} = couch_db:update_doc(Db, Doc, []),
ok = couch_db:close(Db),
RevStr = couch_doc:rev_to_str(Rev),
@@ -43,13 +46,13 @@ teardown({DbName, _}) ->
teardown(_, {DbName, _RevStr}) ->
teardown({DbName, _RevStr}).
-
view_indexes_cleanup_test_() ->
{
"Update conflicts",
{
setup,
- fun start/0, fun test_util:stop_couch/1,
+ fun start/0,
+ fun test_util:stop_couch/1,
[
concurrent_updates(),
bulk_docs_updates()
@@ -57,23 +60,27 @@ view_indexes_cleanup_test_() ->
}
}.
-concurrent_updates()->
+concurrent_updates() ->
{
"Concurrent updates",
{
foreachx,
- fun setup/1, fun teardown/2,
- [{NumClients, fun should_concurrently_update_doc/2}
- || NumClients <- ?NUM_CLIENTS]
+ fun setup/1,
+ fun teardown/2,
+ [
+ {NumClients, fun should_concurrently_update_doc/2}
+ || NumClients <- ?NUM_CLIENTS
+ ]
}
}.
-bulk_docs_updates()->
+bulk_docs_updates() ->
{
"Bulk docs updates",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_bulk_create_delete_doc/1,
fun should_bulk_create_local_doc/1,
@@ -82,38 +89,41 @@ bulk_docs_updates()->
}
}.
+should_concurrently_update_doc(NumClients, {DbName, InitRev}) ->
+ {
+ ?i2l(NumClients) ++ " clients",
+ {inorder, [
+ {"update doc",
+ {timeout, ?TIMEOUT div 1000,
+ ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
+ {"ensure in single leaf", ?_test(ensure_in_single_revision_leaf(DbName))}
+ ]}
+ }.
-should_concurrently_update_doc(NumClients, {DbName, InitRev})->
- {?i2l(NumClients) ++ " clients",
- {inorder,
- [{"update doc",
- {timeout, ?TIMEOUT div 1000,
- ?_test(concurrent_doc_update(NumClients, DbName, InitRev))}},
- {"ensure in single leaf",
- ?_test(ensure_in_single_revision_leaf(DbName))}]}}.
-
-should_bulk_create_delete_doc({DbName, InitRev})->
+should_bulk_create_delete_doc({DbName, InitRev}) ->
?_test(bulk_delete_create(DbName, InitRev)).
-should_bulk_create_local_doc({DbName, _})->
+should_bulk_create_local_doc({DbName, _}) ->
?_test(bulk_create_local_doc(DbName)).
-should_ignore_invalid_local_doc({DbName, _})->
+should_ignore_invalid_local_doc({DbName, _}) ->
?_test(ignore_invalid_local_doc(DbName)).
-
concurrent_doc_update(NumClients, DbName, InitRev) ->
Clients = lists:map(
fun(Value) ->
- ClientDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"value">>, Value}
- ]}),
+ ClientDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?DOC_ID},
+ {<<"_rev">>, InitRev},
+ {<<"value">>, Value}
+ ]}
+ ),
Pid = spawn_client(DbName, ClientDoc),
{Value, Pid, erlang:monitor(process, Pid)}
end,
- lists:seq(1, NumClients)),
+ lists:seq(1, NumClients)
+ ),
lists:foreach(fun({_, Pid, _}) -> Pid ! go end, Clients),
@@ -125,20 +135,31 @@ concurrent_doc_update(NumClients, DbName, InitRev) ->
{'DOWN', MonRef, process, Pid, conflict} ->
{AccConflicts + 1, AccValue};
{'DOWN', MonRef, process, Pid, Error} ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Client " ++ ?i2l(Value)
- ++ " got update error: "
- ++ couch_util:to_list(Error)}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ "Client " ++ ?i2l(Value) ++
+ " got update error: " ++
+ couch_util:to_list(Error)}
+ ]}
+ )
after ?TIMEOUT div 2 ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout waiting for client "
- ++ ?i2l(Value) ++ " to die"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ "Timeout waiting for client " ++
+ ?i2l(Value) ++ " to die"}
+ ]}
+ )
end
- end, {0, nil}, Clients),
+ end,
+ {0, nil},
+ Clients
+ ),
?assertEqual(NumClients - 1, NumConflicts),
{ok, Db} = couch_db:open_int(DbName, []),
@@ -171,15 +192,19 @@ ensure_in_single_revision_leaf(DbName) ->
bulk_delete_create(DbName, InitRev) ->
{ok, Db} = couch_db:open_int(DbName, []),
- DeletedDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"_rev">>, InitRev},
- {<<"_deleted">>, true}
- ]}),
- NewDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?DOC_ID},
- {<<"value">>, 666}
- ]}),
+ DeletedDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?DOC_ID},
+ {<<"_rev">>, InitRev},
+ {<<"_deleted">>, true}
+ ]}
+ ),
+ NewDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?DOC_ID},
+ {<<"value">>, 666}
+ ]}
+ ),
{ok, Results} = couch_db:update_docs(Db, [DeletedDoc, NewDoc], []),
ok = couch_db:close(Db),
@@ -189,9 +214,11 @@ bulk_delete_create(DbName, InitRev) ->
{ok, Db2} = couch_db:open_int(DbName, []),
{ok, [{ok, Doc1}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]),
+ Db2, ?DOC_ID, [Rev1], [conflicts, deleted_conflicts]
+ ),
{ok, [{ok, Doc2}]} = couch_db:open_doc_revs(
- Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]),
+ Db2, ?DOC_ID, [Rev2], [conflicts, deleted_conflicts]
+ ),
ok = couch_db:close(Db2),
{Doc1Props} = couch_doc:to_json_obj(Doc1, []),
@@ -200,40 +227,75 @@ bulk_delete_create(DbName, InitRev) ->
%% Document was deleted
?assert(couch_util:get_value(<<"_deleted">>, Doc1Props)),
%% New document not flagged as deleted
- ?assertEqual(undefined, couch_util:get_value(<<"_deleted">>,
- Doc2Props)),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(
+ <<"_deleted">>,
+ Doc2Props
+ )
+ ),
%% New leaf revision has the right value
- ?assertEqual(666, couch_util:get_value(<<"value">>,
- Doc2Props)),
+ ?assertEqual(
+ 666,
+ couch_util:get_value(
+ <<"value">>,
+ Doc2Props
+ )
+ ),
%% Deleted document has no conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
- Doc1Props)),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(
+ <<"_conflicts">>,
+ Doc1Props
+ )
+ ),
%% Deleted document has no deleted conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
- Doc1Props)),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(
+ <<"_deleted_conflicts">>,
+ Doc1Props
+ )
+ ),
%% New leaf revision doesn't have conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_conflicts">>,
- Doc1Props)),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(
+ <<"_conflicts">>,
+ Doc1Props
+ )
+ ),
%% New leaf revision doesn't have deleted conflicts
- ?assertEqual(undefined, couch_util:get_value(<<"_deleted_conflicts">>,
- Doc1Props)),
+ ?assertEqual(
+ undefined,
+ couch_util:get_value(
+ <<"_deleted_conflicts">>,
+ Doc1Props
+ )
+ ),
%% Deleted revision has position 2
?assertEqual(2, element(1, Rev1)),
%% New leaf revision has position 3
?assertEqual(3, element(1, Rev2)).
-
bulk_create_local_doc(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
- LocalDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-1">>}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
- [], replicated_changes),
+ LocalDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?LOCAL_DOC_ID},
+ {<<"_rev">>, <<"0-1">>}
+ ]}
+ ),
+
+ {ok, Results} = couch_db:update_docs(
+ Db,
+ [LocalDoc],
+ [],
+ replicated_changes
+ ),
ok = couch_db:close(Db),
?assertEqual([], Results),
@@ -243,17 +305,22 @@ bulk_create_local_doc(DbName) ->
?assertEqual(?LOCAL_DOC_ID, LocalDoc1#doc.id),
?assertEqual({0, [<<"2">>]}, LocalDoc1#doc.revs).
-
ignore_invalid_local_doc(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
- LocalDoc = couch_doc:from_json_obj({[
- {<<"_id">>, ?LOCAL_DOC_ID},
- {<<"_rev">>, <<"0-abcdef">>}
- ]}),
-
- {ok, Results} = couch_db:update_docs(Db, [LocalDoc],
- [], replicated_changes),
+ LocalDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?LOCAL_DOC_ID},
+ {<<"_rev">>, <<"0-abcdef">>}
+ ]}
+ ),
+
+ {ok, Results} = couch_db:update_docs(
+ Db,
+ [LocalDoc],
+ [],
+ replicated_changes
+ ),
ok = couch_db:close(Db),
?assertEqual([], Results),
@@ -262,7 +329,6 @@ ignore_invalid_local_doc(DbName) ->
ok = couch_db:close(Db2),
?assertEqual({not_found, missing}, Result2).
-
spawn_client(DbName, Doc) ->
spawn(fun() ->
{ok, Db} = couch_db:open_int(DbName, []),
@@ -270,11 +336,13 @@ spawn_client(DbName, Doc) ->
go -> ok
end,
erlang:yield(),
- Result = try
- couch_db:update_doc(Db, Doc, [])
- catch _:Error ->
- Error
- end,
+ Result =
+ try
+ couch_db:update_doc(Db, Doc, [])
+ catch
+ _:Error ->
+ Error
+ end,
ok = couch_db:close(Db),
exit(Result)
end).
diff --git a/src/couch/test/eunit/couchdb_vhosts_tests.erl b/src/couch/test/eunit/couchdb_vhosts_tests.erl
index fbe5579cd..635b8f9a6 100644
--- a/src/couch/test/eunit/couchdb_vhosts_tests.erl
+++ b/src/couch/test/eunit/couchdb_vhosts_tests.erl
@@ -18,30 +18,35 @@
-define(TIMEOUT, 1000).
-define(iofmt(S, A), lists:flatten(io_lib:format(S, A))).
-
setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 666}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 666}
+ ]}
+ ),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/doc1">>},
- {<<"shows">>, {[
- {<<"test">>, <<"function(doc, req) {
- return { json: {
- requested_path: '/' + req.requested_path.join('/'),
- path: '/' + req.path.join('/')}};}">>}
- ]}},
- {<<"rewrites">>, [
- {[
- {<<"from">>, <<"/">>},
- {<<"to">>, <<"_show/test">>}
+ Doc1 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/doc1">>},
+ {<<"shows">>,
+ {[
+ {<<"test">>,
+ <<"function(doc, req) {\n"
+ " return { json: {\n"
+ " requested_path: '/' + req.requested_path.join('/'),\n"
+ " path: '/' + req.path.join('/')}};}">>}
+ ]}},
+ {<<"rewrites">>, [
+ {[
+ {<<"from">>, <<"/">>},
+ {<<"to">>, <<"_show/test">>}
+ ]}
]}
]}
- ]}),
+ ),
{ok, _} = couch_db:update_docs(Db, [Doc, Doc1]),
couch_db:close(Db),
@@ -54,16 +59,17 @@ teardown({_, DbName}) ->
ok = couch_server:delete(?l2b(DbName), []),
ok.
-
vhosts_test_() ->
{
"Virtual Hosts rewrite tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_database_info/1,
fun should_return_revs_info/1,
@@ -89,103 +95,146 @@ should_return_database_info({Url, DbName}) ->
{JsonBody} = jiffy:decode(Body),
?assert(proplists:is_defined(<<"db_name">>, JsonBody));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_return_revs_info({Url, DbName}) ->
?_test(begin
ok = config:set("vhosts", "example.com", "/" ++ DbName, false),
- case test_request:get(Url ++ "/doc1?revs_info=true", [],
- [{host_header, "example.com"}]) of
+ case
+ test_request:get(
+ Url ++ "/doc1?revs_info=true",
+ [],
+ [{host_header, "example.com"}]
+ )
+ of
{ok, _, _, Body} ->
{JsonBody} = jiffy:decode(Body),
?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_return_virtual_request_path_field_in_request({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts", "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false),
+ ok = config:set(
+ "vhosts",
+ "example1.com",
+ "/" ++ DbName ++ "/_design/doc1/_rewrite/",
+ false
+ ),
case test_request:get(Url, [], [{host_header, "example1.com"}]) of
{ok, _, _, Body} ->
{Json} = jiffy:decode(Body),
- ?assertEqual(<<"/">>,
- proplists:get_value(<<"requested_path">>, Json));
+ ?assertEqual(
+ <<"/">>,
+ proplists:get_value(<<"requested_path">>, Json)
+ );
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_return_real_request_path_field_in_request({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts", "example1.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite/",
- false),
+ ok = config:set(
+ "vhosts",
+ "example1.com",
+ "/" ++ DbName ++ "/_design/doc1/_rewrite/",
+ false
+ ),
case test_request:get(Url, [], [{host_header, "example1.com"}]) of
{ok, _, _, Body} ->
{Json} = jiffy:decode(Body),
Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
?assertEqual(Path, proplists:get_value(<<"path">>, Json));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_match_wildcard_vhost({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts", "*.example.com",
- "/" ++ DbName ++ "/_design/doc1/_rewrite", false),
+ ok = config:set(
+ "vhosts",
+ "*.example.com",
+ "/" ++ DbName ++ "/_design/doc1/_rewrite",
+ false
+ ),
case test_request:get(Url, [], [{host_header, "test.example.com"}]) of
{ok, _, _, Body} ->
{Json} = jiffy:decode(Body),
Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
?assertEqual(Path, proplists:get_value(<<"path">>, Json));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_return_db_info_for_wildcard_vhost_for_custom_db({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts", ":dbname.example1.com",
- "/:dbname", false),
+ ok = config:set(
+ "vhosts",
+ ":dbname.example1.com",
+ "/:dbname",
+ false
+ ),
Host = DbName ++ ".example1.com",
case test_request:get(Url, [], [{host_header, Host}]) of
{ok, _, _, Body} ->
{JsonBody} = jiffy:decode(Body),
?assert(proplists:is_defined(<<"db_name">>, JsonBody));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts",":appname.:dbname.example1.com",
- "/:dbname/_design/:appname/_rewrite/", false),
+ ok = config:set(
+ "vhosts",
+ ":appname.:dbname.example1.com",
+ "/:dbname/_design/:appname/_rewrite/",
+ false
+ ),
Host = "doc1." ++ DbName ++ ".example1.com",
case test_request:get(Url, [], [{host_header, Host}]) of
{ok, _, _, Body} ->
@@ -193,45 +242,61 @@ should_replace_rewrite_variables_for_db_and_doc({Url, DbName}) ->
Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
?assertEqual(Path, proplists:get_value(<<"path">>, Json));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_return_db_info_for_vhost_with_resource({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts",
- "example.com/test", "/" ++ DbName, false),
+ ok = config:set(
+ "vhosts",
+ "example.com/test",
+ "/" ++ DbName,
+ false
+ ),
ReqUrl = Url ++ "/test",
case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
{ok, _, _, Body} ->
{JsonBody} = jiffy:decode(Body),
?assert(proplists:is_defined(<<"db_name">>, JsonBody));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
-
should_return_revs_info_for_vhost_with_resource({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts",
- "example.com/test", "/" ++ DbName, false),
+ ok = config:set(
+ "vhosts",
+ "example.com/test",
+ "/" ++ DbName,
+ false
+ ),
ReqUrl = Url ++ "/test/doc1?revs_info=true",
case test_request:get(ReqUrl, [], [{host_header, "example.com"}]) of
{ok, _, _, Body} ->
{JsonBody} = jiffy:decode(Body),
?assert(proplists:is_defined(<<"_revs_info">>, JsonBody));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
@@ -245,27 +310,36 @@ should_return_db_info_for_vhost_with_wildcard_resource({Url, DbName}) ->
{JsonBody} = jiffy:decode(Body),
?assert(proplists:is_defined(<<"db_name">>, JsonBody));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
should_return_path_for_vhost_with_wildcard_host({Url, DbName}) ->
?_test(begin
- ok = config:set("vhosts", "*/test1",
- "/" ++ DbName ++ "/_design/doc1/_show/test",
- false),
+ ok = config:set(
+ "vhosts",
+ "*/test1",
+ "/" ++ DbName ++ "/_design/doc1/_show/test",
+ false
+ ),
case test_request:get(Url ++ "/test1") of
{ok, _, _, Body} ->
{Json} = jiffy:decode(Body),
Path = ?l2b("/" ++ DbName ++ "/_design/doc1/_show/test"),
?assertEqual(Path, proplists:get_value(<<"path">>, Json));
Else ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, ?iofmt("Request failed: ~p", [Else])}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, ?iofmt("Request failed: ~p", [Else])}
+ ]}
+ )
end
end).
diff --git a/src/couch/test/eunit/couchdb_views_tests.erl b/src/couch/test/eunit/couchdb_views_tests.erl
index 06e2f03eb..f4d51bdd0 100644
--- a/src/couch/test/eunit/couchdb_views_tests.erl
+++ b/src/couch/test/eunit/couchdb_views_tests.erl
@@ -48,10 +48,18 @@ setup_legacy() ->
DbDir = config:get("couchdb", "database_dir"),
ViewDir = config:get("couchdb", "view_index_dir"),
- OldViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
- OldViewName]),
- NewViewFilePath = filename:join([ViewDir, ".test_design", "mrview",
- NewViewName]),
+ OldViewFilePath = filename:join([
+ ViewDir,
+ ".test_design",
+ "mrview",
+ OldViewName
+ ]),
+ NewViewFilePath = filename:join([
+ ViewDir,
+ ".test_design",
+ "mrview",
+ NewViewName
+ ]),
NewDbFilePath = filename:join([DbDir, DbFileName]),
@@ -84,10 +92,12 @@ view_indexes_cleanup_test_() ->
"View indexes cleanup",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_have_two_indexes_alive_before_deletion/1,
fun should_cleanup_index_file_after_ddoc_deletion/1,
@@ -102,10 +112,12 @@ view_group_db_leaks_test_() ->
"View group db leaks",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup_with_docs/0, fun teardown/1,
+ fun setup_with_docs/0,
+ fun teardown/1,
[
fun couchdb_1138/1,
fun couchdb_1309/1
@@ -136,10 +148,12 @@ backup_restore_test_() ->
"Upgrade and bugs related tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup_with_docs/0, fun teardown/1,
+ fun setup_with_docs/0,
+ fun teardown/1,
[
fun should_not_remember_docs_in_index_after_backup_restore/1
]
@@ -147,16 +161,17 @@ backup_restore_test_() ->
}
}.
-
upgrade_test_() ->
{
"Upgrade tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup_legacy/0, fun teardown_legacy/1,
+ fun setup_legacy/0,
+ fun teardown_legacy/1,
[
fun should_upgrade_legacy_view_files/1
]
@@ -184,7 +199,7 @@ should_not_remember_docs_in_index_after_backup_restore(DbName) ->
?assert(has_doc("doc2", Rows1)),
?assert(has_doc("doc3", Rows1)),
?assertNot(has_doc("doc666", Rows1))
- end).
+ end).
should_upgrade_legacy_view_files({DbName, Files}) ->
?_test(begin
@@ -206,21 +221,23 @@ should_upgrade_legacy_view_files({DbName, Files}) ->
% add doc to trigger update
DocUrl = db_url(DbName) ++ "/bar",
{ok, _, _, _} = test_request:put(
- DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>),
+ DocUrl, [{"Content-Type", "application/json"}], <<"{\"a\":4}">>
+ ),
% query view for expected results
Rows1 = query_view(DbName, "test", "test"),
?assertEqual(4, length(Rows1)),
% ensure new header
- timer:sleep(2000), % have to wait for awhile to upgrade the index
+
+ % have to wait for awhile to upgrade the index
+ timer:sleep(2000),
NewHeader = read_header(NewViewFilePath),
?assertMatch(#mrheader{}, NewHeader),
NewViewStatus = hd(NewHeader#mrheader.view_states),
?assertEqual(3, tuple_size(NewViewStatus))
end).
-
should_have_two_indexes_alive_before_deletion({DbName, _}) ->
view_cleanup(DbName),
?_assertEqual(2, count_index_files(DbName)).
@@ -230,7 +247,7 @@ should_cleanup_index_file_after_ddoc_deletion({DbName, {FooRev, _}}) ->
view_cleanup(DbName),
?_assertEqual(1, count_index_files(DbName)).
-should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
+should_cleanup_all_index_files({DbName, {FooRev, BooRev}}) ->
delete_design_doc(DbName, <<"_design/foo">>, FooRev),
delete_design_doc(DbName, <<"_design/boo">>, BooRev),
view_cleanup(DbName),
@@ -239,7 +256,8 @@ should_cleanup_all_index_files({DbName, {FooRev, BooRev}})->
couchdb_1138(DbName) ->
?_test(begin
{ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
+ couch_mrview_index, DbName, <<"_design/foo">>
+ ),
?assert(is_pid(IndexerPid)),
?assert(is_process_alive(IndexerPid)),
?assertEqual(2, count_users(DbName)),
@@ -277,7 +295,8 @@ couchdb_1138(DbName) ->
couchdb_1309(DbName) ->
?_test(begin
{ok, IndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
+ couch_mrview_index, DbName, <<"_design/foo">>
+ ),
?assert(is_pid(IndexerPid)),
?assert(is_process_alive(IndexerPid)),
?assertEqual(2, count_users(DbName)),
@@ -292,18 +311,21 @@ couchdb_1309(DbName) ->
?assert(is_process_alive(IndexerPid)),
- update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
+ update_design_doc(DbName, <<"_design/foo">>, <<"bar">>),
{ok, NewIndexerPid} = couch_index_server:get_index(
- couch_mrview_index, DbName, <<"_design/foo">>),
+ couch_mrview_index, DbName, <<"_design/foo">>
+ ),
?assert(is_pid(NewIndexerPid)),
?assert(is_process_alive(NewIndexerPid)),
?assertNotEqual(IndexerPid, NewIndexerPid),
- UserCnt = case count_users(DbName) of
- N when N > 2 ->
- timer:sleep(1000),
- count_users(DbName);
- N -> N
- end,
+ UserCnt =
+ case count_users(DbName) of
+ N when N > 2 ->
+ timer:sleep(1000),
+ count_users(DbName);
+ N ->
+ N
+ end,
?assertEqual(2, UserCnt),
Rows1 = query_view(DbName, "foo", "bar", ok),
@@ -312,15 +334,20 @@ couchdb_1309(DbName) ->
check_rows_value(Rows2, 1),
?assertEqual(4, length(Rows2)),
- ok = stop_indexer( %% FIXME we need to grab monitor earlier
- fun() -> ok end,
- IndexerPid, ?LINE,
- "old view group is not dead after ddoc update"),
+ %% FIXME we need to grab monitor earlier
+ ok = stop_indexer(
+ fun() -> ok end,
+ IndexerPid,
+ ?LINE,
+ "old view group is not dead after ddoc update"
+ ),
ok = stop_indexer(
- fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
- NewIndexerPid, ?LINE,
- "new view group did not die after DB deletion")
+ fun() -> couch_server:delete(DbName, [?ADMIN_USER]) end,
+ NewIndexerPid,
+ ?LINE,
+ "new view group did not die after DB deletion"
+ )
end).
couchdb_1283() ->
@@ -328,41 +355,54 @@ couchdb_1283() ->
ok = config:set("couchdb", "max_dbs_open", "3", false),
{ok, MDb1} = couch_db:create(?tempdb(), [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/foo">>},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo2">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo3">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo4">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}},
- {<<"foo5">>, {[
- {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
- ]}}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/foo">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {<<"foo">>,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo2">>,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo3">>,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo4">>,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}},
+ {<<"foo5">>,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc._id, null); }">>}
+ ]}}
+ ]}}
+ ]}
+ ),
{ok, _} = couch_db:update_doc(MDb1, DDoc, []),
ok = populate_db(MDb1, 100, 100),
query_view(couch_db:name(MDb1), "foo", "foo"),
ok = couch_db:close(MDb1),
{ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>),
+ couch_mrview_index, couch_db:name(MDb1), <<"_design/foo">>
+ ),
% Start and pause compacton
WaitRef = erlang:make_ref(),
meck:expect(couch_mrview_index, compact, fun(Db, State, Opts) ->
- receive {WaitRef, From, init} -> ok end,
+ receive
+ {WaitRef, From, init} -> ok
+ end,
From ! {WaitRef, inited},
- receive {WaitRef, go} -> ok end,
+ receive
+ {WaitRef, go} -> ok
+ end,
meck:passthrough([Db, State, Opts])
end),
@@ -373,7 +413,9 @@ couchdb_1283() ->
% Make sure that our compactor is waiting for us
% before we continue our assertions
CPid ! {WaitRef, self(), init},
- receive {WaitRef, inited} -> ok end,
+ receive
+ {WaitRef, inited} -> ok
+ end,
% Make sure that a compaction process takes a monitor
% on the database's main_pid
@@ -382,64 +424,74 @@ couchdb_1283() ->
% Finish compaction to and make sure the monitor
% disappears
CPid ! {WaitRef, go},
- wait_for_process_shutdown(CRef, normal,
- {reason, "Failure compacting view group"}),
+ wait_for_process_shutdown(
+ CRef,
+ normal,
+ {reason, "Failure compacting view group"}
+ ),
% Make sure that the monitor was removed
?assertEqual(false, lists:member(CPid, couch_db:monitored_by(MDb1)))
end).
wait_for_process_shutdown(Pid, ExpectedReason, Error) ->
- receive
- {'DOWN', Pid, process, _, Reason} ->
- ?assertEqual(ExpectedReason, Reason)
- after ?TIMEOUT ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE}, Error]})
- end.
-
+ receive
+ {'DOWN', Pid, process, _, Reason} ->
+ ?assertEqual(ExpectedReason, Reason)
+ after ?TIMEOUT ->
+ erlang:error(
+ {assertion_failed, [{module, ?MODULE}, {line, ?LINE}, Error]}
+ )
+ end.
create_doc(DbName, DocId) when is_list(DocId) ->
create_doc(DbName, ?l2b(DocId));
create_doc(DbName, DocId) when is_binary(DocId) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc666 = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"value">>, 999}
- ]}),
+ Doc666 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DocId},
+ {<<"value">>, 999}
+ ]}
+ ),
{ok, _} = couch_db:update_docs(Db, [Doc666]),
couch_db:close(Db).
create_docs(DbName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"value">>, 3}
-
- ]}),
+ Doc1 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc1">>},
+ {<<"value">>, 1}
+ ]}
+ ),
+ Doc2 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc2">>},
+ {<<"value">>, 2}
+ ]}
+ ),
+ Doc3 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"value">>, 3}
+ ]}
+ ),
{ok, _} = couch_db:update_docs(Db, [Doc1, Doc2, Doc3]),
couch_db:close(Db).
populate_db(Db, BatchSize, N) when N > 0 ->
Docs = lists:map(
fun(_) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, couch_uuids:new()},
- {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
- ]})
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, couch_uuids:new()},
+ {<<"value">>, base64:encode(crypto:strong_rand_bytes(1000))}
+ ]}
+ )
end,
- lists:seq(1, BatchSize)),
+ lists:seq(1, BatchSize)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs, []),
populate_db(Db, BatchSize, N - length(Docs));
populate_db(_Db, _, _) ->
@@ -447,15 +499,19 @@ populate_db(_Db, _, _) ->
create_design_doc(DbName, DDName, ViewName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
- ]}}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDName},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {ViewName,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc.value, null); }">>}
+ ]}}
+ ]}}
+ ]}
+ ),
{ok, Rev} = couch_db:update_doc(Db, DDoc, []),
couch_db:close(Db),
Rev.
@@ -465,27 +521,33 @@ update_design_doc(DbName, DDName, ViewName) ->
{ok, Doc} = couch_db:open_doc(Db, DDName, [?ADMIN_CTX]),
{Props} = couch_doc:to_json_obj(Doc, []),
Rev = couch_util:get_value(<<"_rev">>, Props),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"_rev">>, Rev},
- {<<"language">>, <<"javascript">>},
- {<<"views">>, {[
- {ViewName, {[
- {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
- ]}}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDName},
+ {<<"_rev">>, Rev},
+ {<<"language">>, <<"javascript">>},
+ {<<"views">>,
+ {[
+ {ViewName,
+ {[
+ {<<"map">>, <<"function(doc) { emit(doc.value, 1); }">>}
+ ]}}
+ ]}}
+ ]}
+ ),
{ok, NewRev} = couch_db:update_doc(Db, DDoc, [?ADMIN_CTX]),
couch_db:close(Db),
NewRev.
delete_design_doc(DbName, DDName, Rev) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DDName},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDName},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"_deleted">>, true}
+ ]}
+ ),
{ok, _} = couch_db:update_doc(Db, DDoc, [Rev]),
couch_db:close(Db).
@@ -499,11 +561,12 @@ query_view(DbName, DDoc, View) ->
query_view(DbName, DDoc, View, Stale) ->
{ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View
- ++ case Stale of
- false -> [];
- _ -> "?stale=" ++ atom_to_list(Stale)
- end),
+ db_url(DbName) ++ "/_design/" ++ DDoc ++ "/_view/" ++ View ++
+ case Stale of
+ false -> [];
+ _ -> "?stale=" ++ atom_to_list(Stale)
+ end
+ ),
?assertEqual(200, Code),
{Props} = jiffy:decode(Body),
couch_util:get_value(<<"rows">>, Props, []).
@@ -512,7 +575,9 @@ check_rows_value(Rows, Value) ->
lists:foreach(
fun({Row}) ->
?assertEqual(Value, couch_util:get_value(<<"value">>, Row))
- end, Rows).
+ end,
+ Rows
+ ).
view_cleanup(DbName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
@@ -531,8 +596,12 @@ count_users(DbName) ->
count_index_files(DbName) ->
% call server to fetch the index files
RootDir = config:get("couchdb", "view_index_dir"),
- length(filelib:wildcard(RootDir ++ "/." ++
- binary_to_list(DbName) ++ "_design"++"/mrview/*")).
+ length(
+ filelib:wildcard(
+ RootDir ++ "/." ++
+ binary_to_list(DbName) ++ "_design" ++ "/mrview/*"
+ )
+ ).
has_doc(DocId1, Rows) ->
DocId = iolist_to_binary(DocId1),
@@ -542,10 +611,11 @@ backup_db_file(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
try
SrcPath = couch_db:get_filepath(Db),
- Src = if
- is_list(SrcPath) -> SrcPath;
- true -> binary_to_list(SrcPath)
- end,
+ Src =
+ if
+ is_list(SrcPath) -> SrcPath;
+ true -> binary_to_list(SrcPath)
+ end,
ok = copy_tree(Src, Src ++ ".backup")
after
couch_db:close(Db)
@@ -559,17 +629,21 @@ restore_backup_db_file(DbName) ->
exit(DbPid, shutdown),
ok = copy_tree(Src ++ ".backup", Src),
- test_util:wait(fun() ->
- case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
- {ok, WaitDb} ->
- case couch_db:get_pid(WaitDb) == DbPid of
- true -> wait;
- false -> ok
- end;
- Else ->
- Else
- end
- end, ?TIMEOUT, ?DELAY).
+ test_util:wait(
+ fun() ->
+ case couch_server:open(DbName, [{timeout, ?TIMEOUT}]) of
+ {ok, WaitDb} ->
+ case couch_db:get_pid(WaitDb) == DbPid of
+ true -> wait;
+ false -> ok
+ end;
+ Else ->
+ Else
+ end
+ end,
+ ?TIMEOUT,
+ ?DELAY
+ ).
compact_db(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
@@ -578,20 +652,23 @@ compact_db(DbName) ->
wait_db_compact_done(DbName, ?WAIT_DELAY_COUNT).
wait_db_compact_done(_DbName, 0) ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}]});
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "DB compaction failed to finish"}
+ ]}
+ );
wait_db_compact_done(DbName, N) ->
{ok, Db} = couch_db:open_int(DbName, []),
ok = couch_db:close(Db),
CompactorPid = couch_db:get_compactor_pid(Db),
case is_pid(CompactorPid) of
- false ->
- ok;
- true ->
- ok = timer:sleep(?DELAY),
- wait_db_compact_done(DbName, N - 1)
+ false ->
+ ok;
+ true ->
+ ok = timer:sleep(?DELAY),
+ wait_db_compact_done(DbName, N - 1)
end.
compact_view_group(DbName, DDocId) when is_list(DDocId) ->
@@ -601,13 +678,17 @@ compact_view_group(DbName, DDocId) when is_binary(DDocId) ->
wait_view_compact_done(DbName, DDocId, 10).
wait_view_compact_done(_DbName, _DDocId, 0) ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "DB compaction failed to finish"}]});
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "DB compaction failed to finish"}
+ ]}
+ );
wait_view_compact_done(DbName, DDocId, N) ->
{ok, Code, _Headers, Body} = test_request:get(
- db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"),
+ db_url(DbName) ++ "/" ++ ?b2l(DDocId) ++ "/_info"
+ ),
?assertEqual(200, Code),
{Info} = jiffy:decode(Body),
{IndexInfo} = couch_util:get_value(<<"view_index">>, Info),
@@ -628,13 +709,16 @@ read_header(File) ->
stop_indexer(StopFun, Pid, Line, Reason) ->
case test_util:stop_sync(Pid, StopFun) of
- timeout ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, Line},
- {reason, Reason}]});
- ok ->
- ok
+ timeout ->
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, Line},
+ {reason, Reason}
+ ]}
+ );
+ ok ->
+ ok
end.
wait_indexer(IndexerPid) ->
diff --git a/src/couch/test/eunit/global_changes_tests.erl b/src/couch/test/eunit/global_changes_tests.erl
index 4392aafac..92964bb74 100644
--- a/src/couch/test/eunit/global_changes_tests.erl
+++ b/src/couch/test/eunit/global_changes_tests.erl
@@ -35,7 +35,7 @@ http_create_db(Name) ->
{ok, Status, _, _} = test_request:put(db_url(Name), [?AUTH], ""),
true = lists:member(Status, [201, 202]),
ok.
-
+
http_delete_db(Name) ->
{ok, Status, _, _} = test_request:delete(db_url(Name), [?AUTH]),
true = lists:member(Status, [200, 202]),
@@ -75,7 +75,8 @@ check_response() ->
"Check response",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_return_correct_response_on_create/1,
fun should_return_correct_response_on_update/1
@@ -105,9 +106,11 @@ should_return_correct_response_on_update({Host, DbName}) ->
create_doc(Host, DbName, Id) ->
Headers = [?AUTH],
Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
- Body = jiffy:encode({[
- {key, "value"}
- ]}),
+ Body = jiffy:encode(
+ {[
+ {key, "value"}
+ ]}
+ ),
{ok, Status, _Headers, _Body} = test_request:put(Url, Headers, Body),
?assert(Status =:= 201 orelse Status =:= 202),
timer:sleep(1000),
@@ -118,10 +121,12 @@ update_doc(Host, DbName, Id, Value) ->
Url = Host ++ "/" ++ escape(DbName) ++ "/" ++ escape(Id),
{ok, 200, _Headers0, BinBody} = test_request:get(Url, Headers),
[Rev] = decode_response(BinBody, [<<"_rev">>]),
- Body = jiffy:encode({[
- {key, Value},
- {'_rev', Rev}
- ]}),
+ Body = jiffy:encode(
+ {[
+ {key, Value},
+ {'_rev', Rev}
+ ]}
+ ),
{ok, Status, _Headers1, _Body} = test_request:put(Url, Headers, Body),
?assert(Status =:= 201 orelse Status =:= 202),
timer:sleep(1000),
@@ -145,7 +150,7 @@ decode_response(BinBody, ToDecode) ->
add_admin(User, Pass) ->
Hashed = couch_passwords:hash_admin_password(Pass),
- config:set("admins", User, ?b2l(Hashed), _Persist=false).
+ config:set("admins", User, ?b2l(Hashed), _Persist = false).
delete_admin(User) ->
config:delete("admins", User, false).
diff --git a/src/couch/test/eunit/json_stream_parse_tests.erl b/src/couch/test/eunit/json_stream_parse_tests.erl
index e690d7728..ab26be725 100644
--- a/src/couch/test/eunit/json_stream_parse_tests.erl
+++ b/src/couch/test/eunit/json_stream_parse_tests.erl
@@ -14,83 +14,88 @@
-include_lib("couch/include/couch_eunit.hrl").
--define(CASES,
- [
- {1, "1", "integer numeric literial"},
- {3.1416, "3.14160", "float numeric literal"}, % text representation may truncate, trail zeroes
- {-1, "-1", "negative integer numeric literal"},
- {-3.1416, "-3.14160", "negative float numeric literal"},
- {12.0e10, "1.20000e+11", "float literal in scientific notation"},
- {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
- {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
- {10.0, "1.0e+01", "yet another float literal in scientific notation"},
- {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
- {10.0, "1e1", "yet another float literal in scientific notation"},
- {<<"foo">>, "\"foo\"", "string literal"},
- {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
- {<<"">>, "\"\"", "empty string literal"},
- {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"},
- {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"",
- "only white spaces string literal"},
- {null, "null", "null literal"},
- {true, "true", "true literal"},
- {false, "false", "false literal"},
- {<<"null">>, "\"null\"", "null string literal"},
- {<<"true">>, "\"true\"", "true string literal"},
- {<<"false">>, "\"false\"", "false string literal"},
- {{[]}, "{}", "empty object literal"},
- {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}",
- "simple object literal"},
- {{[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
- "{\"foo\":\"bar\",\"baz\":123}", "another simple object literal"},
- {[], "[]", "empty array literal"},
- {[[]], "[[]]", "empty array literal inside a single element array literal"},
- {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
- {[1199344435545.0, 1], "[1199344435545.0,1]",
- "another simple non-empty array literal"},
- {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
- {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}",
- "object literal with an array valued property"},
- {{[{<<"foo">>, {[{<<"bar">>, true}]}}]},
- "{\"foo\":{\"bar\":true}}", "nested object literal"},
- {{[{<<"foo">>, []}, {<<"bar">>, {[{<<"baz">>, true}]}},
- {<<"alice">>, <<"bob">>}]},
- "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
- "complex object literal"},
- {[-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
- "[-123,\"foo\",{\"bar\":[]},null]",
- "complex array literal"}
- ]
-).
-
+-define(CASES, [
+ {1, "1", "integer numeric literial"},
+ % text representation may truncate, trail zeroes
+ {3.1416, "3.14160", "float numeric literal"},
+ {-1, "-1", "negative integer numeric literal"},
+ {-3.1416, "-3.14160", "negative float numeric literal"},
+ {12.0e10, "1.20000e+11", "float literal in scientific notation"},
+ {1.234E+10, "1.23400e+10", "another float literal in scientific notation"},
+ {-1.234E-10, "-1.23400e-10", "negative float literal in scientific notation"},
+ {10.0, "1.0e+01", "yet another float literal in scientific notation"},
+ {123.456, "1.23456E+2", "yet another float literal in scientific notation"},
+ {10.0, "1e1", "yet another float literal in scientific notation"},
+ {<<"foo">>, "\"foo\"", "string literal"},
+ {<<"foo", 5, "bar">>, "\"foo\\u0005bar\"", "string literal with \\u0005"},
+ {<<"">>, "\"\"", "empty string literal"},
+ {<<"\n\n\n">>, "\"\\n\\n\\n\"", "only new lines literal"},
+ {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\"", "only white spaces string literal"},
+ {null, "null", "null literal"},
+ {true, "true", "true literal"},
+ {false, "false", "false literal"},
+ {<<"null">>, "\"null\"", "null string literal"},
+ {<<"true">>, "\"true\"", "true string literal"},
+ {<<"false">>, "\"false\"", "false string literal"},
+ {{[]}, "{}", "empty object literal"},
+ {{[{<<"foo">>, <<"bar">>}]}, "{\"foo\":\"bar\"}", "simple object literal"},
+ {
+ {[{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]},
+ "{\"foo\":\"bar\",\"baz\":123}",
+ "another simple object literal"
+ },
+ {[], "[]", "empty array literal"},
+ {[[]], "[[]]", "empty array literal inside a single element array literal"},
+ {[1, <<"foo">>], "[1,\"foo\"]", "simple non-empty array literal"},
+ {[1199344435545.0, 1], "[1199344435545.0,1]", "another simple non-empty array literal"},
+ {[false, true, 321, null], "[false, true, 321, null]", "array of literals"},
+ {{[{<<"foo">>, [123]}]}, "{\"foo\":[123]}", "object literal with an array valued property"},
+ {{[{<<"foo">>, {[{<<"bar">>, true}]}}]}, "{\"foo\":{\"bar\":true}}", "nested object literal"},
+ {
+ {[
+ {<<"foo">>, []},
+ {<<"bar">>, {[{<<"baz">>, true}]}},
+ {<<"alice">>, <<"bob">>}
+ ]},
+ "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}",
+ "complex object literal"
+ },
+ {
+ [-123, <<"foo">>, {[{<<"bar">>, []}]}, null],
+ "[-123,\"foo\",{\"bar\":[]},null]",
+ "complex array literal"
+ }
+]).
raw_json_input_test_() ->
Tests = lists:map(
fun({EJson, JsonString, Desc}) ->
- {Desc,
- ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
- end, ?CASES),
+ {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(JsonString)))}
+ end,
+ ?CASES
+ ),
{"Tests with raw JSON string as the input", Tests}.
one_byte_data_fun_test_() ->
Tests = lists:map(
fun({EJson, JsonString, Desc}) ->
DataFun = fun() -> single_byte_data_fun(JsonString) end,
- {Desc,
- ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
- end, ?CASES),
+ {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+ end,
+ ?CASES
+ ),
{"Tests with a 1 byte output data function as the input", Tests}.
test_multiple_bytes_data_fun_test_() ->
Tests = lists:map(
fun({EJson, JsonString, Desc}) ->
DataFun = fun() -> multiple_bytes_data_fun(JsonString) end,
- {Desc,
- ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
- end, ?CASES),
+ {Desc, ?_assert(equiv(EJson, json_stream_parse:to_ejson(DataFun)))}
+ end,
+ ?CASES
+ ),
{"Tests with a multiple bytes output data function as the input", Tests}.
-
%% Test for equivalence of Erlang terms.
%% Due to arbitrary order of construction, equivalent objects might
%% compare unequal as erlang terms, so we need to carefully recurse
@@ -120,7 +125,8 @@ equiv_object(Props1, Props2) ->
fun({{K1, V1}, {K2, V2}}) ->
equiv(K1, K2) andalso equiv(V1, V2)
end,
- Pairs).
+ Pairs
+ ).
%% Recursively compare tuple elements for equivalence.
equiv_list([], []) ->
@@ -147,5 +153,5 @@ split(L, N) ->
take(0, L, Acc) ->
{lists:reverse(Acc), L};
-take(N, [H|L], Acc) ->
+take(N, [H | L], Acc) ->
take(N - 1, L, [H | Acc]).
diff --git a/src/couch/test/eunit/test_web.erl b/src/couch/test/eunit/test_web.erl
index b1b3e65c9..8998dad52 100644
--- a/src/couch/test/eunit/test_web.erl
+++ b/src/couch/test/eunit/test_web.erl
@@ -73,18 +73,18 @@ terminate(_Reason, _State) ->
stop() ->
mochiweb_http:stop(?SERVER).
-
handle_call({check_request, Req}, _From, State) when is_function(State, 1) ->
- Resp2 = case (catch State(Req)) of
- {ok, Resp} ->
- {reply, {ok, Resp}, was_ok};
- {raw, Resp} ->
- {reply, {raw, Resp}, was_ok};
- {chunked, Resp} ->
- {reply, {chunked, Resp}, was_ok};
- Error ->
- {reply, {error, Error}, not_ok}
- end,
+ Resp2 =
+ case (catch State(Req)) of
+ {ok, Resp} ->
+ {reply, {ok, Resp}, was_ok};
+ {raw, Resp} ->
+ {reply, {raw, Resp}, was_ok};
+ {chunked, Resp} ->
+ {reply, {chunked, Resp}, was_ok};
+ Error ->
+ {reply, {error, Error}, not_ok}
+ end,
Req:cleanup(),
Resp2;
handle_call({check_request, _Req}, _From, _State) ->
diff --git a/src/couch_dist/src/couch_dist.erl b/src/couch_dist/src/couch_dist.erl
index 21afd0a91..9a6b26d07 100644
--- a/src/couch_dist/src/couch_dist.erl
+++ b/src/couch_dist/src/couch_dist.erl
@@ -12,77 +12,77 @@
-module(couch_dist).
--export([childspecs/0, listen/2, accept/1, accept_connection/5,
- setup/5, close/1, select/1, is_node_name/1]).
+-export([
+ childspecs/0,
+ listen/2,
+ accept/1,
+ accept_connection/5,
+ setup/5,
+ close/1,
+ select/1,
+ is_node_name/1
+]).
% Just for tests
-export([no_tls/1, get_init_args/0]).
-
childspecs() ->
- {ok, [{ssl_dist_sup, {ssl_dist_sup, start_link, []},
- permanent, infinity, supervisor, [ssl_dist_sup]}]}.
-
+ {ok, [
+ {ssl_dist_sup, {ssl_dist_sup, start_link, []}, permanent, infinity, supervisor, [
+ ssl_dist_sup
+ ]}
+ ]}.
listen(Name, Host) ->
- NodeName = case is_atom(Name) of
- true -> atom_to_list(Name);
- false -> Name
- end,
+ NodeName =
+ case is_atom(Name) of
+ true -> atom_to_list(Name);
+ false -> Name
+ end,
Mod = inet_dist(NodeName ++ "@" ++ Host),
Mod:listen(NodeName, Host).
-
accept(Listen) ->
Mod = inet_dist(node()),
Mod:accept(Listen).
-
accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) ->
Mod = inet_dist(MyNode),
Mod:accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime).
-
setup(Node, Type, MyNode, LongOrShortNames, SetupTime) ->
Mod = inet_dist(Node),
Mod:setup(Node, Type, MyNode, LongOrShortNames, SetupTime).
-
close(Socket) ->
inet_tls_dist:close(Socket).
-
select(Node) ->
inet_tls_dist:select(Node).
-
is_node_name(Node) ->
inet_tls_dist:is_node_name(Node).
-
get_init_args() ->
init:get_argument(couch_dist).
-
inet_dist(Node) ->
case no_tls(Node) of
true -> inet_tcp_dist;
false -> inet_tls_dist
end.
-
no_tls(NodeName) when is_atom(NodeName) ->
no_tls(atom_to_list(NodeName));
-
no_tls(NodeName) when is_list(NodeName) ->
case ?MODULE:get_init_args() of
{ok, Args} ->
GlobPatterns = [V || [K, V] <- Args, K == "no_tls"],
lists:any(fun(P) -> match(NodeName, P) end, GlobPatterns);
- error -> false
+ error ->
+ false
end.
-
match(_NodeName, "true") ->
true;
match(_NodeName, "false") ->
@@ -95,11 +95,9 @@ match(NodeName, Pattern) ->
end,
re:run(NodeName, RE) /= nomatch.
-
to_re(GlobPattern) ->
re:compile([$^, lists:flatmap(fun glob_re/1, GlobPattern), $$]).
-
glob_re($*) ->
".*";
glob_re($?) ->
diff --git a/src/couch_dist/test/eunit/couch_dist_tests.erl b/src/couch_dist/test/eunit/couch_dist_tests.erl
index 55e9af376..abe5ff572 100644
--- a/src/couch_dist/test/eunit/couch_dist_tests.erl
+++ b/src/couch_dist/test/eunit/couch_dist_tests.erl
@@ -14,7 +14,6 @@
-include_lib("eunit/include/eunit.hrl").
-
no_tls_test_() ->
{
"test couch_dist no_tls/1",
@@ -33,19 +32,17 @@ no_tls_test_() ->
}
}.
-
mock_get_init_args(Reply) ->
meck:expect(couch_dist, get_init_args, fun() -> Reply end).
-
no_tls_test_with_true() ->
?_test(
begin
mock_get_init_args({ok, [["no_tls", "true"]]}),
?assert(couch_dist:no_tls('abc123')),
?assert(couch_dist:no_tls("123abd"))
- end).
-
+ end
+ ).
no_tls_test_with_false() ->
?_test(
@@ -53,8 +50,8 @@ no_tls_test_with_false() ->
mock_get_init_args({ok, [["no_tls", "false"]]}),
?assertNot(couch_dist:no_tls('abc123')),
?assertNot(couch_dist:no_tls("123abc"))
- end).
-
+ end
+ ).
no_tls_test_with_character() ->
?_test(
@@ -62,8 +59,8 @@ no_tls_test_with_character() ->
mock_get_init_args({ok, [["no_tls", "node@127.0.0.1"]]}),
?assert(couch_dist:no_tls('node@127.0.0.1')),
?assert(couch_dist:no_tls("node@127.0.0.1"))
- end).
-
+ end
+ ).
no_tls_test_with_wildcard() ->
?_test(
@@ -74,8 +71,8 @@ no_tls_test_with_wildcard() ->
?assert(couch_dist:no_tls("a2")),
?assertNot(couch_dist:no_tls('a')),
?assertNot(couch_dist:no_tls("2"))
- end).
-
+ end
+ ).
no_tls_test_with_question_mark() ->
?_test(
@@ -85,8 +82,8 @@ no_tls_test_with_question_mark() ->
?assert(couch_dist:no_tls("ab2")),
?assertNot(couch_dist:no_tls('a2')),
?assertNot(couch_dist:no_tls("a"))
- end).
-
+ end
+ ).
no_tls_test_with_error() ->
?_test(
@@ -94,4 +91,5 @@ no_tls_test_with_error() ->
mock_get_init_args(error),
?assertNot(couch_dist:no_tls('abc123')),
?assertNot(couch_dist:no_tls("123abc"))
- end).
+ end
+ ).
diff --git a/src/couch_epi/src/couch_epi.erl b/src/couch_epi/src/couch_epi.erl
index 0e5c233ab..c708e5a0b 100644
--- a/src/couch_epi/src/couch_epi.erl
+++ b/src/couch_epi/src/couch_epi.erl
@@ -17,9 +17,14 @@
%% queries and introspection
-export([
- dump/1, get/2, get_value/3,
- by_key/1, by_key/2, by_source/1, by_source/2,
- keys/1, subscribers/1]).
+ dump/1,
+ get/2,
+ get_value/3,
+ by_key/1, by_key/2,
+ by_source/1, by_source/2,
+ keys/1,
+ subscribers/1
+]).
%% apply
-export([apply/5, decide/5]).
@@ -51,19 +56,18 @@
-opaque handle() :: module().
--type apply_opt()
- :: ignore_errors
- | concurrent
- | pipe.
+-type apply_opt() ::
+ ignore_errors
+ | concurrent
+ | pipe.
-type apply_opts() :: [apply_opt()].
--type data_spec()
- :: {static_module, module()}
- | {callback_module, module()}
- | {priv_file, FileName :: string()}
- | {file, FileName :: string()}.
-
+-type data_spec() ::
+ {static_module, module()}
+ | {callback_module, module()}
+ | {priv_file, FileName :: string()}
+ | {file, FileName :: string()}.
%% ------------------------------------------------------------------
%% API Function Definitions
@@ -87,93 +91,109 @@ get(Handle, Key) when Handle /= undefined ->
get_value(Handle, Subscriber, Key) when Handle /= undefined ->
couch_epi_data_gen:get(Handle, Subscriber, Key).
-
-spec by_key(Handle :: handle()) ->
[{Key :: key(), [{Source :: app(), properties()}]}].
by_key(Handle) when Handle /= undefined ->
couch_epi_data_gen:by_key(Handle).
-
-spec by_key(Handle :: handle(), Key :: key()) ->
[{Source :: app(), properties()}].
by_key(Handle, Key) when Handle /= undefined ->
couch_epi_data_gen:by_key(Handle, Key).
-
-spec by_source(Handle :: handle()) ->
[{Source :: app(), [{Key :: key(), properties()}]}].
by_source(Handle) when Handle /= undefined ->
couch_epi_data_gen:by_source(Handle).
-
-spec by_source(Handle :: handle(), Subscriber :: app()) ->
[{Key :: key(), properties()}].
by_source(Handle, Subscriber) when Handle /= undefined ->
couch_epi_data_gen:by_source(Handle, Subscriber).
-
-spec keys(Handle :: handle()) ->
[Key :: key()].
keys(Handle) when Handle /= undefined ->
couch_epi_data_gen:keys(Handle).
-
-spec subscribers(Handle :: handle()) ->
[Subscriber :: app()].
subscribers(Handle) when Handle /= undefined ->
couch_epi_data_gen:subscribers(Handle).
--spec apply(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) -> [any()].
+-spec apply(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) -> [any()].
apply(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
couch_epi_functions_gen:apply(Handle, ServiceId, Function, Args, Opts).
--spec get_handle({ServiceId :: service_id(), Key :: key()}) -> handle();
- (ServiceId :: service_id()) -> handle().
+-spec get_handle
+ ({ServiceId :: service_id(), Key :: key()}) -> handle();
+ (ServiceId :: service_id()) -> handle().
get_handle({_ServiceId, _Key} = EPIKey) ->
couch_epi_data_gen:get_handle(EPIKey);
get_handle(ServiceId) when is_atom(ServiceId) ->
couch_epi_functions_gen:get_handle(ServiceId).
--spec any(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) -> boolean().
+-spec any(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) -> boolean().
any(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
Replies = apply(Handle, ServiceId, Function, Args, Opts),
[] /= [Reply || Reply <- Replies, Reply == true].
--spec all(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) -> boolean().
+-spec all(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) -> boolean().
all(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
Replies = apply(Handle, ServiceId, Function, Args, Opts),
[] == [Reply || Reply <- Replies, Reply == false].
-spec is_configured(
- Handle :: handle(), Function :: atom(), Arity :: pos_integer()) -> boolean().
+ Handle :: handle(), Function :: atom(), Arity :: pos_integer()
+) -> boolean().
is_configured(Handle, Function, Arity) when Handle /= undefined ->
[] /= couch_epi_functions_gen:modules(Handle, Function, Arity).
-
-spec register_service(
- PluginId :: plugin_id(), Children :: [supervisor:child_spec()]) ->
- [supervisor:child_spec()].
+ PluginId :: plugin_id(), Children :: [supervisor:child_spec()]
+) ->
+ [supervisor:child_spec()].
register_service(Plugin, Children) ->
couch_epi_sup:plugin_childspecs(Plugin, Children).
--spec decide(Handle :: handle(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: apply_opts()) ->
- no_decision | {decided, term()}.
+-spec decide(
+ Handle :: handle(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: apply_opts()
+) ->
+ no_decision | {decided, term()}.
decide(Handle, ServiceId, Function, Args, Opts) when Handle /= undefined ->
couch_epi_functions_gen:decide(Handle, ServiceId, Function, Args, Opts).
diff --git a/src/couch_epi/src/couch_epi_codechange_monitor.erl b/src/couch_epi/src/couch_epi_codechange_monitor.erl
index 738480448..214aea14d 100644
--- a/src/couch_epi/src/couch_epi_codechange_monitor.erl
+++ b/src/couch_epi/src/couch_epi_codechange_monitor.erl
@@ -24,8 +24,14 @@
%% gen_server Function Exports
%% ------------------------------------------------------------------
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
%% ------------------------------------------------------------------
%% API Function Definitions
diff --git a/src/couch_epi/src/couch_epi_codegen.erl b/src/couch_epi/src/couch_epi_codegen.erl
index 89b82a1f9..212a4e31a 100644
--- a/src/couch_epi/src/couch_epi_codegen.erl
+++ b/src/couch_epi/src/couch_epi_codegen.erl
@@ -25,21 +25,29 @@ generate(ModuleName, Forms0) ->
scan(String) ->
Exprs = [E || E <- re:split(String, "\\.\n", [{return, list}, trim])],
- FormsTokens = lists:foldl(fun(Expr, Acc) ->
- case erl_scan:string(Expr) of
- {ok, [], _} ->
- Acc;
- {ok, Tokens, _} ->
- [{Expr, fixup_terminator(Tokens)} | Acc]
- end
- end, [], Exprs),
+ FormsTokens = lists:foldl(
+ fun(Expr, Acc) ->
+ case erl_scan:string(Expr) of
+ {ok, [], _} ->
+ Acc;
+ {ok, Tokens, _} ->
+ [{Expr, fixup_terminator(Tokens)} | Acc]
+ end
+ end,
+ [],
+ Exprs
+ ),
lists:reverse(FormsTokens).
parse(FormsTokens) ->
- ASTForms = lists:foldl(fun(Tokens, Forms) ->
- {ok, AST} = parse_form(Tokens),
- [AST | Forms]
- end, [], FormsTokens),
+ ASTForms = lists:foldl(
+ fun(Tokens, Forms) ->
+ {ok, AST} = parse_form(Tokens),
+ [AST | Forms]
+ end,
+ [],
+ FormsTokens
+ ),
lists:reverse(ASTForms).
format_term(Data) ->
@@ -49,11 +57,11 @@ parse_form(Tokens) ->
{Expr, Forms} = split_expression(Tokens),
case erl_parse:parse_form(Forms) of
{ok, AST} -> {ok, AST};
- {error,{_,_, Reason}} ->
- {error, Expr, Reason}
+ {error, {_, _, Reason}} -> {error, Expr, Reason}
end.
-split_expression({Expr, Forms}) -> {Expr, Forms};
+split_expression({Expr, Forms}) ->
+ {Expr, Forms};
split_expression(Tokens) ->
{Exprs, Forms} = lists:unzip(Tokens),
{string:join(Exprs, "\n"), lists:append(Forms)}.
@@ -63,14 +71,15 @@ function(Clauses) ->
fixup_terminator(Tokens) ->
case lists:last(Tokens) of
- {dot, _} -> Tokens;
- {';', _} -> Tokens;
+ {dot, _} ->
+ Tokens;
+ {';', _} ->
+ Tokens;
Token ->
Line = line(Token),
Tokens ++ [{dot, Line}]
end.
-
-ifdef(pre18).
line(Token) ->
diff --git a/src/couch_epi/src/couch_epi_data.erl b/src/couch_epi/src/couch_epi_data.erl
index 2bb09f6cf..ec554a40e 100644
--- a/src/couch_epi/src/couch_epi_data.erl
+++ b/src/couch_epi/src/couch_epi_data.erl
@@ -60,10 +60,13 @@ minimal_interval({_App, #couch_epi_spec{options = Options}}, Min) ->
end.
locate_sources(Specs) ->
- lists:map(fun({ProviderApp, #couch_epi_spec{value = Src}}) ->
- {ok, Locator} = locate(ProviderApp, Src),
- {ProviderApp, Locator}
- end, Specs).
+ lists:map(
+ fun({ProviderApp, #couch_epi_spec{value = Src}}) ->
+ {ok, Locator} = locate(ProviderApp, Src),
+ {ProviderApp, Locator}
+ end,
+ Specs
+ ).
locate(App, {priv_file, FileName}) ->
case priv_path(App, FileName) of
diff --git a/src/couch_epi/src/couch_epi_data_gen.erl b/src/couch_epi/src/couch_epi_data_gen.erl
index 4a283450d..65d689fbf 100644
--- a/src/couch_epi/src/couch_epi_data_gen.erl
+++ b/src/couch_epi/src/couch_epi_data_gen.erl
@@ -61,57 +61,57 @@ get_handle({Service, Key}) ->
%% ------------------------------------------------------------------
preamble() ->
- "
- -export([by_key/0, by_key/1]).
- -export([by_source/0, by_source/1]).
- -export([all/0, all/1, get/2]).
- -export([version/0, version/1]).
- -export([keys/0, subscribers/0]).
- -compile({no_auto_import,[get/0, get/1]}).
- all() ->
- lists:foldl(fun({Key, Defs}, Acc) ->
- [D || {_Subscriber, D} <- Defs ] ++ Acc
- end, [], by_key()).
-
- all(Key) ->
- lists:foldl(fun({Subscriber, Data}, Acc) ->
- [Data | Acc]
- end, [], by_key(Key)).
-
- by_key() ->
- [{Key, by_key(Key)} || Key <- keys()].
-
- by_key(Key) ->
- lists:foldl(
- fun(Source, Acc) -> append_if_defined(Source, get(Source, Key), Acc)
- end, [], subscribers()).
-
-
- by_source() ->
- [{Source, by_source(Source)} || Source <- subscribers()].
-
- by_source(Source) ->
- lists:foldl(
- fun(Key, Acc) -> append_if_defined(Key, get(Source, Key), Acc)
- end, [], keys()).
-
- version() ->
- [{Subscriber, version(Subscriber)} || Subscriber <- subscribers()].
-
- %% Helper functions
- append_if_defined(Type, undefined, Acc) -> Acc;
- append_if_defined(Type, Value, Acc) -> [{Type, Value} | Acc].
- "
- %% In addition to preamble we also generate following methods
- %% get(Source1, Key1) -> Data;
- %% get(Source, Key) -> undefined.
-
- %% version(Source1) -> "HASH";
- %% version(Source) -> {error, {unknown, Source}}.
-
- %% keys() -> [].
- %% subscribers() -> [].
- .
+ "\n"
+ " -export([by_key/0, by_key/1]).\n"
+ " -export([by_source/0, by_source/1]).\n"
+ " -export([all/0, all/1, get/2]).\n"
+ " -export([version/0, version/1]).\n"
+ " -export([keys/0, subscribers/0]).\n"
+ " -compile({no_auto_import,[get/0, get/1]}).\n"
+ " all() ->\n"
+ " lists:foldl(fun({Key, Defs}, Acc) ->\n"
+ " [D || {_Subscriber, D} <- Defs ] ++ Acc\n"
+ " end, [], by_key()).\n"
+ "\n"
+ " all(Key) ->\n"
+ " lists:foldl(fun({Subscriber, Data}, Acc) ->\n"
+ " [Data | Acc]\n"
+ " end, [], by_key(Key)).\n"
+ "\n"
+ " by_key() ->\n"
+ " [{Key, by_key(Key)} || Key <- keys()].\n"
+ "\n"
+ " by_key(Key) ->\n"
+ " lists:foldl(\n"
+ " fun(Source, Acc) -> append_if_defined(Source, get(Source, Key), Acc)\n"
+ " end, [], subscribers()).\n"
+ "\n"
+ "\n"
+ " by_source() ->\n"
+ " [{Source, by_source(Source)} || Source <- subscribers()].\n"
+ "\n"
+ " by_source(Source) ->\n"
+ " lists:foldl(\n"
+ " fun(Key, Acc) -> append_if_defined(Key, get(Source, Key), Acc)\n"
+ " end, [], keys()).\n"
+ "\n"
+ " version() ->\n"
+ " [{Subscriber, version(Subscriber)} || Subscriber <- subscribers()].\n"
+ "\n"
+ " %% Helper functions\n"
+ " append_if_defined(Type, undefined, Acc) -> Acc;\n"
+ " append_if_defined(Type, Value, Acc) -> [{Type, Value} | Acc].\n"
+ " "
+%% In addition to preamble we also generate following methods
+%% get(Source1, Key1) -> Data;
+%% get(Source, Key) -> undefined.
+
+%% version(Source1) -> "HASH";
+%% version(Source) -> {error, {unknown, Source}}.
+
+%% keys() -> [].
+%% subscribers() -> [].
+.
generate(Handle, Defs) ->
GetFunForms = couch_epi_codegen:function(getters(Defs)),
@@ -119,9 +119,10 @@ generate(Handle, Defs) ->
KeysForms = keys_method(Defs),
SubscribersForms = subscribers_method(Defs),
- Forms = couch_epi_codegen:scan(preamble())
- ++ GetFunForms ++ VersionFunForms
- ++ KeysForms ++ SubscribersForms,
+ Forms =
+ couch_epi_codegen:scan(preamble()) ++
+ GetFunForms ++ VersionFunForms ++
+ KeysForms ++ SubscribersForms,
couch_epi_codegen:generate(Handle, Forms).
@@ -135,22 +136,30 @@ subscribers_method(Defs) ->
getters(Defs) ->
DefaultClause = "get(_S, _K) -> undefined.",
- fold_defs(Defs, [couch_epi_codegen:scan(DefaultClause)],
+ fold_defs(
+ Defs,
+ [couch_epi_codegen:scan(DefaultClause)],
fun({Source, Key, Data}, Acc) ->
getter(Source, Key, Data) ++ Acc
- end).
+ end
+ ).
version_method(Defs) ->
DefaultClause = "version(S) -> {error, {unknown, S}}.",
- lists:foldl(fun({Source, Data}, Clauses) ->
- version(Source, Data) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Defs).
+ lists:foldl(
+ fun({Source, Data}, Clauses) ->
+ version(Source, Data) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Defs
+ ).
getter(Source, Key, Data) ->
D = couch_epi_codegen:format_term(Data),
Src = atom_to_list(Source),
couch_epi_codegen:scan(
- "get(" ++ Src ++ ", " ++ format_key(Key) ++ ") ->" ++ D ++ ";").
+ "get(" ++ Src ++ ", " ++ format_key(Key) ++ ") ->" ++ D ++ ";"
+ ).
version(Source, Data) ->
Src = atom_to_list(Source),
@@ -183,7 +192,6 @@ format_key(Key) ->
module_name({Service, Key}) when is_list(Service) andalso is_list(Key) ->
list_to_atom(string:join([atom_to_list(?MODULE), Service, Key], "_")).
-
get_current_definitions(Handle) ->
if_exists(Handle, by_source, 0, [], fun() ->
Handle:by_source()
@@ -205,11 +213,19 @@ defined_subscribers(Defs) ->
[Source || {Source, _} <- Defs].
fold_defs(Defs, Acc, Fun) ->
- lists:foldr(fun({Source, SourceData}, Clauses) ->
- lists:foldr(fun({Key, Data}, InAcc) ->
- Fun({Source, Key, Data}, InAcc)
- end, [], SourceData) ++ Clauses
- end, Acc, Defs).
+ lists:foldr(
+ fun({Source, SourceData}, Clauses) ->
+ lists:foldr(
+ fun({Key, Data}, InAcc) ->
+ Fun({Source, Key, Data}, InAcc)
+ end,
+ [],
+ SourceData
+ ) ++ Clauses
+ end,
+ Acc,
+ Defs
+ ).
%% ------------------------------------------------------------------
%% Tests
@@ -243,15 +259,20 @@ basic_test() ->
?assertEqual("3KZ4EG4WBF4J683W8GSDDPYR3", Module:version(app1)),
?assertEqual("4EFUU47W9XDNMV9RMZSSJQU3Y", Module:version(app2)),
- ?assertEqual({error,{unknown,bad}}, Module:version(bad)),
+ ?assertEqual({error, {unknown, bad}}, Module:version(bad)),
?assertEqual(
- [{app1,"3KZ4EG4WBF4J683W8GSDDPYR3"},
- {app2,"4EFUU47W9XDNMV9RMZSSJQU3Y"}], lists:usort(Module:version())),
+ [
+ {app1, "3KZ4EG4WBF4J683W8GSDDPYR3"},
+ {app2, "4EFUU47W9XDNMV9RMZSSJQU3Y"}
+ ],
+ lists:usort(Module:version())
+ ),
?assertEqual(
- [{app1,[some_nice_data]},{app2,"other data"}],
- lists:usort(Module:by_key(foo))),
+ [{app1, [some_nice_data]}, {app2, "other data"}],
+ lists:usort(Module:by_key(foo))
+ ),
?assertEqual([], lists:usort(Module:by_key(bad))),
@@ -260,8 +281,8 @@ basic_test() ->
{bar, [{app2, {"even more data"}}]},
{foo, [{app2, "other data"}, {app1, [some_nice_data]}]}
],
- lists:usort(Module:by_key())),
-
+ lists:usort(Module:by_key())
+ ),
?assertEqual(Defs1, lists:usort(Module:by_source(app1))),
?assertEqual(Defs2, lists:usort(Module:by_source(app2))),
@@ -273,10 +294,12 @@ basic_test() ->
{app1, [{foo, [some_nice_data]}]},
{app2, [{foo, "other data"}, {bar, {"even more data"}}]}
],
- lists:usort(Module:by_source())),
+ lists:usort(Module:by_source())
+ ),
?assertEqual(
- lists:usort([Data1, Data2, Data3]), lists:usort(Module:all())),
+ lists:usort([Data1, Data2, Data3]), lists:usort(Module:all())
+ ),
?assertEqual(lists:usort([Data1, Data2]), lists:usort(Module:all(foo))),
?assertEqual([], lists:usort(Module:all(bad))),
ok.
diff --git a/src/couch_epi/src/couch_epi_functions.erl b/src/couch_epi/src/couch_epi_functions.erl
index ac9373928..1c5fd3403 100644
--- a/src/couch_epi/src/couch_epi_functions.erl
+++ b/src/couch_epi/src/couch_epi_functions.erl
@@ -43,7 +43,11 @@ definitions(Modules) ->
[{M, M:module_info(exports) -- Blacklist} || M <- Modules].
group(KV) ->
- Dict = lists:foldr(fun({K,V}, D) ->
- dict:append_list(K, V, D)
- end, dict:new(), KV),
+ Dict = lists:foldr(
+ fun({K, V}, D) ->
+ dict:append_list(K, V, D)
+ end,
+ dict:new(),
+ KV
+ ),
[{K, lists:reverse(V)} || {K, V} <- dict:to_list(Dict)].
diff --git a/src/couch_epi/src/couch_epi_functions_gen.erl b/src/couch_epi/src/couch_epi_functions_gen.erl
index 7408593b8..d7364c044 100644
--- a/src/couch_epi/src/couch_epi_functions_gen.erl
+++ b/src/couch_epi/src/couch_epi_functions_gen.erl
@@ -45,20 +45,30 @@ get_handle(ServiceId) ->
apply(ServiceId, Function, Args, Opts) when is_atom(ServiceId) ->
apply(get_handle(ServiceId), ServiceId, Function, Args, Opts).
--spec apply(Handle :: atom(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: couch_epi:apply_opts()) -> [any()].
+-spec apply(
+ Handle :: atom(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: couch_epi:apply_opts()
+) -> [any()].
apply(Handle, _ServiceId, Function, Args, Opts) ->
DispatchOpts = parse_opts(Opts),
Modules = providers(Handle, Function, length(Args), DispatchOpts),
dispatch(Handle, Modules, Function, Args, DispatchOpts).
--spec decide(Handle :: atom(), ServiceId :: atom(), Function :: atom(),
- Args :: [term()], Opts :: couch_epi:apply_opts()) ->
- no_decision | {decided, term()}.
+-spec decide(
+ Handle :: atom(),
+ ServiceId :: atom(),
+ Function :: atom(),
+ Args :: [term()],
+ Opts :: couch_epi:apply_opts()
+) ->
+ no_decision | {decided, term()}.
decide(Handle, _ServiceId, Function, Args, Opts) ->
- DispatchOpts = parse_opts([interruptible|Opts]),
+ DispatchOpts = parse_opts([interruptible | Opts]),
Modules = providers(Handle, Function, length(Args), DispatchOpts),
dispatch(Handle, Modules, Function, Args, DispatchOpts).
@@ -67,33 +77,33 @@ decide(Handle, _ServiceId, Function, Args, Opts) ->
%% ------------------------------------------------------------------
preamble() ->
- "
- -export([version/0, version/1]).
- -export([providers/0, providers/2]).
- -export([definitions/0, definitions/1]).
- -export([dispatch/3]).
- -export([callbacks/2]).
-
- version() ->
- [{Provider, version(Provider)} || Provider <- providers()].
-
- definitions() ->
- [{Provider, definitions(Provider)} || Provider <- providers()].
-
- callbacks(Provider, Function) ->
- [].
-
- "
- %% In addition to preamble we also generate following methods
- %% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
-
- %% version(Source1) -> "HASH";
- %% version(Source) -> {error, {unknown, Source}}.
-
- %% providers() -> [].
- %% providers(Function, Arity) -> [].
- %% definitions(Provider) -> [{Module, [{Fun, Arity}]}].
- .
+ "\n"
+ " -export([version/0, version/1]).\n"
+ " -export([providers/0, providers/2]).\n"
+ " -export([definitions/0, definitions/1]).\n"
+ " -export([dispatch/3]).\n"
+ " -export([callbacks/2]).\n"
+ "\n"
+ " version() ->\n"
+ " [{Provider, version(Provider)} || Provider <- providers()].\n"
+ "\n"
+ " definitions() ->\n"
+ " [{Provider, definitions(Provider)} || Provider <- providers()].\n"
+ "\n"
+ " callbacks(Provider, Function) ->\n"
+ " [].\n"
+ "\n"
+ " "
+%% In addition to preamble we also generate following methods
+%% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
+
+%% version(Source1) -> "HASH";
+%% version(Source) -> {error, {unknown, Source}}.
+
+%% providers() -> [].
+%% providers(Function, Arity) -> [].
+%% definitions(Provider) -> [{Module, [{Fun, Arity}]}].
+.
generate(Handle, Defs) ->
DispatchFunForms = couch_epi_codegen:function(dispatchers(Defs)),
@@ -103,10 +113,11 @@ generate(Handle, Defs) ->
ProvidersForms = couch_epi_codegen:function(providers_method(Defs)),
DefinitionsForms = couch_epi_codegen:function(definitions_method(Defs)),
- Forms = couch_epi_codegen:scan(preamble())
- ++ DispatchFunForms ++ VersionFunForms
- ++ ProvidersForms ++ AllProvidersForms
- ++ DefinitionsForms,
+ Forms =
+ couch_epi_codegen:scan(preamble()) ++
+ DispatchFunForms ++ VersionFunForms ++
+ ProvidersForms ++ AllProvidersForms ++
+ DefinitionsForms,
couch_epi_codegen:generate(Handle, Forms).
@@ -117,9 +128,13 @@ all_providers_method(Defs) ->
providers_method(Defs) ->
Providers = providers_by_function(Defs),
DefaultClause = "providers(_, _) -> [].",
- lists:foldl(fun({{Fun, Arity}, Modules}, Clauses) ->
- providers(Fun, Arity, Modules) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Providers).
+ lists:foldl(
+ fun({{Fun, Arity}, Modules}, Clauses) ->
+ providers(Fun, Arity, Modules) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Providers
+ ).
providers(Function, Arity, Modules) ->
ArityStr = integer_to_list(Arity),
@@ -127,26 +142,38 @@ providers(Function, Arity, Modules) ->
Fun = atom_to_list(Function),
%% providers(Function, Arity) -> [Module];
couch_epi_codegen:scan(
- "providers(" ++ Fun ++ "," ++ ArityStr ++ ") ->" ++ Mods ++ ";").
+ "providers(" ++ Fun ++ "," ++ ArityStr ++ ") ->" ++ Mods ++ ";"
+ ).
dispatchers(Defs) ->
DefaultClause = "dispatch(_Module, _Fun, _Args) -> ok.",
- fold_defs(Defs, [couch_epi_codegen:scan(DefaultClause)],
+ fold_defs(
+ Defs,
+ [couch_epi_codegen:scan(DefaultClause)],
fun({_Source, Module, Function, Arity}, Acc) ->
dispatcher(Module, Function, Arity) ++ Acc
- end).
+ end
+ ).
version_method(Defs) ->
DefaultClause = "version(S) -> {error, {unknown, S}}.",
- lists:foldl(fun({Source, SrcDefs}, Clauses) ->
- version(Source, SrcDefs) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Defs).
+ lists:foldl(
+ fun({Source, SrcDefs}, Clauses) ->
+ version(Source, SrcDefs) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Defs
+ ).
definitions_method(Defs) ->
DefaultClause = "definitions(S) -> {error, {unknown, S}}.",
- lists:foldl(fun({Source, SrcDefs}, Clauses) ->
- definition(Source, SrcDefs) ++ Clauses
- end, [couch_epi_codegen:scan(DefaultClause)], Defs).
+ lists:foldl(
+ fun({Source, SrcDefs}, Clauses) ->
+ definition(Source, SrcDefs) ++ Clauses
+ end,
+ [couch_epi_codegen:scan(DefaultClause)],
+ Defs
+ ).
definition(Source, Defs) ->
Src = atom_to_list(Source),
@@ -159,27 +186,28 @@ dispatcher(Module, Function, 0) ->
%% dispatch(Module, Function, []) -> Module:Function();
couch_epi_codegen:scan(
- "dispatch(" ++ M ++ "," ++ Fun ++ ", []) ->"
- ++ M ++ ":" ++ Fun ++ "();");
+ "dispatch(" ++ M ++ "," ++ Fun ++ ", []) ->" ++
+ M ++ ":" ++ Fun ++ "();"
+ );
dispatcher(Module, Function, Arity) ->
Args = args_string(Arity),
M = atom_to_list(Module),
Fun = atom_to_list(Function),
%% dispatch(Module, Function, [A1, A2]) -> Module:Function(A1, A2);
couch_epi_codegen:scan(
- "dispatch(" ++ M ++ "," ++ Fun ++ ", [" ++ Args ++ "]) ->"
- ++ M ++ ":" ++ Fun ++ "(" ++ Args ++ ");").
+ "dispatch(" ++ M ++ "," ++ Fun ++ ", [" ++ Args ++ "]) ->" ++
+ M ++ ":" ++ Fun ++ "(" ++ Args ++ ");"
+ ).
args_string(Arity) ->
- Vars = ["A" ++ integer_to_list(Seq) || Seq <- lists:seq(1, Arity)],
+ Vars = ["A" ++ integer_to_list(Seq) || Seq <- lists:seq(1, Arity)],
string:join(Vars, ", ").
version(Source, SrcDefs) ->
Modules = [Module || {Module, _Exports} <- SrcDefs],
couch_epi_codegen:scan(
- "version(" ++ atom_to_list(Source) ++ ") ->" ++ hash(Modules) ++ ";").
-
-
+ "version(" ++ atom_to_list(Source) ++ ") ->" ++ hash(Modules) ++ ";"
+ ).
%% ------------------------------------------------------------------
%% Helper functions
@@ -204,26 +232,48 @@ defined_providers(Defs) ->
%% Defs = [{Source, [{Module, [{Fun, Arity}]}]}]
fold_defs(Defs, Acc, Fun) ->
- lists:foldl(fun({Source, SourceData}, Clauses) ->
- lists:foldl(fun({Module, Exports}, ExportsAcc) ->
- lists:foldl(fun({Function, Arity}, InAcc) ->
- Fun({Source, Module, Function, Arity}, InAcc)
- end, [], Exports) ++ ExportsAcc
- end, [], SourceData) ++ Clauses
- end, Acc, Defs).
+ lists:foldl(
+ fun({Source, SourceData}, Clauses) ->
+ lists:foldl(
+ fun({Module, Exports}, ExportsAcc) ->
+ lists:foldl(
+ fun({Function, Arity}, InAcc) ->
+ Fun({Source, Module, Function, Arity}, InAcc)
+ end,
+ [],
+ Exports
+ ) ++ ExportsAcc
+ end,
+ [],
+ SourceData
+ ) ++ Clauses
+ end,
+ Acc,
+ Defs
+ ).
providers_by_function(Defs) ->
- Providers = fold_defs(Defs, [],
+ Providers = fold_defs(
+ Defs,
+ [],
fun({_Source, Module, Function, Arity}, Acc) ->
[{{Function, Arity}, Module} | Acc]
end
),
- Dict = lists:foldl(fun({K, V}, Acc) ->
- dict:update(K, fun(Modules) ->
- append_if_missing(Modules, V)
- end, [V], Acc)
-
- end, dict:new(), Providers),
+ Dict = lists:foldl(
+ fun({K, V}, Acc) ->
+ dict:update(
+ K,
+ fun(Modules) ->
+ append_if_missing(Modules, V)
+ end,
+ [V],
+ Acc
+ )
+ end,
+ dict:new(),
+ Providers
+ ),
dict:to_list(Dict).
append_if_missing(List, Value) ->
@@ -238,36 +288,75 @@ hash(Modules) ->
dispatch(_Handle, _Modules, _Func, _Args, #opts{concurrent = true, pipe = true}) ->
throw({error, {incompatible_options, [concurrent, pipe]}});
-dispatch(Handle, Modules, Function, Args,
- #opts{pipe = true, ignore_errors = true}) ->
- lists:foldl(fun(Module, Acc) ->
- try
+dispatch(
+ Handle,
+ Modules,
+ Function,
+ Args,
+ #opts{pipe = true, ignore_errors = true}
+) ->
+ lists:foldl(
+ fun(Module, Acc) ->
+ try
+ Handle:dispatch(Module, Function, Acc)
+ catch
+ _:_ ->
+ Acc
+ end
+ end,
+ Args,
+ Modules
+ );
+dispatch(
+ Handle,
+ Modules,
+ Function,
+ Args,
+ #opts{pipe = true}
+) ->
+ lists:foldl(
+ fun(Module, Acc) ->
Handle:dispatch(Module, Function, Acc)
- catch _:_ ->
- Acc
- end
- end, Args, Modules);
-dispatch(Handle, Modules, Function, Args,
- #opts{pipe = true}) ->
- lists:foldl(fun(Module, Acc) ->
- Handle:dispatch(Module, Function, Acc)
- end, Args, Modules);
-dispatch(Handle, Modules, Function, Args,
- #opts{interruptible = true}) ->
+ end,
+ Args,
+ Modules
+ );
+dispatch(
+ Handle,
+ Modules,
+ Function,
+ Args,
+ #opts{interruptible = true}
+) ->
apply_while(Modules, Handle, Function, Args);
dispatch(Handle, Modules, Function, Args, #opts{} = Opts) ->
[do_dispatch(Handle, Module, Function, Args, Opts) || Module <- Modules].
-do_dispatch(Handle, Module, Function, Args,
- #opts{concurrent = true, ignore_errors = true}) ->
+do_dispatch(
+ Handle,
+ Module,
+ Function,
+ Args,
+ #opts{concurrent = true, ignore_errors = true}
+) ->
spawn(fun() ->
(catch Handle:dispatch(Module, Function, Args))
end);
-do_dispatch(Handle, Module, Function, Args,
- #opts{ignore_errors = true}) ->
+do_dispatch(
+ Handle,
+ Module,
+ Function,
+ Args,
+ #opts{ignore_errors = true}
+) ->
(catch Handle:dispatch(Module, Function, Args));
-do_dispatch(Handle, Module, Function, Args,
- #opts{concurrent = true}) ->
+do_dispatch(
+ Handle,
+ Module,
+ Function,
+ Args,
+ #opts{concurrent = true}
+) ->
spawn(fun() -> Handle:dispatch(Module, Function, Args) end);
do_dispatch(Handle, Module, Function, Args, #opts{}) ->
Handle:dispatch(Module, Function, Args).
@@ -285,13 +374,13 @@ apply_while([Module | Modules], Handle, Function, Args) ->
parse_opts(Opts) ->
parse_opts(Opts, #opts{}).
-parse_opts([ignore_errors|Rest], #opts{} = Acc) ->
+parse_opts([ignore_errors | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{ignore_errors = true});
-parse_opts([pipe|Rest], #opts{} = Acc) ->
+parse_opts([pipe | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{pipe = true});
-parse_opts([concurrent|Rest], #opts{} = Acc) ->
+parse_opts([concurrent | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{concurrent = true});
-parse_opts([interruptible|Rest], #opts{} = Acc) ->
+parse_opts([interruptible | Rest], #opts{} = Acc) ->
parse_opts(Rest, Acc#opts{interruptible = true});
parse_opts([], Acc) ->
Acc.
@@ -324,16 +413,17 @@ basic_test() ->
generate(Module, [{app1, Defs}, {app2, Defs}]),
Exports = lists:sort([
- {callbacks,2},
- {version,1},
- {providers,2},
- {definitions,1},
- {module_info,0},
- {version,0},
- {dispatch,3},
- {providers,0},
- {module_info,1},
- {definitions,0}]),
+ {callbacks, 2},
+ {version, 1},
+ {providers, 2},
+ {definitions, 1},
+ {module_info, 0},
+ {version, 0},
+ {dispatch, 3},
+ {providers, 0},
+ {module_info, 1},
+ {definitions, 0}
+ ]),
?assertEqual(Exports, lists:sort(Module:module_info(exports))),
?assertEqual([app1, app2], lists:sort(Module:providers())),
@@ -356,19 +446,19 @@ generate_module(Name, Body) ->
couch_epi_codegen:generate(Name, Tokens).
decide_module(decide) ->
- "
- -export([inc/1]).
-
- inc(A) ->
- {decided, A + 1}.
- ";
+ "\n"
+ " -export([inc/1]).\n"
+ "\n"
+ " inc(A) ->\n"
+ " {decided, A + 1}.\n"
+ " ";
decide_module(no_decision) ->
- "
- -export([inc/1]).
-
- inc(_A) ->
- no_decision.
- ".
+ "\n"
+ " -export([inc/1]).\n"
+ "\n"
+ " inc(_A) ->\n"
+ " no_decision.\n"
+ " ".
decide_test() ->
ok = generate_module(decide, decide_module(decide)),
@@ -380,12 +470,12 @@ decide_test() ->
DecideFirstHandle = decide_first_handle,
ok = generate(DecideFirstHandle, [DecideDef, NoDecissionDef]),
?assertMatch([decide, no_decision], DecideFirstHandle:providers(inc, 1)),
- ?assertMatch({decided,4}, decide(DecideFirstHandle, anything, inc, [3], [])),
+ ?assertMatch({decided, 4}, decide(DecideFirstHandle, anything, inc, [3], [])),
DecideSecondHandle = decide_second_handle,
ok = generate(DecideSecondHandle, [NoDecissionDef, DecideDef]),
?assertMatch([no_decision, decide], DecideSecondHandle:providers(inc, 1)),
- ?assertMatch({decided,4}, decide(DecideSecondHandle, anything, inc, [3], [])),
+ ?assertMatch({decided, 4}, decide(DecideSecondHandle, anything, inc, [3], [])),
NoDecissionHandle = no_decision_handle,
ok = generate(NoDecissionHandle, [NoDecissionDef]),
diff --git a/src/couch_epi/src/couch_epi_module_keeper.erl b/src/couch_epi/src/couch_epi_module_keeper.erl
index 36376fec0..97420ea7b 100644
--- a/src/couch_epi/src/couch_epi_module_keeper.erl
+++ b/src/couch_epi/src/couch_epi_module_keeper.erl
@@ -12,7 +12,6 @@
-module(couch_epi_module_keeper).
-
-behaviour(gen_server).
%% ------------------------------------------------------------------
@@ -22,17 +21,29 @@
-export([start_link/3, stop/1]).
-export([reload/1]).
-
%% ------------------------------------------------------------------
%% gen_server Function Exports
%% ------------------------------------------------------------------
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-record(state, {
- codegen, module, key, type, handle, hash, kind,
- timer = {undefined, undefined}}).
+ codegen,
+ module,
+ key,
+ type,
+ handle,
+ hash,
+ kind,
+ timer = {undefined, undefined}
+}).
%% ------------------------------------------------------------------
%% API Function Definitions
@@ -41,7 +52,8 @@
start_link(Type, Key, Codegen) ->
Handle = Codegen:get_handle(Key),
gen_server:start_link(
- {local, Handle}, ?MODULE, [Type, Codegen, Key, Handle], []).
+ {local, Handle}, ?MODULE, [Type, Codegen, Key, Handle], []
+ ).
stop(Server) ->
catch gen_server:call(Server, stop).
@@ -151,8 +163,9 @@ safe_set(Hash, Data, #state{} = State) ->
OldData = CodeGen:get_current_definitions(Handle),
ok = CodeGen:generate(Handle, Data),
{ok, OldData, State#state{hash = Hash}}
- catch Class:Reason ->
- {{Class, Reason}, State}
+ catch
+ Class:Reason ->
+ {{Class, Reason}, State}
end.
notify(Key, OldData, NewData, Defs) ->
diff --git a/src/couch_epi/src/couch_epi_plugin.erl b/src/couch_epi/src/couch_epi_plugin.erl
index 2cb1f3ebe..1ec09d8dc 100644
--- a/src/couch_epi/src/couch_epi_plugin.erl
+++ b/src/couch_epi/src/couch_epi_plugin.erl
@@ -28,25 +28,25 @@
%% Types Definitions
%% ------------------------------------------------------------------
--type kind()
- :: providers
- | data_providers
- | services
- | data_subscriptions
- .
+-type kind() ::
+ providers
+ | data_providers
+ | services
+ | data_subscriptions.
--type key()
- :: {ServiceId :: couch_epi:service_id(), Key :: couch_epi:key()}
- | couch_epi:service_id().
+-type key() ::
+ {ServiceId :: couch_epi:service_id(), Key :: couch_epi:key()}
+ | couch_epi:service_id().
-callback app() -> couch_epi:app().
-callback providers() -> [{couch_epi:service_id(), module()}].
-callback services() -> [{couch_epi:service_id(), module()}].
-callback data_subscriptions() -> [{couch_epi:service_id(), couch_epi:key()}].
--callback data_providers() -> [
- {couch_epi:key(), couch_epi:data_spec()}
+-callback data_providers() ->
+ [
+ {couch_epi:key(), couch_epi:data_spec()}
| {couch_epi:key(), couch_epi:data_spec(), [couch_epi:data_spec_opt()]}
-].
+ ].
-callback processes() -> [{couch_epi:plugin_id(), [supervisor:child_spec()]}].
-callback notify(Key :: term(), Old :: term(), New :: term()) -> ok.
@@ -58,8 +58,7 @@ definitions(Plugins) ->
lists:append([extract_definitions(Plugin) || Plugin <- Plugins]).
plugin_processes(Plugin, Plugins) ->
- lists:append([
- Specs || P0 <- Plugins, {P1, Specs} <- P0:processes(), P1 =:= Plugin]).
+ lists:append([Specs || P0 <- Plugins, {P1, Specs} <- P0:processes(), P1 =:= Plugin]).
grouped_definitions(Plugins) ->
Defs = lists:append([extract_definitions(Plugin) || Plugin <- Plugins]),
@@ -87,7 +86,6 @@ notify_plugin(Plugin, Key, OldData, NewData) ->
App = Plugin:app(),
Plugin:notify(Key, app_data(App, OldData), app_data(App, NewData)).
-
app_data(App, Data) ->
case lists:keyfind(App, 1, Data) of
{App, AppData} -> AppData;
@@ -100,12 +98,11 @@ filter_by_key(Definitions, Kind, Key) ->
by_key(#couch_epi_spec{kind = Kind, key = Key}, Kind, Key) -> true;
by_key(_, _, _) -> false.
-
extract_definitions(Plugin) ->
- specs(Plugin, providers)
- ++ specs(Plugin, data_providers)
- ++ specs(Plugin, services)
- ++ specs(Plugin, data_subscriptions).
+ specs(Plugin, providers) ++
+ specs(Plugin, data_providers) ++
+ specs(Plugin, services) ++
+ specs(Plugin, data_subscriptions).
-spec group_specs(Specs :: [#couch_epi_spec{}]) -> GroupedSpecs when
GroupedSpecs ::
@@ -113,15 +110,23 @@ extract_definitions(Plugin) ->
group_specs(Specs) ->
Grouped = group(
- [{{Kind, Key}, group([{App, Spec}])}
- || #couch_epi_spec{kind = Kind, key = Key, app = App} = Spec <- Specs]),
+ [
+ {{Kind, Key}, group([{App, Spec}])}
+ || #couch_epi_spec{kind = Kind, key = Key, app = App} = Spec <- Specs
+ ]
+ ),
[{K, lists:reverse(V)} || {K, V} <- Grouped].
-
group(KV) ->
- dict:to_list(lists:foldr(fun({K,V}, D) ->
- dict:append_list(K, V, D)
- end, dict:new(), KV)).
+ dict:to_list(
+ lists:foldr(
+ fun({K, V}, D) ->
+ dict:append_list(K, V, D)
+ end,
+ dict:new(),
+ KV
+ )
+ ).
specs(Plugin, Kind) ->
[spec(parse(Spec, Kind), Plugin, Kind) || Spec <- Plugin:Kind()].
@@ -156,7 +161,6 @@ type(services, _) -> couch_epi_functions;
type(data_providers, _) -> couch_epi_data;
type(data_subscriptions, _) -> undefined.
-
%% ------------------------------------------------------------------
%% Tests
%% ------------------------------------------------------------------
@@ -165,66 +169,66 @@ type(data_subscriptions, _) -> undefined.
-include_lib("eunit/include/eunit.hrl").
plugin_module(foo_epi) ->
- "
- -compile([export_all]).
-
- app() -> foo.
- providers() ->
- [
- {chttpd_handlers, foo_provider},
- {bar_handlers, bar_provider1},
- {bar_handlers, bar_provider2}
- ].
-
- services() ->
- [
- {foo_handlers, foo_service}
- ].
-
- data_providers() ->
- [
- {{foo_service, data1}, {file, \"abs_file\"}, [{interval, 5000}]},
- {{foo_service, data2}, {priv_file, \"priv_file\"}},
- {{foo_service, data3}, {module, foo_data}}
- ].
-
- data_subscriptions() ->
- [
- {stats, foo_definitions}
- ].
-
- processes() -> [].
-
- notify(_, _, _) -> ok.
- ";
+ "\n"
+ " -compile([export_all]).\n"
+ "\n"
+ " app() -> foo.\n"
+ " providers() ->\n"
+ " [\n"
+ " {chttpd_handlers, foo_provider},\n"
+ " {bar_handlers, bar_provider1},\n"
+ " {bar_handlers, bar_provider2}\n"
+ " ].\n"
+ "\n"
+ " services() ->\n"
+ " [\n"
+ " {foo_handlers, foo_service}\n"
+ " ].\n"
+ "\n"
+ " data_providers() ->\n"
+ " [\n"
+ " {{foo_service, data1}, {file, \"abs_file\"}, [{interval, 5000}]},\n"
+ " {{foo_service, data2}, {priv_file, \"priv_file\"}},\n"
+ " {{foo_service, data3}, {module, foo_data}}\n"
+ " ].\n"
+ "\n"
+ " data_subscriptions() ->\n"
+ " [\n"
+ " {stats, foo_definitions}\n"
+ " ].\n"
+ "\n"
+ " processes() -> [].\n"
+ "\n"
+ " notify(_, _, _) -> ok.\n"
+ " ";
plugin_module(bar_epi) ->
- "
- -compile([export_all]).
-
- app() -> bar.
- providers() ->
- [
- {chttpd_handlers, bar_provider},
- {bar_handlers, bar_provider}
- ].
-
- services() ->
- [
- {bar_handlers, bar_service}
- ].
-
- data_providers() ->
- [].
-
- data_subscriptions() ->
- [
- {foo_service, data1}
- ].
-
- processes() -> [].
-
- notify(_, _, _) -> ok.
- ".
+ "\n"
+ " -compile([export_all]).\n"
+ "\n"
+ " app() -> bar.\n"
+ " providers() ->\n"
+ " [\n"
+ " {chttpd_handlers, bar_provider},\n"
+ " {bar_handlers, bar_provider}\n"
+ " ].\n"
+ "\n"
+ " services() ->\n"
+ " [\n"
+ " {bar_handlers, bar_service}\n"
+ " ].\n"
+ "\n"
+ " data_providers() ->\n"
+ " [].\n"
+ "\n"
+ " data_subscriptions() ->\n"
+ " [\n"
+ " {foo_service, data1}\n"
+ " ].\n"
+ "\n"
+ " processes() -> [].\n"
+ "\n"
+ " notify(_, _, _) -> ok.\n"
+ " ".
generate_module(Name, Body) ->
Tokens = couch_epi_codegen:scan(Body),
@@ -234,7 +238,7 @@ generate_modules(Kind, Providers) ->
[generate_module(P, Kind(P)) || P <- Providers].
provider_modules_order_test() ->
- [ok,ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
+ [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
ok = application:set_env(couch_epi, plugins, [foo_epi, bar_epi]),
Expected = [
{foo, bar_provider1},
@@ -249,7 +253,7 @@ provider_modules_order_test() ->
ok.
providers_order_test() ->
- [ok,ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
+ [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
Expected = [
{foo, bar_provider1},
{foo, bar_provider2},
@@ -331,7 +335,8 @@ definitions_test() ->
key = chttpd_handlers,
value = foo_provider,
codegen = couch_epi_functions_gen,
- type = couch_epi_functions},
+ type = couch_epi_functions
+ },
#couch_epi_spec{
behaviour = foo_epi,
app = foo,
@@ -340,14 +345,15 @@ definitions_test() ->
key = foo_handlers,
value = foo_service,
codegen = couch_epi_functions_gen,
- type = couch_epi_functions},
+ type = couch_epi_functions
+ },
#couch_epi_spec{
behaviour = foo_epi,
app = foo,
kind = data_providers,
options = [{interval, 5000}],
key = {foo_service, data1},
- value = {file,"abs_file"},
+ value = {file, "abs_file"},
codegen = couch_epi_data_gen,
type = couch_epi_data
},
@@ -382,7 +388,7 @@ definitions_test() ->
}
]),
- [ok,ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
+ [ok, ok] = generate_modules(fun plugin_module/1, [foo_epi, bar_epi]),
Tests = lists:zip(Expected, lists:sort(definitions([foo_epi, bar_epi]))),
[?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
ok.
diff --git a/src/couch_epi/src/couch_epi_sup.erl b/src/couch_epi/src/couch_epi_sup.erl
index 477cbe79e..aca423a7d 100644
--- a/src/couch_epi/src/couch_epi_sup.erl
+++ b/src/couch_epi/src/couch_epi_sup.erl
@@ -61,7 +61,7 @@ plugin_childspecs(Plugin, Children) ->
%% ===================================================================
init([]) ->
- {ok, { {one_for_one, 5, 10}, keepers()} }.
+ {ok, {{one_for_one, 5, 10}, keepers()}}.
%% ------------------------------------------------------------------
%% Internal Function Definitions
@@ -79,13 +79,16 @@ plugin_childspecs(Plugin, Plugins, Children) ->
merge(ExtraChildren, Children) ++ childspecs(Definitions).
childspecs(Definitions) ->
- lists:map(fun({{Kind, Key}, Defs}) ->
- CodeGen = couch_epi_plugin:codegen(Kind),
- Handle = CodeGen:get_handle(Key),
- Modules = lists:append([modules(Spec) || {_App, Spec} <- Defs]),
- Name = service_name(Key) ++ "|" ++ atom_to_list(Kind),
- code_monitor(Name, [Handle], [Handle|Modules])
- end, Definitions).
+ lists:map(
+ fun({{Kind, Key}, Defs}) ->
+ CodeGen = couch_epi_plugin:codegen(Kind),
+ Handle = CodeGen:get_handle(Key),
+ Modules = lists:append([modules(Spec) || {_App, Spec} <- Defs]),
+ Name = service_name(Key) ++ "|" ++ atom_to_list(Kind),
+ code_monitor(Name, [Handle], [Handle | Modules])
+ end,
+ Definitions
+ ).
%% ------------------------------------------------------------------
%% Helper Function Definitions
@@ -95,21 +98,36 @@ remove_duplicates(Definitions) ->
lists:ukeysort(1, Definitions).
keeper_childspecs(Definitions) ->
- lists:map(fun({{Kind, Key}, _Specs}) ->
- Name = service_name(Key) ++ "|keeper",
- CodeGen = couch_epi_plugin:codegen(Kind),
- Handle = CodeGen:get_handle(Key),
- keeper(Name, [provider_kind(Kind), Key, CodeGen], [Handle])
- end, Definitions).
+ lists:map(
+ fun({{Kind, Key}, _Specs}) ->
+ Name = service_name(Key) ++ "|keeper",
+ CodeGen = couch_epi_plugin:codegen(Kind),
+ Handle = CodeGen:get_handle(Key),
+ keeper(Name, [provider_kind(Kind), Key, CodeGen], [Handle])
+ end,
+ Definitions
+ ).
keeper(Name, Args, Modules) ->
- {"couch_epi|" ++ Name, {couch_epi_module_keeper, start_link,
- Args}, permanent, 5000, worker, Modules}.
+ {
+ "couch_epi|" ++ Name,
+ {couch_epi_module_keeper, start_link, Args},
+ permanent,
+ 5000,
+ worker,
+ Modules
+ }.
code_monitor(Name, Args, Modules0) ->
Modules = [couch_epi_codechange_monitor | Modules0],
- {"couch_epi_codechange_monitor|" ++ Name,
- {couch_epi_codechange_monitor, start_link, Args}, permanent, 5000, worker, Modules}.
+ {
+ "couch_epi_codechange_monitor|" ++ Name,
+ {couch_epi_codechange_monitor, start_link, Args},
+ permanent,
+ 5000,
+ worker,
+ Modules
+ }.
provider_kind(services) -> providers;
provider_kind(data_subscriptions) -> data_providers;
@@ -138,5 +156,8 @@ merge([], Children) ->
merge([{Id, _, _, _, _, _} = Spec | Rest], Children) ->
merge(Rest, lists:keystore(Id, 1, Children, Spec));
merge([#{id := Id} = Spec | Rest], Children) ->
- Replace = fun(#{id := I}) when I == Id -> Spec; (E) -> E end,
+ Replace = fun
+ (#{id := I}) when I == Id -> Spec;
+ (E) -> E
+ end,
merge(Rest, lists:map(Replace, Children)).
diff --git a/src/couch_epi/src/couch_epi_util.erl b/src/couch_epi/src/couch_epi_util.erl
index ea4b10ea8..2c86a96e2 100644
--- a/src/couch_epi/src/couch_epi_util.erl
+++ b/src/couch_epi/src/couch_epi_util.erl
@@ -23,7 +23,7 @@ module_version(Module) ->
hash(Term) ->
<<SigInt:128/integer>> = couch_hash:md5_hash(term_to_binary(Term)),
- lists:flatten(io_lib:format("\"~.36B\"",[SigInt])).
+ lists:flatten(io_lib:format("\"~.36B\"", [SigInt])).
module_exists(Module) ->
erlang:function_exported(Module, module_info, 0).
diff --git a/src/couch_epi/test/eunit/couch_epi_basic_test.erl b/src/couch_epi/test/eunit/couch_epi_basic_test.erl
index 5ba6c9f87..a99e9f900 100644
--- a/src/couch_epi/test/eunit/couch_epi_basic_test.erl
+++ b/src/couch_epi/test/eunit/couch_epi_basic_test.erl
@@ -28,63 +28,56 @@
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
start_link() -> ok.
-
%% BEGIN couch_epi_plugin behaviour callbacks
-
app() -> test_app.
-
providers() ->
[
{my_service, provider1},
{my_service, provider2}
].
-
services() ->
[
{my_service, ?MODULE}
].
-
data_providers() ->
[
{{test_app, descriptions}, {static_module, ?MODULE}, [{interval, 100}]}
].
-
data_subscriptions() ->
[
{test_app, descriptions}
].
-
processes() ->
[
{?MODULE, [?CHILD(extra_process, worker)]},
- {?MODULE, [{to_replace, {new, start_link, [bar]},
- permanent, 5000, worker, [bar]}]},
- {?MODULE, [#{id => to_replace_map,
- start => {new, start_link, [bar]}, modules => [bar]}]}
+ {?MODULE, [{to_replace, {new, start_link, [bar]}, permanent, 5000, worker, [bar]}]},
+ {?MODULE, [
+ #{
+ id => to_replace_map,
+ start => {new, start_link, [bar]},
+ modules => [bar]
+ }
+ ]}
].
-
notify(_Key, _OldData, _NewData) ->
ok.
-
%% END couch_epi_plugin behaviour callbacks
-
parse_child_id(Id) when is_atom(Id) ->
Id;
parse_child_id(Id) ->
- ["couch_epi_codechange_monitor", ServiceName, KindStr]
- = string:tokens(Id, "|"),
+ ["couch_epi_codechange_monitor", ServiceName, KindStr] =
+ string:tokens(Id, "|"),
Kind = list_to_atom(KindStr),
case string:tokens(ServiceName, ":") of
[ServiceId, Key] ->
@@ -93,7 +86,6 @@ parse_child_id(Id) ->
{list_to_atom(Key), Kind}
end.
-
-include_lib("eunit/include/eunit.hrl").
basic_test() ->
@@ -101,49 +93,75 @@ basic_test() ->
{extra_process, [], [extra_process]},
{to_replace, [bar], [bar]},
{to_replace_map, [bar], [bar]},
- {{my_service, providers},
+ {{my_service, providers}, [couch_epi_functions_gen_my_service], [
+ couch_epi_codechange_monitor,
+ couch_epi_functions_gen_my_service,
+ provider1,
+ provider2
+ ]},
+ {
+ {my_service, services},
[couch_epi_functions_gen_my_service],
- [couch_epi_codechange_monitor, couch_epi_functions_gen_my_service,
- provider1, provider2]},
- {{my_service, services},
- [couch_epi_functions_gen_my_service],
- lists:sort([couch_epi_codechange_monitor,
- couch_epi_functions_gen_my_service, ?MODULE])},
- {{{test_app, descriptions}, data_subscriptions},
+ lists:sort([
+ couch_epi_codechange_monitor,
+ couch_epi_functions_gen_my_service,
+ ?MODULE
+ ])
+ },
+ {
+ {{test_app, descriptions}, data_subscriptions},
[couch_epi_data_gen_test_app_descriptions],
- lists:sort([couch_epi_codechange_monitor,
- couch_epi_data_gen_test_app_descriptions, ?MODULE])},
- {{{test_app, descriptions}, data_providers},
+ lists:sort([
+ couch_epi_codechange_monitor,
+ couch_epi_data_gen_test_app_descriptions,
+ ?MODULE
+ ])
+ },
+ {
+ {{test_app, descriptions}, data_providers},
[couch_epi_data_gen_test_app_descriptions],
- lists:sort([couch_epi_codechange_monitor,
- couch_epi_data_gen_test_app_descriptions, ?MODULE])}
+ lists:sort([
+ couch_epi_codechange_monitor,
+ couch_epi_data_gen_test_app_descriptions,
+ ?MODULE
+ ])
+ }
],
ToReplace = [
{to_replace, {old, start_link, [foo]}, permanent, 5000, worker, [foo]},
#{id => to_replace_map, start => {old, start_link, [foo]}}
],
- Children = lists:sort(couch_epi_sup:plugin_childspecs(
- ?MODULE, [?MODULE], ToReplace)),
-
- Results = lists:map(fun
- ({Id, {_M, _F, Args}, _, _, _, Modules}) ->
- {parse_child_id(Id), Args, lists:sort(Modules)};
- (#{id := Id, start := {_M, _F, Args}, modules := Modules}) ->
- {parse_child_id(Id), Args, lists:sort(Modules)}
- end, Children),
+ Children = lists:sort(
+ couch_epi_sup:plugin_childspecs(
+ ?MODULE, [?MODULE], ToReplace
+ )
+ ),
+
+ Results = lists:map(
+ fun
+ ({Id, {_M, _F, Args}, _, _, _, Modules}) ->
+ {parse_child_id(Id), Args, lists:sort(Modules)};
+ (#{id := Id, start := {_M, _F, Args}, modules := Modules}) ->
+ {parse_child_id(Id), Args, lists:sort(Modules)}
+ end,
+ Children
+ ),
Tests = lists:zip(lists:sort(Expected), lists:sort(Results)),
[?assertEqual(Expect, Result) || {Expect, Result} <- Tests],
- ExpectedChild = {to_replace, {new, start_link, [bar]},
- permanent, 5000, worker, [bar]},
+ ExpectedChild = {to_replace, {new, start_link, [bar]}, permanent, 5000, worker, [bar]},
?assertEqual(
ExpectedChild,
- lists:keyfind(to_replace, 1, Children)),
-
- ExpectedMapChildSpec = #{id => to_replace_map,
- start => {new, start_link, [bar]}, modules => [bar]},
+ lists:keyfind(to_replace, 1, Children)
+ ),
+
+ ExpectedMapChildSpec = #{
+ id => to_replace_map,
+ start => {new, start_link, [bar]},
+ modules => [bar]
+ },
[MapChildSpec] = [E || #{id := to_replace_map} = E <- Children],
?assertEqual(ExpectedMapChildSpec, MapChildSpec),
ok.
diff --git a/src/couch_epi/test/eunit/couch_epi_tests.erl b/src/couch_epi/test/eunit/couch_epi_tests.erl
index 12d8610c1..08307fe30 100644
--- a/src/couch_epi/test/eunit/couch_epi_tests.erl
+++ b/src/couch_epi/test/eunit/couch_epi_tests.erl
@@ -24,72 +24,77 @@
-define(TIMEOUT, 5000).
-define(RELOAD_WAIT, 1000).
--define(temp_atom,
- fun() ->
- {A, B, C} = os:timestamp(),
- list_to_atom(lists:flatten(io_lib:format("~p~p~p", [A, B, C])))
- end).
-
--define(MODULE1(Name), "
- -export([inc/2, fail/2]).
-
- inc(KV, A) ->
- Reply = A + 1,
- couch_epi_tests:save(KV, inc1, Reply),
- [KV, Reply].
-
- fail(KV, A) ->
- inc(KV, A).
-").
-
--define(MODULE2(Name), "
- -export([inc/2, fail/2]).
-
- inc(KV, A) ->
- Reply = A + 1,
- couch_epi_tests:save(KV, inc2, Reply),
- [KV, Reply].
-
- fail(KV, _A) ->
- couch_epi_tests:save(KV, inc2, check_error),
- throw(check_error).
-").
-
--define(DATA_MODULE1(Name), "
- -export([data/0]).
-
- data() ->
- [
- {[complex, key, 1], [
- {type, counter},
- {desc, foo}
- ]}
- ].
-").
-
--define(DATA_MODULE2(Name), "
- -export([data/0]).
-
- data() ->
- [
- {[complex, key, 2], [
- {type, counter},
- {desc, bar}
- ]},
- {[complex, key, 1], [
- {type, counter},
- {desc, updated_foo}
- ]}
- ].
-").
-
--define(DATA_MODULE3(Name, Kv), "
- -export([data/0]).
-
-data() ->
- {ok, Data} = couch_epi_tests:get('" ++ atom_to_list(Kv) ++ "', data),
- Data.
-").
+-define(temp_atom, fun() ->
+ {A, B, C} = os:timestamp(),
+ list_to_atom(lists:flatten(io_lib:format("~p~p~p", [A, B, C])))
+end).
+
+-define(MODULE1(Name),
+ "\n"
+ " -export([inc/2, fail/2]).\n"
+ "\n"
+ " inc(KV, A) ->\n"
+ " Reply = A + 1,\n"
+ " couch_epi_tests:save(KV, inc1, Reply),\n"
+ " [KV, Reply].\n"
+ "\n"
+ " fail(KV, A) ->\n"
+ " inc(KV, A).\n"
+).
+
+-define(MODULE2(Name),
+ "\n"
+ " -export([inc/2, fail/2]).\n"
+ "\n"
+ " inc(KV, A) ->\n"
+ " Reply = A + 1,\n"
+ " couch_epi_tests:save(KV, inc2, Reply),\n"
+ " [KV, Reply].\n"
+ "\n"
+ " fail(KV, _A) ->\n"
+ " couch_epi_tests:save(KV, inc2, check_error),\n"
+ " throw(check_error).\n"
+).
+
+-define(DATA_MODULE1(Name),
+ "\n"
+ " -export([data/0]).\n"
+ "\n"
+ " data() ->\n"
+ " [\n"
+ " {[complex, key, 1], [\n"
+ " {type, counter},\n"
+ " {desc, foo}\n"
+ " ]}\n"
+ " ].\n"
+).
+
+-define(DATA_MODULE2(Name),
+ "\n"
+ " -export([data/0]).\n"
+ "\n"
+ " data() ->\n"
+ " [\n"
+ " {[complex, key, 2], [\n"
+ " {type, counter},\n"
+ " {desc, bar}\n"
+ " ]},\n"
+ " {[complex, key, 1], [\n"
+ " {type, counter},\n"
+ " {desc, updated_foo}\n"
+ " ]}\n"
+ " ].\n"
+).
+
+-define(DATA_MODULE3(Name, Kv),
+ "\n"
+ " -export([data/0]).\n"
+ "\n"
+ "data() ->\n"
+ " {ok, Data} = couch_epi_tests:get('" ++ atom_to_list(Kv) ++
+ "', data),\n"
+ " Data.\n"
+).
%% ------------------------------------------------------------------
%% couch_epi_plugin behaviour
@@ -98,69 +103,76 @@ data() ->
plugin_module([KV, Spec]) when is_tuple(Spec) ->
SpecStr = io_lib:format("~w", [Spec]),
KVStr = "'" ++ atom_to_list(KV) ++ "'",
- "
- -compile([export_all]).
-
- app() -> test_app.
- providers() ->
- [].
-
- services() ->
- [].
-
- data_providers() ->
- [
- {{test_app, descriptions}, " ++ SpecStr ++ ", [{interval, 100}]}
- ].
-
- data_subscriptions() ->
- [
- {test_app, descriptions}
- ].
-
- processes() -> [].
-
- notify(Key, OldData, Data) ->
- couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++ ").
- ";
+ "\n"
+ " -compile([export_all]).\n"
+ "\n"
+ " app() -> test_app.\n"
+ " providers() ->\n"
+ " [].\n"
+ "\n"
+ " services() ->\n"
+ " [].\n"
+ "\n"
+ " data_providers() ->\n"
+ " [\n"
+ " {{test_app, descriptions}, " ++ SpecStr ++
+ ", [{interval, 100}]}\n"
+ " ].\n"
+ "\n"
+ " data_subscriptions() ->\n"
+ " [\n"
+ " {test_app, descriptions}\n"
+ " ].\n"
+ "\n"
+ " processes() -> [].\n"
+ "\n"
+ " notify(Key, OldData, Data) ->\n"
+ " couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++
+ ").\n"
+ " ";
plugin_module([KV, Provider]) when is_atom(Provider) ->
KVStr = "'" ++ atom_to_list(KV) ++ "'",
- "
- -compile([export_all]).
-
- app() -> test_app.
- providers() ->
- [
- {my_service, " ++ atom_to_list(Provider) ++ "}
- ].
-
- services() ->
- [
- {my_service, " ++ atom_to_list(Provider) ++ "}
- ].
-
- data_providers() ->
- [].
-
- data_subscriptions() ->
- [].
-
- processes() -> [].
-
- notify(Key, OldData, Data) ->
- couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++ ").
- ".
-
+ "\n"
+ " -compile([export_all]).\n"
+ "\n"
+ " app() -> test_app.\n"
+ " providers() ->\n"
+ " [\n"
+ " {my_service, " ++ atom_to_list(Provider) ++
+ "}\n"
+ " ].\n"
+ "\n"
+ " services() ->\n"
+ " [\n"
+ " {my_service, " ++ atom_to_list(Provider) ++
+ "}\n"
+ " ].\n"
+ "\n"
+ " data_providers() ->\n"
+ " [].\n"
+ "\n"
+ " data_subscriptions() ->\n"
+ " [].\n"
+ "\n"
+ " processes() -> [].\n"
+ "\n"
+ " notify(Key, OldData, Data) ->\n"
+ " couch_epi_tests:notify_cb(Key, OldData, Data, " ++ KVStr ++
+ ").\n"
+ " ".
notify_cb(Key, OldData, Data, KV) ->
save(KV, is_called, {Key, OldData, Data}).
start_epi(Plugins) ->
application:load(couch_epi),
- PluginsModules = lists:map(fun({Module, Body}) ->
- ok = generate_module(Module, Body),
- Module
- end, Plugins),
+ PluginsModules = lists:map(
+ fun({Module, Body}) ->
+ ok = generate_module(Module, Body),
+ Module
+ end,
+ Plugins
+ ),
application:set_env(couch_epi, plugins, PluginsModules),
application:start(couch_epi).
@@ -176,13 +188,13 @@ setup(data_file) ->
Pid = whereis(couch_epi:get_handle(Key)),
-
#ctx{
file = File,
key = Key,
handle = couch_epi:get_handle(Key),
kv = KV,
- pid = Pid};
+ pid = Pid
+ };
setup(static_data_module) ->
error_logger:tty(false),
@@ -201,7 +213,8 @@ setup(static_data_module) ->
handle = Handle,
modules = [Handle, provider],
kv = KV,
- pid = Pid};
+ pid = Pid
+ };
setup(callback_data_module) ->
error_logger:tty(false),
@@ -224,11 +237,12 @@ setup(callback_data_module) ->
Handle = couch_epi:get_handle(Key),
#ctx{
- key = Key,
- handle = Handle,
- modules = [Handle, provider],
- kv = KV,
- pid = Pid};
+ key = Key,
+ handle = Handle,
+ modules = [Handle, provider],
+ kv = KV,
+ pid = Pid
+ };
setup(functions) ->
Key = my_service,
error_logger:tty(false),
@@ -251,7 +265,8 @@ setup(functions) ->
handle = Handle,
modules = [Handle, provider1, provider2],
kv = KV,
- pid = Pid};
+ pid = Pid
+ };
setup({options, _Opts}) ->
setup(functions).
@@ -308,7 +323,6 @@ epi_data_source_test_() ->
[make_case("Check query API for: ", Cases, Funs)]
}.
-
epi_apply_test_() ->
{
"epi dispatch tests",
@@ -338,7 +352,6 @@ epi_providers_order_test_() ->
}
}.
-
epi_reload_test_() ->
Cases = [
data_file,
@@ -364,19 +377,21 @@ apply_options_test_() ->
[make_case("Apply with options: ", Setups, Funs)]
}.
-
make_case(Msg, {Tag, P}, Funs) ->
Cases = [{Tag, Case} || Case <- P],
make_case(Msg, Cases, Funs);
make_case(Msg, P, Funs) ->
- [{format_case_name(Msg, Case), [
- {
- foreachx, fun setup/1, fun teardown/2,
- [
- {Case, make_fun(Fun, 2)} || Fun <- Funs
- ]
- }
- ]} || Case <- P].
+ [
+ {format_case_name(Msg, Case), [
+ {
+ foreachx,
+ fun setup/1,
+ fun teardown/2,
+ [{Case, make_fun(Fun, 2)} || Fun <- Funs]
+ }
+ ]}
+ || Case <- P
+ ].
make_fun(Fun, Arity) ->
{arity, A} = lists:keyfind(arity, 1, erlang:fun_info(Fun)),
@@ -405,8 +420,8 @@ ensure_notified_when_changed(functions, #ctx{key = Key} = Ctx) ->
update(functions, Ctx),
Result = get(Ctx, is_called),
ExpectedDefs = [
- {provider1,[{inc,2},{fail,2}]},
- {provider2,[{inc,2},{fail,2}]}
+ {provider1, [{inc, 2}, {fail, 2}]},
+ {provider2, [{inc, 2}, {fail, 2}]}
],
?assertEqual({ok, {Key, ExpectedDefs, ExpectedDefs}}, Result),
ok
@@ -425,7 +440,8 @@ ensure_notified_when_changed(Case, #ctx{key = Key} = Ctx) ->
?assertMatch(ExpectedData, lists:usort(Data)),
?assertMatch(
[{[complex, key, 1], [{type, counter}, {desc, foo}]}],
- lists:usort(OldData))
+ lists:usort(OldData)
+ )
end).
ensure_not_notified_when_no_change(_Case, #ctx{key = Key} = Ctx) ->
@@ -463,15 +479,19 @@ check_broken_pipe(#ctx{handle = Handle, kv = KV, key = Key} = Ctx) ->
ensure_fail_pipe(#ctx{handle = Handle, kv = KV, key = Key}) ->
?_test(begin
- ?assertThrow(check_error,
- couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe])),
+ ?assertThrow(
+ check_error,
+ couch_epi:apply(Handle, Key, fail, [KV, 2], [pipe])
+ ),
ok
end).
ensure_fail(#ctx{handle = Handle, kv = KV, key = Key}) ->
?_test(begin
- ?assertThrow(check_error,
- couch_epi:apply(Handle, Key, fail, [KV, 2], [])),
+ ?assertThrow(
+ check_error,
+ couch_epi:apply(Handle, Key, fail, [KV, 2], [])
+ ),
ok
end).
@@ -483,52 +503,56 @@ check_dump(_Case, #ctx{handle = Handle}) ->
?_test(begin
?assertMatch(
[[{type, counter}, {desc, foo}]],
- couch_epi:dump(Handle))
+ couch_epi:dump(Handle)
+ )
end).
check_get(_Case, #ctx{handle = Handle}) ->
?_test(begin
?assertMatch(
[[{type, counter}, {desc, foo}]],
- couch_epi:get(Handle, [complex,key, 1]))
+ couch_epi:get(Handle, [complex, key, 1])
+ )
end).
check_get_value(_Case, #ctx{handle = Handle}) ->
?_test(begin
?assertMatch(
[{type, counter}, {desc, foo}],
- couch_epi:get_value(Handle, test_app, [complex,key, 1]))
+ couch_epi:get_value(Handle, test_app, [complex, key, 1])
+ )
end).
check_by_key(_Case, #ctx{handle = Handle}) ->
?_test(begin
?assertMatch(
- [{[complex, key, 1],
- [{test_app, [{type, counter}, {desc, foo}]}]}],
- couch_epi:by_key(Handle)),
+ [{[complex, key, 1], [{test_app, [{type, counter}, {desc, foo}]}]}],
+ couch_epi:by_key(Handle)
+ ),
?assertMatch(
[{test_app, [{type, counter}, {desc, foo}]}],
- couch_epi:by_key(Handle, [complex, key, 1]))
+ couch_epi:by_key(Handle, [complex, key, 1])
+ )
end).
check_by_source(_Case, #ctx{handle = Handle}) ->
?_test(begin
?assertMatch(
- [{test_app,
- [{[complex,key, 1], [{type, counter}, {desc, foo}]}]}],
- couch_epi:by_source(Handle)),
+ [{test_app, [{[complex, key, 1], [{type, counter}, {desc, foo}]}]}],
+ couch_epi:by_source(Handle)
+ ),
?assertMatch(
- [{[complex,key, 1], [{type, counter}, {desc, foo}]}],
- couch_epi:by_source(Handle, test_app))
+ [{[complex, key, 1], [{type, counter}, {desc, foo}]}],
+ couch_epi:by_source(Handle, test_app)
+ )
end).
check_keys(_Case, #ctx{handle = Handle}) ->
- ?_assertMatch([[complex,key,1]], couch_epi:keys(Handle)).
+ ?_assertMatch([[complex, key, 1]], couch_epi:keys(Handle)).
check_subscribers(_Case, #ctx{handle = Handle}) ->
?_assertMatch([test_app], couch_epi:subscribers(Handle)).
-
ensure_reload_if_manually_triggered(Case, #ctx{pid = Pid, key = Key} = Ctx) ->
?_test(begin
subscribe(Ctx, test_app, Key),
@@ -538,8 +562,10 @@ ensure_reload_if_manually_triggered(Case, #ctx{pid = Pid, key = Key} = Ctx) ->
?assertNotEqual(error, get(Ctx, is_called))
end).
-ensure_reload_if_changed(data_file = Case,
- #ctx{key = Key, handle = Handle} = Ctx) ->
+ensure_reload_if_changed(
+ data_file = Case,
+ #ctx{key = Key, handle = Handle} = Ctx
+) ->
?_test(begin
Version = Handle:version(),
subscribe(Ctx, test_app, Key),
@@ -548,19 +574,24 @@ ensure_reload_if_changed(data_file = Case,
?assertNotEqual(Version, Handle:version()),
?assertNotEqual(error, get(Ctx, is_called))
end);
-ensure_reload_if_changed(Case,
- #ctx{key = Key, handle = Handle} = Ctx) ->
+ensure_reload_if_changed(
+ Case,
+ #ctx{key = Key, handle = Handle} = Ctx
+) ->
?_test(begin
Version = Handle:version(),
subscribe(Ctx, test_app, Key),
update(Case, Ctx),
?assertNotEqual(Version, Handle:version()),
- timer:sleep(?RELOAD_WAIT), %% Allow some time for notify to be called
+ %% Allow some time for notify to be called
+ timer:sleep(?RELOAD_WAIT),
?assertNotEqual(error, get(Ctx, is_called))
end).
-ensure_no_reload_when_no_change(functions,
- #ctx{pid = Pid, key = Key, handle = Handle, modules = Modules} = Ctx) ->
+ensure_no_reload_when_no_change(
+ functions,
+ #ctx{pid = Pid, key = Key, handle = Handle, modules = Modules} = Ctx
+) ->
?_test(begin
Version = Handle:version(),
subscribe(Ctx, test_app, Key),
@@ -568,8 +599,10 @@ ensure_no_reload_when_no_change(functions,
?assertEqual(Version, Handle:version()),
?assertEqual(error, get(Ctx, is_called))
end);
-ensure_no_reload_when_no_change(_Case,
- #ctx{key = Key, handle = Handle} = Ctx) ->
+ensure_no_reload_when_no_change(
+ _Case,
+ #ctx{key = Key, handle = Handle} = Ctx
+) ->
?_test(begin
Version = Handle:version(),
subscribe(Ctx, test_app, Key),
@@ -638,7 +671,8 @@ wait_update(Ctx) ->
error ->
timer:sleep(?RELOAD_WAIT),
wait_update(Ctx);
- _ -> ok
+ _ ->
+ ok
end.
%% ------------
diff --git a/src/couch_event/src/couch_event.erl b/src/couch_event/src/couch_event.erl
index 9f8e501df..2579349d7 100644
--- a/src/couch_event/src/couch_event.erl
+++ b/src/couch_event/src/couch_event.erl
@@ -29,37 +29,28 @@
unregister/1
]).
-
-define(SERVER, couch_event_server).
-
notify(DbName, Event) ->
gen_server:cast(?SERVER, {notify, DbName, Event}).
-
listen(Module, Function, State, Options) ->
couch_event_listener_mfa:enter_loop(Module, Function, State, Options).
-
link_listener(Module, Function, State, Options) ->
couch_event_listener_mfa:start_link(Module, Function, State, Options).
-
stop_listener(Pid) ->
couch_event_listener_mfa:stop(Pid).
-
register(Pid, DbName) ->
gen_server:call(?SERVER, {register, Pid, [DbName]}).
-
register_many(Pid, DbNames) when is_list(DbNames) ->
gen_server:call(?SERVER, {register, Pid, DbNames}).
-
register_all(Pid) ->
gen_server:call(?SERVER, {register, Pid, [all_dbs]}).
-
unregister(Pid) ->
gen_server:call(?SERVER, {unregister, Pid}).
diff --git a/src/couch_event/src/couch_event_app.erl b/src/couch_event/src/couch_event_app.erl
index 3a8341b9e..19621f0bf 100644
--- a/src/couch_event/src/couch_event_app.erl
+++ b/src/couch_event/src/couch_event_app.erl
@@ -18,10 +18,8 @@
stop/1
]).
-
start(_StartType, _StartArgs) ->
couch_event_sup2:start_link().
-
stop(_State) ->
ok.
diff --git a/src/couch_event/src/couch_event_listener.erl b/src/couch_event/src/couch_event_listener.erl
index a9ed33199..40f1a5c65 100644
--- a/src/couch_event/src/couch_event_listener.erl
+++ b/src/couch_event/src/couch_event_listener.erl
@@ -12,7 +12,6 @@
-module(couch_event_listener).
-
-export([
start/3,
start/4,
@@ -27,13 +26,11 @@
loop/2
]).
-
-record(st, {
module,
state
}).
-
-callback init(Arg :: term()) ->
term().
@@ -49,12 +46,10 @@
-callback handle_info(Message :: term(), State :: term()) ->
term().
-
start(Mod, Arg, Options) ->
Pid = erlang:spawn(?MODULE, do_init, [Mod, Arg, Options]),
{ok, Pid}.
-
start(Name, Mod, Arg, Options) ->
case where(Name) of
undefined ->
@@ -63,12 +58,10 @@ start(Name, Mod, Arg, Options) ->
{error, {already_started, Pid}}
end.
-
start_link(Mod, Arg, Options) ->
Pid = erlang:spawn_link(?MODULE, do_init, [Mod, Arg, Options]),
{ok, Pid}.
-
start_link(Name, Mod, Arg, Options) ->
case where(Name) of
undefined ->
@@ -77,30 +70,26 @@ start_link(Name, Mod, Arg, Options) ->
{error, {already_started, Pid}}
end.
-
enter_loop(Module, State, Options) ->
ok = register_listeners(Options),
- ?MODULE:loop(#st{module=Module, state=State}, infinity).
-
+ ?MODULE:loop(#st{module = Module, state = State}, infinity).
cast(Pid, Message) ->
Pid ! {'$couch_event_cast', Message},
ok.
-
do_init(Module, Arg, Options) ->
ok = maybe_name_process(Options),
ok = register_listeners(Options),
case (catch Module:init(Arg)) of
{ok, State} ->
- ?MODULE:loop(#st{module=Module, state=State}, infinity);
+ ?MODULE:loop(#st{module = Module, state = State}, infinity);
{ok, State, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(#st{module=Module, state=State}, Timeout);
+ ?MODULE:loop(#st{module = Module, state = State}, Timeout);
Else ->
erlang:exit(Else)
end.
-
loop(St, Timeout) ->
receive
{'$couch_event', DbName, Event} ->
@@ -113,7 +102,6 @@ loop(St, Timeout) ->
do_info(St, timeout)
end.
-
maybe_name_process(Options) ->
case proplists:lookup(name, Options) of
{name, Name} ->
@@ -127,7 +115,6 @@ maybe_name_process(Options) ->
ok
end.
-
register_listeners(Options) ->
case get_all_dbnames(Options) of
all_dbs ->
@@ -137,85 +124,79 @@ register_listeners(Options) ->
end,
ok.
-
-do_event(#st{module=Module, state=State}=St, DbName, Event) ->
+do_event(#st{module = Module, state = State} = St, DbName, Event) ->
case (catch Module:handle_event(DbName, Event, State)) of
{ok, NewState} ->
- ?MODULE:loop(St#st{state=NewState}, infinity);
+ ?MODULE:loop(St#st{state = NewState}, infinity);
{ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state=NewState}, Timeout);
+ ?MODULE:loop(St#st{state = NewState}, Timeout);
{stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state=NewState});
+ do_terminate(Reason, St#st{state = NewState});
Else ->
erlang:error(Else)
end.
-
-do_cast(#st{module=Module, state=State}=St, Message) ->
+do_cast(#st{module = Module, state = State} = St, Message) ->
case (catch Module:handle_cast(Message, State)) of
{ok, NewState} ->
- ?MODULE:loop(St#st{state=NewState}, infinity);
+ ?MODULE:loop(St#st{state = NewState}, infinity);
{ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state=NewState}, Timeout);
+ ?MODULE:loop(St#st{state = NewState}, Timeout);
{stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state=NewState});
+ do_terminate(Reason, St#st{state = NewState});
Else ->
erlang:error(Else)
end.
-
-do_info(#st{module=Module, state=State}=St, Message) ->
+do_info(#st{module = Module, state = State} = St, Message) ->
case (catch Module:handle_info(Message, State)) of
{ok, NewState} ->
- ?MODULE:loop(St#st{state=NewState}, infinity);
+ ?MODULE:loop(St#st{state = NewState}, infinity);
{ok, NewState, Timeout} when is_integer(Timeout), Timeout >= 0 ->
- ?MODULE:loop(St#st{state=NewState}, Timeout);
+ ?MODULE:loop(St#st{state = NewState}, Timeout);
{stop, Reason, NewState} ->
- do_terminate(Reason, St#st{state=NewState});
+ do_terminate(Reason, St#st{state = NewState});
Else ->
erlang:error(Else)
end.
-
-do_terminate(Reason, #st{module=Module, state=State}) ->
+do_terminate(Reason, #st{module = Module, state = State}) ->
% Order matters. We want to make sure Module:terminate/1
% is called even if couch_event:unregister/1 hangs
% indefinitely.
catch Module:terminate(Reason, State),
catch couch_event:unregister(self()),
- Status = case Reason of
- normal -> normal;
- shutdown -> normal;
- ignore -> normal;
- Else -> Else
- end,
+ Status =
+ case Reason of
+ normal -> normal;
+ shutdown -> normal;
+ ignore -> normal;
+ Else -> Else
+ end,
erlang:exit(Status).
-
where({global, Name}) -> global:whereis_name(Name);
where({local, Name}) -> whereis(Name).
-
-name_register({global, Name}=GN) ->
+name_register({global, Name} = GN) ->
case global:register_name(Name, self()) of
yes -> true;
no -> {false, where(GN)}
end;
-name_register({local, Name}=LN) ->
+name_register({local, Name} = LN) ->
try register(Name, self()) of
true -> true
- catch error:_ ->
- {false, where(LN)}
+ catch
+ error:_ ->
+ {false, where(LN)}
end.
-
get_all_dbnames(Options) ->
case proplists:get_value(all_dbs, Options) of
true -> all_dbs;
_ -> get_all_dbnames(Options, [])
end.
-
get_all_dbnames([], []) ->
erlang:error(no_dbnames_provided);
get_all_dbnames([], Acc) ->
@@ -227,7 +208,6 @@ get_all_dbnames([{dbnames, DbNames} | Rest], Acc) when is_list(DbNames) ->
get_all_dbnames([_Ignored | Rest], Acc) ->
get_all_dbnames(Rest, Acc).
-
convert_dbname_list([]) ->
[];
convert_dbname_list([DbName | Rest]) when is_binary(DbName) ->
diff --git a/src/couch_event/src/couch_event_listener_mfa.erl b/src/couch_event/src/couch_event_listener_mfa.erl
index 9be58880a..b4cd9148a 100644
--- a/src/couch_event/src/couch_event_listener_mfa.erl
+++ b/src/couch_event/src/couch_event_listener_mfa.erl
@@ -13,7 +13,6 @@
-module(couch_event_listener_mfa).
-behavior(couch_event_listener).
-
-export([
start_link/4,
enter_loop/4,
@@ -28,7 +27,6 @@
handle_info/2
]).
-
-record(st, {
mod,
func,
@@ -36,24 +34,24 @@
parent
}).
-
start_link(Mod, Func, State, Options) ->
- Parent = case proplists:get_value(parent, Options) of
- P when is_pid(P) -> P;
- _ -> self()
- end,
+ Parent =
+ case proplists:get_value(parent, Options) of
+ P when is_pid(P) -> P;
+ _ -> self()
+ end,
Arg = {Parent, Mod, Func, State},
couch_event_listener:start_link(?MODULE, Arg, Options).
-
enter_loop(Mod, Func, State, Options) ->
- Parent = case proplists:get_value(parent, Options) of
- P when is_pid(P) ->
- erlang:monitor(process, P),
- P;
- _ ->
- undefined
- end,
+ Parent =
+ case proplists:get_value(parent, Options) of
+ P when is_pid(P) ->
+ erlang:monitor(process, P),
+ P;
+ _ ->
+ undefined
+ end,
St = #st{
mod = Mod,
func = Func,
@@ -62,11 +60,9 @@ enter_loop(Mod, Func, State, Options) ->
},
couch_event_listener:enter_loop(?MODULE, St, Options).
-
stop(Pid) ->
couch_event_listener:cast(Pid, shutdown).
-
init({Parent, Mod, Func, State}) ->
erlang:monitor(process, Parent),
{ok, #st{
@@ -76,32 +72,25 @@ init({Parent, Mod, Func, State}) ->
parent = Parent
}}.
-
terminate(_Reason, _MFA) ->
ok.
-
-handle_event(DbName, Event, #st{mod=Mod, func=Func, state=State}=St) ->
+handle_event(DbName, Event, #st{mod = Mod, func = Func, state = State} = St) ->
case (catch Mod:Func(DbName, Event, State)) of
{ok, NewState} ->
- {ok, St#st{state=NewState}};
+ {ok, St#st{state = NewState}};
stop ->
{stop, normal, St};
Else ->
erlang:error(Else)
end.
-
handle_cast(shutdown, St) ->
{stop, normal, St};
-
handle_cast(_Msg, St) ->
{ok, St}.
-
-handle_info({'DOWN', _Ref, process, Parent, _Reason}, #st{parent=Parent}=St) ->
+handle_info({'DOWN', _Ref, process, Parent, _Reason}, #st{parent = Parent} = St) ->
{stop, normal, St};
-
handle_info(_Msg, St) ->
{ok, St}.
-
diff --git a/src/couch_event/src/couch_event_os_listener.erl b/src/couch_event/src/couch_event_os_listener.erl
index 4de0a4416..ef379402a 100644
--- a/src/couch_event/src/couch_event_os_listener.erl
+++ b/src/couch_event/src/couch_event_os_listener.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/1
]).
@@ -28,49 +27,41 @@
code_change/3
]).
-
start_link(Exe) when is_list(Exe) ->
gen_server:start_link(?MODULE, Exe, []).
-
init(Exe) ->
process_flag(trap_exit, true),
ok = couch_event:register_all(self()),
couch_os_process:start_link(Exe, []).
-
terminate(_Reason, Pid) when is_pid(Pid) ->
couch_os_process:stop(Pid);
terminate(_Reason, _Pid) ->
ok.
-
handle_call(Msg, From, Pid) ->
couch_log:notice("~s ignoring call ~w from ~w", [?MODULE, Msg, From]),
{reply, ignored, Pid, 0}.
-
handle_cast(Msg, Pid) ->
couch_log:notice("~s ignoring cast ~w", [?MODULE, Msg]),
{noreply, Pid, 0}.
-
handle_info({'$couch_event', DbName, Event}, Pid) ->
- Obj = {[
- {db, DbName},
- {type, list_to_binary(atom_to_list(Event))}
- ]},
+ Obj =
+ {[
+ {db, DbName},
+ {type, list_to_binary(atom_to_list(Event))}
+ ]},
ok = couch_os_process:send(Pid, Obj),
{noreply, Pid};
-
handle_info({'EXIT', Pid, Reason}, Pid) ->
couch_log:error("Update notificatio process ~w died: ~w", [Pid, Reason]),
{stop, normal, nil};
-
handle_info(Msg, Pid) ->
couch_log:notice("~s ignoring info ~w", [?MODULE, Msg]),
{noreply, Pid, 0}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
diff --git a/src/couch_event/src/couch_event_server.erl b/src/couch_event/src/couch_event_server.erl
index 321e8fafd..f633a8409 100644
--- a/src/couch_event/src/couch_event_server.erl
+++ b/src/couch_event/src/couch_event_server.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0
]).
@@ -28,20 +27,16 @@
code_change/3
]).
-
-include("couch_event_int.hrl").
-
-record(st, {
by_pid,
by_dbname
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, nil, []).
-
init(_) ->
{ok, ByPid} = khash:new(),
{ok, ByDbName} = khash:new(),
@@ -50,11 +45,9 @@ init(_) ->
by_dbname = ByDbName
}}.
-
terminate(_Reason, _St) ->
ok.
-
handle_call({register, Pid, NewDbNames}, _From, St) ->
case khash:get(St#st.by_pid, Pid) of
undefined ->
@@ -65,32 +58,28 @@ handle_call({register, Pid, NewDbNames}, _From, St) ->
register(St, ReuseRef, Pid, NewDbNames)
end,
{reply, ok, St};
-
handle_call({unregister, Pid}, _From, St) ->
- Reply = case khash:get(St#st.by_pid, Pid) of
- undefined ->
- not_registered;
- {Ref, OldDbNames} ->
- unregister(St, Pid, OldDbNames),
- erlang:demonitor(Ref, [flush]),
- ok
- end,
+ Reply =
+ case khash:get(St#st.by_pid, Pid) of
+ undefined ->
+ not_registered;
+ {Ref, OldDbNames} ->
+ unregister(St, Pid, OldDbNames),
+ erlang:demonitor(Ref, [flush]),
+ ok
+ end,
{reply, Reply, St};
-
handle_call(Msg, From, St) ->
couch_log:notice("~s ignoring call ~w from ~w", [?MODULE, Msg, From]),
{reply, ignored, St}.
-
handle_cast({notify, DbName, Event}, St) ->
notify_listeners(St#st.by_dbname, DbName, Event),
{noreply, St};
-
handle_cast(Msg, St) ->
couch_log:notice("~s ignoring cast ~w", [?MODULE, Msg]),
{noreply, St}.
-
handle_info({'DOWN', Ref, process, Pid, _Reason}, St) ->
case khash:get(St#st.by_pid, Pid) of
{Ref, OldDbNames} ->
@@ -99,35 +88,38 @@ handle_info({'DOWN', Ref, process, Pid, _Reason}, St) ->
ok
end,
{noreply, St};
-
-
handle_info(Msg, St) ->
couch_log:notice("~s ignoring info ~w", [?MODULE, Msg]),
{noreply, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
notify_listeners(ByDbName, DbName, Event) ->
Msg = {'$couch_event', DbName, Event},
notify_listeners(khash:get(ByDbName, all_dbs), Msg),
notify_listeners(khash:get(ByDbName, DbName), Msg).
-
notify_listeners(undefined, _) ->
ok;
notify_listeners(Listeners, Msg) ->
- khash:fold(Listeners, fun(Pid, _, _) -> Pid ! Msg, nil end, nil).
-
+ khash:fold(
+ Listeners,
+ fun(Pid, _, _) ->
+ Pid ! Msg,
+ nil
+ end,
+ nil
+ ).
register(St, Ref, Pid, DbNames) ->
khash:put(St#st.by_pid, Pid, {Ref, DbNames}),
- lists:foreach(fun(DbName) ->
- add_listener(St#st.by_dbname, DbName, Pid)
- end, DbNames).
-
+ lists:foreach(
+ fun(DbName) ->
+ add_listener(St#st.by_dbname, DbName, Pid)
+ end,
+ DbNames
+ ).
add_listener(ByDbName, DbName, Pid) ->
case khash:lookup(ByDbName, DbName) of
@@ -139,18 +131,20 @@ add_listener(ByDbName, DbName, Pid) ->
khash:put(ByDbName, DbName, NewListeners)
end.
-
unregister(St, Pid, OldDbNames) ->
ok = khash:del(St#st.by_pid, Pid),
- lists:foreach(fun(DbName) ->
- rem_listener(St#st.by_dbname, DbName, Pid)
- end, OldDbNames).
-
+ lists:foreach(
+ fun(DbName) ->
+ rem_listener(St#st.by_dbname, DbName, Pid)
+ end,
+ OldDbNames
+ ).
rem_listener(ByDbName, DbName, Pid) ->
{value, Listeners} = khash:lookup(ByDbName, DbName),
khash:del(Listeners, Pid),
Size = khash:size(Listeners),
- if Size > 0 -> ok; true ->
- khash:del(ByDbName, DbName)
+ if
+ Size > 0 -> ok;
+ true -> khash:del(ByDbName, DbName)
end.
diff --git a/src/couch_event/src/couch_event_sup2.erl b/src/couch_event/src/couch_event_sup2.erl
index 2d88b93d4..a815c440b 100644
--- a/src/couch_event/src/couch_event_sup2.erl
+++ b/src/couch_event/src/couch_event_sup2.erl
@@ -19,26 +19,18 @@
-module(couch_event_sup2).
-behavior(supervisor).
-
-export([
start_link/0,
init/1
]).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, nil).
-
init(_) ->
Children = [
- {couch_event_server,
- {couch_event_server, start_link, []},
- permanent,
- 5000,
- worker,
- [couch_event_server]
- }
+ {couch_event_server, {couch_event_server, start_link, []}, permanent, 5000, worker, [
+ couch_event_server
+ ]}
],
{ok, {{one_for_one, 5, 10}, Children}}.
-
diff --git a/src/couch_index/src/couch_index.erl b/src/couch_index/src/couch_index.erl
index 83eadc706..a6b62be7c 100644
--- a/src/couch_index/src/couch_index.erl
+++ b/src/couch_index/src/couch_index.erl
@@ -26,48 +26,40 @@
-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
-
-include_lib("couch/include/couch_db.hrl").
-
--define(CHECK_INTERVAL, 600000). % 10 minutes
+% 10 minutes
+-define(CHECK_INTERVAL, 600000).
-record(st, {
mod,
idx_state,
updater,
compactor,
- waiters=[],
- committed=true,
- shutdown=false
+ waiters = [],
+ committed = true,
+ shutdown = false
}).
-
start_link({Module0, IdxState0}) ->
[Module, IdxState] = couch_index_plugin:before_open(Module0, IdxState0),
proc_lib:start_link(?MODULE, init, [{Module, IdxState}]).
-
stop(Pid) ->
gen_server:cast(Pid, stop).
-
get_state(Pid, RequestSeq) ->
gen_server:call(Pid, {get_state, RequestSeq}, infinity).
-
get_info(Pid) ->
gen_server:call(Pid, get_info, group_info_timeout_msec()).
-
trigger_update(Pid, UpdateSeq) ->
gen_server:cast(Pid, {trigger_update, UpdateSeq}).
-
compact(Pid) ->
compact(Pid, []).
-
compact(Pid, Options) ->
{ok, CPid} = gen_server:call(Pid, compact),
case lists:member(monitor, Options) of
@@ -75,7 +67,6 @@ compact(Pid, Options) ->
false -> ok
end.
-
get_compactor_pid(Pid) ->
gen_server:call(Pid, get_compactor_pid).
@@ -96,10 +87,10 @@ init({Mod, IdxState}) ->
{ok, UPid} = couch_index_updater:start_link(self(), Mod),
{ok, CPid} = couch_index_compactor:start_link(self(), Mod),
State = #st{
- mod=Mod,
- idx_state=NewIdxState,
- updater=UPid,
- compactor=CPid
+ mod = Mod,
+ idx_state = NewIdxState,
+ updater = UPid,
+ compactor = CPid
},
Args = [
Mod:get(db_name, IdxState),
@@ -113,9 +104,8 @@ init({Mod, IdxState}) ->
proc_lib:init_ack(Other)
end.
-
terminate(Reason0, State) ->
- #st{mod=Mod, idx_state=IdxState}=State,
+ #st{mod = Mod, idx_state = IdxState} = State,
case Reason0 of
{shutdown, ddoc_updated} ->
Mod:shutdown(IdxState),
@@ -136,24 +126,24 @@ terminate(Reason0, State) ->
couch_log:debug("Closing index for db: ~s idx: ~s sig: ~p because ~r", Args),
ok.
-
handle_call({get_state, ReqSeq}, From, State) ->
#st{
- mod=Mod,
- idx_state=IdxState,
- waiters=Waiters
+ mod = Mod,
+ idx_state = IdxState,
+ waiters = Waiters
} = State,
IdxSeq = Mod:get(update_seq, IdxState),
case ReqSeq =< IdxSeq of
true ->
{reply, {ok, IdxState}, State};
- _ -> % View update required
+ % View update required
+ _ ->
couch_index_updater:run(State#st.updater, IdxState),
Waiters2 = [{From, ReqSeq} | Waiters],
- {noreply, State#st{waiters=Waiters2}, infinity}
+ {noreply, State#st{waiters = Waiters2}, infinity}
end;
handle_call(get_info, _From, State) ->
- #st{mod=Mod} = State,
+ #st{mod = Mod} = State,
IdxState = State#st.idx_state,
{ok, Info0} = Mod:get(info, IdxState),
IsUpdating = couch_index_updater:is_running(State#st.updater),
@@ -162,21 +152,23 @@ handle_call(get_info, _From, State) ->
GetCommSeq = fun(Db) -> couch_db:get_committed_update_seq(Db) end,
DbName = Mod:get(db_name, IdxState),
CommittedSeq = couch_util:with_db(DbName, GetCommSeq),
- Info = Info0 ++ [
- {updater_running, IsUpdating},
- {compact_running, IsCompacting},
- {waiting_commit, State#st.committed == false},
- {waiting_clients, length(State#st.waiters)},
- {pending_updates, max(CommittedSeq - IdxSeq, 0)}
- ],
+ Info =
+ Info0 ++
+ [
+ {updater_running, IsUpdating},
+ {compact_running, IsCompacting},
+ {waiting_commit, State#st.committed == false},
+ {waiting_clients, length(State#st.waiters)},
+ {pending_updates, max(CommittedSeq - IdxSeq, 0)}
+ ],
{reply, {ok, Info}, State};
handle_call(reset, _From, State) ->
#st{
- mod=Mod,
- idx_state=IdxState
+ mod = Mod,
+ idx_state = IdxState
} = State,
{ok, NewIdxState} = Mod:reset(IdxState),
- {reply, {ok, NewIdxState}, State#st{idx_state=NewIdxState}};
+ {reply, {ok, NewIdxState}, State#st{idx_state = NewIdxState}};
handle_call(compact, _From, State) ->
Resp = couch_index_compactor:run(State#st.compactor, State#st.idx_state),
{reply, Resp, State};
@@ -184,8 +176,8 @@ handle_call(get_compactor_pid, _From, State) ->
{reply, {ok, State#st.compactor}, State};
handle_call({compacted, NewIdxState}, _From, State) ->
#st{
- mod=Mod,
- idx_state=OldIdxState
+ mod = Mod,
+ idx_state = OldIdxState
} = State,
assert_signature_match(Mod, OldIdxState, NewIdxState),
NewSeq = Mod:get(update_seq, NewIdxState),
@@ -215,8 +207,8 @@ handle_call({compaction_failed, Reason}, _From, State) ->
handle_cast({trigger_update, UpdateSeq}, State) ->
#st{
- mod=Mod,
- idx_state=IdxState
+ mod = Mod,
+ idx_state = IdxState
} = State,
case UpdateSeq =< Mod:get(update_seq, IdxState) of
true ->
@@ -236,8 +228,8 @@ handle_cast({updated, NewIdxState}, State) ->
end;
handle_cast({new_state, NewIdxState}, State) ->
#st{
- mod=Mod,
- idx_state=OldIdxState
+ mod = Mod,
+ idx_state = OldIdxState
} = State,
OldFd = Mod:get(fd, OldIdxState),
NewFd = Mod:get(fd, NewIdxState),
@@ -257,9 +249,9 @@ handle_cast({new_state, NewIdxState}, State) ->
false -> ok
end,
{noreply, State#st{
- idx_state=NewIdxState,
- waiters=Rest,
- committed=false
+ idx_state = NewIdxState,
+ waiters = Rest,
+ committed = false
}};
false ->
Fmt = "Ignoring update from old indexer for db: ~s idx: ~s",
@@ -272,25 +264,26 @@ handle_cast({new_state, NewIdxState}, State) ->
end;
handle_cast({update_error, Error}, State) ->
send_all(State#st.waiters, Error),
- {noreply, State#st{waiters=[]}};
+ {noreply, State#st{waiters = []}};
handle_cast(stop, State) ->
{stop, normal, State};
handle_cast(delete, State) ->
- #st{mod=Mod, idx_state=IdxState} = State,
+ #st{mod = Mod, idx_state = IdxState} = State,
ok = Mod:delete(IdxState),
{stop, normal, State};
handle_cast({ddoc_updated, DDocResult}, State) ->
#st{mod = Mod, idx_state = IdxState} = State,
- Shutdown = case DDocResult of
- {not_found, deleted} ->
- true;
- {ok, DDoc} ->
- DbName = Mod:get(db_name, IdxState),
- couch_util:with_db(DbName, fun(Db) ->
- {ok, NewIdxState} = Mod:init(Db, DDoc),
- Mod:get(signature, NewIdxState) =/= Mod:get(signature, IdxState)
- end)
- end,
+ Shutdown =
+ case DDocResult of
+ {not_found, deleted} ->
+ true;
+ {ok, DDoc} ->
+ DbName = Mod:get(db_name, IdxState),
+ couch_util:with_db(DbName, fun(Db) ->
+ {ok, NewIdxState} = Mod:init(Db, DDoc),
+ Mod:get(signature, NewIdxState) =/= Mod:get(signature, IdxState)
+ end)
+ end,
case Shutdown of
true ->
{stop, {shutdown, ddoc_updated}, State#st{shutdown = true}};
@@ -319,10 +312,10 @@ handle_cast(ddoc_updated, State) ->
handle_cast(_Mesg, State) ->
{stop, unhandled_cast, State}.
-handle_info(commit, #st{committed=true}=State) ->
+handle_info(commit, #st{committed = true} = State) ->
{noreply, State};
handle_info(commit, State) ->
- #st{mod=Mod, idx_state=IdxState} = State,
+ #st{mod = Mod, idx_state = IdxState} = State,
DbName = Mod:get(db_name, IdxState),
IdxName = Mod:get(idx_name, IdxState),
GetCommSeq = fun(Db) -> couch_db:get_committed_update_seq(Db) end,
@@ -332,7 +325,7 @@ handle_info(commit, State) ->
% Commit the updates
ok = Mod:commit(IdxState),
couch_event:notify(DbName, {index_commit, IdxName}),
- {noreply, State#st{committed=true}};
+ {noreply, State#st{committed = true}};
_ ->
% We can't commit the header because the database seq that's
% fully committed to disk is still behind us. If we committed
@@ -366,25 +359,23 @@ handle_info(maybe_close, State) ->
erlang:send_after(?CHECK_INTERVAL, self(), maybe_close),
{noreply, State}
end;
-handle_info({'DOWN', _, _, _Pid, _}, #st{mod=Mod, idx_state=IdxState}=State) ->
+handle_info({'DOWN', _, _, _Pid, _}, #st{mod = Mod, idx_state = IdxState} = State) ->
Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
couch_log:debug("Index shutdown by monitor notice for db: ~s idx: ~s", Args),
catch send_all(State#st.waiters, shutdown),
- {stop, normal, State#st{waiters=[]}}.
+ {stop, normal, State#st{waiters = []}}.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-maybe_restart_updater(#st{waiters=[]}) ->
+maybe_restart_updater(#st{waiters = []}) ->
ok;
-maybe_restart_updater(#st{idx_state=IdxState}=State) ->
+maybe_restart_updater(#st{idx_state = IdxState} = State) ->
couch_index_updater:run(State#st.updater, IdxState).
-
send_all(Waiters, Reply) ->
[gen_server:reply(From, Reply) || {From, _} <- Waiters].
-
send_replies(Waiters, UpdateSeq, IdxState) ->
Pred = fun({_, S}) -> S =< UpdateSeq end,
{ToSend, Remaining} = lists:partition(Pred, Waiters),
@@ -399,9 +390,9 @@ assert_signature_match(Mod, OldIdxState, NewIdxState) ->
commit_compacted(NewIdxState, State) ->
#st{
- mod=Mod,
- idx_state=OldIdxState,
- updater=Updater
+ mod = Mod,
+ idx_state = OldIdxState,
+ updater = Updater
} = State,
{ok, NewIdxState1} = Mod:swap_compacted(OldIdxState, NewIdxState),
% Restart the indexer if it's running.
@@ -414,9 +405,9 @@ commit_compacted(NewIdxState, State) ->
false -> ok
end,
State#st{
- idx_state=NewIdxState1,
- committed=false
- }.
+ idx_state = NewIdxState1,
+ committed = false
+ }.
is_recompaction_enabled(IdxState, #st{mod = Mod}) ->
DbName = binary_to_list(Mod:get(db_name, IdxState)),
@@ -449,7 +440,6 @@ get_value(Section, Key) ->
commit_delay() ->
config:get_integer("query_server_config", "commit_freq", 5) * 1000.
-
group_info_timeout_msec() ->
Timeout = config:get("query_server_config", "group_info_timeout", "5000"),
case Timeout of
@@ -459,7 +449,6 @@ group_info_timeout_msec() ->
list_to_integer(Milliseconds)
end.
-
-ifdef(TEST).
-include_lib("couch/include/couch_eunit.hrl").
@@ -468,7 +457,7 @@ get(db_name, _, _) ->
get(idx_name, _, _) ->
<<"idx_name">>;
get(signature, _, _) ->
- <<61,237,157,230,136,93,96,201,204,17,137,186,50,249,44,135>>.
+ <<61, 237, 157, 230, 136, 93, 96, 201, 204, 17, 137, 186, 50, 249, 44, 135>>.
setup_all() ->
Ctx = test_util:start_couch(),
@@ -526,7 +515,7 @@ recompaction_configuration_tests() ->
EnabledCases = [
[undefined, undefined, undefined],
- [undefined, undefined,"enabled"],
+ [undefined, undefined, "enabled"],
[undefined, "enabled", undefined],
[undefined, "disabled", "enabled"],
[undefined, "enabled", "enabled"],
@@ -563,21 +552,26 @@ recompaction_configuration_tests() ->
?assertEqual([], AllCases -- (EnabledCases ++ DisabledCases)),
- [{Settings, fun should_not_call_recompact/2} || Settings <- DisabledCases]
- ++
- [{Settings, fun should_call_recompact/2} || Settings <- EnabledCases].
+ [{Settings, fun should_not_call_recompact/2} || Settings <- DisabledCases] ++
+ [{Settings, fun should_call_recompact/2} || Settings <- EnabledCases].
should_call_recompact(Settings, {IdxState, State}) ->
- {test_id(Settings), ?_test(begin
- ?assert(is_recompaction_enabled(IdxState, State)),
- ok
- end)}.
+ {
+ test_id(Settings),
+ ?_test(begin
+ ?assert(is_recompaction_enabled(IdxState, State)),
+ ok
+ end)
+ }.
should_not_call_recompact(Settings, {IdxState, State}) ->
- {test_id(Settings), ?_test(begin
- ?assertNot(is_recompaction_enabled(IdxState, State)),
- ok
- end)}.
+ {
+ test_id(Settings),
+ ?_test(begin
+ ?assertNot(is_recompaction_enabled(IdxState, State)),
+ ok
+ end)
+ }.
to_string(undefined) -> "undefined";
to_string(Value) -> Value.
@@ -586,7 +580,6 @@ test_id(Settings0) ->
Settings1 = [to_string(Value) || Value <- Settings0],
"[ " ++ lists:flatten(string:join(Settings1, " , ")) ++ " ]".
-
get_group_timeout_info_test_() ->
{
foreach,
@@ -598,25 +591,28 @@ get_group_timeout_info_test_() ->
]
}.
-
t_group_timeout_info_integer() ->
- ?_test(begin
- meck:expect(config, get,
+ ?_test(begin
+ meck:expect(
+ config,
+ get,
fun("query_server_config", "group_info_timeout", _) ->
- "5001"
- end),
+ "5001"
+ end
+ ),
?assertEqual(5001, group_info_timeout_msec())
end).
-
t_group_timeout_info_infinity() ->
- ?_test(begin
- meck:expect(config, get,
+ ?_test(begin
+ meck:expect(
+ config,
+ get,
fun("query_server_config", "group_info_timeout", _) ->
"infinity"
- end),
+ end
+ ),
?assertEqual(infinity, group_info_timeout_msec())
end).
-
-endif.
diff --git a/src/couch_index/src/couch_index_compactor.erl b/src/couch_index/src/couch_index_compactor.erl
index 8849cf67d..8b592d140 100644
--- a/src/couch_index/src/couch_index_compactor.erl
+++ b/src/couch_index/src/couch_index_compactor.erl
@@ -13,7 +13,6 @@
-module(couch_index_compactor).
-behaviour(gen_server).
-
%% API
-export([start_link/2, run/2, cancel/1, is_running/1, get_compacting_pid/1]).
@@ -21,29 +20,23 @@
-export([init/1, terminate/2, code_change/3]).
-export([handle_call/3, handle_cast/2, handle_info/2]).
-
-include_lib("couch/include/couch_db.hrl").
-
-record(st, {
idx,
mod,
pid
}).
-
start_link(Index, Module) ->
gen_server:start_link(?MODULE, {Index, Module}, []).
-
run(Pid, IdxState) ->
gen_server:call(Pid, {compact, IdxState}).
-
cancel(Pid) ->
gen_server:call(Pid, cancel).
-
is_running(Pid) ->
gen_server:call(Pid, is_running).
@@ -52,39 +45,35 @@ get_compacting_pid(Pid) ->
init({Index, Module}) ->
process_flag(trap_exit, true),
- {ok, #st{idx=Index, mod=Module}}.
-
+ {ok, #st{idx = Index, mod = Module}}.
terminate(_Reason, State) ->
couch_util:shutdown_sync(State#st.pid),
ok.
-
-handle_call({compact, _}, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
+handle_call({compact, _}, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
{reply, {ok, Pid}, State};
-handle_call({compact, IdxState}, _From, #st{idx=Idx}=State) ->
+handle_call({compact, IdxState}, _From, #st{idx = Idx} = State) ->
Pid = spawn_link(fun() -> compact(Idx, State#st.mod, IdxState) end),
- {reply, {ok, Pid}, State#st{pid=Pid}};
-handle_call(cancel, _From, #st{pid=undefined}=State) ->
+ {reply, {ok, Pid}, State#st{pid = Pid}};
+handle_call(cancel, _From, #st{pid = undefined} = State) ->
{reply, ok, State};
-handle_call(cancel, _From, #st{pid=Pid}=State) ->
+handle_call(cancel, _From, #st{pid = Pid} = State) ->
unlink(Pid),
exit(Pid, kill),
- {reply, ok, State#st{pid=undefined}};
-handle_call(get_compacting_pid, _From, #st{pid=Pid}=State) ->
+ {reply, ok, State#st{pid = undefined}};
+handle_call(get_compacting_pid, _From, #st{pid = Pid} = State) ->
{reply, {ok, Pid}, State};
-handle_call(is_running, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
+handle_call(is_running, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
{reply, true, State};
handle_call(is_running, _From, State) ->
{reply, false, State}.
-
handle_cast(_Mesg, State) ->
{stop, unknown_cast, State}.
-
-handle_info({'EXIT', Pid, normal}, #st{pid=Pid}=State) ->
- {noreply, State#st{pid=undefined}};
+handle_info({'EXIT', Pid, normal}, #st{pid = Pid} = State) ->
+ {noreply, State#st{pid = undefined}};
handle_info({'EXIT', Pid, Reason}, #st{pid = Pid} = State) ->
#st{idx = Idx, mod = Mod} = State,
{ok, IdxState} = gen_server:call(Idx, {compaction_failed, Reason}),
@@ -95,16 +84,14 @@ handle_info({'EXIT', Pid, Reason}, #st{pid = Pid} = State) ->
{noreply, State#st{pid = undefined}};
handle_info({'EXIT', _Pid, normal}, State) ->
{noreply, State};
-handle_info({'EXIT', Pid, _Reason}, #st{idx=Pid}=State) ->
+handle_info({'EXIT', Pid, _Reason}, #st{idx = Pid} = State) ->
{stop, normal, State};
handle_info(_Mesg, State) ->
{stop, unknown_info, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
compact(Parent, Mod, IdxState) ->
DbName = Mod:get(db_name, IdxState),
%% We use with_db here to make sure we hold db open
diff --git a/src/couch_index/src/couch_index_plugin_couch_db.erl b/src/couch_index/src/couch_index_plugin_couch_db.erl
index 0af22e396..b90fa6cf2 100644
--- a/src/couch_index/src/couch_index_plugin_couch_db.erl
+++ b/src/couch_index/src/couch_index_plugin_couch_db.erl
@@ -17,10 +17,8 @@
on_compact/2
]).
-
is_valid_purge_client(DbName, Props) ->
couch_mrview_index:verify_index_exists(DbName, Props).
-
on_compact(DbName, DDocs) ->
couch_mrview_index:ensure_local_purge_docs(DbName, DDocs).
diff --git a/src/couch_index/src/couch_index_server.erl b/src/couch_index/src/couch_index_server.erl
index 6bebff2d8..77f91cc5b 100644
--- a/src/couch_index/src/couch_index_server.erl
+++ b/src/couch_index/src/couch_index_server.erl
@@ -40,37 +40,37 @@
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
validate(Db, DDoc) ->
LoadModFun = fun
({ModNameList, "true"}) ->
try
[list_to_existing_atom(ModNameList)]
- catch error:badarg ->
- []
+ catch
+ error:badarg ->
+ []
end;
({_ModNameList, _Enabled}) ->
[]
end,
- ValidateFun = fun
- (ModName) ->
- ModName:validate(Db, DDoc)
+ ValidateFun = fun(ModName) ->
+ ModName:validate(Db, DDoc)
end,
EnabledIndexers = lists:flatmap(LoadModFun, config:get("indexers")),
lists:foreach(ValidateFun, EnabledIndexers).
-
-get_index(Module, <<"shards/", _/binary>> = DbName, DDoc)
- when is_record(DDoc, doc) ->
+get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) when
+ is_record(DDoc, doc)
+->
get_index(Module, DbName, DDoc, nil);
get_index(Module, <<"shards/", _/binary>> = DbName, DDoc) ->
{Pid, Ref} = spawn_monitor(fun() ->
exit(fabric:open_doc(mem3:dbname(DbName), DDoc, [ejson_body, ?ADMIN_CTX]))
end),
- receive {'DOWN', Ref, process, Pid, {ok, Doc}} ->
- get_index(Module, DbName, Doc, nil);
- {'DOWN', Ref, process, Pid, Error} ->
- Error
+ receive
+ {'DOWN', Ref, process, Pid, {ok, Doc}} ->
+ get_index(Module, DbName, Doc, nil);
+ {'DOWN', Ref, process, Pid, Error} ->
+ Error
after 61000 ->
erlang:demonitor(Ref, [flush]),
{error, timeout}
@@ -80,7 +80,6 @@ get_index(Module, DbName, DDoc) when is_binary(DbName) ->
get_index(Module, Db, DDoc) ->
get_index(Module, couch_db:name(Db), DDoc).
-
get_index(Module, DbName, DDoc, Fun) when is_binary(DbName) ->
couch_util:with_db(DbName, fun(Db) ->
get_index(Module, Db, DDoc, Fun)
@@ -99,7 +98,6 @@ get_index(Module, Db, DDoc, _Fun) ->
{ok, InitState} = Module:init(Db, DDoc),
get_index(Module, InitState).
-
get_index(Module, IdxState) ->
DbName = Module:get(db_name, IdxState),
Sig = Module:get(signature, IdxState),
@@ -110,7 +108,8 @@ get_index(Module, IdxState) ->
[] ->
Args = [Pid, DbName, DDocId, Sig],
gen_server:cast(?MODULE, {add_to_ets, Args});
- _ -> ok
+ _ ->
+ ok
end,
{ok, Pid};
_ ->
@@ -118,7 +117,6 @@ get_index(Module, IdxState) ->
gen_server:call(?MODULE, {get_index, Args}, infinity)
end.
-
init([]) ->
process_flag(trap_exit, true),
ok = config:listen_for_changes(?MODULE, couch_index_util:root_dir()),
@@ -128,16 +126,14 @@ init([]) ->
couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]),
RootDir = couch_index_util:root_dir(),
couch_file:init_delete_dir(RootDir),
- {ok, #st{root_dir=RootDir}}.
-
+ {ok, #st{root_dir = RootDir}}.
terminate(_Reason, _State) ->
Pids = [Pid || {Pid, _} <- ets:tab2list(?BY_PID)],
lists:map(fun couch_util:shutdown_sync/1, Pids),
ok.
-
-handle_call({get_index, {_Mod, _IdxState, DbName, Sig}=Args}, From, State) ->
+handle_call({get_index, {_Mod, _IdxState, DbName, Sig} = Args}, From, State) ->
case ets:lookup(?BY_SIG, {DbName, Sig}) of
[] ->
spawn_link(fun() -> new_index(Args) end),
@@ -164,7 +160,6 @@ handle_call({reset_indexes, DbName}, _From, State) ->
reset_indexes(DbName, State#st.root_dir),
{reply, ok, State}.
-
handle_cast({reset_indexes, DbName}, State) ->
reset_indexes(DbName, State#st.root_dir),
{noreply, State};
@@ -173,7 +168,8 @@ handle_cast({add_to_ets, [Pid, DbName, DDocId, Sig]}, State) ->
case ets:lookup(?BY_PID, Pid) of
[{Pid, {DbName, Sig}}] when is_pid(Pid) ->
ets:insert(?BY_DB, {DbName, {DDocId, Sig}});
- _ -> ok
+ _ ->
+ ok
end,
{noreply, State};
handle_cast({rem_from_ets, [DbName, DDocId, Sig]}, State) ->
@@ -183,8 +179,11 @@ handle_cast({rem_from_ets, [DbName, DDocId, Sig]}, State) ->
handle_info({'EXIT', Pid, Reason}, Server) ->
case ets:lookup(?BY_PID, Pid) of
[{Pid, {DbName, Sig}}] ->
- DDocIds = [DDocId || {_, {DDocId, _}}
- <- ets:match_object(?BY_DB, {DbName, {'$1', Sig}})],
+ DDocIds = [
+ DDocId
+ || {_, {DDocId, _}} <-
+ ets:match_object(?BY_DB, {DbName, {'$1', Sig}})
+ ],
rem_from_ets(DbName, Sig, DDocIds, Pid);
[] when Reason /= normal ->
exit(Reason);
@@ -199,11 +198,9 @@ handle_info(Msg, State) ->
couch_log:warning("~p did not expect ~p", [?MODULE, Msg]),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
handle_config_change("couchdb", "index_dir", RootDir, _, RootDir) ->
{ok, RootDir};
handle_config_change("couchdb", "view_index_dir", RootDir, _, RootDir) ->
@@ -228,19 +225,24 @@ new_index({Mod, IdxState, DbName, Sig}) ->
case couch_index:start_link({Mod, IdxState}) of
{ok, Pid} ->
ok = gen_server:call(
- ?MODULE, {async_open, {DbName, DDocId, Sig}, {ok, Pid}}),
+ ?MODULE, {async_open, {DbName, DDocId, Sig}, {ok, Pid}}
+ ),
unlink(Pid);
Error ->
ok = gen_server:call(
- ?MODULE, {async_error, {DbName, DDocId, Sig}, Error})
+ ?MODULE, {async_error, {DbName, DDocId, Sig}, Error}
+ )
end.
-
reset_indexes(DbName, Root) ->
% shutdown all the updaters and clear the files, the db got changed
- SigDDocIds = lists:foldl(fun({_, {DDocId, Sig}}, DDict) ->
- dict:append(Sig, DDocId, DDict)
- end, dict:new(), ets:lookup(?BY_DB, DbName)),
+ SigDDocIds = lists:foldl(
+ fun({_, {DDocId, Sig}}, DDict) ->
+ dict:append(Sig, DDocId, DDict)
+ end,
+ dict:new(),
+ ets:lookup(?BY_DB, DbName)
+ ),
Fun = fun({Sig, DDocIds}) ->
[{_, Pid}] = ets:lookup(?BY_SIG, {DbName, Sig}),
unlink(Pid),
@@ -248,9 +250,8 @@ reset_indexes(DbName, Root) ->
receive
{'EXIT', Pid, _} ->
ok
- after
- 0 ->
- ok
+ after 0 ->
+ ok
end,
rem_from_ets(DbName, Sig, DDocIds, Pid)
end,
@@ -258,20 +259,20 @@ reset_indexes(DbName, Root) ->
Path = couch_index_util:index_dir("", DbName),
couch_file:nuke_dir(Root, Path).
-
add_to_ets(DbName, Sig, DDocId, Pid) ->
ets:insert(?BY_SIG, {{DbName, Sig}, Pid}),
ets:insert(?BY_PID, {Pid, {DbName, Sig}}),
ets:insert(?BY_DB, {DbName, {DDocId, Sig}}).
-
rem_from_ets(DbName, Sig, DDocIds, Pid) ->
ets:delete(?BY_SIG, {DbName, Sig}),
ets:delete(?BY_PID, Pid),
- lists:foreach(fun(DDocId) ->
- ets:delete_object(?BY_DB, {DbName, {DDocId, Sig}})
- end, DDocIds).
-
+ lists:foreach(
+ fun(DDocId) ->
+ ets:delete_object(?BY_DB, {DbName, {DDocId, Sig}})
+ end,
+ DDocIds
+ ).
handle_db_event(DbName, created, St) ->
gen_server:cast(?MODULE, {reset_indexes, DbName}),
@@ -279,44 +280,57 @@ handle_db_event(DbName, created, St) ->
handle_db_event(DbName, deleted, St) ->
gen_server:cast(?MODULE, {reset_indexes, DbName}),
{ok, St};
-handle_db_event(<<"shards/", _/binary>> = DbName, {ddoc_updated,
- DDocId}, St) ->
+handle_db_event(<<"shards/", _/binary>> = DbName, {ddoc_updated, DDocId}, St) ->
DDocResult = couch_util:with_db(DbName, fun(Db) ->
couch_db:open_doc(Db, DDocId, [ejson_body, ?ADMIN_CTX])
end),
- LocalShards = try mem3:local_shards(mem3:dbname(DbName))
- catch error:database_does_not_exist ->
- []
- end,
+ LocalShards =
+ try
+ mem3:local_shards(mem3:dbname(DbName))
+ catch
+ error:database_does_not_exist ->
+ []
+ end,
DbShards = [mem3:name(Sh) || Sh <- LocalShards],
- lists:foreach(fun(DbShard) ->
- lists:foreach(fun({_DbShard, {_DDocId, Sig}}) ->
- % check if there are other ddocs with the same Sig for the same db
- SigDDocs = ets:match_object(?BY_DB, {DbShard, {'$1', Sig}}),
- if length(SigDDocs) > 1 ->
- % remove records from ?BY_DB for this DDoc
- Args = [DbShard, DDocId, Sig],
- gen_server:cast(?MODULE, {rem_from_ets, Args});
- true ->
- % single DDoc with this Sig - close couch_index processes
- case ets:lookup(?BY_SIG, {DbShard, Sig}) of
- [{_, IndexPid}] -> (catch
- gen_server:cast(IndexPid, {ddoc_updated, DDocResult}));
- [] -> []
- end
- end
- end, ets:match_object(?BY_DB, {DbShard, {DDocId, '$1'}}))
- end, DbShards),
+ lists:foreach(
+ fun(DbShard) ->
+ lists:foreach(
+ fun({_DbShard, {_DDocId, Sig}}) ->
+ % check if there are other ddocs with the same Sig for the same db
+ SigDDocs = ets:match_object(?BY_DB, {DbShard, {'$1', Sig}}),
+ if
+ length(SigDDocs) > 1 ->
+ % remove records from ?BY_DB for this DDoc
+ Args = [DbShard, DDocId, Sig],
+ gen_server:cast(?MODULE, {rem_from_ets, Args});
+ true ->
+ % single DDoc with this Sig - close couch_index processes
+ case ets:lookup(?BY_SIG, {DbShard, Sig}) of
+ [{_, IndexPid}] ->
+ (catch gen_server:cast(IndexPid, {ddoc_updated, DDocResult}));
+ [] ->
+ []
+ end
+ end
+ end,
+ ets:match_object(?BY_DB, {DbShard, {DDocId, '$1'}})
+ )
+ end,
+ DbShards
+ ),
{ok, St};
handle_db_event(DbName, {ddoc_updated, DDocId}, St) ->
- lists:foreach(fun({_DbName, {_DDocId, Sig}}) ->
- case ets:lookup(?BY_SIG, {DbName, Sig}) of
- [{_, IndexPid}] ->
- (catch gen_server:cast(IndexPid, ddoc_updated));
- [] ->
- ok
- end
- end, ets:match_object(?BY_DB, {DbName, {DDocId, '$1'}})),
+ lists:foreach(
+ fun({_DbName, {_DDocId, Sig}}) ->
+ case ets:lookup(?BY_SIG, {DbName, Sig}) of
+ [{_, IndexPid}] ->
+ (catch gen_server:cast(IndexPid, ddoc_updated));
+ [] ->
+ ok
+ end
+ end,
+ ets:match_object(?BY_DB, {DbName, {DDocId, '$1'}})
+ ),
{ok, St};
handle_db_event(_DbName, _Event, St) ->
{ok, St}.
diff --git a/src/couch_index/src/couch_index_sup.erl b/src/couch_index/src/couch_index_sup.erl
index 2d4f671e2..eea4cc3ab 100644
--- a/src/couch_index/src/couch_index_sup.erl
+++ b/src/couch_index/src/couch_index_sup.erl
@@ -16,9 +16,8 @@
-export([start_link/1]).
-
start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
+ supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
init([]) ->
{ok, {{one_for_one, 3, 10}, couch_epi:register_service(couch_index_epi, [])}}.
diff --git a/src/couch_index/src/couch_index_updater.erl b/src/couch_index/src/couch_index_updater.erl
index fb15db052..fe2150505 100644
--- a/src/couch_index/src/couch_index_updater.erl
+++ b/src/couch_index/src/couch_index_updater.erl
@@ -13,7 +13,6 @@
-module(couch_index_updater).
-behaviour(gen_server).
-
%% API
-export([start_link/2, run/2, is_running/1, update/2, restart/2]).
@@ -29,48 +28,40 @@
-record(st, {
idx,
mod,
- pid=nil
+ pid = nil
}).
-
start_link(Index, Module) ->
gen_server:start_link(?MODULE, {Index, Module}, []).
-
run(Pid, IdxState) ->
gen_server:call(Pid, {update, IdxState}).
-
is_running(Pid) ->
gen_server:call(Pid, is_running).
-
update(Mod, State) ->
update(nil, Mod, State).
-
restart(Pid, IdxState) ->
gen_server:call(Pid, {restart, IdxState}).
-
init({Index, Module}) ->
process_flag(trap_exit, true),
- {ok, #st{idx=Index, mod=Module}}.
-
+ {ok, #st{idx = Index, mod = Module}}.
terminate(_Reason, State) ->
couch_util:shutdown_sync(State#st.pid),
ok.
-
-handle_call({update, _IdxState}, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
+handle_call({update, _IdxState}, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
{reply, ok, State};
-handle_call({update, IdxState}, _From, #st{idx=Idx, mod=Mod}=State) ->
+handle_call({update, IdxState}, _From, #st{idx = Idx, mod = Mod} = State) ->
Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
couch_log:info("Starting index update for db: ~s idx: ~s", Args),
Pid = spawn_link(?MODULE, update, [Idx, Mod, IdxState]),
- {reply, ok, State#st{pid=Pid}};
-handle_call({restart, IdxState}, _From, #st{idx=Idx, mod=Mod}=State) ->
+ {reply, ok, State#st{pid = Pid}};
+handle_call({restart, IdxState}, _From, #st{idx = Idx, mod = Mod} = State) ->
Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
couch_log:info("Restarting index update for db: ~s idx: ~s", Args),
Pid = State#st.pid,
@@ -86,46 +77,42 @@ handle_call({restart, IdxState}, _From, #st{idx=Idx, mod=Mod}=State) ->
ok
end,
NewPid = spawn_link(?MODULE, update, [Idx, State#st.mod, IdxState]),
- {reply, ok, State#st{pid=NewPid}};
-handle_call(is_running, _From, #st{pid=Pid}=State) when is_pid(Pid) ->
+ {reply, ok, State#st{pid = NewPid}};
+handle_call(is_running, _From, #st{pid = Pid} = State) when is_pid(Pid) ->
{reply, true, State};
handle_call(is_running, _From, State) ->
{reply, false, State}.
-
handle_cast(_Mesg, State) ->
{stop, unknown_cast, State}.
-
-handle_info({'EXIT', _, {updated, Pid, IdxState}}, #st{pid=Pid}=State) ->
+handle_info({'EXIT', _, {updated, Pid, IdxState}}, #st{pid = Pid} = State) ->
Mod = State#st.mod,
Args = [Mod:get(db_name, IdxState), Mod:get(idx_name, IdxState)],
couch_log:info("Index update finished for db: ~s idx: ~s", Args),
ok = gen_server:cast(State#st.idx, {updated, IdxState}),
- {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', _, {reset, Pid}}, #st{idx=Idx, pid=Pid}=State) ->
+ {noreply, State#st{pid = undefined}};
+handle_info({'EXIT', _, {reset, Pid}}, #st{idx = Idx, pid = Pid} = State) ->
{ok, NewIdxState} = gen_server:call(State#st.idx, reset),
Pid2 = spawn_link(?MODULE, update, [Idx, State#st.mod, NewIdxState]),
- {noreply, State#st{pid=Pid2}};
-handle_info({'EXIT', Pid, normal}, #st{pid=Pid}=State) ->
- {noreply, State#st{pid=undefined}};
+ {noreply, State#st{pid = Pid2}};
+handle_info({'EXIT', Pid, normal}, #st{pid = Pid} = State) ->
+ {noreply, State#st{pid = undefined}};
handle_info({'EXIT', Pid, {{nocatch, Error}, _Trace}}, State) ->
handle_info({'EXIT', Pid, Error}, State);
-handle_info({'EXIT', Pid, Error}, #st{pid=Pid}=State) ->
+handle_info({'EXIT', Pid, Error}, #st{pid = Pid} = State) ->
ok = gen_server:cast(State#st.idx, {update_error, Error}),
- {noreply, State#st{pid=undefined}};
-handle_info({'EXIT', Pid, _Reason}, #st{idx=Pid}=State) ->
+ {noreply, State#st{pid = undefined}};
+handle_info({'EXIT', Pid, _Reason}, #st{idx = Pid} = State) ->
{stop, normal, State};
handle_info({'EXIT', _Pid, normal}, State) ->
{noreply, State};
handle_info(_Mesg, State) ->
{stop, unknown_info, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
update(Idx, Mod, IdxState) ->
DbName = Mod:get(db_name, IdxState),
IndexName = Mod:get(idx_name, IdxState),
@@ -134,10 +121,11 @@ update(Idx, Mod, IdxState) ->
UpdateOpts = Mod:get(update_options, IdxState),
CommittedOnly = lists:member(committed_only, UpdateOpts),
IncludeDesign = lists:member(include_design, UpdateOpts),
- DocOpts = case lists:member(local_seq, UpdateOpts) of
- true -> [conflicts, deleted_conflicts, local_seq];
- _ -> [conflicts, deleted_conflicts]
- end,
+ DocOpts =
+ case lists:member(local_seq, UpdateOpts) of
+ true -> [conflicts, deleted_conflicts, local_seq];
+ _ -> [conflicts, deleted_conflicts]
+ end,
couch_util:with_db(DbName, fun(Db) ->
DbUpdateSeq = couch_db:get_update_seq(Db),
@@ -149,14 +137,14 @@ update(Idx, Mod, IdxState) ->
{ok, PurgedIdxState} = purge_index(Db, Mod, IdxState),
GetSeq = fun
- (#full_doc_info{update_seq=Seq}) -> Seq;
- (#doc_info{high_seq=Seq}) -> Seq
+ (#full_doc_info{update_seq = Seq}) -> Seq;
+ (#doc_info{high_seq = Seq}) -> Seq
end,
GetInfo = fun
- (#full_doc_info{id=Id, update_seq=Seq, deleted=Del}=FDI) ->
+ (#full_doc_info{id = Id, update_seq = Seq, deleted = Del} = FDI) ->
{Id, Seq, Del, couch_doc:to_doc_info(FDI)};
- (#doc_info{id=Id, high_seq=Seq, revs=[RI|_]}=DI) ->
+ (#doc_info{id = Id, high_seq = Seq, revs = [RI | _]} = DI) ->
{Id, Seq, RI#rev_info.deleted, DI}
end,
@@ -167,7 +155,7 @@ update(Idx, Mod, IdxState) ->
{false, <<"_design/", _/binary>>} ->
{nil, Seq};
_ when Deleted ->
- {#doc{id=DocId, deleted=true}, Seq};
+ {#doc{id = DocId, deleted = true}, Seq};
_ ->
{ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts),
{Doc, Seq}
@@ -186,11 +174,11 @@ update(Idx, Mod, IdxState) ->
end
end,
{ok, InitIdxState} = Mod:start_update(
- Idx,
- PurgedIdxState,
- TotalChanges,
- NumPurgeChanges
- ),
+ Idx,
+ PurgedIdxState,
+ TotalChanges,
+ NumPurgeChanges
+ ),
Acc0 = {InitIdxState, true},
{ok, Acc} = couch_db:fold_changes(Db, CurrSeq, Proc, Acc0, []),
@@ -198,41 +186,45 @@ update(Idx, Mod, IdxState) ->
% If we didn't bail due to hitting the last committed seq we need
% to send our last update_seq through.
- {ok, LastIdxSt} = case SendLast of
- true ->
- Mod:process_doc(nil, DbUpdateSeq, ProcIdxSt);
- _ ->
- {ok, ProcIdxSt}
- end,
+ {ok, LastIdxSt} =
+ case SendLast of
+ true ->
+ Mod:process_doc(nil, DbUpdateSeq, ProcIdxSt);
+ _ ->
+ {ok, ProcIdxSt}
+ end,
{ok, FinalIdxState} = Mod:finish_update(LastIdxSt),
exit({updated, self(), FinalIdxState})
end).
-
purge_index(Db, Mod, IdxState) ->
DbPurgeSeq = couch_db:get_purge_seq(Db),
IdxPurgeSeq = Mod:get(purge_seq, IdxState),
- if IdxPurgeSeq == DbPurgeSeq -> {ok, IdxState}; true ->
- FoldFun = fun({PurgeSeq, _UUId, Id, Revs}, Acc) ->
- Mod:purge(Db, PurgeSeq, [{Id, Revs}], Acc)
- end,
- {ok, NewStateAcc} = try
- couch_db:fold_purge_infos(
- Db,
- IdxPurgeSeq,
- FoldFun,
- IdxState,
- []
- )
- catch error:{invalid_start_purge_seq, _} ->
- exit({reset, self()})
- end,
- Mod:update_local_purge_doc(Db, NewStateAcc),
- {ok, NewStateAcc}
+ if
+ IdxPurgeSeq == DbPurgeSeq ->
+ {ok, IdxState};
+ true ->
+ FoldFun = fun({PurgeSeq, _UUId, Id, Revs}, Acc) ->
+ Mod:purge(Db, PurgeSeq, [{Id, Revs}], Acc)
+ end,
+ {ok, NewStateAcc} =
+ try
+ couch_db:fold_purge_infos(
+ Db,
+ IdxPurgeSeq,
+ FoldFun,
+ IdxState,
+ []
+ )
+ catch
+ error:{invalid_start_purge_seq, _} ->
+ exit({reset, self()})
+ end,
+ Mod:update_local_purge_doc(Db, NewStateAcc),
+ {ok, NewStateAcc}
end.
-
count_pending_purged_docs_since(Db, Mod, IdxState) ->
DbPurgeSeq = couch_db:get_purge_seq(Db),
IdxPurgeSeq = Mod:get(purge_seq, IdxState),
diff --git a/src/couch_index/src/couch_index_util.erl b/src/couch_index/src/couch_index_util.erl
index dcb33b5b0..3a7d283bf 100644
--- a/src/couch_index/src/couch_index_util.erl
+++ b/src/couch_index/src/couch_index_util.erl
@@ -17,10 +17,8 @@
-include_lib("couch/include/couch_db.hrl").
-
root_dir() ->
- config:get("couchdb", "view_index_dir").
-
+ config:get("couchdb", "view_index_dir").
index_dir(Module, DbName) when is_binary(DbName) ->
DbDir = "." ++ binary_to_list(DbName) ++ "_design",
@@ -28,33 +26,32 @@ index_dir(Module, DbName) when is_binary(DbName) ->
index_dir(Module, Db) ->
index_dir(Module, couch_db:name(Db)).
-
index_file(Module, DbName, FileName) ->
filename:join(index_dir(Module, DbName), FileName).
-
-load_doc(Db, #doc_info{}=DI, Opts) ->
+load_doc(Db, #doc_info{} = DI, Opts) ->
Deleted = lists:member(deleted, Opts),
case (catch couch_db:open_doc(Db, DI, Opts)) of
- {ok, #doc{deleted=false}=Doc} -> Doc;
- {ok, #doc{deleted=true}=Doc} when Deleted -> Doc;
+ {ok, #doc{deleted = false} = Doc} -> Doc;
+ {ok, #doc{deleted = true} = Doc} when Deleted -> Doc;
_Else -> null
end;
load_doc(Db, {DocId, Rev}, Opts) ->
case (catch load_doc(Db, DocId, Rev, Opts)) of
- #doc{deleted=false} = Doc -> Doc;
+ #doc{deleted = false} = Doc -> Doc;
_ -> null
end.
-
load_doc(Db, DocId, Rev, Options) ->
case Rev of
- nil -> % open most recent rev
+ % open most recent rev
+ nil ->
case (catch couch_db:open_doc(Db, DocId, Options)) of
{ok, Doc} -> Doc;
_Error -> null
end;
- _ -> % open a specific rev (deletions come back as stubs)
+ % open a specific rev (deletions come back as stubs)
+ _ ->
case (catch couch_db:open_doc_revs(Db, DocId, [Rev], Options)) of
{ok, [{ok, Doc}]} -> Doc;
{ok, [{{not_found, missing}, Rev}]} -> null;
@@ -62,17 +59,16 @@ load_doc(Db, DocId, Rev, Options) ->
end
end.
-
sort_lib({Lib}) ->
sort_lib(Lib, []).
sort_lib([], LAcc) ->
lists:keysort(1, LAcc);
-sort_lib([{LName, {LObj}}|Rest], LAcc) ->
- LSorted = sort_lib(LObj, []), % descend into nested object
- sort_lib(Rest, [{LName, LSorted}|LAcc]);
-sort_lib([{LName, LCode}|Rest], LAcc) ->
- sort_lib(Rest, [{LName, LCode}|LAcc]).
-
+sort_lib([{LName, {LObj}} | Rest], LAcc) ->
+ % descend into nested object
+ LSorted = sort_lib(LObj, []),
+ sort_lib(Rest, [{LName, LSorted} | LAcc]);
+sort_lib([{LName, LCode} | Rest], LAcc) ->
+ sort_lib(Rest, [{LName, LCode} | LAcc]).
hexsig(Sig) ->
couch_util:to_hex(binary_to_list(Sig)).
diff --git a/src/couch_index/test/eunit/couch_index_compaction_tests.erl b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
index ab493a969..fee04fd9c 100644
--- a/src/couch_index/test/eunit/couch_index_compaction_tests.erl
+++ b/src/couch_index/test/eunit/couch_index_compaction_tests.erl
@@ -17,7 +17,6 @@
-define(WAIT_TIMEOUT, 1000).
-
setup_all() ->
Ctx = test_util:start_couch(),
meck:new([test_index], [non_strict]),
@@ -41,8 +40,13 @@ fake_index(DbName) ->
ok = meck:expect(test_index, open, fun(_Db, State) ->
{ok, State}
end),
- ok = meck:expect(test_index, compact, ['_', '_', '_'],
- meck:seq([{ok, 9}, {ok, 10}])), %% to trigger recompaction
+ ok = meck:expect(
+ test_index,
+ compact,
+ ['_', '_', '_'],
+ %% to trigger recompaction
+ meck:seq([{ok, 9}, {ok, 10}])
+ ),
ok = meck:expect(test_index, commit, ['_'], ok),
ok = meck:expect(test_index, get, fun
(db_name, _) ->
@@ -50,7 +54,7 @@ fake_index(DbName) ->
(idx_name, _) ->
<<"idx_name">>;
(signature, _) ->
- <<61,237,157,230,136,93,96,201,204,17,137,186,50,249,44,135>>;
+ <<61, 237, 157, 230, 136, 93, 96, 201, 204, 17, 137, 186, 50, 249, 44, 135>>;
(update_seq, Seq) ->
Seq
end),
@@ -80,7 +84,6 @@ compaction_test_() ->
}
}.
-
hold_db_for_recompaction({Db, Idx}) ->
?_test(begin
?assertNot(is_opened(Db)),
@@ -105,12 +108,15 @@ hold_db_for_recompaction({Db, Idx}) ->
end).
wait_db_close(Db) ->
- test_util:wait(fun() ->
- case is_opened(Db) of
- false -> ok;
- true -> wait
- end
- end, ?WAIT_TIMEOUT).
+ test_util:wait(
+ fun() ->
+ case is_opened(Db) of
+ false -> ok;
+ true -> wait
+ end
+ end,
+ ?WAIT_TIMEOUT
+ ).
is_opened(Db) ->
Monitors = [M || M <- couch_db:monitored_by(Db), M =/= self()],
diff --git a/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
index 0e23adf91..7bee8baae 100644
--- a/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
+++ b/src/couch_index/test/eunit/couch_index_ddoc_updated_tests.erl
@@ -15,7 +15,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
start() ->
fake_index(),
Ctx = test_util:start_couch([mem3, fabric]),
@@ -23,56 +22,68 @@ start() ->
ok = fabric:create_db(DbName, [?ADMIN_CTX]),
{Ctx, DbName}.
-
stop({Ctx, DbName}) ->
meck:unload(test_index),
ok = fabric:delete_db(DbName, [?ADMIN_CTX]),
DbDir = config:get("couchdb", "database_dir", "."),
WaitFun = fun() ->
- filelib:fold_files(DbDir, <<".*", DbName/binary, "\.[0-9]+.*">>,
- true, fun(_F, _A) -> wait end, ok)
+ filelib:fold_files(
+ DbDir,
+ <<".*", DbName/binary, "\.[0-9]+.*">>,
+ true,
+ fun(_F, _A) -> wait end,
+ ok
+ )
end,
ok = test_util:wait(WaitFun),
test_util:stop_couch(Ctx),
ok.
-
ddoc_update_test_() ->
{
"Check ddoc update actions",
{
setup,
- fun start/0, fun stop/1,
+ fun start/0,
+ fun stop/1,
fun check_all_indexers_exit_on_ddoc_change/1
}
}.
-
check_all_indexers_exit_on_ddoc_change({_Ctx, DbName}) ->
?_test(begin
- [DbShard1 | RestDbShards] = lists:map(fun(Sh) ->
- {ok, ShardDb} = couch_db:open(mem3:name(Sh), []),
- ShardDb
- end, mem3:local_shards(mem3:dbname(DbName))),
+ [DbShard1 | RestDbShards] = lists:map(
+ fun(Sh) ->
+ {ok, ShardDb} = couch_db:open(mem3:name(Sh), []),
+ ShardDb
+ end,
+ mem3:local_shards(mem3:dbname(DbName))
+ ),
% create a DDoc on Db1
DDocID = <<"idx_name">>,
- DDocJson = couch_doc:from_json_obj({[
- {<<"_id">>, DDocID},
- {<<"value">>, 1}
- ]}),
+ DDocJson = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDocID},
+ {<<"value">>, 1}
+ ]}
+ ),
{ok, _Rev} = couch_db:update_doc(DbShard1, DDocJson, []),
{ok, DbShard} = couch_db:reopen(DbShard1),
{ok, DDoc} = couch_db:open_doc(
- DbShard, DDocID, [ejson_body, ?ADMIN_CTX]),
+ DbShard, DDocID, [ejson_body, ?ADMIN_CTX]
+ ),
DbShards = [DbShard | RestDbShards],
N = length(DbShards),
% run couch_index process for each shard database
ok = meck:reset(test_index),
- lists:foreach(fun(ShardDb) ->
- couch_index_server:get_index(test_index, ShardDb, DDoc)
- end, DbShards),
+ lists:foreach(
+ fun(ShardDb) ->
+ couch_index_server:get_index(test_index, ShardDb, DDoc)
+ end,
+ DbShards
+ ),
IndexesBefore = get_indexes_by_ddoc(DDocID, N),
?assertEqual(N, length(IndexesBefore)),
@@ -81,17 +92,20 @@ check_all_indexers_exit_on_ddoc_change({_Ctx, DbName}) ->
?assertEqual(N, length(AliveBefore)),
% update ddoc
- DDocJson2 = couch_doc:from_json_obj({[
- {<<"_id">>, DDocID},
- {<<"value">>, 2},
- {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
- ]}),
+ DDocJson2 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDocID},
+ {<<"value">>, 2},
+ {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
+ ]}
+ ),
{ok, _} = couch_db:update_doc(DbShard, DDocJson2, []),
% assert that all index processes exit after ddoc updated
ok = meck:reset(test_index),
couch_index_server:handle_db_event(
- couch_db:name(DbShard), {ddoc_updated, DDocID}, {st, ""}),
+ couch_db:name(DbShard), {ddoc_updated, DDocID}, {st, ""}
+ ),
ok = meck:wait(N, test_index, init, ['_', '_'], 5000),
IndexesAfter = get_indexes_by_ddoc(DDocID, 0),
@@ -103,7 +117,6 @@ check_all_indexers_exit_on_ddoc_change({_Ctx, DbName}) ->
ok
end).
-
fake_index() ->
ok = meck:new([test_index], [non_strict]),
ok = meck:expect(test_index, init, fun(Db, DDoc) ->
@@ -124,11 +137,11 @@ fake_index() ->
end),
ok = meck:expect(test_index, shutdown, ['_'], ok).
-
get_indexes_by_ddoc(DDocID, N) ->
Indexes = test_util:wait(fun() ->
Indxs = ets:match_object(
- couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}),
+ couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}
+ ),
case length(Indxs) == N of
true ->
Indxs;
@@ -136,10 +149,13 @@ get_indexes_by_ddoc(DDocID, N) ->
wait
end
end),
- lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) ->
- case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of
- [{_, Pid}] -> [Pid|Acc];
- _ -> Acc
- end
- end, [], Indexes).
-
+ lists:foldl(
+ fun({DbName, {_DDocID, Sig}}, Acc) ->
+ case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of
+ [{_, Pid}] -> [Pid | Acc];
+ _ -> Acc
+ end
+ end,
+ [],
+ Indexes
+ ).
diff --git a/src/couch_log/src/couch_log.erl b/src/couch_log/src/couch_log.erl
index a8dc5d48d..b8a1ca4bd 100644
--- a/src/couch_log/src/couch_log.erl
+++ b/src/couch_log/src/couch_log.erl
@@ -12,7 +12,6 @@
-module(couch_log).
-
-export([
debug/2,
info/2,
@@ -26,44 +25,34 @@
set_level/1
]).
-
-spec debug(string(), list()) -> ok.
debug(Fmt, Args) -> log(debug, Fmt, Args).
-
-spec info(string(), list()) -> ok.
info(Fmt, Args) -> log(info, Fmt, Args).
-
-spec notice(string(), list()) -> ok.
notice(Fmt, Args) -> log(notice, Fmt, Args).
-
-spec warning(string(), list()) -> ok.
warning(Fmt, Args) -> log(warning, Fmt, Args).
-
-spec error(string(), list()) -> ok.
error(Fmt, Args) -> log(error, Fmt, Args).
-
-spec critical(string(), list()) -> ok.
critical(Fmt, Args) -> log(critical, Fmt, Args).
-
-spec alert(string(), list()) -> ok.
alert(Fmt, Args) -> log(alert, Fmt, Args).
-
-spec emergency(string(), list()) -> ok.
emergency(Fmt, Args) -> log(emergency, Fmt, Args).
-
-spec set_level(atom() | string() | integer()) -> true.
set_level(Level) ->
config:set("log", "level", couch_log_util:level_to_string(Level)).
-
-spec log(atom(), string(), list()) -> ok.
log(Level, Fmt, Args) ->
case couch_log_util:should_log(Level) of
diff --git a/src/couch_log/src/couch_log_app.erl b/src/couch_log/src/couch_log_app.erl
index 91a8ecc4d..28c8bb193 100644
--- a/src/couch_log/src/couch_log_app.erl
+++ b/src/couch_log/src/couch_log_app.erl
@@ -16,7 +16,6 @@
-export([start/2, stop/1]).
-
start(_Type, _StartArgs) ->
couch_log_sup:start_link().
diff --git a/src/couch_log/src/couch_log_config.erl b/src/couch_log/src/couch_log_config.erl
index 55925c39f..925973178 100644
--- a/src/couch_log/src/couch_log_config.erl
+++ b/src/couch_log/src/couch_log_config.erl
@@ -14,23 +14,19 @@
-module(couch_log_config).
-
-export([
init/0,
reconfigure/0,
get/1
]).
-
-define(MOD_NAME, couch_log_config_dyn).
-define(ERL_FILE, "couch_log_config_dyn.erl").
-
-spec init() -> ok.
init() ->
reconfigure().
-
-spec reconfigure() -> ok.
reconfigure() ->
{ok, ?MOD_NAME, Bin} = compile:forms(forms(), [verbose, report_errors]),
@@ -38,12 +34,10 @@ reconfigure() ->
{module, ?MOD_NAME} = code:load_binary(?MOD_NAME, ?ERL_FILE, Bin),
ok.
-
-spec get(atom()) -> term().
get(Key) ->
?MOD_NAME:get(Key).
-
-spec entries() -> [string()].
entries() ->
[
@@ -52,17 +46,19 @@ entries() ->
{max_message_size, "max_message_size", "16000"},
{strip_last_msg, "strip_last_msg", "true"},
{filter_fields, "filter_fields", "[pid, registered_name, error_info, messages]"}
- ].
-
+ ].
-spec forms() -> [erl_syntax:syntaxTree()].
forms() ->
- GetFunClauses = lists:map(fun({FunKey, CfgKey, Default}) ->
- FunVal = transform(FunKey, config:get("log", CfgKey, Default)),
- Patterns = [erl_syntax:abstract(FunKey)],
- Bodies = [erl_syntax:abstract(FunVal)],
- erl_syntax:clause(Patterns, none, Bodies)
- end, entries()),
+ GetFunClauses = lists:map(
+ fun({FunKey, CfgKey, Default}) ->
+ FunVal = transform(FunKey, config:get("log", CfgKey, Default)),
+ Patterns = [erl_syntax:abstract(FunKey)],
+ Bodies = [erl_syntax:abstract(FunVal)],
+ erl_syntax:clause(Patterns, none, Bodies)
+ end,
+ entries()
+ ),
Statements = [
% -module(?MOD_NAME)
@@ -74,11 +70,14 @@ forms() ->
% -export([lookup/1]).
erl_syntax:attribute(
erl_syntax:atom(export),
- [erl_syntax:list([
- erl_syntax:arity_qualifier(
- erl_syntax:atom(get),
- erl_syntax:integer(1))
- ])]
+ [
+ erl_syntax:list([
+ erl_syntax:arity_qualifier(
+ erl_syntax:atom(get),
+ erl_syntax:integer(1)
+ )
+ ])
+ ]
),
% list(Key) -> Value.
@@ -86,27 +85,22 @@ forms() ->
],
[erl_syntax:revert(X) || X <- Statements].
-
transform(level, LevelStr) ->
couch_log_util:level_to_atom(LevelStr);
-
transform(level_int, LevelStr) ->
Level = couch_log_util:level_to_atom(LevelStr),
couch_log_util:level_to_integer(Level);
-
transform(max_message_size, SizeStr) ->
try list_to_integer(SizeStr) of
Size -> Size
- catch _:_ ->
- 16000
+ catch
+ _:_ ->
+ 16000
end;
-
transform(strip_last_msg, "false") ->
false;
-
transform(strip_last_msg, _) ->
true;
-
transform(filter_fields, FieldsStr) ->
Default = [pid, registered_name, error_info, messages],
case parse_term(FieldsStr) of
@@ -121,7 +115,6 @@ transform(filter_fields, FieldsStr) ->
Default
end.
-
parse_term(List) ->
{ok, Tokens, _} = erl_scan:string(List ++ "."),
erl_parse:parse_term(Tokens).
diff --git a/src/couch_log/src/couch_log_config_dyn.erl b/src/couch_log/src/couch_log_config_dyn.erl
index 1e1c927ae..ff781d3a0 100644
--- a/src/couch_log/src/couch_log_config_dyn.erl
+++ b/src/couch_log/src/couch_log_config_dyn.erl
@@ -17,12 +17,10 @@
-module(couch_log_config_dyn).
-
-export([
get/1
]).
-
get(level) -> info;
get(level_int) -> 2;
get(max_message_size) -> 16000;
diff --git a/src/couch_log/src/couch_log_error_logger_h.erl b/src/couch_log/src/couch_log_error_logger_h.erl
index c0765c61a..ff7ae045f 100644
--- a/src/couch_log/src/couch_log_error_logger_h.erl
+++ b/src/couch_log/src/couch_log_error_logger_h.erl
@@ -14,10 +14,8 @@
% https://github.com/basho/lager which is available under the
% above marked ASFL v2 license.
-
-module(couch_log_error_logger_h).
-
-behaviour(gen_event).
-export([
@@ -29,29 +27,22 @@
code_change/3
]).
-
init(_) ->
{ok, undefined}.
-
terminate(_Reason, _St) ->
ok.
-
handle_call(_, St) ->
{ok, ignored, St}.
-
handle_event(Event, St) ->
Entry = couch_log_formatter:format(Event),
ok = couch_log_server:log(Entry),
{ok, St}.
-
handle_info(_, St) ->
{ok, St}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
diff --git a/src/couch_log/src/couch_log_formatter.erl b/src/couch_log/src/couch_log_formatter.erl
index 3553666f6..2ce0fba6d 100644
--- a/src/couch_log/src/couch_log_formatter.erl
+++ b/src/couch_log/src/couch_log_formatter.erl
@@ -14,10 +14,8 @@
% from lager's error_logger_lager_h.erl which is available
% under the ASFv2 license.
-
-module(couch_log_formatter).
-
-export([
format/4,
format/3,
@@ -29,13 +27,10 @@
format_args/3
]).
-
-include("couch_log.hrl").
-
-define(DEFAULT_TRUNCATION, 1024).
-
format(Level, Pid, Fmt, Args) ->
#log_entry{
level = couch_log_util:level_to_atom(Level),
@@ -45,7 +40,6 @@ format(Level, Pid, Fmt, Args) ->
time_stamp = couch_log_util:iso8601_timestamp()
}.
-
format(Level, Pid, Msg) ->
#log_entry{
level = couch_log_util:level_to_atom(Level),
@@ -55,79 +49,82 @@ format(Level, Pid, Msg) ->
time_stamp = couch_log_util:iso8601_timestamp()
}.
-
format(Event) ->
try
do_format(Event)
catch
Tag:Err ->
Msg = "Encountered error ~w when formatting ~w",
- format(error, self(), Msg, [{Tag, Err}, Event])
+ format(error, self(), Msg, [{Tag, Err}, Event])
end.
-
do_format({error, _GL, {Pid, "** Generic server " ++ _, Args}}) ->
%% gen_server terminate
[Name, LastMsg0, State, Reason | Extra] = Args,
- LastMsg = case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt = "gen_server ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p~n extra: ~p",
+ LastMsg =
+ case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
+ MsgFmt =
+ "gen_server ~w terminated with reason: ~s~n" ++
+ " last msg: ~p~n state: ~p~n extra: ~p",
MsgArgs = [Name, format_reason(Reason), LastMsg, State, Extra],
format(error, Pid, MsgFmt, MsgArgs);
-
do_format({error, _GL, {Pid, "** State machine " ++ _, Args}}) ->
%% gen_fsm terminate
[Name, LastMsg0, StateName, State, Reason | Extra] = Args,
- LastMsg = case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt = "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p~n extra: ~p",
+ LastMsg =
+ case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
+ MsgFmt =
+ "gen_fsm ~w in state ~w terminated with reason: ~s~n" ++
+ " last msg: ~p~n state: ~p~n extra: ~p",
MsgArgs = [Name, StateName, format_reason(Reason), LastMsg, State, Extra],
format(error, Pid, MsgFmt, MsgArgs);
-
do_format({error, _GL, {Pid, "** gen_event handler" ++ _, Args}}) ->
%% gen_event handler terminate
[ID, Name, LastMsg0, State, Reason] = Args,
- LastMsg = case couch_log_config:get(strip_last_msg) of
- true ->
- redacted;
- false ->
- LastMsg0
- end,
- MsgFmt = "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
- " last msg: ~p~n state: ~p",
+ LastMsg =
+ case couch_log_config:get(strip_last_msg) of
+ true ->
+ redacted;
+ false ->
+ LastMsg0
+ end,
+ MsgFmt =
+ "gen_event ~w installed in ~w terminated with reason: ~s~n" ++
+ " last msg: ~p~n state: ~p",
MsgArgs = [ID, Name, format_reason(Reason), LastMsg, State],
format(error, Pid, MsgFmt, MsgArgs);
-
do_format({error, _GL, {emulator, "~s~n", [Msg]}}) when is_list(Msg) ->
% These messages are for whenever any process exits due
% to a throw or error. We intercept here to remove the
% extra newlines.
NewMsg = lists:sublist(Msg, length(Msg) - 1),
format(error, emulator, NewMsg);
-
do_format({error, _GL, {Pid, Fmt, Args}}) ->
format(error, Pid, Fmt, Args);
-
do_format({error_report, _GL, {Pid, std_error, D}}) ->
format(error, Pid, print_silly_list(D));
-
do_format({error_report, _GL, {Pid, supervisor_report, D}}) ->
case lists:sort(D) of
- [{errorContext, Ctx}, {offender, Off},
- {reason, Reason}, {supervisor, Name}] ->
+ [
+ {errorContext, Ctx},
+ {offender, Off},
+ {reason, Reason},
+ {supervisor, Name}
+ ] ->
Offender = format_offender(Off),
- MsgFmt = "Supervisor ~w had child ~s exit " ++
- "with reason ~s in context ~w",
+ MsgFmt =
+ "Supervisor ~w had child ~s exit " ++
+ "with reason ~s in context ~w",
Args = [
supervisor_name(Name),
Offender,
@@ -138,20 +135,15 @@ do_format({error_report, _GL, {Pid, supervisor_report, D}}) ->
_ ->
format(error, Pid, "SUPERVISOR REPORT " ++ print_silly_list(D))
end;
-
do_format({error_report, _GL, {Pid, crash_report, [Report, Neighbors]}}) ->
Msg = "CRASH REPORT " ++ format_crash_report(Report, Neighbors),
format(error, Pid, Msg);
-
do_format({warning_msg, _GL, {Pid, Fmt, Args}}) ->
format(warning, Pid, Fmt, Args);
-
do_format({warning_report, _GL, {Pid, std_warning, Report}}) ->
format(warning, Pid, print_silly_list(Report));
-
do_format({info_msg, _GL, {Pid, Fmt, Args}}) ->
format(info, Pid, Fmt, Args);
-
do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
case lists:sort(D) of
[{application, App}, {exited, Reason}, {type, _Type}] ->
@@ -160,10 +152,8 @@ do_format({info_report, _GL, {Pid, std_info, D}}) when is_list(D) ->
_ ->
format(info, Pid, print_silly_list(D))
end;
-
do_format({info_report, _GL, {Pid, std_info, D}}) ->
format(info, Pid, "~w", [D]);
-
do_format({info_report, _GL, {Pid, progress, D}}) ->
case lists:sort(D) of
[{application, App}, {started_at, Node}] ->
@@ -177,25 +167,25 @@ do_format({info_report, _GL, {Pid, progress, D}}) ->
_ ->
format(info, Pid, "PROGRESS REPORT " ++ print_silly_list(D))
end;
-
do_format(Event) ->
format(warning, self(), "Unexpected error_logger event ~w", [Event]).
-
format_crash_report(Report, Neighbours) ->
Pid = get_value(pid, Report),
- Name = case get_value(registered_name, Report) of
- undefined ->
- pid_to_list(Pid);
- Atom ->
- io_lib:format("~s (~w)", [Atom, Pid])
- end,
+ Name =
+ case get_value(registered_name, Report) of
+ undefined ->
+ pid_to_list(Pid);
+ Atom ->
+ io_lib:format("~s (~w)", [Atom, Pid])
+ end,
{Class, Reason, Trace} = get_value(error_info, Report),
ReasonStr = format_reason({Reason, Trace}),
- Type = case Class of
- exit -> "exited";
- _ -> "crashed"
- end,
+ Type =
+ case Class of
+ exit -> "exited";
+ _ -> "crashed"
+ end,
MsgFmt = "Process ~s with ~w neighbors ~s with reason: ~s",
Args = [Name, length(Neighbours), Type, ReasonStr],
Msg = io_lib:format(MsgFmt, Args),
@@ -206,7 +196,6 @@ format_crash_report(Report, Neighbours) ->
Msg ++ "; " ++ print_silly_list(Rest)
end.
-
format_offender(Off) ->
case get_value(mfargs, Off) of
undefined ->
@@ -219,129 +208,131 @@ format_offender(Off) ->
%% In 2014 the error report changed from `name' to
%% `id', so try that first.
- Name = case get_value(id, Off) of
- undefined ->
- get_value(name, Off);
- Id ->
- Id
- end,
+ Name =
+ case get_value(id, Off) of
+ undefined ->
+ get_value(name, Off);
+ Id ->
+ Id
+ end,
Args = [Name, MFA, get_value(pid, Off)],
io_lib:format("~p started with ~s at ~w", Args)
end.
-
format_reason({'function not exported', [{M, F, A} | Trace]}) ->
- ["call to unexported function ", format_mfa({M, F, A}),
- " at ", format_trace(Trace)];
-
+ [
+ "call to unexported function ",
+ format_mfa({M, F, A}),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({'function not exported' = C, [{M, F, A, _Props} | Rest]}) ->
%% Drop line number from undefined function
format_reason({C, [{M, F, A} | Rest]});
-
format_reason({undef, [MFA | Trace]}) ->
- ["call to undefined function ", format_mfa(MFA),
- " at ", format_trace(Trace)];
-
+ [
+ "call to undefined function ",
+ format_mfa(MFA),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({bad_return, {MFA, Val}}) ->
["bad return value ", print_val(Val), " from ", format_mfa(MFA)];
-
format_reason({bad_return_value, Val}) ->
["bad return value ", print_val(Val)];
-
format_reason({{bad_return_value, Val}, MFA}) ->
["bad return value ", print_val(Val), " at ", format_mfa(MFA)];
-
format_reason({{badrecord, Record}, Trace}) ->
["bad record ", print_val(Record), " at ", format_trace(Trace)];
-
format_reason({{case_clause, Val}, Trace}) ->
["no case clause matching ", print_val(Val), " at ", format_trace(Trace)];
-
format_reason({function_clause, [MFA | Trace]}) ->
- ["no function clause matching ", format_mfa(MFA),
- " at ", format_trace(Trace)];
-
+ [
+ "no function clause matching ",
+ format_mfa(MFA),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({if_clause, Trace}) ->
- ["no true branch found while evaluating if expression at ",
- format_trace(Trace)];
-
+ [
+ "no true branch found while evaluating if expression at ",
+ format_trace(Trace)
+ ];
format_reason({{try_clause, Val}, Trace}) ->
["no try clause matching ", print_val(Val), " at ", format_trace(Trace)];
-
format_reason({badarith, Trace}) ->
["bad arithmetic expression at ", format_trace(Trace)];
-
format_reason({{badmatch, Val}, Trace}) ->
- ["no match of right hand value ", print_val(Val),
- " at ", format_trace(Trace)];
-
+ [
+ "no match of right hand value ",
+ print_val(Val),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({emfile, Trace}) ->
- ["maximum number of file descriptors exhausted, check ulimit -n; ",
- format_trace(Trace)];
-
+ [
+ "maximum number of file descriptors exhausted, check ulimit -n; ",
+ format_trace(Trace)
+ ];
format_reason({system_limit, [{M, F, A} | Trace]}) ->
- Limit = case {M, F} of
- {erlang, open_port} ->
- "maximum number of ports exceeded";
- {erlang, spawn} ->
- "maximum number of processes exceeded";
- {erlang, spawn_opt} ->
- "maximum number of processes exceeded";
- {erlang, list_to_atom} ->
- "tried to create an atom larger than 255, or maximum atom count exceeded";
- {ets, new} ->
- "maximum number of ETS tables exceeded";
- _ ->
- format_mfa({M, F, A})
- end,
+ Limit =
+ case {M, F} of
+ {erlang, open_port} ->
+ "maximum number of ports exceeded";
+ {erlang, spawn} ->
+ "maximum number of processes exceeded";
+ {erlang, spawn_opt} ->
+ "maximum number of processes exceeded";
+ {erlang, list_to_atom} ->
+ "tried to create an atom larger than 255, or maximum atom count exceeded";
+ {ets, new} ->
+ "maximum number of ETS tables exceeded";
+ _ ->
+ format_mfa({M, F, A})
+ end,
["system limit: ", Limit, " at ", format_trace(Trace)];
-
format_reason({badarg, [MFA | Trace]}) ->
- ["bad argument in call to ", format_mfa(MFA),
- " at ", format_trace(Trace)];
-
+ [
+ "bad argument in call to ",
+ format_mfa(MFA),
+ " at ",
+ format_trace(Trace)
+ ];
format_reason({{badarg, Stack}, _}) ->
format_reason({badarg, Stack});
-
format_reason({{badarity, {Fun, Args}}, Trace}) ->
{arity, Arity} = lists:keyfind(arity, 1, erlang:fun_info(Fun)),
MsgFmt = "function called with wrong arity of ~w instead of ~w at ",
[io_lib:format(MsgFmt, [length(Args), Arity]), format_trace(Trace)];
-
format_reason({noproc, MFA}) ->
["no such process or port in call to ", format_mfa(MFA)];
-
format_reason({{badfun, Term}, Trace}) ->
["bad function ", print_val(Term), " called at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A} | _] = Trace})
- when is_atom(M), is_atom(F), is_integer(A) ->
+format_reason({Reason, [{M, F, A} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_integer(A)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A} | _] = Trace})
- when is_atom(M), is_atom(F), is_list(A) ->
+format_reason({Reason, [{M, F, A} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_list(A)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A, Props} | _] = Trace})
- when is_atom(M), is_atom(F), is_integer(A), is_list(Props) ->
+format_reason({Reason, [{M, F, A, Props} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_integer(A), is_list(Props)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
-format_reason({Reason, [{M, F, A, Props} | _] = Trace})
- when is_atom(M), is_atom(F), is_list(A), is_list(Props) ->
+format_reason({Reason, [{M, F, A, Props} | _] = Trace}) when
+ is_atom(M), is_atom(F), is_list(A), is_list(Props)
+->
[format_reason(Reason), " at ", format_trace(Trace)];
-
format_reason(Reason) ->
{Str, _} = couch_log_trunc_io:print(Reason, 500),
Str.
-
format_mfa({M, F, A}) when is_list(A) ->
{FmtStr, Args} = format_args(A, [], []),
io_lib:format("~w:~w(" ++ FmtStr ++ ")", [M, F | Args]);
-
format_mfa({M, F, A}) when is_integer(A) ->
io_lib:format("~w:~w/~w", [M, F, A]);
-
format_mfa({M, F, A, Props}) when is_list(Props) ->
case get_value(line, Props) of
undefined ->
@@ -349,47 +340,35 @@ format_mfa({M, F, A, Props}) when is_list(Props) ->
Line ->
[format_mfa({M, F, A}), io_lib:format("(line:~w)", [Line])]
end;
-
format_mfa(Trace) when is_list(Trace) ->
format_trace(Trace);
-
format_mfa(Other) ->
io_lib:format("~w", [Other]).
-
format_trace([MFA]) ->
[trace_mfa(MFA)];
-
format_trace([MFA | Rest]) ->
[trace_mfa(MFA), " <= ", format_trace(Rest)];
-
format_trace(Other) ->
io_lib:format("~w", [Other]).
-
trace_mfa({M, F, A}) when is_list(A) ->
format_mfa({M, F, length(A)});
-
trace_mfa({M, F, A, Props}) when is_list(A) ->
format_mfa({M, F, length(A), Props});
-
trace_mfa(Other) ->
format_mfa(Other).
-
format_args([], FmtAcc, ArgsAcc) ->
{string:join(lists:reverse(FmtAcc), ", "), lists:reverse(ArgsAcc)};
-
-format_args([H|T], FmtAcc, ArgsAcc) ->
+format_args([H | T], FmtAcc, ArgsAcc) ->
{Str, _} = couch_log_trunc_io:print(H, 100),
format_args(T, ["~s" | FmtAcc], [Str | ArgsAcc]).
-
maybe_truncate(Fmt, Args) ->
MaxMsgSize = couch_log_config:get(max_message_size),
couch_log_trunc_io:format(Fmt, Args, MaxMsgSize).
-
maybe_truncate(Msg) ->
MaxMsgSize = couch_log_config:get(max_message_size),
case iolist_size(Msg) > MaxMsgSize of
@@ -402,7 +381,6 @@ maybe_truncate(Msg) ->
Msg
end.
-
print_silly_list(L) when is_list(L) ->
case couch_log_util:string_p(L) of
true ->
@@ -410,23 +388,21 @@ print_silly_list(L) when is_list(L) ->
_ ->
print_silly_list(L, [], [])
end;
-
print_silly_list(L) ->
{Str, _} = couch_log_trunc_io:print(L, ?DEFAULT_TRUNCATION),
Str.
-
print_silly_list([], Fmt, Acc) ->
- couch_log_trunc_io:format(string:join(lists:reverse(Fmt), ", "),
- lists:reverse(Acc), ?DEFAULT_TRUNCATION);
-
+ couch_log_trunc_io:format(
+ string:join(lists:reverse(Fmt), ", "),
+ lists:reverse(Acc),
+ ?DEFAULT_TRUNCATION
+ );
print_silly_list([{K, V} | T], Fmt, Acc) ->
print_silly_list(T, ["~p: ~p" | Fmt], [V, K | Acc]);
-
print_silly_list([H | T], Fmt, Acc) ->
print_silly_list(T, ["~p" | Fmt], [H | Acc]).
-
print_val(Val) ->
{Str, _} = couch_log_trunc_io:print(Val, 500),
Str.
@@ -439,7 +415,6 @@ filter_silly_list(KV) ->
filter_silly_list([], _) ->
[];
-
filter_silly_list([{K, V} | T], Filter) ->
case lists:member(K, Filter) of
true ->
@@ -447,11 +422,9 @@ filter_silly_list([{K, V} | T], Filter) ->
false ->
[{K, V} | filter_silly_list(T, Filter)]
end;
-
filter_silly_list([H | T], Filter) ->
[H | filter_silly_list(T, Filter)].
-
get_value(Key, Value) ->
get_value(Key, Value, undefined).
diff --git a/src/couch_log/src/couch_log_monitor.erl b/src/couch_log/src/couch_log_monitor.erl
index 96d7f3698..b5ac0a844 100644
--- a/src/couch_log/src/couch_log_monitor.erl
+++ b/src/couch_log/src/couch_log_monitor.erl
@@ -15,7 +15,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0
]).
@@ -29,14 +28,11 @@
code_change/3
]).
-
-define(HANDLER_MOD, couch_log_error_logger_h).
-
start_link() ->
gen_server:start_link(?MODULE, [], []).
-
% OTP_RELEASE defined in OTP >= 21 only
-ifdef(OTP_RELEASE).
@@ -55,26 +51,19 @@ init(_) ->
-endif.
-
terminate(_, _) ->
ok.
-
handle_call(_Msg, _From, St) ->
{reply, ignored, St}.
-
handle_cast(_Msg, St) ->
{noreply, St}.
-
handle_info({gen_event_EXIT, ?HANDLER_MOD, Reason}, St) ->
{stop, Reason, St};
-
-
handle_info(_Msg, St) ->
{noreply, St}.
-
code_change(_, State, _) ->
{ok, State}.
diff --git a/src/couch_log/src/couch_log_server.erl b/src/couch_log/src/couch_log_server.erl
index 8432b9aa3..05cf92a75 100644
--- a/src/couch_log/src/couch_log_server.erl
+++ b/src/couch_log/src/couch_log_server.erl
@@ -13,7 +13,6 @@
-module(couch_log_server).
-behavior(gen_server).
-
-export([
start_link/0,
reconfigure/0,
@@ -21,42 +20,35 @@
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3
]).
-
-include("couch_log.hrl").
-
-record(st, {
writer
}).
-
-ifdef(TEST).
-define(SEND(Entry), gen_server:call(?MODULE, {log, Entry})).
-else.
-define(SEND(Entry), gen_server:cast(?MODULE, {log, Entry})).
-endif.
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
reconfigure() ->
gen_server:call(?MODULE, reconfigure).
-
log(Entry) ->
?SEND(Entry).
-
init(_) ->
couch_util:set_mqd_off_heap(?MODULE),
process_flag(trap_exit, true),
@@ -64,17 +56,14 @@ init(_) ->
writer = couch_log_writer:init()
}}.
-
terminate(Reason, St) ->
ok = couch_log_writer:terminate(Reason, St#st.writer).
-
handle_call(reconfigure, _From, St) ->
ok = couch_log_writer:terminate(reconfiguring, St#st.writer),
{reply, ok, St#st{
writer = couch_log_writer:init()
}};
-
handle_call({log, Entry}, _From, St) ->
% We re-check if we should log here in case an operator
% adjusted the log level and then realized it was a bad
@@ -86,22 +75,18 @@ handle_call({log, Entry}, _From, St) ->
false ->
{reply, ok, St}
end;
-
handle_call(Ignore, From, St) ->
Args = [?MODULE, Ignore],
Entry = couch_log_formatter:format(error, ?MODULE, "~s ignored ~p", Args),
handle_call({log, Entry}, From, St).
-
handle_cast(Msg, St) ->
{reply, ok, NewSt} = handle_call(Msg, nil, St),
{noreply, NewSt}.
-
handle_info(Msg, St) ->
{reply, ok, NewSt} = handle_call(Msg, nil, St),
{noreply, NewSt}.
-
code_change(_Vsn, St, _Extra) ->
{ok, St}.
diff --git a/src/couch_log/src/couch_log_sup.erl b/src/couch_log/src/couch_log_sup.erl
index 6cb8d7395..0167192d8 100644
--- a/src/couch_log/src/couch_log_sup.erl
+++ b/src/couch_log/src/couch_log_sup.erl
@@ -23,12 +23,10 @@
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
ok = couch_log_config:init(),
{ok, {{one_for_one, 10, 10}, children()}}.
-
children() ->
[
{
@@ -74,7 +72,6 @@ handle_config_change("log", Key, _, _, S) ->
end,
notify_listeners(),
{ok, S};
-
handle_config_change(_, _, _, _, S) ->
{ok, S}.
@@ -84,9 +81,12 @@ handle_config_terminate(_Server, _Reason, _State) ->
-ifdef(TEST).
notify_listeners() ->
Listeners = application:get_env(couch_log, config_listeners, []),
- lists:foreach(fun(L) ->
- L ! couch_log_config_change_finished
- end, Listeners).
+ lists:foreach(
+ fun(L) ->
+ L ! couch_log_config_change_finished
+ end,
+ Listeners
+ ).
-else.
notify_listeners() ->
ok.
diff --git a/src/couch_log/src/couch_log_trunc_io.erl b/src/couch_log/src/couch_log_trunc_io.erl
index 636dfdc1f..9736e87e1 100644
--- a/src/couch_log/src/couch_log_trunc_io.erl
+++ b/src/couch_log/src/couch_log_trunc_io.erl
@@ -36,33 +36,37 @@
-module(couch_log_trunc_io).
-author('matthias@corelatus.se').
%% And thanks to Chris Newcombe for a bug fix
--export([format/3, format/4, print/2, print/3, fprint/2, fprint/3, safe/2]). % interface functions
+
+% interface functions
+-export([format/3, format/4, print/2, print/3, fprint/2, fprint/3, safe/2]).
-version("$Id: trunc_io.erl,v 1.11 2009-02-23 12:01:06 matthias Exp $").
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
--type option() :: {'depth', integer()}
+-type option() ::
+ {'depth', integer()}
| {'lists_as_strings', boolean()}
| {'force_strings', boolean()}.
-type options() :: [option()].
-record(print_options, {
- %% negative depth means no depth limiting
- depth = -1 :: integer(),
- %% whether to print lists as strings, if possible
- lists_as_strings = true :: boolean(),
- %% force strings, or binaries to be printed as a string,
- %% even if they're not printable
- force_strings = false :: boolean()
- }).
+ %% negative depth means no depth limiting
+ depth = -1 :: integer(),
+ %% whether to print lists as strings, if possible
+ lists_as_strings = true :: boolean(),
+ %% force strings, or binaries to be printed as a string,
+ %% even if they're not printable
+ force_strings = false :: boolean()
+}).
format(Fmt, Args, Max) ->
format(Fmt, Args, Max, []).
format(Fmt, Args, Max, Options) ->
- try couch_log_trunc_io_fmt:format(Fmt, Args, Max, Options)
+ try
+ couch_log_trunc_io_fmt:format(Fmt, Args, Max, Options)
catch
_What:_Why ->
erlang:error(badarg, [Fmt, Args])
@@ -74,7 +78,6 @@ format(Fmt, Args, Max, Options) ->
fprint(Term, Max) ->
fprint(Term, Max, []).
-
%% @doc Returns an flattened list containing the ASCII representation of the given
%% term.
-spec fprint(term(), pos_integer(), options()) -> string().
@@ -108,30 +111,29 @@ print(Term, Max) ->
print(Term, Max, Options) when is_list(Options) ->
%% need to convert the proplist to a record
print(Term, Max, prepare_options(Options, #print_options{}));
-
-print(Term, _Max, #print_options{force_strings=true}) when not is_list(Term), not is_binary(Term), not is_atom(Term) ->
+print(Term, _Max, #print_options{force_strings = true}) when
+ not is_list(Term), not is_binary(Term), not is_atom(Term)
+->
erlang:error(badarg);
-
print(_, Max, _Options) when Max < 0 -> {"...", 3};
-print(_, _, #print_options{depth=0}) -> {"...", 3};
-
-
+print(_, _, #print_options{depth = 0}) ->
+ {"...", 3};
%% @doc We assume atoms, floats, funs, integers, PIDs, ports and refs never need
%% to be truncated. This isn't strictly true, someone could make an
%% arbitrarily long bignum. Let's assume that won't happen unless someone
%% is being malicious.
%%
-print(Atom, _Max, #print_options{force_strings=NoQuote}) when is_atom(Atom) ->
+print(Atom, _Max, #print_options{force_strings = NoQuote}) when is_atom(Atom) ->
L = atom_to_list(Atom),
- R = case atom_needs_quoting_start(L) andalso not NoQuote of
- true -> lists:flatten([$', L, $']);
- false -> L
- end,
+ R =
+ case atom_needs_quoting_start(L) andalso not NoQuote of
+ true -> lists:flatten([$', L, $']);
+ false -> L
+ end,
{R, length(R)};
-
-print(<<>>, _Max, #print_options{depth=1}) ->
+print(<<>>, _Max, #print_options{depth = 1}) ->
{"<<>>", 4};
-print(Bin, _Max, #print_options{depth=1}) when is_binary(Bin) ->
+print(Bin, _Max, #print_options{depth = 1}) when is_binary(Bin) ->
{"<<...>>", 7};
print(<<>>, _Max, Options) ->
case Options#print_options.force_strings of
@@ -140,70 +142,79 @@ print(<<>>, _Max, Options) ->
false ->
{"<<>>", 4}
end;
-
print(Binary, 0, _Options) when is_bitstring(Binary) ->
{"<<..>>", 6};
-
print(Bin, Max, _Options) when is_binary(Bin), Max < 2 ->
{"<<...>>", 7};
print(Binary, Max, Options) when is_binary(Binary) ->
B = binary_to_list(Binary, 1, lists:min([Max, byte_size(Binary)])),
- {Res, Length} = case Options#print_options.lists_as_strings orelse
- Options#print_options.force_strings of
- true ->
- Depth = Options#print_options.depth,
- MaxSize = (Depth - 1) * 4,
- %% check if we need to truncate based on depth
- In = case Depth > -1 andalso MaxSize < length(B) andalso
- not Options#print_options.force_strings of
- true ->
- string:substr(B, 1, MaxSize);
- false -> B
- end,
- MaxLen = case Options#print_options.force_strings of
- true ->
- Max;
- false ->
- %% make room for the leading doublequote
- Max - 1
- end,
- try alist(In, MaxLen, Options) of
- {L0, Len0} ->
- case Options#print_options.force_strings of
- false ->
- case B /= In of
- true ->
- {[$", L0, "..."], Len0+4};
- false ->
- {[$"|L0], Len0+1}
- end;
+ {Res, Length} =
+ case
+ Options#print_options.lists_as_strings orelse
+ Options#print_options.force_strings
+ of
+ true ->
+ Depth = Options#print_options.depth,
+ MaxSize = (Depth - 1) * 4,
+ %% check if we need to truncate based on depth
+ In =
+ case
+ Depth > -1 andalso MaxSize < length(B) andalso
+ not Options#print_options.force_strings
+ of
true ->
- {L0, Len0}
- end
- catch
- throw:{unprintable, C} ->
- Index = string:chr(In, C),
- case Index > 1 andalso Options#print_options.depth =< Index andalso
- Options#print_options.depth > -1 andalso
- not Options#print_options.force_strings of
+ string:substr(B, 1, MaxSize);
+ false ->
+ B
+ end,
+ MaxLen =
+ case Options#print_options.force_strings of
true ->
- %% print first Index-1 characters followed by ...
- {L0, Len0} = alist_start(string:substr(In, 1, Index - 1), Max - 1, Options),
- {L0++"...", Len0+3};
+ Max;
false ->
- list_body(In, Max-4, dec_depth(Options), true)
- end
- end;
- _ ->
- list_body(B, Max-4, dec_depth(Options), true)
- end,
+ %% make room for the leading doublequote
+ Max - 1
+ end,
+ try alist(In, MaxLen, Options) of
+ {L0, Len0} ->
+ case Options#print_options.force_strings of
+ false ->
+ case B /= In of
+ true ->
+ {[$", L0, "..."], Len0 + 4};
+ false ->
+ {[$" | L0], Len0 + 1}
+ end;
+ true ->
+ {L0, Len0}
+ end
+ catch
+ throw:{unprintable, C} ->
+ Index = string:chr(In, C),
+ case
+ Index > 1 andalso Options#print_options.depth =< Index andalso
+ Options#print_options.depth > -1 andalso
+ not Options#print_options.force_strings
+ of
+ true ->
+ %% print first Index-1 characters followed by ...
+ {L0, Len0} = alist_start(
+ string:substr(In, 1, Index - 1), Max - 1, Options
+ ),
+ {L0 ++ "...", Len0 + 3};
+ false ->
+ list_body(In, Max - 4, dec_depth(Options), true)
+ end
+ end;
+ _ ->
+ list_body(B, Max - 4, dec_depth(Options), true)
+ end,
case Options#print_options.force_strings of
true ->
{Res, Length};
_ ->
- {["<<", Res, ">>"], Length+4}
+ {["<<", Res, ">>"], Length + 4}
end;
-
%% bitstrings are binary's evil brother who doesn't end on an 8 bit boundary.
%% This makes printing them extremely annoying, so list_body/list_bodyc has
%% some magic for dealing with the output of bitstring_to_list, which returns
@@ -214,27 +225,26 @@ print({inline_bitstring, B}, _Max, _Options) when is_bitstring(B) ->
<<Value:Size>> = B,
ValueStr = integer_to_list(Value),
SizeStr = integer_to_list(Size),
- {[ValueStr, $:, SizeStr], length(ValueStr) + length(SizeStr) +1};
+ {[ValueStr, $:, SizeStr], length(ValueStr) + length(SizeStr) + 1};
print(BitString, Max, Options) when is_bitstring(BitString) ->
- BL = case byte_size(BitString) > Max of
- true ->
- binary_to_list(BitString, 1, Max);
- _ ->
- R = erlang:bitstring_to_list(BitString),
- {Bytes, [Bits]} = lists:splitwith(fun erlang:is_integer/1, R),
- %% tag the trailing bits with a special tuple we catch when
- %% list_body calls print again
- Bytes ++ [{inline_bitstring, Bits}]
- end,
+ BL =
+ case byte_size(BitString) > Max of
+ true ->
+ binary_to_list(BitString, 1, Max);
+ _ ->
+ R = erlang:bitstring_to_list(BitString),
+ {Bytes, [Bits]} = lists:splitwith(fun erlang:is_integer/1, R),
+ %% tag the trailing bits with a special tuple we catch when
+ %% list_body calls print again
+ Bytes ++ [{inline_bitstring, Bits}]
+ end,
{X, Len0} = list_body(BL, Max - 4, dec_depth(Options), true),
{["<<", X, ">>"], Len0 + 4};
-
print(Float, _Max, _Options) when is_float(Float) ->
%% use the same function io_lib:format uses to print floats
%% float_to_list is way too verbose.
L = io_lib_format:fwrite_g(Float),
{L, length(L)};
-
print(Fun, Max, _Options) when is_function(Fun) ->
L = erlang:fun_to_list(Fun),
case length(L) > Max of
@@ -245,42 +255,36 @@ print(Fun, Max, _Options) when is_function(Fun) ->
_ ->
{L, length(L)}
end;
-
print(Integer, _Max, _Options) when is_integer(Integer) ->
L = integer_to_list(Integer),
{L, length(L)};
-
print(Pid, _Max, _Options) when is_pid(Pid) ->
L = pid_to_list(Pid),
{L, length(L)};
-
print(Ref, _Max, _Options) when is_reference(Ref) ->
L = erlang:ref_to_list(Ref),
{L, length(L)};
-
print(Port, _Max, _Options) when is_port(Port) ->
L = erlang:port_to_list(Port),
{L, length(L)};
-
print({'$lager_record', Name, Fields}, Max, Options) ->
Leader = "#" ++ atom_to_list(Name) ++ "{",
{RC, Len} = record_fields(Fields, Max - length(Leader) + 1, dec_depth(Options)),
{[Leader, RC, "}"], Len + length(Leader) + 1};
-
print(Tuple, Max, Options) when is_tuple(Tuple) ->
- {TC, Len} = tuple_contents(Tuple, Max-2, Options),
+ {TC, Len} = tuple_contents(Tuple, Max - 2, Options),
{[${, TC, $}], Len + 2};
-
print(List, Max, Options) when is_list(List) ->
- case Options#print_options.lists_as_strings orelse
- Options#print_options.force_strings of
+ case
+ Options#print_options.lists_as_strings orelse
+ Options#print_options.force_strings
+ of
true ->
alist_start(List, Max, dec_depth(Options));
_ ->
{R, Len} = list_body(List, Max - 2, dec_depth(Options), false),
{[$[, R, $]], Len + 2}
end;
-
print(Map, Max, Options) ->
case erlang:is_builtin(erlang, is_map, 1) andalso erlang:is_map(Map) of
true ->
@@ -297,43 +301,52 @@ tuple_contents(Tuple, Max, Options) ->
%% Format the inside of a list, i.e. do not add a leading [ or trailing ].
%% Returns {List, Length}
-list_body([], _Max, _Options, _Tuple) -> {[], 0};
+list_body([], _Max, _Options, _Tuple) ->
+ {[], 0};
list_body(_, Max, _Options, _Tuple) when Max < 4 -> {"...", 3};
-list_body(_, _Max, #print_options{depth=0}, _Tuple) -> {"...", 3};
-list_body([H], Max, Options=#print_options{depth=1}, _Tuple) ->
+list_body(_, _Max, #print_options{depth = 0}, _Tuple) ->
+ {"...", 3};
+list_body([H], Max, Options = #print_options{depth = 1}, _Tuple) ->
print(H, Max, Options);
-list_body([H|_], Max, Options=#print_options{depth=1}, Tuple) ->
- {List, Len} = print(H, Max-4, Options),
- Sep = case Tuple of
- true -> $,;
- false -> $|
- end,
+list_body([H | _], Max, Options = #print_options{depth = 1}, Tuple) ->
+ {List, Len} = print(H, Max - 4, Options),
+ Sep =
+ case Tuple of
+ true -> $,;
+ false -> $|
+ end,
{[List ++ [Sep | "..."]], Len + 4};
-list_body([H|T], Max, Options, Tuple) ->
+list_body([H | T], Max, Options, Tuple) ->
{List, Len} = print(H, Max, Options),
{Final, FLen} = list_bodyc(T, Max - Len, Options, Tuple),
- {[List|Final], FLen + Len};
-list_body(X, Max, Options, _Tuple) -> %% improper list
+ {[List | Final], FLen + Len};
+%% improper list
+list_body(X, Max, Options, _Tuple) ->
{List, Len} = print(X, Max - 1, Options),
- {[$|,List], Len + 1}.
+ {[$|, List], Len + 1}.
-list_bodyc([], _Max, _Options, _Tuple) -> {[], 0};
+list_bodyc([], _Max, _Options, _Tuple) ->
+ {[], 0};
list_bodyc(_, Max, _Options, _Tuple) when Max < 5 -> {",...", 4};
-list_bodyc(_, _Max, #print_options{depth=1}, true) -> {",...", 4};
-list_bodyc(_, _Max, #print_options{depth=1}, false) -> {"|...", 4};
-list_bodyc([H|T], Max, #print_options{depth=Depth} = Options, Tuple) ->
+list_bodyc(_, _Max, #print_options{depth = 1}, true) ->
+ {",...", 4};
+list_bodyc(_, _Max, #print_options{depth = 1}, false) ->
+ {"|...", 4};
+list_bodyc([H | T], Max, #print_options{depth = Depth} = Options, Tuple) ->
{List, Len} = print(H, Max, dec_depth(Options)),
{Final, FLen} = list_bodyc(T, Max - Len - 1, dec_depth(Options), Tuple),
- Sep = case Depth == 1 andalso not Tuple of
- true -> $|;
- _ -> $,
- end,
- {[Sep, List|Final], FLen + Len + 1};
-list_bodyc(X, Max, Options, _Tuple) -> %% improper list
+ Sep =
+ case Depth == 1 andalso not Tuple of
+ true -> $|;
+ _ -> $,
+ end,
+ {[Sep, List | Final], FLen + Len + 1};
+%% improper list
+list_bodyc(X, Max, Options, _Tuple) ->
{List, Len} = print(X, Max - 1, Options),
- {[$|,List], Len + 1}.
+ {[$|, List], Len + 1}.
-map_body(Map, Max, #print_options{depth=Depth}) when Max < 4; Depth =:= 0 ->
+map_body(Map, Max, #print_options{depth = Depth}) when Max < 4; Depth =:= 0 ->
case erlang:map_size(Map) of
0 -> {[], 0};
_ -> {"...", 3}
@@ -353,7 +366,7 @@ map_body(Map, Max, Options) ->
map_bodyc([], _Max, _Options) ->
{[], 0};
-map_bodyc(_Rest, Max,#print_options{depth=Depth}) when Max < 5; Depth =:= 0 ->
+map_bodyc(_Rest, Max, #print_options{depth = Depth}) when Max < 5; Depth =:= 0 ->
{",...", 4};
map_bodyc([{Key, Value} | Rest], Max, Options) ->
{KeyStr, KeyLen} = print(Key, Max - 5, Options),
@@ -370,70 +383,86 @@ map_bodyc([{Key, Value} | Rest], Max, Options) ->
%% [0,65,66] -> [0,65,66]
%% [65,b,66] -> "A"[b,66]
%%
-alist_start([], _Max, #print_options{force_strings=true}) -> {"", 0};
-alist_start([], _Max, _Options) -> {"[]", 2};
+alist_start([], _Max, #print_options{force_strings = true}) ->
+ {"", 0};
+alist_start([], _Max, _Options) ->
+ {"[]", 2};
alist_start(_, Max, _Options) when Max < 4 -> {"...", 3};
-alist_start(_, _Max, #print_options{depth=0}) -> {"[...]", 5};
-alist_start(L, Max, #print_options{force_strings=true} = Options) ->
+alist_start(_, _Max, #print_options{depth = 0}) ->
+ {"[...]", 5};
+alist_start(L, Max, #print_options{force_strings = true} = Options) ->
alist(L, Max, Options);
%alist_start([H|_T], _Max, #print_options{depth=1}) when is_integer(H) -> {[$[, H, $|, $., $., $., $]], 7};
-alist_start([H|T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e -> % definitely printable
- try alist([H|T], Max -1, Options) of
+
+% definitely printable
+alist_start([H | T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e ->
+ try alist([H | T], Max - 1, Options) of
{L, Len} ->
- {[$"|L], Len + 1}
+ {[$" | L], Len + 1}
catch
throw:{unprintable, _} ->
- {R, Len} = list_body([H|T], Max-2, Options, false),
+ {R, Len} = list_body([H | T], Max - 2, Options, false),
{[$[, R, $]], Len + 2}
end;
-alist_start([H|T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff -> % definitely printable
- try alist([H|T], Max -1, Options) of
+% definitely printable
+alist_start([H | T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff ->
+ try alist([H | T], Max - 1, Options) of
{L, Len} ->
- {[$"|L], Len + 1}
+ {[$" | L], Len + 1}
catch
throw:{unprintable, _} ->
- {R, Len} = list_body([H|T], Max-2, Options, false),
+ {R, Len} = list_body([H | T], Max - 2, Options, false),
{[$[, R, $]], Len + 2}
end;
-alist_start([H|T], Max, Options) when H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H=:= $\f; H=:= $\b ->
- try alist([H|T], Max -1, Options) of
+alist_start([H | T], Max, Options) when
+ H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H =:= $\f; H =:= $\b
+->
+ try alist([H | T], Max - 1, Options) of
{L, Len} ->
- {[$"|L], Len + 1}
+ {[$" | L], Len + 1}
catch
throw:{unprintable, _} ->
- {R, Len} = list_body([H|T], Max-2, Options, false),
+ {R, Len} = list_body([H | T], Max - 2, Options, false),
{[$[, R, $]], Len + 2}
end;
alist_start(L, Max, Options) ->
- {R, Len} = list_body(L, Max-2, Options, false),
+ {R, Len} = list_body(L, Max - 2, Options, false),
{[$[, R, $]], Len + 2}.
-alist([], _Max, #print_options{force_strings=true}) -> {"", 0};
-alist([], _Max, _Options) -> {"\"", 1};
-alist(_, Max, #print_options{force_strings=true}) when Max < 4 -> {"...", 3};
-alist(_, Max, #print_options{force_strings=false}) when Max < 5 -> {"...\"", 4};
-alist([H|T], Max, Options = #print_options{force_strings=false,lists_as_strings=true}) when H =:= $"; H =:= $\\ ->
+alist([], _Max, #print_options{force_strings = true}) ->
+ {"", 0};
+alist([], _Max, _Options) ->
+ {"\"", 1};
+alist(_, Max, #print_options{force_strings = true}) when Max < 4 -> {"...", 3};
+alist(_, Max, #print_options{force_strings = false}) when Max < 5 -> {"...\"", 4};
+alist([H | T], Max, Options = #print_options{force_strings = false, lists_as_strings = true}) when
+ H =:= $"; H =:= $\\
+->
%% preserve escaping around quotes
- {L, Len} = alist(T, Max-1, Options),
- {[$\\,H|L], Len + 2};
-alist([H|T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e -> % definitely printable
- {L, Len} = alist(T, Max-1, Options),
- {[H|L], Len + 1};
-alist([H|T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff -> % definitely printable
- {L, Len} = alist(T, Max-1, Options),
- {[H|L], Len + 1};
-alist([H|T], Max, Options) when H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H=:= $\f; H=:= $\b ->
- {L, Len} = alist(T, Max-1, Options),
+ {L, Len} = alist(T, Max - 1, Options),
+ {[$\\, H | L], Len + 2};
+% definitely printable
+alist([H | T], Max, Options) when is_integer(H), H >= 16#20, H =< 16#7e ->
+ {L, Len} = alist(T, Max - 1, Options),
+ {[H | L], Len + 1};
+% definitely printable
+alist([H | T], Max, Options) when is_integer(H), H >= 16#a0, H =< 16#ff ->
+ {L, Len} = alist(T, Max - 1, Options),
+ {[H | L], Len + 1};
+alist([H | T], Max, Options) when
+ H =:= $\t; H =:= $\n; H =:= $\r; H =:= $\v; H =:= $\e; H =:= $\f; H =:= $\b
+->
+ {L, Len} = alist(T, Max - 1, Options),
case Options#print_options.force_strings of
true ->
- {[H|L], Len + 1};
+ {[H | L], Len + 1};
_ ->
- {[escape(H)|L], Len + 1}
+ {[escape(H) | L], Len + 1}
end;
-alist([H|T], Max, #print_options{force_strings=true} = Options) when is_integer(H) ->
- {L, Len} = alist(T, Max-1, Options),
- {[H|L], Len + 1};
-alist([H|T], Max, Options = #print_options{force_strings=true}) when is_binary(H); is_list(H) ->
+alist([H | T], Max, #print_options{force_strings = true} = Options) when is_integer(H) ->
+ {L, Len} = alist(T, Max - 1, Options),
+ {[H | L], Len + 1};
+alist([H | T], Max, Options = #print_options{force_strings = true}) when is_binary(H); is_list(H) ->
{List, Len} = print(H, Max, Options),
case (Max - Len) =< 0 of
true ->
@@ -442,28 +471,31 @@ alist([H|T], Max, Options = #print_options{force_strings=true}) when is_binary(H
false ->
%% no need to decrement depth, as we're in printable string mode
{Final, FLen} = alist(T, Max - Len, Options),
- {[List|Final], FLen+Len}
+ {[List | Final], FLen + Len}
end;
-alist(_, _, #print_options{force_strings=true}) ->
+alist(_, _, #print_options{force_strings = true}) ->
erlang:error(badarg);
-alist([H|_L], _Max, _Options) ->
+alist([H | _L], _Max, _Options) ->
throw({unprintable, H});
alist(H, _Max, _Options) ->
%% improper list
throw({unprintable, H}).
%% is the first character in the atom alphabetic & lowercase?
-atom_needs_quoting_start([H|T]) when H >= $a, H =< $z ->
+atom_needs_quoting_start([H | T]) when H >= $a, H =< $z ->
atom_needs_quoting(T);
atom_needs_quoting_start(_) ->
true.
atom_needs_quoting([]) ->
false;
-atom_needs_quoting([H|T]) when (H >= $a andalso H =< $z);
- (H >= $A andalso H =< $Z);
- (H >= $0 andalso H =< $9);
- H == $@; H == $_ ->
+atom_needs_quoting([H | T]) when
+ (H >= $a andalso H =< $z);
+ (H >= $A andalso H =< $Z);
+ (H >= $0 andalso H =< $9);
+ H == $@;
+ H == $_
+->
atom_needs_quoting(T);
atom_needs_quoting(_) ->
true.
@@ -471,15 +503,15 @@ atom_needs_quoting(_) ->
-spec prepare_options(options(), #print_options{}) -> #print_options{}.
prepare_options([], Options) ->
Options;
-prepare_options([{depth, Depth}|T], Options) when is_integer(Depth) ->
- prepare_options(T, Options#print_options{depth=Depth});
-prepare_options([{lists_as_strings, Bool}|T], Options) when is_boolean(Bool) ->
+prepare_options([{depth, Depth} | T], Options) when is_integer(Depth) ->
+ prepare_options(T, Options#print_options{depth = Depth});
+prepare_options([{lists_as_strings, Bool} | T], Options) when is_boolean(Bool) ->
prepare_options(T, Options#print_options{lists_as_strings = Bool});
-prepare_options([{force_strings, Bool}|T], Options) when is_boolean(Bool) ->
+prepare_options([{force_strings, Bool} | T], Options) when is_boolean(Bool) ->
prepare_options(T, Options#print_options{force_strings = Bool}).
-dec_depth(#print_options{depth=Depth} = Options) when Depth > 0 ->
- Options#print_options{depth=Depth-1};
+dec_depth(#print_options{depth = Depth} = Options) when Depth > 0 ->
+ Options#print_options{depth = Depth - 1};
dec_depth(Options) ->
Options.
@@ -493,20 +525,20 @@ escape($\v) -> "\\v".
record_fields([], _, _) ->
{"", 0};
-record_fields(_, Max, #print_options{depth=D}) when Max < 4; D == 0 ->
+record_fields(_, Max, #print_options{depth = D}) when Max < 4; D == 0 ->
{"...", 3};
-record_fields([{Field, Value}|T], Max, Options) ->
- {ExtraChars, Terminator} = case T of
- [] ->
- {1, []};
- _ ->
- {2, ","}
- end,
+record_fields([{Field, Value} | T], Max, Options) ->
+ {ExtraChars, Terminator} =
+ case T of
+ [] ->
+ {1, []};
+ _ ->
+ {2, ","}
+ end,
{FieldStr, FieldLen} = print(Field, Max - ExtraChars, Options),
{ValueStr, ValueLen} = print(Value, Max - (FieldLen + ExtraChars), Options),
{Final, FLen} = record_fields(T, Max - (FieldLen + ValueLen + ExtraChars), dec_depth(Options)),
- {[FieldStr++"="++ValueStr++Terminator|Final], FLen + FieldLen + ValueLen + ExtraChars}.
-
+ {[FieldStr ++ "=" ++ ValueStr ++ Terminator | Final], FLen + FieldLen + ValueLen + ExtraChars}.
-ifdef(TEST).
%%--------------------
@@ -516,19 +548,27 @@ format_test() ->
?assertEqual("foobar", lists:flatten(format("~s", [["foo", $b, $a, $r]], 50))),
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~p", [["foo", $b, $a, $r]], 50))),
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~P", [["foo", $b, $a, $r], 10], 50))),
- ?assertEqual("[[102,111,111],98,97,114]", lists:flatten(format("~w", [["foo", $b, $a, $r]], 50))),
+ ?assertEqual(
+ "[[102,111,111],98,97,114]", lists:flatten(format("~w", [["foo", $b, $a, $r]], 50))
+ ),
%% complex ones
?assertEqual(" foobar", lists:flatten(format("~10s", [["foo", $b, $a, $r]], 50))),
?assertEqual("f", lists:flatten(format("~1s", [["foo", $b, $a, $r]], 50))),
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~22p", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~22P", [["foo", $b, $a, $r], 10], 50))),
+ ?assertEqual(
+ "[\"foo\",98,97,114]", lists:flatten(format("~22P", [["foo", $b, $a, $r], 10], 50))
+ ),
?assertEqual("**********", lists:flatten(format("~10W", [["foo", $b, $a, $r], 10], 50))),
- ?assertEqual("[[102,111,111],98,97,114]", lists:flatten(format("~25W", [["foo", $b, $a, $r], 10], 50))),
+ ?assertEqual(
+ "[[102,111,111],98,97,114]", lists:flatten(format("~25W", [["foo", $b, $a, $r], 10], 50))
+ ),
% Note these next two diverge from io_lib:format; the field width is
% ignored, when it should be used as max line length.
?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~10p", [["foo", $b, $a, $r]], 50))),
- ?assertEqual("[\"foo\",98,97,114]", lists:flatten(format("~10P", [["foo", $b, $a, $r], 10], 50))),
+ ?assertEqual(
+ "[\"foo\",98,97,114]", lists:flatten(format("~10P", [["foo", $b, $a, $r], 10], 50))
+ ),
ok.
atom_quoting_test() ->
@@ -545,12 +585,15 @@ sane_float_printing_test() ->
?assertEqual("1.0", lists:flatten(format("~p", [1.0], 50))),
?assertEqual("1.23456789", lists:flatten(format("~p", [1.23456789], 50))),
?assertEqual("1.23456789", lists:flatten(format("~p", [1.234567890], 50))),
- ?assertEqual("0.3333333333333333", lists:flatten(format("~p", [1/3], 50))),
+ ?assertEqual("0.3333333333333333", lists:flatten(format("~p", [1 / 3], 50))),
?assertEqual("0.1234567", lists:flatten(format("~p", [0.1234567], 50))),
ok.
float_inside_list_test() ->
- ?assertEqual("[97,38.233913133184835,99]", lists:flatten(format("~p", [[$a, 38.233913133184835, $c]], 50))),
+ ?assertEqual(
+ "[97,38.233913133184835,99]",
+ lists:flatten(format("~p", [[$a, 38.233913133184835, $c]], 50))
+ ),
?assertError(badarg, lists:flatten(format("~s", [[$a, 38.233913133184835, $c]], 50))),
ok.
@@ -572,14 +615,18 @@ binary_printing_test() ->
?assertEqual("<<\"hello\">>", lists:flatten(format("~p", [<<"hello">>], 50))),
?assertEqual("<<104,101,108,108,111>>", lists:flatten(format("~w", [<<"hello">>], 50))),
?assertEqual("<<1,2,3,4>>", lists:flatten(format("~p", [<<1, 2, 3, 4>>], 50))),
- ?assertEqual([1,2,3,4], lists:flatten(format("~s", [<<1, 2, 3, 4>>], 50))),
+ ?assertEqual([1, 2, 3, 4], lists:flatten(format("~s", [<<1, 2, 3, 4>>], 50))),
?assertEqual("hello", lists:flatten(format("~s", [<<"hello">>], 50))),
?assertEqual("hello\nworld", lists:flatten(format("~s", [<<"hello\nworld">>], 50))),
?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))),
- ?assertEqual("<<\"\\\"hello world\\\"\">>", lists:flatten(format("~p", [<<"\"hello world\"">>], 50))),
+ ?assertEqual(
+ "<<\"\\\"hello world\\\"\">>", lists:flatten(format("~p", [<<"\"hello world\"">>], 50))
+ ),
?assertEqual("<<\"hello\\\\world\">>", lists:flatten(format("~p", [<<"hello\\world">>], 50))),
?assertEqual("<<\"hello\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\world">>], 50))),
- ?assertEqual("<<\"hello\\\\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\\world">>], 50))),
+ ?assertEqual(
+ "<<\"hello\\\\\\\\world\">>", lists:flatten(format("~p", [<<"hello\\\\world">>], 50))
+ ),
?assertEqual("<<\"hello\\bworld\">>", lists:flatten(format("~p", [<<"hello\bworld">>], 50))),
?assertEqual("<<\"hello\\tworld\">>", lists:flatten(format("~p", [<<"hello\tworld">>], 50))),
?assertEqual("<<\"hello\\nworld\">>", lists:flatten(format("~p", [<<"hello\nworld">>], 50))),
@@ -594,20 +641,68 @@ binary_printing_test() ->
ok.
bitstring_printing_test() ->
- ?assertEqual("<<1,2,3,1:7>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 100))),
- ?assertEqual("<<1:7>>", lists:flatten(format("~p",
- [<<1:7>>], 100))),
- ?assertEqual("<<1,2,3,...>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 12))),
- ?assertEqual("<<1,2,3,...>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 13))),
- ?assertEqual("<<1,2,3,1:7>>", lists:flatten(format("~p",
- [<<1, 2, 3, 1:7>>], 14))),
+ ?assertEqual(
+ "<<1,2,3,1:7>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 100
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1:7>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1:7>>],
+ 100
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1,2,3,...>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 12
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1,2,3,...>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 13
+ )
+ )
+ ),
+ ?assertEqual(
+ "<<1,2,3,1:7>>",
+ lists:flatten(
+ format(
+ "~p",
+ [<<1, 2, 3, 1:7>>],
+ 14
+ )
+ )
+ ),
?assertEqual("<<..>>", lists:flatten(format("~p", [<<1:7>>], 0))),
?assertEqual("<<...>>", lists:flatten(format("~p", [<<1:7>>], 1))),
- ?assertEqual("[<<1>>,<<2>>]", lists:flatten(format("~p", [[<<1>>, <<2>>]],
- 100))),
+ ?assertEqual(
+ "[<<1>>,<<2>>]",
+ lists:flatten(
+ format(
+ "~p",
+ [[<<1>>, <<2>>]],
+ 100
+ )
+ )
+ ),
?assertEqual("{<<1:7>>}", lists:flatten(format("~p", [{<<1:7>>}], 50))),
ok.
@@ -617,48 +712,91 @@ list_printing_test() ->
?assertEqual("", lists:flatten(format("~s", [[]], 50))),
?assertEqual("...", lists:flatten(format("~s", [[]], -1))),
?assertEqual("[[]]", lists:flatten(format("~p", [[[]]], 50))),
- ?assertEqual("[13,11,10,8,5,4]", lists:flatten(format("~p", [[13,11,10,8,5,4]], 50))),
- ?assertEqual("\"\\rabc\"", lists:flatten(format("~p", [[13,$a, $b, $c]], 50))),
- ?assertEqual("[1,2,3|4]", lists:flatten(format("~p", [[1, 2, 3|4]], 50))),
- ?assertEqual("[...]", lists:flatten(format("~p", [[1, 2, 3,4]], 4))),
+ ?assertEqual("[13,11,10,8,5,4]", lists:flatten(format("~p", [[13, 11, 10, 8, 5, 4]], 50))),
+ ?assertEqual("\"\\rabc\"", lists:flatten(format("~p", [[13, $a, $b, $c]], 50))),
+ ?assertEqual("[1,2,3|4]", lists:flatten(format("~p", [[1, 2, 3 | 4]], 50))),
+ ?assertEqual("[...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 4))),
?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 6))),
?assertEqual("[1,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 7))),
?assertEqual("[1,2,...]", lists:flatten(format("~p", [[1, 2, 3, 4]], 8))),
- ?assertEqual("[1|4]", lists:flatten(format("~p", [[1|4]], 50))),
+ ?assertEqual("[1|4]", lists:flatten(format("~p", [[1 | 4]], 50))),
?assertEqual("[1]", lists:flatten(format("~p", [[1]], 50))),
- ?assertError(badarg, lists:flatten(format("~s", [[1|4]], 50))),
+ ?assertError(badarg, lists:flatten(format("~s", [[1 | 4]], 50))),
?assertEqual("\"hello...\"", lists:flatten(format("~p", ["hello world"], 10))),
?assertEqual("hello w...", lists:flatten(format("~s", ["hello world"], 10))),
?assertEqual("hello world\r\n", lists:flatten(format("~s", ["hello world\r\n"], 50))),
?assertEqual("\rhello world\r\n", lists:flatten(format("~s", ["\rhello world\r\n"], 50))),
- ?assertEqual("\"\\rhello world\\r\\n\"", lists:flatten(format("~p", ["\rhello world\r\n"], 50))),
- ?assertEqual("[13,104,101,108,108,111,32,119,111,114,108,100,13,10]", lists:flatten(format("~w", ["\rhello world\r\n"], 60))),
+ ?assertEqual(
+ "\"\\rhello world\\r\\n\"", lists:flatten(format("~p", ["\rhello world\r\n"], 50))
+ ),
+ ?assertEqual(
+ "[13,104,101,108,108,111,32,119,111,114,108,100,13,10]",
+ lists:flatten(format("~w", ["\rhello world\r\n"], 60))
+ ),
?assertEqual("...", lists:flatten(format("~s", ["\rhello world\r\n"], 3))),
- ?assertEqual("[22835963083295358096932575511191922182123945984,...]",
- lists:flatten(format("~p", [
- [22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984]], 9))),
- ?assertEqual("[22835963083295358096932575511191922182123945984,...]",
- lists:flatten(format("~p", [
- [22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984]], 53))),
+ ?assertEqual(
+ "[22835963083295358096932575511191922182123945984,...]",
+ lists:flatten(
+ format(
+ "~p",
+ [
+ [
+ 22835963083295358096932575511191922182123945984,
+ 22835963083295358096932575511191922182123945984
+ ]
+ ],
+ 9
+ )
+ )
+ ),
+ ?assertEqual(
+ "[22835963083295358096932575511191922182123945984,...]",
+ lists:flatten(
+ format(
+ "~p",
+ [
+ [
+ 22835963083295358096932575511191922182123945984,
+ 22835963083295358096932575511191922182123945984
+ ]
+ ],
+ 53
+ )
+ )
+ ),
%%improper list
- ?assertEqual("[1,2,3|4]", lists:flatten(format("~P", [[1|[2|[3|4]]], 5], 50))),
- ?assertEqual("[1|1]", lists:flatten(format("~P", [[1|1], 5], 50))),
- ?assertEqual("[9|9]", lists:flatten(format("~p", [[9|9]], 50))),
+ ?assertEqual("[1,2,3|4]", lists:flatten(format("~P", [[1 | [2 | [3 | 4]]], 5], 50))),
+ ?assertEqual("[1|1]", lists:flatten(format("~P", [[1 | 1], 5], 50))),
+ ?assertEqual("[9|9]", lists:flatten(format("~p", [[9 | 9]], 50))),
ok.
iolist_printing_test() ->
- ?assertEqual("iolist: HelloIamaniolist",
- lists:flatten(format("iolist: ~s", [[$H, $e, $l, $l, $o, "I", ["am", [<<"an">>], [$i, $o, $l, $i, $s, $t]]]], 1000))),
- ?assertEqual("123...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 6))),
- ?assertEqual("123456...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 9))),
- ?assertEqual("123456789H...",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 13))),
- ?assertEqual("123456789HellIamaniolist",
- lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 30))),
+ ?assertEqual(
+ "iolist: HelloIamaniolist",
+ lists:flatten(
+ format(
+ "iolist: ~s",
+ [[$H, $e, $l, $l, $o, "I", ["am", [<<"an">>], [$i, $o, $l, $i, $s, $t]]]],
+ 1000
+ )
+ )
+ ),
+ ?assertEqual(
+ "123...",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 6))
+ ),
+ ?assertEqual(
+ "123456...",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 9))
+ ),
+ ?assertEqual(
+ "123456789H...",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 13))
+ ),
+ ?assertEqual(
+ "123456789HellIamaniolist",
+ lists:flatten(format("~s", [[<<"123456789">>, "HellIamaniolist"]], 30))
+ ),
ok.
@@ -671,22 +809,48 @@ tuple_printing_test() ->
?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 3))),
?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 4))),
?assertEqual("{...}", lists:flatten(format("~p", [{foo}], 5))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 6))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 7))),
- ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo,bar}], 9))),
- ?assertEqual("{foo,bar}", lists:flatten(format("~p", [{foo,bar}], 10))),
- ?assertEqual("{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(format("~w", [
+ ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 6))),
+ ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 7))),
+ ?assertEqual("{foo,...}", lists:flatten(format("~p", [{foo, bar}], 9))),
+ ?assertEqual("{foo,bar}", lists:flatten(format("~p", [{foo, bar}], 10))),
+ ?assertEqual(
+ "{22835963083295358096932575511191922182123945984,...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
{22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}], 10))),
- ?assertEqual("{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(format("~w", [
+ 22835963083295358096932575511191922182123945984}
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "{22835963083295358096932575511191922182123945984,...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ {22835963083295358096932575511191922182123945984, bar}
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "{22835963083295358096932575511191922182123945984,...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
{22835963083295358096932575511191922182123945984,
- bar}], 10))),
- ?assertEqual("{22835963083295358096932575511191922182123945984,...}",
- lists:flatten(format("~w", [
- {22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}], 53))),
+ 22835963083295358096932575511191922182123945984}
+ ],
+ 53
+ )
+ )
+ ),
ok.
map_printing_test() ->
@@ -698,40 +862,102 @@ map_printing_test() ->
?assertError(badarg, lists:flatten(format("~s", [maps:new()], 50))),
?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 1))),
?assertEqual("#{...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 6))),
- ?assertEqual("#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 7))),
- ?assertEqual("#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 9))),
- ?assertEqual("#{bar => foo}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 10))),
- ?assertEqual("#{bar => ...,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 9))),
- ?assertEqual("#{bar => foo,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 10))),
- ?assertEqual("#{bar => foo,...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 17))),
- ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 18))),
- ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 19))),
- ?assertEqual("#{bar => foo,foo => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 20))),
- ?assertEqual("#{bar => foo,foo => bar}", lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 21))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- 22835963083295358096932575511191922182123945984}])], 10))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- bar}])], 10))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => ...}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- bar}])], 53))),
- ?assertEqual("#{22835963083295358096932575511191922182123945984 => bar}",
- lists:flatten(format("~w", [
- maps:from_list([{22835963083295358096932575511191922182123945984,
- bar}])], 54))),
+ ?assertEqual(
+ "#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 7))
+ ),
+ ?assertEqual(
+ "#{bar => ...}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 9))
+ ),
+ ?assertEqual(
+ "#{bar => foo}", lists:flatten(format("~p", [maps:from_list([{bar, foo}])], 10))
+ ),
+ ?assertEqual(
+ "#{bar => ...,...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 9))
+ ),
+ ?assertEqual(
+ "#{bar => foo,...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 10))
+ ),
+ ?assertEqual(
+ "#{bar => foo,...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 17))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => ...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 18))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => ...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 19))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => ...}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 20))
+ ),
+ ?assertEqual(
+ "#{bar => foo,foo => bar}",
+ lists:flatten(format("~p", [maps:from_list([{bar, foo}, {foo, bar}])], 21))
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => ...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([
+ {22835963083295358096932575511191922182123945984,
+ 22835963083295358096932575511191922182123945984}
+ ])
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => ...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
+ ],
+ 10
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => ...}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
+ ],
+ 53
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{22835963083295358096932575511191922182123945984 => bar}",
+ lists:flatten(
+ format(
+ "~w",
+ [
+ maps:from_list([{22835963083295358096932575511191922182123945984, bar}])
+ ],
+ 54
+ )
+ )
+ ),
ok;
false ->
ok
end.
unicode_test() ->
- ?assertEqual([231,167,129], lists:flatten(format("~s", [<<231,167,129>>], 50))),
- ?assertEqual([31169], lists:flatten(format("~ts", [<<231,167,129>>], 50))),
+ ?assertEqual([231, 167, 129], lists:flatten(format("~s", [<<231, 167, 129>>], 50))),
+ ?assertEqual([31169], lists:flatten(format("~ts", [<<231, 167, 129>>], 50))),
ok.
depth_limit_test() ->
@@ -754,21 +980,54 @@ depth_limit_test() ->
case erlang:is_builtin(erlang, is_map, 1) of
true ->
- ?assertEqual("#{a => #{...}}",
- lists:flatten(format("~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 2], 50))),
- ?assertEqual("#{a => #{b => #{...}}}",
- lists:flatten(format("~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 3], 50))),
- ?assertEqual("#{a => #{b => #{c => d}}}",
- lists:flatten(format("~P",
- [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 4], 50))),
+ ?assertEqual(
+ "#{a => #{...}}",
+ lists:flatten(
+ format(
+ "~P",
+ [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 2],
+ 50
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{a => #{b => #{...}}}",
+ lists:flatten(
+ format(
+ "~P",
+ [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 3],
+ 50
+ )
+ )
+ ),
+ ?assertEqual(
+ "#{a => #{b => #{c => d}}}",
+ lists:flatten(
+ format(
+ "~P",
+ [maps:from_list([{a, maps:from_list([{b, maps:from_list([{c, d}])}])}]), 4],
+ 50
+ )
+ )
+ ),
?assertEqual("#{}", lists:flatten(format("~P", [maps:new(), 1], 50))),
- ?assertEqual("#{...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 1], 50))),
- ?assertEqual("#{1 => 1,...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 2], 50))),
- ?assertEqual("#{1 => 1,2 => 2,...}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 3], 50))),
- ?assertEqual("#{1 => 1,2 => 2,3 => 3}", lists:flatten(format("~P", [maps:from_list([{1,1}, {2,2}, {3,3}]), 4], 50))),
+ ?assertEqual(
+ "#{...}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 1], 50))
+ ),
+ ?assertEqual(
+ "#{1 => 1,...}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 2], 50))
+ ),
+ ?assertEqual(
+ "#{1 => 1,2 => 2,...}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 3], 50))
+ ),
+ ?assertEqual(
+ "#{1 => 1,2 => 2,3 => 3}",
+ lists:flatten(format("~P", [maps:from_list([{1, 1}, {2, 2}, {3, 3}]), 4], 50))
+ ),
ok;
false ->
@@ -776,8 +1035,14 @@ depth_limit_test() ->
end,
?assertEqual("{\"a\",[...]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 3], 50))),
- ?assertEqual("{\"a\",[\"b\",[[...]|...]]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 6], 50))),
- ?assertEqual("{\"a\",[\"b\",[\"c\",[\"d\"]]]}", lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 9], 50))),
+ ?assertEqual(
+ "{\"a\",[\"b\",[[...]|...]]}",
+ lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 6], 50))
+ ),
+ ?assertEqual(
+ "{\"a\",[\"b\",[\"c\",[\"d\"]]]}",
+ lists:flatten(format("~P", [{"a", ["b", ["c", ["d"]]]}, 9], 50))
+ ),
?assertEqual("[...]", lists:flatten(format("~P", [[1, 2, 3], 1], 50))),
?assertEqual("[1|...]", lists:flatten(format("~P", [[1, 2, 3], 2], 50))),
@@ -808,21 +1073,23 @@ depth_limit_test() ->
%% depth limiting for some reason works in 4 byte chunks on printable binaries?
?assertEqual("<<\"hell\"...>>", lists:flatten(format("~P", [<<"hello world">>, 2], 50))),
- ?assertEqual("<<\"abcd\"...>>", lists:flatten(format("~P", [<<$a, $b, $c, $d, $e, 0>>, 2], 50))),
+ ?assertEqual(
+ "<<\"abcd\"...>>", lists:flatten(format("~P", [<<$a, $b, $c, $d, $e, 0>>, 2], 50))
+ ),
%% I don't even know...
?assertEqual("<<>>", lists:flatten(format("~P", [<<>>, 1], 50))),
?assertEqual("<<>>", lists:flatten(format("~W", [<<>>, 1], 50))),
- ?assertEqual("{abc,<<\"abc\\\"\">>}", lists:flatten(format("~P", [{abc,<<"abc\"">>}, 4], 50))),
+ ?assertEqual("{abc,<<\"abc\\\"\">>}", lists:flatten(format("~P", [{abc, <<"abc\"">>}, 4], 50))),
ok.
print_terms_without_format_string_test() ->
?assertError(badarg, format({hello, world}, [], 50)),
?assertError(badarg, format([{google, bomb}], [], 50)),
- ?assertError(badarg, format([$h,$e,$l,$l,$o, 3594], [], 50)),
- ?assertEqual("helloworld", lists:flatten(format([$h,$e,$l,$l,$o, "world"], [], 50))),
+ ?assertError(badarg, format([$h, $e, $l, $l, $o, 3594], [], 50)),
+ ?assertEqual("helloworld", lists:flatten(format([$h, $e, $l, $l, $o, "world"], [], 50))),
?assertEqual("hello", lists:flatten(format(<<"hello">>, [], 50))),
?assertEqual("hello", lists:flatten(format('hello', [], 50))),
?assertError(badarg, format(<<1, 2, 3, 1:7>>, [], 100)),
@@ -830,9 +1097,9 @@ print_terms_without_format_string_test() ->
ok.
improper_io_list_test() ->
- ?assertEqual(">hello", lists:flatten(format('~s', [[$>|<<"hello">>]], 50))),
- ?assertEqual(">hello", lists:flatten(format('~ts', [[$>|<<"hello">>]], 50))),
- ?assertEqual("helloworld", lists:flatten(format('~ts', [[<<"hello">>|<<"world">>]], 50))),
+ ?assertEqual(">hello", lists:flatten(format('~s', [[$> | <<"hello">>]], 50))),
+ ?assertEqual(">hello", lists:flatten(format('~ts', [[$> | <<"hello">>]], 50))),
+ ?assertEqual("helloworld", lists:flatten(format('~ts', [[<<"hello">> | <<"world">>]], 50))),
ok.
--endif.
\ No newline at end of file
+-endif.
diff --git a/src/couch_log/src/couch_log_trunc_io_fmt.erl b/src/couch_log/src/couch_log_trunc_io_fmt.erl
index 77f0b2e0d..cf18019ad 100644
--- a/src/couch_log/src/couch_log_trunc_io_fmt.erl
+++ b/src/couch_log/src/couch_log_trunc_io_fmt.erl
@@ -22,12 +22,11 @@
%% lager_Format.
-module(couch_log_trunc_io_fmt).
-
-export([format/3, format/4]).
-record(options, {
- chomp = false :: boolean()
- }).
+ chomp = false :: boolean()
+}).
format(FmtStr, Args, MaxLen) ->
format(FmtStr, Args, MaxLen, []).
@@ -46,11 +45,15 @@ format(FmtStr, Args, MaxLen, Opts) when is_list(FmtStr) ->
{Cs2, MaxLen2} = build(Cs, [], MaxLen, Options),
%% count how many terms remain
{Count, StrLen} = lists:foldl(
- fun({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
+ fun
+ ({_C, _As, _F, _Adj, _P, _Pad, _Enc}, {Terms, Chars}) ->
{Terms + 1, Chars};
(_, {Terms, Chars}) ->
{Terms, Chars + 1}
- end, {0, 0}, Cs2),
+ end,
+ {0, 0},
+ Cs2
+ ),
build2(Cs2, Count, MaxLen2 - StrLen);
false ->
erlang:error(badarg)
@@ -58,114 +61,116 @@ format(FmtStr, Args, MaxLen, Opts) when is_list(FmtStr) ->
format(_FmtStr, _Args, _MaxLen, _Opts) ->
erlang:error(badarg).
-collect([$~|Fmt0], Args0) ->
- {C,Fmt1,Args1} = collect_cseq(Fmt0, Args0),
- [C|collect(Fmt1, Args1)];
-collect([C|Fmt], Args) ->
- [C|collect(Fmt, Args)];
-collect([], []) -> [].
+collect([$~ | Fmt0], Args0) ->
+ {C, Fmt1, Args1} = collect_cseq(Fmt0, Args0),
+ [C | collect(Fmt1, Args1)];
+collect([C | Fmt], Args) ->
+ [C | collect(Fmt, Args)];
+collect([], []) ->
+ [].
collect_cseq(Fmt0, Args0) ->
- {F,Ad,Fmt1,Args1} = field_width(Fmt0, Args0),
- {P,Fmt2,Args2} = precision(Fmt1, Args1),
- {Pad,Fmt3,Args3} = pad_char(Fmt2, Args2),
- {Encoding,Fmt4,Args4} = encoding(Fmt3, Args3),
- {C,As,Fmt5,Args5} = collect_cc(Fmt4, Args4),
- {{C,As,F,Ad,P,Pad,Encoding},Fmt5,Args5}.
-
-encoding([$t|Fmt],Args) ->
- {unicode,Fmt,Args};
-encoding(Fmt,Args) ->
- {latin1,Fmt,Args}.
-
-field_width([$-|Fmt0], Args0) ->
- {F,Fmt,Args} = field_value(Fmt0, Args0),
+ {F, Ad, Fmt1, Args1} = field_width(Fmt0, Args0),
+ {P, Fmt2, Args2} = precision(Fmt1, Args1),
+ {Pad, Fmt3, Args3} = pad_char(Fmt2, Args2),
+ {Encoding, Fmt4, Args4} = encoding(Fmt3, Args3),
+ {C, As, Fmt5, Args5} = collect_cc(Fmt4, Args4),
+ {{C, As, F, Ad, P, Pad, Encoding}, Fmt5, Args5}.
+
+encoding([$t | Fmt], Args) ->
+ {unicode, Fmt, Args};
+encoding(Fmt, Args) ->
+ {latin1, Fmt, Args}.
+
+field_width([$- | Fmt0], Args0) ->
+ {F, Fmt, Args} = field_value(Fmt0, Args0),
field_width(-F, Fmt, Args);
field_width(Fmt0, Args0) ->
- {F,Fmt,Args} = field_value(Fmt0, Args0),
+ {F, Fmt, Args} = field_value(Fmt0, Args0),
field_width(F, Fmt, Args).
field_width(F, Fmt, Args) when F < 0 ->
- {-F,left,Fmt,Args};
+ {-F, left, Fmt, Args};
field_width(F, Fmt, Args) when F >= 0 ->
- {F,right,Fmt,Args}.
+ {F, right, Fmt, Args}.
-precision([$.|Fmt], Args) ->
+precision([$. | Fmt], Args) ->
field_value(Fmt, Args);
precision(Fmt, Args) ->
- {none,Fmt,Args}.
+ {none, Fmt, Args}.
-field_value([$*|Fmt], [A|Args]) when is_integer(A) ->
- {A,Fmt,Args};
-field_value([C|Fmt], Args) when is_integer(C), C >= $0, C =< $9 ->
- field_value([C|Fmt], Args, 0);
+field_value([$* | Fmt], [A | Args]) when is_integer(A) ->
+ {A, Fmt, Args};
+field_value([C | Fmt], Args) when is_integer(C), C >= $0, C =< $9 ->
+ field_value([C | Fmt], Args, 0);
field_value(Fmt, Args) ->
- {none,Fmt,Args}.
+ {none, Fmt, Args}.
-field_value([C|Fmt], Args, F) when is_integer(C), C >= $0, C =< $9 ->
- field_value(Fmt, Args, 10*F + (C - $0));
-field_value(Fmt, Args, F) -> %Default case
- {F,Fmt,Args}.
+field_value([C | Fmt], Args, F) when is_integer(C), C >= $0, C =< $9 ->
+ field_value(Fmt, Args, 10 * F + (C - $0));
+%Default case
+field_value(Fmt, Args, F) ->
+ {F, Fmt, Args}.
-pad_char([$.,$*|Fmt], [Pad|Args]) -> {Pad,Fmt,Args};
-pad_char([$.,Pad|Fmt], Args) -> {Pad,Fmt,Args};
-pad_char(Fmt, Args) -> {$\s,Fmt,Args}.
+pad_char([$., $* | Fmt], [Pad | Args]) -> {Pad, Fmt, Args};
+pad_char([$., Pad | Fmt], Args) -> {Pad, Fmt, Args};
+pad_char(Fmt, Args) -> {$\s, Fmt, Args}.
%% collect_cc([FormatChar], [Argument]) ->
%% {Control,[ControlArg],[FormatChar],[Arg]}.
%% Here we collect the argments for each control character.
%% Be explicit to cause failure early.
-collect_cc([$w|Fmt], [A|Args]) -> {$w,[A],Fmt,Args};
-collect_cc([$p|Fmt], [A|Args]) -> {$p,[A],Fmt,Args};
-collect_cc([$W|Fmt], [A,Depth|Args]) -> {$W,[A,Depth],Fmt,Args};
-collect_cc([$P|Fmt], [A,Depth|Args]) -> {$P,[A,Depth],Fmt,Args};
-collect_cc([$s|Fmt], [A|Args]) -> {$s,[A],Fmt,Args};
-collect_cc([$r|Fmt], [A|Args]) -> {$r,[A],Fmt,Args};
-collect_cc([$e|Fmt], [A|Args]) -> {$e,[A],Fmt,Args};
-collect_cc([$f|Fmt], [A|Args]) -> {$f,[A],Fmt,Args};
-collect_cc([$g|Fmt], [A|Args]) -> {$g,[A],Fmt,Args};
-collect_cc([$b|Fmt], [A|Args]) -> {$b,[A],Fmt,Args};
-collect_cc([$B|Fmt], [A|Args]) -> {$B,[A],Fmt,Args};
-collect_cc([$x|Fmt], [A,Prefix|Args]) -> {$x,[A,Prefix],Fmt,Args};
-collect_cc([$X|Fmt], [A,Prefix|Args]) -> {$X,[A,Prefix],Fmt,Args};
-collect_cc([$+|Fmt], [A|Args]) -> {$+,[A],Fmt,Args};
-collect_cc([$#|Fmt], [A|Args]) -> {$#,[A],Fmt,Args};
-collect_cc([$c|Fmt], [A|Args]) -> {$c,[A],Fmt,Args};
-collect_cc([$~|Fmt], Args) when is_list(Args) -> {$~,[],Fmt,Args};
-collect_cc([$n|Fmt], Args) when is_list(Args) -> {$n,[],Fmt,Args};
-collect_cc([$i|Fmt], [A|Args]) -> {$i,[A],Fmt,Args}.
-
+collect_cc([$w | Fmt], [A | Args]) -> {$w, [A], Fmt, Args};
+collect_cc([$p | Fmt], [A | Args]) -> {$p, [A], Fmt, Args};
+collect_cc([$W | Fmt], [A, Depth | Args]) -> {$W, [A, Depth], Fmt, Args};
+collect_cc([$P | Fmt], [A, Depth | Args]) -> {$P, [A, Depth], Fmt, Args};
+collect_cc([$s | Fmt], [A | Args]) -> {$s, [A], Fmt, Args};
+collect_cc([$r | Fmt], [A | Args]) -> {$r, [A], Fmt, Args};
+collect_cc([$e | Fmt], [A | Args]) -> {$e, [A], Fmt, Args};
+collect_cc([$f | Fmt], [A | Args]) -> {$f, [A], Fmt, Args};
+collect_cc([$g | Fmt], [A | Args]) -> {$g, [A], Fmt, Args};
+collect_cc([$b | Fmt], [A | Args]) -> {$b, [A], Fmt, Args};
+collect_cc([$B | Fmt], [A | Args]) -> {$B, [A], Fmt, Args};
+collect_cc([$x | Fmt], [A, Prefix | Args]) -> {$x, [A, Prefix], Fmt, Args};
+collect_cc([$X | Fmt], [A, Prefix | Args]) -> {$X, [A, Prefix], Fmt, Args};
+collect_cc([$+ | Fmt], [A | Args]) -> {$+, [A], Fmt, Args};
+collect_cc([$# | Fmt], [A | Args]) -> {$#, [A], Fmt, Args};
+collect_cc([$c | Fmt], [A | Args]) -> {$c, [A], Fmt, Args};
+collect_cc([$~ | Fmt], Args) when is_list(Args) -> {$~, [], Fmt, Args};
+collect_cc([$n | Fmt], Args) when is_list(Args) -> {$n, [], Fmt, Args};
+collect_cc([$i | Fmt], [A | Args]) -> {$i, [A], Fmt, Args}.
%% build([Control], Pc, Indentation) -> [Char].
%% Interpret the control structures. Count the number of print
%% remaining and only calculate indentation when necessary. Must also
%% be smart when calculating indentation for characters in format.
-build([{$n, _, _, _, _, _, _}], Acc, MaxLen, #options{chomp=true}) ->
+build([{$n, _, _, _, _, _, _}], Acc, MaxLen, #options{chomp = true}) ->
%% trailing ~n, ignore
{lists:reverse(Acc), MaxLen};
-build([{C,As,F,Ad,P,Pad,Enc}|Cs], Acc, MaxLen, O) ->
+build([{C, As, F, Ad, P, Pad, Enc} | Cs], Acc, MaxLen, O) ->
{S, MaxLen2} = control(C, As, F, Ad, P, Pad, Enc, MaxLen),
- build(Cs, [S|Acc], MaxLen2, O);
-build([$\n], Acc, MaxLen, #options{chomp=true}) ->
+ build(Cs, [S | Acc], MaxLen2, O);
+build([$\n], Acc, MaxLen, #options{chomp = true}) ->
%% trailing \n, ignore
{lists:reverse(Acc), MaxLen};
-build([$\n|Cs], Acc, MaxLen, O) ->
- build(Cs, [$\n|Acc], MaxLen - 1, O);
-build([$\t|Cs], Acc, MaxLen, O) ->
- build(Cs, [$\t|Acc], MaxLen - 1, O);
-build([C|Cs], Acc, MaxLen, O) ->
- build(Cs, [C|Acc], MaxLen - 1, O);
+build([$\n | Cs], Acc, MaxLen, O) ->
+ build(Cs, [$\n | Acc], MaxLen - 1, O);
+build([$\t | Cs], Acc, MaxLen, O) ->
+ build(Cs, [$\t | Acc], MaxLen - 1, O);
+build([C | Cs], Acc, MaxLen, O) ->
+ build(Cs, [C | Acc], MaxLen - 1, O);
build([], Acc, MaxLen, _O) ->
{lists:reverse(Acc), MaxLen}.
-build2([{C,As,F,Ad,P,Pad,Enc}|Cs], Count, MaxLen) ->
+build2([{C, As, F, Ad, P, Pad, Enc} | Cs], Count, MaxLen) ->
{S, Len} = control2(C, As, F, Ad, P, Pad, Enc, MaxLen div Count),
- [S|build2(Cs, Count - 1, MaxLen - Len)];
-build2([C|Cs], Count, MaxLen) ->
- [C|build2(Cs, Count, MaxLen)];
-build2([], _, _) -> [].
+ [S | build2(Cs, Count - 1, MaxLen - Len)];
+build2([C | Cs], Count, MaxLen) ->
+ [C | build2(Cs, Count, MaxLen)];
+build2([], _, _) ->
+ [].
%% control(FormatChar, [Argument], FieldWidth, Adjust, Precision, PadChar,
%% Indentation) -> [Char]
@@ -187,20 +192,26 @@ control($b, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
control($B, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
Res = unprefixed_integer(A, F, Adj, base(P), Pad, false),
{Res, L - lists:flatlength(Res)};
-control($x, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A),
- is_atom(Prefix) ->
+control($x, [A, Prefix], F, Adj, P, Pad, _Enc, L) when
+ is_integer(A),
+ is_atom(Prefix)
+->
Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), true),
{Res, L - lists:flatlength(Res)};
-control($x, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
+control($x, [A, Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
+ %Check if Prefix a character list
+ true = io_lib:deep_char_list(Prefix),
Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, true),
{Res, L - lists:flatlength(Res)};
-control($X, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A),
- is_atom(Prefix) ->
+control($X, [A, Prefix], F, Adj, P, Pad, _Enc, L) when
+ is_integer(A),
+ is_atom(Prefix)
+->
Res = prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), false),
{Res, L - lists:flatlength(Res)};
-control($X, [A,Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
- true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
+control($X, [A, Prefix], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
+ %Check if Prefix a character list
+ true = io_lib:deep_char_list(Prefix),
Res = prefixed_integer(A, F, Adj, base(P), Pad, Prefix, false),
{Res, L - lists:flatlength(Res)};
control($+, [A], F, Adj, P, Pad, _Enc, L) when is_integer(A) ->
@@ -241,11 +252,11 @@ control2($w, [A], F, Adj, P, Pad, _Enc, L) ->
control2($p, [A], _F, _Adj, _P, _Pad, _Enc, L) ->
Term = couch_log_trunc_io:fprint(A, L, [{lists_as_strings, true}]),
{Term, lists:flatlength(Term)};
-control2($W, [A,Depth], F, Adj, P, Pad, _Enc, L) when is_integer(Depth) ->
+control2($W, [A, Depth], F, Adj, P, Pad, _Enc, L) when is_integer(Depth) ->
Term = couch_log_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, false}]),
Res = term(Term, F, Adj, P, Pad),
{Res, lists:flatlength(Res)};
-control2($P, [A,Depth], _F, _Adj, _P, _Pad, _Enc, L) when is_integer(Depth) ->
+control2($P, [A, Depth], _F, _Adj, _P, _Pad, _Enc, L) when is_integer(Depth) ->
Term = couch_log_trunc_io:fprint(A, L, [{depth, Depth}, {lists_as_strings, true}]),
{Term, lists:flatlength(Term)};
control2($s, [L0], F, Adj, P, Pad, latin1, L) ->
@@ -261,18 +272,18 @@ control2($r, [R], F, Adj, P, Pad, _Enc, _L) ->
Res = string(List, F, Adj, P, Pad),
{Res, lists:flatlength(Res)}.
-iolist_to_chars([C|Cs]) when is_integer(C), C >= $\000, C =< $\377 ->
+iolist_to_chars([C | Cs]) when is_integer(C), C >= $\000, C =< $\377 ->
[C | iolist_to_chars(Cs)];
-iolist_to_chars([I|Cs]) ->
+iolist_to_chars([I | Cs]) ->
[iolist_to_chars(I) | iolist_to_chars(Cs)];
iolist_to_chars([]) ->
[];
iolist_to_chars(B) when is_binary(B) ->
binary_to_list(B).
-cdata_to_chars([C|Cs]) when is_integer(C), C >= $\000 ->
+cdata_to_chars([C | Cs]) when is_integer(C), C >= $\000 ->
[C | cdata_to_chars(Cs)];
-cdata_to_chars([I|Cs]) ->
+cdata_to_chars([I | Cs]) ->
[cdata_to_chars(I) | cdata_to_chars(Cs)];
cdata_to_chars([]) ->
[];
@@ -284,12 +295,12 @@ cdata_to_chars(B) when is_binary(B) ->
make_options([], Options) ->
Options;
-make_options([{chomp, Bool}|T], Options) when is_boolean(Bool) ->
- make_options(T, Options#options{chomp=Bool}).
+make_options([{chomp, Bool} | T], Options) when is_boolean(Bool) ->
+ make_options(T, Options#options{chomp = Bool}).
-ifdef(UNICODE_AS_BINARIES).
uniconv(C) ->
- unicode:characters_to_binary(C,unicode).
+ unicode:characters_to_binary(C, unicode).
-else.
uniconv(C) ->
C.
@@ -305,21 +316,28 @@ base(B) when is_integer(B) ->
%% Adjust the characters within the field if length less than Max padding
%% with PadChar.
-term(T, none, _Adj, none, _Pad) -> T;
-term(T, none, Adj, P, Pad) -> term(T, P, Adj, P, Pad);
+term(T, none, _Adj, none, _Pad) ->
+ T;
+term(T, none, Adj, P, Pad) ->
+ term(T, P, Adj, P, Pad);
term(T, F, Adj, P0, Pad) ->
L = lists:flatlength(T),
- P = case P0 of none -> erlang:min(L, F); _ -> P0 end,
+ P =
+ case P0 of
+ none -> erlang:min(L, F);
+ _ -> P0
+ end,
if
L > P ->
- adjust(chars($*, P), chars(Pad, F-P), Adj);
+ adjust(chars($*, P), chars(Pad, F - P), Adj);
F >= P ->
- adjust(T, chars(Pad, F-L), Adj)
+ adjust(T, chars(Pad, F - L), Adj)
end.
%% fwrite_e(Float, Field, Adjust, Precision, PadChar)
-fwrite_e(Fl, none, Adj, none, Pad) -> %Default values
+%Default values
+fwrite_e(Fl, none, Adj, none, Pad) ->
fwrite_e(Fl, none, Adj, 6, Pad);
fwrite_e(Fl, none, _Adj, P, _Pad) when P >= 2 ->
float_e(Fl, float_data(Fl), P);
@@ -328,12 +346,13 @@ fwrite_e(Fl, F, Adj, none, Pad) ->
fwrite_e(Fl, F, Adj, P, Pad) when P >= 2 ->
term(float_e(Fl, float_data(Fl), P), F, Adj, F, Pad).
-float_e(Fl, Fd, P) when Fl < 0.0 -> %Negative numbers
- [$-|float_e(-Fl, Fd, P)];
-float_e(_Fl, {Ds,E}, P) ->
- case float_man(Ds, 1, P-1) of
- {[$0|Fs],true} -> [[$1|Fs]|float_exp(E)];
- {Fs,false} -> [Fs|float_exp(E-1)]
+%Negative numbers
+float_e(Fl, Fd, P) when Fl < 0.0 ->
+ [$- | float_e(-Fl, Fd, P)];
+float_e(_Fl, {Ds, E}, P) ->
+ case float_man(Ds, 1, P - 1) of
+ {[$0 | Fs], true} -> [[$1 | Fs] | float_exp(E)];
+ {Fs, false} -> [Fs | float_exp(E - 1)]
end.
%% float_man([Digit], Icount, Dcount) -> {[Chars],CarryFlag}.
@@ -342,38 +361,43 @@ float_e(_Fl, {Ds,E}, P) ->
%% caller decide what to do at top.
float_man(Ds, 0, Dc) ->
- {Cs,C} = float_man(Ds, Dc),
- {[$.|Cs],C};
-float_man([D|Ds], I, Dc) ->
- case float_man(Ds, I-1, Dc) of
- {Cs,true} when D =:= $9 -> {[$0|Cs],true};
- {Cs,true} -> {[D+1|Cs],false};
- {Cs,false} -> {[D|Cs],false}
+ {Cs, C} = float_man(Ds, Dc),
+ {[$. | Cs], C};
+float_man([D | Ds], I, Dc) ->
+ case float_man(Ds, I - 1, Dc) of
+ {Cs, true} when D =:= $9 -> {[$0 | Cs], true};
+ {Cs, true} -> {[D + 1 | Cs], false};
+ {Cs, false} -> {[D | Cs], false}
end;
-float_man([], I, Dc) -> %Pad with 0's
- {string:chars($0, I, [$.|string:chars($0, Dc)]),false}.
-
-float_man([D|_], 0) when D >= $5 -> {[],true};
-float_man([_|_], 0) -> {[],false};
-float_man([D|Ds], Dc) ->
- case float_man(Ds, Dc-1) of
- {Cs,true} when D =:= $9 -> {[$0|Cs],true};
- {Cs,true} -> {[D+1|Cs],false};
- {Cs,false} -> {[D|Cs],false}
+%Pad with 0's
+float_man([], I, Dc) ->
+ {string:chars($0, I, [$. | string:chars($0, Dc)]), false}.
+
+float_man([D | _], 0) when D >= $5 -> {[], true};
+float_man([_ | _], 0) ->
+ {[], false};
+float_man([D | Ds], Dc) ->
+ case float_man(Ds, Dc - 1) of
+ {Cs, true} when D =:= $9 -> {[$0 | Cs], true};
+ {Cs, true} -> {[D + 1 | Cs], false};
+ {Cs, false} -> {[D | Cs], false}
end;
-float_man([], Dc) -> {string:chars($0, Dc),false}. %Pad with 0's
+%Pad with 0's
+float_man([], Dc) ->
+ {string:chars($0, Dc), false}.
%% float_exp(Exponent) -> [Char].
%% Generate the exponent of a floating point number. Always include sign.
float_exp(E) when E >= 0 ->
- [$e,$+|integer_to_list(E)];
+ [$e, $+ | integer_to_list(E)];
float_exp(E) ->
- [$e|integer_to_list(E)].
+ [$e | integer_to_list(E)].
%% fwrite_f(FloatData, Field, Adjust, Precision, PadChar)
-fwrite_f(Fl, none, Adj, none, Pad) -> %Default values
+%Default values
+fwrite_f(Fl, none, Adj, none, Pad) ->
fwrite_f(Fl, none, Adj, 6, Pad);
fwrite_f(Fl, none, _Adj, P, _Pad) when P >= 1 ->
float_f(Fl, float_data(Fl), P);
@@ -383,13 +407,15 @@ fwrite_f(Fl, F, Adj, P, Pad) when P >= 1 ->
term(float_f(Fl, float_data(Fl), P), F, Adj, F, Pad).
float_f(Fl, Fd, P) when Fl < 0.0 ->
- [$-|float_f(-Fl, Fd, P)];
-float_f(Fl, {Ds,E}, P) when E =< 0 ->
- float_f(Fl, {string:chars($0, -E+1, Ds),1}, P); %Prepend enough 0's
-float_f(_Fl, {Ds,E}, P) ->
+ [$- | float_f(-Fl, Fd, P)];
+float_f(Fl, {Ds, E}, P) when E =< 0 ->
+ %Prepend enough 0's
+ float_f(Fl, {string:chars($0, -E + 1, Ds), 1}, P);
+float_f(_Fl, {Ds, E}, P) ->
case float_man(Ds, E, P) of
- {Fs,true} -> "1" ++ Fs; %Handle carry
- {Fs,false} -> Fs
+ %Handle carry
+ {Fs, true} -> "1" ++ Fs;
+ {Fs, false} -> Fs
end.
%% float_data([FloatChar]) -> {[Digit],Exponent}
@@ -397,11 +423,11 @@ float_f(_Fl, {Ds,E}, P) ->
float_data(Fl) ->
float_data(float_to_list(Fl), []).
-float_data([$e|E], Ds) ->
- {lists:reverse(Ds),list_to_integer(E)+1};
-float_data([D|Cs], Ds) when D >= $0, D =< $9 ->
- float_data(Cs, [D|Ds]);
-float_data([_|Cs], Ds) ->
+float_data([$e | E], Ds) ->
+ {lists:reverse(Ds), list_to_integer(E) + 1};
+float_data([D | Cs], Ds) when D >= $0, D =< $9 ->
+ float_data(Cs, [D | Ds]);
+float_data([_ | Cs], Ds) ->
float_data(Cs, Ds).
%% fwrite_g(Float, Field, Adjust, Precision, PadChar)
@@ -413,83 +439,98 @@ fwrite_g(Fl, F, Adj, none, Pad) ->
fwrite_g(Fl, F, Adj, 6, Pad);
fwrite_g(Fl, F, Adj, P, Pad) when P >= 1 ->
A = abs(Fl),
- E = if A < 1.0e-1 -> -2;
- A < 1.0e0 -> -1;
- A < 1.0e1 -> 0;
- A < 1.0e2 -> 1;
- A < 1.0e3 -> 2;
- A < 1.0e4 -> 3;
- true -> fwrite_f
- end,
- if P =< 1, E =:= -1;
- P-1 > E, E >= -1 ->
- fwrite_f(Fl, F, Adj, P-1-E, Pad);
- P =< 1 ->
- fwrite_e(Fl, F, Adj, 2, Pad);
- true ->
- fwrite_e(Fl, F, Adj, P, Pad)
+ E =
+ if
+ A < 1.0e-1 -> -2;
+ A < 1.0e0 -> -1;
+ A < 1.0e1 -> 0;
+ A < 1.0e2 -> 1;
+ A < 1.0e3 -> 2;
+ A < 1.0e4 -> 3;
+ true -> fwrite_f
+ end,
+ if
+ P =< 1, E =:= -1;
+ P - 1 > E, E >= -1 ->
+ fwrite_f(Fl, F, Adj, P - 1 - E, Pad);
+ P =< 1 ->
+ fwrite_e(Fl, F, Adj, 2, Pad);
+ true ->
+ fwrite_e(Fl, F, Adj, P, Pad)
end.
-
%% string(String, Field, Adjust, Precision, PadChar)
-string(S, none, _Adj, none, _Pad) -> S;
+string(S, none, _Adj, none, _Pad) ->
+ S;
string(S, F, Adj, none, Pad) ->
string_field(S, F, Adj, lists:flatlength(S), Pad);
string(S, none, _Adj, P, Pad) ->
string_field(S, P, left, lists:flatlength(S), Pad);
string(S, F, Adj, P, Pad) when F >= P ->
N = lists:flatlength(S),
- if F > P ->
- if N > P ->
- adjust(flat_trunc(S, P), chars(Pad, F-P), Adj);
+ if
+ F > P ->
+ if
+ N > P ->
+ adjust(flat_trunc(S, P), chars(Pad, F - P), Adj);
N < P ->
- adjust([S|chars(Pad, P-N)], chars(Pad, F-P), Adj);
- true -> % N == P
- adjust(S, chars(Pad, F-P), Adj)
+ adjust([S | chars(Pad, P - N)], chars(Pad, F - P), Adj);
+ % N == P
+ true ->
+ adjust(S, chars(Pad, F - P), Adj)
end;
- true -> % F == P
- string_field(S, F, Adj, N, Pad)
+ % F == P
+ true ->
+ string_field(S, F, Adj, N, Pad)
end.
string_field(S, F, _Adj, N, _Pad) when N > F ->
flat_trunc(S, F);
string_field(S, F, Adj, N, Pad) when N < F ->
- adjust(S, chars(Pad, F-N), Adj);
-string_field(S, _, _, _, _) -> % N == F
+ adjust(S, chars(Pad, F - N), Adj);
+% N == F
+string_field(S, _, _, _, _) ->
S.
%% unprefixed_integer(Int, Field, Adjust, Base, PadChar, Lowercase)
%% -> [Char].
-unprefixed_integer(Int, F, Adj, Base, Pad, Lowercase)
- when Base >= 2, Base =< 1+$Z-$A+10 ->
- if Int < 0 ->
+unprefixed_integer(Int, F, Adj, Base, Pad, Lowercase) when
+ Base >= 2, Base =< 1 + $Z - $A + 10
+->
+ if
+ Int < 0 ->
S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
- term([$-|S], F, Adj, none, Pad);
- true ->
- S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
- term(S, F, Adj, none, Pad)
+ term([$- | S], F, Adj, none, Pad);
+ true ->
+ S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
+ term(S, F, Adj, none, Pad)
end.
%% prefixed_integer(Int, Field, Adjust, Base, PadChar, Prefix, Lowercase)
%% -> [Char].
-prefixed_integer(Int, F, Adj, Base, Pad, Prefix, Lowercase)
- when Base >= 2, Base =< 1+$Z-$A+10 ->
- if Int < 0 ->
+prefixed_integer(Int, F, Adj, Base, Pad, Prefix, Lowercase) when
+ Base >= 2, Base =< 1 + $Z - $A + 10
+->
+ if
+ Int < 0 ->
S = cond_lowercase(erlang:integer_to_list(-Int, Base), Lowercase),
- term([$-,Prefix|S], F, Adj, none, Pad);
- true ->
- S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
- term([Prefix|S], F, Adj, none, Pad)
+ term([$-, Prefix | S], F, Adj, none, Pad);
+ true ->
+ S = cond_lowercase(erlang:integer_to_list(Int, Base), Lowercase),
+ term([Prefix | S], F, Adj, none, Pad)
end.
%% char(Char, Field, Adjust, Precision, PadChar) -> [Char].
-char(C, none, _Adj, none, _Pad) -> [C];
-char(C, F, _Adj, none, _Pad) -> chars(C, F);
-char(C, none, _Adj, P, _Pad) -> chars(C, P);
+char(C, none, _Adj, none, _Pad) ->
+ [C];
+char(C, F, _Adj, none, _Pad) ->
+ chars(C, F);
+char(C, none, _Adj, P, _Pad) ->
+ chars(C, P);
char(C, F, Adj, P, Pad) when F >= P ->
adjust(chars(C, P), chars(Pad, F - P), Adj).
@@ -503,8 +544,8 @@ newline(F, right, _P, _Pad) -> chars($\n, F).
%%
adjust(Data, [], _) -> Data;
-adjust(Data, Pad, left) -> [Data|Pad];
-adjust(Data, Pad, right) -> [Pad|Data].
+adjust(Data, Pad, left) -> [Data | Pad];
+adjust(Data, Pad, right) -> [Pad | Data].
%% Flatten and truncate a deep list to at most N elements.
flat_trunc(List, N) when is_integer(N), N >= 0 ->
@@ -512,8 +553,8 @@ flat_trunc(List, N) when is_integer(N), N >= 0 ->
flat_trunc(L, 0, R) when is_list(L) ->
lists:reverse(R);
-flat_trunc([H|T], N, R) ->
- flat_trunc(T, N-1, [H|R]);
+flat_trunc([H | T], N, R) ->
+ flat_trunc(T, N - 1, [H | R]);
flat_trunc([], _, R) ->
lists:reverse(R).
@@ -524,15 +565,15 @@ chars(_C, 0) ->
chars(C, 1) ->
[C];
chars(C, 2) ->
- [C,C];
+ [C, C];
chars(C, 3) ->
- [C,C,C];
+ [C, C, C];
chars(C, N) when is_integer(N), (N band 1) =:= 0 ->
S = chars(C, N bsr 1),
- [S|S];
+ [S | S];
chars(C, N) when is_integer(N) ->
S = chars(C, N bsr 1),
- [C,S|S].
+ [C, S | S].
%chars(C, N, Tail) ->
% [chars(C, N)|Tail].
@@ -541,12 +582,12 @@ chars(C, N) when is_integer(N) ->
cond_lowercase(String, true) ->
lowercase(String);
-cond_lowercase(String,false) ->
+cond_lowercase(String, false) ->
String.
-lowercase([H|T]) when is_integer(H), H >= $A, H =< $Z ->
- [(H-$A+$a)|lowercase(T)];
-lowercase([H|T]) ->
- [H|lowercase(T)];
+lowercase([H | T]) when is_integer(H), H >= $A, H =< $Z ->
+ [(H - $A + $a) | lowercase(T)];
+lowercase([H | T]) ->
+ [H | lowercase(T)];
lowercase([]) ->
-    [].
\ No newline at end of file
+ [].
diff --git a/src/couch_log/src/couch_log_util.erl b/src/couch_log/src/couch_log_util.erl
index c8b8e54ea..8be11e12d 100644
--- a/src/couch_log/src/couch_log_util.erl
+++ b/src/couch_log/src/couch_log_util.erl
@@ -12,7 +12,6 @@
-module(couch_log_util).
-
-export([
should_log/1,
iso8601_timestamp/0,
@@ -25,26 +24,21 @@
string_p/1
]).
-
-include("couch_log.hrl").
-
-spec should_log(#log_entry{} | atom()) -> boolean().
should_log(#log_entry{level = Level}) ->
should_log(Level);
-
should_log(Level) ->
level_to_integer(Level) >= couch_log_config:get(level_int).
-
-spec iso8601_timestamp() -> string().
iso8601_timestamp() ->
- {_,_,Micro} = Now = os:timestamp(),
- {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
+ {_, _, Micro} = Now = os:timestamp(),
+ {{Year, Month, Date}, {Hour, Minute, Second}} = calendar:now_to_datetime(Now),
Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
-
-spec get_msg_id() -> string().
get_msg_id() ->
case erlang:get(nonce) of
@@ -52,78 +46,73 @@ get_msg_id() ->
MsgId -> MsgId
end.
-
-spec level_to_integer(atom() | string() | integer()) -> integer().
level_to_integer(L) when L >= 0, L =< 9 -> L;
-level_to_integer(debug) -> 1;
-level_to_integer(info) -> 2;
-level_to_integer(notice) -> 3;
-level_to_integer(warning) -> 4;
-level_to_integer(warn) -> 4;
-level_to_integer(error) -> 5;
-level_to_integer(err) -> 5;
-level_to_integer(critical) -> 6;
-level_to_integer(crit) -> 6;
-level_to_integer(alert) -> 7;
-level_to_integer(emergency) -> 8;
-level_to_integer(emerg) -> 8;
-level_to_integer(none) -> 9;
-level_to_integer("debug") -> 1;
-level_to_integer("info") -> 2;
-level_to_integer("notice") -> 3;
-level_to_integer("warning") -> 4;
-level_to_integer("warn") -> 4;
-level_to_integer("error") -> 5;
-level_to_integer("err") -> 5;
-level_to_integer("critical") -> 6;
-level_to_integer("crit") -> 6;
-level_to_integer("alert") -> 7;
-level_to_integer("emergency") -> 8;
-level_to_integer("emerg") -> 8;
-level_to_integer("none") -> 9;
-level_to_integer("1") -> 1;
-level_to_integer("2") -> 2;
-level_to_integer("3") -> 3;
-level_to_integer("4") -> 4;
-level_to_integer("5") -> 5;
-level_to_integer("6") -> 6;
-level_to_integer("7") -> 7;
-level_to_integer("8") -> 8;
-level_to_integer("9") -> 9.
-
+level_to_integer(debug) -> 1;
+level_to_integer(info) -> 2;
+level_to_integer(notice) -> 3;
+level_to_integer(warning) -> 4;
+level_to_integer(warn) -> 4;
+level_to_integer(error) -> 5;
+level_to_integer(err) -> 5;
+level_to_integer(critical) -> 6;
+level_to_integer(crit) -> 6;
+level_to_integer(alert) -> 7;
+level_to_integer(emergency) -> 8;
+level_to_integer(emerg) -> 8;
+level_to_integer(none) -> 9;
+level_to_integer("debug") -> 1;
+level_to_integer("info") -> 2;
+level_to_integer("notice") -> 3;
+level_to_integer("warning") -> 4;
+level_to_integer("warn") -> 4;
+level_to_integer("error") -> 5;
+level_to_integer("err") -> 5;
+level_to_integer("critical") -> 6;
+level_to_integer("crit") -> 6;
+level_to_integer("alert") -> 7;
+level_to_integer("emergency") -> 8;
+level_to_integer("emerg") -> 8;
+level_to_integer("none") -> 9;
+level_to_integer("1") -> 1;
+level_to_integer("2") -> 2;
+level_to_integer("3") -> 3;
+level_to_integer("4") -> 4;
+level_to_integer("5") -> 5;
+level_to_integer("6") -> 6;
+level_to_integer("7") -> 7;
+level_to_integer("8") -> 8;
+level_to_integer("9") -> 9.
-spec level_to_atom(atom() | string() | integer()) -> atom().
-level_to_atom(L) when is_atom(L) -> L;
-level_to_atom("1") -> debug;
-level_to_atom("debug") -> debug;
-level_to_atom("2") -> info;
-level_to_atom("info") -> info;
-level_to_atom("3") -> notice;
-level_to_atom("notice") -> notice;
-level_to_atom("4") -> warning;
-level_to_atom("warning") -> warning;
-level_to_atom("warn") -> warning;
-level_to_atom("5") -> error;
-level_to_atom("error") -> error;
-level_to_atom("err") -> error;
-level_to_atom("6") -> critical;
-level_to_atom("critical") -> critical;
-level_to_atom("crit") -> critical;
-level_to_atom("7") -> alert;
-level_to_atom("alert") -> alert;
-level_to_atom("8") -> emergency;
-level_to_atom("emergency") -> emergency;
-level_to_atom("emerg") -> emergency;
-level_to_atom("9") -> none;
-level_to_atom("none") -> none;
+level_to_atom(L) when is_atom(L) -> L;
+level_to_atom("1") -> debug;
+level_to_atom("debug") -> debug;
+level_to_atom("2") -> info;
+level_to_atom("info") -> info;
+level_to_atom("3") -> notice;
+level_to_atom("notice") -> notice;
+level_to_atom("4") -> warning;
+level_to_atom("warning") -> warning;
+level_to_atom("warn") -> warning;
+level_to_atom("5") -> error;
+level_to_atom("error") -> error;
+level_to_atom("err") -> error;
+level_to_atom("6") -> critical;
+level_to_atom("critical") -> critical;
+level_to_atom("crit") -> critical;
+level_to_atom("7") -> alert;
+level_to_atom("alert") -> alert;
+level_to_atom("8") -> emergency;
+level_to_atom("emergency") -> emergency;
+level_to_atom("emerg") -> emergency;
+level_to_atom("9") -> none;
+level_to_atom("none") -> none;
level_to_atom(V) when is_integer(V) -> level_to_atom(integer_to_list(V));
-level_to_atom(V) when is_list(V) -> info.
-
-
-level_to_string(L) when is_atom(L) -> atom_to_list(L);
-level_to_string(L) -> atom_to_list(level_to_atom(L)).
-
+level_to_atom(V) when is_list(V) -> info.
+level_to_string(L) when is_atom(L) -> atom_to_list(L);
+level_to_string(L) -> atom_to_list(level_to_atom(L)).
% From error_logger_file_h via lager_stdlib.erl
string_p([]) ->
@@ -131,19 +120,28 @@ string_p([]) ->
string_p(Term) ->
string_p1(Term).
-string_p1([H|T]) when is_integer(H), H >= $\s, H < 256 ->
+string_p1([H | T]) when is_integer(H), H >= $\s, H < 256 ->
+ string_p1(T);
+string_p1([$\n | T]) ->
+ string_p1(T);
+string_p1([$\r | T]) ->
+ string_p1(T);
+string_p1([$\t | T]) ->
+ string_p1(T);
+string_p1([$\v | T]) ->
+ string_p1(T);
+string_p1([$\b | T]) ->
+ string_p1(T);
+string_p1([$\f | T]) ->
+ string_p1(T);
+string_p1([$\e | T]) ->
string_p1(T);
-string_p1([$\n|T]) -> string_p1(T);
-string_p1([$\r|T]) -> string_p1(T);
-string_p1([$\t|T]) -> string_p1(T);
-string_p1([$\v|T]) -> string_p1(T);
-string_p1([$\b|T]) -> string_p1(T);
-string_p1([$\f|T]) -> string_p1(T);
-string_p1([$\e|T]) -> string_p1(T);
-string_p1([H|T]) when is_list(H) ->
+string_p1([H | T]) when is_list(H) ->
case string_p1(H) of
true -> string_p1(T);
- _ -> false
+ _ -> false
end;
-string_p1([]) -> true;
-string_p1(_) -> false.
+string_p1([]) ->
+ true;
+string_p1(_) ->
+ false.
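
The reformatted level_to_integer/1, level_to_atom/1 and level_to_string/1 clauses above accept atoms, strings and small integers interchangeably. A few illustrative calls, following the clauses as written (an Erlang shell sketch):

```
1> couch_log_util:level_to_integer(warn).
4
2> couch_log_util:level_to_atom("5").
error
3> couch_log_util:level_to_string(emergency).
"emergency"
```

should_log/1 then compares that integer against the configured level_int, so a message is emitted only when its level is at or above the configured threshold.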
diff --git a/src/couch_log/src/couch_log_writer.erl b/src/couch_log/src/couch_log_writer.erl
index 5e28a0775..18bb557ae 100644
--- a/src/couch_log/src/couch_log_writer.erl
+++ b/src/couch_log/src/couch_log_writer.erl
@@ -13,28 +13,22 @@
% @doc Modules wishing to handle writing log
% messages should implement this behavior.
-
-module(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
-define(DEFAULT_WRITER, couch_log_writer_stderr).
-
--callback init() -> {ok, State::term()}.
--callback terminate(Reason::term(), State::term()) -> ok.
--callback write(LogEntry::#log_entry{}, State::term()) ->
- {ok, NewState::term()}.
-
+-callback init() -> {ok, State :: term()}.
+-callback terminate(Reason :: term(), State :: term()) -> ok.
+-callback write(LogEntry :: #log_entry{}, State :: term()) ->
+ {ok, NewState :: term()}.
-spec init() -> {atom(), term()}.
init() ->
@@ -42,18 +36,15 @@ init() ->
{ok, St} = Writer:init(),
{Writer, St}.
-
-spec terminate(term(), {atom(), term()}) -> ok.
terminate(Reason, {Writer, St}) ->
ok = Writer:terminate(Reason, St).
-
-spec write(#log_entry{}, {atom(), term()}) -> {atom(), term()}.
write(Entry, {Writer, St}) ->
{ok, NewSt} = Writer:write(Entry, St),
{Writer, NewSt}.
-
get_writer_mod() ->
WriterStr = config:get("log", "writer", "stderr"),
ModName1 = to_atom("couch_log_writer_" ++ WriterStr),
@@ -70,14 +61,13 @@ get_writer_mod() ->
end
end.
-
to_atom(Str) ->
try list_to_existing_atom(Str) of
Atom -> Atom
- catch _:_ ->
- undefined
+ catch
+ _:_ ->
+ undefined
end.
-
mod_exists(ModName) ->
code:which(ModName) /= non_existing.
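
The -callback specs above define the writer contract: init/0 returns {ok, State}, write/2 takes a #log_entry{} and the state and returns {ok, NewState}, and terminate/2 returns ok. A minimal sketch of a custom writer modeled on couch_log_writer_stderr (the module name is hypothetical; the record fields follow couch_log.hrl as used by the writers in this commit):

```
%% Hypothetical writer that prints each entry to standard output.
-module(couch_log_writer_stdout_example).
-behaviour(couch_log_writer).

-export([
    init/0,
    terminate/2,
    write/2
]).

-include_lib("couch_log/include/couch_log.hrl").

init() ->
    {ok, nil}.

terminate(_Reason, _St) ->
    ok.

write(#log_entry{level = Level, msg = Msg}, St) ->
    io:format("[~s] ~s~n", [Level, Msg]),
    {ok, St}.
```

get_writer_mod/0 above maps the writer config value to a module by prefixing couch_log_writer_, so a module like this would presumably be selected with writer = stdout_example.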
diff --git a/src/couch_log/src/couch_log_writer_file.erl b/src/couch_log/src/couch_log_writer_file.erl
index 1fe35a8ab..9b7255050 100644
--- a/src/couch_log/src/couch_log_writer_file.erl
+++ b/src/couch_log/src/couch_log_writer_file.erl
@@ -13,18 +13,15 @@
-module(couch_log_writer_file).
-behaviour(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include_lib("kernel/include/file.hrl").
-include("couch_log.hrl").
-
-record(st, {
file_path,
fd,
@@ -32,17 +29,14 @@
last_check
}).
-
-define(CHECK_INTERVAL, 30000000).
-
-ifdef(TEST).
-export([
maybe_reopen/1
]).
-endif.
-
init() ->
FilePath = config:get("log", "file", "./couch.log"),
Opts = [append, raw] ++ buffer_opt(),
@@ -69,14 +63,12 @@ init() ->
EnsureDirError
end.
-
terminate(_, St) ->
% Apparently delayed_write can require two closes
file:close(St#st.fd),
file:close(St#st.fd),
ok.
-
write(Entry, St) ->
{ok, NewSt} = maybe_reopen(St),
#log_entry{
@@ -99,7 +91,6 @@ write(Entry, St) ->
ok = file:write(NewSt#st.fd, [Data, Msg, "\n"]),
{ok, NewSt}.
-
buffer_opt() ->
WriteBuffer = config:get_integer("log", "write_buffer", 0),
WriteDelay = config:get_integer("log", "write_delay", 0),
@@ -110,7 +101,6 @@ buffer_opt() ->
[]
end.
-
maybe_reopen(St) ->
#st{
last_check = LastCheck
@@ -121,7 +111,6 @@ maybe_reopen(St) ->
false -> {ok, St}
end.
-
reopen(St) ->
case file:read_file_info(St#st.file_path) of
{ok, FInfo} ->
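
init/0 above reads its settings from the "log" section: file for the log path (default ./couch.log) and, via buffer_opt/0, write_buffer and write_delay for buffered writes; the writer key selects the module through get_writer_mod/0 in the previous file. A sketch of setting these at runtime, mirroring the config:set/3 calls used by the eunit tests later in this commit (values are illustrative):

```
%% Illustrative runtime configuration for the file writer.
config:set("log", "writer", "file"),
config:set("log", "file", "/var/log/couchdb/couch.log"),
%% Assumed to enable {delayed_write, Size, Delay} in buffer_opt/0
%% when both values are greater than zero.
config:set("log", "write_buffer", "1024"),
config:set("log", "write_delay", "100").
```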
diff --git a/src/couch_log/src/couch_log_writer_journald.erl b/src/couch_log/src/couch_log_writer_journald.erl
index 02a9c6900..c2bdd940c 100644
--- a/src/couch_log/src/couch_log_writer_journald.erl
+++ b/src/couch_log/src/couch_log_writer_journald.erl
@@ -13,25 +13,20 @@
-module(couch_log_writer_journald).
-behaviour(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
init() ->
{ok, nil}.
-
terminate(_, _St) ->
ok.
-
write(Entry, St) ->
#log_entry{
level = Level,
@@ -51,19 +46,18 @@ write(Entry, St) ->
io:format(standard_error, [Data, Msg, "\n"], []),
{ok, St}.
-
% log level mapping from sd-daemon(3)
% https://www.freedesktop.org/software/systemd/man/sd-daemon.html
-spec level_for_journald(atom()) -> integer().
level_for_journald(Level) when is_atom(Level) ->
case Level of
- debug -> 7;
- info -> 6;
- notice -> 5;
- warning -> 4;
- error -> 3;
- critical -> 2;
- alert -> 1;
- emergency -> 0;
- _ -> 3
+ debug -> 7;
+ info -> 6;
+ notice -> 5;
+ warning -> 4;
+ error -> 3;
+ critical -> 2;
+ alert -> 1;
+ emergency -> 0;
+ _ -> 3
end.
diff --git a/src/couch_log/src/couch_log_writer_stderr.erl b/src/couch_log/src/couch_log_writer_stderr.erl
index 7c5fc6ca0..01e350971 100644
--- a/src/couch_log/src/couch_log_writer_stderr.erl
+++ b/src/couch_log/src/couch_log_writer_stderr.erl
@@ -13,25 +13,20 @@
-module(couch_log_writer_stderr).
-behaviour(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
init() ->
{ok, nil}.
-
terminate(_, _St) ->
ok.
-
write(Entry, St) ->
#log_entry{
level = Level,
diff --git a/src/couch_log/src/couch_log_writer_syslog.erl b/src/couch_log/src/couch_log_writer_syslog.erl
index e3a6fc4b6..b95cf018c 100644
--- a/src/couch_log/src/couch_log_writer_syslog.erl
+++ b/src/couch_log/src/couch_log_writer_syslog.erl
@@ -13,17 +13,14 @@
-module(couch_log_writer_syslog).
-behavior(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
-record(st, {
socket,
host,
@@ -34,10 +31,8 @@
facility
}).
-
-define(SYSLOG_VERSION, 1).
-
-ifdef(TEST).
-export([
get_facility/1,
@@ -45,20 +40,20 @@
]).
-endif.
-
init() ->
{ok, Socket} = gen_udp:open(0),
- Host = case config:get("log", "syslog_host") of
- undefined ->
- undefined;
- SysLogHost ->
- case inet:getaddr(SysLogHost, inet) of
- {ok, Address} ->
- Address;
- _ ->
- undefined
- end
+ Host =
+ case config:get("log", "syslog_host") of
+ undefined ->
+ undefined;
+ SysLogHost ->
+ case inet:getaddr(SysLogHost, inet) of
+ {ok, Address} ->
+ Address;
+ _ ->
+ undefined
+ end
end,
{ok, #st{
@@ -71,11 +66,9 @@ init() ->
facility = get_facility(config:get("log", "syslog_facility", "local2"))
}}.
-
terminate(_Reason, St) ->
gen_udp:close(St#st.socket).
-
write(Entry, St) ->
#log_entry{
level = Level,
@@ -98,10 +91,8 @@ write(Entry, St) ->
ok = send(St, [Pre, Msg, $\n]),
{ok, St}.
-
-send(#st{host=undefined}, Packet) ->
+send(#st{host = undefined}, Packet) ->
io:format(standard_error, "~s", [Packet]);
-
send(St, Packet) ->
#st{
socket = Socket,
@@ -110,53 +101,101 @@ send(St, Packet) ->
} = St,
gen_udp:send(Socket, Host, Port, Packet).
-
get_facility(Name) ->
- FacId = case Name of
- "kern" -> 0; % Kernel messages
- "user" -> 1; % Random user-level messages
- "mail" -> 2; % Mail system
- "daemon" -> 3; % System daemons
- "auth" -> 4; % Security/Authorization messages
- "syslog" -> 5; % Internal Syslog messages
- "lpr" -> 6; % Line printer subsystem
- "news" -> 7; % Network news subsystems
- "uucp" -> 8; % UUCP subsystem
- "clock" -> 9; % Clock daemon
- "authpriv" -> 10; % Security/Authorization messages
- "ftp" -> 11; % FTP daemon
- "ntp" -> 12; % NTP subsystem
- "audit" -> 13; % Log audit
- "alert" -> 14; % Log alert
- "cron" -> 15; % Scheduling daemon
- "local0" -> 16; % Local use 0
- "local1" -> 17; % Local use 1
- "local2" -> 18; % Local use 2
- "local3" -> 19; % Local use 3
- "local4" -> 20; % Local use 4
- "local5" -> 21; % Local use 5
- "local6" -> 22; % Local use 6
- "local7" -> 23; % Local use 7
- _ ->
- try list_to_integer(Name) of
- N when N >= 0, N =< 23 -> N;
- _ -> 23
- catch _:_ ->
- 23
- end
- end,
+ FacId =
+ case Name of
+ % Kernel messages
+ "kern" ->
+ 0;
+ % Random user-level messages
+ "user" ->
+ 1;
+ % Mail system
+ "mail" ->
+ 2;
+ % System daemons
+ "daemon" ->
+ 3;
+ % Security/Authorization messages
+ "auth" ->
+ 4;
+ % Internal Syslog messages
+ "syslog" ->
+ 5;
+ % Line printer subsystem
+ "lpr" ->
+ 6;
+ % Network news subsystems
+ "news" ->
+ 7;
+ % UUCP subsystem
+ "uucp" ->
+ 8;
+ % Clock daemon
+ "clock" ->
+ 9;
+ % Security/Authorization messages
+ "authpriv" ->
+ 10;
+ % FTP daemon
+ "ftp" ->
+ 11;
+ % NTP subsystem
+ "ntp" ->
+ 12;
+ % Log audit
+ "audit" ->
+ 13;
+ % Log alert
+ "alert" ->
+ 14;
+ % Scheduling daemon
+ "cron" ->
+ 15;
+ % Local use 0
+ "local0" ->
+ 16;
+ % Local use 1
+ "local1" ->
+ 17;
+ % Local use 2
+ "local2" ->
+ 18;
+ % Local use 3
+ "local3" ->
+ 19;
+ % Local use 4
+ "local4" ->
+ 20;
+ % Local use 5
+ "local5" ->
+ 21;
+ % Local use 6
+ "local6" ->
+ 22;
+ % Local use 7
+ "local7" ->
+ 23;
+ _ ->
+ try list_to_integer(Name) of
+ N when N >= 0, N =< 23 -> N;
+ _ -> 23
+ catch
+ _:_ ->
+ 23
+ end
+ end,
FacId bsl 3.
-
get_level(Name) when is_atom(Name) ->
case Name of
- debug -> 7;
- info -> 6;
- notice -> 5;
- warning -> 4;
- error -> 3;
- critical -> 2;
- alert -> 1;
- emergency -> 0;
- _ -> 3
+ debug -> 7;
+ info -> 6;
+ notice -> 5;
+ warning -> 4;
+ error -> 3;
+ critical -> 2;
+ alert -> 1;
+ emergency -> 0;
+ _ -> 3
end.
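
get_facility/1 above maps a facility name (or a numeric string between 0 and 23) to its syslog facility code and shifts it left by three bits, and get_level/1 maps the log level to a syslog severity. Combining the two as facility-shifted-plus-severity gives the standard syslog PRI value; the combining step lives in the write path, which is not part of this hunk, so the last step below is an assumption. Both functions are exported only in test builds (-ifdef(TEST)):

```
1> couch_log_writer_syslog:get_facility("local2").
144    % 18 bsl 3; local2 is the default syslog_facility in init/0
2> couch_log_writer_syslog:get_level(error).
3
3> 144 + 3.    % assumed PRI for facility local2, severity err
147
```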
diff --git a/src/couch_log/test/eunit/couch_log_config_listener_test.erl b/src/couch_log/test/eunit/couch_log_config_listener_test.erl
index 07abae1ff..c955972ff 100644
--- a/src/couch_log/test/eunit/couch_log_config_listener_test.erl
+++ b/src/couch_log/test/eunit/couch_log_config_listener_test.erl
@@ -12,22 +12,16 @@
-module(couch_log_config_listener_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-define(TIMEOUT, 1000).
couch_log_config_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun check_restart_listener/0,
- fun check_ignore_non_log/0
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun check_restart_listener/0,
+ fun check_ignore_non_log/0
+ ]}.
check_restart_listener() ->
Listener1 = get_listener(),
@@ -41,16 +35,20 @@ check_restart_listener() ->
receive
{'DOWN', Ref, process, _, _} ->
?assertNot(is_process_alive(Listener1))
- after ?TIMEOUT ->
- erlang:error({timeout, config_listener_mon_death})
+ after ?TIMEOUT ->
+ erlang:error({timeout, config_listener_mon_death})
end,
- NewHandler = test_util:wait(fun() ->
- case get_handler() of
- not_found -> wait;
- Reply -> Reply
- end
- end, ?TIMEOUT, 20),
+ NewHandler = test_util:wait(
+ fun() ->
+ case get_handler() of
+ not_found -> wait;
+ Reply -> Reply
+ end
+ end,
+ ?TIMEOUT,
+ 20
+ ),
?assertEqual(Handler1, NewHandler),
Listener2 = get_listener(),
@@ -67,7 +65,6 @@ check_ignore_non_log() ->
end,
?assertError(config_change_timeout, Run()).
-
get_handler() ->
FoldFun = fun
({config_listener, {couch_log_sup, _}} = H, not_found) ->
diff --git a/src/couch_log/test/eunit/couch_log_config_test.erl b/src/couch_log/test/eunit/couch_log_config_test.erl
index e47a52bc2..df7cdf977 100644
--- a/src/couch_log/test/eunit/couch_log_config_test.erl
+++ b/src/couch_log/test/eunit/couch_log_config_test.erl
@@ -12,28 +12,22 @@
-module(couch_log_config_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-define(T(Name), {atom_to_list(Name), fun Name/0}).
couch_log_config_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- ?T(check_level),
- ?T(check_max_message_size),
- ?T(check_bad_level),
- ?T(check_bad_max_message_size),
- ?T(check_strip_last_msg),
- ?T(check_bad_strip_last_msg),
- ?T(check_filter_fields),
- ?T(check_bad_filter_fields)
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ ?T(check_level),
+ ?T(check_max_message_size),
+ ?T(check_bad_level),
+ ?T(check_bad_max_message_size),
+ ?T(check_strip_last_msg),
+ ?T(check_bad_strip_last_msg),
+ ?T(check_filter_fields),
+ ?T(check_bad_filter_fields)
+ ]}.
check_level() ->
% Default level is info
@@ -57,7 +51,6 @@ check_level() ->
?assertEqual(2, couch_log_config:get(level_int))
end).
-
check_max_message_size() ->
% Default is 16000
?assertEqual(16000, couch_log_config:get(max_message_size)),
@@ -72,7 +65,6 @@ check_max_message_size() ->
?assertEqual(16000, couch_log_config:get(max_message_size))
end).
-
check_bad_level() ->
% Default level is info
?assertEqual(info, couch_log_config:get(level)),
@@ -95,7 +87,6 @@ check_bad_level() ->
?assertEqual(2, couch_log_config:get(level_int))
end).
-
check_bad_max_message_size() ->
% Default level is 16000
?assertEqual(16000, couch_log_config:get(max_message_size)),
@@ -114,7 +105,6 @@ check_bad_max_message_size() ->
?assertEqual(16000, couch_log_config:get(max_message_size))
end).
-
check_strip_last_msg() ->
% Default is true
?assertEqual(true, couch_log_config:get(strip_last_msg)),
@@ -147,7 +137,6 @@ check_bad_strip_last_msg() ->
?assertEqual(true, couch_log_config:get(strip_last_msg))
end).
-
check_filter_fields() ->
Default = [pid, registered_name, error_info, messages],
?assertEqual(Default, couch_log_config:get(filter_fields)),
diff --git a/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl b/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
index b78598fa4..cb053d611 100644
--- a/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
+++ b/src/couch_log/test/eunit/couch_log_error_logger_h_test.erl
@@ -12,23 +12,15 @@
-module(couch_log_error_logger_h_test).
-
-include_lib("eunit/include/eunit.hrl").
-
-define(HANDLER, couch_log_error_logger_h).
-
couch_log_error_logger_h_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun handler_ignores_unknown_messages/0,
- fun coverage_test/0
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun handler_ignores_unknown_messages/0,
+ fun coverage_test/0
+ ]}.
handler_ignores_unknown_messages() ->
Handlers1 = gen_event:which_handlers(error_logger),
@@ -39,7 +31,6 @@ handler_ignores_unknown_messages() ->
Handlers2 = gen_event:which_handlers(error_logger),
?assert(lists:member(?HANDLER, Handlers2)).
-
coverage_test() ->
Resp = couch_log_error_logger_h:code_change(foo, bazinga, baz),
?assertEqual({ok, bazinga}, Resp).
diff --git a/src/couch_log/test/eunit/couch_log_formatter_test.erl b/src/couch_log/test/eunit/couch_log_formatter_test.erl
index 24de346c6..d516c2bc5 100644
--- a/src/couch_log/test/eunit/couch_log_formatter_test.erl
+++ b/src/couch_log/test/eunit/couch_log_formatter_test.erl
@@ -12,23 +12,19 @@
-module(couch_log_formatter_test).
-
-include("couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
truncate_fmt_test() ->
Msg = [0 || _ <- lists:seq(1, 1048576)],
Entry = couch_log_formatter:format(info, self(), "~w", [Msg]),
?assert(length(Entry#log_entry.msg) =< 16000).
-
truncate_test() ->
Msg = [0 || _ <- lists:seq(1, 1048576)],
Entry = couch_log_formatter:format(info, self(), Msg),
?assert(length(Entry#log_entry.msg) =< 16000).
-
format_reason_test() ->
MsgFmt = "This is a reason: ~r",
Reason = {foo, [{x, k, 3}, {c, d, 2}]},
@@ -36,7 +32,6 @@ format_reason_test() ->
Formatted = "This is a reason: foo at x:k/3 <= c:d/2",
?assertEqual(Formatted, lists:flatten(Entry#log_entry.msg)).
-
crashing_formatting_test() ->
Pid = self(),
Event = {
@@ -45,7 +40,8 @@ crashing_formatting_test() ->
{
Pid,
"** Generic server and some stuff",
- [a_gen_server, {foo, bar}, server_state] % not enough args!
+ % not enough args!
+ [a_gen_server, {foo, bar}, server_state]
}
},
?assertMatch(
@@ -59,7 +55,6 @@ crashing_formatting_test() ->
"Encountered error {error,{badmatch"
]).
-
gen_server_error_test() ->
Pid = self(),
Event = {
@@ -86,7 +81,6 @@ gen_server_error_test() ->
"extra: \\[\\]"
]).
-
gen_server_error_with_extra_args_test() ->
Pid = self(),
Event = {
@@ -113,7 +107,6 @@ gen_server_error_with_extra_args_test() ->
"extra: \\[sad,args\\]"
]).
-
gen_fsm_error_test() ->
Pid = self(),
Event = {
@@ -122,7 +115,7 @@ gen_fsm_error_test() ->
{
Pid,
"** State machine did a thing",
- [a_gen_fsm, {ohai,there}, state_name, curr_state, barf]
+ [a_gen_fsm, {ohai, there}, state_name, curr_state, barf]
}
},
?assertMatch(
@@ -140,7 +133,6 @@ gen_fsm_error_test() ->
"extra: \\[\\]"
]).
-
gen_fsm_error_with_extra_args_test() ->
Pid = self(),
Event = {
@@ -149,7 +141,7 @@ gen_fsm_error_with_extra_args_test() ->
{
Pid,
"** State machine did a thing",
- [a_gen_fsm, {ohai,there}, state_name, curr_state, barf, sad, args]
+ [a_gen_fsm, {ohai, there}, state_name, curr_state, barf, sad, args]
}
},
?assertMatch(
@@ -167,7 +159,6 @@ gen_fsm_error_with_extra_args_test() ->
"extra: \\[sad,args\\]"
]).
-
gen_event_error_test() ->
Pid = self(),
Event = {
@@ -179,7 +170,7 @@ gen_event_error_test() ->
[
handler_id,
a_gen_event,
- {ohai,there},
+ {ohai, there},
curr_state,
barf
]
@@ -199,7 +190,6 @@ gen_event_error_test() ->
"state: curr_state"
]).
-
emulator_error_test() ->
Event = {
error,
@@ -219,7 +209,6 @@ emulator_error_test() ->
do_format(Event)
).
-
normal_error_test() ->
Pid = self(),
Event = {
@@ -243,7 +232,6 @@ normal_error_test() ->
do_format(Event)
).
-
error_report_std_error_test() ->
Pid = self(),
Event = {
@@ -264,7 +252,6 @@ error_report_std_error_test() ->
do_format(Event)
).
-
supervisor_report_test() ->
Pid = self(),
% A standard supervisor report
@@ -382,7 +369,6 @@ supervisor_report_test() ->
do_format(Event4)
).
-
crash_report_test() ->
Pid = self(),
% A standard crash report
@@ -395,11 +381,12 @@ crash_report_test() ->
[
[
{pid, list_to_pid("<0.2.0>")},
- {error_info, {
- exit,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }}
+ {error_info,
+ {
+ exit,
+ undef,
+ [{mod_name, fun_name, [a, b]}]
+ }}
],
[list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
]
@@ -429,11 +416,12 @@ crash_report_test() ->
[
{pid, list_to_pid("<0.2.0>")},
{registered_name, couch_log_server},
- {error_info, {
- exit,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }}
+ {error_info,
+ {
+ exit,
+ undef,
+ [{mod_name, fun_name, [a, b]}]
+ }}
],
[list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
]
@@ -453,11 +441,12 @@ crash_report_test() ->
[
{pid, list_to_pid("<0.2.0>")},
{registered_name, couch_log_server},
- {error_info, {
- killed,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }}
+ {error_info,
+ {
+ killed,
+ undef,
+ [{mod_name, fun_name, [a, b]}]
+ }}
],
[list_to_pid("<0.3.0>"), list_to_pid("<0.4.0>")]
]
@@ -476,11 +465,12 @@ crash_report_test() ->
[
[
{pid, list_to_pid("<0.2.0>")},
- {error_info, {
- killed,
- undef,
- [{mod_name, fun_name, [a, b]}]
- }},
+ {error_info,
+ {
+ killed,
+ undef,
+ [{mod_name, fun_name, [a, b]}]
+ }},
{another, entry},
yep
],
@@ -492,7 +482,6 @@ crash_report_test() ->
"; another: entry, yep"
]).
-
warning_report_test() ->
Pid = self(),
% A warning message
@@ -532,7 +521,6 @@ warning_report_test() ->
do_format(Event2)
).
-
info_report_test() ->
Pid = self(),
% An info message
@@ -615,7 +603,6 @@ info_report_test() ->
do_format(Event4)
).
-
progress_report_test() ->
Pid = self(),
% Application started
@@ -656,8 +643,9 @@ progress_report_test() ->
#log_entry{
level = debug,
pid = Pid,
- msg = "Supervisor sup_dude started mod_name:fun_name/1"
- " at pid <0.5.0>"
+ msg =
+ "Supervisor sup_dude started mod_name:fun_name/1"
+ " at pid <0.5.0>"
},
do_format(Event2)
),
@@ -680,7 +668,6 @@ progress_report_test() ->
do_format(Event3)
).
-
log_unknown_event_test() ->
Pid = self(),
?assertMatch(
@@ -692,7 +679,6 @@ log_unknown_event_test() ->
do_format(an_unknown_event)
).
-
format_reason_test_() ->
Cases = [
{
@@ -805,14 +791,15 @@ format_reason_test_() ->
}
],
[
- {Msg, fun() -> ?assertEqual(
- Msg,
- lists:flatten(couch_log_formatter:format_reason(Reason))
- ) end}
- || {Reason, Msg} <- Cases
+ {Msg, fun() ->
+ ?assertEqual(
+ Msg,
+ lists:flatten(couch_log_formatter:format_reason(Reason))
+ )
+ end}
+ || {Reason, Msg} <- Cases
].
-
coverage_test() ->
% MFA's that aren't
?assertEqual(["foo"], couch_log_formatter:format_mfa(foo)),
@@ -830,11 +817,13 @@ coverage_test() ->
level = error,
msg = "foobar"
},
- do_format({
- error_report,
- erlang:group_leader(),
- {self(), std_error, "foobar"}
- })
+ do_format(
+ {
+ error_report,
+ erlang:group_leader(),
+ {self(), std_error, "foobar"}
+ }
+ )
),
% Exercising print_silly_list
@@ -843,11 +832,13 @@ coverage_test() ->
level = error,
msg = "dang"
},
- do_format({
- error_report,
- erlang:group_leader(),
- {self(), std_error, dang}
- })
+ do_format(
+ {
+ error_report,
+ erlang:group_leader(),
+ {self(), std_error, dang}
+ }
+ )
).
gen_server_error_with_last_msg_test() ->
@@ -889,7 +880,7 @@ gen_event_error_with_last_msg_test() ->
[
handler_id,
a_gen_event,
- {ohai,there},
+ {ohai, there},
curr_state,
barf
]
@@ -911,7 +902,6 @@ gen_event_error_with_last_msg_test() ->
])
end).
-
gen_fsm_error_with_last_msg_test() ->
Pid = self(),
Event = {
@@ -920,7 +910,7 @@ gen_fsm_error_with_last_msg_test() ->
{
Pid,
"** State machine did a thing",
- [a_gen_fsm, {ohai,there}, state_name, curr_state, barf]
+ [a_gen_fsm, {ohai, there}, state_name, curr_state, barf]
}
},
?assertMatch(
@@ -940,7 +930,6 @@ gen_fsm_error_with_last_msg_test() ->
])
end).
-
with_last(Fun) ->
meck:new(couch_log_config_dyn, [passthrough]),
try
@@ -963,10 +952,8 @@ do_format(Event) ->
time_stamp = lists:flatten(E#log_entry.time_stamp)
}.
-
do_matches(_, []) ->
ok;
-
do_matches(#log_entry{msg = Msg} = E, [Pattern | RestPatterns]) ->
case re:run(Msg, Pattern) of
{match, _} ->
diff --git a/src/couch_log/test/eunit/couch_log_monitor_test.erl b/src/couch_log/test/eunit/couch_log_monitor_test.erl
index eec008522..ceeb98b4e 100644
--- a/src/couch_log/test/eunit/couch_log_monitor_test.erl
+++ b/src/couch_log/test/eunit/couch_log_monitor_test.erl
@@ -12,24 +12,16 @@
-module(couch_log_monitor_test).
-
-include_lib("eunit/include/eunit.hrl").
-
-define(HANDLER, couch_log_error_logger_h).
-
couch_log_monitor_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun monitor_ignores_unknown_messages/0,
- fun monitor_restarts_handler/0,
- fun coverage_test/0
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun monitor_ignores_unknown_messages/0,
+ fun monitor_restarts_handler/0,
+ fun coverage_test/0
+ ]}.
monitor_ignores_unknown_messages() ->
Pid1 = get_monitor_pid(),
@@ -41,7 +33,6 @@ monitor_ignores_unknown_messages() ->
timer:sleep(250),
?assert(is_process_alive(Pid1)).
-
monitor_restarts_handler() ->
Pid1 = get_monitor_pid(),
error_logger:delete_report_handler(?HANDLER),
@@ -55,12 +46,10 @@ monitor_restarts_handler() ->
Handlers = gen_event:which_handlers(error_logger),
?assert(lists:member(?HANDLER, Handlers)).
-
coverage_test() ->
Resp = couch_log_monitor:code_change(foo, bazinga, baz),
?assertEqual({ok, bazinga}, Resp).
-
get_monitor_pid() ->
Children = supervisor:which_children(couch_log_sup),
[MonPid] = [Pid || {couch_log_monitor, Pid, _, _} <- Children, is_pid(Pid)],
diff --git a/src/couch_log/test/eunit/couch_log_server_test.erl b/src/couch_log/test/eunit/couch_log_server_test.erl
index 7af570e90..a2334b048 100644
--- a/src/couch_log/test/eunit/couch_log_server_test.erl
+++ b/src/couch_log/test/eunit/couch_log_server_test.erl
@@ -12,23 +12,16 @@
-module(couch_log_server_test).
-
-include("couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
couch_log_server_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun check_can_reconfigure/0,
- fun check_can_restart/0,
- fun check_can_cast_log_entry/0,
- fun check_logs_ignored_messages/0
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun check_can_reconfigure/0,
+ fun check_can_restart/0,
+ fun check_can_cast_log_entry/0,
+ fun check_logs_ignored_messages/0
+ ]}.
check_can_reconfigure() ->
couch_log:error("a message", []),
@@ -44,7 +37,6 @@ check_can_reconfigure() ->
?assertEqual('$end_of_table', couch_log_test_util:last_log_key())
end).
-
check_can_restart() ->
Pid1 = whereis(couch_log_server),
Ref = erlang:monitor(process, Pid1),
@@ -65,7 +57,6 @@ check_can_restart() ->
?assertNotEqual(Pid2, Pid1),
?assert(is_process_alive(Pid2)).
-
check_can_cast_log_entry() ->
Entry = #log_entry{
level = critical,
@@ -75,10 +66,10 @@ check_can_cast_log_entry() ->
time_stamp = "2016-07-20-almost-my-birthday"
},
ok = gen_server:cast(couch_log_server, {log, Entry}),
- timer:sleep(500), % totes gross
+ % totes gross
+ timer:sleep(500),
?assertEqual(Entry, couch_log_test_util:last_log()).
-
check_logs_ignored_messages() ->
gen_server:call(couch_log_server, a_call),
?assertMatch(
@@ -91,7 +82,8 @@ check_logs_ignored_messages() ->
),
gen_server:cast(couch_log_server, a_cast),
- timer:sleep(500), % yes gross
+ % yes gross
+ timer:sleep(500),
?assertMatch(
#log_entry{
level = error,
@@ -102,7 +94,8 @@ check_logs_ignored_messages() ->
),
couch_log_server ! an_info,
- timer:sleep(500), % still gross
+ % still gross
+ timer:sleep(500),
?assertMatch(
#log_entry{
level = error,
@@ -112,7 +105,6 @@ check_logs_ignored_messages() ->
couch_log_test_util:last_log()
).
-
coverage_test() ->
Resp = couch_log_server:code_change(foo, bazinga, baz),
?assertEqual({ok, bazinga}, Resp).
diff --git a/src/couch_log/test/eunit/couch_log_test.erl b/src/couch_log/test/eunit/couch_log_test.erl
index c7195f65f..1538934b3 100644
--- a/src/couch_log/test/eunit/couch_log_test.erl
+++ b/src/couch_log/test/eunit/couch_log_test.erl
@@ -12,24 +12,17 @@
-module(couch_log_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
couch_log_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- gen() ++ [fun check_set_level/0]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1,
+ gen() ++ [fun check_set_level/0]}.
check_set_level() ->
couch_log:set_level(crit),
?assertEqual("crit", config:get("log", "level")).
-
levels() ->
[
debug,
@@ -43,17 +36,17 @@ levels() ->
none
].
-
gen() ->
- lists:map(fun(L) ->
- Name = "Test log level: " ++ couch_log_util:level_to_string(L),
- {Name, fun() -> check_levels(L, levels()) end}
- end, levels() -- [none]).
-
+ lists:map(
+ fun(L) ->
+ Name = "Test log level: " ++ couch_log_util:level_to_string(L),
+ {Name, fun() -> check_levels(L, levels()) end}
+ end,
+ levels() -- [none]
+ ).
check_levels(_, []) ->
ok;
-
check_levels(TestLevel, [CfgLevel | RestLevels]) ->
TestInt = couch_log_util:level_to_integer(TestLevel),
CfgInt = couch_log_util:level_to_integer(CfgLevel),
@@ -78,7 +71,6 @@ check_levels(TestLevel, [CfgLevel | RestLevels]) ->
end),
check_levels(TestLevel, RestLevels).
-
new_msg() ->
Bin = list_to_binary([couch_rand:uniform(255) || _ <- lists:seq(1, 16)]),
couch_util:to_hex(Bin).
diff --git a/src/couch_log/test/eunit/couch_log_test_util.erl b/src/couch_log/test/eunit/couch_log_test_util.erl
index 00f3981fc..9a170bdbd 100644
--- a/src/couch_log/test/eunit/couch_log_test_util.erl
+++ b/src/couch_log/test/eunit/couch_log_test_util.erl
@@ -25,7 +25,6 @@
-include("couch_log.hrl").
-
start() ->
remove_error_loggers(),
application:set_env(config, ini_files, config_files()),
@@ -35,13 +34,11 @@ start() ->
meck:new(couch_stats),
ok = meck:expect(couch_stats, increment_counter, ['_'], ok).
-
stop(_) ->
application:stop(config),
application:stop(couch_log),
meck:unload(couch_stats).
-
with_level(Name, Fun) ->
with_config_listener(fun() ->
try
@@ -54,7 +51,6 @@ with_level(Name, Fun) ->
end
end).
-
with_config_listener(Fun) ->
Listener = self(),
try
@@ -64,7 +60,6 @@ with_config_listener(Fun) ->
rem_listener(Listener)
end.
-
wait_for_config() ->
receive
couch_log_config_change_finished -> ok
@@ -72,47 +67,53 @@ wait_for_config() ->
erlang:error(config_change_timeout)
end.
-
with_meck(Mods, Fun) ->
- lists:foreach(fun(M) ->
- case M of
- {Name, Opts} -> meck:new(Name, Opts);
- Name -> meck:new(Name)
- end
- end, Mods),
+ lists:foreach(
+ fun(M) ->
+ case M of
+ {Name, Opts} -> meck:new(Name, Opts);
+ Name -> meck:new(Name)
+ end
+ end,
+ Mods
+ ),
try
Fun()
after
- lists:foreach(fun(M) ->
- case M of
- {Name, _} -> meck:unload(Name);
- Name -> meck:unload(Name)
- end
- end, Mods)
+ lists:foreach(
+ fun(M) ->
+ case M of
+ {Name, _} -> meck:unload(Name);
+ Name -> meck:unload(Name)
+ end
+ end,
+ Mods
+ )
end.
-
ignore_common_loggers() ->
IgnoreSet = [
application_controller,
config,
config_event
],
- lists:foreach(fun(Proc) ->
- disable_logs_from(Proc)
- end, IgnoreSet).
-
+ lists:foreach(
+ fun(Proc) ->
+ disable_logs_from(Proc)
+ end,
+ IgnoreSet
+ ).
disable_logs_from(Pid) when is_pid(Pid) ->
- Ignored = case application:get_env(couch_log, ignored_pids) of
- {ok, L} when is_list(L) ->
- lists:usort([Pid | L]);
- _E ->
- [Pid]
- end,
+ Ignored =
+ case application:get_env(couch_log, ignored_pids) of
+ {ok, L} when is_list(L) ->
+ lists:usort([Pid | L]);
+ _E ->
+ [Pid]
+ end,
IgnoredAlive = [P || P <- Ignored, is_process_alive(P)],
application:set_env(couch_log, ignored_pids, IgnoredAlive);
-
disable_logs_from(Name) when is_atom(Name) ->
case whereis(Name) of
P when is_pid(P) ->
@@ -121,48 +122,49 @@ disable_logs_from(Name) when is_atom(Name) ->
erlang:error({unknown_pid_name, Name})
end.
-
last_log_key() ->
ets:last(?COUCH_LOG_TEST_TABLE).
-
last_log() ->
[{_, Entry}] = ets:lookup(?COUCH_LOG_TEST_TABLE, last_log_key()),
Entry.
-
remove_error_loggers() ->
ErrorLoggerPid = whereis(error_logger),
- if ErrorLoggerPid == undefined -> ok; true ->
- lists:foreach(fun(Handler) ->
- error_logger:delete_report_handler(Handler)
- end, gen_event:which_handlers(ErrorLoggerPid))
+ if
+ ErrorLoggerPid == undefined ->
+ ok;
+ true ->
+ lists:foreach(
+ fun(Handler) ->
+ error_logger:delete_report_handler(Handler)
+ end,
+ gen_event:which_handlers(ErrorLoggerPid)
+ )
end.
-
config_files() ->
Path = filename:dirname(code:which(?MODULE)),
Name = filename:join(Path, "couch_log_test.ini"),
ok = file:write_file(Name, "[log]\nwriter = ets\n"),
[Name].
-
add_listener(Listener) ->
- Listeners = case application:get_env(couch_log, config_listeners) of
- {ok, L} when is_list(L) ->
- lists:usort([Listener | L]);
- _ ->
- [Listener]
- end,
+ Listeners =
+ case application:get_env(couch_log, config_listeners) of
+ {ok, L} when is_list(L) ->
+ lists:usort([Listener | L]);
+ _ ->
+ [Listener]
+ end,
application:set_env(couch_log, config_listeners, Listeners).
-
rem_listener(Listener) ->
- Listeners = case application:get_env(couch_lig, config_listeners) of
- {ok, L} when is_list(L) ->
- L -- [Listener];
- _ ->
- []
- end,
+ Listeners =
+ case application:get_env(couch_lig, config_listeners) of
+ {ok, L} when is_list(L) ->
+ L -- [Listener];
+ _ ->
+ []
+ end,
application:set_env(couch_log, config_listeners, Listeners).
-
diff --git a/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl b/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
index 77d555440..8d1fdeffb 100644
--- a/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
+++ b/src/couch_log/test/eunit/couch_log_trunc_io_fmt_test.erl
@@ -12,22 +12,22 @@
-module(couch_log_trunc_io_fmt_test).
-
-include_lib("eunit/include/eunit.hrl").
-
format_test_() ->
- lists:map(fun({Fmt, Args, Expect}) ->
- Name = io_lib:format("~p", [Expect]),
- {lists:flatten(Name),
- ?_assertEqual(
- Expect,
- lists:flatten(couch_log_trunc_io_fmt:format(Fmt, Args, 1024))
- )
- }
- end, cases()).
-
-
+ lists:map(
+ fun({Fmt, Args, Expect}) ->
+ Name = io_lib:format("~p", [Expect]),
+ {
+ lists:flatten(Name),
+ ?_assertEqual(
+ Expect,
+ lists:flatten(couch_log_trunc_io_fmt:format(Fmt, Args, 1024))
+ )
+ }
+ end,
+ cases()
+ ).
chomp_test() ->
R1 = couch_log_trunc_io_fmt:format("\n", [], 1024, [{chomp, true}]),
@@ -35,7 +35,6 @@ chomp_test() ->
R2 = couch_log_trunc_io_fmt:format("~n", [], 1024, [{chomp, true}]),
?assertEqual("", lists:flatten(R2)).
-
cases() ->
[
{"", [], ""},
diff --git a/src/couch_log/test/eunit/couch_log_util_test.erl b/src/couch_log/test/eunit/couch_log_util_test.erl
index e97911aa9..ade968146 100644
--- a/src/couch_log/test/eunit/couch_log_util_test.erl
+++ b/src/couch_log/test/eunit/couch_log_util_test.erl
@@ -12,25 +12,24 @@
-module(couch_log_util_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
get_message_id_test() ->
?assertEqual("--------", couch_log_util:get_msg_id()),
erlang:put(nonce, "deadbeef"),
?assertEqual("deadbeef", couch_log_util:get_msg_id()),
erlang:put(nonce, undefined).
-
level_to_atom_test() ->
- lists:foreach(fun(L) ->
- ?assert(is_atom(couch_log_util:level_to_atom(L))),
- ?assert(is_integer(couch_log_util:level_to_integer(L))),
- ?assert(is_list(couch_log_util:level_to_string(L)))
- end, levels()).
-
+ lists:foreach(
+ fun(L) ->
+ ?assert(is_atom(couch_log_util:level_to_atom(L))),
+ ?assert(is_integer(couch_log_util:level_to_integer(L))),
+ ?assert(is_list(couch_log_util:level_to_string(L)))
+ end,
+ levels()
+ ).
string_p_test() ->
?assertEqual(false, couch_log_util:string_p([])),
@@ -43,13 +42,50 @@ string_p_test() ->
?assertEqual(true, couch_log_util:string_p([$\f])),
?assertEqual(true, couch_log_util:string_p([$\e])).
-
levels() ->
[
- 1, 2, 3, 4, 5, 6, 7, 8, 9,
- "1", "2", "3", "4", "5", "6", "7", "8", "9",
- debug, info, notice, warning, warn, error, err,
- critical, crit, alert, emergency, emerg, none,
- "debug", "info", "notice", "warning", "warn", "error", "err",
- "critical", "crit", "alert", "emergency", "emerg", "none"
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+ "6",
+ "7",
+ "8",
+ "9",
+ debug,
+ info,
+ notice,
+ warning,
+ warn,
+ error,
+ err,
+ critical,
+ crit,
+ alert,
+ emergency,
+ emerg,
+ none,
+ "debug",
+ "info",
+ "notice",
+ "warning",
+ "warn",
+ "error",
+ "err",
+ "critical",
+ "crit",
+ "alert",
+ "emergency",
+ "emerg",
+ "none"
].
diff --git a/src/couch_log/test/eunit/couch_log_writer_ets.erl b/src/couch_log/test/eunit/couch_log_writer_ets.erl
index d5fd327ac..7ddb9f39e 100644
--- a/src/couch_log/test/eunit/couch_log_writer_ets.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_ets.erl
@@ -13,27 +13,22 @@
-module(couch_log_writer_ets).
-behaviour(couch_log_writer).
-
-export([
init/0,
terminate/2,
write/2
]).
-
-include("couch_log.hrl").
-
init() ->
ets:new(?COUCH_LOG_TEST_TABLE, [named_table, public, ordered_set]),
{ok, 0}.
-
terminate(_, _St) ->
ets:delete(?COUCH_LOG_TEST_TABLE),
ok.
-
write(Entry0, St) ->
Entry = Entry0#log_entry{
msg = lists:flatten(Entry0#log_entry.msg),
diff --git a/src/couch_log/test/eunit/couch_log_writer_file_test.erl b/src/couch_log/test/eunit/couch_log_writer_file_test.erl
index ba042610a..2e40088f4 100644
--- a/src/couch_log/test/eunit/couch_log_writer_file_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_file_test.erl
@@ -12,65 +12,54 @@
-module(couch_log_writer_file_test).
-
-include_lib("kernel/include/file.hrl").
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
-define(WRITER, couch_log_writer_file).
-
couch_log_writer_file_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun check_init_terminate/0,
- fun() ->
- couch_log_test_util:with_meck(
- [{filelib, [unstick]}],
- fun check_ensure_dir_fail/0
- )
- end,
- fun() ->
- couch_log_test_util:with_meck(
- [{file, [unstick, passthrough]}],
- fun check_open_fail/0
- )
- end,
- fun() ->
- couch_log_test_util:with_meck(
- [{file, [unstick, passthrough]}],
- fun check_read_file_info_fail/0
- )
- end,
- fun check_file_write/0,
- fun check_buffered_file_write/0,
- fun check_reopen/0
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun check_init_terminate/0,
+ fun() ->
+ couch_log_test_util:with_meck(
+ [{filelib, [unstick]}],
+ fun check_ensure_dir_fail/0
+ )
+ end,
+ fun() ->
+ couch_log_test_util:with_meck(
+ [{file, [unstick, passthrough]}],
+ fun check_open_fail/0
+ )
+ end,
+ fun() ->
+ couch_log_test_util:with_meck(
+ [{file, [unstick, passthrough]}],
+ fun check_read_file_info_fail/0
+ )
+ end,
+ fun check_file_write/0,
+ fun check_buffered_file_write/0,
+ fun check_reopen/0
+ ]}.
check_init_terminate() ->
{ok, St} = ?WRITER:init(),
ok = ?WRITER:terminate(stop, St).
-
check_ensure_dir_fail() ->
meck:expect(filelib, ensure_dir, 1, {error, eperm}),
?assertEqual({error, eperm}, ?WRITER:init()),
?assert(meck:called(filelib, ensure_dir, 1)),
?assert(meck:validate(filelib)).
-
check_open_fail() ->
meck:expect(file, open, 2, {error, enotfound}),
?assertEqual({error, enotfound}, ?WRITER:init()),
?assert(meck:called(file, open, 2)),
?assert(meck:validate(file)).
-
check_read_file_info_fail() ->
RFI = fun
("./couch.log") -> {error, enoent};
@@ -81,12 +70,12 @@ check_read_file_info_fail() ->
?assert(meck:called(file, read_file_info, 1)),
?assert(meck:validate(file)).
-
check_file_write() ->
% Make sure we have an empty log for this test
IsFile = filelib:is_file("./couch.log"),
- if not IsFile -> ok; true ->
- file:delete("./couch.log")
+ if
+ not IsFile -> ok;
+ true -> file:delete("./couch.log")
end,
Entry = #log_entry{
@@ -104,12 +93,12 @@ check_file_write() ->
Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>,
?assertEqual(Expect, Data).
-
check_buffered_file_write() ->
% Make sure we have an empty log for this test
IsFile = filelib:is_file("./couch.log"),
- if not IsFile -> ok; true ->
- file:delete("./couch.log")
+ if
+ not IsFile -> ok;
+ true -> file:delete("./couch.log")
end,
config:set("log", "write_buffer", "1024"),
@@ -135,7 +124,6 @@ check_buffered_file_write() ->
Expect = <<"[info] time_stamp nonode@nohost <0.1.0> msg_id stuff\n">>,
?assertEqual(Expect, Data).
-
check_reopen() ->
{ok, St1} = clear_clock(?WRITER:init()),
{ok, St2} = clear_clock(couch_log_writer_file:maybe_reopen(St1)),
@@ -160,10 +148,8 @@ check_reopen() ->
?assert(element(3, St4) /= element(3, St2))
end.
-
clear_clock({ok, St}) ->
{ok, clear_clock(St)};
-
clear_clock(St) ->
{st, Path, Fd, INode, _} = St,
{st, Path, Fd, INode, {0, 0, 0}}.
diff --git a/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl b/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl
index 1e99263dd..04f1e9a41 100644
--- a/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_stderr_test.erl
@@ -12,35 +12,26 @@
-module(couch_log_writer_stderr_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
-define(WRITER, couch_log_writer_stderr).
-
couch_log_writer_stderr_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun check_init_terminate/0,
- fun() ->
- couch_log_test_util:with_meck(
- [{io, [unstick]}],
- fun check_write/0
- )
- end
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun check_init_terminate/0,
+ fun() ->
+ couch_log_test_util:with_meck(
+ [{io, [unstick]}],
+ fun check_write/0
+ )
+ end
+ ]}.
check_init_terminate() ->
{ok, St} = ?WRITER:init(),
ok = ?WRITER:terminate(stop, St).
-
check_write() ->
meck:expect(io, format, 3, ok),
diff --git a/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl b/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl
index c32b5c6bf..5a3f89520 100644
--- a/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_syslog_test.erl
@@ -12,41 +12,32 @@
-module(couch_log_writer_syslog_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
-define(WRITER, couch_log_writer_syslog).
-
couch_log_writer_syslog_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun check_init_terminate/0,
- fun() ->
- couch_log_test_util:with_meck(
- [{io, [unstick]}],
- fun check_stderr_write/0
- )
- end,
- fun() ->
- couch_log_test_util:with_meck(
- [{gen_udp, [unstick]}],
- fun check_udp_send/0
- )
- end
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun check_init_terminate/0,
+ fun() ->
+ couch_log_test_util:with_meck(
+ [{io, [unstick]}],
+ fun check_stderr_write/0
+ )
+ end,
+ fun() ->
+ couch_log_test_util:with_meck(
+ [{gen_udp, [unstick]}],
+ fun check_udp_send/0
+ )
+ end
+ ]}.
check_init_terminate() ->
{ok, St} = ?WRITER:init(),
ok = ?WRITER:terminate(stop, St).
-
check_stderr_write() ->
meck:expect(io, format, 3, ok),
@@ -64,7 +55,6 @@ check_stderr_write() ->
?assert(meck:called(io, format, 3)),
?assert(meck:validate(io)).
-
check_udp_send() ->
meck:expect(gen_udp, open, 1, {ok, socket}),
meck:expect(gen_udp, send, 4, ok),
@@ -91,32 +81,64 @@ check_udp_send() ->
?assert(meck:called(gen_udp, close, 1)),
?assert(meck:validate(gen_udp)).
-
facility_test() ->
Names = [
- "kern", "user", "mail", "daemon", "auth", "syslog", "lpr",
- "news", "uucp", "clock", "authpriv", "ftp", "ntp", "audit",
- "alert", "cron", "local0", "local1", "local2", "local3",
- "local4", "local5", "local6", "local7"
+ "kern",
+ "user",
+ "mail",
+ "daemon",
+ "auth",
+ "syslog",
+ "lpr",
+ "news",
+ "uucp",
+ "clock",
+ "authpriv",
+ "ftp",
+ "ntp",
+ "audit",
+ "alert",
+ "cron",
+ "local0",
+ "local1",
+ "local2",
+ "local3",
+ "local4",
+ "local5",
+ "local6",
+ "local7"
],
- lists:foldl(fun(Name, Id) ->
- IdStr = lists:flatten(io_lib:format("~w", [Id])),
- ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(Name)),
- ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(IdStr)),
- Id + 1
- end, 0, Names),
+ lists:foldl(
+ fun(Name, Id) ->
+ IdStr = lists:flatten(io_lib:format("~w", [Id])),
+ ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(Name)),
+ ?assertEqual(Id bsl 3, couch_log_writer_syslog:get_facility(IdStr)),
+ Id + 1
+ end,
+ 0,
+ Names
+ ),
?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("foo")),
?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("-1")),
?assertEqual(23 bsl 3, couch_log_writer_syslog:get_facility("24")).
-
level_test() ->
Levels = [
- emergency, alert, critical, error,
- warning, notice, info, debug
+ emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug
],
- lists:foldl(fun(Name, Id) ->
- ?assertEqual(Id, couch_log_writer_syslog:get_level(Name)),
- Id + 1
- end, 0, Levels),
+ lists:foldl(
+ fun(Name, Id) ->
+ ?assertEqual(Id, couch_log_writer_syslog:get_level(Name)),
+ Id + 1
+ end,
+ 0,
+ Levels
+ ),
?assertEqual(3, couch_log_writer_syslog:get_level(foo)).
diff --git a/src/couch_log/test/eunit/couch_log_writer_test.erl b/src/couch_log/test/eunit/couch_log_writer_test.erl
index d0bb347fe..e758c9f60 100644
--- a/src/couch_log/test/eunit/couch_log_writer_test.erl
+++ b/src/couch_log/test/eunit/couch_log_writer_test.erl
@@ -12,20 +12,13 @@
-module(couch_log_writer_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-
couch_log_writer_test_() ->
- {setup,
- fun couch_log_test_util:start/0,
- fun couch_log_test_util:stop/1,
- [
- fun check_writer_change/0
- ]
- }.
-
+ {setup, fun couch_log_test_util:start/0, fun couch_log_test_util:stop/1, [
+ fun check_writer_change/0
+ ]}.
check_writer_change() ->
% Change to file and back
@@ -51,4 +44,3 @@ check_writer_change() ->
couch_log_test_util:wait_for_config(),
?assertEqual(0, ets:info(?COUCH_LOG_TEST_TABLE, size))
end).
-
diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl
index 1cdc91809..798b939c7 100644
--- a/src/couch_mrview/src/couch_mrview.erl
+++ b/src/couch_mrview/src/couch_mrview.erl
@@ -27,7 +27,7 @@
-record(mracc, {
db,
- meta_sent=false,
+ meta_sent = false,
total_rows,
offset,
limit,
@@ -36,82 +36,100 @@
doc_info,
callback,
user_acc,
- last_go=ok,
+ last_go = ok,
reduce_fun,
finalizer,
update_seq,
args
}).
-
-
validate_ddoc_fields(DDoc) ->
MapFuncType = map_function_type(DDoc),
- lists:foreach(fun(Path) ->
- validate_ddoc_fields(DDoc, Path)
- end, [
- [{<<"filters">>, object}, {any, [object, string]}],
- [{<<"language">>, string}],
- [{<<"lists">>, object}, {any, [object, string]}],
- [{<<"options">>, object}],
- [{<<"options">>, object}, {<<"include_design">>, boolean}],
- [{<<"options">>, object}, {<<"local_seq">>, boolean}],
- [{<<"options">>, object}, {<<"partitioned">>, boolean}],
- [{<<"rewrites">>, [string, array]}],
- [{<<"shows">>, object}, {any, [object, string]}],
- [{<<"updates">>, object}, {any, [object, string]}],
- [{<<"validate_doc_update">>, string}],
- [{<<"views">>, object}, {<<"lib">>, object}],
- [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
- [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
- ]),
+ lists:foreach(
+ fun(Path) ->
+ validate_ddoc_fields(DDoc, Path)
+ end,
+ [
+ [{<<"filters">>, object}, {any, [object, string]}],
+ [{<<"language">>, string}],
+ [{<<"lists">>, object}, {any, [object, string]}],
+ [{<<"options">>, object}],
+ [{<<"options">>, object}, {<<"include_design">>, boolean}],
+ [{<<"options">>, object}, {<<"local_seq">>, boolean}],
+ [{<<"options">>, object}, {<<"partitioned">>, boolean}],
+ [{<<"rewrites">>, [string, array]}],
+ [{<<"shows">>, object}, {any, [object, string]}],
+ [{<<"updates">>, object}, {any, [object, string]}],
+ [{<<"validate_doc_update">>, string}],
+ [{<<"views">>, object}, {<<"lib">>, object}],
+ [{<<"views">>, object}, {any, object}, {<<"map">>, MapFuncType}],
+ [{<<"views">>, object}, {any, object}, {<<"reduce">>, string}]
+ ]
+ ),
require_map_function_for_views(DDoc),
ok.
require_map_function_for_views({Props}) ->
case couch_util:get_value(<<"views">>, Props) of
- undefined -> ok;
+ undefined ->
+ ok;
{Views} ->
- lists:foreach(fun
- ({<<"lib">>, _}) -> ok;
- ({Key, {Value}}) ->
- case couch_util:get_value(<<"map">>, Value) of
- undefined -> throw({invalid_design_doc,
- <<"View `", Key/binary, "` must contain map function">>});
- _ -> ok
- end
- end, Views),
+ lists:foreach(
+ fun
+ ({<<"lib">>, _}) ->
+ ok;
+ ({Key, {Value}}) ->
+ case couch_util:get_value(<<"map">>, Value) of
+ undefined ->
+ throw(
+ {invalid_design_doc,
+ <<"View `", Key/binary, "` must contain map function">>}
+ );
+ _ ->
+ ok
+ end
+ end,
+ Views
+ ),
ok
end.
validate_ddoc_fields(DDoc, Path) ->
case validate_ddoc_fields(DDoc, Path, []) of
- ok -> ok;
+ ok ->
+ ok;
{error, {FailedPath0, Type0}} ->
FailedPath = iolist_to_binary(join(FailedPath0, <<".">>)),
Type = format_type(Type0),
- throw({invalid_design_doc,
- <<"`", FailedPath/binary, "` field must have ",
- Type/binary, " type">>})
+ throw(
+ {invalid_design_doc,
+ <<"`", FailedPath/binary, "` field must have ", Type/binary, " type">>}
+ )
end.
validate_ddoc_fields(undefined, _, _) ->
ok;
validate_ddoc_fields(_, [], _) ->
ok;
-validate_ddoc_fields({KVS}=Props, [{any, Type} | Rest], Acc) ->
- lists:foldl(fun
- ({Key, _}, ok) ->
- validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
- ({_, _}, {error, _}=Error) ->
- Error
- end, ok, KVS);
-validate_ddoc_fields({KVS}=Props, [{Key, Type} | Rest], Acc) ->
+validate_ddoc_fields({KVS} = Props, [{any, Type} | Rest], Acc) ->
+ lists:foldl(
+ fun
+ ({Key, _}, ok) ->
+ validate_ddoc_fields(Props, [{Key, Type} | Rest], Acc);
+ ({_, _}, {error, _} = Error) ->
+ Error
+ end,
+ ok,
+ KVS
+ );
+validate_ddoc_fields({KVS} = Props, [{Key, Type} | Rest], Acc) ->
case validate_ddoc_field(Props, {Key, Type}) of
ok ->
- validate_ddoc_fields(couch_util:get_value(Key, KVS),
- Rest,
- [Key | Acc]);
+ validate_ddoc_fields(
+ couch_util:get_value(Key, KVS),
+ Rest,
+ [Key | Acc]
+ );
error ->
{error, {[Key | Acc], Type}};
{error, Key1} ->
@@ -123,10 +141,14 @@ validate_ddoc_field(undefined, Type) when is_atom(Type) ->
validate_ddoc_field(_, any) ->
ok;
validate_ddoc_field(Value, Types) when is_list(Types) ->
- lists:foldl(fun
- (_, ok) -> ok;
- (Type, _) -> validate_ddoc_field(Value, Type)
- end, error, Types);
+ lists:foldl(
+ fun
+ (_, ok) -> ok;
+ (Type, _) -> validate_ddoc_field(Value, Type)
+ end,
+ error,
+ Types
+ );
validate_ddoc_field(Value, string) when is_binary(Value) ->
ok;
validate_ddoc_field(Value, array) when is_list(Value) ->
@@ -165,36 +187,38 @@ format_type(Types) when is_list(Types) ->
join(L, Sep) ->
join(L, Sep, []).
-join([H|[]], _, Acc) ->
+join([H | []], _, Acc) ->
[H | Acc];
-join([H|T], Sep, Acc) ->
+join([H | T], Sep, Acc) ->
join(T, Sep, [Sep, H | Acc]).
-
-validate(Db, DDoc) ->
+validate(Db, DDoc) ->
ok = validate_ddoc_fields(DDoc#doc.body),
GetName = fun
(#mrview{map_names = [Name | _]}) -> Name;
(#mrview{reduce_funs = [{Name, _} | _]}) -> Name;
(_) -> null
end,
- ValidateView = fun(Proc, #mrview{def=MapSrc, reduce_funs=Reds}=View) ->
+ ValidateView = fun(Proc, #mrview{def = MapSrc, reduce_funs = Reds} = View) ->
couch_query_servers:try_compile(Proc, map, GetName(View), MapSrc),
- lists:foreach(fun
- ({_RedName, <<"_sum", _/binary>>}) ->
- ok;
- ({_RedName, <<"_count", _/binary>>}) ->
- ok;
- ({_RedName, <<"_stats", _/binary>>}) ->
- ok;
- ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
- ok;
- ({_RedName, <<"_", _/binary>> = Bad}) ->
- Msg = ["`", Bad, "` is not a supported reduce function."],
- throw({invalid_design_doc, Msg});
- ({RedName, RedSrc}) ->
- couch_query_servers:try_compile(Proc, reduce, RedName, RedSrc)
- end, Reds)
+ lists:foreach(
+ fun
+ ({_RedName, <<"_sum", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_count", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_stats", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_approx_count_distinct", _/binary>>}) ->
+ ok;
+ ({_RedName, <<"_", _/binary>> = Bad}) ->
+ Msg = ["`", Bad, "` is not a supported reduce function."],
+ throw({invalid_design_doc, Msg});
+ ({RedName, RedSrc}) ->
+ couch_query_servers:try_compile(Proc, reduce, RedName, RedSrc)
+ end,
+ Reds
+ )
end,
{ok, #mrst{
language = Lang,
@@ -204,9 +228,12 @@ validate(Db, DDoc) ->
case {couch_db:is_partitioned(Db), Partitioned} of
{false, true} ->
- throw({invalid_design_doc,
- <<"partitioned option cannot be true in a "
- "non-partitioned database.">>});
+ throw(
+ {invalid_design_doc, <<
+ "partitioned option cannot be true in a "
+ "non-partitioned database."
+ >>}
+ );
{_, _} ->
ok
end,
@@ -220,16 +247,15 @@ validate(Db, DDoc) ->
after
couch_query_servers:ret_os_process(Proc)
end
- catch {unknown_query_language, _Lang} ->
- %% Allow users to save ddocs written in unknown languages
- ok
+ catch
+ {unknown_query_language, _Lang} ->
+ %% Allow users to save ddocs written in unknown languages
+ ok
end.
-
query_all_docs(Db, Args) ->
query_all_docs(Db, Args, fun default_cb/2, []).
-
query_all_docs(Db, Args, Callback, Acc) when is_list(Args) ->
query_all_docs(Db, to_mrargs(Args), Callback, Acc);
query_all_docs(Db, Args0, Callback, Acc) ->
@@ -237,44 +263,41 @@ query_all_docs(Db, Args0, Callback, Acc) ->
{ok, Info} = couch_db:get_db_info(WDb),
couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Info)))
end),
- Args1 = Args0#mrargs{view_type=map},
+ Args1 = Args0#mrargs{view_type = map},
Args2 = couch_mrview_util:validate_all_docs_args(Db, Args1),
- {ok, Acc1} = case Args2#mrargs.preflight_fun of
- PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc);
- _ -> {ok, Acc}
- end,
+ {ok, Acc1} =
+ case Args2#mrargs.preflight_fun of
+ PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc);
+ _ -> {ok, Acc}
+ end,
all_docs_fold(Db, Args2, Callback, Acc1).
-
query_view(Db, DDoc, VName) ->
query_view(Db, DDoc, VName, #mrargs{}).
-
query_view(Db, DDoc, VName, Args) when is_list(Args) ->
query_view(Db, DDoc, VName, to_mrargs(Args), fun default_cb/2, []);
query_view(Db, DDoc, VName, Args) ->
query_view(Db, DDoc, VName, Args, fun default_cb/2, []).
-
query_view(Db, DDoc, VName, Args, Callback, Acc) when is_list(Args) ->
query_view(Db, DDoc, VName, to_mrargs(Args), Callback, Acc);
query_view(Db, DDoc, VName, Args0, Callback, Acc0) ->
case couch_mrview_util:get_view(Db, DDoc, VName, Args0) of
{ok, VInfo, Sig, Args} ->
- {ok, Acc1} = case Args#mrargs.preflight_fun of
- PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc0);
- _ -> {ok, Acc0}
- end,
+ {ok, Acc1} =
+ case Args#mrargs.preflight_fun of
+ PFFun when is_function(PFFun, 2) -> PFFun(Sig, Acc0);
+ _ -> {ok, Acc0}
+ end,
query_view(Db, VInfo, Args, Callback, Acc1);
ddoc_updated ->
Callback(ok, ddoc_updated)
end.
-
get_view_index_pid(Db, DDoc, ViewName, Args0) ->
couch_mrview_util:get_view_index_pid(Db, DDoc, ViewName, Args0).
-
query_view(Db, {Type, View, Ref}, Args, Callback, Acc) ->
try
case Type of
@@ -285,12 +308,10 @@ query_view(Db, {Type, View, Ref}, Args, Callback, Acc) ->
erlang:demonitor(Ref, [flush])
end.
-
get_info(Db, DDoc) ->
{ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
couch_index:get_info(Pid).
-
trigger_update(Db, DDoc) ->
trigger_update(Db, DDoc, couch_db:get_update_seq(Db)).
@@ -300,22 +321,27 @@ trigger_update(Db, DDoc, UpdateSeq) ->
%% get information on a view
get_view_info(Db, DDoc, VName) ->
- {ok, {_, View, _}, _, _Args} = couch_mrview_util:get_view(Db, DDoc, VName,
- #mrargs{}),
+ {ok, {_, View, _}, _, _Args} = couch_mrview_util:get_view(
+ Db,
+ DDoc,
+ VName,
+ #mrargs{}
+ ),
%% get the total number of rows
- {ok, TotalRows} = couch_mrview_util:get_row_count(View),
-
- {ok, [{update_seq, View#mrview.update_seq},
- {purge_seq, View#mrview.purge_seq},
- {total_rows, TotalRows}]}.
+ {ok, TotalRows} = couch_mrview_util:get_row_count(View),
+ {ok, [
+ {update_seq, View#mrview.update_seq},
+ {purge_seq, View#mrview.purge_seq},
+ {total_rows, TotalRows}
+ ]}.
%% @doc refresh a view index
-refresh(DbName, DDoc) when is_binary(DbName)->
+refresh(DbName, DDoc) when is_binary(DbName) ->
UpdateSeq = couch_util:with_db(DbName, fun(WDb) ->
- couch_db:get_update_seq(WDb)
- end),
+ couch_db:get_update_seq(WDb)
+ end),
case couch_index_server:get_index(couch_mrview_index, DbName, DDoc) of
{ok, Pid} ->
@@ -326,48 +352,43 @@ refresh(DbName, DDoc) when is_binary(DbName)->
Error ->
{error, Error}
end;
-
refresh(Db, DDoc) ->
refresh(couch_db:name(Db), DDoc).
compact(Db, DDoc) ->
compact(Db, DDoc, []).
-
compact(Db, DDoc, Opts) ->
{ok, Pid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
couch_index:compact(Pid, Opts).
-
cancel_compaction(Db, DDoc) ->
{ok, IPid} = couch_index_server:get_index(couch_mrview_index, Db, DDoc),
{ok, CPid} = couch_index:get_compactor_pid(IPid),
ok = couch_index_compactor:cancel(CPid),
% Cleanup the compaction file if it exists
- {ok, #mrst{sig=Sig, db_name=DbName}} = couch_index:get_state(IPid, 0),
+ {ok, #mrst{sig = Sig, db_name = DbName}} = couch_index:get_state(IPid, 0),
couch_mrview_util:delete_compaction_file(DbName, Sig),
ok.
-
cleanup(Db) ->
couch_mrview_cleanup:run(Db).
-
-all_docs_fold(Db, #mrargs{keys=undefined}=Args, Callback, UAcc) ->
+all_docs_fold(Db, #mrargs{keys = undefined} = Args, Callback, UAcc) ->
ReduceFun = get_reduce_fun(Args),
Total = get_total_rows(Db, Args),
UpdateSeq = get_update_seq(Db, Args),
Acc = #mracc{
- db=Db,
- total_rows=Total,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- callback=Callback,
- user_acc=UAcc,
- reduce_fun=ReduceFun,
- update_seq=UpdateSeq,
- args=Args
+ db = Db,
+ total_rows = Total,
+ limit = Args#mrargs.limit,
+ skip = Args#mrargs.skip,
+ callback = Callback,
+ user_acc = UAcc,
+ reduce_fun = ReduceFun,
+ update_seq = UpdateSeq,
+ args = Args
},
[Opts1] = couch_mrview_util:all_docs_key_opts(Args),
% TODO: This is a terrible hack for now. We'll probably have
@@ -375,243 +396,262 @@ all_docs_fold(Db, #mrargs{keys=undefined}=Args, Callback, UAcc) ->
% a btree. For now non-btree's will just have to pass 0 or
% some fake reductions to get an offset.
Opts2 = [include_reductions | Opts1],
- FunName = case couch_util:get_value(namespace, Args#mrargs.extra) of
- <<"_design">> -> fold_design_docs;
- <<"_local">> -> fold_local_docs;
- _ -> fold_docs
- end,
+ FunName =
+ case couch_util:get_value(namespace, Args#mrargs.extra) of
+ <<"_design">> -> fold_design_docs;
+ <<"_local">> -> fold_local_docs;
+ _ -> fold_docs
+ end,
{ok, Offset, FinalAcc} = couch_db:FunName(Db, fun map_fold/3, Acc, Opts2),
finish_fold(FinalAcc, [{total, Total}, {offset, Offset}]);
-all_docs_fold(Db, #mrargs{direction=Dir, keys=Keys0}=Args, Callback, UAcc) ->
+all_docs_fold(Db, #mrargs{direction = Dir, keys = Keys0} = Args, Callback, UAcc) ->
ReduceFun = get_reduce_fun(Args),
Total = get_total_rows(Db, Args),
UpdateSeq = get_update_seq(Db, Args),
Acc = #mracc{
- db=Db,
- total_rows=Total,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- callback=Callback,
- user_acc=UAcc,
- reduce_fun=ReduceFun,
- update_seq=UpdateSeq,
- args=Args
+ db = Db,
+ total_rows = Total,
+ limit = Args#mrargs.limit,
+ skip = Args#mrargs.skip,
+ callback = Callback,
+ user_acc = UAcc,
+ reduce_fun = ReduceFun,
+ update_seq = UpdateSeq,
+ args = Args
},
% Backwards compatibility hack. The old _all_docs iterates keys
% in reverse if descending=true was passed. Here we'll just
% reverse the list instead.
- Keys = if Dir =:= fwd -> Keys0; true -> lists:reverse(Keys0) end,
+ Keys =
+ if
+ Dir =:= fwd -> Keys0;
+ true -> lists:reverse(Keys0)
+ end,
FoldFun = fun(Key, Acc0) ->
DocInfo = (catch couch_db:get_doc_info(Db, Key)),
- {Doc, Acc1} = case DocInfo of
- {ok, #doc_info{id=Id, revs=[RevInfo | _RestRevs]}=DI} ->
- Rev = couch_doc:rev_to_str(RevInfo#rev_info.rev),
- Props = [{rev, Rev}] ++ case RevInfo#rev_info.deleted of
- true -> [{deleted, true}];
- false -> []
- end,
- {{{Id, Id}, {Props}}, Acc0#mracc{doc_info=DI}};
- not_found ->
- {{{Key, error}, not_found}, Acc0}
- end,
+ {Doc, Acc1} =
+ case DocInfo of
+ {ok, #doc_info{id = Id, revs = [RevInfo | _RestRevs]} = DI} ->
+ Rev = couch_doc:rev_to_str(RevInfo#rev_info.rev),
+ Props =
+ [{rev, Rev}] ++
+ case RevInfo#rev_info.deleted of
+ true -> [{deleted, true}];
+ false -> []
+ end,
+ {{{Id, Id}, {Props}}, Acc0#mracc{doc_info = DI}};
+ not_found ->
+ {{{Key, error}, not_found}, Acc0}
+ end,
{_, Acc2} = map_fold(Doc, {[], [{0, 0, 0}]}, Acc1),
Acc2
end,
FinalAcc = lists:foldl(FoldFun, Acc, Keys),
finish_fold(FinalAcc, [{total, Total}]).
-
map_fold(Db, View, Args, Callback, UAcc) ->
{ok, Total} = couch_mrview_util:get_row_count(View),
Acc = #mracc{
- db=Db,
- total_rows=Total,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- callback=Callback,
- user_acc=UAcc,
- reduce_fun=fun couch_mrview_util:reduce_to_count/1,
- update_seq=View#mrview.update_seq,
- args=Args
+ db = Db,
+ total_rows = Total,
+ limit = Args#mrargs.limit,
+ skip = Args#mrargs.skip,
+ callback = Callback,
+ user_acc = UAcc,
+ reduce_fun = fun couch_mrview_util:reduce_to_count/1,
+ update_seq = View#mrview.update_seq,
+ args = Args
},
OptList = couch_mrview_util:key_opts(Args),
- {Reds, Acc2} = lists:foldl(fun(Opts, {_, Acc0}) ->
- {ok, R, A} = couch_mrview_util:fold(View, fun map_fold/3, Acc0, Opts),
- {R, A}
- end, {nil, Acc}, OptList),
+ {Reds, Acc2} = lists:foldl(
+ fun(Opts, {_, Acc0}) ->
+ {ok, R, A} = couch_mrview_util:fold(View, fun map_fold/3, Acc0, Opts),
+ {R, A}
+ end,
+ {nil, Acc},
+ OptList
+ ),
Offset = couch_mrview_util:reduce_to_count(Reds),
finish_fold(Acc2, [{total, Total}, {offset, Offset}]).
-
map_fold(#full_doc_info{} = FullDocInfo, OffsetReds, Acc) ->
% matches for _all_docs and translates #full_doc_info{} -> KV pair
case couch_doc:to_doc_info(FullDocInfo) of
- #doc_info{id=Id, revs=[#rev_info{deleted=false, rev=Rev}|_]} = DI ->
+ #doc_info{id = Id, revs = [#rev_info{deleted = false, rev = Rev} | _]} = DI ->
Value = {[{rev, couch_doc:rev_to_str(Rev)}]},
- map_fold({{Id, Id}, Value}, OffsetReds, Acc#mracc{doc_info=DI});
- #doc_info{revs=[#rev_info{deleted=true}|_]} ->
+ map_fold({{Id, Id}, Value}, OffsetReds, Acc#mracc{doc_info = DI});
+ #doc_info{revs = [#rev_info{deleted = true} | _]} ->
{ok, Acc}
end;
-map_fold(_KV, _Offset, #mracc{skip=N}=Acc) when N > 0 ->
- {ok, Acc#mracc{skip=N-1, last_go=ok}};
-map_fold(KV, OffsetReds, #mracc{offset=undefined}=Acc) ->
+map_fold(_KV, _Offset, #mracc{skip = N} = Acc) when N > 0 ->
+ {ok, Acc#mracc{skip = N - 1, last_go = ok}};
+map_fold(KV, OffsetReds, #mracc{offset = undefined} = Acc) ->
#mracc{
- total_rows=Total,
- callback=Callback,
- user_acc=UAcc0,
- reduce_fun=Reduce,
- update_seq=UpdateSeq,
- args=Args
+ total_rows = Total,
+ callback = Callback,
+ user_acc = UAcc0,
+ reduce_fun = Reduce,
+ update_seq = UpdateSeq,
+ args = Args
} = Acc,
Offset = Reduce(OffsetReds),
Meta = make_meta(Args, UpdateSeq, [{total, Total}, {offset, Offset}]),
{Go, UAcc1} = Callback(Meta, UAcc0),
- Acc1 = Acc#mracc{meta_sent=true, offset=Offset, user_acc=UAcc1, last_go=Go},
+ Acc1 = Acc#mracc{meta_sent = true, offset = Offset, user_acc = UAcc1, last_go = Go},
case Go of
ok -> map_fold(KV, OffsetReds, Acc1);
stop -> {stop, Acc1}
end;
-map_fold(_KV, _Offset, #mracc{limit=0}=Acc) ->
+map_fold(_KV, _Offset, #mracc{limit = 0} = Acc) ->
{stop, Acc};
map_fold({{Key, Id}, Val}, _Offset, Acc) ->
#mracc{
- db=Db,
- limit=Limit,
- doc_info=DI,
- callback=Callback,
- user_acc=UAcc0,
- args=Args
+ db = Db,
+ limit = Limit,
+ doc_info = DI,
+ callback = Callback,
+ user_acc = UAcc0,
+ args = Args
} = Acc,
- Doc = case DI of
- #doc_info{} -> couch_mrview_util:maybe_load_doc(Db, DI, Args);
- _ -> couch_mrview_util:maybe_load_doc(Db, Id, Val, Args)
- end,
+ Doc =
+ case DI of
+ #doc_info{} -> couch_mrview_util:maybe_load_doc(Db, DI, Args);
+ _ -> couch_mrview_util:maybe_load_doc(Db, Id, Val, Args)
+ end,
Row = [{id, Id}, {key, Key}, {value, Val}] ++ Doc,
{Go, UAcc1} = Callback({row, Row}, UAcc0),
{Go, Acc#mracc{
- limit=Limit-1,
- doc_info=undefined,
- user_acc=UAcc1,
- last_go=Go
+ limit = Limit - 1,
+ doc_info = undefined,
+ user_acc = UAcc1,
+ last_go = Go
}};
map_fold(#doc{id = <<"_local/", _/binary>>} = Doc, _Offset, #mracc{} = Acc) ->
#mracc{
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0,
- args=Args
+ limit = Limit,
+ callback = Callback,
+ user_acc = UAcc0,
+ args = Args
} = Acc,
#doc{
id = DocId,
revs = {Pos, [RevId | _]}
} = Doc,
Rev = {Pos, RevId},
- Row = [
- {id, DocId},
- {key, DocId},
- {value, {[{rev, couch_doc:rev_to_str(Rev)}]}}
- ] ++ if not Args#mrargs.include_docs -> []; true ->
- [{doc, couch_doc:to_json_obj(Doc, Args#mrargs.doc_options)}]
- end,
+ Row =
+ [
+ {id, DocId},
+ {key, DocId},
+ {value, {[{rev, couch_doc:rev_to_str(Rev)}]}}
+ ] ++
+ if
+ not Args#mrargs.include_docs -> [];
+ true -> [{doc, couch_doc:to_json_obj(Doc, Args#mrargs.doc_options)}]
+ end,
{Go, UAcc1} = Callback({row, Row}, UAcc0),
{Go, Acc#mracc{
- limit=Limit-1,
- reduce_fun=undefined,
- doc_info=undefined,
- user_acc=UAcc1,
- last_go=Go
+ limit = Limit - 1,
+ reduce_fun = undefined,
+ doc_info = undefined,
+ user_acc = UAcc1,
+ last_go = Go
}}.
-red_fold(Db, {NthRed, _Lang, View}=RedView, Args, Callback, UAcc) ->
- Finalizer = case couch_util:get_value(finalizer, Args#mrargs.extra) of
- undefined ->
- {_, FunSrc} = lists:nth(NthRed, View#mrview.reduce_funs),
- FunSrc;
- CustomFun->
- CustomFun
- end,
+red_fold(Db, {NthRed, _Lang, View} = RedView, Args, Callback, UAcc) ->
+ Finalizer =
+ case couch_util:get_value(finalizer, Args#mrargs.extra) of
+ undefined ->
+ {_, FunSrc} = lists:nth(NthRed, View#mrview.reduce_funs),
+ FunSrc;
+ CustomFun ->
+ CustomFun
+ end,
Acc = #mracc{
- db=Db,
- total_rows=null,
- limit=Args#mrargs.limit,
- skip=Args#mrargs.skip,
- group_level=Args#mrargs.group_level,
- callback=Callback,
- user_acc=UAcc,
- update_seq=View#mrview.update_seq,
- finalizer=Finalizer,
- args=Args
+ db = Db,
+ total_rows = null,
+ limit = Args#mrargs.limit,
+ skip = Args#mrargs.skip,
+ group_level = Args#mrargs.group_level,
+ callback = Callback,
+ user_acc = UAcc,
+ update_seq = View#mrview.update_seq,
+ finalizer = Finalizer,
+ args = Args
},
Grouping = {key_group_level, Args#mrargs.group_level},
OptList = couch_mrview_util:key_opts(Args, [Grouping]),
- Acc2 = lists:foldl(fun(Opts, Acc0) ->
- {ok, Acc1} =
- couch_mrview_util:fold_reduce(RedView, fun red_fold/3, Acc0, Opts),
- Acc1
- end, Acc, OptList),
+ Acc2 = lists:foldl(
+ fun(Opts, Acc0) ->
+ {ok, Acc1} =
+ couch_mrview_util:fold_reduce(RedView, fun red_fold/3, Acc0, Opts),
+ Acc1
+ end,
+ Acc,
+ OptList
+ ),
finish_fold(Acc2, []).
red_fold({p, _Partition, Key}, Red, Acc) ->
red_fold(Key, Red, Acc);
-red_fold(_Key, _Red, #mracc{skip=N}=Acc) when N > 0 ->
- {ok, Acc#mracc{skip=N-1, last_go=ok}};
-red_fold(Key, Red, #mracc{meta_sent=false}=Acc) ->
+red_fold(_Key, _Red, #mracc{skip = N} = Acc) when N > 0 ->
+ {ok, Acc#mracc{skip = N - 1, last_go = ok}};
+red_fold(Key, Red, #mracc{meta_sent = false} = Acc) ->
#mracc{
- args=Args,
- callback=Callback,
- user_acc=UAcc0,
- update_seq=UpdateSeq
+ args = Args,
+ callback = Callback,
+ user_acc = UAcc0,
+ update_seq = UpdateSeq
} = Acc,
Meta = make_meta(Args, UpdateSeq, []),
{Go, UAcc1} = Callback(Meta, UAcc0),
- Acc1 = Acc#mracc{user_acc=UAcc1, meta_sent=true, last_go=Go},
+ Acc1 = Acc#mracc{user_acc = UAcc1, meta_sent = true, last_go = Go},
case Go of
ok -> red_fold(Key, Red, Acc1);
_ -> {Go, Acc1}
end;
-red_fold(_Key, _Red, #mracc{limit=0} = Acc) ->
+red_fold(_Key, _Red, #mracc{limit = 0} = Acc) ->
{stop, Acc};
-red_fold(_Key, Red, #mracc{group_level=0} = Acc) ->
+red_fold(_Key, Red, #mracc{group_level = 0} = Acc) ->
#mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
+ finalizer = Finalizer,
+ limit = Limit,
+ callback = Callback,
+ user_acc = UAcc0
} = Acc,
Row = [{key, null}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(Key, Red, #mracc{group_level=exact} = Acc) ->
+ {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}};
+red_fold(Key, Red, #mracc{group_level = exact} = Acc) ->
#mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
+ finalizer = Finalizer,
+ limit = Limit,
+ callback = Callback,
+ user_acc = UAcc0
} = Acc,
Row = [{key, Key}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0, is_list(K) ->
+ {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}};
+red_fold(K, Red, #mracc{group_level = I} = Acc) when I > 0, is_list(K) ->
#mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
+ finalizer = Finalizer,
+ limit = Limit,
+ callback = Callback,
+ user_acc = UAcc0
} = Acc,
Row = [{key, lists:sublist(K, I)}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}};
-red_fold(K, Red, #mracc{group_level=I} = Acc) when I > 0 ->
+ {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}};
+red_fold(K, Red, #mracc{group_level = I} = Acc) when I > 0 ->
#mracc{
- finalizer=Finalizer,
- limit=Limit,
- callback=Callback,
- user_acc=UAcc0
+ finalizer = Finalizer,
+ limit = Limit,
+ callback = Callback,
+ user_acc = UAcc0
} = Acc,
Row = [{key, K}, {value, maybe_finalize(Red, Finalizer)}],
{Go, UAcc1} = Callback({row, Row}, UAcc0),
- {Go, Acc#mracc{user_acc=UAcc1, limit=Limit-1, last_go=Go}}.
+ {Go, Acc#mracc{user_acc = UAcc1, limit = Limit - 1, last_go = Go}}.
maybe_finalize(Red, null) ->
Red;
@@ -619,31 +659,31 @@ maybe_finalize(Red, RedSrc) ->
{ok, Finalized} = couch_query_servers:finalize(RedSrc, Red),
Finalized.
-finish_fold(#mracc{last_go=ok, update_seq=UpdateSeq}=Acc, ExtraMeta) ->
- #mracc{callback=Callback, user_acc=UAcc, args=Args}=Acc,
+finish_fold(#mracc{last_go = ok, update_seq = UpdateSeq} = Acc, ExtraMeta) ->
+ #mracc{callback = Callback, user_acc = UAcc, args = Args} = Acc,
    % Possibly send meta info
Meta = make_meta(Args, UpdateSeq, ExtraMeta),
- {Go, UAcc1} = case Acc#mracc.meta_sent of
- false -> Callback(Meta, UAcc);
- _ -> {ok, Acc#mracc.user_acc}
- end,
+ {Go, UAcc1} =
+ case Acc#mracc.meta_sent of
+ false -> Callback(Meta, UAcc);
+ _ -> {ok, Acc#mracc.user_acc}
+ end,
% Notify callback that the fold is complete.
- {_, UAcc2} = case Go of
- ok -> Callback(complete, UAcc1);
- _ -> {ok, UAcc1}
- end,
+ {_, UAcc2} =
+ case Go of
+ ok -> Callback(complete, UAcc1);
+ _ -> {ok, UAcc1}
+ end,
{ok, UAcc2};
-finish_fold(#mracc{user_acc=UAcc}, _ExtraMeta) ->
+finish_fold(#mracc{user_acc = UAcc}, _ExtraMeta) ->
{ok, UAcc}.
-
make_meta(Args, UpdateSeq, Base) ->
case Args#mrargs.update_seq of
true -> {meta, Base ++ [{update_seq, UpdateSeq}]};
_ -> {meta, Base}
end.
-
get_reduce_fun(#mrargs{extra = Extra}) ->
case couch_util:get_value(namespace, Extra) of
<<"_local">> ->
@@ -652,7 +692,6 @@ get_reduce_fun(#mrargs{extra = Extra}) ->
fun couch_mrview_util:all_docs_reduce_to_count/1
end.
-
get_total_rows(Db, #mrargs{extra = Extra}) ->
case couch_util:get_value(namespace, Extra) of
<<"_local">> ->
@@ -665,7 +704,6 @@ get_total_rows(Db, #mrargs{extra = Extra}) ->
couch_util:get_value(doc_count, Info)
end.
-
get_update_seq(Db, #mrargs{extra = Extra}) ->
case couch_util:get_value(namespace, Extra) of
<<"_local">> ->
@@ -674,7 +712,6 @@ get_update_seq(Db, #mrargs{extra = Extra}) ->
couch_db:get_update_seq(Db)
end.
-
default_cb(complete, Acc) ->
{ok, lists:reverse(Acc)};
default_cb({final, Info}, []) ->
@@ -686,13 +723,15 @@ default_cb(ok, ddoc_updated) ->
default_cb(Row, Acc) ->
{ok, [Row | Acc]}.
-
to_mrargs(KeyList) ->
- lists:foldl(fun({Key, Value}, Acc) ->
- Index = lookup_index(couch_util:to_existing_atom(Key)),
- setelement(Index, Acc, Value)
- end, #mrargs{}, KeyList).
-
+ lists:foldl(
+ fun({Key, Value}, Acc) ->
+ Index = lookup_index(couch_util:to_existing_atom(Key)),
+ setelement(Index, Acc, Value)
+ end,
+ #mrargs{},
+ KeyList
+ ).
lookup_index(Key) ->
Index = lists:zip(
diff --git a/src/couch_mrview/src/couch_mrview_cleanup.erl b/src/couch_mrview/src/couch_mrview_cleanup.erl
index e0cb1c64f..417605c55 100644
--- a/src/couch_mrview/src/couch_mrview_cleanup.erl
+++ b/src/couch_mrview/src/couch_mrview_cleanup.erl
@@ -14,24 +14,26 @@
-export([run/1]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
run(Db) ->
RootDir = couch_index_util:root_dir(),
DbName = couch_db:name(Db),
{ok, DesignDocs} = couch_db:get_design_docs(Db),
- SigFiles = lists:foldl(fun(DDocInfo, SFAcc) ->
- {ok, DDoc} = couch_db:open_doc_int(Db, DDocInfo, [ejson_body]),
- {ok, InitState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
- Sig = InitState#mrst.sig,
- IFName = couch_mrview_util:index_file(DbName, Sig),
- CFName = couch_mrview_util:compaction_file(DbName, Sig),
- [IFName, CFName | SFAcc]
- end, [], [DD || DD <- DesignDocs, DD#full_doc_info.deleted == false]),
+ SigFiles = lists:foldl(
+ fun(DDocInfo, SFAcc) ->
+ {ok, DDoc} = couch_db:open_doc_int(Db, DDocInfo, [ejson_body]),
+ {ok, InitState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
+ Sig = InitState#mrst.sig,
+ IFName = couch_mrview_util:index_file(DbName, Sig),
+ CFName = couch_mrview_util:compaction_file(DbName, Sig),
+ [IFName, CFName | SFAcc]
+ end,
+ [],
+ [DD || DD <- DesignDocs, DD#full_doc_info.deleted == false]
+ ),
IdxDir = couch_index_util:index_dir(mrview, DbName),
DiskFiles = filelib:wildcard(filename:join(IdxDir, "*")),
@@ -39,21 +41,28 @@ run(Db) ->
% We need to delete files that have no ddoc.
ToDelete = DiskFiles -- SigFiles,
- lists:foreach(fun(FN) ->
- couch_log:debug("Deleting stale view file: ~s", [FN]),
- couch_file:delete(RootDir, FN, [sync]),
- case couch_mrview_util:verify_view_filename(FN) of
- true ->
- Sig = couch_mrview_util:get_signature_from_filename(FN),
- DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
- case couch_db:open_doc(Db, DocId, []) of
- {ok, LocalPurgeDoc} ->
- couch_db:update_doc(Db,
- LocalPurgeDoc#doc{deleted=true}, [?ADMIN_CTX]);
- {not_found, _} ->
- ok
- end;
- false -> ok
- end
- end, ToDelete),
+ lists:foreach(
+ fun(FN) ->
+ couch_log:debug("Deleting stale view file: ~s", [FN]),
+ couch_file:delete(RootDir, FN, [sync]),
+ case couch_mrview_util:verify_view_filename(FN) of
+ true ->
+ Sig = couch_mrview_util:get_signature_from_filename(FN),
+ DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
+ case couch_db:open_doc(Db, DocId, []) of
+ {ok, LocalPurgeDoc} ->
+ couch_db:update_doc(
+ Db,
+ LocalPurgeDoc#doc{deleted = true},
+ [?ADMIN_CTX]
+ );
+ {not_found, _} ->
+ ok
+ end;
+ false ->
+ ok
+ end
+ end,
+ ToDelete
+ ),
ok.
diff --git a/src/couch_mrview/src/couch_mrview_compactor.erl b/src/couch_mrview/src/couch_mrview_compactor.erl
index d42edc054..28e5a9b3d 100644
--- a/src/couch_mrview/src/couch_mrview_compactor.erl
+++ b/src/couch_mrview/src/couch_mrview_compactor.erl
@@ -18,12 +18,12 @@
-export([compact/3, swap_compacted/2, remove_compacted/1]).
-record(acc, {
- btree = nil,
- last_id = nil,
- kvs = [],
- kvs_size = 0,
- changes = 0,
- total_changes
+ btree = nil,
+ last_id = nil,
+ kvs = [],
+ kvs_size = 0,
+ changes = 0,
+ total_changes
}).
-define(DEFAULT_RECOMPACT_RETRY_COUNT, 3).
@@ -36,12 +36,12 @@ compact(_Db, State, Opts) ->
compact(State) ->
#mrst{
- db_name=DbName,
- idx_name=IdxName,
- sig=Sig,
- update_seq=Seq,
- id_btree=IdBtree,
- views=Views
+ db_name = DbName,
+ idx_name = IdxName,
+ sig = Sig,
+ update_seq = Seq,
+ id_btree = IdBtree,
+ views = Views
} = State,
erlang:put(io_priority, {view_compact, DbName, IdxName}),
@@ -65,7 +65,9 @@ compact(State) ->
{ok, Kvs} = couch_mrview_util:get_row_count(View),
Acc + Kvs
end,
- NumDocIds, Views),
+ NumDocIds,
+ Views
+ ),
couch_task_status:add_task([
{type, view_compaction},
@@ -83,24 +85,29 @@ compact(State) ->
FoldFun = fun({DocId, ViewIdKeys} = KV, Acc) ->
#acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc,
- NewKvs = case Kvs of
- [{DocId, OldViewIdKeys} | Rest] ->
- couch_log:error("Dupes of ~s in ~s ~s",
- [DocId, DbName, IdxName]),
- [{DocId, ViewIdKeys ++ OldViewIdKeys} | Rest];
- _ ->
- [KV | Kvs]
- end,
+ NewKvs =
+ case Kvs of
+ [{DocId, OldViewIdKeys} | Rest] ->
+ couch_log:error(
+ "Dupes of ~s in ~s ~s",
+ [DocId, DbName, IdxName]
+ ),
+ [{DocId, ViewIdKeys ++ OldViewIdKeys} | Rest];
+ _ ->
+ [KV | Kvs]
+ end,
KvsSize2 = KvsSize + ?term_size(KV),
case KvsSize2 >= BufferSize of
true ->
{ok, Bt2} = couch_btree:add(Bt, lists:reverse(NewKvs)),
Acc2 = update_task(Acc, length(NewKvs)),
{ok, Acc2#acc{
- btree = Bt2, kvs = [], kvs_size = 0, last_id = DocId}};
+ btree = Bt2, kvs = [], kvs_size = 0, last_id = DocId
+ }};
_ ->
{ok, Acc#acc{
- kvs = NewKvs, kvs_size = KvsSize2, last_id = DocId}}
+ kvs = NewKvs, kvs_size = KvsSize2, last_id = DocId
+ }}
end
end,
@@ -110,26 +117,26 @@ compact(State) ->
{ok, NewIdBtree} = couch_btree:add(Bt3, lists:reverse(Uncopied)),
FinalAcc2 = update_task(FinalAcc, length(Uncopied)),
-
- {NewViews, _} = lists:mapfoldl(fun({View, EmptyView}, Acc) ->
- compact_view(View, EmptyView, BufferSize, Acc)
- end, FinalAcc2, lists:zip(Views, EmptyViews)),
+ {NewViews, _} = lists:mapfoldl(
+ fun({View, EmptyView}, Acc) ->
+ compact_view(View, EmptyView, BufferSize, Acc)
+ end,
+ FinalAcc2,
+ lists:zip(Views, EmptyViews)
+ ),
unlink(EmptyState#mrst.fd),
{ok, EmptyState#mrst{
- id_btree=NewIdBtree,
- views=NewViews,
- update_seq=Seq
+ id_btree = NewIdBtree,
+ views = NewViews,
+ update_seq = Seq
}}.
-
recompact(State) ->
recompact(State, recompact_retry_count()).
-recompact(#mrst{db_name=DbName, idx_name=IdxName}, 0) ->
- erlang:error({exceeded_recompact_retry_count,
- [{db_name, DbName}, {idx_name, IdxName}]});
-
+recompact(#mrst{db_name = DbName, idx_name = IdxName}, 0) ->
+ erlang:error({exceeded_recompact_retry_count, [{db_name, DbName}, {idx_name, IdxName}]});
recompact(State, RetryCount) ->
Self = self(),
link(State#mrst.fd),
@@ -159,27 +166,35 @@ recompact_retry_count() ->
?DEFAULT_RECOMPACT_RETRY_COUNT
).
-
%% @spec compact_view(View, EmptyView, Retry, Acc) -> {CompactView, NewAcc}
-compact_view(#mrview{id_num=VID}=View, EmptyView, BufferSize, Acc0) ->
-
- {NewBt, FinalAcc} = compact_view_btree(View#mrview.btree,
- EmptyView#mrview.btree,
- VID, BufferSize, Acc0),
+compact_view(#mrview{id_num = VID} = View, EmptyView, BufferSize, Acc0) ->
+ {NewBt, FinalAcc} = compact_view_btree(
+ View#mrview.btree,
+ EmptyView#mrview.btree,
+ VID,
+ BufferSize,
+ Acc0
+ ),
- {EmptyView#mrview{btree=NewBt,
- update_seq=View#mrview.update_seq,
- purge_seq=View#mrview.purge_seq}, FinalAcc}.
+ {
+ EmptyView#mrview{
+ btree = NewBt,
+ update_seq = View#mrview.update_seq,
+ purge_seq = View#mrview.purge_seq
+ },
+ FinalAcc
+ }.
compact_view_btree(Btree, EmptyBtree, VID, BufferSize, Acc0) ->
Fun = fun(KV, #acc{btree = Bt, kvs = Kvs, kvs_size = KvsSize} = Acc) ->
KvsSize2 = KvsSize + ?term_size(KV),
- if KvsSize2 >= BufferSize ->
- {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV | Kvs])),
- Acc2 = update_task(VID, Acc, 1 + length(Kvs)),
- {ok, Acc2#acc{btree = Bt2, kvs = [], kvs_size = 0}};
- true ->
- {ok, Acc#acc{kvs = [KV | Kvs], kvs_size = KvsSize2}}
+ if
+ KvsSize2 >= BufferSize ->
+ {ok, Bt2} = couch_btree:add(Bt, lists:reverse([KV | Kvs])),
+ Acc2 = update_task(VID, Acc, 1 + length(Kvs)),
+ {ok, Acc2#acc{btree = Bt2, kvs = [], kvs_size = 0}};
+ true ->
+ {ok, Acc#acc{kvs = [KV | Kvs], kvs_size = KvsSize2}}
end
end,
@@ -193,11 +208,18 @@ compact_view_btree(Btree, EmptyBtree, VID, BufferSize, Acc0) ->
update_task(Acc, ChangesInc) ->
update_task(null, Acc, ChangesInc).
-
-update_task(VID, #acc{changes=Changes, total_changes=Total}=Acc, ChangesInc) ->
- Phase = if is_integer(VID) -> view; true -> ids end,
+update_task(VID, #acc{changes = Changes, total_changes = Total} = Acc, ChangesInc) ->
+ Phase =
+ if
+ is_integer(VID) -> view;
+ true -> ids
+ end,
Changes2 = Changes + ChangesInc,
- Progress = if Total == 0 -> 0; true -> (Changes2 * 100) div Total end,
+ Progress =
+ if
+ Total == 0 -> 0;
+ true -> (Changes2 * 100) div Total
+ end,
couch_task_status:update([
{phase, Phase},
{view, VID},
@@ -207,15 +229,14 @@ update_task(VID, #acc{changes=Changes, total_changes=Total}=Acc, ChangesInc) ->
]),
Acc#acc{changes = Changes2}.
-
swap_compacted(OldState, NewState) ->
#mrst{
fd = Fd
} = OldState,
#mrst{
- sig=Sig,
- db_name=DbName,
- fd=NewFd
+ sig = Sig,
+ db_name = DbName,
+ fd = NewFd
} = NewState,
link(NewState#mrst.fd),
@@ -227,16 +248,18 @@ swap_compacted(OldState, NewState) ->
{ok, Pre} = couch_file:bytes(Fd),
{ok, Post} = couch_file:bytes(NewFd),
- couch_log:notice("Compaction swap for view ~s ~p ~p", [IndexFName,
- Pre, Post]),
+ couch_log:notice("Compaction swap for view ~s ~p ~p", [
+ IndexFName,
+ Pre,
+ Post
+ ]),
ok = couch_file:delete(RootDir, IndexFName),
ok = file:rename(CompactFName, IndexFName),
unlink(OldState#mrst.fd),
erlang:demonitor(OldState#mrst.fd_monitor, [flush]),
- {ok, NewState#mrst{fd_monitor=Ref}}.
-
+ {ok, NewState#mrst{fd_monitor = Ref}}.
remove_compacted(#mrst{sig = Sig, db_name = DbName} = State) ->
RootDir = couch_index_util:root_dir(),
@@ -244,7 +267,6 @@ remove_compacted(#mrst{sig = Sig, db_name = DbName} = State) ->
ok = couch_file:delete(RootDir, CompactFName),
{ok, State}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -268,27 +290,28 @@ recompact_test_() ->
recompact_success_after_progress() ->
?_test(begin
- ok = meck:expect(couch_index_updater, update, fun
- (Pid, _, #mrst{update_seq=0} = State) ->
- Pid ! {'$gen_cast', {new_state, State#mrst{update_seq = 1}}},
- timer:sleep(100),
- exit({updated, self(), State#mrst{update_seq = 2}})
+ ok = meck:expect(couch_index_updater, update, fun(Pid, _, #mrst{update_seq = 0} = State) ->
+ Pid ! {'$gen_cast', {new_state, State#mrst{update_seq = 1}}},
+ timer:sleep(100),
+ exit({updated, self(), State#mrst{update_seq = 2}})
end),
- State = #mrst{fd=self(), update_seq=0},
+ State = #mrst{fd = self(), update_seq = 0},
?assertEqual({ok, State#mrst{update_seq = 2}}, recompact(State))
end).
recompact_exceeded_retry_count() ->
?_test(begin
- ok = meck:expect(couch_index_updater, update,
+ ok = meck:expect(
+ couch_index_updater,
+ update,
fun(_, _, _) ->
exit(error)
- end),
+ end
+ ),
ok = meck:expect(couch_log, warning, fun(_, _) -> ok end),
- State = #mrst{fd=self(), db_name=foo, idx_name=bar},
- ExpectedError = {exceeded_recompact_retry_count,
- [{db_name, foo}, {idx_name, bar}]},
- ?assertError(ExpectedError, recompact(State))
+ State = #mrst{fd = self(), db_name = foo, idx_name = bar},
+ ExpectedError = {exceeded_recompact_retry_count, [{db_name, foo}, {idx_name, bar}]},
+ ?assertError(ExpectedError, recompact(State))
end).
-endif.
diff --git a/src/couch_mrview/src/couch_mrview_http.erl b/src/couch_mrview/src/couch_mrview_http.erl
index 802739b82..fa3fab386 100644
--- a/src/couch_mrview/src/couch_mrview_http.erl
+++ b/src/couch_mrview/src/couch_mrview_http.erl
@@ -43,37 +43,41 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
-handle_all_docs_req(#httpd{method='GET'}=Req, Db) ->
+handle_all_docs_req(#httpd{method = 'GET'} = Req, Db) ->
all_docs_req(Req, Db, undefined);
-handle_all_docs_req(#httpd{method='POST'}=Req, Db) ->
+handle_all_docs_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
all_docs_req(Req, Db, Keys);
handle_all_docs_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-handle_local_docs_req(#httpd{method='GET'}=Req, Db) ->
+handle_local_docs_req(#httpd{method = 'GET'} = Req, Db) ->
all_docs_req(Req, Db, undefined, <<"_local">>);
-handle_local_docs_req(#httpd{method='POST'}=Req, Db) ->
+handle_local_docs_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
all_docs_req(Req, Db, Keys, <<"_local">>);
handle_local_docs_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-handle_design_docs_req(#httpd{method='GET'}=Req, Db) ->
+handle_design_docs_req(#httpd{method = 'GET'} = Req, Db) ->
all_docs_req(Req, Db, undefined, <<"_design">>);
-handle_design_docs_req(#httpd{method='POST'}=Req, Db) ->
+handle_design_docs_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
Keys = couch_mrview_util:get_view_keys(chttpd:json_body_obj(Req)),
all_docs_req(Req, Db, Keys, <<"_design">>);
handle_design_docs_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-handle_reindex_req(#httpd{method='POST',
- path_parts=[_, _, DName,<<"_reindex">>]}=Req,
- Db, _DDoc) ->
+handle_reindex_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, _, DName, <<"_reindex">>]
+ } = Req,
+ Db,
+ _DDoc
+) ->
chttpd:validate_ctype(Req, "application/json"),
ok = couch_db:check_is_admin(Db),
couch_mrview:trigger_update(Db, <<"_design/", DName/binary>>),
@@ -81,23 +85,30 @@ handle_reindex_req(#httpd{method='POST',
handle_reindex_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "POST").
-
-handle_view_req(#httpd{method='GET',
- path_parts=[_, _, DDocName, _, VName, <<"_info">>]}=Req,
- Db, _DDoc) ->
+handle_view_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_, _, DDocName, _, VName, <<"_info">>]
+ } = Req,
+ Db,
+ _DDoc
+) ->
DbName = couch_db:name(Db),
- DDocId = <<"_design/", DDocName/binary >>,
+ DDocId = <<"_design/", DDocName/binary>>,
{ok, Info} = couch_mrview:get_view_info(DbName, DDocId, VName),
- FinalInfo = [{db_name, DbName},
- {ddoc, DDocId},
- {view, VName}] ++ Info,
+ FinalInfo =
+ [
+ {db_name, DbName},
+ {ddoc, DDocId},
+ {view, VName}
+ ] ++ Info,
chttpd:send_json(Req, 200, {FinalInfo});
-handle_view_req(#httpd{method='GET'}=Req, Db, DDoc) ->
+handle_view_req(#httpd{method = 'GET'} = Req, Db, DDoc) ->
[_, _, _, _, ViewName] = Req#httpd.path_parts,
couch_stats:increment_counter([couchdb, httpd, view_reads]),
design_doc_view(Req, Db, DDoc, ViewName, undefined);
-handle_view_req(#httpd{method='POST'}=Req, Db, DDoc) ->
+handle_view_req(#httpd{method = 'POST'} = Req, Db, DDoc) ->
chttpd:validate_ctype(Req, "application/json"),
[_, _, _, _, ViewName] = Req#httpd.path_parts,
Props = chttpd:json_body_obj(Req),
@@ -122,8 +133,7 @@ handle_view_req(#httpd{method='POST'}=Req, Db, DDoc) ->
handle_view_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
-handle_temp_view_req(#httpd{method='POST'}=Req, Db) ->
+handle_temp_view_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
ok = couch_db:check_is_admin(Db),
{Body} = chttpd:json_body_obj(Req),
@@ -134,19 +144,21 @@ handle_temp_view_req(#httpd{method='POST'}=Req, Db) ->
handle_temp_view_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-
-handle_info_req(#httpd{method='GET'}=Req, Db, DDoc) ->
+handle_info_req(#httpd{method = 'GET'} = Req, Db, DDoc) ->
[_, _, Name, _] = Req#httpd.path_parts,
{ok, Info} = couch_mrview:get_info(Db, DDoc),
- chttpd:send_json(Req, 200, {[
- {name, Name},
- {view_index, {Info}}
- ]});
+ chttpd:send_json(
+ Req,
+ 200,
+ {[
+ {name, Name},
+ {view_index, {Info}}
+ ]}
+ );
handle_info_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "GET").
-
-handle_compact_req(#httpd{method='POST'}=Req, Db, DDoc) ->
+handle_compact_req(#httpd{method = 'POST'} = Req, Db, DDoc) ->
chttpd:validate_ctype(Req, "application/json"),
ok = couch_db:check_is_admin(Db),
ok = couch_mrview:compact(Db, DDoc),
@@ -154,8 +166,7 @@ handle_compact_req(#httpd{method='POST'}=Req, Db, DDoc) ->
handle_compact_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "POST").
-
-handle_cleanup_req(#httpd{method='POST'}=Req, Db) ->
+handle_cleanup_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
ok = couch_db:check_is_admin(Db),
ok = couch_mrview:cleanup(Db),
@@ -163,29 +174,30 @@ handle_cleanup_req(#httpd{method='POST'}=Req, Db) ->
handle_cleanup_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-
all_docs_req(Req, Db, Keys) ->
all_docs_req(Req, Db, Keys, undefined).
all_docs_req(Req, Db, Keys, NS) ->
case is_restricted(Db, NS) of
- true ->
- case (catch couch_db:check_is_admin(Db)) of
- ok ->
- do_all_docs_req(Req, Db, Keys, NS);
- _ when NS == <<"_local">> ->
- throw({forbidden, <<"Only admins can access _local_docs">>});
- _ ->
- case is_public_fields_configured(Db) of
- true ->
+ true ->
+ case (catch couch_db:check_is_admin(Db)) of
+ ok ->
do_all_docs_req(Req, Db, Keys, NS);
- false ->
- throw({forbidden, <<"Only admins can access _all_docs",
- " of system databases.">>})
- end
- end;
- false ->
- do_all_docs_req(Req, Db, Keys, NS)
+ _ when NS == <<"_local">> ->
+ throw({forbidden, <<"Only admins can access _local_docs">>});
+ _ ->
+ case is_public_fields_configured(Db) of
+ true ->
+ do_all_docs_req(Req, Db, Keys, NS);
+ false ->
+ throw(
+ {forbidden,
+ <<"Only admins can access _all_docs", " of system databases.">>}
+ )
+ end
+ end;
+ false ->
+ do_all_docs_req(Req, Db, Keys, NS)
end.
is_restricted(_Db, <<"_local">>) ->
@@ -196,18 +208,19 @@ is_restricted(Db, _) ->
is_public_fields_configured(Db) ->
DbName = ?b2l(couch_db:name(Db)),
case config:get("couch_httpd_auth", "authentication_db", "_users") of
- DbName ->
- UsersDbPublic = chttpd_util:get_chttpd_auth_config(
- "users_db_public", "false"),
- PublicFields = chttpd_util:get_chttpd_auth_config("public_fields"),
- case {UsersDbPublic, PublicFields} of
- {"true", PublicFields} when PublicFields =/= undefined ->
- true;
- {_, _} ->
+ DbName ->
+ UsersDbPublic = chttpd_util:get_chttpd_auth_config(
+ "users_db_public", "false"
+ ),
+ PublicFields = chttpd_util:get_chttpd_auth_config("public_fields"),
+ case {UsersDbPublic, PublicFields} of
+ {"true", PublicFields} when PublicFields =/= undefined ->
+ true;
+ {_, _} ->
+ false
+ end;
+ _ ->
false
- end;
- _ ->
- false
end.
do_all_docs_req(Req, Db, Keys, NS) ->
@@ -216,14 +229,16 @@ do_all_docs_req(Req, Db, Keys, NS) ->
ETagFun = fun(Sig, Acc0) ->
check_view_etag(Sig, Acc0, Req)
end,
- Args = Args1#mrargs{preflight_fun=ETagFun},
+ Args = Args1#mrargs{preflight_fun = ETagFun},
{ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db=Db, req=Req, threshold=Max},
+ VAcc0 = #vacc{db = Db, req = Req, threshold = Max},
DbName = ?b2l(couch_db:name(Db)),
- UsersDbName = config:get("couch_httpd_auth",
- "authentication_db",
- "_users"),
+ UsersDbName = config:get(
+ "couch_httpd_auth",
+ "authentication_db",
+ "_users"
+ ),
IsAdmin = is_admin(Db),
Callback = get_view_callback(DbName, UsersDbName, IsAdmin),
couch_mrview:query_all_docs(Db, Args, Callback, VAcc0)
@@ -238,13 +253,12 @@ set_namespace(NS, #mrargs{extra = Extra} = Args) ->
is_admin(Db) ->
case catch couch_db:check_is_admin(Db) of
- {unauthorized, _} ->
- false;
- ok ->
- true
+ {unauthorized, _} ->
+ false;
+ ok ->
+ true
end.
-
% admin users always get all fields
get_view_callback(_, _, true) ->
fun view_cb/2;
@@ -256,16 +270,15 @@ get_view_callback(_DbName, _DbName, false) ->
get_view_callback(_, _, _) ->
fun view_cb/2.
-
design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
Args0 = parse_params(Req, Keys),
ETagFun = fun(Sig, Acc0) ->
check_view_etag(Sig, Acc0, Req)
end,
- Args = Args0#mrargs{preflight_fun=ETagFun},
+ Args = Args0#mrargs{preflight_fun = ETagFun},
{ok, Resp} = couch_httpd:etag_maybe(Req, fun() ->
Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db=Db, req=Req, threshold=Max},
+ VAcc0 = #vacc{db = Db, req = Req, threshold = Max},
couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, VAcc0)
end),
case is_record(Resp, vacc) of
@@ -273,30 +286,36 @@ design_doc_view(Req, Db, DDoc, ViewName, Keys) ->
_ -> {ok, Resp}
end.
-
multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
Args0 = parse_params(Req, undefined),
{ok, _, _, Args1} = couch_mrview_util:get_view(Db, DDoc, ViewName, Args0),
- ArgQueries = lists:map(fun({Query}) ->
- QueryArg = parse_params(Query, undefined, Args1),
- couch_mrview_util:validate_args(Db, DDoc, QueryArg)
- end, Queries),
+ ArgQueries = lists:map(
+ fun({Query}) ->
+ QueryArg = parse_params(Query, undefined, Args1),
+ couch_mrview_util:validate_args(Db, DDoc, QueryArg)
+ end,
+ Queries
+ ),
{ok, Resp2} = couch_httpd:etag_maybe(Req, fun() ->
Max = chttpd:chunked_response_buffer_size(),
- VAcc0 = #vacc{db=Db, req=Req, prepend="\r\n", threshold=Max},
+ VAcc0 = #vacc{db = Db, req = Req, prepend = "\r\n", threshold = Max},
%% TODO: proper calculation of etag
Etag = [$", couch_uuids:new(), $"],
Headers = [{"ETag", Etag}],
FirstChunk = "{\"results\":[",
{ok, Resp0} = chttpd:start_delayed_json_response(VAcc0#vacc.req, 200, Headers, FirstChunk),
- VAcc1 = VAcc0#vacc{resp=Resp0},
- VAcc2 = lists:foldl(fun(Args, Acc0) ->
- {ok, Acc1} = couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, Acc0),
- Acc1
- end, VAcc1, ArgQueries),
+ VAcc1 = VAcc0#vacc{resp = Resp0},
+ VAcc2 = lists:foldl(
+ fun(Args, Acc0) ->
+ {ok, Acc1} = couch_mrview:query_view(Db, DDoc, ViewName, Args, fun view_cb/2, Acc0),
+ Acc1
+ end,
+ VAcc1,
+ ArgQueries
+ ),
{ok, Resp1} = chttpd:send_delayed_chunk(VAcc2#vacc.resp, "\r\n]}"),
{ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, VAcc2#vacc{resp=Resp2}}
+ {ok, VAcc2#vacc{resp = Resp2}}
end),
case is_record(Resp2, vacc) of
true -> {ok, Resp2#vacc.resp};
@@ -304,94 +323,99 @@ multi_query_view(Req, Db, DDoc, ViewName, Queries) ->
end.
filtered_view_cb({row, Row0}, Acc) ->
- Row1 = lists:map(fun({doc, null}) ->
- {doc, null};
- ({doc, Body}) ->
- Doc = couch_users_db:strip_non_public_fields(#doc{body=Body}),
- {doc, Doc#doc.body};
- (KV) ->
- KV
- end, Row0),
+ Row1 = lists:map(
+ fun
+ ({doc, null}) ->
+ {doc, null};
+ ({doc, Body}) ->
+ Doc = couch_users_db:strip_non_public_fields(#doc{body = Body}),
+ {doc, Doc#doc.body};
+ (KV) ->
+ KV
+ end,
+ Row0
+ ),
view_cb({row, Row1}, Acc);
filtered_view_cb(Obj, Acc) ->
view_cb(Obj, Acc).
-
%% these clauses start (and possibly end) the response
-view_cb({error, Reason}, #vacc{resp=undefined}=Acc) ->
+view_cb({error, Reason}, #vacc{resp = undefined} = Acc) ->
{ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
- {ok, Acc#vacc{resp=Resp}};
-
-view_cb(complete, #vacc{resp=undefined}=Acc) ->
+ {ok, Acc#vacc{resp = Resp}};
+view_cb(complete, #vacc{resp = undefined} = Acc) ->
% Nothing in view
{ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
- {ok, Acc#vacc{resp=Resp}};
-
-view_cb(Msg, #vacc{resp=undefined}=Acc) ->
+ {ok, Acc#vacc{resp = Resp}};
+view_cb(Msg, #vacc{resp = undefined} = Acc) ->
%% Start response
Headers = [],
{ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
- view_cb(Msg, Acc#vacc{resp=Resp, should_close=true});
-
+ view_cb(Msg, Acc#vacc{resp = Resp, should_close = true});
%% ---------------------------------------------------
%% From here on down, the response has been started.
-view_cb({error, Reason}, #vacc{resp=Resp}=Acc) ->
+view_cb({error, Reason}, #vacc{resp = Resp} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
- {ok, Acc#vacc{resp=Resp1}};
-
-view_cb(complete, #vacc{resp=Resp, buffer=Buf, threshold=Max}=Acc) ->
+ {ok, Acc#vacc{resp = Resp1}};
+view_cb(complete, #vacc{resp = Resp, buffer = Buf, threshold = Max} = Acc) ->
% Finish view output and possibly end the response
{ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
case Acc#vacc.should_close of
true ->
{ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp=Resp2}};
+ {ok, Acc#vacc{resp = Resp2}};
_ ->
- {ok, Acc#vacc{resp=Resp1, meta_sent=false, row_sent=false,
- prepend=",\r\n", buffer=[], bufsize=0}}
+ {ok, Acc#vacc{
+ resp = Resp1,
+ meta_sent = false,
+ row_sent = false,
+ prepend = ",\r\n",
+ buffer = [],
+ bufsize = 0
+ }}
end;
-
-view_cb({meta, Meta}, #vacc{meta_sent=false, row_sent=false}=Acc) ->
+view_cb({meta, Meta}, #vacc{meta_sent = false, row_sent = false} = Acc) ->
% Sending metadata as we've not sent it or any row yet
- Parts = case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [io_lib:format("\"total_rows\":~p", [Total])]
- end ++ case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [io_lib:format("\"offset\":~p", [Offset])]
- end ++ case couch_util:get_value(update_seq, Meta) of
- undefined -> [];
- null ->
- ["\"update_seq\":null"];
- UpdateSeq when is_integer(UpdateSeq) ->
- [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
- UpdateSeq when is_binary(UpdateSeq) ->
- [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
- end ++ ["\"rows\":["],
+ Parts =
+ case couch_util:get_value(total, Meta) of
+ undefined -> [];
+ Total -> [io_lib:format("\"total_rows\":~p", [Total])]
+ end ++
+ case couch_util:get_value(offset, Meta) of
+ undefined -> [];
+ Offset -> [io_lib:format("\"offset\":~p", [Offset])]
+ end ++
+ case couch_util:get_value(update_seq, Meta) of
+ undefined ->
+ [];
+ null ->
+ ["\"update_seq\":null"];
+ UpdateSeq when is_integer(UpdateSeq) ->
+ [io_lib:format("\"update_seq\":~B", [UpdateSeq])];
+ UpdateSeq when is_binary(UpdateSeq) ->
+ [io_lib:format("\"update_seq\":\"~s\"", [UpdateSeq])]
+ end ++ ["\"rows\":["],
Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
{ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
- {ok, AccOut#vacc{prepend="", meta_sent=true}};
-
-view_cb({meta, _Meta}, #vacc{}=Acc) ->
+ {ok, AccOut#vacc{prepend = "", meta_sent = true}};
+view_cb({meta, _Meta}, #vacc{} = Acc) ->
%% ignore metadata
{ok, Acc};
-
-view_cb({row, Row}, #vacc{meta_sent=false}=Acc) ->
+view_cb({row, Row}, #vacc{meta_sent = false} = Acc) ->
%% sorted=false and row arrived before meta
% Adding another row
Chunk = [prepend_val(Acc), "{\"rows\":[\r\n", row_to_json(Row)],
- maybe_flush_response(Acc#vacc{meta_sent=true, row_sent=true}, Chunk, iolist_size(Chunk));
-
-view_cb({row, Row}, #vacc{meta_sent=true}=Acc) ->
+ maybe_flush_response(Acc#vacc{meta_sent = true, row_sent = true}, Chunk, iolist_size(Chunk));
+view_cb({row, Row}, #vacc{meta_sent = true} = Acc) ->
% Adding another row
Chunk = [prepend_val(Acc), row_to_json(Row)],
- maybe_flush_response(Acc#vacc{row_sent=true}, Chunk, iolist_size(Chunk)).
+ maybe_flush_response(Acc#vacc{row_sent = true}, Chunk, iolist_size(Chunk)).
-
-maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
+maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
+ Size > 0 andalso (Size + Len) > Max
+->
#vacc{buffer = Buffer, resp = Resp} = Acc,
{ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
{ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
@@ -404,7 +428,7 @@ maybe_flush_response(Acc0, Data, Len) ->
},
{ok, Acc}.
-prepend_val(#vacc{prepend=Prepend}) ->
+prepend_val(#vacc{prepend = Prepend}) ->
case Prepend of
undefined ->
"";
@@ -412,72 +436,83 @@ prepend_val(#vacc{prepend=Prepend}) ->
Prepend
end.
-
row_to_json(Row) ->
Id = couch_util:get_value(id, Row),
row_to_json(Id, Row).
-
row_to_json(error, Row) ->
% Special case for _all_docs request with KEYS to
% match prior behavior.
Key = couch_util:get_value(key, Row),
Val = couch_util:get_value(value, Row),
Reason = couch_util:get_value(reason, Row),
- ReasonProp = if Reason == undefined -> []; true ->
- [{reason, Reason}]
- end,
+ ReasonProp =
+ if
+ Reason == undefined -> [];
+ true -> [{reason, Reason}]
+ end,
Obj = {[{key, Key}, {error, Val}] ++ ReasonProp},
?JSON_ENCODE(Obj);
row_to_json(Id0, Row) ->
- Id = case Id0 of
- undefined -> [];
- Id0 -> [{id, Id0}]
- end,
+ Id =
+ case Id0 of
+ undefined -> [];
+ Id0 -> [{id, Id0}]
+ end,
Key = couch_util:get_value(key, Row, null),
Val = couch_util:get_value(value, Row),
- Doc = case couch_util:get_value(doc, Row) of
- undefined -> [];
- Doc0 -> [{doc, Doc0}]
- end,
+ Doc =
+ case couch_util:get_value(doc, Row) of
+ undefined -> [];
+ Doc0 -> [{doc, Doc0}]
+ end,
Obj = {Id ++ [{key, Key}, {value, Val}] ++ Doc},
?JSON_ENCODE(Obj).
-
-parse_params(#httpd{}=Req, Keys) ->
+parse_params(#httpd{} = Req, Keys) ->
parse_params(chttpd:qs(Req), Keys);
parse_params(Props, Keys) ->
Args = #mrargs{},
parse_params(Props, Keys, Args).
-
parse_params(Props, Keys, Args) ->
parse_params(Props, Keys, Args, []).
-parse_params(Props, Keys, #mrargs{}=Args0, Options) ->
+parse_params(Props, Keys, #mrargs{} = Args0, Options) ->
IsDecoded = lists:member(decoded, Options),
- Args1 = case lists:member(keep_group_level, Options) of
- true ->
- Args0;
- _ ->
- % group_level set to undefined to detect if explicitly set by user
- Args0#mrargs{keys=Keys, group=undefined, group_level=undefined}
- end,
- lists:foldl(fun({K, V}, Acc) ->
- parse_param(K, V, Acc, IsDecoded)
- end, Args1, Props).
-
-
-parse_body_and_query(#httpd{method='POST'} = Req, Keys) ->
+ Args1 =
+ case lists:member(keep_group_level, Options) of
+ true ->
+ Args0;
+ _ ->
+ % group_level set to undefined to detect if explicitly set by user
+ Args0#mrargs{keys = Keys, group = undefined, group_level = undefined}
+ end,
+ lists:foldl(
+ fun({K, V}, Acc) ->
+ parse_param(K, V, Acc, IsDecoded)
+ end,
+ Args1,
+ Props
+ ).
+
+parse_body_and_query(#httpd{method = 'POST'} = Req, Keys) ->
Props = chttpd:json_body_obj(Req),
parse_body_and_query(Req, Props, Keys);
-
parse_body_and_query(Req, Keys) ->
- parse_params(chttpd:qs(Req), Keys, #mrargs{keys=Keys, group=undefined,
- group_level=undefined}, [keep_group_level]).
+ parse_params(
+ chttpd:qs(Req),
+ Keys,
+ #mrargs{
+ keys = Keys,
+ group = undefined,
+ group_level = undefined
+ },
+ [keep_group_level]
+ ).
parse_body_and_query(Req, {Props}, Keys) ->
- Args = #mrargs{keys=Keys, group=undefined, group_level=undefined},
+ Args = #mrargs{keys = Keys, group = undefined, group_level = undefined},
BodyArgs = parse_params(Props, Keys, Args, [decoded]),
parse_params(chttpd:qs(Req), Keys, BodyArgs, [keep_group_level]).
@@ -488,101 +523,101 @@ parse_param(Key, Val, Args, IsDecoded) ->
"" ->
Args;
"reduce" ->
- Args#mrargs{reduce=parse_boolean(Val)};
+ Args#mrargs{reduce = parse_boolean(Val)};
"key" when IsDecoded ->
- Args#mrargs{start_key=Val, end_key=Val};
+ Args#mrargs{start_key = Val, end_key = Val};
"key" ->
JsonKey = ?JSON_DECODE(Val),
- Args#mrargs{start_key=JsonKey, end_key=JsonKey};
+ Args#mrargs{start_key = JsonKey, end_key = JsonKey};
"keys" when IsDecoded ->
- Args#mrargs{keys=Val};
+ Args#mrargs{keys = Val};
"keys" ->
- Args#mrargs{keys=?JSON_DECODE(Val)};
+ Args#mrargs{keys = ?JSON_DECODE(Val)};
"startkey" when IsDecoded ->
- Args#mrargs{start_key=Val};
+ Args#mrargs{start_key = Val};
"start_key" when IsDecoded ->
- Args#mrargs{start_key=Val};
+ Args#mrargs{start_key = Val};
"startkey" ->
- Args#mrargs{start_key=?JSON_DECODE(Val)};
+ Args#mrargs{start_key = ?JSON_DECODE(Val)};
"start_key" ->
- Args#mrargs{start_key=?JSON_DECODE(Val)};
+ Args#mrargs{start_key = ?JSON_DECODE(Val)};
"startkey_docid" ->
- Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{start_key_docid = couch_util:to_binary(Val)};
"start_key_doc_id" ->
- Args#mrargs{start_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{start_key_docid = couch_util:to_binary(Val)};
"endkey" when IsDecoded ->
- Args#mrargs{end_key=Val};
+ Args#mrargs{end_key = Val};
"end_key" when IsDecoded ->
- Args#mrargs{end_key=Val};
+ Args#mrargs{end_key = Val};
"endkey" ->
- Args#mrargs{end_key=?JSON_DECODE(Val)};
+ Args#mrargs{end_key = ?JSON_DECODE(Val)};
"end_key" ->
- Args#mrargs{end_key=?JSON_DECODE(Val)};
+ Args#mrargs{end_key = ?JSON_DECODE(Val)};
"endkey_docid" ->
- Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{end_key_docid = couch_util:to_binary(Val)};
"end_key_doc_id" ->
- Args#mrargs{end_key_docid=couch_util:to_binary(Val)};
+ Args#mrargs{end_key_docid = couch_util:to_binary(Val)};
"limit" ->
- Args#mrargs{limit=parse_pos_int(Val)};
+ Args#mrargs{limit = parse_pos_int(Val)};
"stale" when Val == "ok" orelse Val == <<"ok">> ->
- Args#mrargs{stable=true, update=false};
+ Args#mrargs{stable = true, update = false};
"stale" when Val == "update_after" orelse Val == <<"update_after">> ->
- Args#mrargs{stable=true, update=lazy};
+ Args#mrargs{stable = true, update = lazy};
"stale" ->
throw({query_parse_error, <<"Invalid value for `stale`.">>});
"stable" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
- Args#mrargs{stable=true};
+ Args#mrargs{stable = true};
"stable" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
- Args#mrargs{stable=false};
+ Args#mrargs{stable = false};
"stable" ->
throw({query_parse_error, <<"Invalid value for `stable`.">>});
"update" when Val == "true" orelse Val == <<"true">> orelse Val == true ->
- Args#mrargs{update=true};
+ Args#mrargs{update = true};
"update" when Val == "false" orelse Val == <<"false">> orelse Val == false ->
- Args#mrargs{update=false};
+ Args#mrargs{update = false};
"update" when Val == "lazy" orelse Val == <<"lazy">> ->
- Args#mrargs{update=lazy};
+ Args#mrargs{update = lazy};
"update" ->
throw({query_parse_error, <<"Invalid value for `update`.">>});
"descending" ->
case parse_boolean(Val) of
- true -> Args#mrargs{direction=rev};
- _ -> Args#mrargs{direction=fwd}
+ true -> Args#mrargs{direction = rev};
+ _ -> Args#mrargs{direction = fwd}
end;
"skip" ->
- Args#mrargs{skip=parse_pos_int(Val)};
+ Args#mrargs{skip = parse_pos_int(Val)};
"group" ->
- Args#mrargs{group=parse_boolean(Val)};
+ Args#mrargs{group = parse_boolean(Val)};
"group_level" ->
- Args#mrargs{group_level=parse_pos_int(Val)};
+ Args#mrargs{group_level = parse_pos_int(Val)};
"inclusive_end" ->
- Args#mrargs{inclusive_end=parse_boolean(Val)};
+ Args#mrargs{inclusive_end = parse_boolean(Val)};
"include_docs" ->
- Args#mrargs{include_docs=parse_boolean(Val)};
+ Args#mrargs{include_docs = parse_boolean(Val)};
"attachments" ->
case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options=[attachments|Opts]};
- false ->
- Args
+ true ->
+ Opts = Args#mrargs.doc_options,
+ Args#mrargs{doc_options = [attachments | Opts]};
+ false ->
+ Args
end;
"att_encoding_info" ->
case parse_boolean(Val) of
- true ->
- Opts = Args#mrargs.doc_options,
- Args#mrargs{doc_options=[att_encoding_info|Opts]};
- false ->
- Args
+ true ->
+ Opts = Args#mrargs.doc_options,
+ Args#mrargs{doc_options = [att_encoding_info | Opts]};
+ false ->
+ Args
end;
"update_seq" ->
- Args#mrargs{update_seq=parse_boolean(Val)};
+ Args#mrargs{update_seq = parse_boolean(Val)};
"conflicts" ->
- Args#mrargs{conflicts=parse_boolean(Val)};
+ Args#mrargs{conflicts = parse_boolean(Val)};
"callback" ->
- Args#mrargs{callback=couch_util:to_binary(Val)};
+ Args#mrargs{callback = couch_util:to_binary(Val)};
"sorted" ->
- Args#mrargs{sorted=parse_boolean(Val)};
+ Args#mrargs{sorted = parse_boolean(Val)};
"partition" ->
Partition = couch_util:to_binary(Val),
couch_partition:validate_partition(Partition),
@@ -590,52 +625,50 @@ parse_param(Key, Val, Args, IsDecoded) ->
_ ->
BKey = couch_util:to_binary(Key),
BVal = couch_util:to_binary(Val),
- Args#mrargs{extra=[{BKey, BVal} | Args#mrargs.extra]}
+ Args#mrargs{extra = [{BKey, BVal} | Args#mrargs.extra]}
end.
-
parse_boolean(true) ->
true;
parse_boolean(false) ->
false;
-
parse_boolean(Val) when is_binary(Val) ->
parse_boolean(?b2l(Val));
-
parse_boolean(Val) ->
case string:to_lower(Val) of
- "true" -> true;
- "false" -> false;
- _ ->
- Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)})
+ "true" ->
+ true;
+ "false" ->
+ false;
+ _ ->
+ Msg = io_lib:format("Invalid boolean parameter: ~p", [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
end.
parse_int(Val) when is_integer(Val) ->
Val;
parse_int(Val) ->
case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
- throw({query_parse_error, ?l2b(Msg)})
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ Msg = io_lib:format("Invalid value for integer: ~p", [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
end.
parse_pos_int(Val) ->
case parse_int(Val) of
- IntVal when IntVal >= 0 ->
- IntVal;
- _ ->
- Fmt = "Invalid value for positive integer: ~p",
- Msg = io_lib:format(Fmt, [Val]),
- throw({query_parse_error, ?l2b(Msg)})
+ IntVal when IntVal >= 0 ->
+ IntVal;
+ _ ->
+ Fmt = "Invalid value for positive integer: ~p",
+ Msg = io_lib:format(Fmt, [Val]),
+ throw({query_parse_error, ?l2b(Msg)})
end.
-
check_view_etag(Sig, Acc0, Req) ->
ETag = chttpd:make_etag(Sig),
case chttpd:etag_match(Req, ETag) of
true -> throw({etag_match, ETag});
- false -> {ok, Acc0#vacc{etag=ETag}}
+ false -> {ok, Acc0#vacc{etag = ETag}}
end.
diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl
index 68f1d2322..a024d35c8 100644
--- a/src/couch_mrview/src/couch_mrview_index.erl
+++ b/src/couch_mrview/src/couch_mrview_index.erl
@@ -12,7 +12,6 @@
-module(couch_mrview_index).
-
-export([get/2]).
-export([init/2, open/2, close/1, reset/1, delete/1, shutdown/1]).
-export([start_update/4, purge/4, process_doc/3, finish_update/1, commit/1]).
@@ -24,7 +23,6 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
get(db_name, #mrst{db_name = DbName}) ->
DbName;
get(idx_name, #mrst{idx_name = IdxName}) ->
@@ -39,9 +37,18 @@ get(update_options, #mrst{design_opts = Opts}) ->
IncDesign = couch_util:get_value(<<"include_design">>, Opts, false),
LocalSeq = couch_util:get_value(<<"local_seq">>, Opts, false),
Partitioned = couch_util:get_value(<<"partitioned">>, Opts, false),
- if IncDesign -> [include_design]; true -> [] end
- ++ if LocalSeq -> [local_seq]; true -> [] end
- ++ if Partitioned -> [partitioned]; true -> [] end;
+ if
+ IncDesign -> [include_design];
+ true -> []
+ end ++
+ if
+ LocalSeq -> [local_seq];
+ true -> []
+ end ++
+ if
+ Partitioned -> [partitioned];
+ true -> []
+ end;
get(fd, #mrst{fd = Fd}) ->
Fd;
get(language, #mrst{language = Language}) ->
@@ -69,11 +76,12 @@ get(info, State) ->
{ok, [
{signature, list_to_binary(couch_index_util:hexsig(Sig))},
{language, Lang},
- {sizes, {[
- {file, FileSize},
- {active, ActiveSize},
- {external, ExternalSize}
- ]}},
+ {sizes,
+ {[
+ {file, FileSize},
+ {active, ActiveSize},
+ {external, ExternalSize}
+ ]}},
{update_seq, UpdateSeq},
{purge_seq, PurgeSeq},
{update_options, UpdateOptions}
@@ -81,16 +89,14 @@ get(info, State) ->
get(Other, _) ->
throw({unknown_index_property, Other}).
-
init(Db, DDoc) ->
{ok, State} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
{ok, set_partitioned(Db, State)}.
-
open(Db, State0) ->
#mrst{
- db_name=DbName,
- sig=Sig
+ db_name = DbName,
+ sig = Sig
} = State = set_partitioned(Db, State0),
IndexFName = couch_mrview_util:index_file(DbName, Sig),
@@ -128,14 +134,18 @@ open(Db, State0) ->
ensure_local_purge_doc(Db, NewSt),
{ok, NewSt};
{ok, {WrongSig, _}} ->
- couch_log:error("~s has the wrong signature: expected: ~p but got ~p",
- [IndexFName, Sig, WrongSig]),
+ couch_log:error(
+ "~s has the wrong signature: expected: ~p but got ~p",
+ [IndexFName, Sig, WrongSig]
+ ),
NewSt = couch_mrview_util:reset_index(Db, Fd, State),
ensure_local_purge_doc(Db, NewSt),
{ok, NewSt};
{ok, Else} ->
- couch_log:error("~s has a bad header: got ~p",
- [IndexFName, Else]),
+ couch_log:error(
+ "~s has a bad header: got ~p",
+ [IndexFName, Else]
+ ),
NewSt = couch_mrview_util:reset_index(Db, Fd, State),
ensure_local_purge_doc(Db, NewSt),
{ok, NewSt};
@@ -145,17 +155,17 @@ open(Db, State0) ->
{ok, NewSt}
end;
{error, Reason} = Error ->
- couch_log:error("Failed to open view file '~s': ~s",
- [IndexFName, file:format_error(Reason)]),
+ couch_log:error(
+ "Failed to open view file '~s': ~s",
+ [IndexFName, file:format_error(Reason)]
+ ),
Error
end.
-
close(State) ->
erlang:demonitor(State#mrst.fd_monitor, [flush]),
couch_file:close(State#mrst.fd).
-
% This is called after a ddoc_updated event occurs, and
% before we shut down the couch_index process.
% We unlink couch_index from corresponding couch_file and demonitor it.
@@ -167,19 +177,16 @@ shutdown(State) ->
erlang:demonitor(State#mrst.fd_monitor, [flush]),
unlink(State#mrst.fd).
-
-delete(#mrst{db_name=DbName, sig=Sig}=State) ->
+delete(#mrst{db_name = DbName, sig = Sig} = State) ->
couch_file:close(State#mrst.fd),
catch couch_mrview_util:delete_files(DbName, Sig).
-
reset(State) ->
couch_util:with_db(State#mrst.db_name, fun(Db) ->
NewState = couch_mrview_util:reset_index(Db, State#mrst.fd, State),
{ok, NewState}
end).
-
start_update(PartialDest, State, NumChanges, NumChangesDone) ->
couch_mrview_updater:start_update(
PartialDest,
@@ -188,94 +195,93 @@ start_update(PartialDest, State, NumChanges, NumChangesDone) ->
NumChangesDone
).
-
purge(Db, PurgeSeq, PurgedIdRevs, State) ->
couch_mrview_updater:purge(Db, PurgeSeq, PurgedIdRevs, State).
-
process_doc(Doc, Seq, State) ->
couch_mrview_updater:process_doc(Doc, Seq, State).
-
finish_update(State) ->
couch_mrview_updater:finish_update(State).
-
commit(State) ->
Header = {State#mrst.sig, couch_mrview_util:make_header(State)},
couch_file:write_header(State#mrst.fd, Header).
-
compact(Db, State, Opts) ->
couch_mrview_compactor:compact(Db, State, Opts).
-
swap_compacted(OldState, NewState) ->
couch_mrview_compactor:swap_compacted(OldState, NewState).
-
remove_compacted(State) ->
couch_mrview_compactor:remove_compacted(State).
-
index_file_exists(State) ->
#mrst{
- db_name=DbName,
- sig=Sig
+ db_name = DbName,
+ sig = Sig
} = State,
IndexFName = couch_mrview_util:index_file(DbName, Sig),
filelib:is_file(IndexFName).
-
verify_index_exists(DbName, Props) ->
try
Type = couch_util:get_value(<<"type">>, Props),
- if Type =/= <<"mrview">> -> false; true ->
- DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
- couch_util:with_db(DbName, fun(Db) ->
- case couch_db:get_design_doc(Db, DDocId) of
- {ok, #doc{} = DDoc} ->
- {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(
- DbName, DDoc),
- IdxSig = IdxState#mrst.sig,
- SigInLocal = couch_util:get_value(
- <<"signature">>, Props),
- couch_index_util:hexsig(IdxSig) == SigInLocal;
- {not_found, _} ->
- false
- end
- end)
+ if
+ Type =/= <<"mrview">> ->
+ false;
+ true ->
+ DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
+ couch_util:with_db(DbName, fun(Db) ->
+ case couch_db:get_design_doc(Db, DDocId) of
+ {ok, #doc{} = DDoc} ->
+ {ok, IdxState} = couch_mrview_util:ddoc_to_mrst(
+ DbName, DDoc
+ ),
+ IdxSig = IdxState#mrst.sig,
+ SigInLocal = couch_util:get_value(
+ <<"signature">>, Props
+ ),
+ couch_index_util:hexsig(IdxSig) == SigInLocal;
+ {not_found, _} ->
+ false
+ end
+ end)
end
- catch _:_ ->
- false
+ catch
+ _:_ ->
+ false
end.
-
set_partitioned(Db, State) ->
#mrst{
design_opts = DesignOpts
} = State,
DbPartitioned = couch_db:is_partitioned(Db),
ViewPartitioned = couch_util:get_value(
- <<"partitioned">>, DesignOpts, DbPartitioned),
+ <<"partitioned">>, DesignOpts, DbPartitioned
+ ),
IsPartitioned = DbPartitioned andalso ViewPartitioned,
State#mrst{partitioned = IsPartitioned}.
-
ensure_local_purge_docs(DbName, DDocs) ->
couch_util:with_db(DbName, fun(Db) ->
- lists:foreach(fun(DDoc) ->
- try couch_mrview_util:ddoc_to_mrst(DbName, DDoc) of
- {ok, MRSt} ->
- ensure_local_purge_doc(Db, MRSt)
- catch _:_ ->
- ok
- end
- end, DDocs)
+ lists:foreach(
+ fun(DDoc) ->
+ try couch_mrview_util:ddoc_to_mrst(DbName, DDoc) of
+ {ok, MRSt} ->
+ ensure_local_purge_doc(Db, MRSt)
+ catch
+ _:_ ->
+ ok
+ end
+ end,
+ DDocs
+ )
end).
-
-ensure_local_purge_doc(Db, #mrst{}=State) ->
+ensure_local_purge_doc(Db, #mrst{} = State) ->
Sig = couch_index_util:hexsig(get(signature, State)),
DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
case couch_db:open_doc(Db, DocId, []) of
@@ -285,33 +291,33 @@ ensure_local_purge_doc(Db, #mrst{}=State) ->
ok
end.
-
create_local_purge_doc(Db, State) ->
PurgeSeq = couch_db:get_purge_seq(Db),
update_local_purge_doc(Db, State, PurgeSeq).
-
update_local_purge_doc(Db, State) ->
update_local_purge_doc(Db, State, get(purge_seq, State)).
-
update_local_purge_doc(Db, State, PSeq) ->
Sig = couch_index_util:hexsig(State#mrst.sig),
DocId = couch_mrview_util:get_local_purge_doc_id(Sig),
{Mega, Secs, _} = os:timestamp(),
NowSecs = Mega * 1000000 + Secs,
- BaseDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"type">>, <<"mrview">>},
- {<<"purge_seq">>, PSeq},
- {<<"updated_on">>, NowSecs},
- {<<"ddoc_id">>, get(idx_name, State)},
- {<<"signature">>, Sig}
- ]}),
- Doc = case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = Revs}} ->
- BaseDoc#doc{revs = Revs};
- {not_found, _} ->
- BaseDoc
- end,
+ BaseDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DocId},
+ {<<"type">>, <<"mrview">>},
+ {<<"purge_seq">>, PSeq},
+ {<<"updated_on">>, NowSecs},
+ {<<"ddoc_id">>, get(idx_name, State)},
+ {<<"signature">>, Sig}
+ ]}
+ ),
+ Doc =
+ case couch_db:open_doc(Db, DocId, []) of
+ {ok, #doc{revs = Revs}} ->
+ BaseDoc#doc{revs = Revs};
+ {not_found, _} ->
+ BaseDoc
+ end,
couch_db:update_doc(Db, Doc, []).
diff --git a/src/couch_mrview/src/couch_mrview_show.erl b/src/couch_mrview/src/couch_mrview_show.erl
index 0268b706e..3e95be9cc 100644
--- a/src/couch_mrview/src/couch_mrview_show.erl
+++ b/src/couch_mrview/src/couch_mrview_show.erl
@@ -33,23 +33,28 @@ maybe_open_doc(Db, DocId) ->
{not_found, _} -> nil
end.
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId]
- }=Req, Db, DDoc) ->
-
+handle_doc_show_req(
+ #httpd{
+ path_parts = [_, _, _, _, ShowName, DocId]
+ } = Req,
+ Db,
+ DDoc
+) ->
% open the doc
Doc = maybe_open_doc(Db, DocId),
% we don't handle revs here b/c they are an internal api
% returns 404 if there is no doc with DocId
handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName, DocId|Rest]
- }=Req, Db, DDoc) ->
-
- DocParts = [DocId|Rest],
- DocId1 = ?l2b(string:join([?b2l(P)|| P <- DocParts], "/")),
+handle_doc_show_req(
+ #httpd{
+ path_parts = [_, _, _, _, ShowName, DocId | Rest]
+ } = Req,
+ Db,
+ DDoc
+) ->
+ DocParts = [DocId | Rest],
+ DocId1 = ?l2b(string:join([?b2l(P) || P <- DocParts], "/")),
% open the doc
Doc = maybe_open_doc(Db, DocId1),
@@ -57,13 +62,15 @@ handle_doc_show_req(#httpd{
% we don't handle revs here b/c they are an internal api
% pass 404 docs to the show function
handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId1);
-
-handle_doc_show_req(#httpd{
- path_parts=[_, _, _, _, ShowName]
- }=Req, Db, DDoc) ->
+handle_doc_show_req(
+ #httpd{
+ path_parts = [_, _, _, _, ShowName]
+ } = Req,
+ Db,
+ DDoc
+) ->
% with no docid the doc is nil
handle_doc_show(Req, Db, DDoc, ShowName, nil);
-
handle_doc_show_req(Req, _Db, _DDoc) ->
chttpd:send_error(Req, 404, <<"show_error">>, <<"Invalid path.">>).
@@ -77,21 +84,29 @@ handle_doc_show(Req, Db, DDoc, ShowName, Doc, DocId) ->
JsonReq = chttpd_external:json_req_obj(Req, Db, DocId),
JsonDoc = couch_query_servers:json_doc(Doc),
[<<"resp">>, ExternalResp] =
- couch_query_servers:ddoc_prompt(DDoc, [<<"shows">>, ShowName],
- [JsonDoc, JsonReq]),
+ couch_query_servers:ddoc_prompt(
+ DDoc,
+ [<<"shows">>, ShowName],
+ [JsonDoc, JsonReq]
+ ),
JsonResp = apply_etag(ExternalResp, CurrentEtag),
chttpd_external:send_external_response(Req, JsonResp)
end).
-
-show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
+show_etag(#httpd{user_ctx = UserCtx} = Req, Doc, DDoc, More) ->
Accept = chttpd:header_value(Req, "Accept"),
- DocPart = case Doc of
- nil -> nil;
- Doc -> chttpd:doc_etag(Doc)
- end,
- chttpd:make_etag({chttpd:doc_etag(DDoc), DocPart, Accept,
- {UserCtx#user_ctx.name, UserCtx#user_ctx.roles}, More}).
+ DocPart =
+ case Doc of
+ nil -> nil;
+ Doc -> chttpd:doc_etag(Doc)
+ end,
+ chttpd:make_etag({
+ chttpd:doc_etag(DDoc),
+ DocPart,
+ Accept,
+ {UserCtx#user_ctx.name, UserCtx#user_ctx.roles},
+ More
+ }).
% updates a doc based on a request
% handle_doc_update_req(#httpd{method = 'GET'}=Req, _Db, _DDoc) ->
@@ -101,20 +116,25 @@ show_etag(#httpd{user_ctx=UserCtx}=Req, Doc, DDoc, More) ->
% This call is creating a new doc using an _update function to
% modify the provided request body.
% /db/_design/foo/_update/bar
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName]
- }=Req, Db, DDoc) ->
+handle_doc_update_req(
+ #httpd{
+ path_parts = [_, _, _, _, UpdateName]
+ } = Req,
+ Db,
+ DDoc
+) ->
send_doc_update_response(Req, Db, DDoc, UpdateName, nil, null);
-
% /db/_design/foo/_update/bar/docid
-handle_doc_update_req(#httpd{
- path_parts=[_, _, _, _, UpdateName | DocIdParts]
- }=Req, Db, DDoc) ->
+handle_doc_update_req(
+ #httpd{
+ path_parts = [_, _, _, _, UpdateName | DocIdParts]
+ } = Req,
+ Db,
+ DDoc
+) ->
DocId = ?l2b(string:join([?b2l(P) || P <- DocIdParts], "/")),
Doc = maybe_open_doc(Db, DocId),
send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId);
-
-
handle_doc_update_req(Req, _Db, _DDoc) ->
chttpd:send_error(Req, 404, <<"update_error">>, <<"Invalid path.">>).
@@ -123,32 +143,36 @@ send_doc_update_response(Req, Db, DDoc, UpdateName, Doc, DocId) ->
JsonDoc = couch_query_servers:json_doc(Doc),
Cmd = [<<"updates">>, UpdateName],
UpdateResp = couch_query_servers:ddoc_prompt(DDoc, Cmd, [JsonDoc, JsonReq]),
- JsonResp = case UpdateResp of
- [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
- case chttpd:header_value(
- Req, "X-Couch-Full-Commit", "false") of
- "true" ->
- Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
- _ ->
- Options = [{user_ctx, Req#httpd.user_ctx}]
- end,
- NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
- {ok, NewRev} = couch_db:update_doc(Db, NewDoc, Options),
- NewRevStr = couch_doc:rev_to_str(NewRev),
- {JsonResp1} = apply_headers(JsonResp0, [
- {<<"X-Couch-Update-NewRev">>, NewRevStr},
- {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
- ]),
- {[{<<"code">>, 201} | JsonResp1]};
- [<<"up">>, _Other, {JsonResp0}] ->
- {[{<<"code">>, 200} | JsonResp0]}
- end,
+ JsonResp =
+ case UpdateResp of
+ [<<"up">>, {NewJsonDoc}, {JsonResp0}] ->
+ case
+ chttpd:header_value(
+ Req, "X-Couch-Full-Commit", "false"
+ )
+ of
+ "true" ->
+ Options = [full_commit, {user_ctx, Req#httpd.user_ctx}];
+ _ ->
+ Options = [{user_ctx, Req#httpd.user_ctx}]
+ end,
+ NewDoc = couch_db:doc_from_json_obj_validate(Db, {NewJsonDoc}),
+ {ok, NewRev} = couch_db:update_doc(Db, NewDoc, Options),
+ NewRevStr = couch_doc:rev_to_str(NewRev),
+ {JsonResp1} = apply_headers(JsonResp0, [
+ {<<"X-Couch-Update-NewRev">>, NewRevStr},
+ {<<"X-Couch-Id">>, couch_util:url_encode(NewDoc#doc.id)}
+ ]),
+ {[{<<"code">>, 201} | JsonResp1]};
+ [<<"up">>, _Other, {JsonResp0}] ->
+ {[{<<"code">>, 200} | JsonResp0]}
+ end,
% todo set location field
chttpd_external:send_external_response(Req, JsonResp).
-
-handle_view_list_req(#httpd{method=Method}=Req, Db, DDoc)
- when Method =:= 'GET' orelse Method =:= 'OPTIONS' ->
+handle_view_list_req(#httpd{method = Method} = Req, Db, DDoc) when
+ Method =:= 'GET' orelse Method =:= 'OPTIONS'
+->
case Req#httpd.path_parts of
[_, _, _DName, _, LName, VName] ->
% Same design doc for view and list
@@ -161,7 +185,7 @@ handle_view_list_req(#httpd{method=Method}=Req, Db, DDoc)
_ ->
chttpd:send_error(Req, 404, <<"list_error">>, <<"Bad path.">>)
end;
-handle_view_list_req(#httpd{method='POST'}=Req, Db, DDoc) ->
+handle_view_list_req(#httpd{method = 'POST'} = Req, Db, DDoc) ->
chttpd:validate_ctype(Req, "application/json"),
{Props} = chttpd:json_body_obj(Req),
Keys = proplists:get_value(<<"keys">>, Props),
@@ -179,7 +203,6 @@ handle_view_list_req(#httpd{method='POST'}=Req, Db, DDoc) ->
handle_view_list_req(Req, _Db, _DDoc) ->
chttpd:send_method_not_allowed(Req, "GET,POST,HEAD").
-
handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys) ->
Args0 = couch_mrview_http:parse_body_and_query(Req, Keys),
ETagFun = fun(BaseSig, Acc0) ->
@@ -191,67 +214,73 @@ handle_view_list(Req, Db, DDoc, LName, VDDoc, VName, Keys) ->
ETag = chttpd:make_etag({BaseSig, Parts}),
case chttpd:etag_match(Req, ETag) of
true -> throw({etag_match, ETag});
- false -> {ok, Acc0#lacc{etag=ETag}}
+ false -> {ok, Acc0#lacc{etag = ETag}}
end
end,
- Args = Args0#mrargs{preflight_fun=ETagFun},
+ Args = Args0#mrargs{preflight_fun = ETagFun},
couch_httpd:etag_maybe(Req, fun() ->
couch_query_servers:with_ddoc_proc(DDoc, fun(QServer) ->
- Acc = #lacc{db=Db, req=Req, qserver=QServer, lname=LName},
+ Acc = #lacc{db = Db, req = Req, qserver = QServer, lname = LName},
case VName of
- <<"_all_docs">> ->
- couch_mrview:query_all_docs(Db, Args, fun list_cb/2, Acc);
- _ ->
- couch_mrview:query_view(Db, VDDoc, VName, Args, fun list_cb/2, Acc)
+ <<"_all_docs">> ->
+ couch_mrview:query_all_docs(Db, Args, fun list_cb/2, Acc);
+ _ ->
+ couch_mrview:query_view(Db, VDDoc, VName, Args, fun list_cb/2, Acc)
end
end)
end).
-
-list_cb({meta, Meta}, #lacc{code=undefined} = Acc) ->
- MetaProps = case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [{total_rows, Total}]
- end ++ case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [{offset, Offset}]
- end ++ case couch_util:get_value(update_seq, Meta) of
- undefined -> [];
- UpdateSeq -> [{update_seq, UpdateSeq}]
- end,
+list_cb({meta, Meta}, #lacc{code = undefined} = Acc) ->
+ MetaProps =
+ case couch_util:get_value(total, Meta) of
+ undefined -> [];
+ Total -> [{total_rows, Total}]
+ end ++
+ case couch_util:get_value(offset, Meta) of
+ undefined -> [];
+ Offset -> [{offset, Offset}]
+ end ++
+ case couch_util:get_value(update_seq, Meta) of
+ undefined -> [];
+ UpdateSeq -> [{update_seq, UpdateSeq}]
+ end,
start_list_resp({MetaProps}, Acc);
-list_cb({row, Row}, #lacc{code=undefined} = Acc) ->
+list_cb({row, Row}, #lacc{code = undefined} = Acc) ->
{ok, NewAcc} = start_list_resp({[]}, Acc),
send_list_row(Row, NewAcc);
list_cb({row, Row}, Acc) ->
send_list_row(Row, Acc);
list_cb(complete, Acc) ->
#lacc{qserver = {Proc, _}, req = Req, resp = Resp0} = Acc,
- if Resp0 =:= nil ->
- {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
- true ->
- Resp = Resp0
+ if
+ Resp0 =:= nil ->
+ {ok, #lacc{resp = Resp}} = start_list_resp({[]}, Acc);
+ true ->
+ Resp = Resp0
end,
case couch_query_servers:proc_prompt(Proc, [<<"list_end">>]) of
[<<"end">>, Data, Headers] ->
- Acc2 = fixup_headers(Headers, Acc#lacc{resp=Resp}),
+ Acc2 = fixup_headers(Headers, Acc#lacc{resp = Resp}),
#lacc{resp = Resp2} = send_non_empty_chunk(Acc2, Data);
[<<"end">>, Data] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc#lacc{resp=Resp}, Data)
+ #lacc{resp = Resp2} = send_non_empty_chunk(Acc#lacc{resp = Resp}, Data)
end,
last_chunk(Req, Resp2),
{ok, Resp2}.
start_list_resp(Head, Acc) ->
- #lacc{db=Db, req=Req, qserver=QServer, lname=LName} = Acc,
+ #lacc{db = Db, req = Req, qserver = QServer, lname = LName} = Acc,
JsonReq = json_req_obj(Req, Db),
- [<<"start">>,Chunk,JsonResp] = couch_query_servers:ddoc_proc_prompt(QServer,
- [<<"lists">>, LName], [Head, JsonReq]),
+ [<<"start">>, Chunk, JsonResp] = couch_query_servers:ddoc_proc_prompt(
+ QServer,
+ [<<"lists">>, LName],
+ [Head, JsonReq]
+ ),
Acc2 = send_non_empty_chunk(fixup_headers(JsonResp, Acc), Chunk),
{ok, Acc2}.
-fixup_headers(Headers, #lacc{etag=ETag} = Acc) ->
+fixup_headers(Headers, #lacc{etag = ETag} = Acc) ->
Headers2 = apply_etag(Headers, ETag),
#extern_resp_args{
code = Code,
@@ -260,61 +289,66 @@ fixup_headers(Headers, #lacc{etag=ETag} = Acc) ->
} = chttpd_external:parse_external_response(Headers2),
Headers3 = chttpd_external:default_or_content_type(CType, ExtHeaders),
Headers4 = chttpd_util:maybe_add_csp_header("showlist", Headers3, "sandbox"),
- Acc#lacc{code=Code, headers=Headers4}.
+ Acc#lacc{code = Code, headers = Headers4}.
send_list_row(Row, #lacc{qserver = {Proc, _}, req = Req, resp = Resp} = Acc) ->
- RowObj = case couch_util:get_value(id, Row) of
- undefined -> [];
- Id -> [{id, Id}]
- end ++ case couch_util:get_value(key, Row) of
- undefined -> [];
- Key -> [{key, Key}]
- end ++ case couch_util:get_value(value, Row) of
- undefined -> [];
- Val -> [{value, Val}]
- end ++ case couch_util:get_value(doc, Row) of
- undefined -> [];
- Doc -> [{doc, Doc}]
- end,
+ RowObj =
+ case couch_util:get_value(id, Row) of
+ undefined -> [];
+ Id -> [{id, Id}]
+ end ++
+ case couch_util:get_value(key, Row) of
+ undefined -> [];
+ Key -> [{key, Key}]
+ end ++
+ case couch_util:get_value(value, Row) of
+ undefined -> [];
+ Val -> [{value, Val}]
+ end ++
+ case couch_util:get_value(doc, Row) of
+ undefined -> [];
+ Doc -> [{doc, Doc}]
+ end,
try couch_query_servers:proc_prompt(Proc, [<<"list_row">>, {RowObj}]) of
- [<<"chunks">>, Chunk, Headers] ->
- Acc2 = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
- {ok, Acc2};
- [<<"chunks">>, Chunk] ->
- Acc2 = send_non_empty_chunk(Acc, Chunk),
- {ok, Acc2};
- [<<"end">>, Chunk, Headers] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
- {ok, Resp3} = last_chunk(Req, Resp2),
- {stop, Resp3};
- [<<"end">>, Chunk] ->
- #lacc{resp = Resp2} = send_non_empty_chunk(Acc, Chunk),
- {ok, Resp3} = last_chunk(Req, Resp2),
- {stop, Resp3}
- catch Error ->
- {ok, Resp2} = case Resp of
- undefined ->
- {Code, _, _} = chttpd:error_info(Error),
- #lacc{req=Req, headers=Headers} = Acc,
- chttpd:start_chunked_response(Req, Code, Headers);
- _ ->
- {ok, Resp}
- end,
- {ok, Resp3} = chttpd:send_chunked_error(Resp2, Error),
- {stop, Resp3}
+ [<<"chunks">>, Chunk, Headers] ->
+ Acc2 = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
+ {ok, Acc2};
+ [<<"chunks">>, Chunk] ->
+ Acc2 = send_non_empty_chunk(Acc, Chunk),
+ {ok, Acc2};
+ [<<"end">>, Chunk, Headers] ->
+ #lacc{resp = Resp2} = send_non_empty_chunk(fixup_headers(Headers, Acc), Chunk),
+ {ok, Resp3} = last_chunk(Req, Resp2),
+ {stop, Resp3};
+ [<<"end">>, Chunk] ->
+ #lacc{resp = Resp2} = send_non_empty_chunk(Acc, Chunk),
+ {ok, Resp3} = last_chunk(Req, Resp2),
+ {stop, Resp3}
+ catch
+ Error ->
+ {ok, Resp2} =
+ case Resp of
+ undefined ->
+ {Code, _, _} = chttpd:error_info(Error),
+ #lacc{req = Req, headers = Headers} = Acc,
+ chttpd:start_chunked_response(Req, Code, Headers);
+ _ ->
+ {ok, Resp}
+ end,
+ {ok, Resp3} = chttpd:send_chunked_error(Resp2, Error),
+ {stop, Resp3}
end.
send_non_empty_chunk(Acc, []) ->
Acc;
-send_non_empty_chunk(#lacc{resp=undefined} = Acc, Chunk) ->
- #lacc{req=Req, code=Code, headers=Headers} = Acc,
+send_non_empty_chunk(#lacc{resp = undefined} = Acc, Chunk) ->
+ #lacc{req = Req, code = Code, headers = Headers} = Acc,
{ok, Resp} = chttpd:start_chunked_response(Req, Code, Headers),
send_non_empty_chunk(Acc#lacc{resp = Resp}, Chunk);
-send_non_empty_chunk(#lacc{resp=Resp} = Acc, Chunk) ->
+send_non_empty_chunk(#lacc{resp = Resp} = Acc, Chunk) ->
chttpd:send_chunk(Resp, Chunk),
Acc.
-
apply_etag(JsonResp, undefined) ->
JsonResp;
apply_etag({ExternalResponse}, CurrentEtag) ->
@@ -332,7 +366,7 @@ apply_headers(JsonResp, []) ->
apply_headers(JsonResp, NewHeaders) ->
case couch_util:get_value(<<"headers">>, JsonResp) of
undefined ->
- {[{<<"headers">>, {NewHeaders}}| JsonResp]};
+ {[{<<"headers">>, {NewHeaders}} | JsonResp]};
JsonHeaders ->
Headers = apply_headers1(JsonHeaders, NewHeaders),
NewKV = {<<"headers">>, Headers},
@@ -344,13 +378,11 @@ apply_headers1(JsonHeaders, [{Key, Value} | Rest]) ->
apply_headers1(JsonHeaders, []) ->
JsonHeaders.
-
% Maybe this is in the proplists API
% todo move to couch_util
json_apply_field(H, {L}) ->
json_apply_field(H, L, []).
-
json_apply_field({Key, NewValue}, [{Key, _OldVal} | Headers], Acc) ->
% drop matching keys
json_apply_field({Key, NewValue}, Headers, Acc);
@@ -359,8 +391,7 @@ json_apply_field({Key, NewValue}, [{OtherKey, OtherVal} | Headers], Acc) ->
json_apply_field({Key, NewValue}, Headers, [{OtherKey, OtherVal} | Acc]);
json_apply_field({Key, NewValue}, [], Acc) ->
% end of list, add ours
- {[{Key, NewValue}|Acc]}.
-
+ {[{Key, NewValue} | Acc]}.
% This loads the db info if we have a fully loaded db record, but we might not
% have the db locally on this node, so then load the info through fabric.
@@ -371,7 +402,9 @@ json_req_obj(Req, Db) ->
% and json_req_obj calls fabric:get_db_info()
JRO = fun() -> exit(chttpd_external:json_req_obj(Req, Db)) end,
{Pid, Ref} = spawn_monitor(JRO),
- receive {'DOWN', Ref, process, Pid, JsonReq} -> JsonReq end;
+ receive
+ {'DOWN', Ref, process, Pid, JsonReq} -> JsonReq
+ end;
false ->
chttpd_external:json_req_obj(Req, Db)
end.
@@ -381,7 +414,6 @@ last_chunk(Req, undefined) ->
last_chunk(_Req, Resp) ->
chttpd:send_chunk(Resp, []).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -395,7 +427,7 @@ apply_headers_test_() ->
should_apply_headers() ->
?_test(begin
JsonResp = [{<<"code">>, 201}],
- Headers = [{<<"foo">>, <<"bar">>}],
+ Headers = [{<<"foo">>, <<"bar">>}],
{Props} = apply_headers(JsonResp, Headers),
JsonHeaders = couch_util:get_value(<<"headers">>, Props),
?assertEqual({Headers}, JsonHeaders)
@@ -404,7 +436,7 @@ should_apply_headers() ->
should_apply_headers_with_merge() ->
?_test(begin
BaseHeaders = [{<<"bar">>, <<"baz">>}],
- NewHeaders = [{<<"foo">>, <<"bar">>}],
+ NewHeaders = [{<<"foo">>, <<"bar">>}],
JsonResp = [
{<<"code">>, 201},
{<<"headers">>, {BaseHeaders}}
@@ -418,7 +450,7 @@ should_apply_headers_with_merge() ->
should_apply_headers_with_merge_overwrite() ->
?_test(begin
BaseHeaders = [{<<"foo">>, <<"bar">>}],
- NewHeaders = [{<<"foo">>, <<"baz">>}],
+ NewHeaders = [{<<"foo">>, <<"baz">>}],
JsonResp = [
{<<"code">>, 201},
{<<"headers">>, {BaseHeaders}}
@@ -428,7 +460,6 @@ should_apply_headers_with_merge_overwrite() ->
?assertEqual({NewHeaders}, JsonHeaders)
end).
-
send_list_row_test_() ->
Cases = couch_tests_combinatorics:product([
[
@@ -439,26 +470,40 @@ send_list_row_test_() ->
[
req,
undefined
- ]]),
+ ]
+ ]),
{"Ensure send_list_row returns a valid response on end or error",
{setup, fun setup/0, fun(_) -> meck:unload() end, [
{
lists:flatten(io_lib:format("~s -- ~p", [N, R])),
should_return_valid_response(F, R)
- } || [{N, F}, R] <- Cases
- ]}
- }.
+ }
+ || [{N, F}, R] <- Cases
+ ]}}.
setup() ->
ok = application:start(config, permanent),
- ok = meck:expect(chttpd, send_chunk,
- fun(Resp, _) -> {ok, Resp} end),
- ok = meck:expect(chttpd, send_chunked_error,
- fun(Resp, _) -> {ok, Resp} end),
- ok = meck:expect(chttpd, start_chunked_response,
- fun(_, _, _) -> {ok, resp} end),
- ok = meck:expect(chttpd_external, parse_external_response, 1,
- #extern_resp_args{headers = []}).
+ ok = meck:expect(
+ chttpd,
+ send_chunk,
+ fun(Resp, _) -> {ok, Resp} end
+ ),
+ ok = meck:expect(
+ chttpd,
+ send_chunked_error,
+ fun(Resp, _) -> {ok, Resp} end
+ ),
+ ok = meck:expect(
+ chttpd,
+ start_chunked_response,
+ fun(_, _, _) -> {ok, resp} end
+ ),
+ ok = meck:expect(
+ chttpd_external,
+ parse_external_response,
+ 1,
+ #extern_resp_args{headers = []}
+ ).
should_return_valid_response(Spec, Req) ->
?_test(begin
diff --git a/src/couch_mrview/src/couch_mrview_test_util.erl b/src/couch_mrview/src/couch_mrview_test_util.erl
index 2dfa64e61..918988ea3 100644
--- a/src/couch_mrview/src/couch_mrview_test_util.erl
+++ b/src/couch_mrview/src/couch_mrview_test_util.erl
@@ -18,17 +18,14 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
-
init_db(Name, Type) ->
init_db(Name, Type, 10).
-
init_db(Name, Type, Count) ->
{ok, Db} = new_db(Name, Type),
Docs = make_docs(Type, Count),
save_docs(Db, Docs).
-
new_db(Name, Type) when Type == local; Type == design ->
couch_server:delete(Name, [?ADMIN_CTX]),
couch_db:create(Name, [?ADMIN_CTX]);
@@ -44,80 +41,96 @@ save_docs(Db, Docs) ->
{ok, _} = couch_db:update_docs(Db, Docs, []),
couch_db:reopen(Db).
-
make_docs(local, Count) ->
[local_doc(I) || I <- lists:seq(1, Count)];
make_docs(design, Count) ->
- lists:foldl(fun(I, Acc) ->
- [doc(I), ddoc(I) | Acc]
- end, [], lists:seq(1, Count));
+ lists:foldl(
+ fun(I, Acc) ->
+ [doc(I), ddoc(I) | Acc]
+ end,
+ [],
+ lists:seq(1, Count)
+ );
make_docs(_, Count) ->
[doc(I) || I <- lists:seq(1, Count)].
-
make_docs(_, Since, Count) ->
[doc(I) || I <- lists:seq(Since, Count)].
-
ddoc(map) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/bar">>},
- {<<"views">>, {[
- {<<"baz">>, {[
- {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
- ]}},
- {<<"bing">>, {[
- {<<"map">>, <<"function(doc) {}">>}
- ]}},
- {<<"zing">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " if(doc.foo !== undefined)\n"
- " emit(doc.foo, 0);\n"
- "}"
- >>}
- ]}}
- ]}}
- ]});
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/bar">>},
+ {<<"views">>,
+ {[
+ {<<"baz">>,
+ {[
+ {<<"map">>, <<"function(doc) {emit(doc.val, doc.val);}">>}
+ ]}},
+ {<<"bing">>,
+ {[
+ {<<"map">>, <<"function(doc) {}">>}
+ ]}},
+ {<<"zing">>,
+ {[
+ {<<"map">>, <<
+ "function(doc) {\n"
+ " if(doc.foo !== undefined)\n"
+ " emit(doc.foo, 0);\n"
+ "}"
+ >>}
+ ]}}
+ ]}}
+ ]}
+ );
ddoc(red) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/red">>},
- {<<"views">>, {[
- {<<"baz">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " emit([doc.val % 2, doc.val], doc.val);\n"
- "}\n"
- >>},
- {<<"reduce">>, <<"function(keys, vals) {return sum(vals);}">>}
- ]}},
- {<<"zing">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " if(doc.foo !== undefined)\n"
- " emit(doc.foo, null);\n"
- "}"
- >>},
- {<<"reduce">>, <<"_count">>}
- ]}}
- ]}}
- ]});
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/red">>},
+ {<<"views">>,
+ {[
+ {<<"baz">>,
+ {[
+ {<<"map">>, <<
+ "function(doc) {\n"
+ " emit([doc.val % 2, doc.val], doc.val);\n"
+ "}\n"
+ >>},
+ {<<"reduce">>, <<"function(keys, vals) {return sum(vals);}">>}
+ ]}},
+ {<<"zing">>,
+ {[
+ {<<"map">>, <<
+ "function(doc) {\n"
+ " if(doc.foo !== undefined)\n"
+ " emit(doc.foo, null);\n"
+ "}"
+ >>},
+ {<<"reduce">>, <<"_count">>}
+ ]}}
+ ]}}
+ ]}
+ );
ddoc(Id) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(io_lib:format("_design/bar~2..0b", [Id]))},
- {<<"views">>, {[]}}
- ]}).
-
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, list_to_binary(io_lib:format("_design/bar~2..0b", [Id]))},
+ {<<"views">>, {[]}}
+ ]}
+ ).
doc(Id) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(integer_to_list(Id))},
- {<<"val">>, Id}
- ]}).
-
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, list_to_binary(integer_to_list(Id))},
+ {<<"val">>, Id}
+ ]}
+ ).
local_doc(Id) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(io_lib:format("_local/~b", [Id]))},
- {<<"val">>, Id}
- ]}).
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, list_to_binary(io_lib:format("_local/~b", [Id]))},
+ {<<"val">>, Id}
+ ]}
+ ).
diff --git a/src/couch_mrview/src/couch_mrview_update_notifier.erl b/src/couch_mrview/src/couch_mrview_update_notifier.erl
index 803d39747..ac91131a0 100644
--- a/src/couch_mrview/src/couch_mrview_update_notifier.erl
+++ b/src/couch_mrview/src/couch_mrview_update_notifier.erl
@@ -20,7 +20,9 @@
-include_lib("couch/include/couch_db.hrl").
start_link(Exec) ->
- couch_event_sup:start_link(couch_mrview_update, {couch_mrview_update_notifier, make_ref()}, Exec).
+ couch_event_sup:start_link(
+ couch_mrview_update, {couch_mrview_update_notifier, make_ref()}, Exec
+ ).
notify(Event) ->
gen_event:notify(couch_mrview_update, Event).
diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl
index 522367c1d..969a82028 100644
--- a/src/couch_mrview/src/couch_mrview_updater.erl
+++ b/src/couch_mrview/src/couch_mrview_updater.erl
@@ -26,22 +26,25 @@ start_update(Partial, State, NumChanges, NumChangesDone) ->
{ok, DocQueue} = couch_work_queue:new(QueueOpts),
{ok, WriteQueue} = couch_work_queue:new(QueueOpts),
InitState = State#mrst{
- first_build=State#mrst.update_seq==0,
- partial_resp_pid=Partial,
- doc_acc=[],
- doc_queue=DocQueue,
- write_queue=WriteQueue
+ first_build = State#mrst.update_seq == 0,
+ partial_resp_pid = Partial,
+ doc_acc = [],
+ doc_queue = DocQueue,
+ write_queue = WriteQueue
},
Self = self(),
MapFun = fun() ->
- erlang:put(io_priority,
- {view_update, State#mrst.db_name, State#mrst.idx_name}),
- Progress = case NumChanges of
- 0 -> 0;
- _ -> (NumChangesDone * 100) div NumChanges
- end,
+ erlang:put(
+ io_priority,
+ {view_update, State#mrst.db_name, State#mrst.idx_name}
+ ),
+ Progress =
+ case NumChanges of
+ 0 -> 0;
+ _ -> (NumChangesDone * 100) div NumChanges
+ end,
couch_task_status:add_task([
{indexer_pid, ?l2b(pid_to_list(Partial))},
{type, indexer},
@@ -55,8 +58,10 @@ start_update(Partial, State, NumChanges, NumChangesDone) ->
map_docs(Self, InitState)
end,
WriteFun = fun() ->
- erlang:put(io_priority,
- {view_update, State#mrst.db_name, State#mrst.idx_name}),
+ erlang:put(
+ io_priority,
+ {view_update, State#mrst.db_name, State#mrst.idx_name}
+ ),
write_results(Self, InitState)
end,
spawn_link(MapFun),
@@ -64,12 +69,11 @@ start_update(Partial, State, NumChanges, NumChangesDone) ->
{ok, InitState}.
-
purge(_Db, PurgeSeq, PurgedIdRevs, State) ->
#mrst{
- id_btree=IdBtree,
- views=Views,
- partitioned=Partitioned
+ id_btree = IdBtree,
+ views = Views,
+ partitioned = Partitioned
} = State,
Ids = [Id || {Id, _Revs} <- PurgedIdRevs],
@@ -81,10 +85,14 @@ purge(_Db, PurgeSeq, PurgedIdRevs, State) ->
({ViewNum, {Key, Seq, _Op}}, DictAcc2) ->
dict:append(ViewNum, {Key, Seq, DocId}, DictAcc2);
({ViewNum, RowKey0}, DictAcc2) ->
- RowKey = if not Partitioned -> RowKey0; true ->
- [{RK, _}] = inject_partition([{RowKey0, DocId}]),
- RK
- end,
+ RowKey =
+ if
+ not Partitioned ->
+ RowKey0;
+ true ->
+ [{RK, _}] = inject_partition([{RowKey0, DocId}]),
+ RK
+ end,
dict:append(ViewNum, {RowKey, DocId}, DictAcc2)
end,
lists:foldl(FoldFun, DictAcc, ViewNumRowKeys);
@@ -93,54 +101,54 @@ purge(_Db, PurgeSeq, PurgedIdRevs, State) ->
end,
KeysToRemove = lists:foldl(MakeDictFun, dict:new(), Lookups),
- RemKeysFun = fun(#mrview{id_num=ViewId}=View) ->
+ RemKeysFun = fun(#mrview{id_num = ViewId} = View) ->
ToRem = couch_util:dict_find(ViewId, KeysToRemove, []),
{ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, [], ToRem),
- NewPurgeSeq = case VBtree2 =/= View#mrview.btree of
- true -> PurgeSeq;
- _ -> View#mrview.purge_seq
- end,
- View#mrview{btree=VBtree2, purge_seq=NewPurgeSeq}
+ NewPurgeSeq =
+ case VBtree2 =/= View#mrview.btree of
+ true -> PurgeSeq;
+ _ -> View#mrview.purge_seq
+ end,
+ View#mrview{btree = VBtree2, purge_seq = NewPurgeSeq}
end,
Views2 = lists:map(RemKeysFun, Views),
{ok, State#mrst{
- id_btree=IdBtree2,
- views=Views2,
- purge_seq=PurgeSeq
+ id_btree = IdBtree2,
+ views = Views2,
+ purge_seq = PurgeSeq
}}.
-
-process_doc(Doc, Seq, #mrst{doc_acc=Acc}=State) when length(Acc) > 100 ->
+process_doc(Doc, Seq, #mrst{doc_acc = Acc} = State) when length(Acc) > 100 ->
couch_work_queue:queue(State#mrst.doc_queue, lists:reverse(Acc)),
- process_doc(Doc, Seq, State#mrst{doc_acc=[]});
-process_doc(nil, Seq, #mrst{doc_acc=Acc}=State) ->
- {ok, State#mrst{doc_acc=[{nil, Seq, nil} | Acc]}};
-process_doc(#doc{id=Id, deleted=true}, Seq, #mrst{doc_acc=Acc}=State) ->
- {ok, State#mrst{doc_acc=[{Id, Seq, deleted} | Acc]}};
-process_doc(#doc{id=Id}=Doc, Seq, #mrst{doc_acc=Acc}=State) ->
- {ok, State#mrst{doc_acc=[{Id, Seq, Doc} | Acc]}}.
-
-
-finish_update(#mrst{doc_acc=Acc}=State) ->
- if Acc /= [] ->
- couch_work_queue:queue(State#mrst.doc_queue, Acc);
- true -> ok
+ process_doc(Doc, Seq, State#mrst{doc_acc = []});
+process_doc(nil, Seq, #mrst{doc_acc = Acc} = State) ->
+ {ok, State#mrst{doc_acc = [{nil, Seq, nil} | Acc]}};
+process_doc(#doc{id = Id, deleted = true}, Seq, #mrst{doc_acc = Acc} = State) ->
+ {ok, State#mrst{doc_acc = [{Id, Seq, deleted} | Acc]}};
+process_doc(#doc{id = Id} = Doc, Seq, #mrst{doc_acc = Acc} = State) ->
+ {ok, State#mrst{doc_acc = [{Id, Seq, Doc} | Acc]}}.
+
+finish_update(#mrst{doc_acc = Acc} = State) ->
+ if
+ Acc /= [] ->
+ couch_work_queue:queue(State#mrst.doc_queue, Acc);
+ true ->
+ ok
end,
couch_work_queue:close(State#mrst.doc_queue),
receive
{new_state, NewState} ->
{ok, NewState#mrst{
- first_build=undefined,
- partial_resp_pid=undefined,
- doc_acc=undefined,
- doc_queue=undefined,
- write_queue=undefined,
- qserver=nil
+ first_build = undefined,
+ partial_resp_pid = undefined,
+ doc_acc = undefined,
+ doc_queue = undefined,
+ write_queue = undefined,
+ qserver = nil
}}
end.
-
map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) ->
erlang:put(io_priority, {view_update, DbName, IdxName}),
case couch_work_queue:dequeue(State0#mrst.doc_queue) of
@@ -150,10 +158,11 @@ map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) ->
{ok, Dequeued} ->
% Run all the non deleted docs through the view engine and
% then pass the results on to the writer process.
- State1 = case State0#mrst.qserver of
- nil -> start_query_server(State0);
- _ -> State0
- end,
+ State1 =
+ case State0#mrst.qserver of
+ nil -> start_query_server(State0);
+ _ -> State0
+ end,
QServer = State1#mrst.qserver,
DocFun = fun
({nil, Seq, _}, {SeqAcc, Results}) ->
@@ -174,38 +183,37 @@ map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) ->
map_docs(Parent, State1)
end.
-
write_results(Parent, #mrst{} = State) ->
case accumulate_writes(State, State#mrst.write_queue, nil) of
stop ->
Parent ! {new_state, State};
{Go, {Seq, ViewKVs, DocIdKeys}} ->
NewState = write_kvs(State, Seq, ViewKVs, DocIdKeys),
- if Go == stop ->
- Parent ! {new_state, NewState};
- true ->
- send_partial(NewState#mrst.partial_resp_pid, NewState),
- write_results(Parent, NewState)
+ if
+ Go == stop ->
+ Parent ! {new_state, NewState};
+ true ->
+ send_partial(NewState#mrst.partial_resp_pid, NewState),
+ write_results(Parent, NewState)
end
end.
-
start_query_server(State) ->
#mrst{
- language=Language,
- lib=Lib,
- views=Views
+ language = Language,
+ lib = Lib,
+ views = Views
} = State,
Defs = [View#mrview.def || View <- Views],
{ok, QServer} = couch_query_servers:start_doc_map(Language, Defs, Lib),
- State#mrst{qserver=QServer}.
-
+ State#mrst{qserver = QServer}.
accumulate_writes(State, W, Acc0) ->
- {Seq, ViewKVs, DocIdKVs} = case Acc0 of
- nil -> {0, [{V#mrview.id_num, []} || V <- State#mrst.views], []};
- _ -> Acc0
- end,
+ {Seq, ViewKVs, DocIdKVs} =
+ case Acc0 of
+ nil -> {0, [{V#mrview.id_num, []} || V <- State#mrst.views], []};
+ _ -> Acc0
+ end,
case couch_work_queue:dequeue(W) of
closed when Seq == 0 ->
stop;
@@ -219,15 +227,13 @@ accumulate_writes(State, W, Acc0) ->
end
end.
-
accumulate_more(NumDocIds, Acc) ->
% check if we have enough items now
MinItems = config:get("view_updater", "min_writer_items", "100"),
MinSize = config:get("view_updater", "min_writer_size", "16777216"),
CurrMem = ?term_size(Acc),
- NumDocIds < list_to_integer(MinItems)
- andalso CurrMem < list_to_integer(MinSize).
-
+ NumDocIds < list_to_integer(MinItems) andalso
+ CurrMem < list_to_integer(MinSize).
merge_results([], SeqAcc, ViewKVs, DocIdKeys) ->
{SeqAcc, ViewKVs, DocIdKeys};
@@ -238,7 +244,6 @@ merge_results([{Seq, Results} | Rest], SeqAcc, ViewKVs, DocIdKeys) ->
{ViewKVs1, DocIdKeys1} = lists:foldl(Fun, {ViewKVs, DocIdKeys}, Results),
merge_results(Rest, erlang:max(Seq, SeqAcc), ViewKVs1, DocIdKeys1).
-
merge_results({DocId, []}, ViewKVs, DocIdKeys) ->
{ViewKVs, [{DocId, []} | DocIdKeys]};
merge_results({DocId, RawResults}, ViewKVs, DocIdKeys) ->
@@ -252,7 +257,6 @@ merge_results({DocId, RawResults}, ViewKVs, DocIdKeys) ->
{ViewKVs1, [ViewIdKeys | DocIdKeys]}
end.
-
insert_results(DocId, [], [], ViewKVs, ViewIdKeys) ->
{lists:reverse(ViewKVs), {DocId, ViewIdKeys}};
insert_results(DocId, [KVs | RKVs], [{Id, VKVs} | RVKVs], VKVAcc, VIdKeys) ->
@@ -266,62 +270,67 @@ insert_results(DocId, [KVs | RKVs], [{Id, VKVs} | RVKVs], VKVAcc, VIdKeys) ->
end,
InitAcc = {[], VIdKeys},
couch_stats:increment_counter([couchdb, mrview, emits], length(KVs)),
- {Duped, VIdKeys0} = lists:foldl(CombineDupesFun, InitAcc,
- lists:sort(KVs)),
+ {Duped, VIdKeys0} = lists:foldl(
+ CombineDupesFun,
+ InitAcc,
+ lists:sort(KVs)
+ ),
FinalKVs = [{{Key, DocId}, Val} || {Key, Val} <- Duped] ++ VKVs,
insert_results(DocId, RKVs, RVKVs, [{Id, FinalKVs} | VKVAcc], VIdKeys0).
-
write_kvs(State, UpdateSeq, ViewKVs, DocIdKeys) ->
#mrst{
- id_btree=IdBtree,
- first_build=FirstBuild,
- partitioned=Partitioned
+ id_btree = IdBtree,
+ first_build = FirstBuild,
+ partitioned = Partitioned
} = State,
{ok, ToRemove, IdBtree2} = update_id_btree(IdBtree, DocIdKeys, FirstBuild),
ToRemByView = collapse_rem_keys(ToRemove, dict:new()),
- UpdateView = fun(#mrview{id_num=ViewId}=View, {ViewId, KVs0}) ->
+ UpdateView = fun(#mrview{id_num = ViewId} = View, {ViewId, KVs0}) ->
ToRem0 = couch_util:dict_find(ViewId, ToRemByView, []),
- {KVs, ToRem} = case Partitioned of
- true ->
- KVs1 = inject_partition(KVs0),
- ToRem1 = inject_partition(ToRem0),
- {KVs1, ToRem1};
- false ->
- {KVs0, ToRem0}
- end,
+ {KVs, ToRem} =
+ case Partitioned of
+ true ->
+ KVs1 = inject_partition(KVs0),
+ ToRem1 = inject_partition(ToRem0),
+ {KVs1, ToRem1};
+ false ->
+ {KVs0, ToRem0}
+ end,
{ok, VBtree2} = couch_btree:add_remove(View#mrview.btree, KVs, ToRem),
- NewUpdateSeq = case VBtree2 =/= View#mrview.btree of
- true -> UpdateSeq;
- _ -> View#mrview.update_seq
- end,
+ NewUpdateSeq =
+ case VBtree2 =/= View#mrview.btree of
+ true -> UpdateSeq;
+ _ -> View#mrview.update_seq
+ end,
- View2 = View#mrview{btree=VBtree2, update_seq=NewUpdateSeq},
+ View2 = View#mrview{btree = VBtree2, update_seq = NewUpdateSeq},
maybe_notify(State, View2, KVs, ToRem),
View2
end,
State#mrst{
- views=lists:zipwith(UpdateView, State#mrst.views, ViewKVs),
- update_seq=UpdateSeq,
- id_btree=IdBtree2
+ views = lists:zipwith(UpdateView, State#mrst.views, ViewKVs),
+ update_seq = UpdateSeq,
+ id_btree = IdBtree2
}.
-
inject_partition(Rows) ->
- lists:map(fun
- ({{Key, DocId}, Value}) ->
- % Adding a row to the view
- {Partition, _} = couch_partition:extract(DocId),
- {{{p, Partition, Key}, DocId}, Value};
- ({Key, DocId}) ->
- % Removing a row based on values in id_tree
- {Partition, _} = couch_partition:extract(DocId),
- {{p, Partition, Key}, DocId}
- end, Rows).
-
+ lists:map(
+ fun
+ ({{Key, DocId}, Value}) ->
+ % Adding a row to the view
+ {Partition, _} = couch_partition:extract(DocId),
+ {{{p, Partition, Key}, DocId}, Value};
+ ({Key, DocId}) ->
+ % Removing a row based on values in id_tree
+ {Partition, _} = couch_partition:extract(DocId),
+ {{p, Partition, Key}, DocId}
+ end,
+ Rows
+ ).
update_id_btree(Btree, DocIdKeys, true) ->
ToAdd = [{Id, DIKeys} || {Id, DIKeys} <- DocIdKeys, DIKeys /= []],
@@ -332,37 +341,38 @@ update_id_btree(Btree, DocIdKeys, _) ->
ToRem = [Id || {Id, DIKeys} <- DocIdKeys, DIKeys == []],
couch_btree:query_modify(Btree, ToFind, ToAdd, ToRem).
-
collapse_rem_keys([], Acc) ->
Acc;
collapse_rem_keys([{ok, {DocId, ViewIdKeys}} | Rest], Acc) ->
- NewAcc = lists:foldl(fun({ViewId, Key}, Acc2) ->
- dict:append(ViewId, {Key, DocId}, Acc2)
- end, Acc, ViewIdKeys),
+ NewAcc = lists:foldl(
+ fun({ViewId, Key}, Acc2) ->
+ dict:append(ViewId, {Key, DocId}, Acc2)
+ end,
+ Acc,
+ ViewIdKeys
+ ),
collapse_rem_keys(Rest, NewAcc);
collapse_rem_keys([{not_found, _} | Rest], Acc) ->
collapse_rem_keys(Rest, Acc).
-
send_partial(Pid, State) when is_pid(Pid) ->
gen_server:cast(Pid, {new_state, State});
send_partial(_, _) ->
ok.
-
update_task(NumChanges) ->
[Changes, Total] = couch_task_status:get([changes_done, total_changes]),
Changes2 = Changes + NumChanges,
- Progress = case Total of
- 0 ->
- % updater restart after compaction finishes
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
+ Progress =
+ case Total of
+ 0 ->
+ % updater restart after compaction finishes
+ 0;
+ _ ->
+ (Changes2 * 100) div Total
+ end,
couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
-
maybe_notify(State, View, KVs, ToRem) ->
Updated = fun() ->
[Key || {{Key, _}, _} <- KVs]
diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl
index d318a3f4a..b7220f71f 100644
--- a/src/couch_mrview/src/couch_mrview_util.erl
+++ b/src/couch_mrview/src/couch_mrview_util.erl
@@ -38,39 +38,54 @@
-define(GET_VIEW_RETRY_DELAY, 50).
-define(LOWEST_KEY, null).
-define(HIGHEST_KEY, {<<255, 255, 255, 255>>}).
--define(LOWEST(A, B), (if A < B -> A; true -> B end)).
--define(HIGHEST(A, B), (if A > B -> A; true -> B end)).
+-define(LOWEST(A, B),
+ (if
+ A < B -> A;
+ true -> B
+ end)
+).
+-define(HIGHEST(A, B),
+ (if
+ A > B -> A;
+ true -> B
+ end)
+).
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
get_local_purge_doc_id(Sig) ->
?l2b(?LOCAL_DOC_PREFIX ++ "purge-mrview-" ++ Sig).
-
get_value_from_options(Key, Options) ->
case couch_util:get_value(Key, Options) of
undefined ->
Reason = <<"'", Key/binary, "' must exists in options.">>,
throw({bad_request, Reason});
- Value -> Value
+ Value ->
+ Value
end.
-
verify_view_filename(FileName) ->
FilePathList = filename:split(FileName),
PureFN = lists:last(FilePathList),
case filename:extension(PureFN) of
".view" ->
Sig = filename:basename(PureFN),
- case [Ch || Ch <- Sig, not (((Ch >= $0) and (Ch =< $9))
- orelse ((Ch >= $a) and (Ch =< $f))
- orelse ((Ch >= $A) and (Ch =< $F)))] == [] of
+ case
+ [
+ Ch
+ || Ch <- Sig,
+ not (((Ch >= $0) and (Ch =< $9)) orelse
+ ((Ch >= $a) and (Ch =< $f)) orelse
+ ((Ch >= $A) and (Ch =< $F)))
+ ] == []
+ of
true -> true;
false -> false
end;
- _ -> false
+ _ ->
+ false
end.
get_signature_from_filename(FileName) ->
@@ -82,7 +97,7 @@ get_view(Db, DDoc, ViewName, Args0) ->
case get_view_index_state(Db, DDoc, ViewName, Args0) of
{ok, State, Args2} ->
Ref = erlang:monitor(process, State#mrst.fd),
- #mrst{language=Lang, views=Views} = State,
+ #mrst{language = Lang, views = Views} = State,
{Type, View, Args3} = extract_view(Lang, Args2, ViewName, Views),
check_range(Args3, view_cmp(View)),
Sig = view_sig(Db, State, View, Args3),
@@ -91,7 +106,6 @@ get_view(Db, DDoc, ViewName, Args0) ->
ddoc_updated
end.
-
get_view_index_pid(Db, DDoc, ViewName, Args0) ->
ArgCheck = fun(InitState) ->
Args1 = set_view_type(Args0, ViewName, InitState#mrst.views),
@@ -99,7 +113,6 @@ get_view_index_pid(Db, DDoc, ViewName, Args0) ->
end,
couch_index_server:get_index(?MOD, Db, DDoc, ArgCheck).
-
get_view_index_state(Db, DDoc, ViewName, Args0) ->
get_view_index_state(Db, DDoc, ViewName, Args0, ?GET_VIEW_RETRY_COUNT).
@@ -112,17 +125,18 @@ get_view_index_state(Db, DDoc, ViewName, Args0, RetryCount) ->
UpdateSeq = couch_util:with_db(Db, fun(WDb) ->
couch_db:get_update_seq(WDb)
end),
- State = case Args#mrargs.update of
- lazy ->
- spawn(fun() ->
- catch couch_index:get_state(Pid, UpdateSeq)
- end),
- couch_index:get_state(Pid, 0);
- false ->
- couch_index:get_state(Pid, 0);
- _ ->
- couch_index:get_state(Pid, UpdateSeq)
- end,
+ State =
+ case Args#mrargs.update of
+ lazy ->
+ spawn(fun() ->
+ catch couch_index:get_state(Pid, UpdateSeq)
+ end),
+ couch_index:get_state(Pid, 0);
+ false ->
+ couch_index:get_state(Pid, 0);
+ _ ->
+ couch_index:get_state(Pid, UpdateSeq)
+ end,
case State of
{ok, State0} -> {ok, State0, Args};
ddoc_updated -> ddoc_updated;
@@ -138,33 +152,37 @@ get_view_index_state(Db, DDoc, ViewName, Args0, RetryCount) ->
throw(Error)
end.
-
-ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
- MakeDict = fun({Name, {MRFuns}}, DictBySrcAcc) ->
- case couch_util:get_value(<<"map">>, MRFuns) of
- MapSrc when MapSrc /= undefined ->
- RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
- {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
- View = case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
- {ok, View0} -> View0;
- error -> #mrview{def=MapSrc, options=ViewOpts}
- end,
- {MapNames, RedSrcs} = case RedSrc of
- null ->
- MNames = [Name | View#mrview.map_names],
- {MNames, View#mrview.reduce_funs};
- _ ->
- RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
- {View#mrview.map_names, RedFuns}
- end,
- View2 = View#mrview{map_names=MapNames, reduce_funs=RedSrcs},
- dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
- undefined ->
- DictBySrcAcc
- end;
+ddoc_to_mrst(DbName, #doc{id = Id, body = {Fields}}) ->
+ MakeDict = fun
+ ({Name, {MRFuns}}, DictBySrcAcc) ->
+ case couch_util:get_value(<<"map">>, MRFuns) of
+ MapSrc when MapSrc /= undefined ->
+ RedSrc = couch_util:get_value(<<"reduce">>, MRFuns, null),
+ {ViewOpts} = couch_util:get_value(<<"options">>, MRFuns, {[]}),
+ View =
+ case dict:find({MapSrc, ViewOpts}, DictBySrcAcc) of
+ {ok, View0} -> View0;
+ error -> #mrview{def = MapSrc, options = ViewOpts}
+ end,
+ {MapNames, RedSrcs} =
+ case RedSrc of
+ null ->
+ MNames = [Name | View#mrview.map_names],
+ {MNames, View#mrview.reduce_funs};
+ _ ->
+ RedFuns = [{Name, RedSrc} | View#mrview.reduce_funs],
+ {View#mrview.map_names, RedFuns}
+ end,
+ View2 = View#mrview{map_names = MapNames, reduce_funs = RedSrcs},
+ dict:store({MapSrc, ViewOpts}, View2, DictBySrcAcc);
+ undefined ->
+ DictBySrcAcc
+ end;
({Name, Else}, DictBySrcAcc) ->
- couch_log:error("design_doc_to_view_group ~s views ~p",
- [Name, Else]),
+ couch_log:error(
+ "design_doc_to_view_group ~s views ~p",
+ [Name, Else]
+ ),
DictBySrcAcc
end,
{DesignOpts} = proplists:get_value(<<"options">>, Fields, {[]}),
@@ -174,7 +192,7 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
BySrc = lists:foldl(MakeDict, dict:new(), RawViews),
NumViews = fun({_, View}, N) ->
- {View#mrview{id_num=N}, N+1}
+ {View#mrview{id_num = N}, N + 1}
end,
{Views, _} = lists:mapfoldl(NumViews, 0, lists:sort(dict:to_list(BySrc))),
@@ -182,17 +200,16 @@ ddoc_to_mrst(DbName, #doc{id=Id, body={Fields}}) ->
Lib = couch_util:get_value(<<"lib">>, RawViews, {[]}),
IdxState = #mrst{
- db_name=DbName,
- idx_name=Id,
- lib=Lib,
- views=Views,
- language=Language,
- design_opts=DesignOpts,
- partitioned=Partitioned
+ db_name = DbName,
+ idx_name = Id,
+ lib = Lib,
+ views = Views,
+ language = Language,
+ design_opts = DesignOpts,
+ partitioned = Partitioned
},
SigInfo = {Views, Language, DesignOpts, couch_index_util:sort_lib(Lib)},
- {ok, IdxState#mrst{sig=couch_hash:md5_hash(term_to_binary(SigInfo))}}.
-
+ {ok, IdxState#mrst{sig = couch_hash:md5_hash(term_to_binary(SigInfo))}}.
set_view_type(_Args, _ViewName, []) ->
throw({not_found, missing_named_view});
@@ -201,48 +218,44 @@ set_view_type(Args, ViewName, [View | Rest]) ->
case lists:member(ViewName, RedNames) of
true ->
case Args#mrargs.reduce of
- false -> Args#mrargs{view_type=map};
- _ -> Args#mrargs{view_type=red}
+ false -> Args#mrargs{view_type = map};
+ _ -> Args#mrargs{view_type = red}
end;
false ->
case lists:member(ViewName, View#mrview.map_names) of
- true -> Args#mrargs{view_type=map};
+ true -> Args#mrargs{view_type = map};
false -> set_view_type(Args, ViewName, Rest)
end
end.
-
set_extra(#mrargs{} = Args, Key, Value) ->
Extra0 = Args#mrargs.extra,
Extra1 = lists:ukeysort(1, [{Key, Value} | Extra0]),
Args#mrargs{extra = Extra1}.
-
get_extra(#mrargs{} = Args, Key) ->
couch_util:get_value(Key, Args#mrargs.extra).
get_extra(#mrargs{} = Args, Key, Default) ->
couch_util:get_value(Key, Args#mrargs.extra, Default).
-
extract_view(_Lang, _Args, _ViewName, []) ->
throw({not_found, missing_named_view});
-extract_view(Lang, #mrargs{view_type=map}=Args, Name, [View | Rest]) ->
+extract_view(Lang, #mrargs{view_type = map} = Args, Name, [View | Rest]) ->
Names = View#mrview.map_names ++ [N || {N, _} <- View#mrview.reduce_funs],
case lists:member(Name, Names) of
true -> {map, View, Args};
_ -> extract_view(Lang, Args, Name, Rest)
end;
-extract_view(Lang, #mrargs{view_type=red}=Args, Name, [View | Rest]) ->
+extract_view(Lang, #mrargs{view_type = red} = Args, Name, [View | Rest]) ->
RedNames = [N || {N, _} <- View#mrview.reduce_funs],
case lists:member(Name, RedNames) of
true -> {red, {index_of(Name, RedNames), Lang, View}, Args};
false -> extract_view(Lang, Args, Name, Rest)
end.
-
-view_sig(Db, State, View, #mrargs{include_docs=true}=Args) ->
- BaseSig = view_sig(Db, State, View, Args#mrargs{include_docs=false}),
+view_sig(Db, State, View, #mrargs{include_docs = true} = Args) ->
+ BaseSig = view_sig(Db, State, View, Args#mrargs{include_docs = false}),
UpdateSeq = couch_db:get_update_seq(Db),
PurgeSeq = couch_db:get_purge_seq(Db),
Term = view_sig_term(BaseSig, UpdateSeq, PurgeSeq),
@@ -254,8 +267,8 @@ view_sig(_Db, State, View, Args0) ->
UpdateSeq = View#mrview.update_seq,
PurgeSeq = View#mrview.purge_seq,
Args = Args0#mrargs{
- preflight_fun=undefined,
- extra=[]
+ preflight_fun = undefined,
+ extra = []
},
Term = view_sig_term(Sig, UpdateSeq, PurgeSeq, Args),
couch_index_util:hexsig(couch_hash:md5_hash(term_to_binary(Term))).
@@ -266,26 +279,25 @@ view_sig_term(BaseSig, UpdateSeq, PurgeSeq) ->
view_sig_term(BaseSig, UpdateSeq, PurgeSeq, Args) ->
{BaseSig, UpdateSeq, PurgeSeq, Args}.
-
-init_state(Db, Fd, #mrst{views=Views}=State, nil) ->
+init_state(Db, Fd, #mrst{views = Views} = State, nil) ->
PurgeSeq = couch_db:get_purge_seq(Db),
Header = #mrheader{
- seq=0,
- purge_seq=PurgeSeq,
- id_btree_state=nil,
- view_states=[make_view_state(#mrview{}) || _ <- Views]
+ seq = 0,
+ purge_seq = PurgeSeq,
+ id_btree_state = nil,
+ view_states = [make_view_state(#mrview{}) || _ <- Views]
},
init_state(Db, Fd, State, Header);
init_state(Db, Fd, State, Header) ->
#mrst{
- language=Lang,
- views=Views
+ language = Lang,
+ views = Views
} = State,
#mrheader{
- seq=Seq,
- purge_seq=PurgeSeq,
- id_btree_state=IdBtreeState,
- view_states=ViewStates
+ seq = Seq,
+ purge_seq = PurgeSeq,
+ id_btree_state = IdBtreeState,
+ view_states = ViewStates
} = maybe_update_header(Header),
IdBtOpts = [
@@ -297,12 +309,12 @@ init_state(Db, Fd, State, Header) ->
Views2 = lists:zipwith(OpenViewFun, ViewStates, Views),
State#mrst{
- fd=Fd,
- fd_monitor=erlang:monitor(process, Fd),
- update_seq=Seq,
- purge_seq=PurgeSeq,
- id_btree=IdBtree,
- views=Views2
+ fd = Fd,
+ fd_monitor = erlang:monitor(process, Fd),
+ update_seq = Seq,
+ purge_seq = PurgeSeq,
+ id_btree = IdBtree,
+ views = Views2
}.
open_view(_Db, Fd, Lang, ViewState, View) ->
@@ -317,38 +329,42 @@ open_view(_Db, Fd, Lang, ViewState, View) ->
],
{ok, Btree} = couch_btree:open(BTState, Fd, ViewBtOpts),
- View#mrview{btree=Btree,
- update_seq=get_update_seq(ViewState),
- purge_seq=get_purge_seq(ViewState)}.
-
+ View#mrview{
+ btree = Btree,
+ update_seq = get_update_seq(ViewState),
+ purge_seq = get_purge_seq(ViewState)
+ }.
temp_view_to_ddoc({Props}) ->
Language = couch_util:get_value(<<"language">>, Props, <<"javascript">>),
Options = couch_util:get_value(<<"options">>, Props, {[]}),
View0 = [{<<"map">>, couch_util:get_value(<<"map">>, Props)}],
- View1 = View0 ++ case couch_util:get_value(<<"reduce">>, Props) of
- RedSrc when is_binary(RedSrc) -> [{<<"reduce">>, RedSrc}];
- _ -> []
- end,
- DDoc = {[
- {<<"_id">>, couch_uuids:random()},
- {<<"language">>, Language},
- {<<"options">>, Options},
- {<<"views">>, {[
- {<<"temp">>, {View1}}
- ]}}
- ]},
+ View1 =
+ View0 ++
+ case couch_util:get_value(<<"reduce">>, Props) of
+ RedSrc when is_binary(RedSrc) -> [{<<"reduce">>, RedSrc}];
+ _ -> []
+ end,
+ DDoc =
+ {[
+ {<<"_id">>, couch_uuids:random()},
+ {<<"language">>, Language},
+ {<<"options">>, Options},
+ {<<"views">>,
+ {[
+ {<<"temp">>, {View1}}
+ ]}}
+ ]},
couch_doc:from_json_obj(DDoc).
-
-get_row_count(#mrview{btree=Bt}) ->
- Count = case couch_btree:full_reduce(Bt) of
- {ok, {Count0, _Reds, _}} -> Count0;
- {ok, {Count0, _Reds}} -> Count0
- end,
+get_row_count(#mrview{btree = Bt}) ->
+ Count =
+ case couch_btree:full_reduce(Bt) of
+ {ok, {Count0, _Reds, _}} -> Count0;
+ {ok, {Count0, _Reds}} -> Count0
+ end,
{ok, Count}.
-
all_docs_reduce_to_count(Reductions) ->
Reduce = fun couch_bt_engine:id_tree_reduce/2,
{Count, _, _} = couch_btree:final_reduce(Reduce, Reductions),
@@ -361,8 +377,7 @@ reduce_to_count(Reductions) ->
FinalReduction = couch_btree:final_reduce(CountReduceFun, Reductions),
get_count(FinalReduction).
-
-fold(#mrview{btree=Bt}, Fun, Acc, Opts) ->
+fold(#mrview{btree = Bt}, Fun, Acc, Opts) ->
WrapperFun = fun(KV, Reds, Acc2) ->
fold_fun(Fun, expand_dups([KV], []), Reds, Acc2)
end,
@@ -370,19 +385,18 @@ fold(#mrview{btree=Bt}, Fun, Acc, Opts) ->
fold_fun(_Fun, [], _, Acc) ->
{ok, Acc};
-fold_fun(Fun, [KV|Rest], {KVReds, Reds}, Acc) ->
+fold_fun(Fun, [KV | Rest], {KVReds, Reds}, Acc) ->
case Fun(KV, {KVReds, Reds}, Acc) of
{ok, Acc2} ->
- fold_fun(Fun, Rest, {[KV|KVReds], Reds}, Acc2);
+ fold_fun(Fun, Rest, {[KV | KVReds], Reds}, Acc2);
{stop, Acc2} ->
{stop, Acc2}
end.
-
-fold_reduce({NthRed, Lang, View}, Fun, Acc, Options) ->
+fold_reduce({NthRed, Lang, View}, Fun, Acc, Options) ->
#mrview{
- btree=Bt,
- reduce_funs=RedFuns
+ btree = Bt,
+ reduce_funs = RedFuns
} = View,
ReduceFun = make_user_reds_reduce_fun(Lang, RedFuns, NthRed),
@@ -395,13 +409,11 @@ fold_reduce({NthRed, Lang, View}, Fun, Acc, Options) ->
couch_btree:fold_reduce(Bt, WrapperFun, Acc, Options).
-
validate_args(Db, DDoc, Args0) ->
{ok, State} = couch_mrview_index:init(Db, DDoc),
Args1 = apply_limit(State#mrst.partitioned, Args0),
validate_args(State, Args1).
-
validate_args(#mrst{} = State, Args0) ->
Args = validate_args(Args0),
@@ -410,45 +422,55 @@ validate_args(#mrst{} = State, Args0) ->
case {ViewPartitioned, Partition} of
{true, undefined} ->
- Msg1 = <<"`partition` parameter is mandatory "
- "for queries to this view.">>,
+ Msg1 = <<
+ "`partition` parameter is mandatory "
+ "for queries to this view."
+ >>,
mrverror(Msg1);
{true, _} ->
apply_partition(Args, Partition);
{false, undefined} ->
Args;
{false, Value} when is_binary(Value) ->
- Msg2 = <<"`partition` parameter is not "
- "supported in this design doc">>,
+ Msg2 = <<
+ "`partition` parameter is not "
+ "supported in this design doc"
+ >>,
mrverror(Msg2)
end.
-
apply_limit(ViewPartitioned, Args) ->
Options = Args#mrargs.extra,
IgnorePQLimit = lists:keyfind(ignore_partition_query_limit, 1, Options),
- LimitType = case {ViewPartitioned, IgnorePQLimit} of
- {true, false} -> "partition_query_limit";
- {true, _} -> "query_limit";
- {false, _} -> "query_limit"
- end,
+ LimitType =
+ case {ViewPartitioned, IgnorePQLimit} of
+ {true, false} -> "partition_query_limit";
+ {true, _} -> "query_limit";
+ {false, _} -> "query_limit"
+ end,
- MaxLimit = config:get_integer("query_server_config",
- LimitType, ?MAX_VIEW_LIMIT),
+ MaxLimit = config:get_integer(
+ "query_server_config",
+ LimitType,
+ ?MAX_VIEW_LIMIT
+ ),
% Set the highest limit possible if a user has not
% specified a limit
- Args1 = case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
- true -> Args#mrargs{limit = MaxLimit};
- false -> Args
- end,
+ Args1 =
+ case Args#mrargs.limit == ?MAX_VIEW_LIMIT of
+ true -> Args#mrargs{limit = MaxLimit};
+ false -> Args
+ end,
- if Args1#mrargs.limit =< MaxLimit -> Args1; true ->
- Fmt = "Limit is too large, must not exceed ~p",
- mrverror(io_lib:format(Fmt, [MaxLimit]))
+ if
+ Args1#mrargs.limit =< MaxLimit ->
+ Args1;
+ true ->
+ Fmt = "Limit is too large, must not exceed ~p",
+ mrverror(io_lib:format(Fmt, [MaxLimit]))
end.
-
validate_all_docs_args(Db, Args0) ->
Args = validate_args(Args0),
@@ -465,7 +487,6 @@ validate_all_docs_args(Db, Args0) ->
Args
end.
-
validate_args(Args) ->
GroupLevel = determine_group_level(Args),
Reduce = Args#mrargs.reduce,
@@ -480,11 +501,13 @@ validate_args(Args) ->
end,
case {Args#mrargs.view_type, GroupLevel, Args#mrargs.keys} of
- {red, exact, _} -> ok;
+ {red, exact, _} ->
+ ok;
{red, _, KeyList} when is_list(KeyList) ->
Msg = <<"Multi-key fetchs for reduce views must use `group=true`">>,
mrverror(Msg);
- _ -> ok
+ _ ->
+ ok
end,
case Args#mrargs.keys of
@@ -493,13 +516,18 @@ validate_args(Args) ->
_ -> mrverror(<<"`keys` must be an array of strings.">>)
end,
- case {Args#mrargs.keys, Args#mrargs.start_key,
- Args#mrargs.end_key} of
- {undefined, _, _} -> ok;
- {[], _, _} -> ok;
- {[_|_], undefined, undefined} -> ok;
- _ -> mrverror(<<"`keys` is incompatible with `key`"
- ", `start_key` and `end_key`">>)
+ case {Args#mrargs.keys, Args#mrargs.start_key, Args#mrargs.end_key} of
+ {undefined, _, _} ->
+ ok;
+ {[], _, _} ->
+ ok;
+ {[_ | _], undefined, undefined} ->
+ ok;
+ _ ->
+ mrverror(<<
+ "`keys` is incompatible with `key`"
+ ", `start_key` and `end_key`"
+ >>)
end,
case Args#mrargs.start_key_docid of
@@ -571,17 +599,19 @@ validate_args(Args) ->
{red, _} -> mrverror(<<"`conflicts` is invalid for reduce views.">>)
end,
- SKDocId = case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
- {fwd, undefined} -> <<>>;
- {rev, undefined} -> <<255>>;
- {_, SKDocId1} -> SKDocId1
- end,
+ SKDocId =
+ case {Args#mrargs.direction, Args#mrargs.start_key_docid} of
+ {fwd, undefined} -> <<>>;
+ {rev, undefined} -> <<255>>;
+ {_, SKDocId1} -> SKDocId1
+ end,
- EKDocId = case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
- {fwd, undefined} -> <<255>>;
- {rev, undefined} -> <<>>;
- {_, EKDocId1} -> EKDocId1
- end,
+ EKDocId =
+ case {Args#mrargs.direction, Args#mrargs.end_key_docid} of
+ {fwd, undefined} -> <<255>>;
+ {rev, undefined} -> <<>>;
+ {_, EKDocId1} -> EKDocId1
+ end,
case is_boolean(Args#mrargs.sorted) of
true -> ok;
@@ -595,32 +625,30 @@ validate_args(Args) ->
end,
Args#mrargs{
- start_key_docid=SKDocId,
- end_key_docid=EKDocId,
- group_level=GroupLevel
+ start_key_docid = SKDocId,
+ end_key_docid = EKDocId,
+ group_level = GroupLevel
}.
-
-determine_group_level(#mrargs{group=undefined, group_level=undefined}) ->
+determine_group_level(#mrargs{group = undefined, group_level = undefined}) ->
0;
-determine_group_level(#mrargs{group=false, group_level=undefined}) ->
+determine_group_level(#mrargs{group = false, group_level = undefined}) ->
0;
-determine_group_level(#mrargs{group=false, group_level=Level}) when Level > 0 ->
+determine_group_level(#mrargs{group = false, group_level = Level}) when Level > 0 ->
mrverror(<<"Can't specify group=false and group_level>0 at the same time">>);
-determine_group_level(#mrargs{group=true, group_level=undefined}) ->
+determine_group_level(#mrargs{group = true, group_level = undefined}) ->
exact;
-determine_group_level(#mrargs{group_level=GroupLevel}) ->
+determine_group_level(#mrargs{group_level = GroupLevel}) ->
GroupLevel.
-apply_partition(#mrargs{keys=[{p, _, _} | _]} = Args, _Partition) ->
- Args; % already applied
-
-apply_partition(#mrargs{keys=Keys} = Args, Partition) when Keys /= undefined ->
- Args#mrargs{keys=[{p, Partition, K} || K <- Keys]};
-
-apply_partition(#mrargs{start_key={p, _, _}, end_key={p, _, _}} = Args, _Partition) ->
- Args; % already applied.
-
+apply_partition(#mrargs{keys = [{p, _, _} | _]} = Args, _Partition) ->
+ % already applied
+ Args;
+apply_partition(#mrargs{keys = Keys} = Args, Partition) when Keys /= undefined ->
+ Args#mrargs{keys = [{p, Partition, K} || K <- Keys]};
+apply_partition(#mrargs{start_key = {p, _, _}, end_key = {p, _, _}} = Args, _Partition) ->
+ % already applied.
+ Args;
apply_partition(Args, Partition) ->
#mrargs{
direction = Dir,
@@ -628,13 +656,22 @@ apply_partition(Args, Partition) ->
end_key = EndKey
} = Args,
- {DefSK, DefEK} = case Dir of
- fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
- rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
- end,
+ {DefSK, DefEK} =
+ case Dir of
+ fwd -> {?LOWEST_KEY, ?HIGHEST_KEY};
+ rev -> {?HIGHEST_KEY, ?LOWEST_KEY}
+ end,
- SK0 = if StartKey /= undefined -> StartKey; true -> DefSK end,
- EK0 = if EndKey /= undefined -> EndKey; true -> DefEK end,
+ SK0 =
+ if
+ StartKey /= undefined -> StartKey;
+ true -> DefSK
+ end,
+ EK0 =
+ if
+ EndKey /= undefined -> EndKey;
+ true -> DefEK
+ end,
Args#mrargs{
start_key = {p, Partition, SK0},
@@ -650,92 +687,101 @@ apply_all_docs_partition(#mrargs{} = Args, Partition) ->
end_key = EndKey
} = Args,
- {DefSK, DefEK} = case Dir of
- fwd ->
- {
- couch_partition:start_key(Partition),
- couch_partition:end_key(Partition)
- };
- rev ->
- {
- couch_partition:end_key(Partition),
- couch_partition:start_key(Partition)
- }
- end,
+ {DefSK, DefEK} =
+ case Dir of
+ fwd ->
+ {
+ couch_partition:start_key(Partition),
+ couch_partition:end_key(Partition)
+ };
+ rev ->
+ {
+ couch_partition:end_key(Partition),
+ couch_partition:start_key(Partition)
+ }
+ end,
- SK0 = if StartKey == undefined -> DefSK; true -> StartKey end,
- EK0 = if EndKey == undefined -> DefEK; true -> EndKey end,
+ SK0 =
+ if
+ StartKey == undefined -> DefSK;
+ true -> StartKey
+ end,
+ EK0 =
+ if
+ EndKey == undefined -> DefEK;
+ true -> EndKey
+ end,
- {SK1, EK1} = case Dir of
- fwd -> {?HIGHEST(DefSK, SK0), ?LOWEST(DefEK, EK0)};
- rev -> {?LOWEST(DefSK, SK0), ?HIGHEST(DefEK, EK0)}
- end,
+ {SK1, EK1} =
+ case Dir of
+ fwd -> {?HIGHEST(DefSK, SK0), ?LOWEST(DefEK, EK0)};
+ rev -> {?LOWEST(DefSK, SK0), ?HIGHEST(DefEK, EK0)}
+ end,
Args#mrargs{
start_key = SK1,
end_key = EK1
}.
-
-check_range(#mrargs{start_key=undefined}, _Cmp) ->
+check_range(#mrargs{start_key = undefined}, _Cmp) ->
ok;
-check_range(#mrargs{end_key=undefined}, _Cmp) ->
+check_range(#mrargs{end_key = undefined}, _Cmp) ->
ok;
-check_range(#mrargs{start_key=K, end_key=K}, _Cmp) ->
+check_range(#mrargs{start_key = K, end_key = K}, _Cmp) ->
ok;
check_range(Args, Cmp) ->
#mrargs{
- direction=Dir,
- start_key=SK,
- start_key_docid=SKD,
- end_key=EK,
- end_key_docid=EKD
+ direction = Dir,
+ start_key = SK,
+ start_key_docid = SKD,
+ end_key = EK,
+ end_key_docid = EKD
} = Args,
case {Dir, Cmp({SK, SKD}, {EK, EKD})} of
{fwd, false} ->
- throw({query_parse_error,
- <<"No rows can match your key range, reverse your ",
- "start_key and end_key or set descending=true">>});
+ throw(
+ {query_parse_error,
+ <<"No rows can match your key range, reverse your ",
+ "start_key and end_key or set descending=true">>}
+ );
{rev, true} ->
- throw({query_parse_error,
- <<"No rows can match your key range, reverse your ",
- "start_key and end_key or set descending=false">>});
- _ -> ok
+ throw(
+ {query_parse_error,
+ <<"No rows can match your key range, reverse your ",
+ "start_key and end_key or set descending=false">>}
+ );
+ _ ->
+ ok
end.
-
view_cmp({_Nth, _Lang, View}) ->
view_cmp(View);
view_cmp(View) ->
fun(A, B) -> couch_btree:less(View#mrview.btree, A, B) end.
-
make_header(State) ->
#mrst{
- update_seq=Seq,
- purge_seq=PurgeSeq,
- id_btree=IdBtree,
- views=Views
+ update_seq = Seq,
+ purge_seq = PurgeSeq,
+ id_btree = IdBtree,
+ views = Views
} = State,
#mrheader{
- seq=Seq,
- purge_seq=PurgeSeq,
- id_btree_state=get_btree_state(IdBtree),
- view_states=[make_view_state(V) || V <- Views]
+ seq = Seq,
+ purge_seq = PurgeSeq,
+ id_btree_state = get_btree_state(IdBtree),
+ view_states = [make_view_state(V) || V <- Views]
}.
-
index_file(DbName, Sig) ->
FileName = couch_index_util:hexsig(Sig) ++ ".view",
couch_index_util:index_file(mrview, DbName, FileName).
-
compaction_file(DbName, Sig) ->
FileName = couch_index_util:hexsig(Sig) ++ ".compact.view",
couch_index_util:index_file(mrview, DbName, FileName).
-
open_file(FName) ->
case couch_file:open(FName, [nologifmissing]) of
{ok, Fd} -> {ok, Fd};
@@ -743,20 +789,16 @@ open_file(FName) ->
Error -> Error
end.
-
delete_files(DbName, Sig) ->
delete_index_file(DbName, Sig),
delete_compaction_file(DbName, Sig).
-
delete_index_file(DbName, Sig) ->
delete_file(index_file(DbName, Sig)).
-
delete_compaction_file(DbName, Sig) ->
delete_file(compaction_file(DbName, Sig)).
-
delete_file(FName) ->
case filelib:is_file(FName) of
true ->
@@ -766,87 +808,91 @@ delete_file(FName) ->
ok
end.
-
-reset_index(Db, Fd, #mrst{sig=Sig}=State) ->
+reset_index(Db, Fd, #mrst{sig = Sig} = State) ->
ok = couch_file:truncate(Fd, 0),
ok = couch_file:write_header(Fd, {Sig, nil}),
init_state(Db, Fd, reset_state(State), nil).
-
reset_state(State) ->
State#mrst{
- fd=nil,
- qserver=nil,
- update_seq=0,
- id_btree=nil,
- views=[View#mrview{btree=nil} || View <- State#mrst.views]
+ fd = nil,
+ qserver = nil,
+ update_seq = 0,
+ id_btree = nil,
+ views = [View#mrview{btree = nil} || View <- State#mrst.views]
}.
-
all_docs_key_opts(#mrargs{extra = Extra} = Args) ->
all_docs_key_opts(Args, Extra).
-all_docs_key_opts(#mrargs{keys=undefined}=Args, Extra) ->
- all_docs_key_opts(Args#mrargs{keys=[]}, Extra);
-all_docs_key_opts(#mrargs{keys=[], direction=Dir}=Args, Extra) ->
+all_docs_key_opts(#mrargs{keys = undefined} = Args, Extra) ->
+ all_docs_key_opts(Args#mrargs{keys = []}, Extra);
+all_docs_key_opts(#mrargs{keys = [], direction = Dir} = Args, Extra) ->
[[{dir, Dir}] ++ ad_skey_opts(Args) ++ ad_ekey_opts(Args) ++ Extra];
-all_docs_key_opts(#mrargs{keys=Keys, direction=Dir}=Args, Extra) ->
- lists:map(fun(K) ->
- [{dir, Dir}]
- ++ ad_skey_opts(Args#mrargs{start_key=K})
- ++ ad_ekey_opts(Args#mrargs{end_key=K})
- ++ Extra
- end, Keys).
-
+all_docs_key_opts(#mrargs{keys = Keys, direction = Dir} = Args, Extra) ->
+ lists:map(
+ fun(K) ->
+ [{dir, Dir}] ++
+ ad_skey_opts(Args#mrargs{start_key = K}) ++
+ ad_ekey_opts(Args#mrargs{end_key = K}) ++
+ Extra
+ end,
+ Keys
+ ).
-ad_skey_opts(#mrargs{start_key=SKey}) when is_binary(SKey) ->
+ad_skey_opts(#mrargs{start_key = SKey}) when is_binary(SKey) ->
[{start_key, SKey}];
-ad_skey_opts(#mrargs{start_key_docid=SKeyDocId}) ->
+ad_skey_opts(#mrargs{start_key_docid = SKeyDocId}) ->
[{start_key, SKeyDocId}].
-
-ad_ekey_opts(#mrargs{end_key=EKey}=Args) when is_binary(EKey) ->
- Type = if Args#mrargs.inclusive_end -> end_key; true -> end_key_gt end,
+ad_ekey_opts(#mrargs{end_key = EKey} = Args) when is_binary(EKey) ->
+ Type =
+ if
+ Args#mrargs.inclusive_end -> end_key;
+ true -> end_key_gt
+ end,
[{Type, EKey}];
-ad_ekey_opts(#mrargs{end_key_docid=EKeyDocId}=Args) ->
- Type = if Args#mrargs.inclusive_end -> end_key; true -> end_key_gt end,
+ad_ekey_opts(#mrargs{end_key_docid = EKeyDocId} = Args) ->
+ Type =
+ if
+ Args#mrargs.inclusive_end -> end_key;
+ true -> end_key_gt
+ end,
[{Type, EKeyDocId}].
-
key_opts(Args) ->
key_opts(Args, []).
-key_opts(#mrargs{keys=undefined, direction=Dir}=Args, Extra) ->
+key_opts(#mrargs{keys = undefined, direction = Dir} = Args, Extra) ->
[[{dir, Dir}] ++ skey_opts(Args) ++ ekey_opts(Args) ++ Extra];
-key_opts(#mrargs{keys=Keys, direction=Dir}=Args, Extra) ->
- lists:map(fun(K) ->
- [{dir, Dir}]
- ++ skey_opts(Args#mrargs{start_key=K})
- ++ ekey_opts(Args#mrargs{end_key=K})
- ++ Extra
- end, Keys).
-
+key_opts(#mrargs{keys = Keys, direction = Dir} = Args, Extra) ->
+ lists:map(
+ fun(K) ->
+ [{dir, Dir}] ++
+ skey_opts(Args#mrargs{start_key = K}) ++
+ ekey_opts(Args#mrargs{end_key = K}) ++
+ Extra
+ end,
+ Keys
+ ).
-skey_opts(#mrargs{start_key=undefined}) ->
+skey_opts(#mrargs{start_key = undefined}) ->
[];
-skey_opts(#mrargs{start_key=SKey, start_key_docid=SKeyDocId}) ->
+skey_opts(#mrargs{start_key = SKey, start_key_docid = SKeyDocId}) ->
[{start_key, {SKey, SKeyDocId}}].
-
-ekey_opts(#mrargs{end_key=undefined}) ->
+ekey_opts(#mrargs{end_key = undefined}) ->
[];
-ekey_opts(#mrargs{end_key=EKey, end_key_docid=EKeyDocId}=Args) ->
+ekey_opts(#mrargs{end_key = EKey, end_key_docid = EKeyDocId} = Args) ->
case Args#mrargs.inclusive_end of
true -> [{end_key, {EKey, EKeyDocId}}];
false -> [{end_key_gt, {EKey, reverse_key_default(EKeyDocId)}}]
end.
-
reverse_key_default(<<>>) -> <<255>>;
reverse_key_default(<<255>>) -> <<>>;
reverse_key_default(Key) -> Key.
-
reduced_external_size(Tree) ->
case couch_btree:full_reduce(Tree) of
{ok, {_, _, Size}} -> Size;
@@ -854,35 +900,31 @@ reduced_external_size(Tree) ->
{ok, {_, _}} -> 0
end.
-
calculate_external_size(Views) ->
SumFun = fun
- (#mrview{btree=nil}, Acc) ->
+ (#mrview{btree = nil}, Acc) ->
Acc;
- (#mrview{btree=Bt}, Acc) ->
+ (#mrview{btree = Bt}, Acc) ->
Acc + reduced_external_size(Bt)
end,
{ok, lists:foldl(SumFun, 0, Views)}.
-
calculate_active_size(Views) ->
FoldFun = fun
- (#mrview{btree=nil}, Acc) ->
+ (#mrview{btree = nil}, Acc) ->
Acc;
- (#mrview{btree=Bt}, Acc) ->
+ (#mrview{btree = Bt}, Acc) ->
Acc + couch_btree:size(Bt)
end,
{ok, lists:foldl(FoldFun, 0, Views)}.
-
detuple_kvs([], Acc) ->
lists:reverse(Acc);
detuple_kvs([KV | Rest], Acc) ->
- {{Key,Id},Value} = KV,
+ {{Key, Id}, Value} = KV,
NKV = [[Key, Id], Value],
detuple_kvs(Rest, [NKV | Acc]).
-
expand_dups([], Acc) ->
lists:reverse(Acc);
expand_dups([{Key, {dups, Vals}} | Rest], Acc) ->
@@ -891,56 +933,49 @@ expand_dups([{Key, {dups, Vals}} | Rest], Acc) ->
expand_dups([KV | Rest], Acc) ->
expand_dups(Rest, [KV | Acc]).
-
-maybe_load_doc(_Db, _DI, #mrargs{include_docs=false}) ->
+maybe_load_doc(_Db, _DI, #mrargs{include_docs = false}) ->
[];
-maybe_load_doc(Db, #doc_info{}=DI, #mrargs{conflicts=true, doc_options=Opts}) ->
+maybe_load_doc(Db, #doc_info{} = DI, #mrargs{conflicts = true, doc_options = Opts}) ->
doc_row(couch_index_util:load_doc(Db, DI, [conflicts]), Opts);
-maybe_load_doc(Db, #doc_info{}=DI, #mrargs{doc_options=Opts}) ->
+maybe_load_doc(Db, #doc_info{} = DI, #mrargs{doc_options = Opts}) ->
doc_row(couch_index_util:load_doc(Db, DI, []), Opts).
-
-maybe_load_doc(_Db, _Id, _Val, #mrargs{include_docs=false}) ->
+maybe_load_doc(_Db, _Id, _Val, #mrargs{include_docs = false}) ->
[];
-maybe_load_doc(Db, Id, Val, #mrargs{conflicts=true, doc_options=Opts}) ->
+maybe_load_doc(Db, Id, Val, #mrargs{conflicts = true, doc_options = Opts}) ->
doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), [conflicts]), Opts);
-maybe_load_doc(Db, Id, Val, #mrargs{doc_options=Opts}) ->
+maybe_load_doc(Db, Id, Val, #mrargs{doc_options = Opts}) ->
doc_row(couch_index_util:load_doc(Db, docid_rev(Id, Val), []), Opts).
-
doc_row(null, _Opts) ->
[{doc, null}];
doc_row(Doc, Opts) ->
[{doc, couch_doc:to_json_obj(Doc, Opts)}].
-
docid_rev(Id, {Props}) ->
DocId = couch_util:get_value(<<"_id">>, Props, Id),
- Rev = case couch_util:get_value(<<"_rev">>, Props, nil) of
- nil -> nil;
- Rev0 -> couch_doc:parse_rev(Rev0)
- end,
+ Rev =
+ case couch_util:get_value(<<"_rev">>, Props, nil) of
+ nil -> nil;
+ Rev0 -> couch_doc:parse_rev(Rev0)
+ end,
{DocId, Rev};
docid_rev(Id, _) ->
{Id, nil}.
-
index_of(Key, List) ->
index_of(Key, List, 1).
-
index_of(_, [], _) ->
throw({error, missing_named_view});
index_of(Key, [Key | _], Idx) ->
Idx;
index_of(Key, [_ | Rest], Idx) ->
- index_of(Key, Rest, Idx+1).
-
+ index_of(Key, Rest, Idx + 1).
mrverror(Mesg) ->
throw({query_parse_error, Mesg}).
-
%% Updates 2.x view files to 3.x or later view files
%% transparently, the first time the 2.x view file is opened by
%% 3.x or later.
@@ -960,11 +995,11 @@ maybe_update_index_file(State) ->
% open in read-only mode so we don't create
% the file if it doesn't exist.
case file:open(NewIndexFile, [read, raw]) of
- {ok, Fd_Read} ->
- % the new index file exists, there is nothing to do here.
- file:close(Fd_Read);
- _Error ->
- update_index_file(State)
+ {ok, Fd_Read} ->
+ % the new index file exists, there is nothing to do here.
+ file:close(Fd_Read);
+ _Error ->
+ update_index_file(State)
end.
update_index_file(State) ->
@@ -975,26 +1010,35 @@ update_index_file(State) ->
% If we have an old index, rename it to the new position.
case file:read_file_info(IndexFile) of
- {ok, _FileInfo} ->
- % Crash if the rename fails for any reason.
- % If the target exists, e.g. the next request will find the
- % new file and we are good. We might need to catch this
- % further up to avoid a full server crash.
- NewIndexFile = index_file(DbName, State#mrst.sig),
- couch_log:notice("Attempting to update legacy view index file"
- " from ~p to ~s", [IndexFile, NewIndexFile]),
- ok = filelib:ensure_dir(NewIndexFile),
- ok = file:rename(IndexFile, NewIndexFile),
- couch_log:notice("Successfully updated legacy view index file"
- " ~s", [IndexFile]),
- Sig;
- {error, enoent} ->
- % Ignore missing index file
- ok;
- {error, Reason} ->
- couch_log:error("Failed to update legacy view index file"
- " ~s : ~s", [IndexFile, file:format_error(Reason)]),
- ok
+ {ok, _FileInfo} ->
+ % Crash if the rename fails for any reason.
+ % If the target exists, e.g. the next request will find the
+ % new file and we are good. We might need to catch this
+ % further up to avoid a full server crash.
+ NewIndexFile = index_file(DbName, State#mrst.sig),
+ couch_log:notice(
+ "Attempting to update legacy view index file"
+ " from ~p to ~s",
+ [IndexFile, NewIndexFile]
+ ),
+ ok = filelib:ensure_dir(NewIndexFile),
+ ok = file:rename(IndexFile, NewIndexFile),
+ couch_log:notice(
+ "Successfully updated legacy view index file"
+ " ~s",
+ [IndexFile]
+ ),
+ Sig;
+ {error, enoent} ->
+ % Ignore missing index file
+ ok;
+ {error, Reason} ->
+ couch_log:error(
+ "Failed to update legacy view index file"
+ " ~s : ~s",
+ [IndexFile, file:format_error(Reason)]
+ ),
+ ok
end.
sig_vsn_2x(State) ->
@@ -1010,21 +1054,21 @@ sig_vsn_2x(State) ->
couch_hash:md5_hash(term_to_binary(SigInfo)).
old_view_format(View, SI, KSI) ->
-{
- mrview,
- View#mrview.id_num,
- View#mrview.update_seq,
- View#mrview.purge_seq,
- View#mrview.map_names,
- View#mrview.reduce_funs,
- View#mrview.def,
- View#mrview.btree,
- nil,
- nil,
- SI,
- KSI,
- View#mrview.options
-}.
+ {
+ mrview,
+ View#mrview.id_num,
+ View#mrview.update_seq,
+ View#mrview.purge_seq,
+ View#mrview.map_names,
+ View#mrview.reduce_funs,
+ View#mrview.def,
+ View#mrview.btree,
+ nil,
+ nil,
+ SI,
+ KSI,
+ View#mrview.options
+ }.
maybe_update_header(#mrheader{} = Header) ->
Header;
@@ -1050,7 +1094,6 @@ make_view_state({BTState, _SeqBTState, _KSeqBTState, UpdateSeq, PurgeSeq}) ->
make_view_state(nil) ->
{nil, 0, 0}.
-
get_key_btree_state(ViewState) ->
element(1, ViewState).
@@ -1066,18 +1109,14 @@ get_count(Reduction) ->
get_user_reds(Reduction) ->
element(2, Reduction).
-
% This is for backwards compatibility for seq btree reduces
get_external_size_reds(Reduction) when is_integer(Reduction) ->
0;
-
get_external_size_reds(Reduction) when tuple_size(Reduction) == 2 ->
0;
-
get_external_size_reds(Reduction) when tuple_size(Reduction) == 3 ->
element(3, Reduction).
-
make_reduce_fun(Lang, ReduceFuns) ->
FunSrcs = [FunSrc || {_, FunSrc} <- ReduceFuns],
fun
@@ -1093,20 +1132,21 @@ make_reduce_fun(Lang, ReduceFuns) ->
ExtAcc = ExtAcc0 + get_external_size_reds(Red),
{CountsAcc, URedsAcc, ExtAcc}
end,
- {Counts, UReds, ExternalSize} = lists:foldl(ExtractFun,
- {0, [], 0}, Reds),
+ {Counts, UReds, ExternalSize} = lists:foldl(
+ ExtractFun,
+ {0, [], 0},
+ Reds
+ ),
{ok, Result} = couch_query_servers:rereduce(Lang, FunSrcs, UReds),
{Counts, Result, ExternalSize}
end.
-
maybe_define_less_fun(#mrview{options = Options}) ->
case couch_util:get_value(<<"collation">>, Options) of
<<"raw">> -> undefined;
_ -> fun couch_ejson_compare:less_json_ids/2
end.
-
count_reduce(reduce, KVs) ->
CountFun = fun
({_, {dups, Vals}}, Acc) -> Acc + length(Vals);
@@ -1121,7 +1161,6 @@ count_reduce(rereduce, Reds) ->
Count = lists:foldl(CountFun, 0, Reds),
{Count, []}.
-
make_user_reds_reduce_fun(Lang, ReduceFuns, NthRed) ->
LPad = lists:duplicate(NthRed - 1, []),
RPad = lists:duplicate(length(ReduceFuns) - NthRed, []),
@@ -1140,18 +1179,15 @@ make_user_reds_reduce_fun(Lang, ReduceFuns, NthRed) ->
{0, LPad ++ Result ++ RPad}
end.
-
get_btree_state(nil) ->
nil;
get_btree_state(#btree{} = Btree) ->
couch_btree:get_state(Btree).
-
-extract_view_reduce({red, {N, _Lang, #mrview{reduce_funs=Reds}}, _Ref}) ->
+extract_view_reduce({red, {N, _Lang, #mrview{reduce_funs = Reds}}, _Ref}) ->
{_Name, FunSrc} = lists:nth(N, Reds),
FunSrc.
-
get_view_keys({Props}) ->
case couch_util:get_value(<<"keys">>, Props) of
undefined ->
@@ -1162,7 +1198,6 @@ get_view_keys({Props}) ->
throw({bad_request, "`keys` member must be an array."})
end.
-
get_view_queries({Props}) ->
case couch_util:get_value(<<"queries">>, Props) of
undefined ->
@@ -1173,8 +1208,11 @@ get_view_queries({Props}) ->
throw({bad_request, "`queries` member must be an array."})
end.
-
kv_external_size(KVList, Reduction) ->
- lists:foldl(fun([[Key, _], Value], Acc) ->
- ?term_size(Key) + ?term_size(Value) + Acc
- end, ?term_size(Reduction), KVList).
+ lists:foldl(
+ fun([[Key, _], Value], Acc) ->
+ ?term_size(Key) + ?term_size(Value) + Acc
+ end,
+ ?term_size(Reduction),
+ KVList
+ ).
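
The hunks above for this module repeat erlfmt's two most common rewrites: a `case` or `if` expression bound to a variable moves onto its own indented lines under the `=`, and record fields gain spaces around `=`. A minimal sketch of that layout on a standalone module, with a hypothetical module and record not taken from this diff:

```
%% Hypothetical module written in the layout erlfmt produces.
-module(erlfmt_style_example).
-export([limit/2]).

-record(args, {limit = 0, max = 100}).

limit(Requested, Partitioned) ->
    %% The right-hand side of `=` starts on the next line when it is a
    %% multi-line case expression.
    Max =
        case Partitioned of
            true -> 50;
            false -> 100
        end,
    Args = #args{limit = Requested, max = Max},
    %% Multi-branch `if` expressions get one condition per line.
    if
        Args#args.limit =< Args#args.max -> {ok, Args};
        true -> {error, limit_too_large}
    end.
```
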
diff --git a/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
index bf8eb7e5b..1a81d4f0a 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_all_docs_tests.erl
@@ -17,8 +17,6 @@
-define(TIMEOUT, 1000).
-
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
Db.
@@ -28,16 +26,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
all_docs_test_() ->
{
"_all_docs view tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_query/1,
fun should_query_with_range/1,
@@ -50,47 +49,50 @@ all_docs_test_() ->
}
}.
-
should_query(Db) ->
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 0}]},
- mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>),
- mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>),
- mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>),
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
- mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>),
- mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>),
- mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
- mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 11}, {offset, 0}]},
+ mk_row(<<"1">>, <<"1-08d53a5760b95fce6df2e2c5b008be39">>),
+ mk_row(<<"10">>, <<"1-a05b6ea2bc0243949f103d5b4f15f71e">>),
+ mk_row(<<"2">>, <<"1-b57c77a9e6f7574ca6469f0d6dcd78bb">>),
+ mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
+ mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
+ mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
+ mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
+ mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>),
+ mk_row(<<"8">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>),
+ mk_row(<<"9">>, <<"1-558c8487d9aee25399a91b5d31d90fe2">>),
+ mk_row(<<"_design/bar">>, <<"1-a44e1dd1994a7717bf89c894ebd1f081">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_range(Db) ->
Result = run_query(Db, [{start_key, <<"3">>}, {end_key, <<"5">>}]),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 3}]},
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 11}, {offset, 3}]},
+ mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>),
+ mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
+ mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_range_rev(Db) ->
Result = run_query(Db, [
{direction, rev},
- {start_key, <<"5">>}, {end_key, <<"3">>},
+ {start_key, <<"5">>},
+ {end_key, <<"3">>},
{inclusive_end, true}
]),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 5}]},
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
- mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 11}, {offset, 5}]},
+ mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
+ mk_row(<<"4">>, <<"1-fcaf5852c08ffb239ac8ce16c409f253">>),
+ mk_row(<<"3">>, <<"1-7fbf84d56f8017880974402d60f5acd6">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_limit_and_skip(Db) ->
@@ -99,12 +101,13 @@ should_query_with_limit_and_skip(Db) ->
{limit, 3},
{skip, 3}
]),
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 5}]},
- mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
- mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
- mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 11}, {offset, 5}]},
+ mk_row(<<"5">>, <<"1-aaac5d460fd40f9286e57b9bf12e23d2">>),
+ mk_row(<<"6">>, <<"1-aca21c2e7bc5f8951424fcfc5d1209d8">>),
+ mk_row(<<"7">>, <<"1-4374aeec17590d82f16e70f318116ad9">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_include_docs(Db) ->
@@ -113,26 +116,28 @@ should_query_with_include_docs(Db) ->
{end_key, <<"8">>},
{include_docs, true}
]),
- Doc = {[
- {<<"_id">>,<<"8">>},
- {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
- {<<"val">>, 8}
- ]},
+ Doc =
+ {[
+ {<<"_id">>, <<"8">>},
+ {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+ {<<"val">>, 8}
+ ]},
Val = {[{rev, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>}]},
- Expect = {ok, [
- {meta, [{total, 11}, {offset, 8}]},
- {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 11}, {offset, 8}]},
+ {row, [{id, <<"8">>}, {key, <<"8">>}, {value, Val}, {doc, Doc}]}
+ ]},
?_assertEqual(Expect, Result).
should_query_empty_views(Db) ->
Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
- Expect = {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 0}, {offset, 0}]}
+ ]},
?_assertEqual(Expect, Result).
-
mk_row(Id, Rev) ->
{row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
diff --git a/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
index 0d2afbe3c..c00b97b33 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_collation_tests.erl
@@ -56,10 +56,8 @@
% Values with depth > 10 trigger the erlang collation fallback in couch_ejson_compare
{[{<<"x">>, [[[[[[[[[[[<<"y">>]]]]]]]]]]]}]}
-
]).
-
setup() ->
{ok, Db1} = couch_mrview_test_util:new_db(?tempdb(), map),
Docs = [couch_mrview_test_util:ddoc(red) | make_docs()],
@@ -71,16 +69,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
collation_test_() ->
{
"Collation tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_collate_fwd/1,
fun should_collate_rev/1,
@@ -96,7 +95,6 @@ collation_test_() ->
}
}.
-
should_collate_fwd(Db) ->
{ok, Results} = run_query(Db, []),
Expect = [{meta, [{total, length(?VALUES)}, {offset, 0}]}] ++ rows(),
@@ -108,100 +106,127 @@ should_collate_rev(Db) ->
?_assertEquiv(Expect, Results).
should_collate_range_(Db) ->
- Index = lists:zip(lists:seq(0, length(?VALUES)-1), ?VALUES),
- lists:map(fun(V) ->
- {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
- Expect = [
- {meta, [{total, length(?VALUES)}, find_offset(Index, V)]} |
- find_matching_rows(Index, V)
- ],
- ?_assertEquiv(Expect, Results)
- end, ?VALUES).
+ Index = lists:zip(lists:seq(0, length(?VALUES) - 1), ?VALUES),
+ lists:map(
+ fun(V) ->
+ {ok, Results} = run_query(Db, [{start_key, V}, {end_key, V}]),
+ Expect = [
+ {meta, [{total, length(?VALUES)}, find_offset(Index, V)]}
+ | find_matching_rows(Index, V)
+ ],
+ ?_assertEquiv(Expect, Results)
+ end,
+ ?VALUES
+ ).
find_offset(Index, Value) ->
- [{Offset, _} | _] = lists:dropwhile(fun({_, V}) ->
- couch_ejson_compare:less(Value, V) =/= 0
- end, Index),
+ [{Offset, _} | _] = lists:dropwhile(
+ fun({_, V}) ->
+ couch_ejson_compare:less(Value, V) =/= 0
+ end,
+ Index
+ ),
{offset, Offset}.
find_matching_rows(Index, Value) ->
- Matches = lists:filter(fun({_, V}) ->
- couch_ejson_compare:less(Value, V) =:= 0
- end, Index),
- lists:map(fun({Id, V}) ->
- {row, [{id, list_to_binary(integer_to_list(Id))}, {key, V}, {value, 0}]}
- end, Matches).
+ Matches = lists:filter(
+ fun({_, V}) ->
+ couch_ejson_compare:less(Value, V) =:= 0
+ end,
+ Index
+ ),
+ lists:map(
+ fun({Id, V}) ->
+ {row, [{id, list_to_binary(integer_to_list(Id))}, {key, V}, {value, 0}]}
+ end,
+ Matches
+ ).
should_collate_with_inclusive_end_fwd(Db) ->
Opts = [{end_key, <<"b">>}, {inclusive_end, true}],
{ok, Rows0} = run_query(Db, Opts),
LastRow = lists:last(Rows0),
- Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+ Expect = {row, [{id, <<"10">>}, {key, <<"b">>}, {value, 0}]},
?_assertEqual(Expect, LastRow).
should_collate_with_inclusive_end_rev(Db) ->
Opts = [{end_key, <<"b">>}, {inclusive_end, true}, {direction, rev}],
{ok, Rows} = run_query(Db, Opts),
LastRow = lists:last(Rows),
- Expect = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+ Expect = {row, [{id, <<"10">>}, {key, <<"b">>}, {value, 0}]},
?_assertEqual(Expect, LastRow).
should_collate_without_inclusive_end_fwd(Db) ->
Opts = [{end_key, <<"b">>}, {inclusive_end, false}],
{ok, Rows0} = run_query(Db, Opts),
LastRow = lists:last(Rows0),
- Expect = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
+ Expect = {row, [{id, <<"9">>}, {key, <<"aa">>}, {value, 0}]},
?_assertEqual(Expect, LastRow).
should_collate_without_inclusive_end_rev(Db) ->
Opts = [{end_key, <<"b">>}, {inclusive_end, false}, {direction, rev}],
{ok, Rows} = run_query(Db, Opts),
LastRow = lists:last(Rows),
- Expect = {row, [{id,<<"11">>}, {key,<<"B">>}, {value,0}]},
+ Expect = {row, [{id, <<"11">>}, {key, <<"B">>}, {value, 0}]},
?_assertEqual(Expect, LastRow).
should_collate_with_endkey_docid(Db) ->
?_test(begin
{ok, Rows0} = run_query(Db, [
- {end_key, <<"b">>}, {end_key_docid, <<"10">>},
+ {end_key, <<"b">>},
+ {end_key_docid, <<"10">>},
{inclusive_end, false}
]),
Result0 = lists:last(Rows0),
- Expect0 = {row, [{id,<<"9">>}, {key,<<"aa">>}, {value,0}]},
+ Expect0 = {row, [{id, <<"9">>}, {key, <<"aa">>}, {value, 0}]},
?assertEqual(Expect0, Result0),
{ok, Rows1} = run_query(Db, [
- {end_key, <<"b">>}, {end_key_docid, <<"11">>},
+ {end_key, <<"b">>},
+ {end_key_docid, <<"11">>},
{inclusive_end, false}
]),
Result1 = lists:last(Rows1),
- Expect1 = {row, [{id,<<"10">>}, {key,<<"b">>}, {value,0}]},
+ Expect1 = {row, [{id, <<"10">>}, {key, <<"b">>}, {value, 0}]},
?assertEqual(Expect1, Result1)
end).
should_use_collator_for_reduce_grouping(Db) ->
- UniqueKeys = lists:usort(fun(A, B) ->
- not couch_ejson_compare:less_json(B, A)
- end, ?VALUES),
- {ok, [{meta,_} | Rows]} = reduce_query(Db, [{group_level, exact}]),
+ UniqueKeys = lists:usort(
+ fun(A, B) ->
+ not couch_ejson_compare:less_json(B, A)
+ end,
+ ?VALUES
+ ),
+ {ok, [{meta, _} | Rows]} = reduce_query(Db, [{group_level, exact}]),
?_assertEqual(length(UniqueKeys), length(Rows)).
make_docs() ->
- {Docs, _} = lists:foldl(fun(V, {Docs0, Count}) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary(integer_to_list(Count))},
- {<<"foo">>, V}
- ]}),
- {[Doc | Docs0], Count+1}
- end, {[], 0}, ?VALUES),
+ {Docs, _} = lists:foldl(
+ fun(V, {Docs0, Count}) ->
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, list_to_binary(integer_to_list(Count))},
+ {<<"foo">>, V}
+ ]}
+ ),
+ {[Doc | Docs0], Count + 1}
+ end,
+ {[], 0},
+ ?VALUES
+ ),
Docs.
rows() ->
- {Rows, _} = lists:foldl(fun(V, {Rows0, Count}) ->
- Id = list_to_binary(integer_to_list(Count)),
- Row = {row, [{id, Id}, {key, V}, {value, 0}]},
- {[Row | Rows0], Count+1}
- end, {[], 0}, ?VALUES),
+ {Rows, _} = lists:foldl(
+ fun(V, {Rows0, Count}) ->
+ Id = list_to_binary(integer_to_list(Count)),
+ Row = {row, [{id, Id}, {key, V}, {value, 0}]},
+ {[Row | Rows0], Count + 1}
+ end,
+ {[], 0},
+ ?VALUES
+ ),
lists:reverse(Rows).
run_query(Db, Opts) ->
diff --git a/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
index 7664becdc..df035c649 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_compact_tests.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 1000).
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 1000),
ok = meck:new(couch_mrview_compactor, [passthrough]),
@@ -29,16 +28,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
compaction_test_() ->
{
"Compaction tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_swap/1,
fun should_remove/1
@@ -47,7 +47,6 @@ compaction_test_() ->
}
}.
-
should_swap(Db) ->
?_test(begin
couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
@@ -57,9 +56,12 @@ should_swap(Db) ->
{'DOWN', MonRef, process, _, _} -> ok
after ?TIMEOUT ->
erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, "compaction failed"}]})
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "compaction failed"}
+ ]}
+ )
end,
QPid ! {self(), continue},
receive
@@ -67,13 +69,15 @@ should_swap(Db) ->
?assertEqual(1000, Count)
after ?TIMEOUT ->
erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, "query failed"}]})
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "query failed"}
+ ]}
+ )
end
end).
-
should_remove(Db) ->
?_test(begin
DDoc = <<"_design/bar">>,
@@ -87,29 +91,40 @@ should_remove(Db) ->
receive
{'DOWN', MonRef, process, _, crash} ->
meck:wait(couch_mrview_compactor, remove_compacted, '_', 100),
- ?assertEqual(1, meck:num_calls(
- couch_mrview_compactor, remove_compacted, '_', IndexPid)),
+ ?assertEqual(
+ 1,
+ meck:num_calls(
+ couch_mrview_compactor, remove_compacted, '_', IndexPid
+ )
+ ),
?assert(is_process_alive(IndexPid)),
?assert(is_process_alive(CompactorPid))
after ?TIMEOUT ->
erlang:error(
{assertion_failed, [
- {module, ?MODULE}, {line, ?LINE},
- {reason, "compaction didn't exit :/"}]})
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "compaction didn't exit :/"}
+ ]}
+ )
end
end).
-
start_query(Db) ->
Self = self(),
Pid = spawn(fun() ->
CB = fun
- (_, wait) -> receive {Self, continue} -> {ok, 0} end;
- ({row, _}, Count) -> {ok, Count+1};
- (_, Count) -> {ok, Count}
+ (_, wait) ->
+ receive
+ {Self, continue} -> {ok, 0}
+ end;
+ ({row, _}, Count) ->
+ {ok, Count + 1};
+ (_, Count) ->
+ {ok, Count}
end,
{ok, Result} =
- couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
+ couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, [], CB, wait),
Self ! {self(), Result}
end),
{ok, Pid}.
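
The compaction test above also shows how erlfmt lays out a multi-clause anonymous fun: each clause head sits on its own line with the body indented beneath it. A small self-contained sketch of the same convention, with hypothetical names:

```
%% Hypothetical callback in the layout erlfmt produces for multi-clause funs.
-module(erlfmt_fun_example).
-export([count_rows/1]).

count_rows(Rows) ->
    CB = fun
        ({row, _}, Count) ->
            Count + 1;
        ({meta, _}, Count) ->
            Count;
        (_, Count) ->
            Count
    end,
    lists:foldl(CB, 0, Rows).
```
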
diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
index 4310157eb..2a6299448 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_ddoc_updated_tests.erl
@@ -17,30 +17,33 @@
-define(TIMEOUT, 1000).
-
setup() ->
Name = ?tempdb(),
couch_server:delete(Name, [?ADMIN_CTX]),
{ok, Db} = couch_db:create(Name, [?ADMIN_CTX]),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/bar">>},
- {<<"views">>, {[
- {<<"baz">>, {[
- {<<"map">>, <<
- "function(doc) {\n"
- " emit(doc.val, doc.val);\n"
- "}"
- >>}
- ]}}
- ]}}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/bar">>},
+ {<<"views">>,
+ {[
+ {<<"baz">>,
+ {[
+ {<<"map">>, <<
+ "function(doc) {\n"
+ " emit(doc.val, doc.val);\n"
+ "}"
+ >>}
+ ]}}
+ ]}}
+ ]}
+ ),
[Doc1 | Docs999] = couch_mrview_test_util:make_docs(map, 100),
{ok, _} = couch_db:update_docs(Db, [DDoc, Doc1], []),
{ok, Db2} = couch_db:reopen(Db),
% run a query with 1 doc to initialize couch_index process
CB = fun
- ({row, _}, Count) -> {ok, Count+1};
+ ({row, _}, Count) -> {ok, Count + 1};
(_, Count) -> {ok, Count}
end,
{ok, _} =
@@ -63,16 +66,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
ddoc_update_test_() ->
{
"Check ddoc update actions",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun check_indexing_stops_on_ddoc_change/1
]
@@ -80,7 +84,6 @@ ddoc_update_test_() ->
}
}.
-
check_indexing_stops_on_ddoc_change(Db) ->
?_test(begin
DDocID = <<"_design/bar">>,
@@ -91,17 +94,20 @@ check_indexing_stops_on_ddoc_change(Db) ->
?assertEqual(1, length(AliveBefore)),
{ok, DDoc} = couch_db:open_doc(Db, DDocID, [ejson_body, ?ADMIN_CTX]),
- DDocJson2 = couch_doc:from_json_obj({[
- {<<"_id">>, DDocID},
- {<<"_deleted">>, true},
- {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
- ]}),
+ DDocJson2 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DDocID},
+ {<<"_deleted">>, true},
+ {<<"_rev">>, couch_doc:rev_to_str(DDoc#doc.revs)}
+ ]}
+ ),
% spawn a process for query
Self = self(),
QPid = spawn(fun() ->
{ok, Result} = couch_mrview:query_view(
- Db, <<"_design/bar">>, <<"baz">>, []),
+ Db, <<"_design/bar">>, <<"baz">>, []
+ ),
Self ! {self(), Result}
end),
@@ -112,8 +118,12 @@ check_indexing_stops_on_ddoc_change(Db) ->
?assertEqual(Msg, ddoc_updated)
after ?TIMEOUT ->
erlang:error(
- {assertion_failed, [{module, ?MODULE}, {line, ?LINE},
- {reason, "test failed"}]})
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "test failed"}
+ ]}
+ )
end,
%% assert that previously running indexes are gone
@@ -123,11 +133,11 @@ check_indexing_stops_on_ddoc_change(Db) ->
?assertEqual(0, length(AliveAfter))
end).
-
get_indexes_by_ddoc(DDocID, N) ->
Indexes = test_util:wait(fun() ->
Indxs = ets:match_object(
- couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}),
+ couchdb_indexes_by_db, {'$1', {DDocID, '$2'}}
+ ),
case length(Indxs) == N of
true ->
Indxs;
@@ -135,11 +145,13 @@ get_indexes_by_ddoc(DDocID, N) ->
wait
end
end),
- lists:foldl(fun({DbName, {_DDocID, Sig}}, Acc) ->
- case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of
- [{_, Pid}] -> [Pid|Acc];
- _ -> Acc
- end
- end, [], Indexes).
-
-
+ lists:foldl(
+ fun({DbName, {_DDocID, Sig}}, Acc) ->
+ case ets:lookup(couchdb_indexes_by_sig, {DbName, Sig}) of
+ [{_, Pid}] -> [Pid | Acc];
+ _ -> Acc
+ end
+ end,
+ [],
+ Indexes
+ ).
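
The `lists:foldl` rewrite at the end of this file illustrates erlfmt's treatment of higher-order calls once the fun argument spans several lines: the fun, the initial accumulator, and the list each get their own line inside the call. A standalone sketch under the same convention, with hypothetical names:

```
%% Hypothetical fold written in the layout erlfmt produces when the fun
%% argument is multi-line: each argument of the call is on its own line.
-module(erlfmt_foldl_example).
-export([live_pids/1]).

live_pids(Entries) ->
    lists:foldl(
        fun({_Name, Pid}, Acc) ->
            case is_pid(Pid) of
                true -> [Pid | Acc];
                false -> Acc
            end
        end,
        [],
        Entries
    ).
```
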
diff --git a/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
index ce2be8904..3e4cbc84f 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_ddoc_validation_tests.erl
@@ -31,10 +31,12 @@ ddoc_validation_test_() ->
"ddoc validation tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_reject_invalid_js_map/1,
fun should_reject_invalid_js_reduce/1,
@@ -79,344 +81,477 @@ ddoc_validation_test_() ->
}.
should_reject_invalid_js_map(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_invalid_js_map">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) }{">>}
- ]}}
- ]}}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_invalid_js_map">>},
+ {<<"views">>,
+ {[
+ {<<"foo">>,
+ {[
+ {<<"map">>, <<"function(doc) }{">>}
+ ]}}
+ ]}}
+ ]}
+ ),
?_assertThrow(
{bad_request, compilation_error, _},
- couch_db:update_doc(Db, Doc, [])).
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_invalid_js_reduce(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_invalid_js_reduce">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(null); }">>},
- {<<"reduce">>, <<"function(k, v, r) }{}">>}
- ]}}
- ]}}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_invalid_js_reduce">>},
+ {<<"views">>,
+ {[
+ {<<"foo">>,
+ {[
+ {<<"map">>, <<"function(doc) { emit(null); }">>},
+ {<<"reduce">>, <<"function(k, v, r) }{}">>}
+ ]}}
+ ]}}
+ ]}
+ ),
?_assertThrow(
{bad_request, compilation_error, _},
- couch_db:update_doc(Db, Doc, [])).
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_invalid_builtin_reduce(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_invalid_builtin_reduce">>},
- {<<"views">>, {[
- {<<"foo">>, {[
- {<<"map">>, <<"function(doc) { emit(null); }">>},
- {<<"reduce">>, <<"_foobar">>}
- ]}}
- ]}}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_invalid_builtin_reduce">>},
+ {<<"views">>,
+ {[
+ {<<"foo">>,
+ {[
+ {<<"map">>, <<"function(doc) { emit(null); }">>},
+ {<<"reduce">>, <<"_foobar">>}
+ ]}}
+ ]}}
+ ]}
+ ),
?_assertThrow(
{bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_non_object_options(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_options">>},
- {<<"options">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_object_options">>},
+ {<<"options">>, <<"invalid">>}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_non_object_filters(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_filters">>},
- {<<"filters">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_object_filters">>},
+ {<<"filters">>, <<"invalid">>}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_obj_in_filters(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_filters">>},
- {<<"filters">>, ?LIB}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_obj_in_filters">>},
+ {<<"filters">>, ?LIB}
+ ]}
+ ),
?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_non_object_lists(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_lists">>},
- {<<"lists">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_object_lists">>},
+ {<<"lists">>, <<"invalid">>}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_non_object_shows(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_shows">>},
- {<<"shows">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_object_shows">>},
+ {<<"shows">>, <<"invalid">>}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_obj_in_shows(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_shows">>},
- {<<"shows">>, ?LIB}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_obj_in_shows">>},
+ {<<"shows">>, ?LIB}
+ ]}
+ ),
?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_non_object_updates(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_updates">>},
- {<<"updates">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_object_updates">>},
+ {<<"updates">>, <<"invalid">>}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_obj_in_updates(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_updates">>},
- {<<"updates">>, ?LIB}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_obj_in_updates">>},
+ {<<"updates">>, ?LIB}
+ ]}
+ ),
?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_non_object_views(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_views">>},
- {<<"views">>, <<"invalid">>}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_object_views">>},
+ {<<"views">>, <<"invalid">>}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_non_string_language(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_language">>},
- {<<"language">>, 1}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_string_language">>},
+ {<<"language">>, 1}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_non_string_validate_doc_update(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_vdu">>},
- {<<"validate_doc_update">>, 1}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_string_vdu">>},
+ {<<"validate_doc_update">>, 1}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_string_rewrites(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
- {<<"rewrites">>, <<"function(req){}">>}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
+ {<<"rewrites">>, <<"function(req){}">>}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_bad_rewrites(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
- {<<"rewrites">>, 42}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_array_rewrites">>},
+ {<<"rewrites">>, 42}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_option(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_options">>},
- {<<"options">>, {[ {<<"option1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_options">>},
+ {<<"options">>, {[{<<"option1">>, <<"function(doc,req){}">>}]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_accept_any_option(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_any_option">>},
- {<<"options">>, {[ {<<"option1">>, true} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_any_option">>},
+ {<<"options">>, {[{<<"option1">>, true}]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_accept_filter(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_filters">>},
- {<<"filters">>, {[ {<<"filter1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_filters">>},
+ {<<"filters">>, {[{<<"filter1">>, <<"function(doc,req){}">>}]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_non_string_or_obj_filter_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_filter_function">>},
- {<<"filters">>, {[ {<<"filter1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_string_or_obj_filter_function">>},
+ {<<"filters">>, {[{<<"filter1">>, 1}]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_list(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_lists">>},
- {<<"lists">>, {[ {<<"list1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_lists">>},
+ {<<"lists">>, {[{<<"list1">>, <<"function(doc,req){}">>}]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_non_string_or_obj_list_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_list_function">>},
- {<<"lists">>, {[ {<<"list1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_string_or_obj_list_function">>},
+ {<<"lists">>, {[{<<"list1">>, 1}]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_obj_in_lists(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_obj_in_lists">>},
- {<<"lists">>, ?LIB}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_obj_in_lists">>},
+ {<<"lists">>, ?LIB}
+ ]}
+ ),
?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
-
should_accept_show(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_shows">>},
- {<<"shows">>, {[ {<<"show1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_shows">>},
+ {<<"shows">>, {[{<<"show1">>, <<"function(doc,req){}">>}]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_non_string_or_obj_show_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_show_function">>},
- {<<"shows">>, {[ {<<"show1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_string_or_obj_show_function">>},
+ {<<"shows">>, {[{<<"show1">>, 1}]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_update(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_updates">>},
- {<<"updates">>, {[ {<<"update1">>, <<"function(doc,req){}">>} ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_updates">>},
+ {<<"updates">>, {[{<<"update1">>, <<"function(doc,req){}">>}]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_non_string_or_obj_update_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_string_or_obj_update_function">>},
- {<<"updates">>, {[ {<<"update1">>, 1} ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_string_or_obj_update_function">>},
+ {<<"updates">>, {[{<<"update1">>, 1}]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_view(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view">>},
- {<<"views">>, {[
- {<<"view1">>, {[{<<"map">>, <<"function(d){}">>}]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_view">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>, {[{<<"map">>, <<"function(d){}">>}]}}
+ ]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_accept_view_with_reduce(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view_with_reduce">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>},
- {<<"reduce">>,<<"function(d){}">>}
- ]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_view_with_reduce">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, <<"function(d){}">>},
+ {<<"reduce">>, <<"function(d){}">>}
+ ]}}
+ ]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_accept_view_with_lib(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view_with_lib">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>}
- ]}},
- {<<"lib">>, {[
- {<<"lib1">>, <<"x=42">>}
- ]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_view_with_lib">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, <<"function(d){}">>}
+ ]}},
+ {<<"lib">>,
+ {[
+ {<<"lib1">>, <<"x=42">>}
+ ]}}
+ ]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_view_that_is_not_an_object(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_non_object_view">>},
- {<<"views">>, {[{<<"view1">>, <<"thisisbad">>}]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_non_object_view">>},
+ {<<"views">>, {[{<<"view1">>, <<"thisisbad">>}]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_view_without_map_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_view_without_map">>},
- {<<"views">>, {[
- {<<"view1">>, {[]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
-
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_view_without_map">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>, {[]}}
+ ]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_view_with_non_string_map_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_view_with_nonstr_map">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>,{[]}}
- ]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_view_with_nonstr_map">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, {[]}}
+ ]}}
+ ]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_reject_view_with_non_string_reduce_function(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_view_with_nonstr_reduce">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>,<<"function(d){}">>},
- {<<"reduce">>,1}
- ]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_view_with_nonstr_reduce">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, <<"function(d){}">>},
+ {<<"reduce">>, 1}
+ ]}}
+ ]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
should_accept_any_in_lib(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_any_in_lib">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>}
- ]}},
- {<<"lib">>, {[{<<"lib1">>, {[]}}]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_any_in_lib">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, <<"function(d){}">>}
+ ]}},
+ {<<"lib">>, {[{<<"lib1">>, {[]}}]}}
+ ]}}
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_accept_map_object_for_queries(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_accept_map_objects_for_queries">>},
- {<<"language">>, <<"query">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, {[
- {<<"x">>, <<"y">>}
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_accept_map_objects_for_queries">>},
+ {<<"language">>, <<"query">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>,
+ {[
+ {<<"x">>, <<"y">>}
+ ]}}
+ ]}}
]}}
- ]}}
- ]}}
- ]}),
- ?_assertMatch({ok,_}, couch_db:update_doc(Db, Doc, [])).
-
+ ]}
+ ),
+ ?_assertMatch({ok, _}, couch_db:update_doc(Db, Doc, [])).
should_reject_map_non_objects_for_queries(Db) ->
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/should_reject_map_non_objects__with_nonstr_reduce">>},
- {<<"language">>, <<"query">>},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(d){}">>}
- ]}}
- ]}}
- ]}),
- ?_assertThrow({bad_request, invalid_design_doc, _},
- couch_db:update_doc(Db, Doc, [])).
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/should_reject_map_non_objects__with_nonstr_reduce">>},
+ {<<"language">>, <<"query">>},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, <<"function(d){}">>}
+ ]}}
+ ]}}
+ ]}
+ ),
+ ?_assertThrow(
+ {bad_request, invalid_design_doc, _},
+ couch_db:update_doc(Db, Doc, [])
+ ).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
index aedd42865..b1de2839d 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_design_docs_tests.erl
@@ -17,8 +17,6 @@
-define(TIMEOUT, 1000).
-
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), design),
Db.
@@ -28,16 +26,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
design_docs_test_() ->
{
"_design_docs view tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_query/1,
fun should_query_with_range/1,
@@ -49,22 +48,22 @@ design_docs_test_() ->
}
}.
-
should_query(Db) ->
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 10}]},
- mk_row(<<"_design/bar01">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar02">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar08">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar09">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar10">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 10}]},
+ mk_row(<<"_design/bar01">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar02">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar08">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar09">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar10">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_range(Db) ->
@@ -72,26 +71,29 @@ should_query_with_range(Db) ->
{start_key, <<"_design/bar03">>},
{end_key, <<"_design/bar05">>}
]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 12}]},
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 12}]},
+ mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_range_rev(Db) ->
Result = run_query(Db, [
{direction, rev},
- {start_key, <<"_design/bar05">>}, {end_key, <<"_design/bar03">>},
+ {start_key, <<"_design/bar05">>},
+ {end_key, <<"_design/bar03">>},
{inclusive_end, true}
]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 5}]},
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 5}]},
+ mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar04">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar03">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_limit_and_skip(Db) ->
@@ -100,12 +102,13 @@ should_query_with_limit_and_skip(Db) ->
{limit, 3},
{skip, 3}
]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 14}]},
- mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
- mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 14}]},
+ mk_row(<<"_design/bar05">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar06">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>),
+ mk_row(<<"_design/bar07">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_include_docs(Db) ->
@@ -114,20 +117,25 @@ should_query_with_include_docs(Db) ->
{end_key, <<"_design/bar08">>},
{include_docs, true}
]),
- Doc = {[
- {<<"_id">>,<<"_design/bar08">>},
- {<<"_rev">>,<<"1-0b24e44a44af45e51e562fd124ce3007">>},
- {<<"views">>,{[]}}
- ]},
+ Doc =
+ {[
+ {<<"_id">>, <<"_design/bar08">>},
+ {<<"_rev">>, <<"1-0b24e44a44af45e51e562fd124ce3007">>},
+ {<<"views">>, {[]}}
+ ]},
Val = {[{rev, <<"1-0b24e44a44af45e51e562fd124ce3007">>}]},
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 17}]},
- {row, [{id, <<"_design/bar08">>}, {key, <<"_design/bar08">>},
- {value, Val}, {doc, Doc}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 17}]},
+ {row, [
+ {id, <<"_design/bar08">>},
+ {key, <<"_design/bar08">>},
+ {value, Val},
+ {doc, Doc}
+ ]}
+ ]},
?_assertEqual(Expect, Result).
-
mk_row(Id, Rev) ->
{row, [{id, Id}, {key, Id}, {value, {[{rev, Rev}]}}]}.
diff --git a/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
index bd11c7ad8..bfa4965a4 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_http_tests.erl
@@ -15,14 +15,23 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
mrview_http_test_() ->
[
- ?_assertEqual(#mrargs{group_level=undefined, group=true},
- couch_mrview_http:parse_params([{"group", "true"}],
- undefined, #mrargs{})),
+ ?_assertEqual(
+ #mrargs{group_level = undefined, group = true},
+ couch_mrview_http:parse_params(
+ [{"group", "true"}],
+ undefined,
+ #mrargs{}
+ )
+ ),
- ?_assertEqual(#mrargs{group_level=1, group=undefined},
- couch_mrview_http:parse_params([{"group_level", "1"}],
- undefined, #mrargs{}))
+ ?_assertEqual(
+ #mrargs{group_level = 1, group = undefined},
+ couch_mrview_http:parse_params(
+ [{"group_level", "1"}],
+ undefined,
+ #mrargs{}
+ )
+ )
].
diff --git a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
index c4c765feb..bf64eaea3 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_index_info_tests.erl
@@ -17,20 +17,17 @@
-define(TIMEOUT, 1000).
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>),
{ok, Info} = couch_mrview:get_info(Db, <<"_design/bar">>),
{Db, Info}.
-
teardown({Db, _}) ->
couch_db:close(Db),
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
view_info_test_() ->
{
"Views index tests",
@@ -57,50 +54,41 @@ view_info_test_() ->
}
}.
-
sig_is_binary({_, Info}) ->
?_assert(is_binary(prop(signature, Info))).
-
language_is_js({_, Info}) ->
?_assertEqual(<<"javascript">>, prop(language, Info)).
-
file_size_is_non_neg_int({_, Info}) ->
?_assert(check_non_neg_int([sizes, file], Info)).
-
active_size_is_non_neg_int({_, Info}) ->
?_assert(check_non_neg_int([sizes, active], Info)).
-
external_size_is_non_neg_int({_, Info}) ->
?_assert(check_non_neg_int([sizes, external], Info)).
-
active_size_less_than_file_size({_, Info}) ->
?_assert(prop([sizes, active], Info) < prop([sizes, file], Info)).
-
update_seq_is_non_neg_int({_, Info}) ->
?_assert(check_non_neg_int(update_seq, Info)).
-
purge_seq_is_non_neg_int({_, Info}) ->
?_assert(check_non_neg_int(purge_seq, Info)).
-
update_opts_is_bin_list({_, Info}) ->
Opts = prop(update_options, Info),
- ?_assert(is_list(Opts) andalso
- (Opts == [] orelse lists:all([is_binary(B) || B <- Opts]))).
-
+ ?_assert(
+ is_list(Opts) andalso
+ (Opts == [] orelse lists:all([is_binary(B) || B <- Opts]))
+ ).
check_non_neg_int(Key, Info) ->
Size = prop(Key, Info),
is_integer(Size) andalso Size >= 0.
-
prop(Key, {Props}) when is_list(Props) ->
prop(Key, Props);
prop([Key], Info) ->
diff --git a/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
index b0d25469a..7c812eeb0 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_local_docs_tests.erl
@@ -17,8 +17,6 @@
-define(TIMEOUT, 1000).
-
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), local),
Db.
@@ -28,16 +26,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
all_docs_test_() ->
{
"_local_docs view tests",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_query/1,
fun should_query_with_range/1,
@@ -50,22 +49,22 @@ all_docs_test_() ->
}
}.
-
should_query(Db) ->
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(1),
- mk_row(10),
- mk_row(2),
- mk_row(3),
- mk_row(4),
- mk_row(5),
- mk_row(6),
- mk_row(7),
- mk_row(8),
- mk_row(9)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, null}, {offset, null}]},
+ mk_row(1),
+ mk_row(10),
+ mk_row(2),
+ mk_row(3),
+ mk_row(4),
+ mk_row(5),
+ mk_row(6),
+ mk_row(7),
+ mk_row(8),
+ mk_row(9)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_range(Db) ->
@@ -73,26 +72,29 @@ should_query_with_range(Db) ->
{start_key, <<"_local/3">>},
{end_key, <<"_local/5">>}
]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(3),
- mk_row(4),
- mk_row(5)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, null}, {offset, null}]},
+ mk_row(3),
+ mk_row(4),
+ mk_row(5)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_range_rev(Db) ->
Result = run_query(Db, [
{direction, rev},
- {start_key, <<"_local/5">>}, {end_key, <<"_local/3">>},
+ {start_key, <<"_local/5">>},
+ {end_key, <<"_local/3">>},
{inclusive_end, true}
]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(5),
- mk_row(4),
- mk_row(3)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, null}, {offset, null}]},
+ mk_row(5),
+ mk_row(4),
+ mk_row(3)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_limit_and_skip(Db) ->
@@ -101,12 +103,13 @@ should_query_with_limit_and_skip(Db) ->
{limit, 3},
{skip, 3}
]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- mk_row(5),
- mk_row(6),
- mk_row(7)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, null}, {offset, null}]},
+ mk_row(5),
+ mk_row(6),
+ mk_row(7)
+ ]},
?_assertEqual(Expect, Result).
should_query_with_include_docs(Db) ->
@@ -116,15 +119,21 @@ should_query_with_include_docs(Db) ->
{include_docs, true}
]),
{row, Doc0} = mk_row(8),
- Doc = Doc0 ++ [{doc, {[
- {<<"_id">>, <<"_local/8">>},
- {<<"_rev">>, <<"0-1">>},
- {<<"val">>, 8}
- ]}}],
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}]},
- {row, Doc}
- ]},
+ Doc =
+ Doc0 ++
+ [
+ {doc,
+ {[
+ {<<"_id">>, <<"_local/8">>},
+ {<<"_rev">>, <<"0-1">>},
+ {<<"val">>, 8}
+ ]}}
+ ],
+ Expect =
+ {ok, [
+ {meta, [{total, null}, {offset, null}]},
+ {row, Doc}
+ ]},
?_assertEqual(Expect, Result).
should_query_with_update_seq(Db) ->
@@ -133,10 +142,11 @@ should_query_with_update_seq(Db) ->
{limit, 1},
{update_seq, true}
]),
- Expect = {ok, [
- {meta, [{total, null}, {offset, null}, {update_seq, null}]},
- mk_row(2)
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, null}, {offset, null}, {update_seq, null}]},
+ mk_row(2)
+ ]},
?_assertEqual(Expect, Result).
mk_row(IntId) ->
diff --git a/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
index 805dc6c74..0f8357a98 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_map_views_tests.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 1000).
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map),
Db.
@@ -27,16 +26,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
map_views_test_() ->
{
"Map views",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_map/1,
fun should_map_with_range/1,
@@ -49,36 +49,38 @@ map_views_test_() ->
}
}.
-
should_map(Db) ->
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
- {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
- {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
- {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
- {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, 7}, {value, 7}]},
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}]},
+ {row, [{id, <<"9">>}, {key, 9}, {value, 9}]},
+ {row, [{id, <<"10">>}, {key, 10}, {value, 10}]}
+ ]},
?_assertEqual(Expect, Result).
should_map_with_range(Db) ->
Result = run_query(Db, [
{direction, rev},
- {start_key, 5}, {end_key, 3},
+ {start_key, 5},
+ {end_key, 3},
{inclusive_end, true}
]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 5}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 5}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]}
+ ]},
?_assertEqual(Expect, Result).
should_map_with_limit_and_skip(Db) ->
@@ -87,12 +89,13 @@ should_map_with_limit_and_skip(Db) ->
{limit, 3},
{skip, 3}
]),
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
- {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
- {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]},
+ {row, [{id, <<"6">>}, {key, 6}, {value, 6}]},
+ {row, [{id, <<"7">>}, {key, 7}, {value, 7}]}
+ ]},
?_assertEqual(Expect, Result).
should_map_with_include_docs(Db) ->
@@ -101,35 +104,41 @@ should_map_with_include_docs(Db) ->
{end_key, 8},
{include_docs, true}
]),
- Doc = {[
- {<<"_id">>,<<"8">>},
- {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
- {<<"val">>,8}
- ]},
- Expect = {ok, [
- {meta, [{total, 10}, {offset, 7}]},
- {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
- ]},
+ Doc =
+ {[
+ {<<"_id">>, <<"8">>},
+ {<<"_rev">>, <<"1-55b9a29311341e07ec0a7ca13bc1b59f">>},
+ {<<"val">>, 8}
+ ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 10}, {offset, 7}]},
+ {row, [{id, <<"8">>}, {key, 8}, {value, 8}, {doc, Doc}]}
+ ]},
?_assertEqual(Expect, Result).
should_map_empty_views(Db) ->
Result = couch_mrview:query_view(Db, <<"_design/bar">>, <<"bing">>),
- Expect = {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 0}, {offset, 0}]}
+ ]},
?_assertEqual(Expect, Result).
should_give_ext_size_seq_indexed_test(Db) ->
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/seqdoc">>},
- {<<"options">>, {[{<<"seq_indexed">>, true}]}},
- {<<"views">>, {[
- {<<"view1">>, {[
- {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/seqdoc">>},
+ {<<"options">>, {[{<<"seq_indexed">>, true}]}},
+ {<<"views">>,
+ {[
+ {<<"view1">>,
+ {[
+ {<<"map">>, <<"function(doc){emit(doc._id, doc._id);}">>}
+ ]}}
]}}
- ]}
- }
- ]}),
+ ]}
+ ),
{ok, _} = couch_db:update_doc(Db, DDoc, []),
{ok, Db1} = couch_db:open_int(couch_db:name(Db), []),
{ok, DDoc1} = couch_db:open_doc(Db1, <<"_design/seqdoc">>, [ejson_body]),
@@ -139,6 +148,5 @@ should_give_ext_size_seq_indexed_test(Db) ->
ok = couch_db:close(Db1),
?_assert(is_number(Size)).
-
run_query(Db, Opts) ->
couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
index b2969bba0..3207a3da3 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_fabric_tests.erl
@@ -17,20 +17,18 @@
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
--define(TIMEOUT, 60). % seconds
-
+% seconds
+-define(TIMEOUT, 60).
setup_all() ->
Ctx = test_util:start_couch([fabric, mem3]),
meck:new(couch_mrview_index, [passthrough]),
Ctx.
-
teardown_all(Ctx) ->
meck:unload(),
test_util:stop_couch(Ctx).
-
setup() ->
DbName = ?tempdb(),
ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]),
@@ -40,11 +38,9 @@ setup() ->
end),
DbName.
-
teardown(DbName) ->
ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
-
view_purge_fabric_test_() ->
{
"Map views",
@@ -64,152 +60,185 @@ view_purge_fabric_test_() ->
}
}.
-
test_purge_verify_index(DbName) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Docs1 = couch_mrview_test_util:make_docs(normal, 5),
- {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(
- DbName,
- couch_mrview_test_util:ddoc(map),
- [?ADMIN_CTX]
- ),
-
- Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect1 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect1, Result1),
-
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(0, couch_util:get_value(<<"purge_seq">>, Props1)),
- ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
- ?assertEqual(true, couch_mrview_index:verify_index_exists(
- ShardDbName, Props1)),
-
- purge_docs(DbName, [<<"1">>]),
-
- Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
- ?assertEqual(true, couch_mrview_index:verify_index_exists(
- ShardDbName, Props2))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Docs1 = couch_mrview_test_util:make_docs(normal, 5),
+ {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ DbName,
+ couch_mrview_test_util:ddoc(map),
+ [?ADMIN_CTX]
+ ),
+
+ Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+ Expect1 =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
+ ?assertEqual(Expect1, Result1),
+
+ {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
+ ?assertEqual(0, couch_util:get_value(<<"purge_seq">>, Props1)),
+ ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+ [ShardDbName | _Rest] = ShardNames,
+ ?assertEqual(
+ true,
+ couch_mrview_index:verify_index_exists(
+ ShardDbName, Props1
+ )
+ ),
+
+ purge_docs(DbName, [<<"1">>]),
+
+ Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+ Expect2 =
+ {ok, [
+ {meta, [{total, 4}, {offset, 0}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
+ ?assertEqual(Expect2, Result2),
+
+ {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
+ ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
+ ?assertEqual(
+ true,
+ couch_mrview_index:verify_index_exists(
+ ShardDbName, Props2
+ )
+ )
+ end)}.
test_purge_hook_before_compaction(DbName) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Docs1 = couch_mrview_test_util:make_docs(normal, 5),
- {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
- {ok, _} = fabric:update_doc(
- DbName,
- couch_mrview_test_util:ddoc(map),
- [?ADMIN_CTX]
- ),
-
- Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect1 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect1, Result1),
-
- purge_docs(DbName, [<<"1">>]),
-
- Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect2, Result2),
-
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
-
- [ShardName | _] = local_shards(DbName),
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(ok, meck:wait(1, couch_mrview_index,
- ensure_local_purge_docs, '_', 5000)
- ),
-
- % Make sure compaction didn't change the update seq
- {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
-
- purge_docs(DbName, [<<"2">>]),
-
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(ok, meck:wait(2, couch_mrview_index,
- ensure_local_purge_docs, '_', 5000)
- ),
-
- % Make sure compaction after a purge didn't overwrite
- % the local purge doc for the index
- {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
- ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
-
- % Force another update to ensure that we update
- % the local doc appropriate after compaction
- Result3 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
- Expect3 = {ok, [
- {meta, [{total, 3}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
- ?assertEqual(Expect3, Result3),
-
- {ok, #doc{body = {Props3}}} = get_local_purge_doc(DbName),
- ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props3)),
-
- % Check that if the local doc doesn't exist that one
- % is created for the index on compaction
- delete_local_purge_doc(DbName),
- ?assertMatch({not_found, _}, get_local_purge_doc(DbName)),
-
- couch_util:with_db(ShardName, fun(Db) ->
- {ok, _} = couch_db:start_compact(Db)
- end),
- wait_compaction(ShardName, ?LINE),
-
- ?assertEqual(ok, meck:wait(3, couch_mrview_index,
- ensure_local_purge_docs, '_', 5000)
- ),
-
- {ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName),
- ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Docs1 = couch_mrview_test_util:make_docs(normal, 5),
+ {ok, _} = fabric:update_docs(DbName, Docs1, [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ DbName,
+ couch_mrview_test_util:ddoc(map),
+ [?ADMIN_CTX]
+ ),
+
+ Result1 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+ Expect1 =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
+ ?assertEqual(Expect1, Result1),
+
+ purge_docs(DbName, [<<"1">>]),
+
+ Result2 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+ Expect2 =
+ {ok, [
+ {meta, [{total, 4}, {offset, 0}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
+ ?assertEqual(Expect2, Result2),
+
+ {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
+ ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
+
+ [ShardName | _] = local_shards(DbName),
+ couch_util:with_db(ShardName, fun(Db) ->
+ {ok, _} = couch_db:start_compact(Db)
+ end),
+ wait_compaction(ShardName, ?LINE),
+
+ ?assertEqual(
+ ok,
+ meck:wait(
+ 1,
+ couch_mrview_index,
+ ensure_local_purge_docs,
+ '_',
+ 5000
+ )
+ ),
+
+ % Make sure compaction didn't change the update seq
+ {ok, #doc{body = {Props1}}} = get_local_purge_doc(DbName),
+ ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props1)),
+
+ purge_docs(DbName, [<<"2">>]),
+
+ couch_util:with_db(ShardName, fun(Db) ->
+ {ok, _} = couch_db:start_compact(Db)
+ end),
+ wait_compaction(ShardName, ?LINE),
+
+ ?assertEqual(
+ ok,
+ meck:wait(
+ 2,
+ couch_mrview_index,
+ ensure_local_purge_docs,
+ '_',
+ 5000
+ )
+ ),
+
+ % Make sure compaction after a purge didn't overwrite
+ % the local purge doc for the index
+ {ok, #doc{body = {Props2}}} = get_local_purge_doc(DbName),
+ ?assertEqual(1, couch_util:get_value(<<"purge_seq">>, Props2)),
+
+ % Force another update to ensure that we update
+ % the local doc appropriate after compaction
+ Result3 = fabric:query_view(DbName, <<"bar">>, <<"baz">>, #mrargs{}),
+ Expect3 =
+ {ok, [
+ {meta, [{total, 3}, {offset, 0}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
+ ?assertEqual(Expect3, Result3),
+
+ {ok, #doc{body = {Props3}}} = get_local_purge_doc(DbName),
+ ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props3)),
+
+ % Check that if the local doc doesn't exist that one
+ % is created for the index on compaction
+ delete_local_purge_doc(DbName),
+ ?assertMatch({not_found, _}, get_local_purge_doc(DbName)),
+
+ couch_util:with_db(ShardName, fun(Db) ->
+ {ok, _} = couch_db:start_compact(Db)
+ end),
+ wait_compaction(ShardName, ?LINE),
+
+ ?assertEqual(
+ ok,
+ meck:wait(
+ 3,
+ couch_mrview_index,
+ ensure_local_purge_docs,
+ '_',
+ 5000
+ )
+ ),
+
+ {ok, #doc{body = {Props4}}} = get_local_purge_doc(DbName),
+ ?assertEqual(2, couch_util:get_value(<<"purge_seq">>, Props4))
+ end)}.
get_local_purge_doc(DbName) ->
{ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
@@ -222,7 +251,6 @@ get_local_purge_doc(DbName) ->
couch_db:open_doc(Db, DocId, [])
end).
-
delete_local_purge_doc(DbName) ->
{ok, DDoc} = fabric:open_doc(DbName, <<"_design/bar">>, []),
{ok, IdxState} = couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
@@ -235,21 +263,21 @@ delete_local_purge_doc(DbName) ->
{ok, _} = couch_db:update_doc(Db, NewDoc, [])
end).
-
get_rev(#full_doc_info{} = FDI) ->
#doc_info{
revs = [#rev_info{} = PrevRev | _]
} = couch_doc:to_doc_info(FDI),
PrevRev#rev_info.rev.
-
purge_docs(DbName, DocIds) ->
- lists:foreach(fun(DocId) ->
- FDI = fabric:get_full_doc_info(DbName, DocId, []),
- Rev = get_rev(FDI),
- {ok, [{ok, _}]} = fabric:purge_docs(DbName, [{DocId, [Rev]}], [])
- end, DocIds).
-
+ lists:foreach(
+ fun(DocId) ->
+ FDI = fabric:get_full_doc_info(DbName, DocId, []),
+ Rev = get_rev(FDI),
+ {ok, [{ok, _}]} = fabric:purge_docs(DbName, [{DocId, [Rev]}], [])
+ end,
+ DocIds
+ ).
wait_compaction(DbName, Line) ->
WaitFun = fun() ->
@@ -260,23 +288,23 @@ wait_compaction(DbName, Line) ->
end,
case test_util:wait(WaitFun, 10000) of
timeout ->
- erlang:error({assertion_failed, [
+ erlang:error(
+ {assertion_failed, [
{module, ?MODULE},
{line, Line},
{reason, "Timeout waiting for database compaction"}
- ]});
+ ]}
+ );
_ ->
ok
end.
-
is_compaction_running(DbName) ->
{ok, DbInfo} = couch_util:with_db(DbName, fun(Db) ->
couch_db:get_db_info(Db)
end),
couch_util:get_value(compact_running, DbInfo).
-
local_shards(DbName) ->
try
[ShardName || #shard{name = ShardName} <- mem3:local_shards(DbName)]
diff --git a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
index 62e1410cb..63c5de458 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_purge_docs_tests.erl
@@ -18,7 +18,6 @@
-define(TIMEOUT, 1000).
-
setup() ->
meck:new(couch_index_updater, [passthrough]),
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), map, 5),
@@ -57,51 +56,52 @@ view_purge_test_() ->
}
}.
-
test_purge_single(Db) ->
?_test(begin
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect, Result),
FDI = couch_db:get_full_doc_info(Db, <<"1">>),
Rev = get_rev(FDI),
{ok, [{ok, _PRevs}]} = couch_db:purge_docs(
- Db,
- [{<<"UUID1">>, <<"1">>, [Rev]}]
- ),
+ Db,
+ [{<<"UUID1">>, <<"1">>, [Rev]}]
+ ),
{ok, Db2} = couch_db:reopen(Db),
Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect2 =
+ {ok, [
+ {meta, [{total, 4}, {offset, 0}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect2, Result2)
end).
-
test_purge_single_for_docid_with_list(Db) ->
?_test(begin
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect, Result),
FDI = couch_db:get_full_doc_info(Db, <<"1">>),
@@ -113,35 +113,39 @@ test_purge_single_for_docid_with_list(Db) ->
{ok, Db2} = couch_db:reopen(Db),
Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 4}, {offset, 0}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect2 =
+ {ok, [
+ {meta, [{total, 4}, {offset, 0}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect2, Result2)
end).
test_purge_partial(Db) ->
?_test(begin
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect, Result),
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1),
- Update = {[
- {'_id', <<"1">>},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {'val', 1.2}
- ]},
+ FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
+ Rev1 = get_rev(FDI1),
+ Update =
+ {[
+ {'_id', <<"1">>},
+ {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
+ {'val', 1.2}
+ ]},
{ok, [_Rev2]} = save_docs(Db, [Update], [replicated_changes]),
PurgeInfos = [{<<"UUID1">>, <<"1">>, [Rev1]}],
@@ -150,34 +154,38 @@ test_purge_partial(Db) ->
{ok, Db2} = couch_db:reopen(Db),
Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1.2}, {value, 1.2}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect2 =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1.2}, {value, 1.2}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect2, Result2)
end).
-
test_purge_complete(Db) ->
?_test(begin
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect, Result),
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1),
- FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2),
- FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5),
+ FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
+ Rev1 = get_rev(FDI1),
+ FDI2 = couch_db:get_full_doc_info(Db, <<"2">>),
+ Rev2 = get_rev(FDI2),
+ FDI5 = couch_db:get_full_doc_info(Db, <<"5">>),
+ Rev5 = get_rev(FDI5),
PurgeInfos = [
{<<"UUID1">>, <<"1">>, [Rev1]},
@@ -188,31 +196,35 @@ test_purge_complete(Db) ->
{ok, Db2} = couch_db:reopen(Db),
Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 2}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
- ]},
+ Expect2 =
+ {ok, [
+ {meta, [{total, 2}, {offset, 0}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
+ ]},
?assertEqual(Expect2, Result2)
end).
-
test_purge_complete_for_docid_with_list(Db) ->
?_test(begin
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect, Result),
- FDI1 = couch_db:get_full_doc_info(Db, <<"1">>), Rev1 = get_rev(FDI1),
- FDI2 = couch_db:get_full_doc_info(Db, <<"2">>), Rev2 = get_rev(FDI2),
- FDI5 = couch_db:get_full_doc_info(Db, <<"5">>), Rev5 = get_rev(FDI5),
+ FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
+ Rev1 = get_rev(FDI1),
+ FDI2 = couch_db:get_full_doc_info(Db, <<"2">>),
+ Rev2 = get_rev(FDI2),
+ FDI5 = couch_db:get_full_doc_info(Db, <<"5">>),
+ Rev5 = get_rev(FDI5),
PurgeInfos = [
{<<"UUID1">>, "1", [Rev1]},
@@ -223,26 +235,27 @@ test_purge_complete_for_docid_with_list(Db) ->
{ok, Db2} = couch_db:reopen(Db),
Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 2}, {offset, 0}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
- ]},
+ Expect2 =
+ {ok, [
+ {meta, [{total, 2}, {offset, 0}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]}
+ ]},
?assertEqual(Expect2, Result2)
end).
-
test_purge_nochange(Db) ->
?_test(begin
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect, Result),
FDI1 = couch_db:get_full_doc_info(Db, <<"1">>),
@@ -255,40 +268,44 @@ test_purge_nochange(Db) ->
{ok, Db2} = couch_db:reopen(Db),
Result2 = run_query(Db2, []),
- Expect2 = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect2 =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect2, Result2)
end).
-
test_purge_index_reset(Db) ->
?_test(begin
ok = couch_db:set_purge_infos_limit(Db, 2),
{ok, Db1} = couch_db:reopen(Db),
Result = run_query(Db1, []),
- Expect = {ok, [
- {meta, [{total, 5}, {offset, 0}]},
- {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
- {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
- {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
- {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
- {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, [{total, 5}, {offset, 0}]},
+ {row, [{id, <<"1">>}, {key, 1}, {value, 1}]},
+ {row, [{id, <<"2">>}, {key, 2}, {value, 2}]},
+ {row, [{id, <<"3">>}, {key, 3}, {value, 3}]},
+ {row, [{id, <<"4">>}, {key, 4}, {value, 4}]},
+ {row, [{id, <<"5">>}, {key, 5}, {value, 5}]}
+ ]},
?assertEqual(Expect, Result),
- PurgeInfos = lists:map(fun(I) ->
- DocId = list_to_binary(integer_to_list(I)),
- FDI = couch_db:get_full_doc_info(Db, DocId),
- Rev = get_rev(FDI),
- {couch_uuids:random(), DocId, [Rev]}
- end, lists:seq(1, 5)),
+ PurgeInfos = lists:map(
+ fun(I) ->
+ DocId = list_to_binary(integer_to_list(I)),
+ FDI = couch_db:get_full_doc_info(Db, DocId),
+ Rev = get_rev(FDI),
+ {couch_uuids:random(), DocId, [Rev]}
+ end,
+ lists:seq(1, 5)
+ ),
{ok, _} = couch_db:purge_docs(Db1, PurgeInfos),
{ok, Db2} = couch_db:reopen(Db1),
@@ -309,22 +326,22 @@ test_purge_index_reset(Db) ->
{ok, Db3} = couch_db:reopen(Db2),
Result2 = run_query(Db3, []),
- Expect2 = {ok, [
- {meta, [{total, 0}, {offset, 0}]}
- ]},
+ Expect2 =
+ {ok, [
+ {meta, [{total, 0}, {offset, 0}]}
+ ]},
?assertEqual(Expect2, Result2),
% Assert that we had a reset
meck:wait(
- 1,
- couch_index_updater,
- handle_info,
- [{'EXIT', '_', {reset, '_'}}, '_'],
- 5000
- )
+ 1,
+ couch_index_updater,
+ handle_info,
+ [{'EXIT', '_', {reset, '_'}}, '_'],
+ 5000
+ )
end).
-
test_purge_compact_size_check(Db) ->
?_test(begin
DbName = couch_db:name(Db),
@@ -334,24 +351,28 @@ test_purge_compact_size_check(Db) ->
DiskSizeBefore = db_disk_size(DbName),
PurgedDocsNum = 150,
- IdsRevs = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(1, PurgedDocsNum)),
+ IdsRevs = lists:foldl(
+ fun(Id, CIdRevs) ->
+ Id1 = docid(Id),
+ FDI1 = couch_db:get_full_doc_info(Db1, Id1),
+ Rev1 = get_rev(FDI1),
+ UUID1 = uuid(Id),
+ [{UUID1, Id1, [Rev1]} | CIdRevs]
+ end,
+ [],
+ lists:seq(1, PurgedDocsNum)
+ ),
{ok, _} = couch_db:purge_docs(Db1, IdsRevs),
{ok, Db2} = couch_db:reopen(Db1),
_Result1 = run_query(Db2, []),
{ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
+ Db2,
+ 0,
+ fun fold_fun/2,
+ [],
+ []
+ ),
?assertEqual(PurgedDocsNum, length(PurgedIdRevs)),
config:set("couchdb", "file_compression", "snappy", false),
@@ -363,7 +384,6 @@ test_purge_compact_size_check(Db) ->
?assert(DiskSizeBefore > DiskSizeAfter)
end).
-
test_purge_compact_for_stale_purge_cp_without_client(Db) ->
?_test(begin
DbName = couch_db:name(Db),
@@ -378,23 +398,27 @@ test_purge_compact_for_stale_purge_cp_without_client(Db) ->
% purge 150 documents
PurgedDocsNum = 150,
- PurgeInfos = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(1, PurgedDocsNum)),
+ PurgeInfos = lists:foldl(
+ fun(Id, CIdRevs) ->
+ Id1 = docid(Id),
+ FDI1 = couch_db:get_full_doc_info(Db1, Id1),
+ Rev1 = get_rev(FDI1),
+ UUID1 = uuid(Id),
+ [{UUID1, Id1, [Rev1]} | CIdRevs]
+ end,
+ [],
+ lists:seq(1, PurgedDocsNum)
+ ),
{ok, _} = couch_db:purge_docs(Db1, PurgeInfos),
{ok, Db2} = couch_db:reopen(Db1),
{ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
+ Db2,
+ 0,
+ fun fold_fun/2,
+ [],
+ []
+ ),
?assertEqual(PurgedDocsNum, length(PurgedIdRevs)),
% run compaction to trigger pruning of purge tree
@@ -407,16 +431,15 @@ test_purge_compact_for_stale_purge_cp_without_client(Db) ->
{ok, Db4} = couch_db:reopen(Db3),
OldestPSeq = couch_db:get_oldest_purge_seq(Db4),
{ok, PurgedIdRevs2} = couch_db:fold_purge_infos(
- Db4,
- OldestPSeq - 1,
- fun fold_fun/2,
- [],
- []
- ),
+ Db4,
+ OldestPSeq - 1,
+ fun fold_fun/2,
+ [],
+ []
+ ),
?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2))
end).
-
test_purge_compact_for_stale_purge_cp_with_client(Db) ->
?_test(begin
DbName = couch_db:name(Db),
@@ -432,36 +455,44 @@ test_purge_compact_for_stale_purge_cp_with_client(Db) ->
% first purge 30 documents
PurgedDocsNum1 = 30,
- IdsRevs = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(1, PurgedDocsNum1)),
+ IdsRevs = lists:foldl(
+ fun(Id, CIdRevs) ->
+ Id1 = docid(Id),
+ FDI1 = couch_db:get_full_doc_info(Db1, Id1),
+ Rev1 = get_rev(FDI1),
+ UUID1 = uuid(Id),
+ [{UUID1, Id1, [Rev1]} | CIdRevs]
+ end,
+ [],
+ lists:seq(1, PurgedDocsNum1)
+ ),
{ok, _} = couch_db:purge_docs(Db1, IdsRevs),
{ok, Db2} = couch_db:reopen(Db1),
% run query again to reflect purge request to mrview
_Result1 = run_query(Db2, []),
{ok, PurgedIdRevs} = couch_db:fold_purge_infos(
- Db2,
- 0,
- fun fold_fun/2,
- [],
- []
- ),
+ Db2,
+ 0,
+ fun fold_fun/2,
+ [],
+ []
+ ),
?assertEqual(PurgedDocsNum1, length(PurgedIdRevs)),
% then purge 120 documents
PurgedDocsNum2 = 150,
- IdsRevs2 = lists:foldl(fun(Id, CIdRevs) ->
- Id1 = docid(Id),
- FDI1 = couch_db:get_full_doc_info(Db1, Id1),
- Rev1 = get_rev(FDI1),
- UUID1 = uuid(Id),
- [{UUID1, Id1, [Rev1]} | CIdRevs]
- end, [], lists:seq(PurgedDocsNum1 + 1, PurgedDocsNum2)),
+ IdsRevs2 = lists:foldl(
+ fun(Id, CIdRevs) ->
+ Id1 = docid(Id),
+ FDI1 = couch_db:get_full_doc_info(Db1, Id1),
+ Rev1 = get_rev(FDI1),
+ UUID1 = uuid(Id),
+ [{UUID1, Id1, [Rev1]} | CIdRevs]
+ end,
+ [],
+ lists:seq(PurgedDocsNum1 + 1, PurgedDocsNum2)
+ ),
{ok, _} = couch_db:purge_docs(Db2, IdsRevs2),
% run compaction to trigger pruning of purge tree
@@ -475,16 +506,15 @@ test_purge_compact_for_stale_purge_cp_with_client(Db) ->
{ok, Db4} = couch_db:reopen(Db3),
OldestPSeq = couch_db:get_oldest_purge_seq(Db4),
{ok, PurgedIdRevs2} = couch_db:fold_purge_infos(
- Db4,
- OldestPSeq - 1,
- fun fold_fun/2,
- [],
- []
- ),
+ Db4,
+ OldestPSeq - 1,
+ fun fold_fun/2,
+ [],
+ []
+ ),
?assertEqual(PurgedDocsNum2 - PurgedDocsNum1, length(PurgedIdRevs2))
end).
-
get_local_purge_doc(Db) ->
{ok, DDoc} = couch_db:open_doc(Db, <<"_design/bar">>, []),
{ok, IdxState} = couch_mrview_util:ddoc_to_mrst(couch_db:name(Db), DDoc),
@@ -493,48 +523,50 @@ get_local_purge_doc(Db) ->
DocId = couch_mrview_util:get_local_purge_doc_id(HexSig),
couch_db:open_doc(Db, DocId, []).
-
run_query(Db, Opts) ->
couch_mrview:query_view(Db, <<"_design/bar">>, <<"baz">>, Opts).
-
save_docs(Db, JsonDocs, Options) ->
- Docs = lists:map(fun(JDoc) ->
- couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
- end, JsonDocs),
+ Docs = lists:map(
+ fun(JDoc) ->
+ couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
+ end,
+ JsonDocs
+ ),
Opts = [full_commit | Options],
case lists:member(replicated_changes, Options) of
true ->
{ok, []} = couch_db:update_docs(
- Db, Docs, Opts, replicated_changes),
- {ok, lists:map(fun(Doc) ->
- {Pos, [RevId | _]} = Doc#doc.revs,
- {Pos, RevId}
- end, Docs)};
+ Db, Docs, Opts, replicated_changes
+ ),
+ {ok,
+ lists:map(
+ fun(Doc) ->
+ {Pos, [RevId | _]} = Doc#doc.revs,
+ {Pos, RevId}
+ end,
+ Docs
+ )};
false ->
{ok, Resp} = couch_db:update_docs(Db, Docs, Opts),
{ok, [Rev || {ok, Rev} <- Resp]}
end.
-
get_rev(#full_doc_info{} = FDI) ->
#doc_info{
revs = [#rev_info{} = PrevRev | _]
} = couch_doc:to_doc_info(FDI),
PrevRev#rev_info.rev.
-
db_disk_size(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
{ok, Info} = couch_db:get_db_info(Db),
ok = couch_db:close(Db),
active_size(Info).
-
active_size(Info) ->
couch_util:get_nested_json_value({Info}, [sizes, active]).
-
wait_compaction(DbName, Kind, Line) ->
WaitFun = fun() ->
case is_compaction_running(DbName) of
@@ -544,32 +576,32 @@ wait_compaction(DbName, Kind, Line) ->
end,
case test_util:wait(WaitFun, 10000) of
timeout ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
{line, Line},
- {reason, "Timeout waiting for "
- ++ Kind
- ++ " database compaction"}]});
+ {reason,
+ "Timeout waiting for " ++
+ Kind ++
+ " database compaction"}
+ ]}
+ );
_ ->
ok
end.
-
is_compaction_running(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
{ok, DbInfo} = couch_db:get_db_info(Db),
couch_db:close(Db),
couch_util:get_value(compact_running, DbInfo).
-
fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
{ok, [{Id, Revs} | Acc]}.
-
docid(I) ->
list_to_binary(integer_to_list(I)).
-
uuid(I) ->
Str = io_lib:format("UUID~4..0b", [I]),
iolist_to_binary(Str).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
index b83686113..b6042b6c7 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_red_views_tests.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 1000).
-
setup() ->
{ok, Db} = couch_mrview_test_util:init_db(?tempdb(), red),
Db.
@@ -27,16 +26,17 @@ teardown(Db) ->
couch_server:delete(couch_db:name(Db), [?ADMIN_CTX]),
ok.
-
reduce_views_test_() ->
{
"Reduce views",
{
setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_reduce_basic/1,
fun should_reduce_key_range/1,
@@ -47,49 +47,51 @@ reduce_views_test_() ->
}
}.
-
should_reduce_basic(Db) ->
Result = run_query(Db, []),
- Expect = {ok, [
- {meta, []},
- {row, [{key, null}, {value, 55}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, []},
+ {row, [{key, null}, {value, 55}]}
+ ]},
?_assertEqual(Expect, Result).
should_reduce_key_range(Db) ->
Result = run_query(Db, [{start_key, [0, 2]}, {end_key, [0, 4]}]),
- Expect = {ok, [
- {meta, []},
- {row, [{key, null}, {value, 6}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, []},
+ {row, [{key, null}, {value, 6}]}
+ ]},
?_assertEqual(Expect, Result).
should_reduce_with_group_level(Db) ->
Result = run_query(Db, [{group_level, 1}]),
- Expect = {ok, [
- {meta, []},
- {row, [{key, [0]}, {value, 30}]},
- {row, [{key, [1]}, {value, 25}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, []},
+ {row, [{key, [0]}, {value, 30}]},
+ {row, [{key, [1]}, {value, 25}]}
+ ]},
?_assertEqual(Expect, Result).
should_reduce_with_group_exact(Db) ->
Result = run_query(Db, [{group_level, exact}]),
- Expect = {ok, [
- {meta, []},
- {row, [{key, [0, 2]}, {value, 2}]},
- {row, [{key, [0, 4]}, {value, 4}]},
- {row, [{key, [0, 6]}, {value, 6}]},
- {row, [{key, [0, 8]}, {value, 8}]},
- {row, [{key, [0, 10]}, {value, 10}]},
- {row, [{key, [1, 1]}, {value, 1}]},
- {row, [{key, [1, 3]}, {value, 3}]},
- {row, [{key, [1, 5]}, {value, 5}]},
- {row, [{key, [1, 7]}, {value, 7}]},
- {row, [{key, [1, 9]}, {value, 9}]}
- ]},
+ Expect =
+ {ok, [
+ {meta, []},
+ {row, [{key, [0, 2]}, {value, 2}]},
+ {row, [{key, [0, 4]}, {value, 4}]},
+ {row, [{key, [0, 6]}, {value, 6}]},
+ {row, [{key, [0, 8]}, {value, 8}]},
+ {row, [{key, [0, 10]}, {value, 10}]},
+ {row, [{key, [1, 1]}, {value, 1}]},
+ {row, [{key, [1, 3]}, {value, 3}]},
+ {row, [{key, [1, 5]}, {value, 5}]},
+ {row, [{key, [1, 7]}, {value, 7}]},
+ {row, [{key, [1, 9]}, {value, 9}]}
+ ]},
?_assertEqual(Expect, Result).
-
run_query(Db, Opts) ->
couch_mrview:query_view(Db, <<"_design/red">>, <<"baz">>, Opts).
diff --git a/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
index 7046c9bb2..a495fd82c 100644
--- a/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
+++ b/src/couch_mrview/test/eunit/couch_mrview_util_tests.erl
@@ -15,25 +15,23 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
-
couch_mrview_util_test_() ->
[
- ?_assertEqual(0, validate_group_level(undefined, undefined)),
- ?_assertEqual(exact, validate_group_level(true, undefined)),
- ?_assertEqual(0, validate_group_level(false, undefined)),
- ?_assertEqual(1, validate_group_level(undefined, 1)),
- ?_assertEqual(0, validate_group_level(true, 0)),
- ?_assertEqual(0, validate_group_level(undefined, 0)),
- ?_assertEqual(1, validate_group_level(true, 1)),
- ?_assertEqual(0, validate_group_level(false, 0)),
- ?_assertThrow({query_parse_error,
- <<"Can't specify group=false and group_level>0 at the same time">>},
- validate_group_level(false,1))
+ ?_assertEqual(0, validate_group_level(undefined, undefined)),
+ ?_assertEqual(exact, validate_group_level(true, undefined)),
+ ?_assertEqual(0, validate_group_level(false, undefined)),
+ ?_assertEqual(1, validate_group_level(undefined, 1)),
+ ?_assertEqual(0, validate_group_level(true, 0)),
+ ?_assertEqual(0, validate_group_level(undefined, 0)),
+ ?_assertEqual(1, validate_group_level(true, 1)),
+ ?_assertEqual(0, validate_group_level(false, 0)),
+ ?_assertThrow(
+ {query_parse_error, <<"Can't specify group=false and group_level>0 at the same time">>},
+ validate_group_level(false, 1)
+ )
].
validate_group_level(Group, GroupLevel) ->
- Args0 = #mrargs{group=Group, group_level=GroupLevel, view_type=red},
+ Args0 = #mrargs{group = Group, group_level = GroupLevel, view_type = red},
Args1 = couch_mrview_util:validate_args(Args0),
Args1#mrargs.group_level.
-
diff --git a/src/couch_peruser/src/couch_peruser.erl b/src/couch_peruser/src/couch_peruser.erl
index 886fb4f6e..c87ffb2fb 100644
--- a/src/couch_peruser/src/couch_peruser.erl
+++ b/src/couch_peruser/src/couch_peruser.erl
@@ -18,8 +18,15 @@
-include_lib("mem3/include/mem3.hrl").
% gen_server callbacks
--export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+-export([
+ start_link/0,
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-export([init_changes_handler/1, changes_handler/3]).
@@ -52,8 +59,10 @@
-define(DEFAULT_USERDB_PREFIX, "userdb-").
-define(RELISTEN_DELAY, 5000).
--define(DEFAULT_QUIET_PERIOD, 60). % seconds
--define(DEFAULT_START_PERIOD, 5). % seconds
+% seconds
+-define(DEFAULT_QUIET_PERIOD, 60).
+% seconds
+-define(DEFAULT_START_PERIOD, 5).
%%
%% Please leave in the commented-out couch_log:debug calls, thanks! — Jan
@@ -66,267 +75,346 @@ start_link() ->
init_state() ->
couch_log:debug("peruser: starting on node ~p in pid ~p", [node(), self()]),
case config:get_boolean("couch_peruser", "enable", false) of
- false ->
- couch_log:debug("peruser: disabled on node ~p", [node()]),
- #state{};
- true ->
- couch_log:debug("peruser: enabled on node ~p", [node()]),
- DbName = ?l2b(config:get(
- "couch_httpd_auth", "authentication_db", "_users")),
- DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
- Q = config:get_integer("couch_peruser", "q", 1),
- Prefix = config:get("couch_peruser", "database_prefix", ?DEFAULT_USERDB_PREFIX),
- case couch_db:validate_dbname(Prefix) of
- ok -> ok;
- Error ->
- couch_log:error("couch_peruser can't proceed as illegal database prefix ~p.
- Error: ~p", [Prefix, Error]),
- throw(Error)
- end,
-
-
- % set up cluster-stable listener
- Period = abs(config:get_integer("couch_peruser", "cluster_quiet_period",
- ?DEFAULT_QUIET_PERIOD)),
- StartPeriod = abs(config:get_integer("couch_peruser",
- "cluster_start_period", ?DEFAULT_START_PERIOD)),
-
- {ok, Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(), StartPeriod,
- Period),
-
- #state{
- parent = self(),
- db_name = DbName,
- delete_dbs = DeleteDbs,
- mem3_cluster_pid = Mem3Cluster,
- cluster_stable = false,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = ?l2b(Prefix)
- }
+ false ->
+ couch_log:debug("peruser: disabled on node ~p", [node()]),
+ #state{};
+ true ->
+ couch_log:debug("peruser: enabled on node ~p", [node()]),
+ DbName = ?l2b(
+ config:get(
+ "couch_httpd_auth", "authentication_db", "_users"
+ )
+ ),
+ DeleteDbs = config:get_boolean("couch_peruser", "delete_dbs", false),
+ Q = config:get_integer("couch_peruser", "q", 1),
+ Prefix = config:get("couch_peruser", "database_prefix", ?DEFAULT_USERDB_PREFIX),
+ case couch_db:validate_dbname(Prefix) of
+ ok ->
+ ok;
+ Error ->
+ couch_log:error(
+ "couch_peruser can't proceed as illegal database prefix ~p.\n"
+ " Error: ~p",
+ [Prefix, Error]
+ ),
+ throw(Error)
+ end,
+
+ % set up cluster-stable listener
+ Period = abs(
+ config:get_integer(
+ "couch_peruser",
+ "cluster_quiet_period",
+ ?DEFAULT_QUIET_PERIOD
+ )
+ ),
+ StartPeriod = abs(
+ config:get_integer(
+ "couch_peruser",
+ "cluster_start_period",
+ ?DEFAULT_START_PERIOD
+ )
+ ),
+
+ {ok, Mem3Cluster} = mem3_cluster:start_link(
+ ?MODULE,
+ self(),
+ StartPeriod,
+ Period
+ ),
+
+ #state{
+ parent = self(),
+ db_name = DbName,
+ delete_dbs = DeleteDbs,
+ mem3_cluster_pid = Mem3Cluster,
+ cluster_stable = false,
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = ?l2b(Prefix)
+ }
end.
-
-spec start_listening(State :: #state{}) -> #state{} | ok.
-start_listening(#state{states=ChangesStates}=State)
- when length(ChangesStates) > 0 ->
+start_listening(#state{states = ChangesStates} = State) when
+ length(ChangesStates) > 0
+->
% couch_log:debug("peruser: start_listening() already run on node ~p in pid ~p", [node(), self()]),
State;
-start_listening(#state{db_name=DbName, delete_dbs=DeleteDbs,
- q_for_peruser_db = Q, peruser_dbname_prefix = Prefix} = State) ->
+start_listening(
+ #state{
+ db_name = DbName,
+ delete_dbs = DeleteDbs,
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = Prefix
+ } = State
+) ->
% couch_log:debug("peruser: start_listening() on node ~p", [node()]),
try
- States = lists:map(fun (A) ->
- S = #changes_state{
- parent = State#state.parent,
- db_name = A#shard.name,
- delete_dbs = DeleteDbs,
- q_for_peruser_db = Q,
- peruser_dbname_prefix = Prefix
- },
- {Pid, Ref} = spawn_opt(
- ?MODULE, init_changes_handler, [S], [link, monitor]),
- S#changes_state{changes_pid=Pid, changes_ref=Ref}
- end, mem3:local_shards(DbName)),
+ States = lists:map(
+ fun(A) ->
+ S = #changes_state{
+ parent = State#state.parent,
+ db_name = A#shard.name,
+ delete_dbs = DeleteDbs,
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = Prefix
+ },
+ {Pid, Ref} = spawn_opt(
+ ?MODULE, init_changes_handler, [S], [link, monitor]
+ ),
+ S#changes_state{changes_pid = Pid, changes_ref = Ref}
+ end,
+ mem3:local_shards(DbName)
+ ),
% couch_log:debug("peruser: start_listening() States ~p", [States]),
State#state{states = States, cluster_stable = true}
- catch error:database_does_not_exist ->
- couch_log:warning("couch_peruser can't proceed as underlying database (~s) is missing, disables itself.", [DbName]),
- config:set("couch_peruser", "enable", "false", lists:concat([binary_to_list(DbName), " is missing"]))
+ catch
+ error:database_does_not_exist ->
+ couch_log:warning(
+ "couch_peruser can't proceed as underlying database (~s) is missing, disables itself.",
+ [DbName]
+ ),
+ config:set(
+ "couch_peruser",
+ "enable",
+ "false",
+ lists:concat([binary_to_list(DbName), " is missing"])
+ )
end.
-spec init_changes_handler(ChangesState :: #changes_state{}) -> ok.
-init_changes_handler(#changes_state{db_name=DbName} = ChangesState) ->
+init_changes_handler(#changes_state{db_name = DbName} = ChangesState) ->
% couch_log:debug("peruser: init_changes_handler() on DbName ~p", [DbName]),
try
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX, sys_db]),
FunAcc = {fun ?MODULE:changes_handler/3, ChangesState},
(couch_changes:handle_db_changes(
- #changes_args{feed="continuous", timeout=infinity},
- {json_req, null},
- Db))(FunAcc)
- catch error:database_does_not_exist ->
- ok
+ #changes_args{feed = "continuous", timeout = infinity},
+ {json_req, null},
+ Db
+ ))(
+ FunAcc
+ )
+ catch
+ error:database_does_not_exist ->
+ ok
end.
-type db_change() :: {atom(), tuple(), binary()}.
-spec changes_handler(
Change :: db_change(),
ResultType :: any(),
- ChangesState :: #changes_state{}) -> #changes_state{}.
+ ChangesState :: #changes_state{}
+) -> #changes_state{}.
changes_handler(
{change, {Doc}, _Prepend},
_ResType,
- ChangesState=#changes_state{db_name=DbName, q_for_peruser_db = Q,
- peruser_dbname_prefix = Prefix}) ->
+ ChangesState = #changes_state{
+ db_name = DbName,
+ q_for_peruser_db = Q,
+ peruser_dbname_prefix = Prefix
+ }
+) ->
% couch_log:debug("peruser: changes_handler() on DbName/Doc ~p/~p", [DbName, Doc]),
case couch_util:get_value(<<"id">>, Doc) of
- <<"org.couchdb.user:",User/binary>> = DocId ->
- case should_handle_doc(DbName, DocId) of
- true ->
- case couch_util:get_value(<<"deleted">>, Doc, false) of
- false ->
- UserDb = ensure_user_db(Prefix, User, Q),
- ok = ensure_security(User, UserDb, fun add_user/3),
- ChangesState;
- true ->
- case ChangesState#changes_state.delete_dbs of
+ <<"org.couchdb.user:", User/binary>> = DocId ->
+ case should_handle_doc(DbName, DocId) of
true ->
- _UserDb = delete_user_db(Prefix, User),
- ChangesState;
+ case couch_util:get_value(<<"deleted">>, Doc, false) of
+ false ->
+ UserDb = ensure_user_db(Prefix, User, Q),
+ ok = ensure_security(User, UserDb, fun add_user/3),
+ ChangesState;
+ true ->
+ case ChangesState#changes_state.delete_dbs of
+ true ->
+ _UserDb = delete_user_db(Prefix, User),
+ ChangesState;
+ false ->
+ UserDb = user_db_name(Prefix, User),
+ ok = ensure_security(User, UserDb, fun remove_user/3),
+ ChangesState
+ end
+ end;
false ->
- UserDb = user_db_name(Prefix, User),
- ok = ensure_security(User, UserDb, fun remove_user/3),
ChangesState
- end
end;
- false ->
+ _ ->
ChangesState
- end;
- _ ->
- ChangesState
end;
changes_handler(_Event, _ResType, ChangesState) ->
ChangesState.
--spec should_handle_doc(ShardName :: binary(), DocId::binary()) -> boolean().
+-spec should_handle_doc(ShardName :: binary(), DocId :: binary()) -> boolean().
should_handle_doc(ShardName, DocId) ->
case is_stable() of
- false ->
- % when the cluster is unstable, we have already stopped all Listeners
- % the next stable event will restart all listeners and pick up this
- % doc change
- couch_log:debug("peruser: skipping, cluster unstable ~s/~s",
- [ShardName, DocId]),
- false;
- true ->
- should_handle_doc_int(ShardName, DocId)
+ false ->
+ % when the cluster is unstable, we have already stopped all Listeners
+ % the next stable event will restart all listeners and pick up this
+ % doc change
+ couch_log:debug(
+ "peruser: skipping, cluster unstable ~s/~s",
+ [ShardName, DocId]
+ ),
+ false;
+ true ->
+ should_handle_doc_int(ShardName, DocId)
end.
-spec should_handle_doc_int(
ShardName :: binary(),
- DocId :: binary()) -> boolean().
+ DocId :: binary()
+) -> boolean().
should_handle_doc_int(ShardName, DocId) ->
DbName = mem3:dbname(ShardName),
Live = [erlang:node() | erlang:nodes()],
Shards = mem3:shards(DbName, DocId),
- Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
+ Nodes = [N || #shard{node = N} <- Shards, lists:member(N, Live)],
case mem3:owner(DbName, DocId, Nodes) of
- ThisNode when ThisNode =:= node() ->
- couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
- true; % do the database action
- _OtherNode ->
- couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
- false
- end.
-
--spec delete_user_db(Prefix:: binary(), User :: binary()) -> binary().
+ ThisNode when ThisNode =:= node() ->
+ couch_log:debug("peruser: handling ~s/~s", [DbName, DocId]),
+ % do the database action
+ true;
+ _OtherNode ->
+ couch_log:debug("peruser: skipping ~s/~s", [DbName, DocId]),
+ false
+ end.
+
+-spec delete_user_db(Prefix :: binary(), User :: binary()) -> binary().
delete_user_db(Prefix, User) ->
UserDb = user_db_name(Prefix, User),
try
case fabric:delete_db(UserDb, [?ADMIN_CTX]) of
- ok -> ok;
- accepted -> ok
+ ok -> ok;
+ accepted -> ok
end
- catch error:database_does_not_exist ->
- ok
+ catch
+ error:database_does_not_exist ->
+ ok
end,
UserDb.
--spec ensure_user_db(Prefix:: binary(), User :: binary(), Q :: integer()) -> binary().
+-spec ensure_user_db(Prefix :: binary(), User :: binary(), Q :: integer()) -> binary().
ensure_user_db(Prefix, User, Q) ->
UserDb = user_db_name(Prefix, User),
try
{ok, _DbInfo} = fabric:get_db_info(UserDb)
- catch error:database_does_not_exist ->
- case fabric:create_db(UserDb, [?ADMIN_CTX, {q, integer_to_list(Q)}]) of
- {error, file_exists} -> ok;
- ok -> ok;
- accepted -> ok
- end
+ catch
+ error:database_does_not_exist ->
+ case fabric:create_db(UserDb, [?ADMIN_CTX, {q, integer_to_list(Q)}]) of
+ {error, file_exists} -> ok;
+ ok -> ok;
+ accepted -> ok
+ end
end,
UserDb.
-spec add_user(
User :: binary(),
Properties :: tuple(),
- Acc :: tuple()) -> tuple().
+ Acc :: tuple()
+) -> tuple().
add_user(User, Prop, {Modified, SecProps}) ->
{PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
Names = couch_util:get_value(<<"names">>, PropValue, []),
case lists:member(User, Names) of
- true ->
- {Modified, SecProps};
- false ->
- {true,
- lists:keystore(
- Prop, 1, SecProps,
- {Prop,
- {lists:keystore(
- <<"names">>, 1, PropValue,
- {<<"names">>, [User | Names]})}})}
+ true ->
+ {Modified, SecProps};
+ false ->
+ {true,
+ lists:keystore(
+ Prop,
+ 1,
+ SecProps,
+ {Prop,
+ {lists:keystore(
+ <<"names">>,
+ 1,
+ PropValue,
+ {<<"names">>, [User | Names]}
+ )}}
+ )}
end.
-spec remove_user(
User :: binary(),
Properties :: tuple(),
- Acc :: tuple()) -> tuple().
+ Acc :: tuple()
+) -> tuple().
remove_user(User, Prop, {Modified, SecProps}) ->
{PropValue} = couch_util:get_value(Prop, SecProps, {[]}),
Names = couch_util:get_value(<<"names">>, PropValue, []),
case lists:member(User, Names) of
- false ->
- {Modified, SecProps};
- true ->
- {true,
- lists:keystore(
- Prop, 1, SecProps,
- {Prop,
- {lists:keystore(
- <<"names">>, 1, PropValue,
- {<<"names">>, lists:delete(User, Names)})}})}
+ false ->
+ {Modified, SecProps};
+ true ->
+ {true,
+ lists:keystore(
+ Prop,
+ 1,
+ SecProps,
+ {Prop,
+ {lists:keystore(
+ <<"names">>,
+ 1,
+ PropValue,
+ {<<"names">>, lists:delete(User, Names)}
+ )}}
+ )}
end.
-spec ensure_security(
User :: binary(),
UserDb :: binary(),
- TransformFun :: fun()) -> ok.
+ TransformFun :: fun()
+) -> ok.
ensure_security(User, UserDb, TransformFun) ->
case fabric:get_all_security(UserDb, [?ADMIN_CTX]) of
- {error, no_majority} ->
- % TODO: make sure this is still true: single node, ignore
- ok;
- {ok, Shards} ->
- {_ShardInfo, {SecProps}} = hd(Shards),
- % assert that shards have the same security object
- true = lists:all(fun ({_, {SecProps1}}) ->
- SecProps =:= SecProps1
- end, Shards),
- case lists:foldl(
- fun (Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
- {false, SecProps},
- [<<"admins">>, <<"members">>]) of
- {false, _} ->
+ {error, no_majority} ->
+ % TODO: make sure this is still true: single node, ignore
ok;
- {true, SecProps1} ->
- ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
- end
+ {ok, Shards} ->
+ {_ShardInfo, {SecProps}} = hd(Shards),
+ % assert that shards have the same security object
+ true = lists:all(
+ fun({_, {SecProps1}}) ->
+ SecProps =:= SecProps1
+ end,
+ Shards
+ ),
+ case
+ lists:foldl(
+ fun(Prop, SAcc) -> TransformFun(User, Prop, SAcc) end,
+ {false, SecProps},
+ [<<"admins">>, <<"members">>]
+ )
+ of
+ {false, _} ->
+ ok;
+ {true, SecProps1} ->
+ ok = fabric:set_security(UserDb, {SecProps1}, [?ADMIN_CTX])
+ end
end.
-spec user_db_name(Prefix :: binary(), User :: binary()) -> binary().
user_db_name(Prefix, User) ->
HexUser = list_to_binary(
- [string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]),
- <<Prefix/binary,HexUser/binary>>.
+ [string:to_lower(integer_to_list(X, 16)) || <<X>> <= User]
+ ),
+ <<Prefix/binary, HexUser/binary>>.
-spec exit_changes(State :: #state{}) -> ok.
exit_changes(State) ->
- lists:foreach(fun (ChangesState) ->
- demonitor(ChangesState#changes_state.changes_ref, [flush]),
- unlink(ChangesState#changes_state.changes_pid),
- exit(ChangesState#changes_state.changes_pid, kill)
- end, State#state.states).
+ lists:foreach(
+ fun(ChangesState) ->
+ demonitor(ChangesState#changes_state.changes_ref, [flush]),
+ unlink(ChangesState#changes_state.changes_pid),
+ exit(ChangesState#changes_state.changes_pid, kill)
+ end,
+ State#state.states
+ ).
-spec is_stable() -> true | false.
is_stable() ->
@@ -364,7 +452,6 @@ handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) ->
handle_call(_Msg, _From, State) ->
{reply, error, State}.
-
handle_cast(update_config, State) when State#state.states =/= undefined ->
exit_changes(State),
{noreply, init_state()};
@@ -386,10 +473,16 @@ handle_info({'DOWN', _Ref, _, _, _Reason}, State) ->
{stop, normal, State};
handle_info({config_change, "couch_peruser", _, _, _}, State) ->
handle_cast(update_config, State);
-handle_info({
- config_change,
- "couch_httpd_auth",
- "authentication_db", _, _}, State) ->
+handle_info(
+ {
+ config_change,
+ "couch_httpd_auth",
+ "authentication_db",
+ _,
+ _
+ },
+ State
+) ->
handle_cast(update_config, State);
handle_info({gen_event_EXIT, _Handler, _Reason}, State) ->
erlang:send_after(?RELISTEN_DELAY, self(), restart_config_listener),
diff --git a/src/couch_peruser/src/couch_peruser_app.erl b/src/couch_peruser/src/couch_peruser_app.erl
index 770c08237..ab0e04444 100644
--- a/src/couch_peruser/src/couch_peruser_app.erl
+++ b/src/couch_peruser/src/couch_peruser_app.erl
@@ -16,11 +16,8 @@
-export([start/2, stop/1]).
-
start(_Type, _StartArgs) ->
couch_peruser_sup:start_link().
-
stop(_State) ->
ok.
-
diff --git a/src/couch_peruser/src/couch_peruser_sup.erl b/src/couch_peruser/src/couch_peruser_sup.erl
index b89a36324..a9a789ce6 100644
--- a/src/couch_peruser/src/couch_peruser_sup.erl
+++ b/src/couch_peruser/src/couch_peruser_sup.erl
@@ -19,11 +19,8 @@
%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(couch_peruser, worker)]}}.
-
+ {ok, {{one_for_one, 5, 10}, [?CHILD(couch_peruser, worker)]}}.
diff --git a/src/couch_peruser/test/eunit/couch_peruser_test.erl b/src/couch_peruser/test/eunit/couch_peruser_test.erl
index 5ddbe7a5a..3ba344e9d 100644
--- a/src/couch_peruser/test/eunit/couch_peruser_test.erl
+++ b/src/couch_peruser/test/eunit/couch_peruser_test.erl
@@ -24,7 +24,7 @@ setup_all() ->
TestCtx = test_util:start_couch([chttpd]),
ok = application:start(couch_peruser),
Hashed = couch_passwords:hash_admin_password(?ADMIN_PASSWORD),
- ok = config:set("admins", ?ADMIN_USERNAME, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?ADMIN_USERNAME, ?b2l(Hashed), _Persist = false),
TestCtx.
teardown_all(TestCtx) ->
@@ -52,18 +52,21 @@ teardown(TestAuthDb) ->
set_config("cluster", "n", "3"),
do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
do_request(delete, get_base_url() ++ "/" ++ ?b2l(TestAuthDb)),
- lists:foreach(fun(DbName) ->
- case binary:part(DbName, 0, 7) of
- <<"userdb-">> -> delete_db(DbName);
- _ -> ok
- end
- end, all_dbs()).
+ lists:foreach(
+ fun(DbName) ->
+ case binary:part(DbName, 0, 7) of
+ <<"userdb-">> -> delete_db(DbName);
+ _ -> ok
+ end
+ end,
+ all_dbs()
+ ).
set_config(Section, Key, Value) ->
- ok = config:set(Section, Key, Value, _Persist=false).
+ ok = config:set(Section, Key, Value, _Persist = false).
delete_config(Section, Key) ->
- ok = config:delete(Section, Key, _Persist=false).
+ ok = config:delete(Section, Key, _Persist = false).
do_request(Method, Url) ->
Headers = [{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}}],
@@ -72,12 +75,14 @@ do_request(Method, Url) ->
do_request(Method, Url, Body) ->
Headers = [
{basic_auth, {?ADMIN_USERNAME, ?ADMIN_PASSWORD}},
- {"Content-Type", "application/json"}],
+ {"Content-Type", "application/json"}
+ ],
{ok, _, _, _} = test_request:request(Method, Url, Headers, Body).
do_anon_request(Method, Url, Body) ->
Headers = [
- {"Content-Type", "application/json"}],
+ {"Content-Type", "application/json"}
+ ],
{ok, _, _, _} = test_request:request(Method, Url, Headers, Body).
create_db(DbName) ->
@@ -87,22 +92,31 @@ delete_db(DbName) ->
{ok, _, _, _} = do_request(delete, get_cluster_base_url() ++ "/" ++ ?b2l(DbName)).
create_user(AuthDb, Name) ->
- Body = "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
+ Body =
+ "{\"name\":\"" ++ Name ++
+ "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]),
+ get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name
+ ]),
{ok, 201, _, _} = do_request(put, Url, Body).
create_anon_user(AuthDb, Name) ->
- Body = "{\"name\":\"" ++ Name ++
- "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
+ Body =
+ "{\"name\":\"" ++ Name ++
+ "\",\"type\":\"user\",\"roles\":[],\"password\":\"secret\"}",
Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name]),
+ get_cluster_base_url(), "/", ?b2l(AuthDb), "/org.couchdb.user:", Name
+ ]),
{ok, 201, _, _} = do_anon_request(put, Url, Body).
delete_user(AuthDb, Name) ->
- Url = lists:concat([get_cluster_base_url(), "/", ?b2l(AuthDb),
- "/org.couchdb.user:", Name]),
+ Url = lists:concat([
+ get_cluster_base_url(),
+ "/",
+ ?b2l(AuthDb),
+ "/org.couchdb.user:",
+ Name
+ ]),
{ok, 200, _, Body} = do_request(get, Url),
{DocProps} = jiffy:decode(Body),
Rev = proplists:get_value(<<"_rev">>, DocProps),
@@ -110,7 +124,8 @@ delete_user(AuthDb, Name) ->
get_security(DbName) ->
Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]),
+ get_cluster_base_url(), "/", ?b2l(DbName), "/_security"
+ ]),
test_util:wait(fun() ->
{ok, 200, _, Body} = do_request(get, Url),
case jiffy:decode(Body) of
@@ -121,7 +136,8 @@ get_security(DbName) ->
set_security(DbName, SecurityProperties) ->
Url = lists:concat([
- get_cluster_base_url(), "/", ?b2l(DbName), "/_security"]),
+ get_cluster_base_url(), "/", ?b2l(DbName), "/_security"
+ ]),
Body = jiffy:encode({SecurityProperties}),
{ok, 200, _, _} = do_request(put, Url, Body).
@@ -143,7 +159,6 @@ get_cluster_base_url() ->
Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)),
"http://" ++ Addr ++ ":" ++ Port.
-
should_create_user_db_with_default(TestAuthDb) ->
?_test(begin
create_user(TestAuthDb, "foo"),
@@ -288,42 +303,42 @@ should_delete_user_db_with_custom_special_prefix(TestAuthDb) ->
end).
should_reflect_config_changes(TestAuthDb) ->
- {timeout, 10000, ?_test(begin
- User = "baz",
- UserDbName = <<"userdb-62617a">>,
- set_config("couch_peruser", "delete_dbs", "true"),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate1 = lists:member(UserDbName, all_dbs()),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- wait_for_db_delete(UserDbName),
- AfterDelete1 = lists:member(UserDbName, all_dbs()),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- AfterCreate2 = lists:member(UserDbName, all_dbs()),
- set_config("couch_peruser", "delete_dbs", "false"),
- delete_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterDelete2 = lists:member(UserDbName, all_dbs()),
- create_user(TestAuthDb, User),
- wait_for_db_create(UserDbName),
- set_config("couch_peruser", "delete_dbs", "true"),
- delete_user(TestAuthDb, User),
- wait_for_db_delete(UserDbName),
- AfterDelete3 = lists:member(UserDbName, all_dbs()),
- set_config("couch_peruser", "enable", "false"),
- create_user(TestAuthDb, User),
- timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
- AfterCreate3 = lists:member(UserDbName, all_dbs()),
- ?assert(AfterCreate1),
- ?assertNot(AfterDelete1),
- ?assert(AfterCreate2),
- ?assert(AfterDelete2),
- ?assertNot(AfterDelete3),
- ?assertNot(AfterCreate3)
- end)}.
-
+ {timeout, 10000,
+ ?_test(begin
+ User = "baz",
+ UserDbName = <<"userdb-62617a">>,
+ set_config("couch_peruser", "delete_dbs", "true"),
+ create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
+ AfterCreate1 = lists:member(UserDbName, all_dbs()),
+ delete_user(TestAuthDb, User),
+ timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+ wait_for_db_delete(UserDbName),
+ AfterDelete1 = lists:member(UserDbName, all_dbs()),
+ create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
+ AfterCreate2 = lists:member(UserDbName, all_dbs()),
+ set_config("couch_peruser", "delete_dbs", "false"),
+ delete_user(TestAuthDb, User),
+ timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+ AfterDelete2 = lists:member(UserDbName, all_dbs()),
+ create_user(TestAuthDb, User),
+ wait_for_db_create(UserDbName),
+ set_config("couch_peruser", "delete_dbs", "true"),
+ delete_user(TestAuthDb, User),
+ wait_for_db_delete(UserDbName),
+ AfterDelete3 = lists:member(UserDbName, all_dbs()),
+ set_config("couch_peruser", "enable", "false"),
+ create_user(TestAuthDb, User),
+ timer:sleep(?WAIT_FOR_USER_DELETE_TIMEOUT),
+ AfterCreate3 = lists:member(UserDbName, all_dbs()),
+ ?assert(AfterCreate1),
+ ?assertNot(AfterDelete1),
+ ?assert(AfterCreate2),
+ ?assert(AfterDelete2),
+ ?assertNot(AfterDelete3),
+ ?assertNot(AfterCreate3)
+ end)}.
should_add_user_to_db_admins(TestAuthDb) ->
?_test(begin
@@ -332,8 +347,9 @@ should_add_user_to_db_admins(TestAuthDb) ->
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
?assertEqual(
- {[{<<"names">>,[<<"qux">>]}]},
- proplists:get_value(<<"admins">>, get_security(UserDbName)))
+ {[{<<"names">>, [<<"qux">>]}]},
+ proplists:get_value(<<"admins">>, get_security(UserDbName))
+ )
end).
should_add_user_to_db_members(TestAuthDb) ->
@@ -343,8 +359,9 @@ should_add_user_to_db_members(TestAuthDb) ->
create_user(TestAuthDb, User),
wait_for_db_create(UserDbName),
?assertEqual(
- {[{<<"names">>,[<<"qux">>]}]},
- proplists:get_value(<<"members">>, get_security(UserDbName)))
+ {[{<<"names">>, [<<"qux">>]}]},
+ proplists:get_value(<<"members">>, get_security(UserDbName))
+ )
end).
should_not_remove_existing_db_admins(TestAuthDb) ->
@@ -352,15 +369,17 @@ should_not_remove_existing_db_admins(TestAuthDb) ->
User = "qux",
UserDbName = <<"userdb-717578">>,
SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
+ {<<"admins">>, {[{<<"names">>, [<<"foo">>, <<"bar">>]}]}},
+ {<<"members">>, {[{<<"names">>, [<<"baz">>, <<"pow">>]}]}}
],
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
wait_for_security_create(<<"admins">>, User, UserDbName),
- {AdminProperties} = proplists:get_value(<<"admins">>,
- get_security(UserDbName)),
+ {AdminProperties} = proplists:get_value(
+ <<"admins">>,
+ get_security(UserDbName)
+ ),
AdminNames = proplists:get_value(<<"names">>, AdminProperties),
?assert(lists:member(<<"foo">>, AdminNames)),
?assert(lists:member(<<"bar">>, AdminNames)),
@@ -372,15 +391,17 @@ should_not_remove_existing_db_members(TestAuthDb) ->
User = "qux",
UserDbName = <<"userdb-717578">>,
SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
+ {<<"admins">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}},
+ {<<"members">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}}
],
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
wait_for_security_create(<<"members">>, User, UserDbName),
- {MemberProperties} = proplists:get_value(<<"members">>,
- get_security(UserDbName)),
+ {MemberProperties} = proplists:get_value(
+ <<"members">>,
+ get_security(UserDbName)
+ ),
MemberNames = proplists:get_value(<<"names">>, MemberProperties),
?assert(lists:member(<<"pow">>, MemberNames)),
?assert(lists:member(<<"wow">>, MemberNames)),
@@ -392,23 +413,27 @@ should_remove_user_from_db_admins(TestAuthDb) ->
User = "qux",
UserDbName = <<"userdb-717578">>,
SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"foo">>,<<"bar">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"baz">>,<<"pow">>]}]}}
+ {<<"admins">>, {[{<<"names">>, [<<"foo">>, <<"bar">>]}]}},
+ {<<"members">>, {[{<<"names">>, [<<"baz">>, <<"pow">>]}]}}
],
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
wait_for_security_create(<<"admins">>, User, UserDbName),
- {AdminProperties} = proplists:get_value(<<"admins">>,
- get_security(UserDbName)),
+ {AdminProperties} = proplists:get_value(
+ <<"admins">>,
+ get_security(UserDbName)
+ ),
AdminNames = proplists:get_value(<<"names">>, AdminProperties),
FooBefore = lists:member(<<"foo">>, AdminNames),
BarBefore = lists:member(<<"bar">>, AdminNames),
QuxBefore = lists:member(<<"qux">>, AdminNames),
delete_user(TestAuthDb, User),
wait_for_security_delete(<<"admins">>, User, UserDbName),
- {NewAdminProperties} = proplists:get_value(<<"admins">>,
- get_security(UserDbName)),
+ {NewAdminProperties} = proplists:get_value(
+ <<"admins">>,
+ get_security(UserDbName)
+ ),
NewAdminNames = proplists:get_value(<<"names">>, NewAdminProperties),
FooAfter = lists:member(<<"foo">>, NewAdminNames),
BarAfter = lists:member(<<"bar">>, NewAdminNames),
@@ -426,23 +451,27 @@ should_remove_user_from_db_members(TestAuthDb) ->
User = "qux",
UserDbName = <<"userdb-717578">>,
SecurityProperties = [
- {<<"admins">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}},
- {<<"members">>,{[{<<"names">>,[<<"pow">>,<<"wow">>]}]}}
+ {<<"admins">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}},
+ {<<"members">>, {[{<<"names">>, [<<"pow">>, <<"wow">>]}]}}
],
create_db(UserDbName),
set_security(UserDbName, SecurityProperties),
create_user(TestAuthDb, User),
wait_for_security_create(<<"members">>, User, UserDbName),
- {MemberProperties} = proplists:get_value(<<"members">>,
- get_security(UserDbName)),
+ {MemberProperties} = proplists:get_value(
+ <<"members">>,
+ get_security(UserDbName)
+ ),
MemberNames = proplists:get_value(<<"names">>, MemberProperties),
PowBefore = lists:member(<<"pow">>, MemberNames),
WowBefore = lists:member(<<"wow">>, MemberNames),
QuxBefore = lists:member(<<"qux">>, MemberNames),
delete_user(TestAuthDb, User),
wait_for_security_delete(<<"members">>, User, UserDbName),
- {NewMemberProperties} = proplists:get_value(<<"members">>,
- get_security(UserDbName)),
+ {NewMemberProperties} = proplists:get_value(
+ <<"members">>,
+ get_security(UserDbName)
+ ),
NewMemberNames = proplists:get_value(<<"names">>, NewMemberProperties),
PowAfter = lists:member(<<"pow">>, NewMemberNames),
WowAfter = lists:member(<<"wow">>, NewMemberNames),
@@ -455,12 +484,11 @@ should_remove_user_from_db_members(TestAuthDb) ->
?assertNot(QuxAfter)
end).
-
-
wait_for_db_create(UserDbName) ->
test_util:wait(fun() ->
case all_dbs_with_errors() of
- {error, _, _ , _} -> wait;
+ {error, _, _, _} ->
+ wait;
{ok, _, _, AllDbs} ->
case lists:member(UserDbName, AllDbs) of
true -> true;
@@ -472,7 +500,8 @@ wait_for_db_create(UserDbName) ->
wait_for_db_delete(UserDbName) ->
test_util:wait(fun() ->
case all_dbs_with_errors() of
- {ok, 500, _ , _} -> wait;
+ {ok, 500, _, _} ->
+ wait;
{ok, _, _, AllDbs} ->
case not lists:member(UserDbName, AllDbs) of
true -> true;
@@ -508,10 +537,12 @@ couch_peruser_test_() ->
"couch_peruser test",
{
setup,
- fun setup_all/0, fun teardown_all/1,
+ fun setup_all/0,
+ fun teardown_all/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_create_anon_user_db_with_default/1,
fun should_create_anon_user_db_with_custom_prefix/1,
diff --git a/src/couch_plugins/src/couch_plugins.erl b/src/couch_plugins/src/couch_plugins.erl
index 139a878bf..97834134b 100644
--- a/src/couch_plugins/src/couch_plugins.erl
+++ b/src/couch_plugins/src/couch_plugins.erl
@@ -17,63 +17,62 @@
% couch_plugins:install({"geocouch", "http://people.apache.org/~jan/", "couchdb1.2.x_v0.3.0-11-gd83ba22", [{"R15B03", "ZetgdHj2bY2w37buulWVf3USOZs="}]}).
plugin_dir() ->
- couch_config:get("couchdb", "plugin_dir").
+ couch_config:get("couchdb", "plugin_dir").
log(T) ->
- couch_log:debug("[couch_plugins] ~p ~n", [T]).
+ couch_log:debug("[couch_plugins] ~p ~n", [T]).
%% "geocouch", "http://localhost:8000/dist", "1.0.0"
-type plugin() :: {string(), string(), string(), list()}.
-spec install(plugin()) -> ok | {error, string()}.
-install({Name, _BaseUrl, Version, Checksums}=Plugin) ->
- log("Installing " ++ Name),
+install({Name, _BaseUrl, Version, Checksums} = Plugin) ->
+ log("Installing " ++ Name),
- {ok, LocalFilename} = download(Plugin),
- log("downloaded to " ++ LocalFilename),
+ {ok, LocalFilename} = download(Plugin),
+ log("downloaded to " ++ LocalFilename),
- ok = verify_checksum(LocalFilename, Checksums),
- log("checksum verified"),
+ ok = verify_checksum(LocalFilename, Checksums),
+ log("checksum verified"),
- ok = untargz(LocalFilename),
- log("extraction done"),
+ ok = untargz(LocalFilename),
+ log("extraction done"),
- ok = add_code_path(Name, Version),
- log("added code path"),
+ ok = add_code_path(Name, Version),
+ log("added code path"),
- ok = register_plugin(Name, Version),
- log("registered plugin"),
+ ok = register_plugin(Name, Version),
+ log("registered plugin"),
- load_config(Name, Version),
- log("loaded config"),
+ load_config(Name, Version),
+ log("loaded config"),
- ok.
+ ok.
% Idempotent uninstall, if you uninstall a non-existant
% plugin, you get an `ok`.
-spec uninstall(plugin()) -> ok | {error, string()}.
uninstall({Name, _BaseUrl, Version, _Checksums}) ->
- % unload config
- ok = unload_config(Name, Version),
- log("config unloaded"),
+ % unload config
+ ok = unload_config(Name, Version),
+ log("config unloaded"),
- % delete files
- ok = delete_files(Name, Version),
- log("files deleted"),
+ % delete files
+ ok = delete_files(Name, Version),
+ log("files deleted"),
- % delete code path
- ok = del_code_path(Name, Version),
- log("deleted code path"),
+ % delete code path
+ ok = del_code_path(Name, Version),
+ log("deleted code path"),
- % unregister plugin
- ok = unregister_plugin(Name),
- log("unregistered plugin"),
+ % unregister plugin
+ ok = unregister_plugin(Name),
+ log("unregistered plugin"),
- % done
- ok.
+ % done
+ ok.
%% * * *
-
%% Plugin Registration
%% On uninstall:
%% - add plugins/name = version to config
@@ -82,15 +81,14 @@ uninstall({Name, _BaseUrl, Version, _Checksums}) ->
-spec register_plugin(string(), string()) -> ok.
register_plugin(Name, Version) ->
- couch_config:set("plugins", Name, Version).
+ couch_config:set("plugins", Name, Version).
-spec unregister_plugin(string()) -> ok.
unregister_plugin(Name) ->
- couch_config:delete("plugins", Name).
+ couch_config:delete("plugins", Name).
%% * * *
-
%% Load Config
%% Parses <plugindir>/priv/default.d/<pluginname.ini> and applies
%% the contents to the config system, or removes them on uninstall
@@ -105,8 +103,10 @@ unload_config(Name, Version) ->
-spec loop_config(string(), string(), function()) -> ok.
loop_config(Name, Version, Fun) ->
- lists:foreach(fun(File) -> load_config_file(File, Fun) end,
- filelib:wildcard(file_names(Name, Version))).
+ lists:foreach(
+ fun(File) -> load_config_file(File, Fun) end,
+ filelib:wildcard(file_names(Name, Version))
+ ).
-spec load_config_file(string(), function()) -> ok.
load_config_file(File, Fun) ->
@@ -123,13 +123,18 @@ delete_config({{Section, Key}, _Value}) ->
-spec file_names(string(), string()) -> string().
file_names(Name, Version) ->
- filename:join(
- [plugin_dir(), get_file_slug(Name, Version),
- "priv", "default.d", "*.ini"]).
+ filename:join(
+ [
+ plugin_dir(),
+ get_file_slug(Name, Version),
+ "priv",
+ "default.d",
+ "*.ini"
+ ]
+ ).
%% * * *
-
%% Code Path Management
%% The Erlang code path is where the Erlang runtime looks for `.beam`
%% files to load on, say, `application:load()`. Since plugin directories
@@ -138,137 +143,151 @@ file_names(Name, Version) ->
-spec add_code_path(string(), string()) -> ok | {error, bad_directory}.
add_code_path(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
- case code:add_path(PluginPath) of
- true -> ok;
- Else ->
- couch_log:error("Failed to add PluginPath: '~s'", [PluginPath]),
- Else
- end.
+ PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
+ case code:add_path(PluginPath) of
+ true ->
+ ok;
+ Else ->
+ couch_log:error("Failed to add PluginPath: '~s'", [PluginPath]),
+ Else
+ end.
-spec del_code_path(string(), string()) -> ok | {error, atom()}.
del_code_path(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
- case code:del_path(PluginPath) of
- true -> ok;
- _Else ->
- couch_log:debug("Failed to delete PluginPath: '~s', ignoring",
- [PluginPath]),
- ok
- end.
+ PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version) ++ "/ebin",
+ case code:del_path(PluginPath) of
+ true ->
+ ok;
+ _Else ->
+ couch_log:debug(
+ "Failed to delete PluginPath: '~s', ignoring",
+ [PluginPath]
+ ),
+ ok
+ end.
%% * * *
-
-spec untargz(string()) -> {ok, string()} | {error, string()}.
untargz(Filename) ->
- % read .gz file
- {ok, GzData} = file:read_file(Filename),
- % gunzip
- log("unzipped"),
- TarData = zlib:gunzip(GzData),
- ok = filelib:ensure_dir(plugin_dir()),
- % untar
- erl_tar:extract({binary, TarData}, [{cwd, plugin_dir()}, keep_old_files]).
+ % read .gz file
+ {ok, GzData} = file:read_file(Filename),
+ % gunzip
+ log("unzipped"),
+ TarData = zlib:gunzip(GzData),
+ ok = filelib:ensure_dir(plugin_dir()),
+ % untar
+ erl_tar:extract({binary, TarData}, [{cwd, plugin_dir()}, keep_old_files]).
-spec delete_files(string(), string()) -> ok | {error, atom()}.
delete_files(Name, Version) ->
- PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version),
- mochitemp:rmtempdir(PluginPath).
-
+ PluginPath = plugin_dir() ++ "/" ++ get_file_slug(Name, Version),
+ mochitemp:rmtempdir(PluginPath).
% downloads a pluygin .tar.gz into a local plugins directory
-spec download(string()) -> ok | {error, string()}.
-download({Name, _BaseUrl, Version, _Checksums}=Plugin) ->
- TargetFile = filename:join(mochitemp:gettempdir(), get_filename(Name, Version)),
- case file_exists(TargetFile) of
- %% wipe and redownload
- true -> file:delete(TargetFile);
- _Else -> ok
- end,
- Url = get_url(Plugin),
- HTTPOptions = [
- {connect_timeout, 30*1000}, % 30 seconds
- {timeout, 30*1000} % 30 seconds
- ],
- % todo: windows
- Options = [
- {stream, TargetFile}, % /tmp/something
- {body_format, binary},
- {full_result, false}
- ],
- % todo: reduce to just httpc:request()
- case httpc:request(get, {Url, []}, HTTPOptions, Options) of
- {ok, _Result} ->
- log("downloading " ++ Url),
- {ok, TargetFile};
- Error -> Error
- end.
+download({Name, _BaseUrl, Version, _Checksums} = Plugin) ->
+ TargetFile = filename:join(mochitemp:gettempdir(), get_filename(Name, Version)),
+ case file_exists(TargetFile) of
+ %% wipe and redownload
+ true -> file:delete(TargetFile);
+ _Else -> ok
+ end,
+ Url = get_url(Plugin),
+ HTTPOptions = [
+ % 30 seconds
+ {connect_timeout, 30 * 1000},
+ % 30 seconds
+ {timeout, 30 * 1000}
+ ],
+ % todo: windows
+ Options = [
+ % /tmp/something
+ {stream, TargetFile},
+ {body_format, binary},
+ {full_result, false}
+ ],
+ % todo: reduce to just httpc:request()
+ case httpc:request(get, {Url, []}, HTTPOptions, Options) of
+ {ok, _Result} ->
+ log("downloading " ++ Url),
+ {ok, TargetFile};
+ Error ->
+ Error
+ end.
-spec verify_checksum(string(), list()) -> ok | {error, string()}.
verify_checksum(Filename, Checksums) ->
-
- CouchDBVersion = couchdb_version(),
- case proplists:get_value(CouchDBVersion, Checksums) of
- undefined ->
- couch_log:error("[couch_plugins] Can't find checksum for CouchDB Version"
- " '~s'", [CouchDBVersion]),
- {error, no_couchdb_checksum};
- OTPChecksum ->
- OTPRelease = erlang:system_info(otp_release),
- case proplists:get_value(OTPRelease, OTPChecksum) of
- undefined ->
- couch_log:error("[couch_plugins] Can't find checksum for Erlang Version"
- " '~s'", [OTPRelease]),
- {error, no_erlang_checksum};
- Checksum ->
- do_verify_checksum(Filename, Checksum)
- end
- end.
+ CouchDBVersion = couchdb_version(),
+ case proplists:get_value(CouchDBVersion, Checksums) of
+ undefined ->
+ couch_log:error(
+ "[couch_plugins] Can't find checksum for CouchDB Version"
+ " '~s'",
+ [CouchDBVersion]
+ ),
+ {error, no_couchdb_checksum};
+ OTPChecksum ->
+ OTPRelease = erlang:system_info(otp_release),
+ case proplists:get_value(OTPRelease, OTPChecksum) of
+ undefined ->
+ couch_log:error(
+ "[couch_plugins] Can't find checksum for Erlang Version"
+ " '~s'",
+ [OTPRelease]
+ ),
+ {error, no_erlang_checksum};
+ Checksum ->
+ do_verify_checksum(Filename, Checksum)
+ end
+ end.
-spec do_verify_checksum(string(), string()) -> ok | {error, string()}.
do_verify_checksum(Filename, Checksum) ->
- couch_log:debug("Checking Filename: ~s", [Filename]),
- case file:read_file(Filename) of
- {ok, Data} ->
- ComputedChecksum = binary_to_list(base64:encode(crypto:hash(sha, Data))),
- case ComputedChecksum of
- Checksum -> ok;
- _Else ->
- couch_log:error("Checksum mismatch. Wanted: '~p'. Got '~p'",
- [Checksum, ComputedChecksum]),
- {error, checksum_mismatch}
- end;
- Error -> Error
- end.
-
+ couch_log:debug("Checking Filename: ~s", [Filename]),
+ case file:read_file(Filename) of
+ {ok, Data} ->
+ ComputedChecksum = binary_to_list(base64:encode(crypto:hash(sha, Data))),
+ case ComputedChecksum of
+ Checksum ->
+ ok;
+ _Else ->
+ couch_log:error(
+ "Checksum mismatch. Wanted: '~p'. Got '~p'",
+ [Checksum, ComputedChecksum]
+ ),
+ {error, checksum_mismatch}
+ end;
+ Error ->
+ Error
+ end.
%% utils
-spec get_url(plugin()) -> string().
get_url({Name, BaseUrl, Version, _Checksums}) ->
- BaseUrl ++ "/" ++ get_filename(Name, Version).
+ BaseUrl ++ "/" ++ get_filename(Name, Version).
-spec get_filename(string(), string()) -> string().
get_filename(Name, Version) ->
- get_file_slug(Name, Version) ++ ".tar.gz".
+ get_file_slug(Name, Version) ++ ".tar.gz".
-spec get_file_slug(string(), string()) -> string().
get_file_slug(Name, Version) ->
- % OtpRelease does not include patch levels like the -1 in R15B03-1
- OTPRelease = erlang:system_info(otp_release),
- CouchDBVersion = couchdb_version(),
- string:join([Name, Version, OTPRelease, CouchDBVersion], "-").
+ % OtpRelease does not include patch levels like the -1 in R15B03-1
+ OTPRelease = erlang:system_info(otp_release),
+ CouchDBVersion = couchdb_version(),
+ string:join([Name, Version, OTPRelease, CouchDBVersion], "-").
-spec file_exists(string()) -> boolean().
file_exists(Filename) ->
- does_file_exist(file:read_file_info(Filename)).
+ does_file_exist(file:read_file_info(Filename)).
-spec does_file_exist(term()) -> boolean().
does_file_exist({error, enoent}) -> false;
does_file_exist(_Else) -> true.
couchdb_version() ->
- couch_server:get_version(short).
+ couch_server:get_version(short).
% installing a plugin:
% - POST /_plugins -d {plugin-def}
@@ -301,4 +320,3 @@ couchdb_version() ->
% geocouch-{geocouch_version}-{erlang_version}/ebin
% [geocouch-{geocouch_version}-{erlang_version}/config/config.erlt]
% [geocouch-{geocouch_version}-{erlang_version}/share/]
-
diff --git a/src/couch_plugins/src/couch_plugins_httpd.erl b/src/couch_plugins/src/couch_plugins_httpd.erl
index 90a09a5a5..784f040fc 100644
--- a/src/couch_plugins/src/couch_plugins_httpd.erl
+++ b/src/couch_plugins/src/couch_plugins_httpd.erl
@@ -15,7 +15,7 @@
-include_lib("couch/include/couch_db.hrl").
-handle_req(#httpd{method='POST'}=Req) ->
+handle_req(#httpd{method = 'POST'} = Req) ->
ok = couch_httpd:verify_is_server_admin(Req),
couch_httpd:validate_ctype(Req, "application/json"),
@@ -29,15 +29,15 @@ handle_req(#httpd{method='POST'}=Req) ->
Plugin = {Name, Url, Version, Checksums},
case do_install(Delete, Plugin) of
- ok ->
- couch_httpd:send_json(Req, 202, {[{ok, true}]});
- Error ->
- couch_log:debug("Plugin Spec: ~p", [PluginSpec]),
- couch_httpd:send_error(Req, {bad_request, Error})
+ ok ->
+ couch_httpd:send_json(Req, 202, {[{ok, true}]});
+ Error ->
+ couch_log:debug("Plugin Spec: ~p", [PluginSpec]),
+ couch_httpd:send_error(Req, {bad_request, Error})
end;
% handles /_plugins/<pluginname>/<file>
% serves <plugin_dir>/<pluginname>-<pluginversion>-<otpversion>-<couchdbversion>/<file>
-handle_req(#httpd{method='GET',path_parts=[_, Name0 | Path0]}=Req) ->
+handle_req(#httpd{method = 'GET', path_parts = [_, Name0 | Path0]} = Req) ->
Name = ?b2l(Name0),
Path = lists:map(fun binary_to_list/1, Path0),
OTPRelease = erlang:system_info(otp_release),
@@ -51,15 +51,19 @@ handle_req(Req) ->
couch_httpd:send_method_not_allowed(Req, "POST").
plugin_dir() ->
- couch_config:get("couchdb", "plugin_dir").
+ couch_config:get("couchdb", "plugin_dir").
do_install(false, Plugin) ->
couch_plugins:install(Plugin);
do_install(true, Plugin) ->
couch_plugins:uninstall(Plugin).
parse_checksums(Checksums) ->
- lists:map(fun({K, {V}}) ->
- {binary_to_list(K), parse_checksums(V)};
- ({K, V}) ->
- {binary_to_list(K), binary_to_list(V)}
- end, Checksums).
+ lists:map(
+ fun
+ ({K, {V}}) ->
+ {binary_to_list(K), parse_checksums(V)};
+ ({K, V}) ->
+ {binary_to_list(K), binary_to_list(V)}
+ end,
+ Checksums
+ ).
diff --git a/src/couch_prometheus/src/couch_prometheus_http.erl b/src/couch_prometheus/src/couch_prometheus_http.erl
index bd0c4c6f9..b3df1ea4b 100644
--- a/src/couch_prometheus/src/couch_prometheus_http.erl
+++ b/src/couch_prometheus/src/couch_prometheus_http.erl
@@ -23,10 +23,11 @@
-include_lib("couch/include/couch_db.hrl").
start_link() ->
- IP = case config:get("prometheus", "bind_address", "any") of
- "any" -> any;
- Else -> Else
- end,
+ IP =
+ case config:get("prometheus", "bind_address", "any") of
+ "any" -> any;
+ Else -> Else
+ end,
Port = config:get("prometheus", "port"),
ok = couch_httpd:validate_bind_address(IP),
@@ -47,7 +48,7 @@ start_link() ->
handle_request(MochiReq) ->
RawUri = MochiReq:get(raw_path),
{"/" ++ Path, _, _} = mochiweb_util:urlsplit_path(RawUri),
- PathParts = string:tokens(Path, "/"),
+ PathParts = string:tokens(Path, "/"),
try
case PathParts of
["_node", Node, "_prometheus"] ->
@@ -55,16 +56,19 @@ handle_request(MochiReq) ->
_ ->
send_error(MochiReq, 404, <<"not_found">>, <<>>)
end
- catch T:R ->
- Body = list_to_binary(io_lib:format("~p:~p", [T, R])),
- send_error(MochiReq, 500, <<"server_error">>, Body)
+ catch
+ T:R ->
+ Body = list_to_binary(io_lib:format("~p:~p", [T, R])),
+ send_error(MochiReq, 500, <<"server_error">>, Body)
end.
send_prometheus(MochiReq, Node) ->
Type = "text/plain; version=" ++ ?PROMETHEUS_VERSION,
- Headers = couch_httpd:server_header() ++ [
- {<<"Content-Type">>, ?l2b(Type)}
- ],
+ Headers =
+ couch_httpd:server_header() ++
+ [
+ {<<"Content-Type">>, ?l2b(Type)}
+ ],
Body = call_node(Node, couch_prometheus_server, scrape, []),
send_resp(MochiReq, 200, Headers, Body).
@@ -73,23 +77,29 @@ send_resp(MochiReq, Status, ExtraHeaders, Body) ->
MochiReq:respond({Status, Headers, Body}).
send_error(MochiReq, Code, Error, Reason) ->
- Headers = couch_httpd:server_header() ++ [
- {<<"Content-Type">>, <<"application/json">>}
- ],
- JsonError = {[{<<"error">>, Error},
- {<<"reason">>, Reason}]},
+ Headers =
+ couch_httpd:server_header() ++
+ [
+ {<<"Content-Type">>, <<"application/json">>}
+ ],
+ JsonError =
+ {[
+ {<<"error">>, Error},
+ {<<"reason">>, Reason}
+ ]},
Body = ?JSON_ENCODE(JsonError),
MochiReq:respond({Code, Headers, Body}).
call_node("_local", Mod, Fun, Args) ->
call_node(node(), Mod, Fun, Args);
call_node(Node0, Mod, Fun, Args) when is_list(Node0) ->
- Node1 = try
- list_to_existing_atom(Node0)
- catch
- error:badarg ->
- NoNode = list_to_binary(Node0),
- throw({not_found, <<"no such node: ", NoNode/binary>>})
+ Node1 =
+ try
+ list_to_existing_atom(Node0)
+ catch
+ error:badarg ->
+ NoNode = list_to_binary(Node0),
+ throw({not_found, <<"no such node: ", NoNode/binary>>})
end,
call_node(Node1, Mod, Fun, Args);
call_node(Node, Mod, Fun, Args) when is_atom(Node) ->
diff --git a/src/couch_prometheus/src/couch_prometheus_server.erl b/src/couch_prometheus/src/couch_prometheus_server.erl
index e97df04a4..701483a38 100644
--- a/src/couch_prometheus/src/couch_prometheus_server.erl
+++ b/src/couch_prometheus/src/couch_prometheus_server.erl
@@ -48,7 +48,7 @@ start_link() ->
init([]) ->
Metrics = refresh_metrics(),
RT = update_refresh_timer(),
- {ok, #st{metrics=Metrics, refresh=RT}}.
+ {ok, #st{metrics = Metrics, refresh = RT}}.
scrape() ->
{ok, Metrics} = gen_server:call(?MODULE, scrape),
@@ -57,13 +57,13 @@ scrape() ->
version() ->
?PROMETHEUS_VERSION.
-handle_call(scrape, _from, #st{metrics = Metrics}=State) ->
+handle_call(scrape, _from, #st{metrics = Metrics} = State) ->
{reply, {ok, Metrics}, State};
-handle_call(refresh, _from, #st{refresh=OldRT} = State) ->
+handle_call(refresh, _from, #st{refresh = OldRT} = State) ->
timer:cancel(OldRT),
Metrics = refresh_metrics(),
RT = update_refresh_timer(),
- {reply, ok, State#st{metrics=Metrics, refresh=RT}};
+ {reply, ok, State#st{metrics = Metrics, refresh = RT}};
handle_call(Msg, _From, State) ->
{stop, {unknown_call, Msg}, error, State}.
@@ -73,7 +73,7 @@ handle_cast(Msg, State) ->
handle_info(refresh, State) ->
Metrics = refresh_metrics(),
RT = update_refresh_timer(),
- {noreply, State#st{metrics=Metrics, refresh=RT}};
+ {noreply, State#st{metrics = Metrics, refresh = RT}};
handle_info(Msg, State) ->
{stop, {unknown_info, Msg}, State}.
@@ -86,15 +86,23 @@ code_change(_OldVsn, State, _Extra) ->
refresh_metrics() ->
CouchDB = get_couchdb_stats(),
System = couch_stats_httpd:to_ejson(get_system_stats()),
- couch_prometheus_util:to_bin(lists:map(fun(Line) ->
- io_lib:format("~s~n", [Line])
- end, CouchDB ++ System)).
+ couch_prometheus_util:to_bin(
+ lists:map(
+ fun(Line) ->
+ io_lib:format("~s~n", [Line])
+ end,
+ CouchDB ++ System
+ )
+ ).
get_couchdb_stats() ->
Stats = lists:sort(couch_stats:fetch()),
- lists:flatmap(fun({Path, Info}) ->
- couch_to_prom(Path, Info, Stats)
- end, Stats).
+ lists:flatmap(
+ fun({Path, Info}) ->
+ couch_to_prom(Path, Info, Stats)
+ end,
+ Stats
+ ).
get_system_stats() ->
lists:flatten([
@@ -111,9 +119,12 @@ get_uptime_stat() ->
to_prom(uptime_seconds, counter, couch_app:uptime() div 1000).
get_vm_stats() ->
- MemLabels = lists:map(fun({Type, Value}) ->
- {[{memory_type, Type}], Value}
- end, erlang:memory()),
+ MemLabels = lists:map(
+ fun({Type, Value}) ->
+ {[{memory_type, Type}], Value}
+ end,
+ erlang:memory()
+ ),
{NumGCs, WordsReclaimed, _} = erlang:statistics(garbage_collection),
CtxSwitches = element(1, erlang:statistics(context_switches)),
Reds = element(1, erlang:statistics(reductions)),
@@ -137,14 +148,17 @@ get_io_stats() ->
].
get_message_queue_stats() ->
- Queues = lists:map(fun(Name) ->
- case process_info(whereis(Name), message_queue_len) of
- {message_queue_len, N} ->
- N;
- _ ->
- 0
- end
- end, registered()),
+ Queues = lists:map(
+ fun(Name) ->
+ case process_info(whereis(Name), message_queue_len) of
+ {message_queue_len, N} ->
+ N;
+ _ ->
+ 0
+ end
+ end,
+ registered()
+ ),
[
to_prom(erlang_message_queues, gauge, lists:sum(Queues)),
to_prom(erlang_message_queue_min, gauge, lists:min(Queues)),
@@ -153,13 +167,14 @@ get_message_queue_stats() ->
get_run_queue_stats() ->
%% Workaround for https://bugs.erlang.org/browse/ERL-1355
- {Normal, Dirty} = case erlang:system_info(dirty_cpu_schedulers) > 0 of
- false ->
- {statistics(run_queue), 0};
- true ->
- [DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
- {lists:sum(SQs), DCQ}
- end,
+ {Normal, Dirty} =
+ case erlang:system_info(dirty_cpu_schedulers) > 0 of
+ false ->
+ {statistics(run_queue), 0};
+ true ->
+ [DCQ | SQs] = lists:reverse(statistics(run_queue_lengths)),
+ {lists:sum(SQs), DCQ}
+ end,
[
to_prom(erlang_scheduler_queues, gauge, Normal),
to_prom(erlang_dirty_cpu_scheduler_queues, gauge, Dirty)
diff --git a/src/couch_prometheus/src/couch_prometheus_sup.erl b/src/couch_prometheus/src/couch_prometheus_sup.erl
index 8d8c7e078..45a884fad 100644
--- a/src/couch_prometheus/src/couch_prometheus_sup.erl
+++ b/src/couch_prometheus/src/couch_prometheus_sup.erl
@@ -26,7 +26,8 @@ start_link() ->
init([]) ->
{ok, {
- {one_for_one, 5, 10}, [
+ {one_for_one, 5, 10},
+ [
?CHILD(couch_prometheus_server, worker)
] ++ maybe_start_prometheus_http()
}}.
diff --git a/src/couch_prometheus/src/couch_prometheus_util.erl b/src/couch_prometheus/src/couch_prometheus_util.erl
index c3b58cb3a..ea2cdf737 100644
--- a/src/couch_prometheus/src/couch_prometheus_util.erl
+++ b/src/couch_prometheus/src/couch_prometheus_util.erl
@@ -10,7 +10,7 @@
% License for the specific language governing permissions and limitations under
% the License.
--module(couch_prometheus_util ).
+-module(couch_prometheus_util).
-export([
couch_to_prom/3,
@@ -25,7 +25,6 @@ couch_to_prom([couch_log, level, alert], Info, _All) ->
to_prom(couch_log_requests_total, counter, {[{level, alert}], val(Info)});
couch_to_prom([couch_log, level, Level], Info, _All) ->
to_prom(couch_log_requests_total, {[{level, Level}], val(Info)});
-
couch_to_prom([couch_replicator, checkpoints, failure], Info, _All) ->
to_prom(couch_replicator_checkpoints_failure_total, counter, val(Info));
couch_to_prom([couch_replicator, checkpoints, success], Info, All) ->
@@ -41,7 +40,6 @@ couch_to_prom([couch_replicator, stream_responses, failure], Info, _All) ->
couch_to_prom([couch_replicator, stream_responses, success], Info, All) ->
Total = val(Info) + val([couch_replicator, stream_responses, failure], All),
to_prom(couch_replicator_stream_responses_total, counter, Total);
-
couch_to_prom([couchdb, auth_cache_hits], Info, All) ->
Total = val(Info) + val([couchdb, auth_cache_misses], All),
to_prom(auth_cache_requests_total, counter, Total);
@@ -53,7 +51,6 @@ couch_to_prom([couchdb, httpd_request_methods, Method], Info, _All) ->
to_prom(httpd_request_methods, {[{method, Method}], val(Info)});
couch_to_prom([couchdb, httpd_status_codes, Code], Info, _All) ->
to_prom(httpd_status_codes, {[{code, Code}], val(Info)});
-
couch_to_prom([ddoc_cache, hit], Info, All) ->
Total = val(Info) + val([ddoc_cache, miss], All),
to_prom(ddoc_cache_requests_total, counter, Total);
@@ -61,21 +58,17 @@ couch_to_prom([ddoc_cache, miss], Info, _All) ->
to_prom(ddoc_cache_requests_failures_total, counter, val(Info));
couch_to_prom([ddoc_cache, recovery], Info, _All) ->
to_prom(ddoc_cache_requests_recovery_total, counter, val(Info));
-
couch_to_prom([fabric, read_repairs, failure], Info, _All) ->
to_prom(fabric_read_repairs_failures_total, counter, val(Info));
couch_to_prom([fabric, read_repairs, success], Info, All) ->
Total = val(Info) + val([fabric, read_repairs, failure], All),
to_prom(fabric_read_repairs_total, counter, Total);
-
couch_to_prom([rexi, streams, timeout, init_stream], Info, _All) ->
to_prom(rexi_streams_timeout_total, counter, {[{stage, init_stream}], val(Info)});
couch_to_prom([rexi_streams, timeout, Stage], Info, _All) ->
to_prom(rexi_streams_timeout_total, {[{stage, Stage}], val(Info)});
-
couch_to_prom([couchdb | Rest], Info, All) ->
couch_to_prom(Rest, Info, All);
-
couch_to_prom(Path, Info, _All) ->
case lists:keyfind(type, 1, Info) of
{type, counter} ->
@@ -94,16 +87,20 @@ to_prom(Metric, Type, Data) ->
to_prom(Metric, Instances) when is_list(Instances) ->
lists:flatmap(fun(Inst) -> to_prom(Metric, Inst) end, Instances);
to_prom(Metric, {Labels, Value}) ->
- LabelParts = lists:map(fun({K, V}) ->
- lists:flatten(io_lib:format("~s=\"~s\"", [to_bin(K), to_bin(V)]))
- end, Labels),
- MetricStr = case length(LabelParts) > 0 of
- true ->
- LabelStr = string:join(LabelParts, ", "),
- lists:flatten(io_lib:format("~s{~s}", [to_prom_name(Metric), LabelStr]));
- false ->
- lists:flatten(io_lib:format("~s", [to_prom_name(Metric)]))
- end,
+ LabelParts = lists:map(
+ fun({K, V}) ->
+ lists:flatten(io_lib:format("~s=\"~s\"", [to_bin(K), to_bin(V)]))
+ end,
+ Labels
+ ),
+ MetricStr =
+ case length(LabelParts) > 0 of
+ true ->
+ LabelStr = string:join(LabelParts, ", "),
+ lists:flatten(io_lib:format("~s{~s}", [to_prom_name(Metric), LabelStr]));
+ false ->
+ lists:flatten(io_lib:format("~s", [to_prom_name(Metric)]))
+ end,
[to_bin(io_lib:format("~s ~p", [MetricStr, Value]))];
to_prom(Metric, Value) ->
[to_bin(io_lib:format("~s ~p", [to_prom_name(Metric), Value]))].
@@ -114,18 +111,21 @@ to_prom_summary(Path, Info) ->
{arithmetic_mean, Mean} = lists:keyfind(arithmetic_mean, 1, Value),
{percentile, Percentiles} = lists:keyfind(percentile, 1, Value),
{n, Count} = lists:keyfind(n, 1, Value),
- Quantiles = lists:map(fun({Perc, Val0}) ->
- % Prometheus uses seconds, so we need to covert milliseconds to seconds
- Val = Val0/1000,
- case Perc of
- 50 -> {[{quantile, <<"0.5">>}], Val};
- 75 -> {[{quantile, <<"0.75">>}], Val};
- 90 -> {[{quantile, <<"0.9">>}], Val};
- 95 -> {[{quantile, <<"0.95">>}], Val};
- 99 -> {[{quantile, <<"0.99">>}], Val};
- 999 -> {[{quantile, <<"0.999">>}], Val}
- end
- end, Percentiles),
+ Quantiles = lists:map(
+ fun({Perc, Val0}) ->
+ % Prometheus uses seconds, so we need to covert milliseconds to seconds
+ Val = Val0 / 1000,
+ case Perc of
+ 50 -> {[{quantile, <<"0.5">>}], Val};
+ 75 -> {[{quantile, <<"0.75">>}], Val};
+ 90 -> {[{quantile, <<"0.9">>}], Val};
+ 95 -> {[{quantile, <<"0.95">>}], Val};
+ 99 -> {[{quantile, <<"0.99">>}], Val};
+ 999 -> {[{quantile, <<"0.999">>}], Val}
+ end
+ end,
+ Percentiles
+ ),
SumMetric = path_to_name(Path ++ ["seconds", "sum"]),
SumStat = to_prom(SumMetric, Count * Mean),
CountMetric = path_to_name(Path ++ ["seconds", "count"]),
@@ -136,9 +136,12 @@ to_prom_name(Metric) ->
to_bin(io_lib:format("couchdb_~s", [Metric])).
path_to_name(Path) ->
- Parts = lists:map(fun(Part) ->
- io_lib:format("~s", [Part])
- end, Path),
+ Parts = lists:map(
+ fun(Part) ->
+ io_lib:format("~s", [Part])
+ end,
+ Path
+ ),
string:join(Parts, "_").
counter_metric(Path) ->
@@ -163,4 +166,4 @@ val(Data) ->
val(Key, Stats) ->
{Key, Data} = lists:keyfind(Key, 1, Stats),
-    val(Data).
\ No newline at end of file
+ val(Data).
diff --git a/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl b/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
index 5b8adfd1d..f986fc6b0 100644
--- a/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
+++ b/src/couch_prometheus/test/eunit/couch_prometheus_e2e_tests.erl
@@ -26,7 +26,7 @@ start() ->
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
ok = config:set_integer("stats", "interval", 2),
ok = config:set_integer("couch_prometheus", "interval", 1),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -40,10 +40,12 @@ couch_prometheus_e2e_test_() ->
"Prometheus E2E Tests",
{
setup,
- fun start/0, fun test_util:stop_couch/1,
+ fun start/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun node_call_chttpd/1,
fun node_call_prometheus_http/1,
@@ -56,11 +58,11 @@ couch_prometheus_e2e_test_() ->
% normal chttpd path via cluster port
node_call_chttpd(Url) ->
- {ok, RC1, _, _} = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
+ {ok, RC1, _, _} = test_request:get(
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
?_assertEqual(200, RC1).
% normal chttpd path via cluster port
@@ -84,14 +86,14 @@ node_call_prometheus_http(_) ->
maybe_start_http_server("true"),
Url = construct_url(?PROM_PORT),
{ok, RC1, _, _} = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH]
- ),
+ Url,
+ [?CONTENT_JSON, ?AUTH]
+ ),
% since this port doesn't require auth, this should work
{ok, RC2, _, _} = test_request:get(
- Url,
- [?CONTENT_JSON]
- ),
+ Url,
+ [?CONTENT_JSON]
+ ),
delete_db(Url),
?_assertEqual({200, 200}, {RC1, RC2}).
@@ -100,16 +102,16 @@ deny_prometheus_http(_) ->
maybe_start_http_server("false"),
Url = construct_url(?PROM_PORT),
Response = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- ?_assertEqual({error,{conn_failed,{error,econnrefused}}}, Response).
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ ?_assertEqual({error, {conn_failed, {error, econnrefused}}}, Response).
maybe_start_http_server(Additional) ->
test_util:stop_applications([couch_prometheus, chttpd]),
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
ok = config:set("prometheus", "additional_port", Additional),
ok = config:set("prometheus", "port", ?PROM_PORT),
test_util:start_applications([couch_prometheus, chttpd]).
@@ -126,18 +128,24 @@ delete_db(Url) ->
{ok, 200, _, _} = test_request:delete(Url, [?AUTH]).
create_doc(Url, Id) ->
- test_request:put(Url ++ "/" ++ Id,
- [?CONTENT_JSON, ?AUTH], "{\"mr\": \"rockoartischocko\"}").
+ test_request:put(
+ Url ++ "/" ++ Id,
+ [?CONTENT_JSON, ?AUTH],
+ "{\"mr\": \"rockoartischocko\"}"
+ ).
wait_for_metrics(Url, Value, Timeout) ->
- test_util:wait(fun() ->
+ test_util:wait(
+ fun() ->
{ok, _, _, Body} = test_request:get(
- Url,
- [?CONTENT_JSON, ?AUTH],
- []
- ),
- case string:find(Body, Value) of
- nomatch -> wait;
- M -> M
- end
- end, Timeout).
+ Url,
+ [?CONTENT_JSON, ?AUTH],
+ []
+ ),
+ case string:find(Body, Value) of
+ nomatch -> wait;
+ M -> M
+ end
+ end,
+ Timeout
+ ).
diff --git a/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl b/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl
index 8fe17e561..65828db62 100644
--- a/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl
+++ b/src/couch_prometheus/test/eunit/couch_prometheus_util_tests.erl
@@ -21,39 +21,49 @@
couch_prometheus_util_test_() ->
[
- ?_assertEqual(<<"couchdb_ddoc_cache 10">>,
- test_to_prom_output(ddoc_cache, counter, 10)),
- ?_assertEqual(<<"couchdb_httpd_status_codes{code=\"200\"} 3">>,
- test_to_prom_output(httpd_status_codes, counter, {[{code, 200}], 3})),
- ?_assertEqual(<<"couchdb_temperature_celsius 36">>,
- test_to_prom_output(temperature_celsius, gauge, 36)),
- ?_assertEqual(<<"couchdb_mango_query_time_seconds{quantile=\"0.75\"} 4.5">>,
+ ?_assertEqual(
+ <<"couchdb_ddoc_cache 10">>,
+ test_to_prom_output(ddoc_cache, counter, 10)
+ ),
+ ?_assertEqual(
+ <<"couchdb_httpd_status_codes{code=\"200\"} 3">>,
+ test_to_prom_output(httpd_status_codes, counter, {[{code, 200}], 3})
+ ),
+ ?_assertEqual(
+ <<"couchdb_temperature_celsius 36">>,
+ test_to_prom_output(temperature_celsius, gauge, 36)
+ ),
+ ?_assertEqual(
+ <<"couchdb_mango_query_time_seconds{quantile=\"0.75\"} 4.5">>,
test_to_prom_sum_output([mango_query_time], [
- {value,
- [
- {min,0.0},
- {max,0.0},
- {arithmetic_mean,0.0},
- {geometric_mean,0.0},
- {harmonic_mean,0.0},
- {median,0.0},{variance,0.0},
- {standard_deviation,0.0},
- {skewness,0.0},{kurtosis,0.0},
- {percentile,[
- {50,0.0},
- {75, 4500},
- {90,0.0},
- {95,0.0},
- {99,0.0},
- {999,0.0}]},
- {histogram,[
- {0,0}]},
- {n,0}
- ]
- },
- {type,histogram},
+ {value, [
+ {min, 0.0},
+ {max, 0.0},
+ {arithmetic_mean, 0.0},
+ {geometric_mean, 0.0},
+ {harmonic_mean, 0.0},
+ {median, 0.0},
+ {variance, 0.0},
+ {standard_deviation, 0.0},
+ {skewness, 0.0},
+ {kurtosis, 0.0},
+ {percentile, [
+ {50, 0.0},
+ {75, 4500},
+ {90, 0.0},
+ {95, 0.0},
+ {99, 0.0},
+ {999, 0.0}
+ ]},
+ {histogram, [
+ {0, 0}
+ ]},
+ {n, 0}
+ ]},
+ {type, histogram},
{desc, <<"length of time processing a mango query">>}
- ]))
+ ])
+ )
].
test_to_prom_output(Metric, Type, Val) ->
@@ -62,4 +72,4 @@ test_to_prom_output(Metric, Type, Val) ->
test_to_prom_sum_output(Metric, Info) ->
Out = to_prom_summary(Metric, Info),
- lists:nth(3, Out). \ No newline at end of file
+ lists:nth(3, Out).
diff --git a/src/couch_pse_tests/src/cpse_gather.erl b/src/couch_pse_tests/src/cpse_gather.erl
index 7804d419e..346eca29b 100644
--- a/src/couch_pse_tests/src/cpse_gather.erl
+++ b/src/couch_pse_tests/src/cpse_gather.erl
@@ -12,12 +12,10 @@
-module(cpse_gather).
-
-export([
module/1
]).
-
module(ModName) ->
Exports = ModName:module_info(exports),
@@ -26,15 +24,19 @@ module(ModName) ->
SetupTest = get_fun(ModName, setup_each, 0, Exports),
TeardownTest = get_fun(ModName, teardown_each, 1, Exports),
- RevTests = lists:foldl(fun({Fun, Arity}, Acc) ->
- case {atom_to_list(Fun), Arity} of
- {[$c, $p, $s, $e, $_ | _], Arity} when Arity == 0; Arity == 1 ->
- TestFun = make_test_fun(ModName, Fun, Arity),
- [TestFun | Acc];
- _ ->
- Acc
- end
- end, [], Exports),
+ RevTests = lists:foldl(
+ fun({Fun, Arity}, Acc) ->
+ case {atom_to_list(Fun), Arity} of
+ {[$c, $p, $s, $e, $_ | _], Arity} when Arity == 0; Arity == 1 ->
+ TestFun = make_test_fun(ModName, Fun, Arity),
+ [TestFun | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ [],
+ Exports
+ ),
Tests = lists:reverse(RevTests),
{
@@ -52,21 +54,18 @@ module(ModName) ->
]
}.
-
get_setup_all(ModName, Exports) ->
case lists:member({setup_all, 0}, Exports) of
true -> fun ModName:setup_all/0;
false -> fun cpse_util:setup_all/0
end.
-
get_teardown_all(ModName, Exports) ->
case lists:member({teardown_all, 1}, Exports) of
true -> fun ModName:teardown_all/1;
false -> fun cpse_util:teardown_all/1
end.
-
get_fun(ModName, FunName, Arity, Exports) ->
case lists:member({FunName, Arity}, Exports) of
true -> fun ModName:FunName/Arity;
@@ -74,22 +73,23 @@ get_fun(ModName, FunName, Arity, Exports) ->
false when Arity == 1 -> fun(_) -> ok end
end.
-
make_test_fun(Module, Fun, Arity) ->
Name = atom_to_list(Fun),
case Arity of
0 ->
fun(_) ->
- {timeout, 60, {Name, fun() ->
- process_flag(trap_exit, true),
- Module:Fun()
- end}}
+ {timeout, 60,
+ {Name, fun() ->
+ process_flag(trap_exit, true),
+ Module:Fun()
+ end}}
end;
1 ->
fun(Arg) ->
- {timeout, 60, {Name, fun() ->
- process_flag(trap_exit, true),
- Module:Fun(Arg)
- end}}
+ {timeout, 60,
+ {Name, fun() ->
+ process_flag(trap_exit, true),
+ Module:Fun(Arg)
+ end}}
end
end.
diff --git a/src/couch_pse_tests/src/cpse_test_attachments.erl b/src/couch_pse_tests/src/cpse_test_attachments.erl
index ddd1077d1..4447b8120 100644
--- a/src/couch_pse_tests/src/cpse_test_attachments.erl
+++ b/src/couch_pse_tests/src/cpse_test_attachments.erl
@@ -14,27 +14,23 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
Db.
-
teardown_each(Db) ->
ok = couch_server:delete(couch_db:name(Db), []).
-
cpse_write_attachment(Db1) ->
AttBin = crypto:strong_rand_bytes(32768),
try
[Att0] = cpse_util:prep_atts(Db1, [
- {<<"ohai.txt">>, AttBin}
- ]),
+ {<<"ohai.txt">>, AttBin}
+ ]),
{stream, Stream} = couch_att:fetch(data, Att0),
?assertEqual(true, couch_db_engine:is_active_stream(Db1, Stream)),
@@ -61,19 +57,21 @@ cpse_write_attachment(Db1) ->
},
Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Atts1 = if not is_binary(Doc1#doc.atts) -> Doc1#doc.atts; true ->
- couch_compress:decompress(Doc1#doc.atts)
- end,
+ Atts1 =
+ if
+ not is_binary(Doc1#doc.atts) -> Doc1#doc.atts;
+ true -> couch_compress:decompress(Doc1#doc.atts)
+ end,
StreamSrc = fun(Sp) -> couch_db_engine:open_read_stream(Db3, Sp) end,
[Att1] = [couch_att:from_disk_term(StreamSrc, T) || T <- Atts1],
ReadBin = couch_att:to_binary(Att1),
?assertEqual(AttBin, ReadBin)
- catch throw:not_supported ->
- ok
+ catch
+ throw:not_supported ->
+ ok
end.
-
% N.B. This test may be overly specific for some theoretical
% storage engines that don't re-initialize their
% attachments streams when restarting (for instance if
@@ -84,8 +82,8 @@ cpse_inactive_stream(Db1) ->
try
[Att0] = cpse_util:prep_atts(Db1, [
- {<<"ohai.txt">>, AttBin}
- ]),
+ {<<"ohai.txt">>, AttBin}
+ ]),
{stream, Stream} = couch_att:fetch(data, Att0),
?assertEqual(true, couch_db_engine:is_active_stream(Db1, Stream)),
@@ -94,6 +92,7 @@ cpse_inactive_stream(Db1) ->
{ok, Db2} = couch_db:reopen(Db1),
?assertEqual(false, couch_db_engine:is_active_stream(Db2, Stream))
- catch throw:not_supported ->
- ok
+ catch
+ throw:not_supported ->
+ ok
end.
diff --git a/src/couch_pse_tests/src/cpse_test_compaction.erl b/src/couch_pse_tests/src/cpse_test_compaction.erl
index 6bc470b2f..3be95db60 100644
--- a/src/couch_pse_tests/src/cpse_test_compaction.erl
+++ b/src/couch_pse_tests/src/cpse_test_compaction.erl
@@ -14,20 +14,16 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
Db.
-
teardown_each(Db) ->
ok = couch_server:delete(couch_db:name(Db), []).
-
cpse_compact_empty(Db1) ->
Term1 = cpse_util:db_as_term(Db1),
@@ -39,7 +35,6 @@ cpse_compact_empty(Db1) ->
Diff = cpse_util:term_diff(Term1, Term2),
?assertEqual(nodiff, Diff).
-
cpse_compact_doc(Db1) ->
Actions = [{create, {<<"foo">>, {[]}}}],
{ok, Db2} = cpse_util:apply_actions(Db1, Actions),
@@ -53,7 +48,6 @@ cpse_compact_doc(Db1) ->
Diff = cpse_util:term_diff(Term1, Term2),
?assertEqual(nodiff, Diff).
-
cpse_compact_local_doc(Db1) ->
Actions = [{create, {<<"_local/foo">>, {[]}}}],
{ok, Db2} = cpse_util:apply_actions(Db1, Actions),
@@ -67,16 +61,21 @@ cpse_compact_local_doc(Db1) ->
Diff = cpse_util:term_diff(Term1, Term2),
?assertEqual(nodiff, Diff).
-
cpse_compact_with_everything(Db1) ->
% Add a whole bunch of docs
- DocActions = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
-
- LocalActions = lists:map(fun(I) ->
- {create, {local_docid(I), {[{<<"int">>, I}]}}}
- end, lists:seq(1, 25)),
+ DocActions = lists:map(
+ fun(Seq) ->
+ {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
+ end,
+ lists:seq(1, 1000)
+ ),
+
+ LocalActions = lists:map(
+ fun(I) ->
+ {create, {local_docid(I), {[{<<"int">>, I}]}}}
+ end,
+ lists:seq(1, 25)
+ ),
Actions1 = DocActions ++ LocalActions,
@@ -110,11 +109,13 @@ cpse_compact_with_everything(Db1) ->
],
{ok, PIdRevs4} = couch_db_engine:fold_purge_infos(
- Db4, 0, fun fold_fun/2, [], []),
+ Db4, 0, fun fold_fun/2, [], []
+ ),
?assertEqual(PurgedIdRevs, PIdRevs4),
- {ok, Db5} = try
- [Att0, Att1, Att2, Att3, Att4] = cpse_util:prep_atts(Db4, [
+ {ok, Db5} =
+ try
+ [Att0, Att1, Att2, Att3, Att4] = cpse_util:prep_atts(Db4, [
{<<"ohai.txt">>, crypto:strong_rand_bytes(2048)},
{<<"stuff.py">>, crypto:strong_rand_bytes(32768)},
{<<"a.erl">>, crypto:strong_rand_bytes(29)},
@@ -122,15 +123,16 @@ cpse_compact_with_everything(Db1) ->
{<<"a.app">>, crypto:strong_rand_bytes(400)}
]),
- Actions4 = [
- {create, {<<"small_att">>, {[]}, [Att0]}},
- {create, {<<"large_att">>, {[]}, [Att1]}},
- {create, {<<"multi_att">>, {[]}, [Att2, Att3, Att4]}}
- ],
- cpse_util:apply_actions(Db4, Actions4)
- catch throw:not_supported ->
- {ok, Db4}
- end,
+ Actions4 = [
+ {create, {<<"small_att">>, {[]}, [Att0]}},
+ {create, {<<"large_att">>, {[]}, [Att1]}},
+ {create, {<<"multi_att">>, {[]}, [Att2, Att3, Att4]}}
+ ],
+ cpse_util:apply_actions(Db4, Actions4)
+ catch
+ throw:not_supported ->
+ {ok, Db4}
+ end,
{ok, Db6} = couch_db:reopen(Db5),
Term1 = cpse_util:db_as_term(Db6),
@@ -150,11 +152,13 @@ cpse_compact_with_everything(Db1) ->
Diff = cpse_util:term_diff(Term1, Term2),
?assertEqual(nodiff, Diff).
-
cpse_recompact_updates(Db1) ->
- Actions1 = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
+ Actions1 = lists:map(
+ fun(Seq) ->
+ {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
+ end,
+ lists:seq(1, 1000)
+ ),
{ok, Db2} = cpse_util:apply_batch(Db1, Actions1),
{ok, Compactor} = couch_db:start_compact(Db2),
@@ -177,11 +181,13 @@ cpse_recompact_updates(Db1) ->
Diff = cpse_util:term_diff(Term1, Term2),
?assertEqual(nodiff, Diff).
-
cpse_purge_during_compact(Db1) ->
- Actions1 = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
+ Actions1 = lists:map(
+ fun(Seq) ->
+ {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
+ end,
+ lists:seq(1, 1000)
+ ),
Actions2 = [
{create, {<<"foo">>, {[]}}},
{create, {<<"bar">>, {[]}}},
@@ -216,11 +222,13 @@ cpse_purge_during_compact(Db1) ->
Diff = cpse_util:term_diff(Term1, Term2),
?assertEqual(nodiff, Diff).
-
cpse_multiple_purge_during_compact(Db1) ->
- Actions1 = lists:map(fun(Seq) ->
- {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
- end, lists:seq(1, 1000)),
+ Actions1 = lists:map(
+ fun(Seq) ->
+ {create, {docid(Seq), {[{<<"int">>, Seq}]}}}
+ end,
+ lists:seq(1, 1000)
+ ),
Actions2 = [
{create, {<<"foo">>, {[]}}},
{create, {<<"bar">>, {[]}}},
@@ -233,7 +241,6 @@ cpse_multiple_purge_during_compact(Db1) ->
],
{ok, Db3} = cpse_util:apply_actions(Db2, Actions3),
-
{ok, Pid} = couch_db:start_compact(Db3),
catch erlang:suspend_process(Pid),
@@ -261,31 +268,39 @@ cpse_multiple_purge_during_compact(Db1) ->
Diff = cpse_util:term_diff(Term1, Term2),
?assertEqual(nodiff, Diff).
-
cpse_compact_purged_docs_limit(Db1) ->
NumDocs = 1200,
- {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action| CActions], [Id1| CIds]}
- end, {[], []}, lists:seq(1, NumDocs)),
+ {RActions, RIds} = lists:foldl(
+ fun(Id, {CActions, CIds}) ->
+ Id1 = docid(Id),
+ Action = {create, {Id1, {[{<<"int">>, Id}]}}},
+ {[Action | CActions], [Id1 | CIds]}
+ end,
+ {[], []},
+ lists:seq(1, NumDocs)
+ ),
Ids = lists:reverse(RIds),
{ok, Db2} = cpse_util:apply_batch(Db1, lists:reverse(RActions)),
FDIs = couch_db_engine:open_docs(Db2, Ids),
- RActions2 = lists:foldl(fun(FDI, CActions) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- [{purge, {Id, Rev}}| CActions]
- end, [], FDIs),
+ RActions2 = lists:foldl(
+ fun(FDI, CActions) ->
+ Id = FDI#full_doc_info.id,
+ PrevRev = cpse_util:prev_rev(FDI),
+ Rev = PrevRev#rev_info.rev,
+ [{purge, {Id, Rev}} | CActions]
+ end,
+ [],
+ FDIs
+ ),
{ok, Db3} = cpse_util:apply_batch(Db2, lists:reverse(RActions2)),
% check that before compaction all NumDocs of purge_requests
% are in purge_tree,
% even if NumDocs=1200 is greater than purged_docs_limit=1000
{ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []),
+ Db3, 0, fun fold_fun/2, [], []
+ ),
?assertEqual(1, couch_db_engine:get_oldest_purge_seq(Db3)),
?assertEqual(NumDocs, length(PurgedIdRevs)),
@@ -298,21 +313,19 @@ cpse_compact_purged_docs_limit(Db1) ->
PurgedDocsLimit = couch_db_engine:get_purge_infos_limit(Db4),
OldestPSeq = couch_db_engine:get_oldest_purge_seq(Db4),
{ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos(
- Db4, OldestPSeq - 1, fun fold_fun/2, [], []),
+ Db4, OldestPSeq - 1, fun fold_fun/2, [], []
+ ),
ExpectedOldestPSeq = NumDocs - PurgedDocsLimit + 1,
?assertEqual(ExpectedOldestPSeq, OldestPSeq),
?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2)).
-
docid(I) ->
Str = io_lib:format("~4..0b", [I]),
iolist_to_binary(Str).
-
local_docid(I) ->
Str = io_lib:format("_local/~4..0b", [I]),
iolist_to_binary(Str).
-
fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
{ok, [{Id, Revs} | Acc]}.
diff --git a/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
index 4e41430d3..a32f866b4 100644
--- a/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
+++ b/src/couch_pse_tests/src/cpse_test_copy_purge_infos.erl
@@ -14,35 +14,35 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(NUM_DOCS, 100).
-
setup_each() ->
{ok, SrcDb} = cpse_util:create_db(),
{ok, SrcDb2} = create_and_purge(SrcDb),
{ok, TrgDb} = cpse_util:create_db(),
{SrcDb2, TrgDb}.
-
teardown_each({SrcDb, TrgDb}) ->
ok = couch_server:delete(couch_db:name(SrcDb), []),
ok = couch_server:delete(couch_db:name(TrgDb), []).
-
cpse_copy_empty_purged_info({_, Db}) ->
{ok, Db1} = couch_db_engine:copy_purge_infos(Db, []),
?assertEqual(ok, cpse_util:assert_each_prop(Db1, [{purge_infos, []}])).
-
cpse_copy_purged_info({SrcDb, TrgDb}) ->
- {ok, RPIs} = couch_db_engine:fold_purge_infos(SrcDb, 0, fun(PI, Acc) ->
- {ok, [PI | Acc]}
- end, [], []),
+ {ok, RPIs} = couch_db_engine:fold_purge_infos(
+ SrcDb,
+ 0,
+ fun(PI, Acc) ->
+ {ok, [PI | Acc]}
+ end,
+ [],
+ []
+ ),
PIs = lists:reverse(RPIs),
AEPFold = fun({PSeq, UUID, Id, Revs}, {CPSeq, CPurges}) ->
{max(PSeq, CPSeq), [{UUID, Id, Revs} | CPurges]}
@@ -53,30 +53,36 @@ cpse_copy_purged_info({SrcDb, TrgDb}) ->
AssertProps = [{purge_seq, PurgeSeq}, {purge_infos, Purges}],
?assertEqual(ok, cpse_util:assert_each_prop(TrgDb2, AssertProps)).
-
create_and_purge(Db) ->
- {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action| CActions], [Id1| CIds]}
- end, {[], []}, lists:seq(1, ?NUM_DOCS)),
+ {RActions, RIds} = lists:foldl(
+ fun(Id, {CActions, CIds}) ->
+ Id1 = docid(Id),
+ Action = {create, {Id1, {[{<<"int">>, Id}]}}},
+ {[Action | CActions], [Id1 | CIds]}
+ end,
+ {[], []},
+ lists:seq(1, ?NUM_DOCS)
+ ),
Actions = lists:reverse(RActions),
Ids = lists:reverse(RIds),
{ok, Db1} = cpse_util:apply_batch(Db, Actions),
FDIs = couch_db_engine:open_docs(Db1, Ids),
- RActions2 = lists:foldl(fun(FDI, CActions) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- [Action| CActions]
- end, [], FDIs),
+ RActions2 = lists:foldl(
+ fun(FDI, CActions) ->
+ Id = FDI#full_doc_info.id,
+ PrevRev = cpse_util:prev_rev(FDI),
+ Rev = PrevRev#rev_info.rev,
+ Action = {purge, {Id, Rev}},
+ [Action | CActions]
+ end,
+ [],
+ FDIs
+ ),
Actions2 = lists:reverse(RActions2),
{ok, Db2} = cpse_util:apply_batch(Db1, Actions2),
{ok, Db2}.
-
docid(I) ->
Str = io_lib:format("~4..0b", [I]),
iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_changes.erl b/src/couch_pse_tests/src/cpse_test_fold_changes.erl
index 436396276..91f7c63e9 100644
--- a/src/couch_pse_tests/src/cpse_test_fold_changes.erl
+++ b/src/couch_pse_tests/src/cpse_test_fold_changes.erl
@@ -14,37 +14,34 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(NUM_DOCS, 25).
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
Db.
-
teardown_each(Db) ->
ok = couch_server:delete(couch_db:name(Db), []).
-
cpse_empty_changes(Db) ->
?assertEqual(0, couch_db_engine:count_changes_since(Db, 0)),
- ?assertEqual({ok, []},
- couch_db_engine:fold_changes(Db, 0, fun fold_fun/2, [], [])).
-
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:fold_changes(Db, 0, fun fold_fun/2, [], [])
+ ).
cpse_single_change(Db1) ->
Actions = [{create, {<<"a">>, {[]}}}],
{ok, Db2} = cpse_util:apply_actions(Db1, Actions),
?assertEqual(1, couch_db_engine:count_changes_since(Db2, 0)),
- ?assertEqual({ok, [{<<"a">>, 1}]},
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])).
-
+ ?assertEqual(
+ {ok, [{<<"a">>, 1}]},
+ couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])
+ ).
cpse_two_changes(Db1) ->
Actions = [
@@ -55,10 +52,9 @@ cpse_two_changes(Db1) ->
?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
{ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
+ couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
cpse_two_changes_batch(Db1) ->
Actions = [
{batch, [
@@ -70,10 +66,9 @@ cpse_two_changes_batch(Db1) ->
?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
{ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
+ couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
cpse_two_changes_batch_sorted(Db1) ->
Actions = [
{batch, [
@@ -85,10 +80,9 @@ cpse_two_changes_batch_sorted(Db1) ->
?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
{ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
+ couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
?assertEqual([{<<"a">>, 1}, {<<"b">>, 2}], lists:reverse(Changes)).
-
cpse_update_one(Db1) ->
Actions = [
{create, {<<"a">>, {[]}}},
@@ -97,9 +91,10 @@ cpse_update_one(Db1) ->
{ok, Db2} = cpse_util:apply_actions(Db1, Actions),
?assertEqual(1, couch_db_engine:count_changes_since(Db2, 0)),
- ?assertEqual({ok, [{<<"a">>, 2}]},
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])).
-
+ ?assertEqual(
+ {ok, [{<<"a">>, 2}]},
+ couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], [])
+ ).
cpse_update_first_of_two(Db1) ->
Actions = [
@@ -111,10 +106,9 @@ cpse_update_first_of_two(Db1) ->
?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
{ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
+ couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
?assertEqual([{<<"b">>, 2}, {<<"a">>, 3}], lists:reverse(Changes)).
-
cpse_update_second_of_two(Db1) ->
Actions = [
{create, {<<"a">>, {[]}}},
@@ -125,14 +119,18 @@ cpse_update_second_of_two(Db1) ->
?assertEqual(2, couch_db_engine:count_changes_since(Db2, 0)),
{ok, Changes} =
- couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
+ couch_db_engine:fold_changes(Db2, 0, fun fold_fun/2, [], []),
?assertEqual([{<<"a">>, 1}, {<<"b">>, 3}], lists:reverse(Changes)).
-
cpse_check_mutation_ordering(Db1) ->
- Actions = shuffle(lists:map(fun(Seq) ->
- {create, {docid(Seq), {[]}}}
- end, lists:seq(1, ?NUM_DOCS))),
+ Actions = shuffle(
+ lists:map(
+ fun(Seq) ->
+ {create, {docid(Seq), {[]}}}
+ end,
+ lists:seq(1, ?NUM_DOCS)
+ )
+ ),
DocIdOrder = [DocId || {_, {DocId, _}} <- Actions],
DocSeqs = lists:zip(DocIdOrder, lists:seq(1, ?NUM_DOCS)),
@@ -141,45 +139,44 @@ cpse_check_mutation_ordering(Db1) ->
% First lets see that we can get the correct
% suffix/prefix starting at every update sequence
- lists:foreach(fun(Seq) ->
- {ok, Suffix} =
+ lists:foreach(
+ fun(Seq) ->
+ {ok, Suffix} =
couch_db_engine:fold_changes(Db2, Seq, fun fold_fun/2, [], []),
- ?assertEqual(lists:nthtail(Seq, DocSeqs), lists:reverse(Suffix)),
+ ?assertEqual(lists:nthtail(Seq, DocSeqs), lists:reverse(Suffix)),
- {ok, Prefix} = couch_db_engine:fold_changes(
- Db2, Seq, fun fold_fun/2, [], [{dir, rev}]),
- ?assertEqual(lists:sublist(DocSeqs, Seq + 1), Prefix)
- end, lists:seq(0, ?NUM_DOCS)),
+ {ok, Prefix} = couch_db_engine:fold_changes(
+ Db2, Seq, fun fold_fun/2, [], [{dir, rev}]
+ ),
+ ?assertEqual(lists:sublist(DocSeqs, Seq + 1), Prefix)
+ end,
+ lists:seq(0, ?NUM_DOCS)
+ ),
ok = do_mutation_ordering(Db2, ?NUM_DOCS + 1, DocSeqs, []).
-
do_mutation_ordering(Db, _Seq, [], FinalDocSeqs) ->
{ok, RevOrder} = couch_db_engine:fold_changes(Db, 0, fun fold_fun/2, [], []),
?assertEqual(FinalDocSeqs, lists:reverse(RevOrder)),
ok;
-
do_mutation_ordering(Db, Seq, [{DocId, _OldSeq} | Rest], DocSeqAcc) ->
Actions = [{update, {DocId, {[]}}}],
{ok, NewDb} = cpse_util:apply_actions(Db, Actions),
NewAcc = DocSeqAcc ++ [{DocId, Seq}],
Expected = Rest ++ NewAcc,
{ok, RevOrder} =
- couch_db_engine:fold_changes(NewDb, 0, fun fold_fun/2, [], []),
+ couch_db_engine:fold_changes(NewDb, 0, fun fold_fun/2, [], []),
?assertEqual(Expected, lists:reverse(RevOrder)),
do_mutation_ordering(NewDb, Seq + 1, Rest, NewAcc).
-
shuffle(List) ->
Paired = [{couch_rand:uniform(), I} || I <- List],
Sorted = lists:sort(Paired),
[I || {_, I} <- Sorted].
-
-fold_fun(#full_doc_info{id=Id, update_seq=Seq}, Acc) ->
+fold_fun(#full_doc_info{id = Id, update_seq = Seq}, Acc) ->
{ok, [{Id, Seq} | Acc]}.
-
docid(I) ->
Str = io_lib:format("~4..0b", [I]),
iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_docs.erl b/src/couch_pse_tests/src/cpse_test_fold_docs.erl
index d43930c4a..2d6eb7a9d 100644
--- a/src/couch_pse_tests/src/cpse_test_fold_docs.erl
+++ b/src/couch_pse_tests/src/cpse_test_fold_docs.erl
@@ -14,70 +14,53 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(NUM_DOCS, 100).
-
setup_each() ->
cpse_util:dbname().
-
teardown_each(DbName) ->
ok = couch_server:delete(DbName, []).
-
cpse_fold_all(DbName) ->
fold_all(DbName, fold_docs, fun docid/1).
-
cpse_fold_all_local(DbName) ->
fold_all(DbName, fold_local_docs, fun local_docid/1).
-
cpse_fold_start_key(DbName) ->
fold_start_key(DbName, fold_docs, fun docid/1).
-
cpse_fold_start_key_local(DbName) ->
fold_start_key(DbName, fold_local_docs, fun local_docid/1).
-
cpse_fold_end_key(DbName) ->
fold_end_key(DbName, fold_docs, fun docid/1).
-
cpse_fold_end_key_local(DbName) ->
fold_end_key(DbName, fold_local_docs, fun local_docid/1).
-
cpse_fold_end_key_gt(DbName) ->
fold_end_key_gt(DbName, fold_docs, fun docid/1).
-
cpse_fold_end_key_gt_local(DbName) ->
fold_end_key_gt(DbName, fold_local_docs, fun local_docid/1).
-
cpse_fold_range(DbName) ->
fold_range(DbName, fold_docs, fun docid/1).
-
cpse_fold_range_local(DbName) ->
fold_range(DbName, fold_local_docs, fun local_docid/1).
-
cpse_fold_stop(DbName) ->
fold_user_fun_stop(DbName, fold_docs, fun docid/1).
-
cpse_fold_stop_local(DbName) ->
fold_user_fun_stop(DbName, fold_local_docs, fun local_docid/1).
-
% This is a loose test but we have to have this until
% I figure out what to do about the total_rows/offset
% meta data included in _all_docs
@@ -89,7 +72,6 @@ cpse_fold_include_reductions(DbName) ->
?assert(is_integer(Count)),
?assert(Count >= 0).
-
fold_all(DbName, FoldFun, DocIdFun) ->
DocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
{ok, Db} = init_db(DbName, DocIdFun),
@@ -103,7 +85,6 @@ fold_all(DbName, FoldFun, DocIdFun) ->
?assertEqual(?NUM_DOCS, length(DocIdAccRev)),
?assertEqual(DocIds, DocIdAccRev).
-
fold_start_key(DbName, FoldFun, DocIdFun) ->
{ok, Db} = init_db(DbName, DocIdFun),
@@ -114,138 +95,153 @@ fold_start_key(DbName, FoldFun, DocIdFun) ->
DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, ?NUM_DOCS)],
DocIdsRev = [DocIdFun(I) || I <- lists:seq(1, StartKeyNum)],
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{start_key, <<255>>}
- ])),
+ ])
+ ),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{dir, rev},
{start_key, <<"">>}
- ])),
+ ])
+ ),
{ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>}
- ]),
+ {start_key, <<"">>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
{ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<255>>}
- ]),
+ {dir, rev},
+ {start_key, <<255>>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccRev)),
?assertEqual(AllDocIds, AllDocIdAccRev),
{ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, StartKey}
- ]),
+ {start_key, StartKey}
+ ]),
?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
{ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, StartKey}
- ]),
+ {dir, rev},
+ {start_key, StartKey}
+ ]),
?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
?assertEqual(DocIdsRev, DocIdAccRev).
-
fold_end_key(DbName, FoldFun, DocIdFun) ->
{ok, Db} = init_db(DbName, DocIdFun),
EndKeyNum = ?NUM_DOCS div 4,
EndKey = DocIdFun(EndKeyNum),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{end_key, <<"">>}
- ])),
+ ])
+ ),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{dir, rev},
{end_key, <<255>>}
- ])),
+ ])
+ ),
AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
{ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, <<255>>}
- ]),
+ {end_key, <<255>>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
{ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, <<"">>}
- ]),
+ {dir, rev},
+ {end_key, <<"">>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
?assertEqual(AllDocIds, AllDocIdAccRev),
DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum)],
{ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key, EndKey}
- ]),
+ {end_key, EndKey}
+ ]),
?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum, ?NUM_DOCS)],
{ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key, EndKey}
- ]),
+ {dir, rev},
+ {end_key, EndKey}
+ ]),
?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
?assertEqual(DocIdsRev, DocIdAccRev).
-
fold_end_key_gt(DbName, FoldFun, DocIdFun) ->
{ok, Db} = init_db(DbName, DocIdFun),
EndKeyNum = ?NUM_DOCS div 4,
EndKey = DocIdFun(EndKeyNum),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{end_key_gt, <<"">>}
- ])),
+ ])
+ ),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{dir, rev},
{end_key_gt, <<255>>}
- ])),
+ ])
+ ),
AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
{ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, <<255>>}
- ]),
+ {end_key_gt, <<255>>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
{ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, <<"">>}
- ]),
+ {dir, rev},
+ {end_key_gt, <<"">>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
?assertEqual(AllDocIds, AllDocIdAccRev),
DocIdsFwd = [DocIdFun(I) || I <- lists:seq(1, EndKeyNum - 1)],
{ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {end_key_gt, EndKey}
- ]),
+ {end_key_gt, EndKey}
+ ]),
?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
DocIdsRev = [DocIdFun(I) || I <- lists:seq(EndKeyNum + 1, ?NUM_DOCS)],
{ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {end_key_gt, EndKey}
- ]),
+ {dir, rev},
+ {end_key_gt, EndKey}
+ ]),
?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
?assertEqual(DocIdsRev, DocIdAccRev).
-
fold_range(DbName, FoldFun, DocIdFun) ->
{ok, Db} = init_db(DbName, DocIdFun),
@@ -255,133 +251,153 @@ fold_range(DbName, FoldFun, DocIdFun) ->
StartKey = DocIdFun(StartKeyNum),
EndKey = DocIdFun(EndKeyNum),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{start_key, <<"">>},
{end_key, <<"">>}
- ])),
+ ])
+ ),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{dir, rev},
{start_key, <<"">>},
{end_key, <<255>>}
- ])),
+ ])
+ ),
AllDocIds = [DocIdFun(I) || I <- lists:seq(1, ?NUM_DOCS)],
{ok, AllDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, <<"">>},
- {end_key, <<255>>}
- ]),
+ {start_key, <<"">>},
+ {end_key, <<255>>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
?assertEqual(AllDocIds, lists:reverse(AllDocIdAccFwd)),
{ok, AllDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, <<255>>},
- {end_key_gt, <<"">>}
- ]),
+ {dir, rev},
+ {start_key, <<255>>},
+ {end_key_gt, <<"">>}
+ ]),
?assertEqual(length(AllDocIds), length(AllDocIdAccFwd)),
?assertEqual(AllDocIds, AllDocIdAccRev),
DocIdsFwd = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
{ok, DocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {start_key, StartKey},
- {end_key, EndKey}
- ]),
+ {start_key, StartKey},
+ {end_key, EndKey}
+ ]),
?assertEqual(length(DocIdsFwd), length(DocIdAccFwd)),
?assertEqual(DocIdsFwd, lists:reverse(DocIdAccFwd)),
DocIdsRev = [DocIdFun(I) || I <- lists:seq(StartKeyNum, EndKeyNum)],
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
{dir, rev},
{start_key, StartKey},
{end_key, EndKey}
- ])),
+ ])
+ ),
{ok, DocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_fun/2, [], [
- {dir, rev},
- {start_key, EndKey},
- {end_key, StartKey}
- ]),
+ {dir, rev},
+ {start_key, EndKey},
+ {end_key, StartKey}
+ ]),
?assertEqual(length(DocIdsRev), length(DocIdAccRev)),
?assertEqual(DocIdsRev, DocIdAccRev).
-
fold_user_fun_stop(DbName, FoldFun, DocIdFun) ->
{ok, Db} = init_db(DbName, DocIdFun),
StartKeyNum = ?NUM_DOCS div 4,
StartKey = DocIdFun(StartKeyNum),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
{start_key, <<255>>}
- ])),
+ ])
+ ),
- ?assertEqual({ok, []}, couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
{dir, rev},
{start_key, <<"">>}
- ])),
+ ])
+ ),
SuffixDocIds = [DocIdFun(I) || I <- lists:seq(?NUM_DOCS - 3, ?NUM_DOCS)],
{ok, SuffixDocIdAcc} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, DocIdFun(?NUM_DOCS - 3)}
- ]),
+ {start_key, DocIdFun(?NUM_DOCS - 3)}
+ ]),
?assertEqual(length(SuffixDocIds), length(SuffixDocIdAcc)),
?assertEqual(SuffixDocIds, lists:reverse(SuffixDocIdAcc)),
PrefixDocIds = [DocIdFun(I) || I <- lists:seq(1, 3)],
{ok, PrefixDocIdAcc} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, DocIdFun(3)}
- ]),
+ {dir, rev},
+ {start_key, DocIdFun(3)}
+ ]),
?assertEqual(3, length(PrefixDocIdAcc)),
?assertEqual(PrefixDocIds, PrefixDocIdAcc),
- FiveDocIdsFwd = [DocIdFun(I)
- || I <- lists:seq(StartKeyNum, StartKeyNum + 5)],
+ FiveDocIdsFwd = [
+ DocIdFun(I)
+ || I <- lists:seq(StartKeyNum, StartKeyNum + 5)
+ ],
{ok, FiveDocIdAccFwd} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {start_key, StartKey}
- ]),
+ {start_key, StartKey}
+ ]),
?assertEqual(length(FiveDocIdsFwd), length(FiveDocIdAccFwd)),
?assertEqual(FiveDocIdsFwd, lists:reverse(FiveDocIdAccFwd)),
- FiveDocIdsRev = [DocIdFun(I)
- || I <- lists:seq(StartKeyNum - 5, StartKeyNum)],
+ FiveDocIdsRev = [
+ DocIdFun(I)
+ || I <- lists:seq(StartKeyNum - 5, StartKeyNum)
+ ],
{ok, FiveDocIdAccRev} = couch_db_engine:FoldFun(Db, fun fold_stop/2, [], [
- {dir, rev},
- {start_key, StartKey}
- ]),
+ {dir, rev},
+ {start_key, StartKey}
+ ]),
?assertEqual(length(FiveDocIdsRev), length(FiveDocIdAccRev)),
?assertEqual(FiveDocIdsRev, FiveDocIdAccRev).
-
init_db(DbName, DocIdFun) ->
{ok, Db1} = cpse_util:create_db(DbName),
- Actions = lists:map(fun(Id) ->
- {create, {DocIdFun(Id), {[{<<"int">>, Id}]}}}
- end, lists:seq(1, ?NUM_DOCS)),
+ Actions = lists:map(
+ fun(Id) ->
+ {create, {DocIdFun(Id), {[{<<"int">>, Id}]}}}
+ end,
+ lists:seq(1, ?NUM_DOCS)
+ ),
cpse_util:apply_actions(Db1, [{batch, Actions}]).
-
fold_fun(Doc, Acc) ->
- Id = case Doc of
- #doc{id = Id0} -> Id0;
- #full_doc_info{id = Id0} -> Id0
- end,
+ Id =
+ case Doc of
+ #doc{id = Id0} -> Id0;
+ #full_doc_info{id = Id0} -> Id0
+ end,
{ok, [Id | Acc]}.
-
fold_stop(Doc, Acc) ->
- Id = case Doc of
- #doc{id = Id0} -> Id0;
- #full_doc_info{id = Id0} -> Id0
- end,
+ Id =
+ case Doc of
+ #doc{id = Id0} -> Id0;
+ #full_doc_info{id = Id0} -> Id0
+ end,
case length(Acc) of
N when N =< 4 ->
{ok, [Id | Acc]};
@@ -389,12 +405,10 @@ fold_stop(Doc, Acc) ->
{stop, [Id | Acc]}
end.
-
docid(I) ->
Str = io_lib:format("~4..0b", [I]),
iolist_to_binary(Str).
-
local_docid(I) ->
Str = io_lib:format("_local/~4..0b", [I]),
iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
index 4826c5d9c..6225cbdb0 100644
--- a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
+++ b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl
@@ -14,54 +14,60 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(NUM_DOCS, 100).
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
Db.
-
teardown_each(Db) ->
ok = couch_server:delete(couch_db:name(Db), []).
-
cpse_empty_purged_docs(Db) ->
- ?assertEqual({ok, []}, couch_db_engine:fold_purge_infos(
- Db, 0, fun fold_fun/2, [], [])).
-
+ ?assertEqual(
+ {ok, []},
+ couch_db_engine:fold_purge_infos(
+ Db, 0, fun fold_fun/2, [], []
+ )
+ ).
cpse_all_purged_docs(Db1) ->
- {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) ->
- Id1 = docid(Id),
- Action = {create, {Id1, {[{<<"int">>, Id}]}}},
- {[Action| CActions], [Id1| CIds]}
- end, {[], []}, lists:seq(1, ?NUM_DOCS)),
+ {RActions, RIds} = lists:foldl(
+ fun(Id, {CActions, CIds}) ->
+ Id1 = docid(Id),
+ Action = {create, {Id1, {[{<<"int">>, Id}]}}},
+ {[Action | CActions], [Id1 | CIds]}
+ end,
+ {[], []},
+ lists:seq(1, ?NUM_DOCS)
+ ),
Actions = lists:reverse(RActions),
Ids = lists:reverse(RIds),
{ok, Db2} = cpse_util:apply_batch(Db1, Actions),
FDIs = couch_db_engine:open_docs(Db2, Ids),
- {RevActions2, RevIdRevs} = lists:foldl(fun(FDI, {CActions, CIdRevs}) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- {[Action| CActions], [{Id, [Rev]}| CIdRevs]}
- end, {[], []}, FDIs),
+ {RevActions2, RevIdRevs} = lists:foldl(
+ fun(FDI, {CActions, CIdRevs}) ->
+ Id = FDI#full_doc_info.id,
+ PrevRev = cpse_util:prev_rev(FDI),
+ Rev = PrevRev#rev_info.rev,
+ Action = {purge, {Id, Rev}},
+ {[Action | CActions], [{Id, [Rev]} | CIdRevs]}
+ end,
+ {[], []},
+ FDIs
+ ),
{Actions2, IdsRevs} = {lists:reverse(RevActions2), lists:reverse(RevIdRevs)},
{ok, Db3} = cpse_util:apply_batch(Db2, Actions2),
{ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []),
+ Db3, 0, fun fold_fun/2, [], []
+ ),
?assertEqual(IdsRevs, lists:reverse(PurgedIdRevs)).
-
cpse_start_seq(Db1) ->
Actions1 = [
{create, {docid(1), {[{<<"int">>, 1}]}}},
@@ -74,22 +80,26 @@ cpse_start_seq(Db1) ->
{ok, Db2} = cpse_util:apply_actions(Db1, Actions1),
FDIs = couch_db_engine:open_docs(Db2, Ids),
- {RActions2, RIdRevs} = lists:foldl(fun(FDI, {CActions, CIdRevs}) ->
- Id = FDI#full_doc_info.id,
- PrevRev = cpse_util:prev_rev(FDI),
- Rev = PrevRev#rev_info.rev,
- Action = {purge, {Id, Rev}},
- {[Action| CActions], [{Id, [Rev]}| CIdRevs]}
- end, {[], []}, FDIs),
+ {RActions2, RIdRevs} = lists:foldl(
+ fun(FDI, {CActions, CIdRevs}) ->
+ Id = FDI#full_doc_info.id,
+ PrevRev = cpse_util:prev_rev(FDI),
+ Rev = PrevRev#rev_info.rev,
+ Action = {purge, {Id, Rev}},
+ {[Action | CActions], [{Id, [Rev]} | CIdRevs]}
+ end,
+ {[], []},
+ FDIs
+ ),
{ok, Db3} = cpse_util:apply_actions(Db2, lists:reverse(RActions2)),
StartSeq = 3,
StartSeqIdRevs = lists:nthtail(StartSeq, lists:reverse(RIdRevs)),
{ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos(
- Db3, StartSeq, fun fold_fun/2, [], []),
+ Db3, StartSeq, fun fold_fun/2, [], []
+ ),
?assertEqual(StartSeqIdRevs, lists:reverse(PurgedIdRevs)).
-
cpse_id_rev_repeated(Db1) ->
Actions1 = [
{create, {<<"foo">>, {[{<<"vsn">>, 1}]}}},
@@ -106,7 +116,8 @@ cpse_id_rev_repeated(Db1) ->
{ok, Db3} = cpse_util:apply_actions(Db2, Actions2),
{ok, PurgedIdRevs1} = couch_db_engine:fold_purge_infos(
- Db3, 0, fun fold_fun/2, [], []),
+ Db3, 0, fun fold_fun/2, [], []
+ ),
ExpectedPurgedIdRevs1 = [
{<<"foo">>, [Rev1]}
],
@@ -117,7 +128,8 @@ cpse_id_rev_repeated(Db1) ->
% purge the same Id,Rev when the doc still exists
{ok, Db4} = cpse_util:apply_actions(Db3, Actions2),
{ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos(
- Db4, 0, fun fold_fun/2, [], []),
+ Db4, 0, fun fold_fun/2, [], []
+ ),
ExpectedPurgedIdRevs2 = [
{<<"foo">>, [Rev1]},
{<<"foo">>, [Rev1]}
@@ -134,7 +146,8 @@ cpse_id_rev_repeated(Db1) ->
{ok, Db5} = cpse_util:apply_actions(Db4, Actions3),
{ok, PurgedIdRevs3} = couch_db_engine:fold_purge_infos(
- Db5, 0, fun fold_fun/2, [], []),
+ Db5, 0, fun fold_fun/2, [], []
+ ),
ExpectedPurgedIdRevs3 = [
{<<"foo">>, [Rev1]},
{<<"foo">>, [Rev1]},
@@ -147,7 +160,8 @@ cpse_id_rev_repeated(Db1) ->
{ok, Db6} = cpse_util:apply_actions(Db5, Actions3),
{ok, PurgedIdRevs4} = couch_db_engine:fold_purge_infos(
- Db6, 0, fun fold_fun/2, [], []),
+ Db6, 0, fun fold_fun/2, [], []
+ ),
ExpectedPurgedIdRevs4 = [
{<<"foo">>, [Rev1]},
{<<"foo">>, [Rev1]},
@@ -157,11 +171,9 @@ cpse_id_rev_repeated(Db1) ->
?assertEqual(ExpectedPurgedIdRevs4, lists:reverse(PurgedIdRevs4)),
?assertEqual(4, couch_db_engine:get_purge_seq(Db6)).
-
fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
{ok, [{Id, Revs} | Acc]}.
-
docid(I) ->
Str = io_lib:format("~4..0b", [I]),
iolist_to_binary(Str).
diff --git a/src/couch_pse_tests/src/cpse_test_get_set_props.erl b/src/couch_pse_tests/src/cpse_test_get_set_props.erl
index d49f67f49..773f1d0dc 100644
--- a/src/couch_pse_tests/src/cpse_test_get_set_props.erl
+++ b/src/couch_pse_tests/src/cpse_test_get_set_props.erl
@@ -14,18 +14,14 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-
setup_each() ->
cpse_util:dbname().
-
teardown_each(DbName) ->
ok = couch_server:delete(DbName, []).
-
cpse_default_props(DbName) ->
{ok, {_App, Engine, _Extension}} = application:get_env(couch, test_engine),
{ok, Db} = cpse_util:create_db(DbName),
@@ -46,16 +42,18 @@ cpse_default_props(DbName) ->
?assertEqual([{Node, 0}], couch_db_engine:get_epochs(Db)),
?assertEqual(0, couch_db_engine:get_compacted_seq(Db)).
-
--define(ADMIN_ONLY_SEC_PROPS, {[
- {<<"members">>, {[
- {<<"roles">>, [<<"_admin">>]}
- ]}},
- {<<"admins">>, {[
- {<<"roles">>, [<<"_admin">>]}
- ]}}
-]}).
-
+-define(ADMIN_ONLY_SEC_PROPS,
+ {[
+ {<<"members">>,
+ {[
+ {<<"roles">>, [<<"_admin">>]}
+ ]}},
+ {<<"admins">>,
+ {[
+ {<<"roles">>, [<<"_admin">>]}
+ ]}}
+ ]}
+).
cpse_admin_only_security(DbName) ->
Config = [{"couchdb", "default_security", "admin_only"}],
@@ -70,16 +68,13 @@ cpse_admin_only_security(DbName) ->
couch_log:error("~n~n~n~n~s -> ~s~n~n", [couch_db:name(Db1), couch_db:name(Db2)]),
?assertEqual(?ADMIN_ONLY_SEC_PROPS, couch_db:get_security(Db2)).
-
cpse_set_security(DbName) ->
SecProps = {[{<<"foo">>, <<"bar">>}]},
check_prop_set(DbName, get_security, set_security, {[]}, SecProps).
-
cpse_set_revs_limit(DbName) ->
check_prop_set(DbName, get_revs_limit, set_revs_limit, 1000, 50).
-
check_prop_set(DbName, GetFun, SetFun, Default, Value) ->
{ok, Db0} = cpse_util:create_db(DbName),
diff --git a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl b/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
index d9b589fd6..c63a05bea 100644
--- a/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
+++ b/src/couch_pse_tests/src/cpse_test_open_close_delete.erl
@@ -14,55 +14,49 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-
setup_each() ->
cpse_util:dbname().
-
teardown_each(DbName) ->
case couch_server:exists(DbName) of
true -> ok = couch_server:delete(DbName, []);
false -> ok
end.
-
cpse_open_non_existent(DbName) ->
% Try twice to check that a failed open doesn't create
% the database for some reason.
?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)).
-
cpse_open_create(DbName) ->
?assertEqual(false, couch_server:exists(DbName)),
?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
?assertMatch({ok, _}, cpse_util:create_db(DbName)),
?assertEqual(true, couch_server:exists(DbName)).
-
cpse_open_when_exists(DbName) ->
?assertEqual(false, couch_server:exists(DbName)),
?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
?assertMatch({ok, _}, cpse_util:create_db(DbName)),
?assertEqual(file_exists, cpse_util:create_db(DbName)).
-
cpse_terminate(DbName) ->
?assertEqual(false, couch_server:exists(DbName)),
?assertEqual({not_found, no_db_file}, cpse_util:open_db(DbName)),
?assertEqual(ok, cycle_db(DbName, create_db)),
?assertEqual(true, couch_server:exists(DbName)).
-
cpse_rapid_recycle(DbName) ->
?assertEqual(ok, cycle_db(DbName, create_db)),
- lists:foreach(fun(_) ->
- ?assertEqual(ok, cycle_db(DbName, open_db))
- end, lists:seq(1, 100)).
-
+ lists:foreach(
+ fun(_) ->
+ ?assertEqual(ok, cycle_db(DbName, open_db))
+ end,
+ lists:seq(1, 100)
+ ).
cpse_delete(DbName) ->
?assertEqual(false, couch_server:exists(DbName)),
@@ -71,7 +65,6 @@ cpse_delete(DbName) ->
?assertEqual(ok, couch_server:delete(DbName, [])),
?assertEqual(false, couch_server:exists(DbName)).
-
cycle_db(DbName, Type) ->
{ok, Db} = cpse_util:Type(DbName),
cpse_util:shutdown_db(Db).
diff --git a/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl b/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
index c7a85c7e4..bddbdb699 100644
--- a/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
+++ b/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl
@@ -14,11 +14,9 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup_each() ->
{ok, Db1} = cpse_util:create_db(),
{ok, Revs} = cpse_util:save_docs(couch_db:name(Db1), [
@@ -33,20 +31,21 @@ setup_each() ->
{[{'_id', foo8}, {vsn, 8}]},
{[{'_id', foo9}, {vsn, 9}]}
]),
- PInfos = lists:map(fun(Idx) ->
- DocId = iolist_to_binary(["foo", $0 + Idx]),
- Rev = lists:nth(Idx + 1, Revs),
- {cpse_util:uuid(), DocId, [Rev]}
- end, lists:seq(0, 9)),
+ PInfos = lists:map(
+ fun(Idx) ->
+ DocId = iolist_to_binary(["foo", $0 + Idx]),
+ Rev = lists:nth(Idx + 1, Revs),
+ {cpse_util:uuid(), DocId, [Rev]}
+ end,
+ lists:seq(0, 9)
+ ),
{ok, _} = cpse_util:purge(couch_db:name(Db1), PInfos),
{ok, Db2} = couch_db:reopen(Db1),
Db2.
-
teardown_each(Db) ->
ok = couch_server:delete(couch_db:name(Db), []).
-
cpse_bad_purge_seq(Db1) ->
Db2 = save_local_doc(Db1, <<"foo">>),
?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)),
@@ -55,7 +54,6 @@ cpse_bad_purge_seq(Db1) ->
{ok, Db3} = couch_db:reopen(Db2),
?assertEqual(1, couch_db:get_minimum_purge_seq(Db3)).
-
cpse_verify_non_boolean(Db1) ->
Db2 = save_local_doc(Db1, 2),
?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)),
@@ -64,17 +62,22 @@ cpse_verify_non_boolean(Db1) ->
{ok, Db3} = couch_db:reopen(Db2),
?assertEqual(5, couch_db:get_minimum_purge_seq(Db3)).
-
save_local_doc(Db1, PurgeSeq) ->
{Mega, Secs, _} = os:timestamp(),
NowSecs = Mega * 1000000 + Secs,
- Doc = couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE({[
- {<<"_id">>, <<"_local/purge-test-stuff">>},
- {<<"purge_seq">>, PurgeSeq},
- {<<"timestamp_utc">>, NowSecs},
- {<<"verify_options">>, {[{<<"signature">>, <<"stuff">>}]}},
- {<<"type">>, <<"test">>}
- ]}))),
+ Doc = couch_doc:from_json_obj(
+ ?JSON_DECODE(
+ ?JSON_ENCODE(
+ {[
+ {<<"_id">>, <<"_local/purge-test-stuff">>},
+ {<<"purge_seq">>, PurgeSeq},
+ {<<"timestamp_utc">>, NowSecs},
+ {<<"verify_options">>, {[{<<"signature">>, <<"stuff">>}]}},
+ {<<"type">>, <<"test">>}
+ ]}
+ )
+ )
+ ),
{ok, _} = couch_db:update_doc(Db1, Doc, []),
{ok, Db2} = couch_db:reopen(Db1),
Db2.
diff --git a/src/couch_pse_tests/src/cpse_test_purge_docs.erl b/src/couch_pse_tests/src/cpse_test_purge_docs.erl
index 60a072da6..f0ed3d747 100644
--- a/src/couch_pse_tests/src/cpse_test_purge_docs.erl
+++ b/src/couch_pse_tests/src/cpse_test_purge_docs.erl
@@ -14,23 +14,18 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(REV_DEPTH, 100).
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
couch_db:name(Db).
-
teardown_each(DbName) ->
ok = couch_server:delete(DbName, []).
-
cpse_purge_simple(DbName) ->
{ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
@@ -57,7 +52,6 @@ cpse_purge_simple(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_simple_info_check(DbName) ->
{ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
PurgeInfos = [
@@ -72,7 +66,6 @@ cpse_purge_simple_info_check(DbName) ->
?assertMatch([{1, <<_/binary>>, <<"foo1">>, [Rev]}], AllInfos).
-
cpse_purge_empty_db(DbName) ->
PurgeInfos = [
{cpse_util:uuid(), <<"foo">>, [{0, <<0>>}]}
@@ -90,7 +83,6 @@ cpse_purge_empty_db(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_single_docid(DbName) ->
{ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
{[{'_id', foo1}, {vsn, 1}]},
@@ -121,7 +113,6 @@ cpse_purge_single_docid(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_multiple_docids(DbName) ->
{ok, [Rev1, Rev2]} = cpse_util:save_docs(DbName, [
{[{'_id', foo1}, {vsn, 1.1}]},
@@ -156,7 +147,6 @@ cpse_purge_multiple_docids(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_no_docids(DbName) ->
{ok, [_Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
{[{'_id', foo1}, {vsn, 1}]},
@@ -183,15 +173,15 @@ cpse_purge_no_docids(DbName) ->
{purge_infos, []}
]).
-
cpse_purge_rev_path(DbName) ->
{ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}),
- Update = {[
- {<<"_id">>, <<"foo">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev1)},
- {<<"_deleted">>, true},
- {<<"vsn">>, 2}
- ]},
+ Update =
+ {[
+ {<<"_id">>, <<"foo">>},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev1)},
+ {<<"_deleted">>, true},
+ {<<"vsn">>, 2}
+ ]},
{ok, Rev2} = cpse_util:save_doc(DbName, Update),
cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
@@ -219,18 +209,22 @@ cpse_purge_rev_path(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_deep_revision_path(DbName) ->
{ok, InitRev} = cpse_util:save_doc(DbName, {[{'_id', bar}, {vsn, 0}]}),
- LastRev = lists:foldl(fun(Count, PrevRev) ->
- Update = {[
- {'_id', bar},
- {'_rev', couch_doc:rev_to_str(PrevRev)},
- {vsn, Count}
- ]},
- {ok, NewRev} = cpse_util:save_doc(DbName, Update),
- NewRev
- end, InitRev, lists:seq(1, ?REV_DEPTH)),
+ LastRev = lists:foldl(
+ fun(Count, PrevRev) ->
+ Update =
+ {[
+ {'_id', bar},
+ {'_rev', couch_doc:rev_to_str(PrevRev)},
+ {vsn, Count}
+ ]},
+ {ok, NewRev} = cpse_util:save_doc(DbName, Update),
+ NewRev
+ end,
+ InitRev,
+ lists:seq(1, ?REV_DEPTH)
+ ),
PurgeInfos = [
{cpse_util:uuid(), <<"bar">>, [LastRev]}
@@ -248,14 +242,14 @@ cpse_purge_deep_revision_path(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_partial_revs(DbName) ->
{ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}),
- Update = {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
+ Update =
+ {[
+ {'_id', foo},
+ {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
+ {vsn, <<"1.2">>}
+ ]},
{ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
PurgeInfos = [
@@ -274,7 +268,6 @@ cpse_purge_partial_revs(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_missing_docid(DbName) ->
{ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
{[{'_id', foo1}, {vsn, 1}]},
@@ -305,7 +298,6 @@ cpse_purge_missing_docid(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_duplicate_docids(DbName) ->
{ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [
{[{'_id', foo1}, {vsn, 1}]},
@@ -338,14 +330,14 @@ cpse_purge_duplicate_docids(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_internal_revision(DbName) ->
{ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}),
- Update = {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str(Rev1)},
- {vsn, 2}
- ]},
+ Update =
+ {[
+ {'_id', foo},
+ {'_rev', couch_doc:rev_to_str(Rev1)},
+ {vsn, 2}
+ ]},
{ok, _Rev2} = cpse_util:save_doc(DbName, Update),
PurgeInfos = [
@@ -364,7 +356,6 @@ cpse_purge_internal_revision(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_missing_revision(DbName) ->
{ok, [_Rev1, Rev2]} = cpse_util:save_docs(DbName, [
{[{'_id', foo1}, {vsn, 1}]},
@@ -387,14 +378,14 @@ cpse_purge_missing_revision(DbName) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_repeated_revisions(DbName) ->
{ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}),
- Update = {[
- {'_id', foo},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
+ Update =
+ {[
+ {'_id', foo},
+ {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
+ {vsn, <<"1.2">>}
+ ]},
{ok, [Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
@@ -424,7 +415,6 @@ cpse_purge_repeated_revisions(DbName) ->
{purge_infos, PurgeInfos1}
]).
-
cpse_purge_repeated_uuid(DbName) ->
{ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
@@ -459,6 +449,5 @@ cpse_purge_repeated_uuid(DbName) ->
{purge_infos, PurgeInfos}
]).
-
fold_all_infos(Info, Acc) ->
{ok, [Info | Acc]}.
diff --git a/src/couch_pse_tests/src/cpse_test_purge_replication.erl b/src/couch_pse_tests/src/cpse_test_purge_replication.erl
index 20dcc2f81..5ec04b711 100644
--- a/src/couch_pse_tests/src/cpse_test_purge_replication.erl
+++ b/src/couch_pse_tests/src/cpse_test_purge_replication.erl
@@ -14,27 +14,22 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
-
setup_all() ->
cpse_util:setup_all([mem3, fabric, couch_replicator]).
-
setup_each() ->
{ok, Src} = cpse_util:create_db(),
{ok, Tgt} = cpse_util:create_db(),
{couch_db:name(Src), couch_db:name(Tgt)}.
-
teardown_each({SrcDb, TgtDb}) ->
ok = couch_server:delete(SrcDb, []),
ok = couch_server:delete(TgtDb, []).
-
cpse_purge_http_replication({Source, Target}) ->
{ok, Rev1} = cpse_util:save_doc(Source, {[{'_id', foo}, {vsn, 1}]}),
@@ -47,10 +42,11 @@ cpse_purge_http_replication({Source, Target}) ->
{purge_infos, []}
]),
- RepObject = {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)}
+ ]},
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
{ok, Doc1} = cpse_util:open_doc(Target, foo),
@@ -99,10 +95,11 @@ cpse_purge_http_replication({Source, Target}) ->
% Show that replicating from the target
% back to the source reintroduces the doc
- RepObject2 = {[
- {<<"source">>, db_url(Target)},
- {<<"target">>, db_url(Source)}
- ]},
+ RepObject2 =
+ {[
+ {<<"source">>, db_url(Target)},
+ {<<"target">>, db_url(Source)}
+ ]},
{ok, _} = couch_replicator:replicate(RepObject2, ?ADMIN_USER),
{ok, Doc3} = cpse_util:open_doc(Source, foo),
@@ -118,7 +115,6 @@ cpse_purge_http_replication({Source, Target}) ->
{purge_infos, PurgeInfos}
]).
-
cpse_purge_internal_repl_disabled({Source, Target}) ->
cpse_util:with_config([{"mem3", "replicate_purges", "false"}], fun() ->
repl(Source, Target),
@@ -151,7 +147,6 @@ cpse_purge_internal_repl_disabled({Source, Target}) ->
?assertMatch({ok, #doc_info{}}, cpse_util:open_doc(Target, <<"foo1">>))
end).
-
cpse_purge_repl_simple_pull({Source, Target}) ->
repl(Source, Target),
@@ -165,7 +160,6 @@ cpse_purge_repl_simple_pull({Source, Target}) ->
?assertEqual([Rev], PRevs),
repl(Source, Target).
-
cpse_purge_repl_simple_push({Source, Target}) ->
repl(Source, Target),
@@ -179,7 +173,6 @@ cpse_purge_repl_simple_push({Source, Target}) ->
?assertEqual([Rev], PRevs),
repl(Source, Target).
-
repl(Source, Target) ->
SrcShard = make_shard(Source),
TgtShard = make_shard(Target),
@@ -192,7 +185,6 @@ repl(Source, Target) ->
Diff = cpse_util:term_diff(SrcTerm, TgtTerm),
?assertEqual(nodiff, Diff).
-
make_shard(DbName) ->
#shard{
name = DbName,
@@ -201,7 +193,6 @@ make_shard(DbName) ->
range = [0, 16#FFFFFFFF]
}.
-
db_url(DbName) ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
diff --git a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl b/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
index 6a546580c..f9d87945e 100644
--- a/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
+++ b/src/couch_pse_tests/src/cpse_test_purge_seqs.erl
@@ -14,20 +14,16 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
couch_db:name(Db).
-
teardown_each(DbName) ->
ok = couch_server:delete(DbName, []).
-
cpse_increment_purge_seq_on_complete_purge(DbName) ->
{ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
{ok, Rev2} = cpse_util:save_doc(DbName, {[{'_id', foo2}, {vsn, 1.2}]}),
@@ -68,7 +64,6 @@ cpse_increment_purge_seq_on_complete_purge(DbName) ->
{purge_infos, PurgeInfos1 ++ PurgeInfos2}
]).
-
cpse_increment_purge_multiple_times(DbName) ->
{ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}),
{ok, Rev2} = cpse_util:save_doc(DbName, {[{'_id', foo2}, {vsn, 1.2}]}),
@@ -97,14 +92,14 @@ cpse_increment_purge_multiple_times(DbName) ->
{purge_infos, PurgeInfos1}
]).
-
cpse_increment_purge_seq_on_partial_purge(DbName) ->
{ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, <<"1.1">>}]}),
- Update = {[
- {'_id', foo1},
- {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
- {vsn, <<"1.2">>}
- ]},
+ Update =
+ {[
+ {'_id', foo1},
+ {'_rev', couch_doc:rev_to_str({1, [couch_hash:md5_hash(<<"1.2">>)]})},
+ {vsn, <<"1.2">>}
+ ]},
{ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]),
cpse_util:assert_db_props(?MODULE, ?LINE, DbName, [
diff --git a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl b/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
index a2151340a..f51e50aec 100644
--- a/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
+++ b/src/couch_pse_tests/src/cpse_test_read_write_docs.erl
@@ -14,20 +14,16 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
Db.
-
teardown_each(Db) ->
ok = couch_server:delete(couch_db:name(Db), []).
-
cpse_read_docs_from_empty_db(Db) ->
?assertEqual([not_found], couch_db_engine:open_docs(Db, [<<"foo">>])),
?assertEqual(
@@ -35,7 +31,6 @@ cpse_read_docs_from_empty_db(Db) ->
couch_db_engine:open_docs(Db, [<<"a">>, <<"b">>])
).
-
cpse_read_empty_local_docs(Db) ->
{LocalA, LocalB} = {<<"_local/a">>, <<"_local/b">>},
?assertEqual([not_found], couch_db_engine:open_local_docs(Db, [LocalA])),
@@ -44,7 +39,6 @@ cpse_read_empty_local_docs(Db) ->
couch_db_engine:open_local_docs(Db, [LocalA, LocalB])
).
-
cpse_write_one_doc(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -79,12 +73,13 @@ cpse_write_one_doc(Db1) ->
},
Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
+ Body1 =
+ if
+ not is_binary(Doc1#doc.body) -> Doc1#doc.body;
+ true -> couch_compress:decompress(Doc1#doc.body)
+ end,
?assertEqual({[{<<"vsn">>, 1}]}, Body1).
-
cpse_write_two_docs(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -106,7 +101,6 @@ cpse_write_two_docs(Db1) ->
Resps = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>]),
?assertEqual(false, lists:member(not_found, Resps)).
-
cpse_write_three_doc_batch(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -131,7 +125,6 @@ cpse_write_three_doc_batch(Db1) ->
Resps = couch_db_engine:open_docs(Db3, [<<"foo">>, <<"bar">>, <<"baz">>]),
?assertEqual(false, lists:member(not_found, Resps)).
-
cpse_update_doc(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -167,13 +160,14 @@ cpse_update_doc(Db1) ->
},
Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
+ Body1 =
+ if
+ not is_binary(Doc1#doc.body) -> Doc1#doc.body;
+ true -> couch_compress:decompress(Doc1#doc.body)
+ end,
?assertEqual({[{<<"vsn">>, 2}]}, Body1).
-
cpse_delete_doc(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -207,13 +201,14 @@ cpse_delete_doc(Db1) ->
},
Doc1 = couch_db_engine:read_doc_body(Db3, Doc0),
- Body1 = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
+ Body1 =
+ if
+ not is_binary(Doc1#doc.body) -> Doc1#doc.body;
+ true -> couch_compress:decompress(Doc1#doc.body)
+ end,
?assertEqual({[]}, Body1).
-
cpse_write_local_doc(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -235,7 +230,6 @@ cpse_write_local_doc(Db1) ->
[#doc{} = Doc] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]),
?assertEqual({[{<<"yay">>, false}]}, Doc#doc.body).
-
cpse_write_mixed_batch(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -262,7 +256,6 @@ cpse_write_mixed_batch(Db1) ->
[not_found] = couch_db_engine:open_local_docs(Db3, [<<"bar">>]),
[#doc{}] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]).
-
cpse_update_local_doc(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -285,7 +278,6 @@ cpse_update_local_doc(Db1) ->
[#doc{} = Doc] = couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>]),
?assertEqual({[{<<"stuff">>, null}]}, Doc#doc.body).
-
cpse_delete_local_doc(Db1) ->
?assertEqual(0, couch_db_engine:get_doc_count(Db1)),
?assertEqual(0, couch_db_engine:get_del_doc_count(Db1)),
@@ -306,6 +298,6 @@ cpse_delete_local_doc(Db1) ->
[not_found] = couch_db_engine:open_docs(Db3, [<<"_local/foo">>]),
?assertEqual(
- [not_found],
- couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>])
- ).
+ [not_found],
+ couch_db_engine:open_local_docs(Db3, [<<"_local/foo">>])
+ ).
diff --git a/src/couch_pse_tests/src/cpse_test_ref_counting.erl b/src/couch_pse_tests/src/cpse_test_ref_counting.erl
index cb115a785..a0123d1ca 100644
--- a/src/couch_pse_tests/src/cpse_test_ref_counting.erl
+++ b/src/couch_pse_tests/src/cpse_test_ref_counting.erl
@@ -14,23 +14,18 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(NUM_CLIENTS, 1000).
-
setup_each() ->
{ok, Db} = cpse_util:create_db(),
{Db, self()}.
-
teardown_each({Db, _}) ->
ok = couch_server:delete(couch_db:name(Db), []).
-
cpse_empty_monitors({Db, Pid}) ->
Pids = couch_db_engine:monitored_by(Db),
?assert(is_list(Pids)),
@@ -41,7 +36,6 @@ cpse_empty_monitors({Db, Pid}) ->
],
?assertEqual([], Pids -- Expected).
-
cpse_incref_decref({Db, _}) ->
{Pid, _} = Client = start_client(Db),
wait_client(Client),
@@ -54,11 +48,13 @@ cpse_incref_decref({Db, _}) ->
Pids2 = couch_db_engine:monitored_by(Db),
?assert(not lists:member(Pid, Pids2)).
-
cpse_incref_decref_many({Db, _}) ->
- Clients = lists:map(fun(_) ->
- start_client(Db)
- end, lists:seq(1, ?NUM_CLIENTS)),
+ Clients = lists:map(
+ fun(_) ->
+ start_client(Db)
+ end,
+ lists:seq(1, ?NUM_CLIENTS)
+ ),
lists:foreach(fun(C) -> wait_client(C) end, Clients),
@@ -71,7 +67,6 @@ cpse_incref_decref_many({Db, _}) ->
Pids2 = couch_db_engine:monitored_by(Db),
?assertEqual(3, length(Pids2)).
-
start_client(Db0) ->
spawn_monitor(fun() ->
{ok, Db1} = couch_db:open_int(couch_db:name(Db0), []),
@@ -92,7 +87,6 @@ start_client(Db0) ->
end
end).
-
wait_client({Pid, _Ref}) ->
Pid ! {waiting, self()},
receive
@@ -101,7 +95,6 @@ wait_client({Pid, _Ref}) ->
erlang:error(timeout)
end.
-
close_client({Pid, Ref}) ->
Pid ! close,
receive
@@ -110,4 +103,3 @@ close_client({Pid, Ref}) ->
after 1000 ->
erlang:error(timeout)
end.
-
diff --git a/src/couch_pse_tests/src/cpse_util.erl b/src/couch_pse_tests/src/cpse_util.erl
index 55622e925..bcbea4487 100644
--- a/src/couch_pse_tests/src/cpse_util.erl
+++ b/src/couch_pse_tests/src/cpse_util.erl
@@ -14,11 +14,9 @@
-compile(export_all).
-compile(nowarn_export_all).
-
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(TEST_MODULES, [
cpse_test_open_close_delete,
cpse_test_get_set_props,
@@ -36,29 +34,27 @@
cpse_test_purge_seqs
]).
-
-define(SHUTDOWN_TIMEOUT, 5000).
-define(COMPACTOR_TIMEOUT, 50000).
-define(ATTACHMENT_WRITE_TIMEOUT, 10000).
-define(MAKE_DOC_SUMMARY_TIMEOUT, 5000).
-
create_tests(EngineApp, Extension) ->
create_tests(EngineApp, EngineApp, Extension).
-
create_tests(EngineApp, EngineModule, Extension) ->
TestEngine = {EngineApp, EngineModule, Extension},
application:set_env(couch, test_engine, TestEngine),
- lists:map(fun(TestMod) ->
- {atom_to_list(TestMod), cpse_gather:module(TestMod)}
- end, ?TEST_MODULES).
-
+ lists:map(
+ fun(TestMod) ->
+ {atom_to_list(TestMod), cpse_gather:module(TestMod)}
+ end,
+ ?TEST_MODULES
+ ).
setup_all() ->
setup_all([]).
-
setup_all(ExtraApps) ->
Ctx = test_util:start_couch(ExtraApps),
{ok, {_, EngineMod, Extension}} = application:get_env(couch, test_engine),
@@ -68,20 +64,16 @@ setup_all(ExtraApps) ->
config:set("mem3", "replicate_purges", "true", false),
Ctx.
-
teardown_all(Ctx) ->
test_util:stop_couch(Ctx).
-
rootdir() ->
config:get("couchdb", "database_dir", ".").
-
dbname() ->
UUID = couch_uuids:random(),
<<"db-", UUID/binary>>.
-
get_engine() ->
case application:get_env(couch, test_engine) of
{ok, {_App, _Mod, Extension}} ->
@@ -90,21 +82,17 @@ get_engine() ->
<<"couch">>
end.
-
create_db() ->
create_db(dbname()).
-
create_db(DbName) ->
Engine = get_engine(),
couch_db:create(DbName, [{engine, Engine}, ?ADMIN_CTX]).
-
open_db(DbName) ->
Engine = get_engine(),
couch_db:open_int(DbName, [{engine, Engine}, ?ADMIN_CTX]).
-
shutdown_db(Db) ->
Pid = couch_db:get_pid(Db),
Ref = erlang:monitor(process, Pid),
@@ -116,38 +104,47 @@ shutdown_db(Db) ->
erlang:error(database_shutdown_timeout)
end,
test_util:wait(fun() ->
- case ets:member(couch_server:couch_dbs(couch_db:name(Db)),
- couch_db:name(Db)) of
+ case
+ ets:member(
+ couch_server:couch_dbs(couch_db:name(Db)),
+ couch_db:name(Db)
+ )
+ of
true -> wait;
false -> ok
end
end).
-
save_doc(DbName, Json) ->
{ok, [Rev]} = save_docs(DbName, [Json], []),
{ok, Rev}.
-
save_docs(DbName, JsonDocs) ->
save_docs(DbName, JsonDocs, []).
-
save_docs(DbName, JsonDocs, Options) ->
- Docs = lists:map(fun(JDoc) ->
- couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
- end, JsonDocs),
+ Docs = lists:map(
+ fun(JDoc) ->
+ couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc)))
+ end,
+ JsonDocs
+ ),
Opts = [full_commit | Options],
{ok, Db} = couch_db:open_int(DbName, []),
try
case lists:member(replicated_changes, Options) of
true ->
{ok, []} = couch_db:update_docs(
- Db, Docs, Opts, replicated_changes),
- {ok, lists:map(fun(Doc) ->
- {Pos, [RevId | _]} = Doc#doc.revs,
- {Pos, RevId}
- end, Docs)};
+ Db, Docs, Opts, replicated_changes
+ ),
+ {ok,
+ lists:map(
+ fun(Doc) ->
+ {Pos, [RevId | _]} = Doc#doc.revs,
+ {Pos, RevId}
+ end,
+ Docs
+ )};
false ->
{ok, Resp} = couch_db:update_docs(Db, Docs, Opts),
{ok, [Rev || {ok, Rev} <- Resp]}
@@ -156,7 +153,6 @@ save_docs(DbName, JsonDocs, Options) ->
couch_db:close(Db)
end.
-
open_doc(DbName, DocId0) ->
DocId = ?JSON_DECODE(?JSON_ENCODE(DocId0)),
{ok, Db} = couch_db:open_int(DbName, []),
@@ -166,15 +162,16 @@ open_doc(DbName, DocId0) ->
couch_db:close(Db)
end.
-
purge(DbName, PurgeInfos) ->
purge(DbName, PurgeInfos, []).
-
purge(DbName, PurgeInfos0, Options) when is_list(PurgeInfos0) ->
- PurgeInfos = lists:map(fun({UUID, DocIdJson, Revs}) ->
- {UUID, ?JSON_DECODE(?JSON_ENCODE(DocIdJson)), Revs}
- end, PurgeInfos0),
+ PurgeInfos = lists:map(
+ fun({UUID, DocIdJson, Revs}) ->
+ {UUID, ?JSON_DECODE(?JSON_ENCODE(DocIdJson)), Revs}
+ end,
+ PurgeInfos0
+ ),
{ok, Db} = couch_db:open_int(DbName, []),
try
couch_db:purge_docs(Db, PurgeInfos, Options)
@@ -182,31 +179,29 @@ purge(DbName, PurgeInfos0, Options) when is_list(PurgeInfos0) ->
couch_db:close(Db)
end.
-
uuid() ->
couch_uuids:random().
-
assert_db_props(Module, Line, DbName, Props) when is_binary(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
try
assert_db_props(Module, Line, Db, Props)
- catch error:{assertEqual, Props} ->
- {_, Rest} = proplists:split(Props, [module, line]),
- erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
+ catch
+ error:{assertEqual, Props} ->
+ {_, Rest} = proplists:split(Props, [module, line]),
+ erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
after
couch_db:close(Db)
end;
-
assert_db_props(Module, Line, Db, Props) ->
try
assert_each_prop(Db, Props)
- catch error:{assertEqual, Props} ->
- {_, Rest} = proplists:split(Props, [module, line]),
- erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
+ catch
+ error:{assertEqual, Props} ->
+ {_, Rest} = proplists:split(Props, [module, line]),
+ erlang:error({assertEqual, [{module, Module}, {line, Line} | Rest]})
end.
-
assert_each_prop(_Db, []) ->
ok;
assert_each_prop(Db, [{doc_count, Expect} | Rest]) ->
@@ -234,51 +229,47 @@ assert_each_prop(Db, [{purge_infos, Expect} | Rest]) ->
?assertEqual(Expect, lists:reverse(PurgeInfos)),
assert_each_prop(Db, Rest).
-
aep_changes(_A, Acc) ->
{ok, Acc + 1}.
-
aep_fold({_PSeq, UUID, Id, Revs}, Acc) ->
{ok, [{UUID, Id, Revs} | Acc]}.
-
apply_actions(DbName, Actions) when is_binary(DbName) ->
{ok, Db0} = couch_db:open_int(DbName, [?ADMIN_CTX]),
{ok, Db1} = apply_actions(Db0, Actions),
couch_db:close(Db1),
ok;
-
apply_actions(Db, []) ->
{ok, Db};
-
apply_actions(Db, [Action | Rest]) ->
{ok, NewDb} = apply_action(Db, Action),
apply_actions(NewDb, Rest).
-
apply_action(Db, {batch, BatchActions}) ->
apply_batch(Db, BatchActions);
-
apply_action(Db, Action) ->
apply_batch(Db, [Action]).
-
apply_batch(Db, Actions) ->
AccIn = {[], [], [], []},
- AccOut = lists:foldl(fun(Action, Acc) ->
- {DocAcc, ConfAcc, LDocAcc, PurgeAcc} = Acc,
- case gen_write(Db, Action) of
- {update, Doc} ->
- {[Doc | DocAcc], ConfAcc, LDocAcc, PurgeAcc};
- {conflict, Doc} ->
- {DocAcc, [Doc | ConfAcc], LDocAcc, PurgeAcc};
- {local, Doc} ->
- {DocAcc, ConfAcc, [Doc | LDocAcc], PurgeAcc};
- {purge, PurgeInfo} ->
- {DocAcc, ConfAcc, LDocAcc, [PurgeInfo | PurgeAcc]}
- end
- end, AccIn, Actions),
+ AccOut = lists:foldl(
+ fun(Action, Acc) ->
+ {DocAcc, ConfAcc, LDocAcc, PurgeAcc} = Acc,
+ case gen_write(Db, Action) of
+ {update, Doc} ->
+ {[Doc | DocAcc], ConfAcc, LDocAcc, PurgeAcc};
+ {conflict, Doc} ->
+ {DocAcc, [Doc | ConfAcc], LDocAcc, PurgeAcc};
+ {local, Doc} ->
+ {DocAcc, ConfAcc, [Doc | LDocAcc], PurgeAcc};
+ {purge, PurgeInfo} ->
+ {DocAcc, ConfAcc, LDocAcc, [PurgeInfo | PurgeAcc]}
+ end
+ end,
+ AccIn,
+ Actions
+ ),
{Docs0, Conflicts0, LDocs0, PurgeInfos0} = AccOut,
Docs = lists:reverse(Docs0),
@@ -293,37 +284,37 @@ apply_batch(Db, Actions) ->
{ok, []} = couch_db:update_docs(Db, Conflicts, [], replicated_changes),
{ok, Db2} = couch_db:reopen(Db1),
- if PurgeInfos == [] -> ok; true ->
- {ok, _} = couch_db:purge_docs(Db2, PurgeInfos)
+ if
+ PurgeInfos == [] -> ok;
+ true -> {ok, _} = couch_db:purge_docs(Db2, PurgeInfos)
end,
couch_db:reopen(Db2).
-
gen_write(Db, {Action, {<<"_local/", _/binary>> = DocId, Body}}) ->
- PrevRev = case couch_db:open_doc(Db, DocId) of
- {not_found, _} ->
- 0;
- {ok, #doc{revs = {0, []}}} ->
- 0;
- {ok, #doc{revs = {0, [RevStr | _]}}} ->
- binary_to_integer(RevStr)
- end,
- {RevId, Deleted} = case Action of
- Action when Action == create; Action == update ->
- {PrevRev + 1, false};
- delete ->
- {0, true}
- end,
+ PrevRev =
+ case couch_db:open_doc(Db, DocId) of
+ {not_found, _} ->
+ 0;
+ {ok, #doc{revs = {0, []}}} ->
+ 0;
+ {ok, #doc{revs = {0, [RevStr | _]}}} ->
+ binary_to_integer(RevStr)
+ end,
+ {RevId, Deleted} =
+ case Action of
+ Action when Action == create; Action == update ->
+ {PrevRev + 1, false};
+ delete ->
+ {0, true}
+ end,
{local, #doc{
id = DocId,
revs = {0, [list_to_binary(integer_to_list(RevId))]},
body = Body,
deleted = Deleted
}};
-
gen_write(Db, {Action, {DocId, Body}}) ->
gen_write(Db, {Action, {DocId, Body, []}});
-
gen_write(Db, {create, {DocId, Body, Atts}}) ->
{not_found, _} = couch_db:open_doc(Db, DocId),
{update, #doc{
@@ -333,11 +324,13 @@ gen_write(Db, {create, {DocId, Body, Atts}}) ->
body = Body,
atts = Atts
}};
-
gen_write(_Db, {purge, {DocId, PrevRevs0, _}}) ->
- PrevRevs = if is_list(PrevRevs0) -> PrevRevs0; true -> [PrevRevs0] end,
+ PrevRevs =
+ if
+ is_list(PrevRevs0) -> PrevRevs0;
+ true -> [PrevRevs0]
+ end,
{purge, {couch_uuids:random(), DocId, PrevRevs}};
-
gen_write(Db, {Action, {DocId, Body, Atts}}) ->
#full_doc_info{} = PrevFDI = couch_db:get_full_doc_info(Db, DocId),
@@ -351,16 +344,18 @@ gen_write(Db, {Action, {DocId, Body, Atts}}) ->
NewRev = gen_rev(Action, DocId, PrevRev, Body, Atts),
- Deleted = case Action of
- update -> false;
- conflict -> false;
- delete -> true
- end,
+ Deleted =
+ case Action of
+ update -> false;
+ conflict -> false;
+ delete -> true
+ end,
- Type = case Action of
- conflict -> conflict;
- _ -> update
- end,
+ Type =
+ case Action of
+ conflict -> conflict;
+ _ -> update
+ end,
{Type, #doc{
id = DocId,
@@ -370,7 +365,6 @@ gen_write(Db, {Action, {DocId, Body, Atts}}) ->
atts = Atts
}}.
-
gen_rev(A, DocId, {Pos, Rev}, Body, Atts) when A == update; A == delete ->
NewRev = couch_hash:md5_hash(term_to_binary({DocId, Rev, Body, Atts})),
{Pos + 1, [NewRev, Rev]};
@@ -379,26 +373,24 @@ gen_rev(conflict, DocId, _, Body, Atts) ->
NewRev = couch_hash:md5_hash(term_to_binary({DocId, UUID, Body, Atts})),
{1, [NewRev]}.
-
prep_atts(_Db, []) ->
[];
-
prep_atts(Db, [{FileName, Data} | Rest]) ->
{_, Ref} = spawn_monitor(fun() ->
{ok, Stream} = couch_db:open_write_stream(Db, []),
exit(write_att(Stream, FileName, Data, Data))
end),
- Att = receive
- {'DOWN', Ref, _, _, {{no_catch, not_supported}, _}} ->
- throw(not_supported);
- {'DOWN', Ref, _, _, Resp} ->
- Resp
+ Att =
+ receive
+ {'DOWN', Ref, _, _, {{no_catch, not_supported}, _}} ->
+ throw(not_supported);
+ {'DOWN', Ref, _, _, Resp} ->
+ Resp
after ?ATTACHMENT_WRITE_TIMEOUT ->
erlang:error(attachment_write_timeout)
- end,
+ end,
[Att | prep_atts(Db, Rest)].
-
write_att(Stream, FileName, OrigData, <<>>) ->
{StreamEngine, Len, Len, Md5, Md5} = couch_stream:close(Stream),
couch_util:check_md5(Md5, couch_hash:md5_hash(OrigData)),
@@ -412,26 +404,24 @@ write_att(Stream, FileName, OrigData, <<>>) ->
{md5, Md5},
{encoding, identity}
]);
-
write_att(Stream, FileName, OrigData, Data) ->
- {Chunk, Rest} = case size(Data) > 4096 of
- true ->
- <<Head:4096/binary, Tail/binary>> = Data,
- {Head, Tail};
- false ->
- {Data, <<>>}
- end,
+ {Chunk, Rest} =
+ case size(Data) > 4096 of
+ true ->
+ <<Head:4096/binary, Tail/binary>> = Data,
+ {Head, Tail};
+ false ->
+ {Data, <<>>}
+ end,
ok = couch_stream:write(Stream, Chunk),
write_att(Stream, FileName, OrigData, Rest).
-
prev_rev(#full_doc_info{} = FDI) ->
#doc_info{
revs = [#rev_info{} = PrevRev | _]
} = couch_doc:to_doc_info(FDI),
PrevRev.
-
db_as_term(Db) ->
db_as_term(Db, compact).
@@ -439,7 +429,6 @@ db_as_term(DbName, Type) when is_binary(DbName) ->
couch_util:with_db(DbName, fun(Db) ->
db_as_term(Db, Type)
end);
-
db_as_term(Db, Type) ->
[
{props, db_props_as_term(Db, Type)},
@@ -449,7 +438,6 @@ db_as_term(Db, Type) ->
{purged_docs, db_purged_docs_as_term(Db)}
].
-
db_props_as_term(Db, Type) ->
Props0 = [
get_doc_count,
@@ -463,27 +451,36 @@ db_props_as_term(Db, Type) ->
get_uuid,
get_epochs
],
- Props = if Type /= replication -> Props0; true ->
- Props0 -- [get_uuid]
- end,
- lists:map(fun(Fun) ->
- {Fun, couch_db_engine:Fun(Db)}
- end, Props).
-
+ Props =
+ if
+ Type /= replication -> Props0;
+ true -> Props0 -- [get_uuid]
+ end,
+ lists:map(
+ fun(Fun) ->
+ {Fun, couch_db_engine:Fun(Db)}
+ end,
+ Props
+ ).
db_docs_as_term(Db) ->
FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
{ok, FDIs} = couch_db:fold_docs(Db, FoldFun, [], []),
- lists:reverse(lists:map(fun(FDI) ->
- fdi_to_term(Db, FDI)
- end, FDIs)).
-
+ lists:reverse(
+ lists:map(
+ fun(FDI) ->
+ fdi_to_term(Db, FDI)
+ end,
+ FDIs
+ )
+ ).
db_local_docs_as_term(Db, Type) ->
FoldFun = fun(Doc, Acc) ->
case Doc#doc.id of
- <<?LOCAL_DOC_PREFIX, "purge-mem3", _/binary>>
- when Type == replication ->
+ <<?LOCAL_DOC_PREFIX, "purge-mem3", _/binary>> when
+ Type == replication
+ ->
{ok, Acc};
_ ->
{ok, [Doc | Acc]}
@@ -492,14 +489,17 @@ db_local_docs_as_term(Db, Type) ->
{ok, LDocs} = couch_db:fold_local_docs(Db, FoldFun, [], []),
lists:reverse(LDocs).
-
db_changes_as_term(Db) ->
FoldFun = fun(FDI, Acc) -> {ok, [FDI | Acc]} end,
{ok, Changes} = couch_db:fold_changes(Db, 0, FoldFun, [], []),
- lists:reverse(lists:map(fun(FDI) ->
- fdi_to_term(Db, FDI)
- end, Changes)).
-
+ lists:reverse(
+ lists:map(
+ fun(FDI) ->
+ fdi_to_term(Db, FDI)
+ end,
+ Changes
+ )
+ ).
db_purged_docs_as_term(Db) ->
InitPSeq = couch_db_engine:get_oldest_purge_seq(Db) - 1,
@@ -507,18 +507,22 @@ db_purged_docs_as_term(Db) ->
{ok, [{PSeq, UUID, Id, Revs} | Acc]}
end,
{ok, PDocs} = couch_db_engine:fold_purge_infos(
- Db, InitPSeq, FoldFun, [], []),
+ Db, InitPSeq, FoldFun, [], []
+ ),
lists:reverse(PDocs).
-
fdi_to_term(Db, FDI) ->
#full_doc_info{
id = DocId,
rev_tree = OldTree
} = FDI,
- {NewRevTree, _} = couch_key_tree:mapfold(fun(Rev, Node, Type, Acc) ->
- tree_to_term(Rev, Node, Type, Acc, DocId)
- end, Db, OldTree),
+ {NewRevTree, _} = couch_key_tree:mapfold(
+ fun(Rev, Node, Type, Acc) ->
+ tree_to_term(Rev, Node, Type, Acc, DocId)
+ end,
+ Db,
+ OldTree
+ ),
FDI#full_doc_info{
rev_tree = NewRevTree,
% Blank out sizes because we allow storage
@@ -530,10 +534,8 @@ fdi_to_term(Db, FDI) ->
}
}.
-
tree_to_term(_Rev, _Leaf, branch, Acc, _DocId) ->
{?REV_MISSING, Acc};
-
tree_to_term({Pos, RevId}, #leaf{} = Leaf, leaf, Db, DocId) ->
#leaf{
deleted = Deleted,
@@ -549,13 +551,17 @@ tree_to_term({Pos, RevId}, #leaf{} = Leaf, leaf, Db, DocId) ->
Doc1 = couch_db_engine:read_doc_body(Db, Doc0),
- Body = if not is_binary(Doc1#doc.body) -> Doc1#doc.body; true ->
- couch_compress:decompress(Doc1#doc.body)
- end,
+ Body =
+ if
+ not is_binary(Doc1#doc.body) -> Doc1#doc.body;
+ true -> couch_compress:decompress(Doc1#doc.body)
+ end,
- Atts1 = if not is_binary(Doc1#doc.atts) -> Doc1#doc.atts; true ->
- couch_compress:decompress(Doc1#doc.atts)
- end,
+ Atts1 =
+ if
+ not is_binary(Doc1#doc.atts) -> Doc1#doc.atts;
+ true -> couch_compress:decompress(Doc1#doc.atts)
+ end,
StreamSrc = fun(Sp) -> couch_db:open_read_stream(Db, Sp) end,
Atts2 = [couch_att:from_disk_term(StreamSrc, Att) || Att <- Atts1],
@@ -568,34 +574,25 @@ tree_to_term({Pos, RevId}, #leaf{} = Leaf, leaf, Db, DocId) ->
},
{NewLeaf, Db}.
-
att_to_term(Att) ->
Bin = couch_att:to_binary(Att),
couch_att:store(data, Bin, Att).
-
term_diff(T1, T2) when is_tuple(T1), is_tuple(T2) ->
tuple_diff(tuple_to_list(T1), tuple_to_list(T2));
-
term_diff(L1, L2) when is_list(L1), is_list(L2) ->
list_diff(L1, L2);
-
term_diff(V1, V2) when V1 == V2 ->
nodiff;
-
term_diff(V1, V2) ->
{V1, V2}.
-
tuple_diff([], []) ->
nodiff;
-
tuple_diff([T1 | _], []) ->
{longer, T1};
-
tuple_diff([], [T2 | _]) ->
{shorter, T2};
-
tuple_diff([T1 | R1], [T2 | R2]) ->
case term_diff(T1, T2) of
nodiff ->
@@ -604,16 +601,12 @@ tuple_diff([T1 | R1], [T2 | R2]) ->
{T1, Else}
end.
-
list_diff([], []) ->
nodiff;
-
list_diff([T1 | _], []) ->
{longer, T1};
-
list_diff([], [T2 | _]) ->
{shorter, T2};
-
list_diff([T1 | R1], [T2 | R2]) ->
case term_diff(T1, T2) of
nodiff ->
@@ -622,7 +615,6 @@ list_diff([T1 | R1], [T2 | R2]) ->
{T1, Else}
end.
-
compact(Db) ->
{ok, Pid} = couch_db:start_compact(Db),
Ref = erlang:monitor(process, Pid),
@@ -656,7 +648,6 @@ compact(Db) ->
end
end).
-
with_config(Config, Fun) ->
OldConfig = apply_config(Config),
try
@@ -665,10 +656,8 @@ with_config(Config, Fun) ->
apply_config(OldConfig)
end.
-
apply_config([]) ->
[];
-
apply_config([{Section, Key, Value} | Rest]) ->
Orig = config:get(Section, Key),
case Value of
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index 413628143..39b3903ea 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -30,13 +30,20 @@
-define(DESIGN_DOC_CREATION_DELAY_MSEC, 1000).
-define(REPLICATION_STATES, [
- initializing, % Just added to scheduler
- error, % Could not be turned into a replication job
- running, % Scheduled and running
- pending, % Scheduled and waiting to run
- crashing, % Scheduled but crashing, backed off by the scheduler
- completed, % Non-continuous (normal) completed replication
- failed % Terminal failure, will not be retried anymore
+ % Just added to scheduler
+ initializing,
+ % Could not be turned into a replication job
+ error,
+ % Scheduled and running
+ running,
+ % Scheduled and waiting to run
+ pending,
+ % Scheduled but crashing, backed off by the scheduler
+ crashing,
+ % Non-continuous (normal) completed replication
+ completed,
+ % Terminal failure, will not be retried anymore
+ failed
]).
-import(couch_util, [
@@ -44,101 +51,96 @@
get_value/3
]).
-
-spec replicate({[_]}, any()) ->
- {ok, {continuous, binary()}} |
- {ok, {[_]}} |
- {ok, {cancelled, binary()}} |
- {error, any()} |
- no_return().
+ {ok, {continuous, binary()}}
+ | {ok, {[_]}}
+ | {ok, {cancelled, binary()}}
+ | {error, any()}
+ | no_return().
replicate(PostBody, Ctx) ->
{ok, Rep0} = couch_replicator_utils:parse_rep_doc(PostBody, Ctx),
Rep = Rep0#rep{start_time = os:timestamp()},
#rep{id = RepId, options = Options, user_ctx = UserCtx} = Rep,
case get_value(cancel, Options, false) of
- true ->
- CancelRepId = case get_value(id, Options, nil) of
- nil ->
- RepId;
- RepId2 ->
- RepId2
- end,
- case check_authorization(CancelRepId, UserCtx) of
- ok ->
- cancel_replication(CancelRepId);
- not_found ->
- {error, not_found}
- end;
- false ->
- check_authorization(RepId, UserCtx),
- {ok, Listener} = rep_result_listener(RepId),
- Result = do_replication_loop(Rep),
- couch_replicator_notifier:stop(Listener),
- Result
+ true ->
+ CancelRepId =
+ case get_value(id, Options, nil) of
+ nil ->
+ RepId;
+ RepId2 ->
+ RepId2
+ end,
+ case check_authorization(CancelRepId, UserCtx) of
+ ok ->
+ cancel_replication(CancelRepId);
+ not_found ->
+ {error, not_found}
+ end;
+ false ->
+ check_authorization(RepId, UserCtx),
+ {ok, Listener} = rep_result_listener(RepId),
+ Result = do_replication_loop(Rep),
+ couch_replicator_notifier:stop(Listener),
+ Result
end.
-
-spec do_replication_loop(#rep{}) ->
{ok, {continuous, binary()}} | {ok, tuple()} | {error, any()}.
do_replication_loop(#rep{id = {BaseId, Ext} = Id, options = Options} = Rep) ->
ok = couch_replicator_scheduler:add_job(Rep),
case get_value(continuous, Options, false) of
- true ->
- {ok, {continuous, ?l2b(BaseId ++ Ext)}};
- false ->
- wait_for_result(Id)
+ true ->
+ {ok, {continuous, ?l2b(BaseId ++ Ext)}};
+ false ->
+ wait_for_result(Id)
end.
-
-spec rep_result_listener(rep_id()) -> {ok, pid()}.
rep_result_listener(RepId) ->
ReplyTo = self(),
{ok, _Listener} = couch_replicator_notifier:start_link(
- fun({_, RepId2, _} = Ev) when RepId2 =:= RepId ->
+ fun
+ ({_, RepId2, _} = Ev) when RepId2 =:= RepId ->
ReplyTo ! Ev;
(_) ->
ok
- end).
-
+ end
+ ).
-spec wait_for_result(rep_id()) ->
{ok, {[_]}} | {error, any()}.
wait_for_result(RepId) ->
receive
- {finished, RepId, RepResult} ->
- {ok, RepResult};
- {error, RepId, Reason} ->
- {error, Reason}
+ {finished, RepId, RepResult} ->
+ {ok, RepResult};
+ {error, RepId, Reason} ->
+ {error, Reason}
end.
-
-spec cancel_replication(rep_id()) ->
{ok, {cancelled, binary()}} | {error, not_found}.
cancel_replication({BasedId, Extension} = RepId) ->
FullRepId = BasedId ++ Extension,
couch_log:notice("Canceling replication '~s' ...", [FullRepId]),
case couch_replicator_scheduler:rep_state(RepId) of
- #rep{} ->
- ok = couch_replicator_scheduler:remove_job(RepId),
- couch_log:notice("Replication '~s' cancelled", [FullRepId]),
- {ok, {cancelled, ?l2b(FullRepId)}};
- nil ->
- couch_log:notice("Replication '~s' not found", [FullRepId]),
- {error, not_found}
+ #rep{} ->
+ ok = couch_replicator_scheduler:remove_job(RepId),
+ couch_log:notice("Replication '~s' cancelled", [FullRepId]),
+ {ok, {cancelled, ?l2b(FullRepId)}};
+ nil ->
+ couch_log:notice("Replication '~s' not found", [FullRepId]),
+ {error, not_found}
end.
-
-spec replication_states() -> [atom()].
replication_states() ->
?REPLICATION_STATES.
-
-spec strip_url_creds(binary() | {[_]}) -> binary().
strip_url_creds(Endpoint) ->
- try
- couch_replicator_docs:parse_rep_db(Endpoint, [], []) of
- #httpdb{url = Url} ->
- iolist_to_binary(couch_util:url_strip_password(Url))
+ try couch_replicator_docs:parse_rep_db(Endpoint, [], []) of
+ #httpdb{url = Url} ->
+ iolist_to_binary(couch_util:url_strip_password(Url))
catch
throw:{error, local_endpoints_not_supported} ->
Endpoint;
@@ -148,39 +150,39 @@ strip_url_creds(Endpoint) ->
null
end.
-
-spec job(binary()) -> {ok, {[_]}} | {error, not_found}.
job(JobId0) when is_binary(JobId0) ->
JobId = couch_replicator_ids:convert(JobId0),
{Res, _Bad} = rpc:multicall(couch_replicator_scheduler, job, [JobId]),
case [JobInfo || {ok, JobInfo} <- Res] of
- [JobInfo| _] ->
+ [JobInfo | _] ->
{ok, JobInfo};
[] ->
{error, not_found}
end.
-
-spec restart_job(binary() | list() | rep_id()) ->
{ok, {[_]}} | {error, not_found}.
restart_job(JobId0) ->
JobId = couch_replicator_ids:convert(JobId0),
{Res, _} = rpc:multicall(couch_replicator_scheduler, restart_job, [JobId]),
case [JobInfo || {ok, JobInfo} <- Res] of
- [JobInfo| _] ->
+ [JobInfo | _] ->
{ok, JobInfo};
[] ->
{error, not_found}
end.
-
-spec active_doc(binary(), binary()) -> {ok, {[_]}} | {error, not_found}.
active_doc(DbName, DocId) ->
try
Shards = mem3:shards(DbName),
Live = [node() | nodes()],
- Nodes = lists:usort([N || #shard{node=N} <- Shards,
- lists:member(N, Live)]),
+ Nodes = lists:usort([
+ N
+ || #shard{node = N} <- Shards,
+ lists:member(N, Live)
+ ]),
Owner = mem3:owner(DbName, DocId, Nodes),
case active_doc_rpc(DbName, DocId, [Owner]) of
{ok, DocInfo} ->
@@ -194,7 +196,6 @@ active_doc(DbName, DocId) ->
active_doc_rpc(DbName, DocId, [node()])
end.
-
-spec active_doc_rpc(binary(), binary(), [node()]) ->
{ok, {[_]}} | {error, not_found}.
active_doc_rpc(_DbName, _DocId, []) ->
@@ -202,8 +203,12 @@ active_doc_rpc(_DbName, _DocId, []) ->
active_doc_rpc(DbName, DocId, [Node]) when Node =:= node() ->
couch_replicator_doc_processor:doc(DbName, DocId);
active_doc_rpc(DbName, DocId, Nodes) ->
- {Res, _Bad} = rpc:multicall(Nodes, couch_replicator_doc_processor, doc,
- [DbName, DocId]),
+ {Res, _Bad} = rpc:multicall(
+ Nodes,
+ couch_replicator_doc_processor,
+ doc,
+ [DbName, DocId]
+ ),
case [DocInfo || {ok, DocInfo} <- Res] of
[DocInfo | _] ->
{ok, DocInfo};
@@ -211,7 +216,6 @@ active_doc_rpc(DbName, DocId, Nodes) ->
{error, not_found}
end.
-
-spec doc(binary(), binary(), any()) -> {ok, {[_]}} | {error, not_found}.
doc(RepDb, DocId, UserCtx) ->
case active_doc(RepDb, DocId) of
@@ -221,17 +225,15 @@ doc(RepDb, DocId, UserCtx) ->
doc_from_db(RepDb, DocId, UserCtx)
end.
-
-spec doc_from_db(binary(), binary(), any()) -> {ok, {[_]}} | {error, not_found}.
doc_from_db(RepDb, DocId, UserCtx) ->
case fabric:open_doc(RepDb, DocId, [UserCtx, ejson_body]) of
{ok, Doc} ->
{ok, info_from_doc(RepDb, couch_doc:to_json_obj(Doc, []))};
- {not_found, _Reason} ->
+ {not_found, _Reason} ->
{error, not_found}
end.
-
-spec info_from_doc(binary(), {[_]}) -> {[_]}.
info_from_doc(RepDb, {Props}) ->
DocId = get_value(<<"_id">>, Props),
@@ -239,27 +241,28 @@ info_from_doc(RepDb, {Props}) ->
Target = get_value(<<"target">>, Props),
State0 = state_atom(get_value(<<"_replication_state">>, Props, null)),
StateTime = get_value(<<"_replication_state_time">>, Props, null),
- {State1, StateInfo, ErrorCount, StartTime} = case State0 of
- completed ->
- {InfoP} = get_value(<<"_replication_stats">>, Props, {[]}),
- case lists:keytake(<<"start_time">>, 1, InfoP) of
- {value, {_, Time}, InfoP1} ->
- {State0, {InfoP1}, 0, Time};
- false ->
- case lists:keytake(start_time, 1, InfoP) of
- {value, {_, Time}, InfoP1} ->
- {State0, {InfoP1}, 0, Time};
- false ->
- {State0, {InfoP}, 0, null}
+ {State1, StateInfo, ErrorCount, StartTime} =
+ case State0 of
+ completed ->
+ {InfoP} = get_value(<<"_replication_stats">>, Props, {[]}),
+ case lists:keytake(<<"start_time">>, 1, InfoP) of
+ {value, {_, Time}, InfoP1} ->
+ {State0, {InfoP1}, 0, Time};
+ false ->
+ case lists:keytake(start_time, 1, InfoP) of
+ {value, {_, Time}, InfoP1} ->
+ {State0, {InfoP1}, 0, Time};
+ false ->
+ {State0, {InfoP}, 0, null}
end
- end;
- failed ->
- Info = get_value(<<"_replication_state_reason">>, Props, nil),
- EJsonInfo = couch_replicator_utils:ejson_state_info(Info),
- {State0, EJsonInfo, 1, StateTime};
- _OtherState ->
- {null, null, 0, null}
- end,
+ end;
+ failed ->
+ Info = get_value(<<"_replication_state_reason">>, Props, nil),
+ EJsonInfo = couch_replicator_utils:ejson_state_info(Info),
+ {State0, EJsonInfo, 1, StateTime};
+ _OtherState ->
+ {null, null, 0, null}
+ end,
{[
{doc_id, DocId},
{database, RepDb},
@@ -271,29 +274,27 @@ info_from_doc(RepDb, {Props}) ->
{info, StateInfo},
{start_time, StartTime},
{last_updated, StateTime}
- ]}.
-
+ ]}.
state_atom(<<"triggered">>) ->
- triggered; % This handles a legacy case were document wasn't converted yet
+ % This handles a legacy case were document wasn't converted yet
+ triggered;
state_atom(State) when is_binary(State) ->
erlang:binary_to_existing_atom(State, utf8);
state_atom(State) when is_atom(State) ->
State.
-
-spec check_authorization(rep_id(), #user_ctx{}) -> ok | not_found.
check_authorization(RepId, #user_ctx{name = Name} = Ctx) ->
case couch_replicator_scheduler:rep_state(RepId) of
- #rep{user_ctx = #user_ctx{name = Name}} ->
- ok;
- #rep{} ->
- couch_httpd:verify_is_server_admin(Ctx);
- nil ->
- not_found
+ #rep{user_ctx = #user_ctx{name = Name}} ->
+ ok;
+ #rep{} ->
+ couch_httpd:verify_is_server_admin(Ctx);
+ nil ->
+ not_found
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -301,8 +302,8 @@ check_authorization(RepId, #user_ctx{name = Name} = Ctx) ->
authorization_test_() ->
{
foreach,
- fun () -> ok end,
- fun (_) -> meck:unload() end,
+ fun() -> ok end,
+ fun(_) -> meck:unload() end,
[
t_admin_is_always_authorized(),
t_username_must_match(),
@@ -310,7 +311,6 @@ authorization_test_() ->
]
}.
-
t_admin_is_always_authorized() ->
?_test(begin
expect_rep_user_ctx(<<"someuser">>, <<"_admin">>),
@@ -318,20 +318,23 @@ t_admin_is_always_authorized() ->
?assertEqual(ok, check_authorization(<<"RepId">>, UserCtx))
end).
-
t_username_must_match() ->
- ?_test(begin
+ ?_test(begin
expect_rep_user_ctx(<<"user">>, <<"somerole">>),
UserCtx1 = #user_ctx{name = <<"user">>, roles = [<<"somerole">>]},
?assertEqual(ok, check_authorization(<<"RepId">>, UserCtx1)),
UserCtx2 = #user_ctx{name = <<"other">>, roles = [<<"somerole">>]},
- ?assertThrow({unauthorized, _}, check_authorization(<<"RepId">>,
- UserCtx2))
+ ?assertThrow(
+ {unauthorized, _},
+ check_authorization(
+ <<"RepId">>,
+ UserCtx2
+ )
+ )
end).
-
t_replication_not_found() ->
- ?_test(begin
+ ?_test(begin
meck:expect(couch_replicator_scheduler, rep_state, 1, nil),
UserCtx1 = #user_ctx{name = <<"user">>, roles = [<<"somerole">>]},
?assertEqual(not_found, check_authorization(<<"RepId">>, UserCtx1)),
@@ -339,17 +342,18 @@ t_replication_not_found() ->
?assertEqual(not_found, check_authorization(<<"RepId">>, UserCtx2))
end).
-
expect_rep_user_ctx(Name, Role) ->
- meck:expect(couch_replicator_scheduler, rep_state,
+ meck:expect(
+ couch_replicator_scheduler,
+ rep_state,
fun(_Id) ->
UserCtx = #user_ctx{name = Name, roles = [Role]},
#rep{user_ctx = UserCtx}
- end).
-
+ end
+ ).
strip_url_creds_test_() ->
- {
+ {
setup,
fun() ->
meck:expect(config, get, fun(_, _, Default) -> Default end)
@@ -365,11 +369,9 @@ strip_url_creds_test_() ->
]
}.
-
t_strip_local_db_creds() ->
?_test(?assertEqual(<<"localdb">>, strip_url_creds(<<"localdb">>))).
-
t_strip_http_basic_creds() ->
?_test(begin
Url1 = <<"http://adm:pass@host/db">>,
@@ -379,22 +381,24 @@ t_strip_http_basic_creds() ->
Url3 = <<"http://adm:pass@host:80/db">>,
?assertEqual(<<"http://host:80/db/">>, strip_url_creds(Url3)),
Url4 = <<"http://adm:pass@host/db?a=b&c=d">>,
- ?assertEqual(<<"http://host/db?a=b&c=d">>,
- strip_url_creds(Url4))
+ ?assertEqual(
+ <<"http://host/db?a=b&c=d">>,
+ strip_url_creds(Url4)
+ )
end).
-
t_strip_http_props_creds() ->
?_test(begin
Props1 = {[{<<"url">>, <<"http://adm:pass@host/db">>}]},
?assertEqual(<<"http://host/db/">>, strip_url_creds(Props1)),
- Props2 = {[ {<<"url">>, <<"http://host/db">>},
- {<<"headers">>, {[{<<"Authorization">>, <<"Basic pa55">>}]}}
- ]},
+ Props2 =
+ {[
+ {<<"url">>, <<"http://host/db">>},
+ {<<"headers">>, {[{<<"Authorization">>, <<"Basic pa55">>}]}}
+ ]},
?assertEqual(<<"http://host/db/">>, strip_url_creds(Props2))
end).
-
t_strip_url_creds_errors() ->
?_test(begin
Bad1 = {[{<<"url">>, <<"http://adm:pass/bad">>}]},
@@ -412,5 +416,4 @@ t_strip_url_creds_errors() ->
?assertEqual(null, strip_url_creds(Bad5))
end).
-
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_api_wrap.erl b/src/couch_replicator/src/couch_replicator_api_wrap.erl
index 8dc7f2f01..193f8dad4 100644
--- a/src/couch_replicator/src/couch_replicator_api_wrap.erl
+++ b/src/couch_replicator/src/couch_replicator_api_wrap.erl
@@ -40,17 +40,17 @@
changes_since/5,
db_uri/1,
normalize_db/1
- ]).
+]).
-import(couch_replicator_httpc, [
send_req/3
- ]).
+]).
-import(couch_util, [
encode_doc_id/1,
get_value/2,
get_value/3
- ]).
+]).
-define(MAX_WAIT, 5 * 60 * 1000).
@@ -59,14 +59,11 @@
db_uri(#httpdb{url = Url}) ->
couch_util:url_strip_password(Url);
-
db_uri(DbName) when is_binary(DbName) ->
?b2l(DbName);
-
db_uri(Db) ->
db_uri(couch_db:name(Db)).
-
db_open(Db) ->
db_open(Db, false, []).
@@ -74,40 +71,48 @@ db_open(#httpdb{} = Db1, Create, CreateParams) ->
{ok, Db} = couch_replicator_httpc:setup(Db1),
try
case Create of
- false ->
- ok;
- true ->
- Db2 = maybe_append_create_query_params(Db, CreateParams),
- send_req(Db2, [{method, put}],
- fun(401, _, _) ->
- throw({unauthorized, ?l2b(db_uri(Db2))});
+ false ->
+ ok;
+ true ->
+ Db2 = maybe_append_create_query_params(Db, CreateParams),
+ send_req(
+ Db2,
+ [{method, put}],
+ fun
+ (401, _, _) ->
+ throw({unauthorized, ?l2b(db_uri(Db2))});
+ (403, _, _) ->
+ throw({forbidden, ?l2b(db_uri(Db2))});
+ (_, _, _) ->
+ ok
+ end
+ )
+ end,
+ send_req(
+ Db,
+ [{method, get}],
+ fun
+ (200, _, {Props}) ->
+ UpdateSeq = get_value(<<"update_seq">>, Props),
+ InstanceStart = get_value(<<"instance_start_time">>, Props),
+ case {UpdateSeq, InstanceStart} of
+ {undefined, _} ->
+ throw({db_not_found, ?l2b(db_uri(Db))});
+ {_, undefined} ->
+ throw({db_not_found, ?l2b(db_uri(Db))});
+ _ ->
+ {ok, Db}
+ end;
+ (200, _, _Body) ->
+ throw({db_not_found, ?l2b(db_uri(Db))});
+ (401, _, _) ->
+ throw({unauthorized, ?l2b(db_uri(Db))});
(403, _, _) ->
- throw({forbidden, ?l2b(db_uri(Db2))});
+ throw({forbidden, ?l2b(db_uri(Db))});
(_, _, _) ->
- ok
- end)
- end,
- send_req(Db, [{method, get}],
- fun(200, _, {Props}) ->
- UpdateSeq = get_value(<<"update_seq">>, Props),
- InstanceStart = get_value(<<"instance_start_time">>, Props),
- case {UpdateSeq, InstanceStart} of
- {undefined, _} ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- {_, undefined} ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- _ ->
- {ok, Db}
- end;
- (200, _, _Body) ->
- throw({db_not_found, ?l2b(db_uri(Db))});
- (401, _, _) ->
- throw({unauthorized, ?l2b(db_uri(Db))});
- (403, _, _) ->
- throw({forbidden, ?l2b(db_uri(Db))});
- (_, _, _) ->
- throw({db_not_found, ?l2b(db_uri(Db))})
- end)
+ throw({db_not_found, ?l2b(db_uri(Db))})
+ end
+ )
catch
throw:Error ->
db_close(Db),
@@ -125,12 +130,14 @@ db_close(#httpdb{httpc_pool = Pool} = HttpDb) ->
unlink(Pool),
ok = couch_replicator_httpc_pool:stop(Pool).
-
get_db_info(#httpdb{} = Db) ->
- send_req(Db, [],
+ send_req(
+ Db,
+ [],
fun(200, _, {Props}) ->
{ok, Props}
- end).
+ end
+ ).
get_pending_count(#httpdb{} = Db, Seq) when is_number(Seq) ->
% Source looks like Apache CouchDB and not Cloudant so we fall
@@ -151,52 +158,63 @@ get_pending_count(#httpdb{} = Db, Seq) ->
get_view_info(#httpdb{} = Db, DDocId, ViewName) ->
Path = io_lib:format("~s/_view/~s/_info", [DDocId, ViewName]),
- send_req(Db, [{path, Path}],
+ send_req(
+ Db,
+ [{path, Path}],
fun(200, _, {Props}) ->
{VInfo} = couch_util:get_value(<<"view_index">>, Props, {[]}),
{ok, VInfo}
- end).
-
+ end
+ ).
ensure_full_commit(#httpdb{} = Db) ->
send_req(
Db,
- [{method, post}, {path, "_ensure_full_commit"},
- {headers, [{"Content-Type", "application/json"}]}],
- fun(201, _, {Props}) ->
- {ok, get_value(<<"instance_start_time">>, Props)};
- (_, _, {Props}) ->
- {error, get_value(<<"error">>, Props)}
- end).
-
+ [
+ {method, post},
+ {path, "_ensure_full_commit"},
+ {headers, [{"Content-Type", "application/json"}]}
+ ],
+ fun
+ (201, _, {Props}) ->
+ {ok, get_value(<<"instance_start_time">>, Props)};
+ (_, _, {Props}) ->
+ {error, get_value(<<"error">>, Props)}
+ end
+ ).
get_missing_revs(#httpdb{} = Db, IdRevs) ->
JsonBody = {[{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- IdRevs]},
send_req(
Db,
- [{method, post}, {path, "_revs_diff"}, {body, ?JSON_ENCODE(JsonBody)},
- {headers, [{"Content-Type", "application/json"}]}],
- fun(200, _, {Props}) ->
- ConvertToNativeFun = fun({Id, {Result}}) ->
- MissingRevs = couch_doc:parse_revs(
- get_value(<<"missing">>, Result)
- ),
- PossibleAncestors = couch_doc:parse_revs(
- get_value(<<"possible_ancestors">>, Result, [])
- ),
- {Id, MissingRevs, PossibleAncestors}
- end,
- {ok, lists:map(ConvertToNativeFun, Props)};
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
- {error, {revs_diff_failed, ErrCode, ErrMsg}}
- end).
-
+ [
+ {method, post},
+ {path, "_revs_diff"},
+ {body, ?JSON_ENCODE(JsonBody)},
+ {headers, [{"Content-Type", "application/json"}]}
+ ],
+ fun
+ (200, _, {Props}) ->
+ ConvertToNativeFun = fun({Id, {Result}}) ->
+ MissingRevs = couch_doc:parse_revs(
+ get_value(<<"missing">>, Result)
+ ),
+ PossibleAncestors = couch_doc:parse_revs(
+ get_value(<<"possible_ancestors">>, Result, [])
+ ),
+ {Id, MissingRevs, PossibleAncestors}
+ end,
+ {ok, lists:map(ConvertToNativeFun, Props)};
+ (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
+ {error, {revs_diff_failed, ErrCode, ErrMsg}}
+ end
+ ).
open_doc_revs(#httpdb{retries = 0} = HttpDb, Id, Revs, Options, _Fun, _Acc) ->
Path = encode_doc_id(Id),
QS = options_to_query_args(HttpDb, Path, [revs, {open_revs, Revs} | Options]),
Url = couch_util:url_strip_password(
- couch_replicator_httpc:full_url(HttpDb, [{path,Path}, {qs,QS}])
+ couch_replicator_httpc:full_url(HttpDb, [{path, Path}, {qs, QS}])
),
couch_log:error("Replication crashing because GET ~s failed", [Url]),
exit(kaboom);
@@ -206,15 +224,15 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
{Pid, Ref} = spawn_monitor(fun() ->
Self = self(),
Callback = fun
- (200, Headers, StreamDataFun) ->
- remote_open_doc_revs_streamer_start(Self),
- {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
- header_value("Content-Type", Headers),
- StreamDataFun,
- fun mp_parse_mixed/1
- );
- (414, _, _) ->
- exit(request_uri_too_long)
+ (200, Headers, StreamDataFun) ->
+ remote_open_doc_revs_streamer_start(Self),
+ {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
+ header_value("Content-Type", Headers),
+ StreamDataFun,
+ fun mp_parse_mixed/1
+ );
+ (414, _, _) ->
+ exit(request_uri_too_long)
end,
Streamer = spawn_link(fun() ->
Params = [
@@ -241,12 +259,12 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
exit(Streamer, {streamer_parent_died, Self});
{'DOWN', Ref, process, Self, _} ->
ok
- end
+ end
end),
receive
- {started_open_doc_revs, Ref} ->
- Ret = receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc),
- exit({exit_ok, Ret})
+ {started_open_doc_revs, Ref} ->
+ Ret = receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc),
+ exit({exit_ok, Ret})
end
end),
receive
@@ -254,7 +272,7 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
Ret;
{'DOWN', Ref, process, Pid, {{nocatch, missing_doc}, _}} ->
throw(missing_doc);
- {'DOWN', Ref, process, Pid, {{nocatch, {missing_stub,_} = Stub}, _}} ->
+ {'DOWN', Ref, process, Pid, {{nocatch, {missing_stub, _} = Stub}, _}} ->
throw(Stub);
{'DOWN', Ref, process, Pid, {http_request_failed, _, _, max_backoff}} ->
exit(max_backoff);
@@ -264,19 +282,27 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
true ->
throw(request_uri_too_long);
false ->
- couch_log:info("Reducing url length to ~B because of"
- " 414 response", [NewMaxLen]),
- Options1 = lists:keystore(max_url_len, 1, Options,
- {max_url_len, NewMaxLen}),
+ couch_log:info(
+ "Reducing url length to ~B because of"
+ " 414 response",
+ [NewMaxLen]
+ ),
+ Options1 = lists:keystore(
+ max_url_len,
+ 1,
+ Options,
+ {max_url_len, NewMaxLen}
+ ),
open_doc_revs(HttpDb, Id, Revs, Options1, Fun, Acc)
end;
{'DOWN', Ref, process, Pid, Else} ->
Url = couch_util:url_strip_password(
- couch_replicator_httpc:full_url(HttpDb, [{path,Path}, {qs,QS}])
+ couch_replicator_httpc:full_url(HttpDb, [{path, Path}, {qs, QS}])
),
#httpdb{retries = Retries, wait = Wait0} = HttpDb,
Wait = 2 * erlang:min(Wait0 * 2, ?MAX_WAIT),
- couch_log:notice("Retrying GET to ~s in ~p seconds due to error ~w",
+ couch_log:notice(
+ "Retrying GET to ~s in ~p seconds due to error ~w",
[Url, Wait / 1000, error_reason(Else)]
),
ok = timer:sleep(Wait),
@@ -287,7 +313,6 @@ open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
open_doc_revs(RetryDb, Id, Revs, Options, Fun, Acc)
end.
-
error_reason({http_request_failed, "GET", _Url, {error, timeout}}) ->
timeout;
error_reason({http_request_failed, "GET", _Url, {error, {_, req_timedout}}}) ->
@@ -301,35 +326,44 @@ open_doc(#httpdb{} = Db, Id, Options) ->
send_req(
Db,
[{path, encode_doc_id(Id)}, {qs, options_to_query_args(Options, [])}],
- fun(200, _, Body) ->
- {ok, couch_doc:from_json_obj(Body)};
- (_, _, {Props}) ->
- {error, get_value(<<"error">>, Props)}
- end).
-
+ fun
+ (200, _, Body) ->
+ {ok, couch_doc:from_json_obj(Body)};
+ (_, _, {Props}) ->
+ {error, get_value(<<"error">>, Props)}
+ end
+ ).
update_doc(Db, Doc, Options) ->
update_doc(Db, Doc, Options, interactive_edit).
update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) ->
- QArgs = case Type of
- replicated_changes ->
- [{"new_edits", "false"}];
- _ ->
- []
- end ++ options_to_query_args(Options, []),
+ QArgs =
+ case Type of
+ replicated_changes ->
+ [{"new_edits", "false"}];
+ _ ->
+ []
+ end ++ options_to_query_args(Options, []),
Boundary = couch_uuids:random(),
JsonBytes = ?JSON_ENCODE(
couch_doc:to_json_obj(
- Doc, [revs, attachments, follows, att_encoding_info | Options])),
- {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(Boundary,
- JsonBytes, Doc#doc.atts, true),
- Headers = case lists:member(delay_commit, Options) of
- true ->
- [{"X-Couch-Full-Commit", "false"}];
- false ->
- []
- end ++ [{"Content-Type", ?b2l(ContentType)}, {"Content-Length", Len}],
+ Doc, [revs, attachments, follows, att_encoding_info | Options]
+ )
+ ),
+ {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(
+ Boundary,
+ JsonBytes,
+ Doc#doc.atts,
+ true
+ ),
+ Headers =
+ case lists:member(delay_commit, Options) of
+ true ->
+ [{"X-Couch-Full-Commit", "false"}];
+ false ->
+ []
+ end ++ [{"Content-Type", ?b2l(ContentType)}, {"Content-Length", Len}],
Body = {fun stream_doc/1, {JsonBytes, Doc#doc.atts, Boundary, Len}},
send_req(
% A crash here bubbles all the way back up to run_user_fun inside
@@ -337,27 +371,33 @@ update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) ->
% appropriate course of action, since we've already started streaming
% the response body from the GET request.
HttpDb#httpdb{retries = 0},
- [{method, put}, {path, encode_doc_id(DocId)},
- {qs, QArgs}, {headers, Headers}, {body, Body}],
- fun(Code, _, {Props}) when Code =:= 200 orelse Code =:= 201 orelse Code =:= 202 ->
+ [
+ {method, put},
+ {path, encode_doc_id(DocId)},
+ {qs, QArgs},
+ {headers, Headers},
+ {body, Body}
+ ],
+ fun
+ (Code, _, {Props}) when Code =:= 200 orelse Code =:= 201 orelse Code =:= 202 ->
{ok, couch_doc:parse_rev(get_value(<<"rev">>, Props))};
(409, _, _) ->
throw(conflict);
(Code, _, {Props}) ->
case {Code, get_value(<<"error">>, Props)} of
- {401, <<"unauthorized">>} ->
- throw({unauthorized, get_value(<<"reason">>, Props)});
- {403, <<"forbidden">>} ->
- throw({forbidden, get_value(<<"reason">>, Props)});
- {412, <<"missing_stub">>} ->
- throw({missing_stub, get_value(<<"reason">>, Props)});
- {413, _} ->
- {error, request_body_too_large};
- {_, Error} ->
- {error, Error}
+ {401, <<"unauthorized">>} ->
+ throw({unauthorized, get_value(<<"reason">>, Props)});
+ {403, <<"forbidden">>} ->
+ throw({forbidden, get_value(<<"reason">>, Props)});
+ {412, <<"missing_stub">>} ->
+ throw({missing_stub, get_value(<<"reason">>, Props)});
+ {413, _} ->
+ {error, request_body_too_large};
+ {_, Error} ->
+ {error, Error}
end
- end).
-
+ end
+ ).
update_docs(Db, DocList, Options) ->
update_docs(Db, DocList, Options, interactive_edit).
@@ -366,26 +406,30 @@ update_docs(_Db, [], _Options, _UpdateType) ->
{ok, []};
update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
FullCommit = atom_to_list(not lists:member(delay_commit, Options)),
- Prefix = case UpdateType of
- replicated_changes ->
- <<"{\"new_edits\":false,\"docs\":[">>;
- interactive_edit ->
- <<"{\"docs\":[">>
- end,
+ Prefix =
+ case UpdateType of
+ replicated_changes ->
+ <<"{\"new_edits\":false,\"docs\":[">>;
+ interactive_edit ->
+ <<"{\"docs\":[">>
+ end,
Suffix = <<"]}">>,
% Note: nginx and other servers don't like PUT/POST requests without
% a Content-Length header, so we can't do a chunked transfer encoding
% and JSON encode each doc only before sending it through the socket.
{Docs, Len} = lists:mapfoldl(
- fun(#doc{} = Doc, Acc) ->
- Json = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
- {Json, Acc + iolist_size(Json)};
- (Doc, Acc) ->
- {Doc, Acc + iolist_size(Doc)}
+ fun
+ (#doc{} = Doc, Acc) ->
+ Json = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
+ {Json, Acc + iolist_size(Json)};
+ (Doc, Acc) ->
+ {Doc, Acc + iolist_size(Doc)}
end,
byte_size(Prefix) + byte_size(Suffix) + length(DocList) - 1,
- DocList),
- BodyFun = fun(eof) ->
+ DocList
+ ),
+ BodyFun = fun
+ (eof) ->
eof;
([]) ->
{ok, Suffix, eof};
@@ -403,153 +447,195 @@ update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
],
send_req(
HttpDb,
- [{method, post}, {path, "_bulk_docs"},
- {body, {BodyFun, [prefix | Docs]}}, {headers, Headers}],
- fun(201, _, Results) when is_list(Results) ->
+ [
+ {method, post},
+ {path, "_bulk_docs"},
+ {body, {BodyFun, [prefix | Docs]}},
+ {headers, Headers}
+ ],
+ fun
+ (201, _, Results) when is_list(Results) ->
{ok, bulk_results_to_errors(DocList, Results, remote)};
- (413, _, _) ->
+ (413, _, _) ->
{error, request_body_too_large};
- (417, _, Results) when is_list(Results) ->
+ (417, _, Results) when is_list(Results) ->
{ok, bulk_results_to_errors(DocList, Results, remote)};
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
+ (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
{error, {bulk_docs_failed, ErrCode, ErrMsg}}
- end).
-
-
-changes_since(#httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
- Style, StartSeq, UserFun, Options) ->
+ end
+ ).
+
+changes_since(
+ #httpdb{headers = Headers1, timeout = InactiveTimeout} = HttpDb,
+ Style,
+ StartSeq,
+ UserFun,
+ Options
+) ->
Timeout = erlang:max(1000, InactiveTimeout div 3),
- BaseQArgs = case get_value(continuous, Options, false) of
- false ->
- [{"feed", "normal"}];
- true ->
- [{"feed", "continuous"}]
- end ++ [
- {"style", atom_to_list(Style)}, {"since", ?JSON_ENCODE(StartSeq)},
- {"timeout", integer_to_list(Timeout)}
- ],
+ BaseQArgs =
+ case get_value(continuous, Options, false) of
+ false ->
+ [{"feed", "normal"}];
+ true ->
+ [{"feed", "continuous"}]
+ end ++
+ [
+ {"style", atom_to_list(Style)},
+ {"since", ?JSON_ENCODE(StartSeq)},
+ {"timeout", integer_to_list(Timeout)}
+ ],
DocIds = get_value(doc_ids, Options),
Selector = get_value(selector, Options),
- {QArgs, Method, Body, Headers} = case {DocIds, Selector} of
- {undefined, undefined} ->
- QArgs1 = maybe_add_changes_filter_q_args(BaseQArgs, Options),
- {QArgs1, get, [], Headers1};
- {undefined, _} when is_tuple(Selector) ->
- Headers2 = [{"Content-Type", "application/json"} | Headers1],
- JsonSelector = ?JSON_ENCODE({[{<<"selector">>, Selector}]}),
- {[{"filter", "_selector"} | BaseQArgs], post, JsonSelector, Headers2};
- {_, undefined} when is_list(DocIds) ->
- Headers2 = [{"Content-Type", "application/json"} | Headers1],
- JsonDocIds = ?JSON_ENCODE({[{<<"doc_ids">>, DocIds}]}),
- {[{"filter", "_doc_ids"} | BaseQArgs], post, JsonDocIds, Headers2}
- end,
+ {QArgs, Method, Body, Headers} =
+ case {DocIds, Selector} of
+ {undefined, undefined} ->
+ QArgs1 = maybe_add_changes_filter_q_args(BaseQArgs, Options),
+ {QArgs1, get, [], Headers1};
+ {undefined, _} when is_tuple(Selector) ->
+ Headers2 = [{"Content-Type", "application/json"} | Headers1],
+ JsonSelector = ?JSON_ENCODE({[{<<"selector">>, Selector}]}),
+ {[{"filter", "_selector"} | BaseQArgs], post, JsonSelector, Headers2};
+ {_, undefined} when is_list(DocIds) ->
+ Headers2 = [{"Content-Type", "application/json"} | Headers1],
+ JsonDocIds = ?JSON_ENCODE({[{<<"doc_ids">>, DocIds}]}),
+ {[{"filter", "_doc_ids"} | BaseQArgs], post, JsonDocIds, Headers2}
+ end,
try
send_req(
HttpDb,
- [{method, Method}, {path, "_changes"}, {qs, QArgs},
- {headers, Headers}, {body, Body},
- {ibrowse_options, [{stream_to, {self(), once}}]}],
- fun(200, _, DataStreamFun) ->
+ [
+ {method, Method},
+ {path, "_changes"},
+ {qs, QArgs},
+ {headers, Headers},
+ {body, Body},
+ {ibrowse_options, [{stream_to, {self(), once}}]}
+ ],
+ fun
+ (200, _, DataStreamFun) ->
parse_changes_feed(Options, UserFun, DataStreamFun);
(405, _, _) when is_list(DocIds) ->
% CouchDB versions < 1.1.0 don't have the builtin
% _changes feed filter "_doc_ids" neither support POST
- send_req(HttpDb, [{method, get}, {path, "_changes"},
- {qs, BaseQArgs}, {headers, Headers1},
- {ibrowse_options, [{stream_to, {self(), once}}]}],
+ send_req(
+ HttpDb,
+ [
+ {method, get},
+ {path, "_changes"},
+ {qs, BaseQArgs},
+ {headers, Headers1},
+ {ibrowse_options, [{stream_to, {self(), once}}]}
+ ],
fun(200, _, DataStreamFun2) ->
- UserFun2 = fun(#doc_info{id = Id} = DocInfo) ->
- case lists:member(Id, DocIds) of
- true ->
- UserFun(DocInfo);
- false ->
- ok
- end;
- (LastSeq) ->
- UserFun(LastSeq)
+ UserFun2 = fun
+ (#doc_info{id = Id} = DocInfo) ->
+ case lists:member(Id, DocIds) of
+ true ->
+ UserFun(DocInfo);
+ false ->
+ ok
+ end;
+ (LastSeq) ->
+ UserFun(LastSeq)
end,
- parse_changes_feed(Options, UserFun2,
- DataStreamFun2)
- end);
- (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
+ parse_changes_feed(
+ Options,
+ UserFun2,
+ DataStreamFun2
+ )
+ end
+ );
+ (ErrCode, _, ErrMsg) when is_integer(ErrCode) ->
throw({retry_limit, {changes_req_failed, ErrCode, ErrMsg}})
- end)
+ end
+ )
catch
exit:{http_request_failed, _, _, max_backoff} ->
exit(max_backoff);
- exit:{http_request_failed, _, _, {error, {connection_closed,
- mid_stream}}} ->
+ exit:{http_request_failed, _, _, {error, {connection_closed, mid_stream}}} ->
throw(retry_no_limit);
exit:{http_request_failed, _, _, _} = Error ->
throw({retry_limit, Error})
end.
-
% internal functions
maybe_add_changes_filter_q_args(BaseQS, Options) ->
case get_value(filter, Options) of
- undefined ->
- BaseQS;
- FilterName ->
- %% get list of view attributes
- ViewFields0 = [atom_to_list(F) || F <- record_info(fields, mrargs)],
- ViewFields = ["key" | ViewFields0],
-
- {Params} = get_value(query_params, Options, {[]}),
- [{"filter", ?b2l(FilterName)} | lists:foldl(
- fun({K, V}, QSAcc) ->
- Ks = couch_util:to_list(K),
- case lists:keymember(Ks, 1, QSAcc) of
- true ->
- QSAcc;
- false when FilterName =:= <<"_view">> ->
- V1 = case lists:member(Ks, ViewFields) of
- true -> ?JSON_ENCODE(V);
- false -> couch_util:to_list(V)
+ undefined ->
+ BaseQS;
+ FilterName ->
+ %% get list of view attributes
+ ViewFields0 = [atom_to_list(F) || F <- record_info(fields, mrargs)],
+ ViewFields = ["key" | ViewFields0],
+
+ {Params} = get_value(query_params, Options, {[]}),
+ [
+ {"filter", ?b2l(FilterName)}
+ | lists:foldl(
+ fun({K, V}, QSAcc) ->
+ Ks = couch_util:to_list(K),
+ case lists:keymember(Ks, 1, QSAcc) of
+ true ->
+ QSAcc;
+ false when FilterName =:= <<"_view">> ->
+ V1 =
+ case lists:member(Ks, ViewFields) of
+ true -> ?JSON_ENCODE(V);
+ false -> couch_util:to_list(V)
+ end,
+ [{Ks, V1} | QSAcc];
+ false ->
+ [{Ks, couch_util:to_list(V)} | QSAcc]
+ end
end,
- [{Ks, V1} | QSAcc];
- false ->
- [{Ks, couch_util:to_list(V)} | QSAcc]
- end
- end,
- BaseQS, Params)]
+ BaseQS,
+ Params
+ )
+ ]
end.
parse_changes_feed(Options, UserFun, DataStreamFun) ->
case get_value(continuous, Options, false) of
- true ->
- continuous_changes(DataStreamFun, UserFun);
- false ->
- EventFun = fun(Ev) ->
- changes_ev1(Ev, fun(DocInfo, _) -> UserFun(DocInfo) end, [])
- end,
- json_stream_parse:events(DataStreamFun, EventFun)
+ true ->
+ continuous_changes(DataStreamFun, UserFun);
+ false ->
+ EventFun = fun(Ev) ->
+ changes_ev1(Ev, fun(DocInfo, _) -> UserFun(DocInfo) end, [])
+ end,
+ json_stream_parse:events(DataStreamFun, EventFun)
end.
-
options_to_query_args(HttpDb, Path, Options0) ->
case lists:keytake(max_url_len, 1, Options0) of
- false -> MaxLen = ?MAX_URL_LEN, Options = Options0;
- {value, {max_url_len, MaxLen}, Options} -> ok
+ false ->
+ MaxLen = ?MAX_URL_LEN,
+ Options = Options0;
+ {value, {max_url_len, MaxLen}, Options} ->
+ ok
end,
case lists:keytake(atts_since, 1, Options) of
- false ->
- options_to_query_args(Options, []);
- {value, {atts_since, []}, Options2} ->
- options_to_query_args(Options2, []);
- {value, {atts_since, PAs}, Options2} ->
- QueryArgs1 = options_to_query_args(Options2, []),
- FullUrl = couch_replicator_httpc:full_url(
- HttpDb, [{path, Path}, {qs, QueryArgs1}]),
- RevList = atts_since_arg(
- length("GET " ++ FullUrl ++ " HTTP/1.1\r\n") +
- length("&atts_since=") + 6, % +6 = % encoded [ and ]
- PAs, MaxLen, []),
- [{"atts_since", ?b2l(iolist_to_binary(?JSON_ENCODE(RevList)))} | QueryArgs1]
+ false ->
+ options_to_query_args(Options, []);
+ {value, {atts_since, []}, Options2} ->
+ options_to_query_args(Options2, []);
+ {value, {atts_since, PAs}, Options2} ->
+ QueryArgs1 = options_to_query_args(Options2, []),
+ FullUrl = couch_replicator_httpc:full_url(
+ HttpDb, [{path, Path}, {qs, QueryArgs1}]
+ ),
+ RevList = atts_since_arg(
+ length("GET " ++ FullUrl ++ " HTTP/1.1\r\n") +
+ % +6 = % encoded [ and ]
+ length("&atts_since=") + 6,
+ PAs,
+ MaxLen,
+ []
+ ),
+ [{"atts_since", ?b2l(iolist_to_binary(?JSON_ENCODE(RevList)))} | QueryArgs1]
end.
-
options_to_query_args([], Acc) ->
lists:reverse(Acc);
options_to_query_args([ejson_body | Rest], Acc) ->
@@ -570,22 +656,22 @@ atts_since_arg(_UrlLen, [], _MaxLen, Acc) ->
lists:reverse(Acc);
atts_since_arg(UrlLen, [PA | Rest], MaxLen, Acc) ->
RevStr = couch_doc:rev_to_str(PA),
- NewUrlLen = case Rest of
- [] ->
- % plus 2 double quotes (% encoded)
- UrlLen + size(RevStr) + 6;
- _ ->
- % plus 2 double quotes and a comma (% encoded)
- UrlLen + size(RevStr) + 9
- end,
+ NewUrlLen =
+ case Rest of
+ [] ->
+ % plus 2 double quotes (% encoded)
+ UrlLen + size(RevStr) + 6;
+ _ ->
+ % plus 2 double quotes and a comma (% encoded)
+ UrlLen + size(RevStr) + 9
+ end,
case NewUrlLen >= MaxLen of
- true ->
- lists:reverse(Acc);
- false ->
- atts_since_arg(NewUrlLen, Rest, MaxLen, [RevStr | Acc])
+ true ->
+ lists:reverse(Acc);
+ false ->
+ atts_since_arg(NewUrlLen, Rest, MaxLen, [RevStr | Acc])
end.
-
% TODO: A less verbose, more elegant and automatic restart strategy for
% the exported open_doc_revs/6 function. The restart should be
% transparent to the caller like any other Couch API function exported
@@ -596,51 +682,55 @@ receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc) ->
erlang:put(open_doc_revs, {Id, Revs, Ref, Streamer}),
receive_docs(Streamer, Fun, Ref, Acc)
catch
- error:{restart_open_doc_revs, NewRef} ->
- receive_docs_loop(Streamer, Fun, Id, Revs, NewRef, Acc)
+ error:{restart_open_doc_revs, NewRef} ->
+ receive_docs_loop(Streamer, Fun, Id, Revs, NewRef, Acc)
end.
receive_docs(Streamer, UserFun, Ref, UserAcc) ->
Streamer ! {get_headers, Ref, self()},
receive
- {started_open_doc_revs, NewRef} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {headers, Ref, Headers} ->
- case header_value("content-type", Headers) of
- {"multipart/related", _} = ContentType ->
- % Skip document body and attachment size limits validation here
- % since these should be validated by the replication target
- case couch_doc:doc_from_multi_part_stream(
- ContentType,
- fun() -> receive_doc_data(Streamer, Ref) end,
- Ref, _ValidateDocLimits = false) of
- {ok, Doc, WaitFun, Parser} ->
- case run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref) of
- {ok, UserAcc2} ->
- ok;
- {skip, UserAcc2} ->
- couch_httpd_multipart:abort_multipart_stream(Parser)
- end,
- WaitFun(),
- receive_docs(Streamer, UserFun, Ref, UserAcc2)
+ {started_open_doc_revs, NewRef} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {headers, Ref, Headers} ->
+ case header_value("content-type", Headers) of
+ {"multipart/related", _} = ContentType ->
+ % Skip document body and attachment size limits validation here
+ % since these should be validated by the replication target
+ case
+ couch_doc:doc_from_multi_part_stream(
+ ContentType,
+ fun() -> receive_doc_data(Streamer, Ref) end,
+ Ref,
+ _ValidateDocLimits = false
+ )
+ of
+ {ok, Doc, WaitFun, Parser} ->
+ case run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref) of
+ {ok, UserAcc2} ->
+ ok;
+ {skip, UserAcc2} ->
+ couch_httpd_multipart:abort_multipart_stream(Parser)
+ end,
+ WaitFun(),
+ receive_docs(Streamer, UserFun, Ref, UserAcc2)
+ end;
+ {"application/json", []} ->
+ Doc = couch_doc:from_json_obj(
+ ?JSON_DECODE(receive_all(Streamer, Ref, []))
+ ),
+ {_, UserAcc2} = run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref),
+ receive_docs(Streamer, UserFun, Ref, UserAcc2);
+ {"application/json", [{"error", "true"}]} ->
+ {ErrorProps} = ?JSON_DECODE(receive_all(Streamer, Ref, [])),
+ Rev = get_value(<<"missing">>, ErrorProps),
+ Result = {{not_found, missing}, couch_doc:parse_rev(Rev)},
+ {_, UserAcc2} = run_user_fun(UserFun, Result, UserAcc, Ref),
+ receive_docs(Streamer, UserFun, Ref, UserAcc2)
end;
- {"application/json", []} ->
- Doc = couch_doc:from_json_obj(
- ?JSON_DECODE(receive_all(Streamer, Ref, []))),
- {_, UserAcc2} = run_user_fun(UserFun, {ok, Doc}, UserAcc, Ref),
- receive_docs(Streamer, UserFun, Ref, UserAcc2);
- {"application/json", [{"error","true"}]} ->
- {ErrorProps} = ?JSON_DECODE(receive_all(Streamer, Ref, [])),
- Rev = get_value(<<"missing">>, ErrorProps),
- Result = {{not_found, missing}, couch_doc:parse_rev(Rev)},
- {_, UserAcc2} = run_user_fun(UserFun, Result, UserAcc, Ref),
- receive_docs(Streamer, UserFun, Ref, UserAcc2)
- end;
- {done, Ref} ->
- {ok, UserAcc}
+ {done, Ref} ->
+ {ok, UserAcc}
end.
-
run_user_fun(UserFun, Arg, UserAcc, OldRef) ->
{Pid, Ref} = spawn_monitor(fun() ->
try UserFun(Arg, UserAcc) of
@@ -670,78 +760,76 @@ run_user_fun(UserFun, Arg, UserAcc, OldRef) ->
erlang:exit(Reason)
end.
-
restart_remote_open_doc_revs(Ref, NewRef) ->
receive
- {body_bytes, Ref, _} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {body_done, Ref} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {done, Ref} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {headers, Ref, _} ->
- restart_remote_open_doc_revs(Ref, NewRef)
+ {body_bytes, Ref, _} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {body_done, Ref} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {done, Ref} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {headers, Ref, _} ->
+ restart_remote_open_doc_revs(Ref, NewRef)
after 0 ->
erlang:error({restart_open_doc_revs, NewRef})
end.
-
remote_open_doc_revs_streamer_start(Parent) ->
receive
- {get_headers, _Ref, Parent} ->
- remote_open_doc_revs_streamer_start(Parent);
- {next_bytes, _Ref, Parent} ->
- remote_open_doc_revs_streamer_start(Parent)
+ {get_headers, _Ref, Parent} ->
+ remote_open_doc_revs_streamer_start(Parent);
+ {next_bytes, _Ref, Parent} ->
+ remote_open_doc_revs_streamer_start(Parent)
after 0 ->
Parent ! {started_open_doc_revs, make_ref()}
end.
-
receive_all(Streamer, Ref, Acc) ->
Streamer ! {next_bytes, Ref, self()},
receive
- {started_open_doc_revs, NewRef} ->
- restart_remote_open_doc_revs(Ref, NewRef);
- {body_bytes, Ref, Bytes} ->
- receive_all(Streamer, Ref, [Bytes | Acc]);
- {body_done, Ref} ->
- lists:reverse(Acc)
+ {started_open_doc_revs, NewRef} ->
+ restart_remote_open_doc_revs(Ref, NewRef);
+ {body_bytes, Ref, Bytes} ->
+ receive_all(Streamer, Ref, [Bytes | Acc]);
+ {body_done, Ref} ->
+ lists:reverse(Acc)
end.
-
mp_parse_mixed(eof) ->
- receive {get_headers, Ref, From} ->
- From ! {done, Ref}
+ receive
+ {get_headers, Ref, From} ->
+ From ! {done, Ref}
end;
mp_parse_mixed({headers, H}) ->
- receive {get_headers, Ref, From} ->
- From ! {headers, Ref, H}
+ receive
+ {get_headers, Ref, From} ->
+ From ! {headers, Ref, H}
end,
fun mp_parse_mixed/1;
mp_parse_mixed({body, Bytes}) ->
- receive {next_bytes, Ref, From} ->
- From ! {body_bytes, Ref, Bytes}
+ receive
+ {next_bytes, Ref, From} ->
+ From ! {body_bytes, Ref, Bytes}
end,
fun mp_parse_mixed/1;
mp_parse_mixed(body_end) ->
- receive {next_bytes, Ref, From} ->
- From ! {body_done, Ref};
- {get_headers, Ref, From} ->
- self() ! {get_headers, Ref, From}
+ receive
+ {next_bytes, Ref, From} ->
+ From ! {body_done, Ref};
+ {get_headers, Ref, From} ->
+ self() ! {get_headers, Ref, From}
end,
fun mp_parse_mixed/1.
-
receive_doc_data(Streamer, Ref) ->
Streamer ! {next_bytes, Ref, self()},
receive
- {body_bytes, Ref, Bytes} ->
- {Bytes, fun() -> receive_doc_data(Streamer, Ref) end};
- {body_done, Ref} ->
- {<<>>, fun() -> receive_doc_data(Streamer, Ref) end}
+ {body_bytes, Ref, Bytes} ->
+ {Bytes, fun() -> receive_doc_data(Streamer, Ref) end};
+ {body_done, Ref} ->
+ {<<>>, fun() -> receive_doc_data(Streamer, Ref) end}
end.
-
changes_ev1(object_start, UserFun, UserAcc) ->
fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
@@ -755,11 +843,13 @@ changes_ev3(array_start, UserFun, UserAcc) ->
changes_ev_loop(object_start, UserFun, UserAcc) ->
fun(Ev) ->
- json_stream_parse:collect_object(Ev,
+ json_stream_parse:collect_object(
+ Ev,
fun(Obj) ->
UserAcc2 = UserFun(json_to_doc_info(Obj), UserAcc),
fun(Ev2) -> changes_ev_loop(Ev2, UserFun, UserAcc2) end
- end)
+ end
+ )
end;
changes_ev_loop(array_end, _UserFun, _UserAcc) ->
fun(_Ev) -> changes_ev_done() end.
@@ -770,80 +860,106 @@ changes_ev_done() ->
continuous_changes(DataFun, UserFun) ->
{DataFun2, _, Rest} = json_stream_parse:events(
DataFun,
- fun(Ev) -> parse_changes_line(Ev, UserFun) end),
+ fun(Ev) -> parse_changes_line(Ev, UserFun) end
+ ),
continuous_changes(fun() -> {Rest, DataFun2} end, UserFun).
parse_changes_line(object_start, UserFun) ->
fun(Ev) ->
- json_stream_parse:collect_object(Ev,
- fun(Obj) -> UserFun(json_to_doc_info(Obj)) end)
+ json_stream_parse:collect_object(
+ Ev,
+ fun(Obj) -> UserFun(json_to_doc_info(Obj)) end
+ )
end.
json_to_doc_info({Props}) ->
case get_value(<<"changes">>, Props) of
- undefined ->
- {last_seq, get_value(<<"last_seq">>, Props)};
- Changes ->
- RevsInfo0 = lists:map(
- fun({Change}) ->
- Rev = couch_doc:parse_rev(get_value(<<"rev">>, Change)),
- Del = couch_replicator_utils:is_deleted(Change),
- #rev_info{rev=Rev, deleted=Del}
- end, Changes),
-
- RevsInfo = case get_value(<<"removed">>, Props) of
- true ->
- [_ | RevsInfo1] = RevsInfo0,
- RevsInfo1;
- _ ->
- RevsInfo0
- end,
+ undefined ->
+ {last_seq, get_value(<<"last_seq">>, Props)};
+ Changes ->
+ RevsInfo0 = lists:map(
+ fun({Change}) ->
+ Rev = couch_doc:parse_rev(get_value(<<"rev">>, Change)),
+ Del = couch_replicator_utils:is_deleted(Change),
+ #rev_info{rev = Rev, deleted = Del}
+ end,
+ Changes
+ ),
- #doc_info{
- id = get_value(<<"id">>, Props),
- high_seq = get_value(<<"seq">>, Props),
- revs = RevsInfo
- }
+ RevsInfo =
+ case get_value(<<"removed">>, Props) of
+ true ->
+ [_ | RevsInfo1] = RevsInfo0,
+ RevsInfo1;
+ _ ->
+ RevsInfo0
+ end,
+
+ #doc_info{
+ id = get_value(<<"id">>, Props),
+ high_seq = get_value(<<"seq">>, Props),
+ revs = RevsInfo
+ }
end.
bulk_results_to_errors(Docs, {ok, Results}, interactive_edit) ->
- lists:reverse(lists:foldl(
- fun({_, {ok, _}}, Acc) ->
- Acc;
- ({#doc{id = Id, revs = {Pos, [RevId | _]}}, Error}, Acc) ->
- {_, Error, Reason} = couch_httpd:error_info(Error),
- [ {[{id, Id}, {rev, rev_to_str({Pos, RevId})},
- {error, Error}, {reason, Reason}]} | Acc ]
- end,
- [], lists:zip(Docs, Results)));
-
+ lists:reverse(
+ lists:foldl(
+ fun
+ ({_, {ok, _}}, Acc) ->
+ Acc;
+ ({#doc{id = Id, revs = {Pos, [RevId | _]}}, Error}, Acc) ->
+ {_, Error, Reason} = couch_httpd:error_info(Error),
+ [
+ {[
+ {id, Id},
+ {rev, rev_to_str({Pos, RevId})},
+ {error, Error},
+ {reason, Reason}
+ ]}
+ | Acc
+ ]
+ end,
+ [],
+ lists:zip(Docs, Results)
+ )
+ );
bulk_results_to_errors(Docs, {ok, Results}, replicated_changes) ->
bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit);
-
bulk_results_to_errors(_Docs, {aborted, Results}, interactive_edit) ->
lists:map(
fun({{Id, Rev}, Err}) ->
{_, Error, Reason} = couch_httpd:error_info(Err),
{[{id, Id}, {rev, rev_to_str(Rev)}, {error, Error}, {reason, Reason}]}
end,
- Results);
-
+ Results
+ );
bulk_results_to_errors(_Docs, Results, remote) ->
- lists:reverse(lists:foldl(
- fun({Props}, Acc) ->
- case get_value(<<"error">>, Props, get_value(error, Props)) of
- undefined ->
- Acc;
- Error ->
- Id = get_value(<<"id">>, Props, get_value(id, Props)),
- Rev = get_value(<<"rev">>, Props, get_value(rev, Props)),
- Reason = get_value(<<"reason">>, Props, get_value(reason, Props)),
- [ {[{id, Id}, {rev, rev_to_str(Rev)},
- {error, Error}, {reason, Reason}]} | Acc ]
- end
- end,
- [], Results)).
-
+ lists:reverse(
+ lists:foldl(
+ fun({Props}, Acc) ->
+ case get_value(<<"error">>, Props, get_value(error, Props)) of
+ undefined ->
+ Acc;
+ Error ->
+ Id = get_value(<<"id">>, Props, get_value(id, Props)),
+ Rev = get_value(<<"rev">>, Props, get_value(rev, Props)),
+ Reason = get_value(<<"reason">>, Props, get_value(reason, Props)),
+ [
+ {[
+ {id, Id},
+ {rev, rev_to_str(Rev)},
+ {error, Error},
+ {reason, Reason}
+ ]}
+ | Acc
+ ]
+ end
+ end,
+ [],
+ Results
+ )
+ ).
rev_to_str({_Pos, _Id} = Rev) ->
couch_doc:rev_to_str(Rev);
@@ -852,18 +968,19 @@ rev_to_str(Rev) ->
write_fun() ->
fun(Data) ->
- receive {get_data, Ref, From} ->
- From ! {data, Ref, Data}
+ receive
+ {get_data, Ref, From} ->
+ From ! {data, Ref, Data}
end
end.
stream_doc({JsonBytes, Atts, Boundary, Len}) ->
case erlang:erase({doc_streamer, Boundary}) of
- Pid when is_pid(Pid) ->
- unlink(Pid),
- exit(Pid, kill);
- _ ->
- ok
+ Pid when is_pid(Pid) ->
+ unlink(Pid),
+ exit(Pid, kill);
+ _ ->
+ ok
end,
DocStreamer = spawn_link(
couch_doc,
@@ -878,8 +995,9 @@ stream_doc({0, Id}) ->
stream_doc({LenLeft, Id}) when LenLeft > 0 ->
Ref = make_ref(),
erlang:get({doc_streamer, Id}) ! {get_data, Ref, self()},
- receive {data, Ref, Data} ->
- {ok, Data, {LenLeft - iolist_size(Data), Id}}
+ receive
+ {data, Ref, Data} ->
+ {ok, Data, {LenLeft - iolist_size(Data), Id}}
end.
header_value(Key, Headers) ->
@@ -894,7 +1012,6 @@ header_value(Key, Headers, Default) ->
Default
end.
-
% Normalize an #httpdb{} or #db{} record such that it can be used for
% comparisons. This means remove things like pids and also sort options / props.
normalize_db(#httpdb{} = HttpDb) ->
@@ -907,40 +1024,34 @@ normalize_db(#httpdb{} = HttpDb) ->
retries = HttpDb#httpdb.retries,
http_connections = HttpDb#httpdb.http_connections
};
-
normalize_db(<<DbName/binary>>) ->
DbName.
-
maybe_append_create_query_params(Db, []) ->
Db;
-
maybe_append_create_query_params(Db, CreateParams) ->
NewUrl = Db#httpdb.url ++ "?" ++ mochiweb_util:urlencode(CreateParams),
Db#httpdb{url = NewUrl}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
normalize_http_db_test() ->
- HttpDb = #httpdb{
+ HttpDb = #httpdb{
url = "http://host/db",
auth_props = [{"key", "val"}],
- headers = [{"k2","v2"}, {"k1","v1"}],
+ headers = [{"k2", "v2"}, {"k1", "v1"}],
timeout = 30000,
ibrowse_options = [{k2, v2}, {k1, v1}],
retries = 10,
http_connections = 20
},
Expected = HttpDb#httpdb{
- headers = [{"k1","v1"}, {"k2","v2"}],
+ headers = [{"k1", "v1"}, {"k2", "v2"}],
ibrowse_options = [{k1, v1}, {k2, v2}]
},
?assertEqual(Expected, normalize_db(HttpDb)),
?assertEqual(<<"local">>, normalize_db(<<"local">>)).
-
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_auth.erl b/src/couch_replicator/src/couch_replicator_auth.erl
index 272e10af5..e5c024f7e 100644
--- a/src/couch_replicator/src/couch_replicator_auth.erl
+++ b/src/couch_replicator/src/couch_replicator_auth.erl
@@ -12,7 +12,6 @@
-module(couch_replicator_auth).
-
-export([
initialize/1,
update_headers/2,
@@ -20,17 +19,13 @@
cleanup/1
]).
-
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
-type headers() :: [{string(), string()}].
-type code() :: non_neg_integer().
-
-define(DEFAULT_PLUGINS, "couch_replicator_auth_session,couch_replicator_auth_noop").
-
% Behavior API
% Note for plugin developers: consider using the "auth" field in the source and
@@ -49,7 +44,6 @@
-callback cleanup(term()) -> ok.
-
% Main API
-spec initialize(#httpdb{}) -> {ok, #httpdb{}} | {error, term()}.
@@ -61,13 +55,11 @@ initialize(#httpdb{auth_context = nil} = HttpDb) ->
{error, Error}
end.
-
-spec update_headers(#httpdb{}, headers()) -> {headers(), #httpdb{}}.
update_headers(#httpdb{auth_context = {Mod, Context}} = HttpDb, Headers) ->
{Headers1, Context1} = Mod:update_headers(Context, Headers),
{Headers1, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
-
-spec handle_response(#httpdb{}, code(), headers()) ->
{continue | retry, term()}.
handle_response(#httpdb{} = HttpDb, Code, Headers) ->
@@ -75,13 +67,11 @@ handle_response(#httpdb{} = HttpDb, Code, Headers) ->
{Res, Context1} = Mod:handle_response(Context, Code, Headers),
{Res, HttpDb#httpdb{auth_context = {Mod, Context1}}}.
-
-spec cleanup(#httpdb{}) -> #httpdb{}.
cleanup(#httpdb{auth_context = {Module, Context}} = HttpDb) ->
ok = Module:cleanup(Context),
HttpDb#httpdb{auth_context = nil}.
-
% Private helper functions
-spec get_plugin_modules() -> [atom()].
@@ -89,7 +79,6 @@ get_plugin_modules() ->
Plugins1 = config:get("replicator", "auth_plugins", ?DEFAULT_PLUGINS),
[list_to_atom(Plugin) || Plugin <- string:tokens(Plugins1, ",")].
-
try_initialize([], _HttpDb) ->
{error, no_more_auth_plugins_left_to_try};
try_initialize([Mod | Modules], HttpDb) ->
diff --git a/src/couch_replicator/src/couch_replicator_auth_noop.erl b/src/couch_replicator/src/couch_replicator_auth_noop.erl
index 5dbf13335..e2a7ee839 100644
--- a/src/couch_replicator/src/couch_replicator_auth_noop.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_noop.erl
@@ -12,10 +12,8 @@
-module(couch_replicator_auth_noop).
-
-behavior(couch_replicator_auth).
-
-export([
initialize/1,
update_headers/2,
@@ -23,30 +21,24 @@
cleanup/1
]).
-
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
-type headers() :: [{string(), string()}].
-type code() :: non_neg_integer().
-
-spec initialize(#httpdb{}) -> {ok, #httpdb{}, term()} | ignore.
initialize(#httpdb{} = HttpDb) ->
{ok, HttpDb, nil}.
-
-spec update_headers(term(), headers()) -> {headers(), term()}.
update_headers(Context, Headers) ->
{Headers, Context}.
-
-spec handle_response(term(), code(), headers()) ->
{continue | retry, term()}.
handle_response(Context, _Code, _Headers) ->
{continue, Context}.
-
-spec cleanup(term()) -> ok.
cleanup(_Context) ->
ok.
diff --git a/src/couch_replicator/src/couch_replicator_auth_session.erl b/src/couch_replicator/src/couch_replicator_auth_session.erl
index 4f70cd668..d29600706 100644
--- a/src/couch_replicator/src/couch_replicator_auth_session.erl
+++ b/src/couch_replicator/src/couch_replicator_auth_session.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
% This is the replicator session auth plugin. It implements session based
 % authentication for the replicator. The only public API consists of the functions from
% the couch_replicator_auth behaviour. Most of the logic and state is in the
@@ -46,14 +45,11 @@
% ensure if something goes wrong and one of the endpoints issues invalid
 % cookies, the replicator won't be stuck in a busy loop refreshing them.
-
-module(couch_replicator_auth_session).
-
-behaviour(couch_replicator_auth).
-behaviour(gen_server).
-
-export([
initialize/1,
update_headers/2,
@@ -71,11 +67,9 @@
format_status/2
]).
-
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
-type headers() :: [{string(), string()}].
-type code() :: non_neg_integer().
-type time_sec() :: non_neg_integer().
@@ -84,7 +78,6 @@
-define(MIN_UPDATE_INTERVAL_SEC, 5).
-define(DEFAULT_REFRESH_INTERVAL_SEC, 550).
-
-record(state, {
epoch = 0 :: non_neg_integer(),
cookie :: string() | undefined,
@@ -94,12 +87,11 @@
httpdb_pool :: pid(),
httpdb_ibrowse_options = [] :: list(),
session_url :: string(),
- next_refresh = infinity :: infinity | non_neg_integer(),
+ next_refresh = infinity :: infinity | non_neg_integer(),
refresh_tstamp = 0 :: non_neg_integer(),
require_valid_user = false :: boolean()
}).
-
% Behavior API callbacks
-spec initialize(#httpdb{}) ->
@@ -117,37 +109,31 @@ initialize(#httpdb{} = HttpDb) ->
ignore
end.
-
-spec update_headers(term(), headers()) -> {headers(), term()}.
update_headers({Pid, Epoch, Timeout}, Headers) ->
Args = {update_headers, Headers, Epoch},
{Headers1, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
{Headers1, {Pid, Epoch1, Timeout}}.
-
-spec handle_response(term(), code(), headers()) ->
{continue | retry, term()}.
handle_response({Pid, Epoch, Timeout}, Code, Headers) ->
- Args = {handle_response, Code, Headers, Epoch},
+ Args = {handle_response, Code, Headers, Epoch},
{Retry, Epoch1} = gen_server:call(Pid, Args, Timeout * 10),
{Retry, {Pid, Epoch1, Timeout}}.
-
-spec cleanup(term()) -> ok.
cleanup({Pid, _Epoch, Timeout}) ->
gen_server:call(Pid, stop, Timeout * 10).
-
%% gen_server functions
init([#state{} = State]) ->
{ok, State}.
-
terminate(_Reason, _State) ->
ok.
-
handle_call({update_headers, Headers, _Epoch}, _From, State) ->
case maybe_refresh(State) of
{ok, State1} ->
@@ -159,29 +145,23 @@ handle_call({update_headers, Headers, _Epoch}, _From, State) ->
couch_log:error(LogMsg, [?MODULE, Error]),
{stop, Error, State}
end;
-
handle_call({handle_response, Code, Headers, Epoch}, _From, State) ->
{Retry, State1} = process_response(Code, Headers, Epoch, State),
{reply, {Retry, State1#state.epoch}, State1};
-
handle_call(stop, _From, State) ->
{stop, normal, ok, State}.
-
handle_cast(Msg, State) ->
couch_log:error("~p: Received un-expected cast ~p", [?MODULE, Msg]),
{noreply, State}.
-
handle_info(Msg, State) ->
couch_log:error("~p : Received un-expected message ~p", [?MODULE, Msg]),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
format_status(_Opt, [_PDict, State]) ->
[
{epoch, State#state.epoch},
@@ -190,10 +170,8 @@ format_status(_Opt, [_PDict, State]) ->
{refresh_tstamp, State#state.refresh_tstamp}
].
-
%% Private helper functions
-
-spec init_state(#httpdb{}) ->
{ok, #httpdb{}, #state{}} | {error, term()} | ignore.
init_state(#httpdb{} = HttpDb) ->
@@ -234,7 +212,6 @@ init_state(#httpdb{} = HttpDb) ->
{error, Error}
end.
-
-spec extract_creds(#httpdb{}) ->
{ok, string(), string(), #httpdb{}} | {error, term()}.
extract_creds(#httpdb{} = HttpDb) ->
@@ -248,9 +225,12 @@ extract_creds(#httpdb{} = HttpDb) ->
{ok, User, Pass, HttpDb1}
end.
-
--spec process_response(non_neg_integer(), headers(),
- non_neg_integer(), #state{}) -> {retry | continue, #state{}}.
+-spec process_response(
+ non_neg_integer(),
+ headers(),
+ non_neg_integer(),
+ #state{}
+) -> {retry | continue, #state{}}.
process_response(403, _Headers, Epoch, State) ->
process_auth_failure(Epoch, State);
process_response(401, _Headers, Epoch, State) ->
@@ -258,25 +238,26 @@ process_response(401, _Headers, Epoch, State) ->
process_response(Code, Headers, _Epoch, State) when Code >= 200, Code < 300 ->
     % If the server notices the cookie is about to time out, it can send a new cookie in
% the response headers. Take advantage of that and refresh the cookie.
- State1 = case maybe_update_cookie(Headers, State) of
- {ok, UpdatedState} ->
- UpdatedState;
- {error, cookie_not_found} ->
- State;
- {error, Other} ->
- LogMsg = "~p : Could not parse cookie from response headers ~p",
- couch_log:error(LogMsg, [?MODULE, Other]),
- State
- end,
+ State1 =
+ case maybe_update_cookie(Headers, State) of
+ {ok, UpdatedState} ->
+ UpdatedState;
+ {error, cookie_not_found} ->
+ State;
+ {error, Other} ->
+ LogMsg = "~p : Could not parse cookie from response headers ~p",
+ couch_log:error(LogMsg, [?MODULE, Other]),
+ State
+ end,
{continue, State1};
process_response(_Code, _Headers, _Epoch, State) ->
{continue, State}.
-
-spec process_auth_failure(non_neg_integer(), #state{}) ->
{retry | continue, #state{}}.
-process_auth_failure(Epoch, #state{epoch = StateEpoch} = State)
- when StateEpoch > Epoch ->
+process_auth_failure(Epoch, #state{epoch = StateEpoch} = State) when
+ StateEpoch > Epoch
+->
% This request used an outdated cookie, tell it to immediately retry
% and it will pick up the current cookie when its headers are updated
{retry, State};
@@ -293,7 +274,6 @@ process_auth_failure(Epoch, #state{epoch = Epoch} = State) ->
{retry, schedule_refresh(now_sec(), State)}
end.
-
-spec get_session_url(string()) -> string().
get_session_url(Url) ->
#url{
@@ -311,14 +291,12 @@ get_session_url(Url) ->
lists:concat([Proto, "://", Host, "/_session"])
end.
-
-spec schedule_refresh(non_neg_integer(), #state{}) -> #state{}.
schedule_refresh(T, #state{next_refresh = Tc} = State) when T < Tc ->
State#state{next_refresh = T};
schedule_refresh(_, #state{} = State) ->
State.
-
-spec maybe_refresh(#state{}) -> {ok, #state{}} | {error, term()}.
maybe_refresh(#state{next_refresh = T} = State) ->
case now_sec() >= T of
@@ -328,21 +306,20 @@ maybe_refresh(#state{next_refresh = T} = State) ->
{ok, State}
end.
-
-spec refresh(#state{}) -> {ok, #state{}} | {error, term()}.
refresh(#state{session_url = Url, user = User, pass = Pass} = State) ->
- Body = mochiweb_util:urlencode([{name, User}, {password, Pass}]),
+ Body = mochiweb_util:urlencode([{name, User}, {password, Pass}]),
Headers0 = [{"Content-Type", "application/x-www-form-urlencoded"}],
- Headers = case State#state.require_valid_user of
- true ->
- Headers0 ++ [{"Authorization", "Basic " ++ b64creds(User, Pass)}];
- false ->
- Headers0
- end,
+ Headers =
+ case State#state.require_valid_user of
+ true ->
+ Headers0 ++ [{"Authorization", "Basic " ++ b64creds(User, Pass)}];
+ false ->
+ Headers0
+ end,
Result = http_request(State, Url, Headers, post, Body),
http_response(Result, State).
-
-spec http_request(#state{}, string(), headers(), atom(), iolist()) ->
{ok, string(), headers(), binary()} | {error, term()}.
http_request(#state{httpdb_pool = Pool} = State, Url, Headers, Method, Body) ->
@@ -354,20 +331,26 @@ http_request(#state{httpdb_pool = Pool} = State, Url, Headers, Method, Body) ->
],
{ok, Wrk} = couch_replicator_httpc_pool:get_worker(Pool),
try
- Result = ibrowse:send_req_direct(Wrk, Url, Headers, Method, Body, Opts,
- Timeout),
+ Result = ibrowse:send_req_direct(
+ Wrk,
+ Url,
+ Headers,
+ Method,
+ Body,
+ Opts,
+ Timeout
+ ),
case Result of
{ok, _, ResultHeaders, _} ->
stop_worker_if_server_requested(ResultHeaders, Wrk);
_Other ->
ok
- end,
- Result
+ end,
+ Result
after
ok = couch_replicator_httpc_pool:release_worker_sync(Pool, Wrk)
end.
-
-spec stop_worker_if_server_requested(headers(), pid()) -> ok.
stop_worker_if_server_requested(ResultHeaders0, Worker) ->
ResultHeaders = mochiweb_headers:make(ResultHeaders0),
@@ -383,13 +366,16 @@ stop_worker_if_server_requested(ResultHeaders0, Worker) ->
ok
end.
-
--spec http_response({ok, string(), headers(), binary()} | {error, term()},
- #state{}) -> {ok, #state{}} | {error, term()}.
+-spec http_response(
+ {ok, string(), headers(), binary()} | {error, term()},
+ #state{}
+) -> {ok, #state{}} | {error, term()}.
http_response({ok, "200", Headers, _}, State) ->
maybe_update_cookie(Headers, State);
-http_response({ok, "401", Headers0, _}, #state{session_url = Url,
- user = User}) ->
+http_response({ok, "401", Headers0, _}, #state{
+ session_url = Url,
+ user = User
+}) ->
Headers = mochiweb_headers:make(Headers0),
case mochiweb_headers:get_value("WWW-Authenticate", Headers) of
undefined ->
@@ -406,7 +392,6 @@ http_response({ok, Code, _, _}, #state{session_url = Url, user = User}) ->
http_response({error, Error}, #state{session_url = Url, user = User}) ->
{error, {session_request_failed, Url, User, Error}}.
-
-spec parse_cookie(list()) -> {ok, age(), string()} | {error, term()}.
parse_cookie(Headers0) ->
Headers = mochiweb_headers:make(Headers0),
@@ -425,14 +410,11 @@ parse_cookie(Headers0) ->
end
end.
-
-spec parse_max_age(list()) -> age().
parse_max_age(CaseInsKVs) ->
case mochiweb_headers:get_value("Max-Age", CaseInsKVs) of
String when is_list(String) ->
- try
- list_to_integer(String)
- of
+ try list_to_integer(String) of
MaxAge when MaxAge >= 0 ->
MaxAge;
_ ->
@@ -445,7 +427,6 @@ parse_max_age(CaseInsKVs) ->
undefined
end.
-
-spec maybe_update_cookie(headers(), #state{}) ->
{ok, string()} | {error, term()}.
maybe_update_cookie(ResponseHeaders, State) ->
@@ -456,7 +437,6 @@ maybe_update_cookie(ResponseHeaders, State) ->
{error, Error}
end.
-
-spec update_cookie(#state{}, string(), time_sec(), age()) -> #state{}.
update_cookie(#state{cookie = Cookie} = State, Cookie, _, _) ->
State;
@@ -469,72 +449,72 @@ update_cookie(#state{epoch = Epoch} = State, Cookie, NowSec, MaxAge) ->
},
schedule_refresh(NextRefresh, NewState).
-
-spec next_refresh(time_sec(), age(), time_sec()) -> time_sec().
next_refresh(NowSec, undefined, RefreshInterval) ->
NowSec + RefreshInterval;
-
next_refresh(NowSec, MaxAge, _) when is_integer(MaxAge) ->
    % Apply a fudge factor to account for delays in receiving the cookie
% and / or time adjustments happening over a longer period of time
NowSec + trunc(MaxAge * 0.9).
-
-spec cookie_age_sec(#state{}, time_sec()) -> time_sec().
cookie_age_sec(#state{refresh_tstamp = RefreshTs}, Now) ->
max(0, Now - RefreshTs).
-
-spec now_sec() -> time_sec().
now_sec() ->
{Mega, Sec, _Micro} = os:timestamp(),
Mega * 1000000 + Sec.
-
-spec min_update_interval() -> time_sec().
min_update_interval() ->
- config:get_integer("replicator", "session_min_update_interval",
- ?MIN_UPDATE_INTERVAL_SEC).
-
+ config:get_integer(
+ "replicator",
+ "session_min_update_interval",
+ ?MIN_UPDATE_INTERVAL_SEC
+ ).
-spec refresh_interval() -> integer().
refresh_interval() ->
- config:get_integer("replicator", "session_refresh_interval_sec",
- ?DEFAULT_REFRESH_INTERVAL_SEC).
-
-
+ config:get_integer(
+ "replicator",
+ "session_refresh_interval_sec",
+ ?DEFAULT_REFRESH_INTERVAL_SEC
+ ).
-spec b64creds(string(), string()) -> string().
b64creds(User, Pass) ->
base64:encode_to_string(User ++ ":" ++ Pass).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
get_session_url_test_() ->
- [?_assertEqual(SessionUrl, get_session_url(Url)) || {Url, SessionUrl} <- [
- {"http://host/db", "http://host/_session"},
- {"http://127.0.0.1/db", "http://127.0.0.1/_session"},
- {"http://host/x/y/z", "http://host/_session"},
- {"http://host:5984/db", "http://host:5984/_session"},
- {"https://host/db?q=1", "https://host/_session"}
- ]].
-
+ [
+ ?_assertEqual(SessionUrl, get_session_url(Url))
+ || {Url, SessionUrl} <- [
+ {"http://host/db", "http://host/_session"},
+ {"http://127.0.0.1/db", "http://127.0.0.1/_session"},
+ {"http://host/x/y/z", "http://host/_session"},
+ {"http://host:5984/db", "http://host:5984/_session"},
+ {"https://host/db?q=1", "https://host/_session"}
+ ]
+ ].
extract_creds_success_test() ->
- HttpDb = #httpdb{auth_props = [
- {<<"basic">>, {[
- {<<"username">>, <<"u2">>},
- {<<"password">>, <<"p2">>}
- ]}}
- ]},
+ HttpDb = #httpdb{
+ auth_props = [
+ {<<"basic">>,
+ {[
+ {<<"username">>, <<"u2">>},
+ {<<"password">>, <<"p2">>}
+ ]}}
+ ]
+ },
?assertEqual({ok, "u2", "p2", #httpdb{}}, extract_creds(HttpDb)),
?assertEqual({error, missing_credentials}, extract_creds(#httpdb{})).
-
cookie_update_test_() ->
{
setup,
@@ -562,7 +542,6 @@ cookie_update_test_() ->
}
}.
-
t_do_refresh_without_max_age() ->
?_test(begin
State = #state{next_refresh = 0},
@@ -573,7 +552,6 @@ t_do_refresh_without_max_age() ->
?assert(540 < RefreshInterval andalso RefreshInterval =< 550)
end).
-
t_do_refresh_with_max_age() ->
?_test(begin
State = #state{next_refresh = 0},
@@ -585,7 +563,6 @@ t_do_refresh_with_max_age() ->
?assert(80 < RefreshInterval andalso RefreshInterval =< 90)
end).
-
t_dont_refresh() ->
?_test(begin
State = #state{
@@ -602,7 +579,6 @@ t_dont_refresh() ->
?assertMatch(State2, State3)
end).
-
t_process_auth_failure() ->
?_test(begin
State = #state{epoch = 1, refresh_tstamp = 0},
@@ -611,21 +587,18 @@ t_process_auth_failure() ->
?assert(NextRefresh =< now_sec())
end).
-
t_process_auth_failure_stale_epoch() ->
?_test(begin
State = #state{epoch = 3},
?assertMatch({retry, State}, process_auth_failure(2, State))
end).
-
t_process_auth_failure_too_frequent() ->
?_test(begin
State = #state{epoch = 4, refresh_tstamp = now_sec()},
?assertMatch({continue, _}, process_auth_failure(4, State))
end).
-
t_process_ok_update_cookie() ->
?_test(begin
Headers = [{"set-CookiE", "AuthSession=xyz; Path=/;"}, {"X", "y"}],
@@ -636,7 +609,6 @@ t_process_ok_update_cookie() ->
?assertMatch({continue, #state{cookie = "xyz", epoch = 2}}, Res2)
end).
-
t_process_ok_no_cookie() ->
?_test(begin
Headers = [{"X", "y"}],
@@ -645,37 +617,34 @@ t_process_ok_no_cookie() ->
?assertMatch({continue, State}, Res)
end).
-
t_init_state_fails_on_401() ->
?_test(begin
mock_http_401_response(),
{error, Error} = init_state(httpdb("http://u:p@h")),
- SessionUrl = "http://h/_session",
+ SessionUrl = "http://h/_session",
?assertEqual({session_request_unauthorized, SessionUrl, "u"}, Error)
end).
-
t_init_state_401_with_require_valid_user() ->
?_test(begin
mock_http_401_response_with_require_valid_user(),
- ?assertMatch({ok, #httpdb{}, #state{cookie = "Cookie"}},
- init_state(httpdb("http://u:p@h")))
+ ?assertMatch(
+ {ok, #httpdb{}, #state{cookie = "Cookie"}},
+ init_state(httpdb("http://u:p@h"))
+ )
end).
-
t_init_state_404() ->
?_test(begin
mock_http_404_response(),
?assertEqual(ignore, init_state(httpdb("http://u:p@h")))
end).
-
t_init_state_no_creds() ->
?_test(begin
?_assertEqual(ignore, init_state(httpdb("http://h")))
end).
-
t_init_state_http_error() ->
?_test(begin
mock_http_error_response(),
@@ -684,11 +653,9 @@ t_init_state_http_error() ->
?assertEqual({session_request_failed, SessionUrl, "u", x}, Error)
end).
-
httpdb(Url) ->
couch_replicator_utils:normalize_basic_auth(#httpdb{url = Url}).
-
setup_all() ->
meck:expect(couch_replicator_httpc_pool, get_worker, 1, {ok, worker}),
meck:expect(couch_replicator_httpc_pool, release_worker_sync, 2, ok),
@@ -696,11 +663,9 @@ setup_all() ->
mock_http_cookie_response("Abc"),
ok.
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([
config,
@@ -708,44 +673,37 @@ setup() ->
ibrowse
]).
-
teardown(_) ->
ok.
-
mock_http_cookie_response(Cookie) ->
Resp = {ok, "200", [{"Set-Cookie", "AuthSession=" ++ Cookie}], []},
meck:expect(ibrowse, send_req_direct, 7, Resp).
-
mock_http_cookie_response_with_age(Cookie, Age) ->
AgeKV = "Max-Age=" ++ Age,
CookieKV = "AuthSession=" ++ Cookie,
Resp = {ok, "200", [{"Set-Cookie", CookieKV ++ ";" ++ AgeKV}], []},
meck:expect(ibrowse, send_req_direct, 7, Resp).
-
mock_http_401_response() ->
meck:expect(ibrowse, send_req_direct, 7, {ok, "401", [], []}).
-
mock_http_401_response_with_require_valid_user() ->
Resp1 = {ok, "401", [{"WWW-Authenticate", "Basic realm=\"server\""}], []},
Resp2 = {ok, "200", [{"Set-Cookie", "AuthSession=Cookie"}], []},
meck:expect(ibrowse, send_req_direct, 7, meck:seq([Resp1, Resp2])).
-
mock_http_404_response() ->
meck:expect(ibrowse, send_req_direct, 7, {ok, "404", [], []}).
-
mock_http_error_response() ->
meck:expect(ibrowse, send_req_direct, 7, {error, x}).
-
parse_max_age_test_() ->
- [?_assertEqual(R, parse_max_age(mochiweb_headers:make([{"Max-Age", A}])))
- || {A, R} <- [
+ [
+ ?_assertEqual(R, parse_max_age(mochiweb_headers:make([{"Max-Age", A}])))
+ || {A, R} <- [
{"-10", undefined},
{"\ufeff", undefined},
{"*", undefined},
@@ -758,5 +716,4 @@ parse_max_age_test_() ->
]
].
-
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_changes_reader.erl b/src/couch_replicator/src/couch_replicator_changes_reader.erl
index 2e4df5365..83080b6fb 100644
--- a/src/couch_replicator/src/couch_replicator_changes_reader.erl
+++ b/src/couch_replicator/src/couch_replicator_changes_reader.erl
@@ -28,25 +28,37 @@
start_link(StartSeq, #httpdb{} = Db, ChangesQueue, Options) ->
Parent = self(),
- {ok, spawn_link(fun() ->
- put(last_seq, StartSeq),
- put(retries_left, Db#httpdb.retries),
- ?MODULE:read_changes(Parent, StartSeq, Db#httpdb{retries = 0},
- ChangesQueue, Options)
- end)};
+ {ok,
+ spawn_link(fun() ->
+ put(last_seq, StartSeq),
+ put(retries_left, Db#httpdb.retries),
+ ?MODULE:read_changes(
+ Parent,
+ StartSeq,
+ Db#httpdb{retries = 0},
+ ChangesQueue,
+ Options
+ )
+ end)};
start_link(StartSeq, Db, ChangesQueue, Options) ->
Parent = self(),
- {ok, spawn_link(fun() ->
- ?MODULE:read_changes(Parent, StartSeq, Db, ChangesQueue, Options)
- end)}.
+ {ok,
+ spawn_link(fun() ->
+ ?MODULE:read_changes(Parent, StartSeq, Db, ChangesQueue, Options)
+ end)}.
read_changes(Parent, StartSeq, Db, ChangesQueue, Options) ->
Continuous = couch_util:get_value(continuous, Options),
try
- couch_replicator_api_wrap:changes_since(Db, all_docs, StartSeq,
+ couch_replicator_api_wrap:changes_since(
+ Db,
+ all_docs,
+ StartSeq,
fun(Item) ->
process_change(Item, {Parent, Db, ChangesQueue, Continuous})
- end, Options),
+ end,
+ Options
+ ),
couch_work_queue:close(ChangesQueue)
catch
throw:recurse ->
@@ -56,63 +68,79 @@ read_changes(Parent, StartSeq, Db, ChangesQueue, Options) ->
LS = get(last_seq),
read_changes(Parent, LS, Db, ChangesQueue, Options);
throw:{retry_limit, Error} ->
- couch_stats:increment_counter(
- [couch_replicator, changes_read_failures]
- ),
- case get(retries_left) of
- N when N > 0 ->
- put(retries_left, N - 1),
- LastSeq = get(last_seq),
- Db2 = case LastSeq of
- StartSeq ->
- couch_log:notice("Retrying _changes request to source database ~s"
- " with since=~p in ~p seconds",
- [couch_replicator_api_wrap:db_uri(Db), LastSeq, Db#httpdb.wait / 1000]),
- ok = timer:sleep(Db#httpdb.wait),
- Db#httpdb{wait = 2 * Db#httpdb.wait};
- _ ->
- couch_log:notice("Retrying _changes request to source database ~s"
- " with since=~p", [couch_replicator_api_wrap:db_uri(Db), LastSeq]),
- Db
- end,
- read_changes(Parent, LastSeq, Db2, ChangesQueue, Options);
- _ ->
- exit(Error)
- end
+ couch_stats:increment_counter(
+ [couch_replicator, changes_read_failures]
+ ),
+ case get(retries_left) of
+ N when N > 0 ->
+ put(retries_left, N - 1),
+ LastSeq = get(last_seq),
+ Db2 =
+ case LastSeq of
+ StartSeq ->
+ couch_log:notice(
+ "Retrying _changes request to source database ~s"
+ " with since=~p in ~p seconds",
+ [
+ couch_replicator_api_wrap:db_uri(Db),
+ LastSeq,
+ Db#httpdb.wait / 1000
+ ]
+ ),
+ ok = timer:sleep(Db#httpdb.wait),
+ Db#httpdb{wait = 2 * Db#httpdb.wait};
+ _ ->
+ couch_log:notice(
+ "Retrying _changes request to source database ~s"
+ " with since=~p",
+ [couch_replicator_api_wrap:db_uri(Db), LastSeq]
+ ),
+ Db
+ end,
+ read_changes(Parent, LastSeq, Db2, ChangesQueue, Options);
+ _ ->
+ exit(Error)
+ end
end.
-
process_change(#doc_info{id = <<>>} = DocInfo, {_, Db, _, _}) ->
% Previous CouchDB releases had a bug which allowed a doc with an empty ID
 % to be inserted into databases. Such a doc is impossible to GET.
- couch_log:error("Replicator: ignoring document with empty ID in "
+ couch_log:error(
+ "Replicator: ignoring document with empty ID in "
"source database `~s` (_changes sequence ~p)",
- [couch_replicator_api_wrap:db_uri(Db), DocInfo#doc_info.high_seq]);
-
+ [couch_replicator_api_wrap:db_uri(Db), DocInfo#doc_info.high_seq]
+ );
process_change(#doc_info{id = Id} = DocInfo, {Parent, Db, ChangesQueue, _}) ->
case is_doc_id_too_long(byte_size(Id)) of
true ->
SourceDb = couch_replicator_api_wrap:db_uri(Db),
- couch_log:error("Replicator: document id `~s...` from source db "
- " `~64s` is too long, ignoring.", [Id, SourceDb]),
+ couch_log:error(
+ "Replicator: document id `~s...` from source db "
+ " `~64s` is too long, ignoring.",
+ [Id, SourceDb]
+ ),
Stats = couch_replicator_stats:new([{doc_write_failures, 1}]),
ok = gen_server:call(Parent, {add_stats, Stats}, infinity);
false ->
ok = couch_work_queue:queue(ChangesQueue, DocInfo),
put(last_seq, DocInfo#doc_info.high_seq)
end;
-
process_change({last_seq, LS}, {_Parent, _, ChangesQueue, true = _Continuous}) ->
% LS should never be undefined, but it doesn't hurt to be defensive inside
% the replicator.
- Seq = case LS of undefined -> get(last_seq); _ -> LS end,
+ Seq =
+ case LS of
+ undefined -> get(last_seq);
+ _ -> LS
+ end,
OldSeq = get(last_seq),
- if Seq == OldSeq -> ok; true ->
- ok = couch_work_queue:queue(ChangesQueue, {last_seq, Seq})
+ if
+ Seq == OldSeq -> ok;
+ true -> ok = couch_work_queue:queue(ChangesQueue, {last_seq, Seq})
end,
put(last_seq, Seq),
throw(recurse);
-
process_change({last_seq, _}, _) ->
% This clause is unreachable today, but let's plan ahead for the future
% where we checkpoint against last_seq instead of the sequence of the last
diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl
index 18de1e825..8db320433 100644
--- a/src/couch_replicator/src/couch_replicator_clustering.erl
+++ b/src/couch_replicator/src/couch_replicator_clustering.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
% Maintain cluster membership and stability notifications for replications.
% On changes to cluster membership, broadcast events to `replication` gen_event.
% Listeners will get `{cluster, stable}` or `{cluster, unstable}` events.
@@ -23,7 +22,6 @@
% This module is also in charge of calculating ownership of replications based
 % on where their _replicator db document shards live.
-
-module(couch_replicator_clustering).
-behaviour(gen_server).
@@ -64,8 +62,10 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
--define(DEFAULT_QUIET_PERIOD, 60). % seconds
--define(DEFAULT_START_PERIOD, 5). % seconds
+% seconds
+-define(DEFAULT_QUIET_PERIOD, 60).
+% seconds
+-define(DEFAULT_START_PERIOD, 5).
-define(RELISTEN_DELAY, 5000).
-record(state, {
@@ -73,12 +73,10 @@
cluster_stable :: boolean()
}).
-
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
% owner/2 function computes ownership for a {DbName, DocId} tuple
 % It returns `unstable` if the cluster is considered unstable, i.e. it has changed
 % recently, or returns the node() which is the owner.
@@ -94,23 +92,22 @@ owner(<<"shards/", _/binary>> = DbName, DocId) ->
owner(_DbName, _DocId) ->
node().
-
-spec is_stable() -> true | false.
is_stable() ->
gen_server:call(?MODULE, is_stable).
-
-spec link_cluster_event_listener(atom(), atom(), list()) -> pid().
-link_cluster_event_listener(Mod, Fun, Args)
- when is_atom(Mod), is_atom(Fun), is_list(Args) ->
+link_cluster_event_listener(Mod, Fun, Args) when
+ is_atom(Mod), is_atom(Fun), is_list(Args)
+->
CallbackFun =
- fun(Event = {cluster, _}) -> erlang:apply(Mod, Fun, Args ++ [Event]);
- (_) -> ok
+ fun
+ (Event = {cluster, _}) -> erlang:apply(Mod, Fun, Args ++ [Event]);
+ (_) -> ok
end,
{ok, Pid} = couch_replicator_notifier:start_link(CallbackFun),
Pid.
-
% Mem3 cluster callbacks
cluster_unstable(Server) ->
@@ -127,80 +124,80 @@ cluster_stable(Server) ->
couch_log:notice("~s : cluster stable", [?MODULE]),
Server.
-
% gen_server callbacks
init([]) ->
ok = config:listen_for_changes(?MODULE, nil),
- Period = abs(config:get_integer("replicator", "cluster_quiet_period",
- ?DEFAULT_QUIET_PERIOD)),
- StartPeriod = abs(config:get_integer("replicator", "cluster_start_period",
- ?DEFAULT_START_PERIOD)),
+ Period = abs(
+ config:get_integer(
+ "replicator",
+ "cluster_quiet_period",
+ ?DEFAULT_QUIET_PERIOD
+ )
+ ),
+ StartPeriod = abs(
+ config:get_integer(
+ "replicator",
+ "cluster_start_period",
+ ?DEFAULT_START_PERIOD
+ )
+ ),
couch_stats:update_gauge([couch_replicator, cluster_is_stable], 0),
- {ok, Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(), StartPeriod,
- Period),
+ {ok, Mem3Cluster} = mem3_cluster:start_link(
+ ?MODULE,
+ self(),
+ StartPeriod,
+ Period
+ ),
{ok, #state{mem3_cluster_pid = Mem3Cluster, cluster_stable = false}}.
-
terminate(_Reason, _State) ->
ok.
-
handle_call(is_stable, _From, #state{cluster_stable = IsStable} = State) ->
{reply, IsStable, State};
-
handle_call(set_stable, _From, State) ->
{reply, ok, State#state{cluster_stable = true}};
-
handle_call(set_unstable, _From, State) ->
{reply, ok, State#state{cluster_stable = false}}.
-
handle_cast({set_period, Period}, #state{mem3_cluster_pid = Pid} = State) ->
ok = mem3_cluster:set_period(Pid, Period),
{noreply, State}.
-
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
%% Internal functions
-
handle_config_change("replicator", "cluster_quiet_period", V, _, S) ->
ok = gen_server:cast(?MODULE, {set_period, list_to_integer(V)}),
{ok, S};
handle_config_change(_, _, _, _, S) ->
{ok, S}.
-
-handle_config_terminate(_, stop, _) -> ok;
+handle_config_terminate(_, stop, _) ->
+ ok;
handle_config_terminate(_S, _R, _St) ->
Pid = whereis(?MODULE),
erlang:send_after(?RELISTEN_DELAY, Pid, restart_config_listener).
-
-spec owner_int(binary(), binary()) -> node().
owner_int(ShardName, DocId) ->
DbName = mem3:dbname(ShardName),
Live = [node() | nodes()],
Shards = mem3:shards(DbName, DocId),
- Nodes = [N || #shard{node=N} <- Shards, lists:member(N, Live)],
+ Nodes = [N || #shard{node = N} <- Shards, lists:member(N, Live)],
mem3:owner(DbName, DocId, Nodes).
-
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
replicator_clustering_test_() ->
{
setup,
@@ -217,7 +214,6 @@ replicator_clustering_test_() ->
}
}.
-
t_stable_callback() ->
?_test(begin
?assertEqual(false, is_stable()),
@@ -225,7 +221,6 @@ t_stable_callback() ->
?assertEqual(true, is_stable())
end).
-
t_unstable_callback() ->
?_test(begin
cluster_stable(whereis(?MODULE)),
@@ -234,7 +229,6 @@ t_unstable_callback() ->
?assertEqual(false, is_stable())
end).
-
setup_all() ->
meck:expect(couch_log, notice, 2, ok),
meck:expect(config, get, fun(_, _, Default) -> Default end),
@@ -242,11 +236,9 @@ setup_all() ->
meck:expect(couch_stats, update_gauge, 2, ok),
meck:expect(couch_replicator_notifier, notify, 1, ok).
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([
config,
@@ -258,22 +250,20 @@ setup() ->
{ok, Pid} = start_link(),
Pid.
-
teardown(Pid) ->
stop_clustering_process(Pid).
-
stop_clustering_process() ->
stop_clustering_process(whereis(?MODULE)).
-
stop_clustering_process(undefined) ->
ok;
-
stop_clustering_process(Pid) when is_pid(Pid) ->
Ref = erlang:monitor(process, Pid),
unlink(Pid),
exit(Pid, kill),
- receive {'DOWN', Ref, _, _, _} -> ok end.
+ receive
+ {'DOWN', Ref, _, _, _} -> ok
+ end.
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl
index d1c3d93fc..a158d2609 100644
--- a/src/couch_replicator/src/couch_replicator_connection.erl
+++ b/src/couch_replicator/src/couch_replicator_connection.erl
@@ -20,18 +20,18 @@
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3
]).
-export([
- acquire/1,
- acquire/2,
- release/1
+ acquire/1,
+ acquire/2,
+ release/1
]).
-export([
@@ -44,7 +44,6 @@
-define(DEFAULT_CLOSE_INTERVAL, 90000).
-define(RELISTEN_DELAY, 5000).
-
-record(state, {
close_interval,
timer
@@ -59,40 +58,43 @@
mref
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init([]) ->
process_flag(trap_exit, true),
- ?MODULE = ets:new(?MODULE, [named_table, public,
- {keypos, #connection.worker}]),
+ ?MODULE = ets:new(?MODULE, [
+ named_table,
+ public,
+ {keypos, #connection.worker}
+ ]),
ok = config:listen_for_changes(?MODULE, nil),
- Interval = config:get_integer("replicator", "connection_close_interval",
- ?DEFAULT_CLOSE_INTERVAL),
+ Interval = config:get_integer(
+ "replicator",
+ "connection_close_interval",
+ ?DEFAULT_CLOSE_INTERVAL
+ ),
Timer = erlang:send_after(Interval, self(), close_idle_connections),
ibrowse:add_config([
{inactivity_timeout, Interval},
{worker_trap_exits, false}
]),
- {ok, #state{close_interval=Interval, timer=Timer}}.
+ {ok, #state{close_interval = Interval, timer = Timer}}.
acquire(Url) ->
acquire(Url, undefined).
acquire(Url, ProxyUrl) when is_binary(Url) ->
acquire(binary_to_list(Url), ProxyUrl);
-
acquire(Url, ProxyUrl) when is_binary(ProxyUrl) ->
acquire(Url, binary_to_list(ProxyUrl));
-
acquire(Url0, ProxyUrl0) ->
Url = couch_util:url_strip_password(Url0),
- ProxyUrl = case ProxyUrl0 of
- undefined -> undefined;
- _ -> couch_util:url_strip_password(ProxyUrl0)
- end,
+ ProxyUrl =
+ case ProxyUrl0 of
+ undefined -> undefined;
+ _ -> couch_util:url_strip_password(ProxyUrl0)
+ end,
case gen_server:call(?MODULE, {acquire, Url, ProxyUrl}) of
{ok, Worker} ->
link(Worker),
@@ -105,28 +107,37 @@ acquire(Url0, ProxyUrl0) ->
{error, Reason}
end.
-
release(Worker) ->
unlink(Worker),
gen_server:cast(?MODULE, {release, Worker}).
-
handle_call({acquire, Url, ProxyUrl}, From, State) ->
{Pid, _Ref} = From,
case {ibrowse_lib:parse_url(Url), parse_proxy_url(ProxyUrl)} of
- {#url{host=Host, port=Port}, #url{host=ProxyHost, port=ProxyPort}} ->
+ {#url{host = Host, port = Port}, #url{host = ProxyHost, port = ProxyPort}} ->
Pat = #connection{
- host=Host, port=Port,
- proxy_host=ProxyHost, proxy_port=ProxyPort,
- mref=undefined, _='_'},
+ host = Host,
+ port = Port,
+ proxy_host = ProxyHost,
+ proxy_port = ProxyPort,
+ mref = undefined,
+ _ = '_'
+ },
case ets:match_object(?MODULE, Pat, 1) of
'$end_of_table' ->
{reply, {error, all_allocated}, State};
{[Worker], _Cont} ->
- couch_stats:increment_counter([couch_replicator, connection,
- acquires]),
- ets:insert(?MODULE, Worker#connection{mref=monitor(process,
- Pid)}),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ acquires
+ ]),
+ ets:insert(?MODULE, Worker#connection{
+ mref = monitor(
+ process,
+ Pid
+ )
+ }),
{reply, {ok, Worker#connection.worker}, State}
end;
{{error, invalid_uri}, _} ->
@@ -134,26 +145,30 @@ handle_call({acquire, Url, ProxyUrl}, From, State) ->
{_, {error, invalid_uri}} ->
{reply, {error, invalid_uri}, State}
end;
-
handle_call({create, Url, ProxyUrl, Worker}, From, State) ->
{Pid, _Ref} = From,
case {ibrowse_lib:parse_url(Url), parse_proxy_url(ProxyUrl)} of
- {#url{host=Host, port=Port}, #url{host=ProxyHost, port=ProxyPort}} ->
+ {#url{host = Host, port = Port}, #url{host = ProxyHost, port = ProxyPort}} ->
link(Worker),
- couch_stats:increment_counter([couch_replicator, connection,
- creates]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ creates
+ ]),
true = ets:insert_new(
?MODULE,
#connection{
- host=Host, port=Port,
- proxy_host=ProxyHost, proxy_port=ProxyPort,
- worker=Worker,
- mref=monitor(process, Pid)}
+ host = Host,
+ port = Port,
+ proxy_host = ProxyHost,
+ proxy_port = ProxyPort,
+ worker = Worker,
+ mref = monitor(process, Pid)
+ }
),
{reply, ok, State}
end.
-
handle_cast({release, WorkerPid}, State) ->
couch_stats:increment_counter([couch_replicator, connection, releases]),
case ets:lookup(?MODULE, WorkerPid) of
@@ -162,39 +177,45 @@ handle_cast({release, WorkerPid}, State) ->
MRef when is_reference(MRef) -> demonitor(MRef, [flush]);
undefined -> ok
end,
- ets:insert(?MODULE, Worker#connection{mref=undefined});
+ ets:insert(?MODULE, Worker#connection{mref = undefined});
[] ->
ok
end,
{noreply, State};
-
handle_cast({connection_close_interval, V}, State) ->
erlang:cancel_timer(State#state.timer),
NewTimer = erlang:send_after(V, self(), close_idle_connections),
ibrowse:add_config([{inactivity_timeout, V}]),
- {noreply, State#state{close_interval=V, timer=NewTimer}}.
-
+ {noreply, State#state{close_interval = V, timer = NewTimer}}.
% owner crashed
handle_info({'DOWN', Ref, process, _Pid, _Reason}, State) ->
- couch_stats:increment_counter([couch_replicator, connection,
- owner_crashes]),
- Conns = ets:match_object(?MODULE, #connection{mref = Ref, _='_'}),
- lists:foreach(fun(Conn) ->
- couch_stats:increment_counter([couch_replicator, connection, closes]),
- delete_worker(Conn)
- end, Conns),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ owner_crashes
+ ]),
+ Conns = ets:match_object(?MODULE, #connection{mref = Ref, _ = '_'}),
+ lists:foreach(
+ fun(Conn) ->
+ couch_stats:increment_counter([couch_replicator, connection, closes]),
+ delete_worker(Conn)
+ end,
+ Conns
+ ),
{noreply, State};
-
% worker crashed
handle_info({'EXIT', Pid, Reason}, State) ->
- couch_stats:increment_counter([couch_replicator, connection,
- worker_crashes]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ connection,
+ worker_crashes
+ ]),
case ets:lookup(?MODULE, Pid) of
[] ->
ok;
[Worker] ->
- #connection{host=Host, port=Port} = Worker,
+ #connection{host = Host, port = Port} = Worker,
maybe_log_worker_death(Host, Port, Reason),
case Worker#connection.mref of
MRef when is_reference(MRef) -> demonitor(MRef, [flush]);
@@ -203,42 +224,38 @@ handle_info({'EXIT', Pid, Reason}, State) ->
ets:delete(?MODULE, Pid)
end,
{noreply, State};
-
handle_info(close_idle_connections, State) ->
#state{
- close_interval=Interval,
- timer=Timer
+ close_interval = Interval,
+ timer = Timer
} = State,
- Conns = ets:match_object(?MODULE, #connection{mref=undefined, _='_'}),
- lists:foreach(fun(Conn) ->
- couch_stats:increment_counter([couch_replicator, connection, closes]),
- delete_worker(Conn)
- end, Conns),
+ Conns = ets:match_object(?MODULE, #connection{mref = undefined, _ = '_'}),
+ lists:foreach(
+ fun(Conn) ->
+ couch_stats:increment_counter([couch_replicator, connection, closes]),
+ delete_worker(Conn)
+ end,
+ Conns
+ ),
erlang:cancel_timer(Timer),
NewTimer = erlang:send_after(Interval, self(), close_idle_connections),
- {noreply, State#state{timer=NewTimer}};
-
+ {noreply, State#state{timer = NewTimer}};
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
terminate(_Reason, _State) ->
ok.
-
maybe_log_worker_death(_Host, _Port, normal) ->
ok;
-
maybe_log_worker_death(Host, Port, Reason) ->
ErrMsg = "Replication connection to: ~p:~p died with reason ~p",
couch_log:info(ErrMsg, [Host, Port, Reason]).
-
-spec delete_worker(#connection{}) -> ok.
delete_worker(Worker) ->
ets:delete(?MODULE, Worker#connection.worker),
@@ -246,25 +263,19 @@ delete_worker(Worker) ->
spawn(fun() -> ibrowse_http_client:stop(Worker#connection.worker) end),
ok.
-
handle_config_change("replicator", "connection_close_interval", V, _, S) ->
- ok = gen_server:cast(?MODULE, {connection_close_interval,
- list_to_integer(V)}),
+ ok = gen_server:cast(?MODULE, {connection_close_interval, list_to_integer(V)}),
{ok, S};
-
handle_config_change(_, _, _, _, S) ->
{ok, S}.
-
handle_config_terminate(_, stop, _) ->
ok;
-
handle_config_terminate(_, _, _) ->
Pid = whereis(?MODULE),
erlang:send_after(?RELISTEN_DELAY, Pid, restart_config_listener).
-
parse_proxy_url(undefined) ->
- #url{host=undefined, port=undefined};
+ #url{host = undefined, port = undefined};
parse_proxy_url(ProxyUrl) ->
ibrowse_lib:parse_url(ProxyUrl).
diff --git a/src/couch_replicator/src/couch_replicator_db_changes.erl b/src/couch_replicator/src/couch_replicator_db_changes.erl
index 92b0222c4..947af51b4 100644
--- a/src/couch_replicator/src/couch_replicator_db_changes.erl
+++ b/src/couch_replicator/src/couch_replicator_db_changes.erl
@@ -15,42 +15,42 @@
-behaviour(gen_server).
-export([
- start_link/0
+ start_link/0
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3
]).
-export([
- notify_cluster_event/2
+ notify_cluster_event/2
]).
-record(state, {
- event_listener :: pid(),
- mdb_changes :: pid() | nil
+ event_listener :: pid(),
+ mdb_changes :: pid() | nil
}).
-
-spec notify_cluster_event(pid(), {cluster, any()}) -> ok.
notify_cluster_event(Server, {cluster, _} = Event) ->
gen_server:cast(Server, Event).
-
-spec start_link() ->
{ok, pid()} | ignore | {error, any()}.
start_link() ->
gen_server:start_link(?MODULE, [], []).
-
init([]) ->
- EvtPid = couch_replicator_clustering:link_cluster_event_listener(?MODULE,
- notify_cluster_event, [self()]),
+ EvtPid = couch_replicator_clustering:link_cluster_event_listener(
+ ?MODULE,
+ notify_cluster_event,
+ [self()]
+ ),
State = #state{event_listener = EvtPid, mdb_changes = nil},
case couch_replicator_clustering:is_stable() of
true ->
@@ -59,45 +59,40 @@ init([]) ->
{ok, State}
end.
-
terminate(_Reason, _State) ->
ok.
-
handle_call(_Msg, _From, State) ->
{reply, {error, invalid_call}, State}.
-
handle_cast({cluster, unstable}, State) ->
{noreply, stop_mdb_changes(State)};
-
handle_cast({cluster, stable}, State) ->
{noreply, restart_mdb_changes(State)}.
-
handle_info(_Msg, State) ->
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
-spec restart_mdb_changes(#state{}) -> #state{}.
restart_mdb_changes(#state{mdb_changes = nil} = State) ->
Suffix = <<"_replicator">>,
CallbackMod = couch_replicator_doc_processor,
Options = [skip_ddocs],
- {ok, Pid} = couch_multidb_changes:start_link(Suffix, CallbackMod, nil,
- Options),
+ {ok, Pid} = couch_multidb_changes:start_link(
+ Suffix,
+ CallbackMod,
+ nil,
+ Options
+ ),
couch_stats:increment_counter([couch_replicator, db_scans]),
couch_log:notice("Started replicator db changes listener ~p", [Pid]),
State#state{mdb_changes = Pid};
-
restart_mdb_changes(#state{mdb_changes = _Pid} = State) ->
restart_mdb_changes(stop_mdb_changes(State)).
-
-spec stop_mdb_changes(#state{}) -> #state{}.
stop_mdb_changes(#state{mdb_changes = nil} = State) ->
State;
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
index ed6670615..436d7c44d 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl
@@ -20,12 +20,12 @@
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3
]).
-export([
@@ -54,15 +54,15 @@
]).
-define(DEFAULT_UPDATE_DOCS, false).
--define(ERROR_MAX_BACKOFF_EXPONENT, 12). % ~ 1 day on average
+% ~ 1 day on average
+-define(ERROR_MAX_BACKOFF_EXPONENT, 12).
-define(TS_DAY_SEC, 86400).
-define(INITIAL_BACKOFF_EXPONENT, 64).
-define(MIN_FILTER_DELAY_SEC, 60).
--type filter_type() :: nil | view | user | docids | mango.
+-type filter_type() :: nil | view | user | docids | mango.
-type repstate() :: initializing | error | scheduled.
-
-record(rdoc, {
id :: db_doc_id() | '_' | {any(), '_'},
state :: repstate() | '_',
@@ -75,7 +75,6 @@
last_updated :: erlang:timestamp() | '_'
}).
-
% couch_multidb_changes API callbacks
db_created(DbName, Server) ->
@@ -83,35 +82,31 @@ db_created(DbName, Server) ->
couch_replicator_docs:ensure_rep_ddoc_exists(DbName),
Server.
-
db_deleted(DbName, Server) ->
couch_stats:increment_counter([couch_replicator, docs, dbs_deleted]),
ok = gen_server:call(?MODULE, {clean_up_replications, DbName}, infinity),
Server.
-
db_found(DbName, Server) ->
couch_stats:increment_counter([couch_replicator, docs, dbs_found]),
couch_replicator_docs:ensure_rep_ddoc_exists(DbName),
Server.
-
db_change(DbName, {ChangeProps} = Change, Server) ->
couch_stats:increment_counter([couch_replicator, docs, db_changes]),
try
ok = process_change(DbName, Change)
catch
- exit:{Error, {gen_server, call, [?MODULE, _, _]}} ->
- ErrMsg = "~p exited ~p while processing change from db ~p",
- couch_log:error(ErrMsg, [?MODULE, Error, DbName]);
- _Tag:Error ->
- {RepProps} = get_json_value(doc, ChangeProps),
- DocId = get_json_value(<<"_id">>, RepProps),
- couch_replicator_docs:update_failed(DbName, DocId, Error)
+ exit:{Error, {gen_server, call, [?MODULE, _, _]}} ->
+ ErrMsg = "~p exited ~p while processing change from db ~p",
+ couch_log:error(ErrMsg, [?MODULE, Error, DbName]);
+ _Tag:Error ->
+ {RepProps} = get_json_value(doc, ChangeProps),
+ DocId = get_json_value(<<"_id">>, RepProps),
+ couch_replicator_docs:update_failed(DbName, DocId, Error)
end,
Server.
-
-spec get_worker_ref(db_doc_id()) -> reference() | nil.
get_worker_ref({DbName, DocId}) when is_binary(DbName), is_binary(DocId) ->
case ets:lookup(?MODULE, {DbName, DocId}) of
@@ -123,46 +118,43 @@ get_worker_ref({DbName, DocId}) when is_binary(DbName), is_binary(DocId) ->
nil
end.
-
% Cluster membership change notification callback
-spec notify_cluster_event(pid(), {cluster, any()}) -> ok.
notify_cluster_event(Server, {cluster, _} = Event) ->
gen_server:cast(Server, Event).
-
process_change(DbName, {Change}) ->
{RepProps} = JsonRepDoc = get_json_value(doc, Change),
DocId = get_json_value(<<"_id">>, RepProps),
Owner = couch_replicator_clustering:owner(DbName, DocId),
Id = {DbName, DocId},
case {Owner, get_json_value(deleted, Change, false)} of
- {_, true} ->
- ok = gen_server:call(?MODULE, {removed, Id}, infinity);
- {unstable, false} ->
- couch_log:notice("Not starting '~s' as cluster is unstable", [DocId]);
- {ThisNode, false} when ThisNode =:= node() ->
- case get_json_value(<<"_replication_state">>, RepProps) of
- undefined ->
- ok = process_updated(Id, JsonRepDoc);
- <<"triggered">> ->
- maybe_remove_state_fields(DbName, DocId),
- ok = process_updated(Id, JsonRepDoc);
- <<"completed">> ->
- ok = gen_server:call(?MODULE, {completed, Id}, infinity);
- <<"error">> ->
- % Handle replications started from older versions of replicator
- % which wrote transient errors to replication docs
- maybe_remove_state_fields(DbName, DocId),
- ok = process_updated(Id, JsonRepDoc);
- <<"failed">> ->
+ {_, true} ->
+ ok = gen_server:call(?MODULE, {removed, Id}, infinity);
+ {unstable, false} ->
+ couch_log:notice("Not starting '~s' as cluster is unstable", [DocId]);
+ {ThisNode, false} when ThisNode =:= node() ->
+ case get_json_value(<<"_replication_state">>, RepProps) of
+ undefined ->
+ ok = process_updated(Id, JsonRepDoc);
+ <<"triggered">> ->
+ maybe_remove_state_fields(DbName, DocId),
+ ok = process_updated(Id, JsonRepDoc);
+ <<"completed">> ->
+ ok = gen_server:call(?MODULE, {completed, Id}, infinity);
+ <<"error">> ->
+ % Handle replications started from older versions of replicator
+ % which wrote transient errors to replication docs
+ maybe_remove_state_fields(DbName, DocId),
+ ok = process_updated(Id, JsonRepDoc);
+ <<"failed">> ->
+ ok
+ end;
+ {Owner, false} ->
ok
- end;
- {Owner, false} ->
- ok
end,
ok.
-
maybe_remove_state_fields(DbName, DocId) ->
case update_docs() of
true ->
@@ -171,7 +163,6 @@ maybe_remove_state_fields(DbName, DocId) ->
couch_replicator_docs:remove_state_fields(DbName, DocId)
end.
-
process_updated({DbName, _DocId} = Id, JsonRepDoc) ->
% Parsing replication doc (but not calculating the id) could throw an
% exception which would indicate this document is malformed. This exception
@@ -180,53 +171,54 @@ process_updated({DbName, _DocId} = Id, JsonRepDoc) ->
% problem.
Rep0 = couch_replicator_docs:parse_rep_doc_without_id(JsonRepDoc),
Rep = Rep0#rep{db_name = DbName, start_time = os:timestamp()},
- Filter = case couch_replicator_filters:parse(Rep#rep.options) of
- {ok, nil} ->
- nil;
- {ok, {user, _FName, _QP}} ->
- user;
- {ok, {view, _FName, _QP}} ->
- view;
- {ok, {docids, _DocIds}} ->
- docids;
- {ok, {mango, _Selector}} ->
- mango;
- {error, FilterError} ->
- throw(FilterError)
- end,
+ Filter =
+ case couch_replicator_filters:parse(Rep#rep.options) of
+ {ok, nil} ->
+ nil;
+ {ok, {user, _FName, _QP}} ->
+ user;
+ {ok, {view, _FName, _QP}} ->
+ view;
+ {ok, {docids, _DocIds}} ->
+ docids;
+ {ok, {mango, _Selector}} ->
+ mango;
+ {error, FilterError} ->
+ throw(FilterError)
+ end,
gen_server:call(?MODULE, {updated, Id, Rep, Filter}, infinity).
-
% Doc processor gen_server API and callbacks
start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
init([]) ->
- ?MODULE = ets:new(?MODULE, [named_table, {keypos, #rdoc.id},
- {read_concurrency, true}, {write_concurrency, true}]),
- couch_replicator_clustering:link_cluster_event_listener(?MODULE,
- notify_cluster_event, [self()]),
+ ?MODULE = ets:new(?MODULE, [
+ named_table,
+ {keypos, #rdoc.id},
+ {read_concurrency, true},
+ {write_concurrency, true}
+ ]),
+ couch_replicator_clustering:link_cluster_event_listener(
+ ?MODULE,
+ notify_cluster_event,
+ [self()]
+ ),
{ok, nil}.
-
terminate(_Reason, _State) ->
ok.
-
handle_call({updated, Id, Rep, Filter}, _From, State) ->
ok = updated_doc(Id, Rep, Filter),
{reply, ok, State};
-
handle_call({removed, Id}, _From, State) ->
ok = removed_doc(Id),
{reply, ok, State};
-
handle_call({completed, Id}, _From, State) ->
true = ets:delete(?MODULE, Id),
{reply, ok, State};
-
handle_call({clean_up_replications, DbName}, _From, State) ->
ok = removed_db(DbName),
{reply, ok, State}.
@@ -234,29 +226,29 @@ handle_call({clean_up_replications, DbName}, _From, State) ->
handle_cast({cluster, unstable}, State) ->
% Ignoring unstable state transition
{noreply, State};
-
handle_cast({cluster, stable}, State) ->
% Membership changed; recheck all the replication document ownership
nil = ets:foldl(fun cluster_membership_foldl/2, nil, ?MODULE),
{noreply, State};
-
handle_cast(Msg, State) ->
{stop, {error, unexpected_message, Msg}, State}.
-
-handle_info({'DOWN', _, _, _, #doc_worker_result{id = Id, wref = Ref,
- result = Res}}, State) ->
+handle_info(
+ {'DOWN', _, _, _, #doc_worker_result{
+ id = Id,
+ wref = Ref,
+ result = Res
+ }},
+ State
+) ->
ok = worker_returned(Ref, Id, Res),
{noreply, State};
-
handle_info(_Msg, State) ->
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
% Doc processor gen_server private helper functions
% Handle doc update -- add to ets, then start a worker to try to turn it into
@@ -289,7 +281,6 @@ updated_doc(Id, Rep, Filter) ->
ok
end.
-
% Return current #rep{} record if any. If replication hasn't been submitted
% to the scheduler yet, #rep{} record will be in the document processor's
% ETS table, otherwise query scheduler for the #rep{} record.
@@ -308,81 +299,81 @@ current_rep({DbName, DocId}) when is_binary(DbName), is_binary(DocId) ->
Rep
end.
-
-spec worker_returned(reference(), db_doc_id(), rep_start_result()) -> ok.
worker_returned(Ref, Id, {ok, RepId}) ->
case ets:lookup(?MODULE, Id) of
- [#rdoc{worker = Ref} = Row] ->
- Row0 = Row#rdoc{
- state = scheduled,
- errcnt = 0,
- worker = nil,
- last_updated = os:timestamp()
- },
- NewRow = case Row0 of
- #rdoc{rid = RepId, filter = user} ->
- % Filtered replication id didn't change.
- Row0;
- #rdoc{rid = nil, filter = user} ->
- % Calculated new replication id for a filtered replication. Make
- % sure to schedule another check as filter code could change.
- % Replication starts could have been failing, so also clear
- % error count.
- Row0#rdoc{rid = RepId};
- #rdoc{rid = OldRepId, filter = user} ->
- % Replication id of existing replication job with filter has
- % changed. Remove old replication job from scheduler and
- % schedule check to check for future changes.
- ok = couch_replicator_scheduler:remove_job(OldRepId),
- Msg = io_lib:format("Replication id changed: ~p -> ~p", [
- OldRepId, RepId]),
- Row0#rdoc{rid = RepId, info = couch_util:to_binary(Msg)};
- #rdoc{rid = nil} ->
- % Calculated new replication id for non-filtered replication.
- % Remove replication doc body, after this we won't need it
- % anymore.
- Row0#rdoc{rep=nil, rid=RepId, info=nil}
- end,
- true = ets:insert(?MODULE, NewRow),
- ok = maybe_update_doc_triggered(Row#rdoc.rep, RepId),
- ok = maybe_start_worker(Id);
- _ ->
- ok % doc could have been deleted, ignore
+ [#rdoc{worker = Ref} = Row] ->
+ Row0 = Row#rdoc{
+ state = scheduled,
+ errcnt = 0,
+ worker = nil,
+ last_updated = os:timestamp()
+ },
+ NewRow =
+ case Row0 of
+ #rdoc{rid = RepId, filter = user} ->
+ % Filtered replication id didn't change.
+ Row0;
+ #rdoc{rid = nil, filter = user} ->
+ % Calculated new replication id for a filtered replication. Make
+ % sure to schedule another check as filter code could change.
+ % Replication starts could have been failing, so also clear
+ % error count.
+ Row0#rdoc{rid = RepId};
+ #rdoc{rid = OldRepId, filter = user} ->
+ % Replication id of existing replication job with filter has
+ % changed. Remove old replication job from scheduler and
+ % schedule check to check for future changes.
+ ok = couch_replicator_scheduler:remove_job(OldRepId),
+ Msg = io_lib:format("Replication id changed: ~p -> ~p", [
+ OldRepId, RepId
+ ]),
+ Row0#rdoc{rid = RepId, info = couch_util:to_binary(Msg)};
+ #rdoc{rid = nil} ->
+ % Calculated new replication id for non-filtered replication.
+ % Remove replication doc body, after this we won't need it
+ % anymore.
+ Row0#rdoc{rep = nil, rid = RepId, info = nil}
+ end,
+ true = ets:insert(?MODULE, NewRow),
+ ok = maybe_update_doc_triggered(Row#rdoc.rep, RepId),
+ ok = maybe_start_worker(Id);
+ _ ->
+ % doc could have been deleted, ignore
+ ok
end,
ok;
-
worker_returned(_Ref, _Id, ignore) ->
ok;
-
worker_returned(Ref, Id, {temporary_error, Reason}) ->
case ets:lookup(?MODULE, Id) of
- [#rdoc{worker = Ref, errcnt = ErrCnt} = Row] ->
- NewRow = Row#rdoc{
- rid = nil,
- state = error,
- info = Reason,
- errcnt = ErrCnt + 1,
- worker = nil,
- last_updated = os:timestamp()
- },
- true = ets:insert(?MODULE, NewRow),
- ok = maybe_update_doc_error(NewRow#rdoc.rep, Reason),
- ok = maybe_start_worker(Id);
- _ ->
- ok % doc could have been deleted, ignore
+ [#rdoc{worker = Ref, errcnt = ErrCnt} = Row] ->
+ NewRow = Row#rdoc{
+ rid = nil,
+ state = error,
+ info = Reason,
+ errcnt = ErrCnt + 1,
+ worker = nil,
+ last_updated = os:timestamp()
+ },
+ true = ets:insert(?MODULE, NewRow),
+ ok = maybe_update_doc_error(NewRow#rdoc.rep, Reason),
+ ok = maybe_start_worker(Id);
+ _ ->
+ % doc could have been deleted, ignore
+ ok
end,
ok;
-
worker_returned(Ref, Id, {permanent_failure, _Reason}) ->
case ets:lookup(?MODULE, Id) of
- [#rdoc{worker = Ref}] ->
- true = ets:delete(?MODULE, Id);
- _ ->
- ok % doc could have been deleted, ignore
+ [#rdoc{worker = Ref}] ->
+ true = ets:delete(?MODULE, Id);
+ _ ->
+ % doc could have been deleted, ignore
+ ok
end,
ok.
-
-spec maybe_update_doc_error(#rep{}, any()) -> ok.
maybe_update_doc_error(Rep, Reason) ->
case update_docs() of
@@ -392,7 +383,6 @@ maybe_update_doc_error(Rep, Reason) ->
ok
end.
-
-spec maybe_update_doc_triggered(#rep{}, rep_id()) -> ok.
maybe_update_doc_triggered(Rep, RepId) ->
case update_docs() of
@@ -402,7 +392,6 @@ maybe_update_doc_triggered(Rep, RepId) ->
ok
end.
-
-spec error_backoff(non_neg_integer()) -> seconds().
error_backoff(ErrCnt) ->
Exp = min(ErrCnt, ?ERROR_MAX_BACKOFF_EXPONENT),
@@ -411,7 +400,6 @@ error_backoff(ErrCnt) ->
% on average. Then 1 minute and so on.
couch_rand:uniform(?INITIAL_BACKOFF_EXPONENT bsl Exp).
-
-spec filter_backoff() -> seconds().
filter_backoff() ->
Total = ets:info(?MODULE, size),
@@ -424,7 +412,6 @@ filter_backoff() ->
Range = 1 + min(2 * (Total / 10), ?TS_DAY_SEC),
?MIN_FILTER_DELAY_SEC + couch_rand:uniform(round(Range)).
-
% Document removed from db -- clear ets table and remove all scheduled jobs
-spec removed_doc(db_doc_id()) -> ok.
removed_doc({DbName, DocId} = Id) ->
@@ -432,7 +419,6 @@ removed_doc({DbName, DocId} = Id) ->
RepIds = couch_replicator_scheduler:find_jobs_by_doc(DbName, DocId),
lists:foreach(fun couch_replicator_scheduler:remove_job/1, RepIds).
-
% Whole db shard is gone -- remove all its ets rows and stop jobs
-spec removed_db(binary()) -> ok.
removed_db(DbName) ->
@@ -441,32 +427,30 @@ removed_db(DbName) ->
RepIds = couch_replicator_scheduler:find_jobs_by_dbname(DbName),
lists:foreach(fun couch_replicator_scheduler:remove_job/1, RepIds).
-
% Spawn a worker process which will attempt to calculate a replication id, then
% start a replication. Returns a process monitor reference. The worker is
% guaranteed to exit with rep_start_result() type only.
-spec maybe_start_worker(db_doc_id()) -> ok.
maybe_start_worker(Id) ->
case ets:lookup(?MODULE, Id) of
- [] ->
- ok;
- [#rdoc{state = scheduled, filter = Filter}] when Filter =/= user ->
- ok;
- [#rdoc{rep = Rep} = Doc] ->
- % For any replication with a user created filter function, periodically
- % (every `filter_backoff/0` seconds) to try to see if the user filter
- % has changed by using a worker to check for changes. When the worker
- % returns check if replication ID has changed. If it hasn't keep
- % checking (spawn another worker and so on). If it has stop the job
- % with the old ID and continue checking.
- Wait = get_worker_wait(Doc),
- Ref = make_ref(),
- true = ets:insert(?MODULE, Doc#rdoc{worker = Ref}),
- couch_replicator_doc_processor_worker:spawn_worker(Id, Rep, Wait, Ref),
- ok
+ [] ->
+ ok;
+ [#rdoc{state = scheduled, filter = Filter}] when Filter =/= user ->
+ ok;
+ [#rdoc{rep = Rep} = Doc] ->
+ % For any replication with a user created filter function, periodically
+ % (every `filter_backoff/0` seconds) to try to see if the user filter
+ % has changed by using a worker to check for changes. When the worker
+ % returns check if replication ID has changed. If it hasn't keep
+ % checking (spawn another worker and so on). If it has stop the job
+ % with the old ID and continue checking.
+ Wait = get_worker_wait(Doc),
+ Ref = make_ref(),
+ true = ets:insert(?MODULE, Doc#rdoc{worker = Ref}),
+ couch_replicator_doc_processor_worker:spawn_worker(Id, Rep, Wait, Ref),
+ ok
end.
-
-spec get_worker_wait(#rdoc{}) -> seconds().
get_worker_wait(#rdoc{state = scheduled, filter = user}) ->
filter_backoff();
@@ -475,45 +459,52 @@ get_worker_wait(#rdoc{state = error, errcnt = ErrCnt}) ->
get_worker_wait(#rdoc{state = initializing}) ->
0.
-
-spec update_docs() -> boolean().
update_docs() ->
config:get_boolean("replicator", "update_docs", ?DEFAULT_UPDATE_DOCS).
-
% _scheduler/docs HTTP endpoint helpers
-spec docs([atom()]) -> [{[_]}] | [].
docs(States) ->
HealthThreshold = couch_replicator_scheduler:health_threshold(),
- ets:foldl(fun(RDoc, Acc) ->
- case ejson_doc(RDoc, HealthThreshold) of
- nil ->
- Acc; % Could have been deleted if job just completed
- {Props} = EJson ->
- {state, DocState} = lists:keyfind(state, 1, Props),
- case ejson_doc_state_filter(DocState, States) of
- true ->
- [EJson | Acc];
- false ->
- Acc
- end
- end
- end, [], ?MODULE).
-
+ ets:foldl(
+ fun(RDoc, Acc) ->
+ case ejson_doc(RDoc, HealthThreshold) of
+ nil ->
+ % Could have been deleted if job just completed
+ Acc;
+ {Props} = EJson ->
+ {state, DocState} = lists:keyfind(state, 1, Props),
+ case ejson_doc_state_filter(DocState, States) of
+ true ->
+ [EJson | Acc];
+ false ->
+ Acc
+ end
+ end
+ end,
+ [],
+ ?MODULE
+ ).
-spec doc(binary(), binary()) -> {ok, {[_]}} | {error, not_found}.
doc(Db, DocId) ->
HealthThreshold = couch_replicator_scheduler:health_threshold(),
- Res = (catch ets:foldl(fun(RDoc, nil) ->
- {Shard, RDocId} = RDoc#rdoc.id,
- case {mem3:dbname(Shard), RDocId} of
- {Db, DocId} ->
- throw({found, ejson_doc(RDoc, HealthThreshold)});
- {_OtherDb, _OtherDocId} ->
- nil
- end
- end, nil, ?MODULE)),
+ Res =
+ (catch ets:foldl(
+ fun(RDoc, nil) ->
+ {Shard, RDocId} = RDoc#rdoc.id,
+ case {mem3:dbname(Shard), RDocId} of
+ {Db, DocId} ->
+ throw({found, ejson_doc(RDoc, HealthThreshold)});
+ {_OtherDb, _OtherDocId} ->
+ nil
+ end
+ end,
+ nil,
+ ?MODULE
+ )),
case Res of
{found, DocInfo} ->
{ok, DocInfo};
@@ -521,7 +512,6 @@ doc(Db, DocId) ->
{error, not_found}
end.
-
-spec doc_lookup(binary(), binary(), integer()) ->
{ok, {[_]}} | {error, not_found}.
doc_lookup(Db, DocId, HealthThreshold) ->
@@ -532,14 +522,12 @@ doc_lookup(Db, DocId, HealthThreshold) ->
{error, not_found}
end.
-
-spec ejson_rep_id(rep_id() | nil) -> binary() | null.
ejson_rep_id(nil) ->
null;
ejson_rep_id({BaseId, Ext}) ->
iolist_to_binary([BaseId, Ext]).
-
-spec ejson_doc(#rdoc{}, non_neg_integer()) -> {[_]} | nil.
ejson_doc(#rdoc{state = scheduled} = RDoc, HealthThreshold) ->
#rdoc{id = {DbName, DocId}, rid = RepId} = RDoc,
@@ -552,18 +540,18 @@ ejson_doc(#rdoc{state = scheduled} = RDoc, HealthThreshold) ->
{doc_id, DocId},
{database, DbName},
{id, ejson_rep_id(RepId)},
- {node, node()} | JobProps
+ {node, node()}
+ | JobProps
]}
end;
-
ejson_doc(#rdoc{state = RepState} = RDoc, _HealthThreshold) ->
#rdoc{
- id = {DbName, DocId},
- info = StateInfo,
- rid = RepId,
- errcnt = ErrorCount,
- last_updated = StateTime,
- rep = Rep
+ id = {DbName, DocId},
+ info = StateInfo,
+ rid = RepId,
+ errcnt = ErrorCount,
+ last_updated = StateTime,
+ rep = Rep
} = RDoc,
{[
{doc_id, DocId},
@@ -577,14 +565,12 @@ ejson_doc(#rdoc{state = RepState} = RDoc, _HealthThreshold) ->
{start_time, couch_replicator_utils:iso8601(Rep#rep.start_time)}
]}.
-
-spec ejson_doc_state_filter(atom(), [atom()]) -> boolean().
ejson_doc_state_filter(_DocState, []) ->
true;
ejson_doc_state_filter(State, States) when is_list(States), is_atom(State) ->
lists:member(State, States).
-
-spec cluster_membership_foldl(#rdoc{}, nil) -> nil.
cluster_membership_foldl(#rdoc{id = {DbName, DocId} = Id, rid = RepId}, nil) ->
case couch_replicator_clustering:owner(DbName, DocId) of
@@ -599,7 +585,6 @@ cluster_membership_foldl(#rdoc{id = {DbName, DocId} = Id, rid = RepId}, nil) ->
nil
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -611,7 +596,6 @@ cluster_membership_foldl(#rdoc{id = {DbName, DocId} = Id, rid = RepId}, nil) ->
-define(R1, {"1", ""}).
-define(R2, {"2", ""}).
-
doc_processor_test_() ->
{
setup,
@@ -640,7 +624,6 @@ doc_processor_test_() ->
}
}.
-
% Can't parse replication doc, so should write failure state to document.
t_bad_change() ->
?_test(begin
@@ -648,7 +631,6 @@ t_bad_change() ->
?assert(updated_doc_with_failed_state())
end).
-
% Regular change, parse to a #rep{} and then add job.
t_regular_change() ->
?_test(begin
@@ -658,15 +640,13 @@ t_regular_change() ->
?assert(started_worker({?DB, ?DOC1}))
end).
-
% Handle cases where doc processor exits or crashes while processing a change
t_change_with_doc_processor_crash() ->
?_test(begin
mock_existing_jobs_lookup([]),
?assertEqual(acc, db_change(?EXIT_DB, change(), acc)),
?assert(failed_state_not_updated())
- end).
-
+ end).
% Regular change, parse to a #rep{} and then add job but there is already
% a running job with same Id found.
@@ -678,7 +658,6 @@ t_change_with_existing_job() ->
?assert(started_worker({?DB, ?DOC1}))
end).
-
% Change is a deletion, and job is running, so remove job.
t_deleted_change() ->
?_test(begin
@@ -687,7 +666,6 @@ t_deleted_change() ->
?assert(removed_job(?R2))
end).
-
% Change is in `triggered` state. Remove legacy state and add job.
t_triggered_change() ->
?_test(begin
@@ -698,7 +676,6 @@ t_triggered_change() ->
?assert(started_worker({?DB, ?DOC1}))
end).
-
% Change is in `completed` state, so skip over it.
t_completed_change() ->
?_test(begin
@@ -708,7 +685,6 @@ t_completed_change() ->
?assert(did_not_spawn_worker())
end).
-
% Completed change comes for what used to be an active job. In this case
% remove entry from doc_processor's ets (because there is no linkage or
% callback mechanism for scheduler to tell doc_processor a replication just
@@ -723,7 +699,6 @@ t_active_replication_completed() ->
?assertNot(ets:member(?MODULE, {?DB, ?DOC1}))
end).
-
% Change is in `error` state. Remove legacy state and retry
% running the job. This state was used for transient errors which are not
% written to the document anymore.
@@ -736,7 +711,6 @@ t_error_change() ->
?assert(started_worker({?DB, ?DOC1}))
end).
-
% Change is in `failed` state. This is a terminal state and it will not
% be tried again, so skip over it.
t_failed_change() ->
@@ -747,27 +721,24 @@ t_failed_change() ->
?assert(did_not_spawn_worker())
end).
-
% Normal change, but according to cluster ownership algorithm, replication
% belongs to a different node, so this node should skip it.
t_change_for_different_node() ->
- ?_test(begin
+ ?_test(begin
meck:expect(couch_replicator_clustering, owner, 2, different_node),
?assertEqual(ok, process_change(?DB, change())),
?assert(did_not_spawn_worker())
- end).
-
+ end).
% Change handled when cluster is unstable (nodes are added or removed), so
% job is not added. A rescan will be triggered soon and change will be
% evaluated again.
t_change_when_cluster_unstable() ->
- ?_test(begin
- meck:expect(couch_replicator_clustering, owner, 2, unstable),
- ?assertEqual(ok, process_change(?DB, change())),
- ?assert(did_not_spawn_worker())
- end).
-
+ ?_test(begin
+ meck:expect(couch_replicator_clustering, owner, 2, unstable),
+ ?assertEqual(ok, process_change(?DB, change())),
+ ?assert(did_not_spawn_worker())
+ end).
% Check if docs/0 function produces expected ejson after adding a job
t_ejson_docs() ->
@@ -776,12 +747,17 @@ t_ejson_docs() ->
?assertEqual(ok, process_change(?DB, change())),
?assert(ets:member(?MODULE, {?DB, ?DOC1})),
EJsonDocs = docs([]),
- ?assertMatch([{[_|_]}], EJsonDocs),
+ ?assertMatch([{[_ | _]}], EJsonDocs),
[{DocProps}] = EJsonDocs,
- {value, StateTime, DocProps1} = lists:keytake(last_updated, 1,
- DocProps),
- ?assertMatch({last_updated, BinVal1} when is_binary(BinVal1),
- StateTime),
+ {value, StateTime, DocProps1} = lists:keytake(
+ last_updated,
+ 1,
+ DocProps
+ ),
+ ?assertMatch(
+ {last_updated, BinVal1} when is_binary(BinVal1),
+ StateTime
+ ),
{value, StartTime, DocProps2} = lists:keytake(start_time, 1, DocProps1),
?assertMatch({start_time, BinVal2} when is_binary(BinVal2), StartTime),
ExpectedProps = [
@@ -796,11 +772,10 @@ t_ejson_docs() ->
?assertEqual(ExpectedProps, lists:usort(DocProps2))
end).
-
% Check that when cluster membership changes, records from doc processor and job
% scheduler get removed
t_cluster_membership_foldl() ->
- ?_test(begin
+ ?_test(begin
mock_existing_jobs_lookup([test_rep(?R1)]),
?assertEqual(ok, process_change(?DB, change())),
meck:expect(couch_replicator_clustering, owner, 2, different_node),
@@ -809,8 +784,7 @@ t_cluster_membership_foldl() ->
meck:wait(2, couch_replicator_scheduler, find_jobs_by_doc, 2, 5000),
?assertNot(ets:member(?MODULE, {?DB, ?DOC1})),
?assert(removed_job(?R1))
- end).
-
+ end).
get_worker_ref_test_() ->
{
@@ -830,10 +804,8 @@ get_worker_ref_test_() ->
end)
}.
-
% Test helper functions
-
setup_all() ->
meck:expect(couch_log, info, 2, ok),
meck:expect(couch_log, notice, 2, ok),
@@ -842,8 +814,12 @@ setup_all() ->
meck:expect(config, get, fun(_, _, Default) -> Default end),
meck:expect(config, listen_for_changes, 2, ok),
meck:expect(couch_replicator_clustering, owner, 2, node()),
- meck:expect(couch_replicator_clustering, link_cluster_event_listener, 3,
- ok),
+ meck:expect(
+ couch_replicator_clustering,
+ link_cluster_event_listener,
+ 3,
+ ok
+ ),
meck:expect(couch_replicator_doc_processor_worker, spawn_worker, fun
({?EXIT_DB, _}, _, _, _) -> exit(kapow);
(_, _, _, _) -> pid
@@ -852,11 +828,9 @@ setup_all() ->
meck:expect(couch_replicator_docs, remove_state_fields, 2, ok),
meck:expect(couch_replicator_docs, update_failed, 3, ok).
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset([
config,
@@ -873,30 +847,29 @@ setup() ->
unlink(Pid),
Pid.
-
teardown(Pid) ->
- test_util:stop_sync(Pid, kill, 1000). % 1s wait should suffice
-
+ % 1s wait should suffice
+ test_util:stop_sync(Pid, kill, 1000).
removed_state_fields() ->
meck:called(couch_replicator_docs, remove_state_fields, [?DB, ?DOC1]).
-
started_worker(_Id) ->
1 == meck:num_calls(couch_replicator_doc_processor_worker, spawn_worker, 4).
-
removed_job(Id) ->
meck:called(couch_replicator_scheduler, remove_job, [test_rep(Id)]).
-
did_not_remove_state_fields() ->
0 == meck:num_calls(couch_replicator_docs, remove_state_fields, '_').
-
did_not_spawn_worker() ->
- 0 == meck:num_calls(couch_replicator_doc_processor_worker, spawn_worker,
- '_').
+ 0 ==
+ meck:num_calls(
+ couch_replicator_doc_processor_worker,
+ spawn_worker,
+ '_'
+ ).
updated_doc_with_failed_state() ->
1 == meck:num_calls(couch_replicator_docs, update_failed, '_').
@@ -910,53 +883,52 @@ mock_existing_jobs_lookup(ExistingJobs) ->
(?DB, ?DOC1) -> ExistingJobs
end).
-
test_rep(Id) ->
- #rep{id = Id, start_time = {0, 0, 0}}.
-
+ #rep{id = Id, start_time = {0, 0, 0}}.
change() ->
{[
{<<"id">>, ?DOC1},
- {doc, {[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>}
- ]}}
+ {doc,
+ {[
+ {<<"_id">>, ?DOC1},
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>}
+ ]}}
]}.
-
change(State) ->
{[
{<<"id">>, ?DOC1},
- {doc, {[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>},
- {<<"_replication_state">>, State}
- ]}}
+ {doc,
+ {[
+ {<<"_id">>, ?DOC1},
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>},
+ {<<"_replication_state">>, State}
+ ]}}
]}.
-
deleted_change() ->
{[
{<<"id">>, ?DOC1},
{<<"deleted">>, true},
- {doc, {[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>}
- ]}}
+ {doc,
+ {[
+ {<<"_id">>, ?DOC1},
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>}
+ ]}}
]}.
-
bad_change() ->
{[
{<<"id">>, ?DOC2},
- {doc, {[
- {<<"_id">>, ?DOC2},
- {<<"source">>, <<"src">>}
- ]}}
+ {doc,
+ {[
+ {<<"_id">>, ?DOC2},
+ {<<"source">>, <<"src">>}
+ ]}}
]}.
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
index a4c829323..5d971151b 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
@@ -32,7 +32,6 @@
% hung forever.
-define(WORKER_TIMEOUT_MSEC, 61000).
-
% Spawn a worker which attempts to calculate replication id then add a
% replication job to scheduler. This function creates a monitor on the worker;
% the worker will then exit with the #doc_worker_result{} record within
@@ -46,7 +45,6 @@ spawn_worker(Id, Rep, WaitSec, WRef) ->
end),
Pid.
-
% Private functions
-spec worker_fun(db_doc_id(), #rep{}, seconds(), reference()) -> no_return().
@@ -72,13 +70,15 @@ worker_fun(Id, Rep, WaitSec, WRef) ->
exit(Pid, kill),
{DbName, DocId} = Id,
TimeoutSec = round(?WORKER_TIMEOUT_MSEC / 1000),
- Msg = io_lib:format("Replication for db ~p doc ~p failed to start due "
- "to timeout after ~B seconds", [DbName, DocId, TimeoutSec]),
+ Msg = io_lib:format(
+ "Replication for db ~p doc ~p failed to start due "
+ "to timeout after ~B seconds",
+ [DbName, DocId, TimeoutSec]
+ ),
Result = {temporary_error, couch_util:to_binary(Msg)},
exit(#doc_worker_result{id = Id, wref = WRef, result = Result})
end.
-
% Try to start a replication. Used by a worker. This function should return
% rep_start_result(), also throws {filter_fetch_error, Reason} if cannot fetch
% filter. It can also block for an indeterminate amount of time while fetching
@@ -86,51 +86,54 @@ worker_fun(Id, Rep, WaitSec, WRef) ->
maybe_start_replication(Id, RepWithoutId, WRef) ->
Rep = couch_replicator_docs:update_rep_id(RepWithoutId),
case maybe_add_job_to_scheduler(Id, Rep, WRef) of
- ignore ->
- ignore;
- {ok, RepId} ->
- {ok, RepId};
- {temporary_error, Reason} ->
- {temporary_error, Reason};
- {permanent_failure, Reason} ->
- {DbName, DocId} = Id,
- couch_replicator_docs:update_failed(DbName, DocId, Reason),
- {permanent_failure, Reason}
+ ignore ->
+ ignore;
+ {ok, RepId} ->
+ {ok, RepId};
+ {temporary_error, Reason} ->
+ {temporary_error, Reason};
+ {permanent_failure, Reason} ->
+ {DbName, DocId} = Id,
+ couch_replicator_docs:update_failed(DbName, DocId, Reason),
+ {permanent_failure, Reason}
end.
-
-spec maybe_add_job_to_scheduler(db_doc_id(), #rep{}, reference()) ->
- rep_start_result().
+ rep_start_result().
maybe_add_job_to_scheduler({DbName, DocId}, Rep, WRef) ->
RepId = Rep#rep.id,
case couch_replicator_scheduler:rep_state(RepId) of
- nil ->
- % Before adding a job check that this worker is still the current
- % worker. This is to handle a race condition where a worker which was
- % sleeping and then checking a replication filter may inadvertently
- % re-add a replication which was already deleted.
- case couch_replicator_doc_processor:get_worker_ref({DbName, DocId}) of
- WRef ->
- ok = couch_replicator_scheduler:add_job(Rep),
+ nil ->
+ % Before adding a job check that this worker is still the current
+ % worker. This is to handle a race condition where a worker which was
+ % sleeping and then checking a replication filter may inadvertently
+ % re-add a replication which was already deleted.
+ case couch_replicator_doc_processor:get_worker_ref({DbName, DocId}) of
+ WRef ->
+ ok = couch_replicator_scheduler:add_job(Rep),
+ {ok, RepId};
+ _NilOrOtherWRef ->
+ ignore
+ end;
+ #rep{doc_id = DocId} ->
{ok, RepId};
- _NilOrOtherWRef ->
- ignore
- end;
- #rep{doc_id = DocId} ->
- {ok, RepId};
- #rep{doc_id = null} ->
- Msg = io_lib:format("Replication `~s` specified by document `~s`"
- " already running as a transient replication, started via"
- " `_replicate` API endpoint", [pp_rep_id(RepId), DocId]),
- {temporary_error, couch_util:to_binary(Msg)};
- #rep{db_name = OtherDb, doc_id = OtherDocId} ->
- Msg = io_lib:format("Replication `~s` specified by document `~s`"
- " already started, triggered by document `~s` from db `~s`",
- [pp_rep_id(RepId), DocId, OtherDocId, mem3:dbname(OtherDb)]),
- {permanent_failure, couch_util:to_binary(Msg)}
+ #rep{doc_id = null} ->
+ Msg = io_lib:format(
+ "Replication `~s` specified by document `~s`"
+ " already running as a transient replication, started via"
+ " `_replicate` API endpoint",
+ [pp_rep_id(RepId), DocId]
+ ),
+ {temporary_error, couch_util:to_binary(Msg)};
+ #rep{db_name = OtherDb, doc_id = OtherDocId} ->
+ Msg = io_lib:format(
+ "Replication `~s` specified by document `~s`"
+ " already started, triggered by document `~s` from db `~s`",
+ [pp_rep_id(RepId), DocId, OtherDocId, mem3:dbname(OtherDb)]
+ ),
+ {permanent_failure, couch_util:to_binary(Msg)}
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -139,7 +142,6 @@ maybe_add_job_to_scheduler({DbName, DocId}, Rep, WRef) ->
-define(DOC1, <<"doc1">>).
-define(R1, {"ad08e05057046eabe898a2572bbfb573", ""}).
-
doc_processor_worker_test_() ->
{
foreach,
@@ -156,94 +158,105 @@ doc_processor_worker_test_() ->
]
}.
-
% No job is running yet, so a new replication job should be added.
t_should_add_job() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertEqual({ok, ?R1}, maybe_start_replication(Id, Rep, nil)),
- ?assert(added_job())
- end).
-
+ ?_test(begin
+ Id = {?DB, ?DOC1},
+ Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
+ ?assertEqual({ok, ?R1}, maybe_start_replication(Id, Rep, nil)),
+ ?assert(added_job())
+ end).
% Replication is already running, with same doc id. Ignore change.
t_already_running_same_docid() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- mock_already_running(?DB, ?DOC1),
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertEqual({ok, ?R1}, maybe_start_replication(Id, Rep, nil)),
- ?assert(did_not_add_job())
- end).
-
+ ?_test(begin
+ Id = {?DB, ?DOC1},
+ mock_already_running(?DB, ?DOC1),
+ Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
+ ?assertEqual({ok, ?R1}, maybe_start_replication(Id, Rep, nil)),
+ ?assert(did_not_add_job())
+ end).
% There is a transient replication with same replication id running. Ignore.
t_already_running_transient() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- mock_already_running(null, null),
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertMatch({temporary_error, _}, maybe_start_replication(Id, Rep,
- nil)),
- ?assert(did_not_add_job())
- end).
-
+ ?_test(begin
+ Id = {?DB, ?DOC1},
+ mock_already_running(null, null),
+ Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
+ ?assertMatch(
+ {temporary_error, _},
+ maybe_start_replication(
+ Id,
+ Rep,
+ nil
+ )
+ ),
+ ?assert(did_not_add_job())
+ end).
% There is a duplicate replication potentially from a different db and doc.
% Write permanent failure to doc.
t_already_running_other_db_other_doc() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- mock_already_running(<<"otherdb">>, <<"otherdoc">>),
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- ?assertMatch({permanent_failure, _}, maybe_start_replication(Id, Rep,
- nil)),
- ?assert(did_not_add_job()),
- 1 == meck:num_calls(couch_replicator_docs, update_failed, '_')
- end).
-
+ ?_test(begin
+ Id = {?DB, ?DOC1},
+ mock_already_running(<<"otherdb">>, <<"otherdoc">>),
+ Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
+ ?assertMatch(
+ {permanent_failure, _},
+ maybe_start_replication(
+ Id,
+ Rep,
+ nil
+ )
+ ),
+ ?assert(did_not_add_job()),
+ 1 == meck:num_calls(couch_replicator_docs, update_failed, '_')
+ end).
% Should spawn worker
t_spawn_worker() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- WRef = make_ref(),
- meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, WRef),
- Pid = spawn_worker(Id, Rep, 0, WRef),
- Res = receive {'DOWN', _Ref, process, Pid, Reason} -> Reason
- after 1000 -> timeout end,
- Expect = #doc_worker_result{id = Id, wref = WRef, result = {ok, ?R1}},
- ?assertEqual(Expect, Res),
- ?assert(added_job())
- end).
-
+ ?_test(begin
+ Id = {?DB, ?DOC1},
+ Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
+ WRef = make_ref(),
+ meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, WRef),
+ Pid = spawn_worker(Id, Rep, 0, WRef),
+ Res =
+ receive
+ {'DOWN', _Ref, process, Pid, Reason} -> Reason
+ after 1000 -> timeout
+ end,
+ Expect = #doc_worker_result{id = Id, wref = WRef, result = {ok, ?R1}},
+ ?assertEqual(Expect, Res),
+ ?assert(added_job())
+ end).
% Should not add job if by the time worker got to fetching the filter
% and getting a replication id, replication doc was deleted
t_ignore_if_doc_deleted() ->
- ?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, nil),
- ?assertEqual(ignore, maybe_start_replication(Id, Rep, make_ref())),
- ?assertNot(added_job())
- end).
-
+ ?_test(begin
+ Id = {?DB, ?DOC1},
+ Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
+ meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, nil),
+ ?assertEqual(ignore, maybe_start_replication(Id, Rep, make_ref())),
+ ?assertNot(added_job())
+ end).
% Should not add job if by the time worker got to fetching the filter
% and building a replication id, another worker was spawned.
t_ignore_if_worker_ref_does_not_match() ->
?_test(begin
- Id = {?DB, ?DOC1},
- Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
- meck:expect(couch_replicator_doc_processor, get_worker_ref, 1,
- make_ref()),
- ?assertEqual(ignore, maybe_start_replication(Id, Rep, make_ref())),
- ?assertNot(added_job())
- end).
-
+ Id = {?DB, ?DOC1},
+ Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
+ meck:expect(
+ couch_replicator_doc_processor,
+ get_worker_ref,
+ 1,
+ make_ref()
+ ),
+ ?assertEqual(ignore, maybe_start_replication(Id, Rep, make_ref())),
+ ?assertNot(added_job())
+ end).
% Test helper functions
@@ -256,29 +269,27 @@ setup() ->
meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, nil),
ok.
-
teardown(_) ->
meck:unload().
-
mock_already_running(DbName, DocId) ->
- meck:expect(couch_replicator_scheduler, rep_state,
- fun(RepId) -> #rep{id = RepId, doc_id = DocId, db_name = DbName} end).
-
+ meck:expect(
+ couch_replicator_scheduler,
+ rep_state,
+ fun(RepId) -> #rep{id = RepId, doc_id = DocId, db_name = DbName} end
+ ).
added_job() ->
1 == meck:num_calls(couch_replicator_scheduler, add_job, '_').
-
did_not_add_job() ->
0 == meck:num_calls(couch_replicator_scheduler, add_job, '_').
-
change() ->
{[
- {<<"_id">>, ?DOC1},
- {<<"source">>, <<"http://srchost.local/src">>},
- {<<"target">>, <<"http://tgthost.local/tgt">>}
- ]}.
+ {<<"_id">>, ?DOC1},
+ {<<"source">>, <<"http://srchost.local/src">>},
+ {<<"target">>, <<"http://tgthost.local/tgt">>}
+ ]}.
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 7c60e8a43..bcab46747 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -30,7 +30,6 @@
update_error/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("mem3/include/mem3.hrl").
@@ -49,45 +48,51 @@
get_json_value/3
]).
-
-define(REP_DB_NAME, <<"_replicator">>).
-define(REP_DESIGN_DOC, <<"_design/_replicator">>).
-define(OWNER, <<"owner">>).
--define(CTX, {user_ctx, #user_ctx{roles=[<<"_admin">>, <<"_replicator">>]}}).
+-define(CTX, {user_ctx, #user_ctx{roles = [<<"_admin">>, <<"_replicator">>]}}).
-define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
-
remove_state_fields(DbName, DocId) ->
update_rep_doc(DbName, DocId, [
{<<"_replication_state">>, undefined},
{<<"_replication_state_time">>, undefined},
{<<"_replication_state_reason">>, undefined},
{<<"_replication_id">>, undefined},
- {<<"_replication_stats">>, undefined}]).
-
+ {<<"_replication_stats">>, undefined}
+ ]).
-spec update_doc_completed(binary(), binary(), [_]) -> any().
update_doc_completed(DbName, DocId, Stats) ->
update_rep_doc(DbName, DocId, [
{<<"_replication_state">>, <<"completed">>},
{<<"_replication_state_reason">>, undefined},
- {<<"_replication_stats">>, {Stats}}]),
- couch_stats:increment_counter([couch_replicator, docs,
- completed_state_updates]).
-
+ {<<"_replication_stats">>, {Stats}}
+ ]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ docs,
+ completed_state_updates
+ ]).
-spec update_failed(binary(), binary(), any()) -> any().
update_failed(DbName, DocId, Error) ->
Reason = error_reason(Error),
- couch_log:error("Error processing replication doc `~s` from `~s`: ~s",
- [DocId, DbName, Reason]),
+ couch_log:error(
+ "Error processing replication doc `~s` from `~s`: ~s",
+ [DocId, DbName, Reason]
+ ),
update_rep_doc(DbName, DocId, [
{<<"_replication_state">>, <<"failed">>},
{<<"_replication_stats">>, undefined},
- {<<"_replication_state_reason">>, Reason}]),
- couch_stats:increment_counter([couch_replicator, docs,
- failed_state_updates]).
-
+ {<<"_replication_state_reason">>, Reason}
+ ]),
+ couch_stats:increment_counter([
+ couch_replicator,
+ docs,
+ failed_state_updates
+ ]).
-spec update_triggered(#rep{}, rep_id()) -> ok.
update_triggered(Rep, {Base, Ext}) ->
@@ -99,27 +104,28 @@ update_triggered(Rep, {Base, Ext}) ->
{<<"_replication_state">>, <<"triggered">>},
{<<"_replication_state_reason">>, undefined},
{<<"_replication_id">>, iolist_to_binary([Base, Ext])},
- {<<"_replication_stats">>, undefined}]),
+ {<<"_replication_stats">>, undefined}
+ ]),
ok.
-
-spec update_error(#rep{}, any()) -> ok.
update_error(#rep{db_name = DbName, doc_id = DocId, id = RepId}, Error) ->
Reason = error_reason(Error),
- BinRepId = case RepId of
- {Base, Ext} ->
- iolist_to_binary([Base, Ext]);
- _Other ->
- null
- end,
+ BinRepId =
+ case RepId of
+ {Base, Ext} ->
+ iolist_to_binary([Base, Ext]);
+ _Other ->
+ null
+ end,
update_rep_doc(DbName, DocId, [
{<<"_replication_state">>, <<"error">>},
{<<"_replication_state_reason">>, Reason},
{<<"_replication_stats">>, undefined},
- {<<"_replication_id">>, BinRepId}]),
+ {<<"_replication_id">>, BinRepId}
+ ]),
ok.
-
-spec ensure_rep_ddoc_exists(binary()) -> ok.
ensure_rep_ddoc_exists(RepDb) ->
case mem3:belongs(RepDb, ?REP_DESIGN_DOC) of
@@ -129,7 +135,6 @@ ensure_rep_ddoc_exists(RepDb) ->
ok
end.
-
-spec ensure_rep_ddoc_exists(binary(), binary()) -> ok.
ensure_rep_ddoc_exists(RepDb, DDocId) ->
case open_rep_doc(RepDb, DDocId) of
@@ -163,21 +168,18 @@ ensure_rep_ddoc_exists(RepDb, DDocId) ->
end,
ok.
-
-spec ensure_cluster_rep_ddoc_exists(binary()) -> ok.
ensure_cluster_rep_ddoc_exists(RepDb) ->
DDocId = ?REP_DESIGN_DOC,
[#shard{name = DbShard} | _] = mem3:shards(RepDb, DDocId),
ensure_rep_ddoc_exists(DbShard, DDocId).
-
-spec compare_ejson({[_]}, {[_]}) -> boolean().
compare_ejson(EJson1, EJson2) ->
EjsonSorted1 = couch_replicator_filters:ejsort(EJson1),
EjsonSorted2 = couch_replicator_filters:ejsort(EJson2),
EjsonSorted1 == EjsonSorted2.
-
-spec replication_design_doc_props(binary()) -> [_].
replication_design_doc_props(DDocId) ->
[
@@ -186,7 +188,6 @@ replication_design_doc_props(DDocId) ->
{<<"validate_doc_update">>, ?REP_DB_DOC_VALIDATE_FUN}
].
-
% Note: parse_rep_doc can handle filtered replications. During parsing of the
% replication doc it will make possibly remote http requests to the source
% database. If failure or parsing of filter docs fails, parse_doc throws a
@@ -195,32 +196,32 @@ replication_design_doc_props(DDocId) ->
% on network availability of the source db and other factors.
-spec parse_rep_doc({[_]}) -> #rep{}.
parse_rep_doc(RepDoc) ->
- {ok, Rep} = try
- parse_rep_doc(RepDoc, rep_user_ctx(RepDoc))
- catch
- throw:{error, Reason} ->
- throw({bad_rep_doc, Reason});
- throw:{filter_fetch_error, Reason} ->
- throw({filter_fetch_error, Reason});
- Tag:Err ->
- throw({bad_rep_doc, to_binary({Tag, Err})})
- end,
+ {ok, Rep} =
+ try
+ parse_rep_doc(RepDoc, rep_user_ctx(RepDoc))
+ catch
+ throw:{error, Reason} ->
+ throw({bad_rep_doc, Reason});
+ throw:{filter_fetch_error, Reason} ->
+ throw({filter_fetch_error, Reason});
+ Tag:Err ->
+ throw({bad_rep_doc, to_binary({Tag, Err})})
+ end,
Rep.
-
-spec parse_rep_doc_without_id({[_]}) -> #rep{}.
parse_rep_doc_without_id(RepDoc) ->
- {ok, Rep} = try
- parse_rep_doc_without_id(RepDoc, rep_user_ctx(RepDoc))
- catch
- throw:{error, Reason} ->
- throw({bad_rep_doc, Reason});
- Tag:Err ->
- throw({bad_rep_doc, to_binary({Tag, Err})})
- end,
+ {ok, Rep} =
+ try
+ parse_rep_doc_without_id(RepDoc, rep_user_ctx(RepDoc))
+ catch
+ throw:{error, Reason} ->
+ throw({bad_rep_doc, Reason});
+ Tag:Err ->
+ throw({bad_rep_doc, to_binary({Tag, Err})})
+ end,
Rep.
-
-spec parse_rep_doc({[_]}, #user_ctx{}) -> {ok, #rep{}}.
parse_rep_doc(Doc, UserCtx) ->
{ok, Rep} = parse_rep_doc_without_id(Doc, UserCtx),
@@ -238,44 +239,45 @@ parse_rep_doc(Doc, UserCtx) ->
{ok, update_rep_id(Rep)}
end.
-
-spec parse_rep_doc_without_id({[_]}, #user_ctx{}) -> {ok, #rep{}}.
parse_rep_doc_without_id({Props}, UserCtx) ->
{SrcProxy, TgtProxy} = parse_proxy_settings(Props),
Opts = make_options(Props),
- case get_value(cancel, Opts, false) andalso
- (get_value(id, Opts, nil) =/= nil) of
- true ->
- {ok, #rep{options = Opts, user_ctx = UserCtx}};
- false ->
- Source = parse_rep_db(get_value(<<"source">>, Props), SrcProxy, Opts),
- Target = parse_rep_db(get_value(<<"target">>, Props), TgtProxy, Opts),
- {Type, View} = case couch_replicator_filters:view_type(Props, Opts) of
- {error, Error} ->
- throw({bad_request, Error});
- Result ->
- Result
- end,
- Rep = #rep{
- source = Source,
- target = Target,
- options = Opts,
- user_ctx = UserCtx,
- type = Type,
- view = View,
- doc_id = get_value(<<"_id">>, Props, null)
- },
- % Check if can parse filter code, if not throw exception
- case couch_replicator_filters:parse(Opts) of
- {error, FilterError} ->
- throw({error, FilterError});
- {ok, _Filter} ->
- ok
- end,
- {ok, Rep}
+ case
+ get_value(cancel, Opts, false) andalso
+ (get_value(id, Opts, nil) =/= nil)
+ of
+ true ->
+ {ok, #rep{options = Opts, user_ctx = UserCtx}};
+ false ->
+ Source = parse_rep_db(get_value(<<"source">>, Props), SrcProxy, Opts),
+ Target = parse_rep_db(get_value(<<"target">>, Props), TgtProxy, Opts),
+ {Type, View} =
+ case couch_replicator_filters:view_type(Props, Opts) of
+ {error, Error} ->
+ throw({bad_request, Error});
+ Result ->
+ Result
+ end,
+ Rep = #rep{
+ source = Source,
+ target = Target,
+ options = Opts,
+ user_ctx = UserCtx,
+ type = Type,
+ view = View,
+ doc_id = get_value(<<"_id">>, Props, null)
+ },
+ % Check if can parse filter code, if not throw exception
+ case couch_replicator_filters:parse(Opts) of
+ {error, FilterError} ->
+ throw({error, FilterError});
+ {ok, _Filter} ->
+ ok
+ end,
+ {ok, Rep}
end.
-
parse_proxy_settings(Props) when is_list(Props) ->
Proxy = get_value(<<"proxy">>, Props, <<>>),
SrcProxy = get_value(<<"source_proxy">>, Props, <<>>),
@@ -285,7 +287,7 @@ parse_proxy_settings(Props) when is_list(Props) ->
true when SrcProxy =/= <<>> ->
Error = "`proxy` is mutually exclusive with `source_proxy`",
throw({bad_request, Error});
- true when TgtProxy =/= <<>> ->
+ true when TgtProxy =/= <<>> ->
Error = "`proxy` is mutually exclusive with `target_proxy`",
throw({bad_request, Error});
true ->
@@ -294,7 +296,6 @@ parse_proxy_settings(Props) when is_list(Props) ->
{SrcProxy, TgtProxy}
end.
-
% Update a #rep{} record with a replication_id. Calculating the id might involve
% fetching a filter from the source db, and so it could fail intermittently.
% In case of a failure to fetch the filter this function will throw a
@@ -303,11 +304,9 @@ update_rep_id(Rep) ->
RepId = couch_replicator_ids:replication_id(Rep),
Rep#rep{id = RepId}.
-
update_rep_doc(RepDbName, RepDocId, KVs) ->
update_rep_doc(RepDbName, RepDocId, KVs, 1).
-
update_rep_doc(RepDbName, RepDocId, KVs, Wait) when is_binary(RepDocId) ->
try
case open_rep_doc(RepDbName, RepDocId) of
@@ -323,36 +322,40 @@ update_rep_doc(RepDbName, RepDocId, KVs, Wait) when is_binary(RepDocId) ->
ok = timer:sleep(couch_rand:uniform(erlang:min(128, Wait)) * 100),
update_rep_doc(RepDbName, RepDocId, KVs, Wait * 2)
end;
-
update_rep_doc(RepDbName, #doc{body = {RepDocBody}} = RepDoc, KVs, _Try) ->
NewRepDocBody = lists:foldl(
- fun({K, undefined}, Body) ->
+ fun
+ ({K, undefined}, Body) ->
lists:keydelete(K, 1, Body);
- ({<<"_replication_state">> = K, State} = KV, Body) ->
+ ({<<"_replication_state">> = K, State} = KV, Body) ->
case get_json_value(K, Body) of
- State ->
- Body;
- _ ->
- Body1 = lists:keystore(K, 1, Body, KV),
- Timestamp = couch_replicator_utils:iso8601(os:timestamp()),
- lists:keystore(
- <<"_replication_state_time">>, 1, Body1,
- {<<"_replication_state_time">>, Timestamp})
+ State ->
+ Body;
+ _ ->
+ Body1 = lists:keystore(K, 1, Body, KV),
+ Timestamp = couch_replicator_utils:iso8601(os:timestamp()),
+ lists:keystore(
+ <<"_replication_state_time">>,
+ 1,
+ Body1,
+ {<<"_replication_state_time">>, Timestamp}
+ )
end;
({K, _V} = KV, Body) ->
lists:keystore(K, 1, Body, KV)
end,
- RepDocBody, KVs),
+ RepDocBody,
+ KVs
+ ),
case NewRepDocBody of
- RepDocBody ->
- ok;
- _ ->
- % Might not succeed - when the replication doc is deleted right
- % before this update (not an error, ignore).
- save_rep_doc(RepDbName, RepDoc#doc{body = {NewRepDocBody}})
+ RepDocBody ->
+ ok;
+ _ ->
+ % Might not succeed - when the replication doc is deleted right
+ % before this update (not an error, ignore).
+ save_rep_doc(RepDbName, RepDoc#doc{body = {NewRepDocBody}})
end.
-
open_rep_doc(DbName, DocId) ->
case couch_db:open_int(DbName, [?CTX, sys_db]) of
{ok, Db} ->
@@ -365,7 +368,6 @@ open_rep_doc(DbName, DocId) ->
Else
end.
-
save_rep_doc(DbName, Doc) ->
{ok, Db} = couch_db:open_int(DbName, [?CTX, sys_db]),
try
@@ -382,27 +384,26 @@ save_rep_doc(DbName, Doc) ->
couch_db:close(Db)
end.
-
-spec rep_user_ctx({[_]}) -> #user_ctx{}.
rep_user_ctx({RepDoc}) ->
case get_json_value(<<"user_ctx">>, RepDoc) of
- undefined ->
- #user_ctx{};
- {UserCtx} ->
- #user_ctx{
- name = get_json_value(<<"name">>, UserCtx, null),
- roles = get_json_value(<<"roles">>, UserCtx, [])
- }
+ undefined ->
+ #user_ctx{};
+ {UserCtx} ->
+ #user_ctx{
+ name = get_json_value(<<"name">>, UserCtx, null),
+ roles = get_json_value(<<"roles">>, UserCtx, [])
+ }
end.
-
-spec parse_rep_db({[_]} | binary(), binary(), [_]) -> #httpd{} | binary().
parse_rep_db({Props}, Proxy, Options) ->
ProxyParams = parse_proxy_params(Proxy),
- ProxyURL = case ProxyParams of
- [] -> undefined;
- _ -> binary_to_list(Proxy)
- end,
+ ProxyURL =
+ case ProxyParams of
+ [] -> undefined;
+ _ -> binary_to_list(Proxy)
+ end,
Url = maybe_add_trailing_slash(get_value(<<"url">>, Props)),
{AuthProps} = get_value(<<"auth">>, Props, {[]}),
{BinHeaders} = get_value(<<"headers">>, Props, {[]}),
@@ -412,36 +413,36 @@ parse_rep_db({Props}, Proxy, Options) ->
url = Url,
auth_props = AuthProps,
headers = lists:ukeymerge(1, Headers, DefaultHeaders),
- ibrowse_options = lists:keysort(1,
- [{socket_options, get_value(socket_options, Options)} |
- ProxyParams ++ ssl_params(Url)]),
+ ibrowse_options = lists:keysort(
+ 1,
+ [
+ {socket_options, get_value(socket_options, Options)}
+ | ProxyParams ++ ssl_params(Url)
+ ]
+ ),
timeout = get_value(connection_timeout, Options),
http_connections = get_value(http_connections, Options),
retries = get_value(retries, Options),
proxy_url = ProxyURL
},
couch_replicator_utils:normalize_basic_auth(HttpDb);
-
parse_rep_db(<<"http://", _/binary>> = Url, Proxy, Options) ->
parse_rep_db({[{<<"url">>, Url}]}, Proxy, Options);
-
parse_rep_db(<<"https://", _/binary>> = Url, Proxy, Options) ->
parse_rep_db({[{<<"url">>, Url}]}, Proxy, Options);
-
parse_rep_db(<<_/binary>>, _Proxy, _Options) ->
throw({error, local_endpoints_not_supported});
-
parse_rep_db(undefined, _Proxy, _Options) ->
throw({error, <<"Missing replicator database">>}).
-
-spec maybe_add_trailing_slash(binary() | list()) -> list().
maybe_add_trailing_slash(Url) when is_binary(Url) ->
maybe_add_trailing_slash(?b2l(Url));
maybe_add_trailing_slash(Url) ->
case lists:member($?, Url) of
true ->
- Url; % skip if there are query params
+ % skip if there are query params
+ Url;
false ->
case lists:last(Url) of
$/ ->
@@ -451,7 +452,6 @@ maybe_add_trailing_slash(Url) ->
end
end.
-
-spec make_options([_]) -> [_].
make_options(Props) ->
Options0 = lists:ukeysort(1, convert_options(Props)),
@@ -462,43 +462,55 @@ make_options(Props) ->
DefTimeout = config:get_integer("replicator", "connection_timeout", 30000),
DefRetries = config:get_integer("replicator", "retries_per_request", 5),
UseCheckpoints = config:get_boolean("replicator", "use_checkpoints", true),
- DefCheckpointInterval = config:get_integer("replicator",
- "checkpoint_interval", 30000),
+ DefCheckpointInterval = config:get_integer(
+ "replicator",
+ "checkpoint_interval",
+ 30000
+ ),
{ok, DefSocketOptions} = couch_util:parse_term(
- config:get("replicator", "socket_options",
- "[{keepalive, true}, {nodelay, false}]")),
- lists:ukeymerge(1, Options, lists:keysort(1, [
- {connection_timeout, DefTimeout},
- {retries, DefRetries},
- {http_connections, DefConns},
- {socket_options, DefSocketOptions},
- {worker_batch_size, DefBatchSize},
- {worker_processes, DefWorkers},
- {use_checkpoints, UseCheckpoints},
- {checkpoint_interval, DefCheckpointInterval}
- ])).
-
+ config:get(
+ "replicator",
+ "socket_options",
+ "[{keepalive, true}, {nodelay, false}]"
+ )
+ ),
+ lists:ukeymerge(
+ 1,
+ Options,
+ lists:keysort(1, [
+ {connection_timeout, DefTimeout},
+ {retries, DefRetries},
+ {http_connections, DefConns},
+ {socket_options, DefSocketOptions},
+ {worker_batch_size, DefBatchSize},
+ {worker_processes, DefWorkers},
+ {use_checkpoints, UseCheckpoints},
+ {checkpoint_interval, DefCheckpointInterval}
+ ])
+ ).
-spec convert_options([_]) -> [_].
-convert_options([])->
+convert_options([]) ->
[];
-convert_options([{<<"cancel">>, V} | _R]) when not is_boolean(V)->
+convert_options([{<<"cancel">>, V} | _R]) when not is_boolean(V) ->
throw({bad_request, <<"parameter `cancel` must be a boolean">>});
convert_options([{<<"cancel">>, V} | R]) ->
[{cancel, V} | convert_options(R)];
-convert_options([{IdOpt, V} | R]) when IdOpt =:= <<"_local_id">>;
- IdOpt =:= <<"replication_id">>; IdOpt =:= <<"id">> ->
+convert_options([{IdOpt, V} | R]) when
+ IdOpt =:= <<"_local_id">>;
+ IdOpt =:= <<"replication_id">>;
+ IdOpt =:= <<"id">>
+->
[{id, couch_replicator_ids:convert(V)} | convert_options(R)];
-convert_options([{<<"create_target">>, V} | _R]) when not is_boolean(V)->
+convert_options([{<<"create_target">>, V} | _R]) when not is_boolean(V) ->
throw({bad_request, <<"parameter `create_target` must be a boolean">>});
convert_options([{<<"create_target">>, V} | R]) ->
[{create_target, V} | convert_options(R)];
convert_options([{<<"create_target_params">>, V} | _R]) when not is_tuple(V) ->
- throw({bad_request,
- <<"parameter `create_target_params` must be a JSON object">>});
+ throw({bad_request, <<"parameter `create_target_params` must be a JSON object">>});
convert_options([{<<"create_target_params">>, V} | R]) ->
[{create_target_params, V} | convert_options(R)];
-convert_options([{<<"continuous">>, V} | _R]) when not is_boolean(V)->
+convert_options([{<<"continuous">>, V} | _R]) when not is_boolean(V) ->
throw({bad_request, <<"parameter `continuous` must be a boolean">>});
convert_options([{<<"continuous">>, V} | R]) ->
[{continuous, V} | convert_options(R)];
@@ -538,10 +550,10 @@ convert_options([{<<"use_checkpoints">>, V} | R]) ->
[{use_checkpoints, V} | convert_options(R)];
convert_options([{<<"checkpoint_interval">>, V} | R]) ->
[{checkpoint_interval, couch_util:to_integer(V)} | convert_options(R)];
-convert_options([_ | R]) -> % skip unknown option
+% skip unknown option
+convert_options([_ | R]) ->
convert_options(R).
-
-spec check_options([_]) -> [_].
check_options(Options) ->
DocIds = lists:keyfind(doc_ids, 1, Options),
@@ -552,12 +564,9 @@ check_options(Options) ->
{false, false, _} -> Options;
{false, _, false} -> Options;
{_, false, false} -> Options;
- _ ->
- throw({bad_request,
- "`doc_ids`,`filter`,`selector` are mutually exclusive"})
+ _ -> throw({bad_request, "`doc_ids`,`filter`,`selector` are mutually exclusive"})
end.
-
-spec parse_proxy_params(binary() | [_]) -> [_].
parse_proxy_params(ProxyUrl) when is_binary(ProxyUrl) ->
parse_proxy_params(?b2l(ProxyUrl));
@@ -571,15 +580,17 @@ parse_proxy_params(ProxyUrl) ->
password = Passwd,
protocol = Protocol
} = ibrowse_lib:parse_url(ProxyUrl),
- Params = [
- {proxy_host, Host},
- {proxy_port, Port}
- ] ++ case is_list(User) andalso is_list(Passwd) of
- false ->
- [];
- true ->
- [{proxy_user, User}, {proxy_password, Passwd}]
- end,
+ Params =
+ [
+ {proxy_host, Host},
+ {proxy_port, Port}
+ ] ++
+ case is_list(User) andalso is_list(Passwd) of
+ false ->
+ [];
+ true ->
+ [{proxy_user, User}, {proxy_password, Passwd}]
+ end,
case Protocol of
socks5 ->
[proxy_to_socks5(Param) || Param <- Params];
@@ -587,7 +598,6 @@ parse_proxy_params(ProxyUrl) ->
Params
end.
-
-spec proxy_to_socks5({atom(), string()}) -> {atom(), string()}.
proxy_to_socks5({proxy_host, Val}) ->
{socks5_host, Val};
@@ -598,36 +608,45 @@ proxy_to_socks5({proxy_user, Val}) ->
proxy_to_socks5({proxy_password, Val}) ->
{socks5_password, Val}.
-
-spec ssl_params([_]) -> [_].
ssl_params(Url) ->
case ibrowse_lib:parse_url(Url) of
- #url{protocol = https} ->
- Depth = config:get_integer("replicator",
- "ssl_certificate_max_depth", 3),
- VerifyCerts = config:get_boolean("replicator",
- "verify_ssl_certificates", false),
- CertFile = config:get("replicator", "cert_file", undefined),
- KeyFile = config:get("replicator", "key_file", undefined),
- Password = config:get("replicator", "password", undefined),
- SslOpts = [{depth, Depth} | ssl_verify_options(VerifyCerts)],
- SslOpts1 = case CertFile /= undefined andalso KeyFile /= undefined of
- true ->
- case Password of
- undefined ->
- [{certfile, CertFile}, {keyfile, KeyFile}] ++ SslOpts;
- _ ->
- [{certfile, CertFile}, {keyfile, KeyFile},
- {password, Password}] ++ SslOpts
- end;
- false -> SslOpts
- end,
- [{is_ssl, true}, {ssl_options, SslOpts1}];
- #url{protocol = http} ->
- []
+ #url{protocol = https} ->
+ Depth = config:get_integer(
+ "replicator",
+ "ssl_certificate_max_depth",
+ 3
+ ),
+ VerifyCerts = config:get_boolean(
+ "replicator",
+ "verify_ssl_certificates",
+ false
+ ),
+ CertFile = config:get("replicator", "cert_file", undefined),
+ KeyFile = config:get("replicator", "key_file", undefined),
+ Password = config:get("replicator", "password", undefined),
+ SslOpts = [{depth, Depth} | ssl_verify_options(VerifyCerts)],
+ SslOpts1 =
+ case CertFile /= undefined andalso KeyFile /= undefined of
+ true ->
+ case Password of
+ undefined ->
+ [{certfile, CertFile}, {keyfile, KeyFile}] ++ SslOpts;
+ _ ->
+ [
+ {certfile, CertFile},
+ {keyfile, KeyFile},
+ {password, Password}
+ ] ++ SslOpts
+ end;
+ false ->
+ SslOpts
+ end,
+ [{is_ssl, true}, {ssl_options, SslOpts1}];
+ #url{protocol = http} ->
+ []
end.
-
-spec ssl_verify_options(true | false) -> [_].
ssl_verify_options(true) ->
CAFile = config:get("replicator", "ssl_trusted_certificates_file"),
@@ -635,75 +654,85 @@ ssl_verify_options(true) ->
ssl_verify_options(false) ->
[{verify, verify_none}].
-
--spec before_doc_update(#doc{}, Db::any(), couch_db:update_type()) -> #doc{}.
+-spec before_doc_update(#doc{}, Db :: any(), couch_db:update_type()) -> #doc{}.
before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
Doc;
before_doc_update(#doc{body = {Body}} = Doc, Db, _UpdateType) ->
#user_ctx{
- roles = Roles,
- name = Name
+ roles = Roles,
+ name = Name
} = couch_db:get_user_ctx(Db),
case lists:member(<<"_replicator">>, Roles) of
- true ->
- Doc;
- false ->
- case couch_util:get_value(?OWNER, Body) of
- undefined ->
- Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
- Name ->
+ true ->
Doc;
- Other ->
- case (catch couch_db:check_is_admin(Db)) of
- ok when Other =:= null ->
- Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
- ok ->
- Doc;
- _ ->
- throw({forbidden, <<"Can't update replication documents",
- " from other users.">>})
+ false ->
+ case couch_util:get_value(?OWNER, Body) of
+ undefined ->
+ Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
+ Name ->
+ Doc;
+ Other ->
+ case (catch couch_db:check_is_admin(Db)) of
+ ok when Other =:= null ->
+ Doc#doc{body = {?replace(Body, ?OWNER, Name)}};
+ ok ->
+ Doc;
+ _ ->
+ throw(
+ {forbidden,
+ <<"Can't update replication documents", " from other users.">>}
+ )
+ end
end
- end
end.
-
--spec after_doc_read(#doc{}, Db::any()) -> #doc{}.
+-spec after_doc_read(#doc{}, Db :: any()) -> #doc{}.
after_doc_read(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db) ->
Doc;
after_doc_read(#doc{body = {Body}} = Doc, Db) ->
#user_ctx{name = Name} = couch_db:get_user_ctx(Db),
case (catch couch_db:check_is_admin(Db)) of
- ok ->
- Doc;
- _ ->
- case couch_util:get_value(?OWNER, Body) of
- Name ->
+ ok ->
Doc;
- _Other ->
- Source = strip_credentials(couch_util:get_value(<<"source">>,
-Body)),
- Target = strip_credentials(couch_util:get_value(<<"target">>,
-Body)),
- NewBody0 = ?replace(Body, <<"source">>, Source),
- NewBody = ?replace(NewBody0, <<"target">>, Target),
- #doc{revs = {Pos, [_ | Revs]}} = Doc,
- NewDoc = Doc#doc{body = {NewBody}, revs = {Pos - 1, Revs}},
- NewRevId = couch_db:new_revid(NewDoc),
- NewDoc#doc{revs = {Pos, [NewRevId | Revs]}}
- end
+ _ ->
+ case couch_util:get_value(?OWNER, Body) of
+ Name ->
+ Doc;
+ _Other ->
+ Source = strip_credentials(
+ couch_util:get_value(
+ <<"source">>,
+ Body
+ )
+ ),
+ Target = strip_credentials(
+ couch_util:get_value(
+ <<"target">>,
+ Body
+ )
+ ),
+ NewBody0 = ?replace(Body, <<"source">>, Source),
+ NewBody = ?replace(NewBody0, <<"target">>, Target),
+ #doc{revs = {Pos, [_ | Revs]}} = Doc,
+ NewDoc = Doc#doc{body = {NewBody}, revs = {Pos - 1, Revs}},
+ NewRevId = couch_db:new_revid(NewDoc),
+ NewDoc#doc{revs = {Pos, [NewRevId | Revs]}}
+ end
end.
-
--spec strip_credentials(undefined) -> undefined;
+-spec strip_credentials
+ (undefined) -> undefined;
(binary()) -> binary();
({[_]}) -> {[_]}.
strip_credentials(undefined) ->
undefined;
strip_credentials(Url) when is_binary(Url) ->
- re:replace(Url,
+ re:replace(
+ Url,
"http(s)?://(?:[^:]+):[^@]+@(.*)$",
"http\\1://\\2",
- [{return, binary}]);
+ [{return, binary}]
+ );
strip_credentials({Props0}) ->
Props1 = lists:keydelete(<<"headers">>, 1, Props0),
% Strip "auth" just like headers, for replication plugins it can be a place
@@ -711,26 +740,23 @@ strip_credentials({Props0}) ->
Props2 = lists:keydelete(<<"auth">>, 1, Props1),
{Props2}.
-
error_reason({shutdown, Error}) ->
error_reason(Error);
error_reason({bad_rep_doc, Reason}) ->
to_binary(Reason);
-error_reason({error, {Error, Reason}})
- when is_atom(Error), is_binary(Reason) ->
+error_reason({error, {Error, Reason}}) when
+ is_atom(Error), is_binary(Reason)
+->
to_binary(io_lib:format("~s: ~s", [Error, Reason]));
error_reason({error, Reason}) ->
to_binary(Reason);
error_reason(Reason) ->
to_binary(Reason).
-
-ifdef(TEST).
-
-include_lib("couch/include/couch_eunit.hrl").
-
check_options_pass_values_test() ->
?assertEqual(check_options([]), []),
?assertEqual(check_options([baz, {other, fiz}]), [baz, {other, fiz}]),
@@ -738,88 +764,120 @@ check_options_pass_values_test() ->
?assertEqual(check_options([{filter, x}]), [{filter, x}]),
?assertEqual(check_options([{selector, x}]), [{selector, x}]).
-
check_options_fail_values_test() ->
- ?assertThrow({bad_request, _},
- check_options([{doc_ids, x}, {filter, y}])),
- ?assertThrow({bad_request, _},
- check_options([{doc_ids, x}, {selector, y}])),
- ?assertThrow({bad_request, _},
- check_options([{filter, x}, {selector, y}])),
- ?assertThrow({bad_request, _},
- check_options([{doc_ids, x}, {selector, y}, {filter, z}])).
-
+ ?assertThrow(
+ {bad_request, _},
+ check_options([{doc_ids, x}, {filter, y}])
+ ),
+ ?assertThrow(
+ {bad_request, _},
+ check_options([{doc_ids, x}, {selector, y}])
+ ),
+ ?assertThrow(
+ {bad_request, _},
+ check_options([{filter, x}, {selector, y}])
+ ),
+ ?assertThrow(
+ {bad_request, _},
+ check_options([{doc_ids, x}, {selector, y}, {filter, z}])
+ ).
check_convert_options_pass_test() ->
?assertEqual([], convert_options([])),
?assertEqual([], convert_options([{<<"random">>, 42}])),
- ?assertEqual([{cancel, true}],
- convert_options([{<<"cancel">>, true}])),
- ?assertEqual([{create_target, true}],
- convert_options([{<<"create_target">>, true}])),
- ?assertEqual([{continuous, true}],
- convert_options([{<<"continuous">>, true}])),
- ?assertEqual([{doc_ids, [<<"id">>]}],
- convert_options([{<<"doc_ids">>, [<<"id">>]}])),
- ?assertEqual([{selector, {key, value}}],
- convert_options([{<<"selector">>, {key, value}}])).
-
+ ?assertEqual(
+ [{cancel, true}],
+ convert_options([{<<"cancel">>, true}])
+ ),
+ ?assertEqual(
+ [{create_target, true}],
+ convert_options([{<<"create_target">>, true}])
+ ),
+ ?assertEqual(
+ [{continuous, true}],
+ convert_options([{<<"continuous">>, true}])
+ ),
+ ?assertEqual(
+ [{doc_ids, [<<"id">>]}],
+ convert_options([{<<"doc_ids">>, [<<"id">>]}])
+ ),
+ ?assertEqual(
+ [{selector, {key, value}}],
+ convert_options([{<<"selector">>, {key, value}}])
+ ).
check_convert_options_fail_test() ->
- ?assertThrow({bad_request, _},
- convert_options([{<<"cancel">>, <<"true">>}])),
- ?assertThrow({bad_request, _},
- convert_options([{<<"create_target">>, <<"true">>}])),
- ?assertThrow({bad_request, _},
- convert_options([{<<"continuous">>, <<"true">>}])),
- ?assertThrow({bad_request, _},
- convert_options([{<<"doc_ids">>, not_a_list}])),
- ?assertThrow({bad_request, _},
- convert_options([{<<"selector">>, [{key, value}]}])).
+ ?assertThrow(
+ {bad_request, _},
+ convert_options([{<<"cancel">>, <<"true">>}])
+ ),
+ ?assertThrow(
+ {bad_request, _},
+ convert_options([{<<"create_target">>, <<"true">>}])
+ ),
+ ?assertThrow(
+ {bad_request, _},
+ convert_options([{<<"continuous">>, <<"true">>}])
+ ),
+ ?assertThrow(
+ {bad_request, _},
+ convert_options([{<<"doc_ids">>, not_a_list}])
+ ),
+ ?assertThrow(
+ {bad_request, _},
+ convert_options([{<<"selector">>, [{key, value}]}])
+ ).
check_strip_credentials_test() ->
- [?assertEqual(Expected, strip_credentials(Body)) || {Expected, Body} <- [
- {
- undefined,
- undefined
- },
- {
- <<"https://remote_server/database">>,
- <<"https://foo:bar@remote_server/database">>
- },
- {
- {[{<<"_id">>, <<"foo">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"bar">>}]}
- },
- {
- {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]}
- },
- {
- {[{<<"_id">>, <<"foo">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"baz">>}]}
- },
- {
- {[{<<"_id">>, <<"foo">>}]},
- {[{<<"_id">>, <<"foo">>}, {<<"auth">>, <<"pluginsecret">>}]}
- }
- ]].
-
+ [
+ ?assertEqual(Expected, strip_credentials(Body))
+ || {Expected, Body} <- [
+ {
+ undefined,
+ undefined
+ },
+ {
+ <<"https://remote_server/database">>,
+ <<"https://foo:bar@remote_server/database">>
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"bar">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"other">>, <<"bar">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"headers">>, <<"baz">>}]}
+ },
+ {
+ {[{<<"_id">>, <<"foo">>}]},
+ {[{<<"_id">>, <<"foo">>}, {<<"auth">>, <<"pluginsecret">>}]}
+ }
+ ]
+ ].
parse_proxy_params_test() ->
- ?assertEqual([
- {proxy_host, "foo.com"},
- {proxy_port, 443},
- {proxy_user, "u"},
- {proxy_password, "p"}
- ], parse_proxy_params("https://u:p@foo.com")),
- ?assertEqual([
- {socks5_host, "foo.com"},
- {socks5_port, 1080},
- {socks5_user, "u"},
- {socks5_password, "p"}
- ], parse_proxy_params("socks5://u:p@foo.com")).
-
+ ?assertEqual(
+ [
+ {proxy_host, "foo.com"},
+ {proxy_port, 443},
+ {proxy_user, "u"},
+ {proxy_password, "p"}
+ ],
+ parse_proxy_params("https://u:p@foo.com")
+ ),
+ ?assertEqual(
+ [
+ {socks5_host, "foo.com"},
+ {socks5_port, 1080},
+ {socks5_user, "u"},
+ {socks5_password, "p"}
+ ],
+ parse_proxy_params("socks5://u:p@foo.com")
+ ).
setup() ->
DbName = ?tempdb(),
@@ -828,12 +886,10 @@ setup() ->
create_vdu(DbName),
DbName.
-
teardown(DbName) when is_binary(DbName) ->
couch_server:delete(DbName, [?ADMIN_CTX]),
ok.
-
create_vdu(DbName) ->
couch_util:with_db(DbName, fun(Db) ->
VduFun = <<"function(newdoc, olddoc, userctx) {throw({'forbidden':'fail'})}">>,
@@ -844,48 +900,51 @@ create_vdu(DbName) ->
{ok, _} = couch_db:update_docs(Db, [Doc])
end).
-
update_replicator_doc_with_bad_vdu_test_() ->
{
setup,
fun test_util:start_couch/0,
fun test_util:stop_couch/1,
{
- foreach, fun setup/0, fun teardown/1,
+ foreach,
+ fun setup/0,
+ fun teardown/1,
[
fun t_vdu_does_not_crash_on_save/1
]
}
}.
-
t_vdu_does_not_crash_on_save(DbName) ->
?_test(begin
Doc = #doc{id = <<"some_id">>, body = {[{<<"foo">>, 42}]}},
?assertEqual({ok, forbidden}, save_rep_doc(DbName, Doc))
end).
-
local_replication_endpoint_error_test_() ->
- {
+ {
foreach,
- fun () -> meck:expect(config, get,
- fun(_, _, Default) -> Default end)
+ fun() ->
+ meck:expect(
+ config,
+ get,
+ fun(_, _, Default) -> Default end
+ )
end,
- fun (_) -> meck:unload() end,
+ fun(_) -> meck:unload() end,
[
t_error_on_local_endpoint()
]
}.
-
t_error_on_local_endpoint() ->
?_test(begin
- RepDoc = {[
- {<<"_id">>, <<"someid">>},
- {<<"source">>, <<"localdb">>},
- {<<"target">>, <<"http://somehost.local/tgt">>}
- ]},
+ RepDoc =
+ {[
+ {<<"_id">>, <<"someid">>},
+ {<<"source">>, <<"localdb">>},
+ {<<"target">>, <<"http://somehost.local/tgt">>}
+ ]},
Expect = local_endpoints_not_supported,
?assertThrow({bad_rep_doc, Expect}, parse_rep_doc_without_id(RepDoc))
end).
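
Aside (not part of the patch): the URL credential stripping reformatted above, and exercised by check_strip_credentials_test/0, can be tried in isolation. The module and function names below are illustrative assumptions; only the regex and the expected result come from the hunks themselves.

```
%% Minimal sketch of strip_credentials/1 for binary URLs; names illustrative.
-module(strip_credentials_example).
-export([strip/1]).

strip(Url) when is_binary(Url) ->
    %% Drop the "user:password@" userinfo part of an http(s) URL.
    re:replace(
        Url,
        "http(s)?://(?:[^:]+):[^@]+@(.*)$",
        "http\\1://\\2",
        [{return, binary}]
    ).

%% 1> strip_credentials_example:strip(<<"https://foo:bar@remote_server/database">>).
%% <<"https://remote_server/database">>
```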
diff --git a/src/couch_replicator/src/couch_replicator_fabric.erl b/src/couch_replicator/src/couch_replicator_fabric.erl
index 1650105b5..6e5ebfc25 100644
--- a/src/couch_replicator/src/couch_replicator_fabric.erl
+++ b/src/couch_replicator/src/couch_replicator_fabric.erl
@@ -13,7 +13,7 @@
-module(couch_replicator_fabric).
-export([
- docs/5
+ docs/5
]).
-include_lib("fabric/include/fabric.hrl").
@@ -24,7 +24,8 @@
docs(DbName, Options, QueryArgs, Callback, Acc) ->
Shards = mem3:shards(DbName),
Workers0 = fabric_util:submit_jobs(
- Shards, couch_replicator_fabric_rpc, docs, [Options, QueryArgs]),
+ Shards, couch_replicator_fabric_rpc, docs, [Options, QueryArgs]
+ ),
RexiMon = fabric_util:create_monitors(Workers0),
try
case fabric_streams:start(Workers0, #shard.ref) of
@@ -50,7 +51,6 @@ docs(DbName, Options, QueryArgs, Callback, Acc) ->
rexi_monitor:stop(RexiMon)
end.
-
docs_int(DbName, Workers, QueryArgs, Callback, Acc0) ->
#mrargs{limit = Limit, skip = Skip} = QueryArgs,
State = #collector{
@@ -63,22 +63,28 @@ docs_int(DbName, Workers, QueryArgs, Callback, Acc0) ->
user_acc = Acc0,
update_seq = nil
},
- case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 5000) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
+ case
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ infinity,
+ 5000
+ )
+ of
+ {ok, NewState} ->
+ {ok, NewState#collector.user_acc};
+ {timeout, NewState} ->
+ Callback({error, timeout}, NewState#collector.user_acc);
+ {error, Resp} ->
+ {ok, Resp}
end.
handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
fabric_view:check_down_shards(State, NodeRef);
-
handle_message({rexi_EXIT, Reason}, Worker, State) ->
fabric_view:handle_worker_exit(State, Worker, Reason);
-
handle_message({meta, Meta0}, {Worker, From}, State) ->
Tot = couch_util:get_value(total, Meta0, 0),
Off = couch_util:get_value(offset, Meta0, 0),
@@ -97,50 +103,46 @@ handle_message({meta, Meta0}, {Worker, From}, State) ->
Total = Total0 + Tot,
Offset = Offset0 + Off,
case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- offset = Offset
- }};
- false ->
- FinalOffset = erlang:min(Total, Offset+State#collector.skip),
- Meta = [{total, Total}, {offset, FinalOffset}],
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc
- }}
+ true ->
+ {ok, State#collector{
+ counters = Counters1,
+ total_rows = Total,
+ offset = Offset
+ }};
+ false ->
+ FinalOffset = erlang:min(Total, Offset + State#collector.skip),
+ Meta = [{total, Total}, {offset, FinalOffset}],
+ {Go, Acc} = Callback({meta, Meta}, AccIn),
+ {Go, State#collector{
+ counters = fabric_dict:decrement_all(Counters1),
+ total_rows = Total,
+ offset = FinalOffset,
+ user_acc = Acc
+ }}
end;
-
handle_message(#view_row{id = Id, doc = Doc} = Row0, {Worker, From}, State) ->
#collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
case maybe_fetch_and_filter_doc(Id, Doc, State) of
{[_ | _]} = NewDoc ->
Row = Row0#view_row{doc = NewDoc},
Dir = Args#mrargs.direction,
- Rows = merge_row(Dir, Row#view_row{worker={Worker, From}}, Rows0),
+ Rows = merge_row(Dir, Row#view_row{worker = {Worker, From}}, Rows0),
Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=Counters1},
+ State1 = State#collector{rows = Rows, counters = Counters1},
fabric_view:maybe_send_row(State1);
skip ->
rexi:stream_ack(From),
{ok, State}
end;
-
handle_message(complete, Worker, State) ->
Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
fabric_view:maybe_send_row(State#collector{counters = Counters}).
-
merge_row(fwd, Row, Rows) ->
lists:keymerge(#view_row.id, [Row], Rows);
merge_row(rev, Row, Rows) ->
lists:rkeymerge(#view_row.id, [Row], Rows).
-
maybe_fetch_and_filter_doc(Id, undecided, State) ->
#collector{db_name = DbName, query_args = #mrargs{extra = Extra}} = State,
FilterStates = proplists:get_value(filter_states, Extra),
@@ -149,7 +151,8 @@ maybe_fetch_and_filter_doc(Id, undecided, State) ->
DocState = couch_util:get_value(state, Props),
couch_replicator_utils:filter_state(DocState, FilterStates, DocInfo);
{error, not_found} ->
- skip % could have been deleted
+ % could have been deleted
+ skip
end;
maybe_fetch_and_filter_doc(_Id, Doc, _State) ->
Doc.
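
Side note: the merge_row/3 clauses above use a record-field index as the key position for lists:keymerge/3. A self-contained sketch of that idiom follows; the record here is a simplified stand-in, not fabric's actual #view_row{} definition.

```
%% Sketch of the keymerge-on-a-record-field idiom used by merge_row/3 above.
-module(merge_row_example).
-export([demo/0]).

-record(view_row, {id, doc}).

demo() ->
    Rows0 = [#view_row{id = <<"a">>}, #view_row{id = <<"c">>}],
    Row = #view_row{id = <<"b">>},
    %% #view_row.id evaluates to the id field's tuple position, so
    %% lists:keymerge/3 keeps the accumulated rows sorted by document id.
    lists:keymerge(#view_row.id, [Row], Rows0).
```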
diff --git a/src/couch_replicator/src/couch_replicator_fabric_rpc.erl b/src/couch_replicator/src/couch_replicator_fabric_rpc.erl
index d67f87548..daeb86e60 100644
--- a/src/couch_replicator/src/couch_replicator_fabric_rpc.erl
+++ b/src/couch_replicator/src/couch_replicator_fabric_rpc.erl
@@ -13,14 +13,13 @@
-module(couch_replicator_fabric_rpc).
-export([
- docs/3
+ docs/3
]).
-include_lib("fabric/include/fabric.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-
docs(DbName, Options, Args0) ->
set_io_priority(DbName, Options),
#mrargs{skip = Skip, limit = Limit, extra = Extra} = Args0,
@@ -31,7 +30,6 @@ docs(DbName, Options, Args0) ->
Acc = {DbName, FilterStates, HealthThreshold},
couch_mrview:query_all_docs(Db, Args, fun docs_cb/2, Acc).
-
docs_cb({meta, Meta}, Acc) ->
ok = rexi:stream2({meta, Meta}),
{ok, Acc};
@@ -54,16 +52,14 @@ docs_cb(complete, Acc) ->
ok = rexi:stream_last(complete),
{ok, Acc}.
-
set_io_priority(DbName, Options) ->
case lists:keyfind(io_priority, 1, Options) of
- {io_priority, Pri} ->
- erlang:put(io_priority, Pri);
- false ->
- erlang:put(io_priority, {interactive, DbName})
+ {io_priority, Pri} ->
+ erlang:put(io_priority, Pri);
+ false ->
+ erlang:put(io_priority, {interactive, DbName})
end.
-
%% Get the state of the replication document. If it is found and has a terminal
%% state then it can be filtered and either included in the results or skipped.
%% If it is not in a terminal state, look it up in the local doc processor ETS
@@ -80,8 +76,13 @@ rep_doc_state(Shard, Id, {[_ | _]} = Doc, States, HealthThreshold) ->
null ->
% Fetch from local doc processor. If there, filter by state.
% If not there, mark as undecided. Let coordinator figure it out.
- case couch_replicator_doc_processor:doc_lookup(Shard, Id,
- HealthThreshold) of
+ case
+ couch_replicator_doc_processor:doc_lookup(
+ Shard,
+ Id,
+ HealthThreshold
+ )
+ of
{ok, EtsInfo} ->
State = get_doc_state(EtsInfo),
couch_replicator_utils:filter_state(State, States, EtsInfo);
@@ -92,6 +93,5 @@ rep_doc_state(Shard, Id, {[_ | _]} = Doc, States, HealthThreshold) ->
couch_replicator_utils:filter_state(OtherState, States, DocInfo)
end.
-
-get_doc_state({Props})->
+get_doc_state({Props}) ->
couch_util:get_value(state, Props).
diff --git a/src/couch_replicator/src/couch_replicator_filters.erl b/src/couch_replicator/src/couch_replicator_filters.erl
index c8980001a..aab8e80b3 100644
--- a/src/couch_replicator/src/couch_replicator_filters.erl
+++ b/src/couch_replicator/src/couch_replicator_filters.erl
@@ -21,19 +21,18 @@
-include_lib("couch/include/couch_db.hrl").
-
% Parse the filter from replication options proplist.
% Return {ok, {FilterType,...}} | {error, ParseError}.
% For `user` filter, i.e. filters specified as user code
% in source database, this code doesn't fetch the filter
% code, but only returns the name of the filter.
-spec parse([_]) ->
- {ok, nil} |
- {ok, {view, binary(), {[_]}}} |
- {ok, {user, {binary(), binary()}, {[_]}}} |
- {ok, {docids, [_]}} |
- {ok, {mango, {[_]}}} |
- {error, binary()}.
+ {ok, nil}
+ | {ok, {view, binary(), {[_]}}}
+ | {ok, {user, {binary(), binary()}, {[_]}}}
+ | {ok, {docids, [_]}}
+ | {ok, {mango, {[_]}}}
+ | {error, binary()}.
parse(Options) ->
Filter = couch_util:get_value(filter, Options),
DocIds = couch_util:get_value(doc_ids, Options),
@@ -59,7 +58,6 @@ parse(Options) ->
{error, list_to_binary(Err)}
end.
-
% Fetches body of filter function from source database. Guaranteed to either
% return {ok, Body} or an {error, Reason}. Also assume this function might
% block due to network / socket issues for an undetermined amount of time.
@@ -86,18 +84,17 @@ fetch(DDocName, FilterName, Source) ->
{error, couch_util:to_binary(Reason)}
end.
-
% Get replication type and view (if any) from replication document props
-spec view_type([_], [_]) ->
{view, {binary(), binary()}} | {db, nil} | {error, binary()}.
view_type(Props, Options) ->
case couch_util:get_value(<<"filter">>, Props) of
<<"_view">> ->
- {QP} = couch_util:get_value(query_params, Options, {[]}),
+ {QP} = couch_util:get_value(query_params, Options, {[]}),
ViewParam = couch_util:get_value(<<"view">>, QP),
case re:split(ViewParam, <<"/">>) of
[DName, ViewName] ->
- {view, {<< "_design/", DName/binary >>, ViewName}};
+ {view, {<<"_design/", DName/binary>>, ViewName}};
_ ->
{error, <<"Invalid `view` parameter.">>}
end;
@@ -105,57 +102,70 @@ view_type(Props, Options) ->
{db, nil}
end.
-
% Private functions
fetch_internal(DDocName, FilterName, Source) ->
- Db = case (catch couch_replicator_api_wrap:db_open(Source)) of
- {ok, Db0} ->
- Db0;
- DbError ->
- DbErrorMsg = io_lib:format("Could not open source database `~s`: ~s",
- [couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(DbError)]),
- throw({fetch_error, iolist_to_binary(DbErrorMsg)})
- end,
- try
- Body = case (catch couch_replicator_api_wrap:open_doc(
- Db, <<"_design/", DDocName/binary>>, [ejson_body])) of
- {ok, #doc{body = Body0}} ->
- Body0;
- DocError ->
- DocErrorMsg = io_lib:format(
- "Couldn't open document `_design/~s` from source "
- "database `~s`: ~s", [DDocName,
- couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(DocError)]
- ),
- throw({fetch_error, iolist_to_binary(DocErrorMsg)})
+ Db =
+ case (catch couch_replicator_api_wrap:db_open(Source)) of
+ {ok, Db0} ->
+ Db0;
+ DbError ->
+ DbErrorMsg = io_lib:format(
+ "Could not open source database `~s`: ~s",
+ [
+ couch_replicator_api_wrap:db_uri(Source),
+ couch_util:to_binary(DbError)
+ ]
+ ),
+ throw({fetch_error, iolist_to_binary(DbErrorMsg)})
end,
+ try
+ Body =
+ case
+ (catch couch_replicator_api_wrap:open_doc(
+ Db, <<"_design/", DDocName/binary>>, [ejson_body]
+ ))
+ of
+ {ok, #doc{body = Body0}} ->
+ Body0;
+ DocError ->
+ DocErrorMsg = io_lib:format(
+ "Couldn't open document `_design/~s` from source "
+ "database `~s`: ~s",
+ [
+ DDocName,
+ couch_replicator_api_wrap:db_uri(Source),
+ couch_util:to_binary(DocError)
+ ]
+ ),
+ throw({fetch_error, iolist_to_binary(DocErrorMsg)})
+ end,
try
Code = couch_util:get_nested_json_value(
- Body, [<<"filters">>, FilterName]),
+ Body, [<<"filters">>, FilterName]
+ ),
re:replace(Code, [$^, "\s*(.*?)\s*", $$], "\\1", [{return, binary}])
- catch
- _Tag:CodeError ->
- CodeErrorMsg = io_lib:format(
- "Couldn't parse filter code from document ~s on `~s` "
- " Error: ~s", [DDocName,
- couch_replicator_api_wrap:db_uri(Source),
- couch_util:to_binary(CodeError)]
- ),
- throw({fetch_error, CodeErrorMsg})
- end
+ catch
+ _Tag:CodeError ->
+ CodeErrorMsg = io_lib:format(
+ "Couldn't parse filter code from document ~s on `~s` "
+ " Error: ~s",
+ [
+ DDocName,
+ couch_replicator_api_wrap:db_uri(Source),
+ couch_util:to_binary(CodeError)
+ ]
+ ),
+ throw({fetch_error, CodeErrorMsg})
+ end
after
couch_replicator_api_wrap:db_close(Db)
end.
-
-spec query_params([_]) -> {[_]}.
-query_params(Options)->
+query_params(Options) ->
couch_util:get_value(query_params, Options, {[]}).
-
parse_user_filter(Filter) ->
case re:run(Filter, "(.*?)/(.*)", [{capture, [1, 2], binary}]) of
{match, [DDocName0, FilterName0]} ->
@@ -164,31 +174,27 @@ parse_user_filter(Filter) ->
{error, <<"Invalid filter. Must match `ddocname/filtername`.">>}
end.
-
% Sort an EJSON object's properties to attempt
% to generate a unique representation. This is used
% to reduce the chance of getting different
% replication checkpoints for the same Mango selector
-ejsort({V})->
+ejsort({V}) ->
ejsort_props(V, []);
ejsort(V) when is_list(V) ->
ejsort_array(V, []);
ejsort(V) ->
V.
-
-ejsort_props([], Acc)->
+ejsort_props([], Acc) ->
{lists:keysort(1, Acc)};
-ejsort_props([{K, V}| R], Acc) ->
+ejsort_props([{K, V} | R], Acc) ->
ejsort_props(R, [{K, ejsort(V)} | Acc]).
-
-ejsort_array([], Acc)->
+ejsort_array([], Acc) ->
lists:reverse(Acc);
ejsort_array([V | R], Acc) ->
ejsort_array(R, [ejsort(V) | Acc]).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -200,14 +206,15 @@ ejsort_basic_values_test() ->
?assertEqual(ejsort([]), []),
?assertEqual(ejsort({[]}), {[]}).
-
ejsort_compound_values_test() ->
?assertEqual(ejsort([2, 1, 3, <<"a">>]), [2, 1, 3, <<"a">>]),
- Ej1 = {[{<<"a">>, 0}, {<<"c">>, 0}, {<<"b">>, 0}]},
- Ej1s = {[{<<"a">>, 0}, {<<"b">>, 0}, {<<"c">>, 0}]},
+ Ej1 = {[{<<"a">>, 0}, {<<"c">>, 0}, {<<"b">>, 0}]},
+ Ej1s = {[{<<"a">>, 0}, {<<"b">>, 0}, {<<"c">>, 0}]},
?assertEqual(ejsort(Ej1), Ej1s),
Ej2 = {[{<<"x">>, Ej1}, {<<"z">>, Ej1}, {<<"y">>, [Ej1, Ej1]}]},
- ?assertEqual(ejsort(Ej2),
- {[{<<"x">>, Ej1s}, {<<"y">>, [Ej1s, Ej1s]}, {<<"z">>, Ej1s}]}).
+ ?assertEqual(
+ ejsort(Ej2),
+ {[{<<"x">>, Ej1s}, {<<"y">>, [Ej1s, Ej1s]}, {<<"z">>, Ej1s}]}
+ ).
-endif.
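
For reference, the normalization ejsort/1 performs above can also be written with comprehensions. This is a behaviour-equivalent sketch under an illustrative module name, matching the ejsort test expectations shown in the hunks.

```
%% Standalone sketch of ejsort/1: recursively sort EJSON object properties so
%% that equivalent Mango selectors normalize to a single representation.
-module(ejsort_example).
-export([ejsort/1]).

ejsort({Props}) ->
    {lists:keysort(1, [{K, ejsort(V)} || {K, V} <- Props])};
ejsort(List) when is_list(List) ->
    %% Arrays keep their order; only their elements are normalized.
    [ejsort(V) || V <- List];
ejsort(Other) ->
    Other.

%% 1> ejsort_example:ejsort({[{<<"a">>, 0}, {<<"c">>, 0}, {<<"b">>, 0}]}).
%% {[{<<"a">>,0},{<<"b">>,0},{<<"c">>,0}]}
```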
diff --git a/src/couch_replicator/src/couch_replicator_httpc.erl b/src/couch_replicator/src/couch_replicator_httpc.erl
index a2af51898..67e3f8474 100644
--- a/src/couch_replicator/src/couch_replicator_httpc.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc.erl
@@ -39,7 +39,6 @@
% where we may end up processing an unbounded number of messages.
-define(MAX_DISCARDED_MESSAGES, 16).
-
setup(Db) ->
#httpdb{
httpc_pool = nil,
@@ -47,8 +46,11 @@ setup(Db) ->
http_connections = MaxConns,
proxy_url = ProxyUrl
} = Db,
- {ok, Pid} = couch_replicator_httpc_pool:start_link(Url, ProxyUrl,
- [{max_connections, MaxConns}]),
+ {ok, Pid} = couch_replicator_httpc_pool:start_link(
+ Url,
+ ProxyUrl,
+ [{max_connections, MaxConns}]
+ ),
case couch_replicator_auth:initialize(Db#httpdb{httpc_pool = Pid}) of
{ok, Db1} ->
{ok, Db1};
@@ -59,42 +61,47 @@ setup(Db) ->
throw({replication_auth_error, Error})
end.
-
send_req(HttpDb, Params1, Callback) ->
put(?STREAM_STATUS, init),
couch_stats:increment_counter([couch_replicator, requests]),
- Params2 = ?replace(Params1, qs,
- [{K, ?b2l(iolist_to_binary(V))} || {K, V} <- get_value(qs, Params1, [])]),
- Params = ?replace(Params2, ibrowse_options,
- lists:keysort(1, get_value(ibrowse_options, Params2, []))),
+ Params2 = ?replace(
+ Params1,
+ qs,
+ [{K, ?b2l(iolist_to_binary(V))} || {K, V} <- get_value(qs, Params1, [])]
+ ),
+ Params = ?replace(
+ Params2,
+ ibrowse_options,
+ lists:keysort(1, get_value(ibrowse_options, Params2, []))
+ ),
{Worker, Response, HttpDb1} = send_ibrowse_req(HttpDb, Params),
- Ret = try
- process_response(Response, Worker, HttpDb1, Params, Callback)
- catch
- throw:{retry, NewHttpDb0, NewParams0} ->
- {retry, NewHttpDb0, NewParams0}
- after
- Pool = HttpDb1#httpdb.httpc_pool,
- case get(?STOP_HTTP_WORKER) of
- stop ->
- ok = stop_and_release_worker(Pool, Worker),
- erase(?STOP_HTTP_WORKER);
- undefined ->
- ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
+ Ret =
+ try
+ process_response(Response, Worker, HttpDb1, Params, Callback)
+ catch
+ throw:{retry, NewHttpDb0, NewParams0} ->
+ {retry, NewHttpDb0, NewParams0}
+ after
+ Pool = HttpDb1#httpdb.httpc_pool,
+ case get(?STOP_HTTP_WORKER) of
+ stop ->
+ ok = stop_and_release_worker(Pool, Worker),
+ erase(?STOP_HTTP_WORKER);
+ undefined ->
+ ok = couch_replicator_httpc_pool:release_worker(Pool, Worker)
+ end,
+ clean_mailbox(Response)
end,
- clean_mailbox(Response)
- end,
% This is necessary to keep this tail-recursive. Calling
% send_req in the catch clause would turn it into a body
% recursive call accidentally.
case Ret of
- {retry, #httpdb{}=NewHttpDb, NewParams} ->
+ {retry, #httpdb{} = NewHttpDb, NewParams} ->
send_req(NewHttpDb, NewParams, Callback);
_ ->
Ret
end.
-
send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb0, Params) ->
Method = get_value(method, Params, get),
UserHeaders = get_value(headers, Params, []),
@@ -103,32 +110,40 @@ send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb0, Params) ->
Url = full_url(HttpDb, Params),
Body = get_value(body, Params, []),
case get_value(path, Params) == "_changes" of
- true ->
- Timeout = infinity;
- false ->
- Timeout = case config:get("replicator", "request_timeout", "infinity") of
- "infinity" -> infinity;
- Milliseconds -> list_to_integer(Milliseconds)
- end
+ true ->
+ Timeout = infinity;
+ false ->
+ Timeout =
+ case config:get("replicator", "request_timeout", "infinity") of
+ "infinity" -> infinity;
+ Milliseconds -> list_to_integer(Milliseconds)
+ end
end,
{ok, Worker} = couch_replicator_httpc_pool:get_worker(HttpDb#httpdb.httpc_pool),
- BasicAuthOpts = case couch_replicator_utils:get_basic_auth_creds(HttpDb) of
- {undefined, undefined} ->
- [];
- {User, Pass} when is_list(User), is_list(Pass) ->
- [{basic_auth, {User, Pass}}]
- end,
- IbrowseOptions = BasicAuthOpts ++ [
- {response_format, binary}, {inactivity_timeout, HttpDb#httpdb.timeout} |
- lists:ukeymerge(1, get_value(ibrowse_options, Params, []),
- HttpDb#httpdb.ibrowse_options)
- ],
+ BasicAuthOpts =
+ case couch_replicator_utils:get_basic_auth_creds(HttpDb) of
+ {undefined, undefined} ->
+ [];
+ {User, Pass} when is_list(User), is_list(Pass) ->
+ [{basic_auth, {User, Pass}}]
+ end,
+ IbrowseOptions =
+ BasicAuthOpts ++
+ [
+ {response_format, binary},
+ {inactivity_timeout, HttpDb#httpdb.timeout}
+ | lists:ukeymerge(
+ 1,
+ get_value(ibrowse_options, Params, []),
+ HttpDb#httpdb.ibrowse_options
+ )
+ ],
backoff_before_request(Worker, HttpDb, Params),
Response = ibrowse:send_req_direct(
- Worker, Url, Headers2, Method, Body, IbrowseOptions, Timeout),
+ Worker, Url, Headers2, Method, Body, IbrowseOptions, Timeout
+ ),
{Worker, Response, HttpDb}.
-
%% Stop worker, wait for it to die, then release it. Make sure it is dead before
%% releasing it to the pool, so there is no race triggered by recycling it again.
%% The reason is that recycling a dying worker could end up with that worker returning
@@ -146,8 +161,6 @@ stop_and_release_worker(Pool, Worker) ->
process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
put(?STOP_HTTP_WORKER, stop),
maybe_retry(sel_conn_closed, Worker, HttpDb, Params);
-
-
%% This clause handles un-expected connection closing during pipelined requests.
%% For example, if server responds to a request, sets Connection: close header
%% and closes the socket, ibrowse will detect that error when it sends
@@ -155,78 +168,85 @@ process_response({error, sel_conn_closed}, Worker, HttpDb, Params, _Cb) ->
process_response({error, connection_closing}, Worker, HttpDb, Params, _Cb) ->
put(?STOP_HTTP_WORKER, stop),
maybe_retry({error, connection_closing}, Worker, HttpDb, Params);
-
process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
process_stream_response(ReqId, Worker, HttpDb, Params, Callback);
-
process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
case list_to_integer(Code) of
- R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- 429 ->
- backoff(HttpDb, Params);
- Error when Error =:= 408 ; Error >= 500 ->
- couch_stats:increment_counter([couch_replicator, responses, failure]),
- maybe_retry({code, Error}, Worker, HttpDb, Params);
- Ok when Ok >= 200 , Ok < 500 ->
- backoff_success(HttpDb, Params),
- couch_stats:increment_counter([couch_replicator, responses, success]),
- EJson = case Body of
- <<>> ->
- null;
- Json ->
- ?JSON_DECODE(Json)
- end,
- process_auth_response(HttpDb, Ok, Headers, Params),
- if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
- Callback(Ok, Headers, EJson)
+ R when R =:= 301; R =:= 302; R =:= 303 ->
+ backoff_success(HttpDb, Params),
+ do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
+ 429 ->
+ backoff(HttpDb, Params);
+ Error when Error =:= 408; Error >= 500 ->
+ couch_stats:increment_counter([couch_replicator, responses, failure]),
+ maybe_retry({code, Error}, Worker, HttpDb, Params);
+ Ok when Ok >= 200, Ok < 500 ->
+ backoff_success(HttpDb, Params),
+ couch_stats:increment_counter([couch_replicator, responses, success]),
+ EJson =
+ case Body of
+ <<>> ->
+ null;
+ Json ->
+ ?JSON_DECODE(Json)
+ end,
+ process_auth_response(HttpDb, Ok, Headers, Params),
+ if
+ Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop);
+ true -> ok
+ end,
+ Callback(Ok, Headers, EJson)
end;
-
process_response(Error, Worker, HttpDb, Params, _Callback) ->
maybe_retry(Error, Worker, HttpDb, Params).
-
process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
receive
- {ibrowse_async_headers, ReqId, Code, Headers} ->
- case list_to_integer(Code) of
- R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
- backoff_success(HttpDb, Params),
- do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
- 429 ->
- Timeout = couch_replicator_rate_limiter:max_interval(),
- backoff(HttpDb#httpdb{timeout = Timeout}, Params);
- Error when Error =:= 408 ; Error >= 500 ->
+ {ibrowse_async_headers, ReqId, Code, Headers} ->
+ case list_to_integer(Code) of
+ R when R =:= 301; R =:= 302; R =:= 303 ->
+ backoff_success(HttpDb, Params),
+ do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
+ 429 ->
+ Timeout = couch_replicator_rate_limiter:max_interval(),
+ backoff(HttpDb#httpdb{timeout = Timeout}, Params);
+ Error when Error =:= 408; Error >= 500 ->
+ couch_stats:increment_counter(
+ [couch_replicator, stream_responses, failure]
+ ),
+ report_error(Worker, HttpDb, Params, {code, Error});
+ Ok when Ok >= 200, Ok < 500 ->
+ backoff_success(HttpDb, Params),
+ HttpDb1 = process_auth_response(HttpDb, Ok, Headers, Params),
+ StreamDataFun = fun() ->
+ stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
+ end,
+ put(?STREAM_STATUS, {streaming, Worker}),
+ if
+ Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop);
+ true -> ok
+ end,
+ ibrowse:stream_next(ReqId),
+ try
+ Ret = Callback(Ok, Headers, StreamDataFun),
+ Ret
+ catch
+ throw:{maybe_retry_req, connection_closed} ->
+ maybe_retry(
+ {connection_closed, mid_stream},
+ Worker,
+ HttpDb1,
+ Params
+ );
+ throw:{maybe_retry_req, Err} ->
+ maybe_retry(Err, Worker, HttpDb1, Params)
+ end
+ end;
+ {ibrowse_async_response, ReqId, {error, _} = Error} ->
couch_stats:increment_counter(
[couch_replicator, stream_responses, failure]
),
- report_error(Worker, HttpDb, Params, {code, Error});
- Ok when Ok >= 200 , Ok < 500 ->
- backoff_success(HttpDb, Params),
- HttpDb1 = process_auth_response(HttpDb, Ok, Headers, Params),
- StreamDataFun = fun() ->
- stream_data_self(HttpDb1, Params, Worker, ReqId, Callback)
- end,
- put(?STREAM_STATUS, {streaming, Worker}),
- if Ok =:= 413 -> put(?STOP_HTTP_WORKER, stop); true -> ok end,
- ibrowse:stream_next(ReqId),
- try
- Ret = Callback(Ok, Headers, StreamDataFun),
- Ret
- catch
- throw:{maybe_retry_req, connection_closed} ->
- maybe_retry({connection_closed, mid_stream},
- Worker, HttpDb1, Params);
- throw:{maybe_retry_req, Err} ->
- maybe_retry(Err, Worker, HttpDb1, Params)
- end
- end;
- {ibrowse_async_response, ReqId, {error, _} = Error} ->
- couch_stats:increment_counter(
- [couch_replicator, stream_responses, failure]
- ),
- maybe_retry(Error, Worker, HttpDb, Params)
+ maybe_retry(Error, Worker, HttpDb, Params)
after HttpDb#httpdb.timeout + 500 ->
% Note: ibrowse should always reply with timeouts, but this doesn't
% seem to be always true when there's a very high rate of requests
@@ -234,7 +254,6 @@ process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
maybe_retry(timeout, Worker, HttpDb, Params)
end.
-
process_auth_response(HttpDb, Code, Headers, Params) ->
case couch_replicator_auth:handle_response(HttpDb, Code, Headers) of
{continue, HttpDb1} ->
@@ -244,7 +263,6 @@ process_auth_response(HttpDb, Code, Headers, Params) ->
throw({retry, HttpDb1, Params})
end.
-
% Only streaming HTTP requests send messages back from
% the ibrowse worker process. We can detect that based
% on the ibrowse_req_id format. This just drops all
@@ -254,7 +272,6 @@ process_auth_response(HttpDb, Code, Headers, Params) ->
clean_mailbox(ReqId) ->
clean_mailbox(ReqId, ?MAX_DISCARDED_MESSAGES).
-
clean_mailbox(_ReqId, 0) ->
case get(?STREAM_STATUS) of
{streaming, Worker} ->
@@ -285,14 +302,13 @@ clean_mailbox({ibrowse_req_id, ReqId}, Count) when Count > 0 ->
{ibrowse_async_response_end, ReqId} ->
put(?STREAM_STATUS, ended),
ok
- after 0 ->
- ok
+ after 0 ->
+ ok
end
end;
clean_mailbox(_, Count) when Count > 0 ->
ok.
-
discard_message(ReqId, Worker, Count) ->
ibrowse:stream_next(ReqId),
receive
@@ -306,12 +322,14 @@ discard_message(ReqId, Worker, Count) ->
exit({timeout, ibrowse_stream_cleanup})
end.
-
maybe_retry(Error, Worker, #httpdb{retries = 0} = HttpDb, Params) ->
report_error(Worker, HttpDb, Params, {error, Error});
-
-maybe_retry(Error, Worker, #httpdb{retries = Retries, wait = Wait} = HttpDb,
- Params) ->
+maybe_retry(
+ Error,
+ Worker,
+ #httpdb{retries = Retries, wait = Wait} = HttpDb,
+ Params
+) ->
case total_error_time_exceeded(HttpDb) of
true ->
report_error(Worker, HttpDb, Params, {error, Error});
@@ -324,14 +342,12 @@ maybe_retry(Error, Worker, #httpdb{retries = Retries, wait = Wait} = HttpDb,
throw({retry, HttpDb2, Params})
end.
-
% When retrying, check to make sure the total time spent retrying a request is
% below the current scheduler health threshold. The goal is to not exceed the
% threshold, otherwise a job which keeps retrying for too long will still be
% considered healthy.
total_error_time_exceeded(#httpdb{first_error_timestamp = nil}) ->
false;
-
total_error_time_exceeded(#httpdb{first_error_timestamp = ErrorTimestamp}) ->
HealthThresholdSec = couch_replicator_scheduler:health_threshold(),
    % Threshold value is halved because in the calling code the next step
@@ -340,23 +356,21 @@ total_error_time_exceeded(#httpdb{first_error_timestamp = ErrorTimestamp}) ->
ThresholdUSec = (HealthThresholdSec / 2) * 1000000,
timer:now_diff(os:timestamp(), ErrorTimestamp) > ThresholdUSec.
-
% Remember the first time an error occurs. This value is used later to check
% the total time spent retrying a request. Because retrying is recursive, on a
% successful result the #httpdb{} record is reset back to the original value.
update_first_error_timestamp(#httpdb{first_error_timestamp = nil} = HttpDb) ->
HttpDb#httpdb{first_error_timestamp = os:timestamp()};
-
update_first_error_timestamp(HttpDb) ->
HttpDb.
-
log_retry_error(Params, HttpDb, Wait, Error) ->
Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
- couch_log:notice("Retrying ~s request to ~s in ~p seconds due to error ~s",
- [Method, Url, Wait / 1000, error_cause(Error)]).
-
+ couch_log:notice(
+ "Retrying ~s request to ~s in ~p seconds due to error ~s",
+ [Method, Url, Wait / 1000, error_cause(Error)]
+ ).
report_error(_Worker, HttpDb, Params, Error) ->
Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
@@ -364,60 +378,60 @@ report_error(_Worker, HttpDb, Params, Error) ->
do_report_error(Url, Method, Error),
exit({http_request_failed, Method, Url, Error}).
-
do_report_error(Url, Method, {code, Code}) ->
- couch_log:error("Replicator, request ~s to ~p failed. The received "
- "HTTP error code is ~p", [Method, Url, Code]);
-
+ couch_log:error(
+ "Replicator, request ~s to ~p failed. The received "
+ "HTTP error code is ~p",
+ [Method, Url, Code]
+ );
do_report_error(FullUrl, Method, Error) ->
- couch_log:error("Replicator, request ~s to ~p failed due to error ~s",
- [Method, FullUrl, error_cause(Error)]).
-
+ couch_log:error(
+ "Replicator, request ~s to ~p failed due to error ~s",
+ [Method, FullUrl, error_cause(Error)]
+ ).
error_cause({error, Cause}) ->
lists:flatten(io_lib:format("~p", [Cause]));
error_cause(Cause) ->
lists:flatten(io_lib:format("~p", [Cause])).
-
stream_data_self(#httpdb{timeout = T} = HttpDb, Params, Worker, ReqId, Cb) ->
case accumulate_messages(ReqId, [], T + 500) of
- {Data, ibrowse_async_response} ->
- ibrowse:stream_next(ReqId),
- {Data, fun() -> stream_data_self(HttpDb, Params, Worker, ReqId, Cb) end};
- {Data, ibrowse_async_response_end} ->
- put(?STREAM_STATUS, ended),
- {Data, fun() -> throw({maybe_retry_req, more_data_expected}) end}
+ {Data, ibrowse_async_response} ->
+ ibrowse:stream_next(ReqId),
+ {Data, fun() -> stream_data_self(HttpDb, Params, Worker, ReqId, Cb) end};
+ {Data, ibrowse_async_response_end} ->
+ put(?STREAM_STATUS, ended),
+ {Data, fun() -> throw({maybe_retry_req, more_data_expected}) end}
end.
accumulate_messages(ReqId, Acc, Timeout) ->
receive
- {ibrowse_async_response, ReqId, {error, Error}} ->
- throw({maybe_retry_req, Error});
- {ibrowse_async_response, ReqId, <<>>} ->
- accumulate_messages(ReqId, Acc, Timeout);
- {ibrowse_async_response, ReqId, Data} ->
- accumulate_messages(ReqId, [Data | Acc], 0);
- {ibrowse_async_response_end, ReqId} ->
- {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response_end}
+ {ibrowse_async_response, ReqId, {error, Error}} ->
+ throw({maybe_retry_req, Error});
+ {ibrowse_async_response, ReqId, <<>>} ->
+ accumulate_messages(ReqId, Acc, Timeout);
+ {ibrowse_async_response, ReqId, Data} ->
+ accumulate_messages(ReqId, [Data | Acc], 0);
+ {ibrowse_async_response_end, ReqId} ->
+ {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response_end}
after Timeout ->
% Note: ibrowse should always reply with timeouts, but this doesn't
% seem to be always true when there's a very high rate of requests
% and many open connections.
- if Acc =:= [] ->
- throw({maybe_retry_req, timeout});
- true ->
- {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response}
+ if
+ Acc =:= [] ->
+ throw({maybe_retry_req, timeout});
+ true ->
+ {iolist_to_binary(lists:reverse(Acc)), ibrowse_async_response}
end
end.
-
full_url(#httpdb{url = BaseUrl}, Params) ->
Path = get_value(path, Params, []),
QueryArgs = get_value(qs, Params, []),
BaseUrl ++ Path ++ query_args_to_string(QueryArgs, []).
-
query_args_to_string([], []) ->
"";
query_args_to_string([], Acc) ->
@@ -425,13 +439,11 @@ query_args_to_string([], Acc) ->
query_args_to_string([{K, V} | Rest], Acc) ->
query_args_to_string(Rest, [K ++ "=" ++ couch_httpd:quote(V) | Acc]).
-
do_redirect(_Worker, Code, Headers, #httpdb{url = Url} = HttpDb, Params, _Cb) ->
RedirectUrl = redirect_url(Headers, Url),
{HttpDb2, Params2} = after_redirect(RedirectUrl, Code, HttpDb, Params),
throw({retry, HttpDb2, Params2}).
-
redirect_url(RespHeaders, OrigUrl) ->
MochiHeaders = mochiweb_headers:make(RespHeaders),
RedUrl = mochiweb_headers:get_value("Location", MochiHeaders),
@@ -439,25 +451,28 @@ redirect_url(RespHeaders, OrigUrl) ->
host = Host,
host_type = HostType,
port = Port,
- path = Path, % includes query string
+ % includes query string
+ path = Path,
protocol = Proto
} = ibrowse_lib:parse_url(RedUrl),
#url{
username = User,
password = Passwd
} = ibrowse_lib:parse_url(OrigUrl),
- Creds = case is_list(User) andalso is_list(Passwd) of
- true ->
- User ++ ":" ++ Passwd ++ "@";
- false ->
- []
- end,
- HostPart = case HostType of
- ipv6_address ->
- "[" ++ Host ++ "]";
- _ ->
- Host
- end,
+ Creds =
+ case is_list(User) andalso is_list(Passwd) of
+ true ->
+ User ++ ":" ++ Passwd ++ "@";
+ false ->
+ []
+ end,
+ HostPart =
+ case HostType of
+ ipv6_address ->
+ "[" ++ Host ++ "]";
+ _ ->
+ Host
+ end,
atom_to_list(Proto) ++ "://" ++ Creds ++ HostPart ++ ":" ++
integer_to_list(Port) ++ Path.
@@ -470,24 +485,20 @@ after_redirect(RedirectUrl, HttpDb, Params) ->
Params2 = lists:keydelete(path, 1, lists:keydelete(qs, 1, Params)),
{HttpDb#httpdb{url = RedirectUrl}, Params2}.
-
backoff_key(HttpDb, Params) ->
Method = get_value(method, Params, get),
Url = HttpDb#httpdb.url,
{Url, Method}.
-
backoff(HttpDb, Params) ->
Key = backoff_key(HttpDb, Params),
couch_replicator_rate_limiter:failure(Key),
throw({retry, HttpDb, Params}).
-
backoff_success(HttpDb, Params) ->
Key = backoff_key(HttpDb, Params),
couch_replicator_rate_limiter:success(Key).
-
backoff_before_request(Worker, HttpDb, Params) ->
Key = backoff_key(HttpDb, Params),
Limit = couch_replicator_rate_limiter:max_interval(),
@@ -500,25 +511,27 @@ backoff_before_request(Worker, HttpDb, Params) ->
ok
end.
-
merge_headers(Headers1, Headers2) when is_list(Headers1), is_list(Headers2) ->
Empty = mochiweb_headers:empty(),
Merged = mochiweb_headers:enter_from_list(Headers1 ++ Headers2, Empty),
mochiweb_headers:to_list(Merged).
-
-ifdef(TEST).
-include_lib("couch/include/couch_eunit.hrl").
-
merge_headers_test() ->
?assertEqual([], merge_headers([], [])),
?assertEqual([{"a", "x"}], merge_headers([], [{"a", "x"}])),
?assertEqual([{"a", "x"}], merge_headers([{"a", "x"}], [])),
?assertEqual([{"a", "y"}], merge_headers([{"A", "x"}], [{"a", "y"}])),
- ?assertEqual([{"a", "y"}, {"B", "x"}], merge_headers([{"B", "x"}],
- [{"a", "y"}])),
+ ?assertEqual(
+ [{"a", "y"}, {"B", "x"}],
+ merge_headers(
+ [{"B", "x"}],
+ [{"a", "y"}]
+ )
+ ),
?assertEqual([{"a", "y"}], merge_headers([{"A", "z"}, {"a", "y"}], [])),
?assertEqual([{"a", "y"}], merge_headers([], [{"A", "z"}, {"a", "y"}])).
diff --git a/src/couch_replicator/src/couch_replicator_httpc_pool.erl b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
index dc9a749ac..ff070f922 100644
--- a/src/couch_replicator/src/couch_replicator_httpc_pool.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
@@ -31,13 +31,15 @@
-record(state, {
url,
proxy_url,
- limit, % max # of workers allowed
+ % max # of workers allowed
+ limit,
workers = [],
- waiting = queue:new(), % blocked clients waiting for a worker
- callers = [] % clients who've been given a worker
+ % blocked clients waiting for a worker
+ waiting = queue:new(),
+ % clients who've been given a worker
+ callers = []
}).
-
start_link(Url, Options) ->
start_link(Url, undefined, Options).
@@ -47,11 +49,9 @@ start_link(Url, ProxyUrl, Options) ->
stop(Pool) ->
ok = gen_server:call(Pool, stop, infinity).
-
get_worker(Pool) ->
{ok, _Worker} = gen_server:call(Pool, get_worker, infinity).
-
release_worker(Pool, Worker) ->
ok = gen_server:cast(Pool, {release_worker, Worker}).
@@ -67,7 +67,6 @@ init({Url, ProxyUrl, Options}) ->
},
{ok, State}.
-
handle_call(get_worker, From, State) ->
#state{
waiting = Waiting,
@@ -78,22 +77,20 @@ handle_call(get_worker, From, State) ->
workers = Workers
} = State,
case length(Workers) >= Limit of
- true ->
- {noreply, State#state{waiting = queue:in(From, Waiting)}};
- false ->
- % If the call to acquire fails, the worker pool will crash with a
- % badmatch.
- {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
- NewState = State#state{
- workers = [Worker | Workers],
- callers = monitor_client(Callers, Worker, From)
- },
- {reply, {ok, Worker}, NewState}
+ true ->
+ {noreply, State#state{waiting = queue:in(From, Waiting)}};
+ false ->
+ % If the call to acquire fails, the worker pool will crash with a
+ % badmatch.
+ {ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
+ NewState = State#state{
+ workers = [Worker | Workers],
+ callers = monitor_client(Callers, Worker, From)
+ },
+ {reply, {ok, Worker}, NewState}
end;
-
handle_call(stop, _From, State) ->
{stop, normal, ok, State};
-
handle_call({release_worker_sync, Worker}, _From, State) ->
{reply, ok, release_worker_internal(Worker, State)}.
@@ -115,8 +112,10 @@ handle_info({'EXIT', Pid, _Reason}, State) ->
Workers2 ->
case queue:out(Waiting) of
{empty, _} ->
- {noreply, State#state{workers = Workers2,
- callers = NewCallers0}};
+ {noreply, State#state{
+ workers = Workers2,
+ callers = NewCallers0
+ }};
{{value, From}, Waiting2} ->
{ok, Worker} = couch_replicator_connection:acquire(Url, ProxyUrl),
NewCallers1 = monitor_client(NewCallers0, Worker, From),
@@ -129,7 +128,6 @@ handle_info({'EXIT', Pid, _Reason}, State) ->
{noreply, NewState}
end
end;
-
handle_info({'DOWN', Ref, process, _, _}, #state{callers = Callers} = State) ->
case lists:keysearch(Ref, 2, Callers) of
{value, {Worker, Ref}} ->
@@ -138,10 +136,9 @@ handle_info({'DOWN', Ref, process, _, _}, #state{callers = Callers} = State) ->
{noreply, State}
end.
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
-
terminate(_Reason, _State) ->
ok.
@@ -150,10 +147,14 @@ format_status(_Opt, [_PDict, State]) ->
url = Url,
proxy_url = ProxyUrl
} = State,
- [{data, [{"State", State#state{
- url = couch_util:url_strip_password(Url),
- proxy_url = couch_util:url_strip_password(ProxyUrl)
- }}]}].
+ [
+ {data, [
+ {"State", State#state{
+ url = couch_util:url_strip_password(Url),
+ proxy_url = couch_util:url_strip_password(ProxyUrl)
+ }}
+ ]}
+ ].
monitor_client(Callers, Worker, {ClientPid, _}) ->
[{Worker, erlang:monitor(process, ClientPid)} | Callers].
@@ -170,29 +171,31 @@ demonitor_client(Callers, Worker) ->
release_worker_internal(Worker, State) ->
#state{waiting = Waiting, callers = Callers} = State,
NewCallers0 = demonitor_client(Callers, Worker),
- case is_process_alive(Worker) andalso
- lists:member(Worker, State#state.workers) of
- true ->
- Workers = case queue:out(Waiting) of
- {empty, Waiting2} ->
- NewCallers1 = NewCallers0,
- couch_replicator_connection:release(Worker),
- State#state.workers -- [Worker];
- {{value, From}, Waiting2} ->
- NewCallers1 = monitor_client(NewCallers0, Worker, From),
- gen_server:reply(From, {ok, Worker}),
- State#state.workers
- end,
- NewState = State#state{
- workers = Workers,
- waiting = Waiting2,
- callers = NewCallers1
- },
- NewState;
- false ->
- State#state{callers = NewCallers0}
- end.
-
+ case
+ is_process_alive(Worker) andalso
+ lists:member(Worker, State#state.workers)
+ of
+ true ->
+ Workers =
+ case queue:out(Waiting) of
+ {empty, Waiting2} ->
+ NewCallers1 = NewCallers0,
+ couch_replicator_connection:release(Worker),
+ State#state.workers -- [Worker];
+ {{value, From}, Waiting2} ->
+ NewCallers1 = monitor_client(NewCallers0, Worker, From),
+ gen_server:reply(From, {ok, Worker}),
+ State#state.workers
+ end,
+ NewState = State#state{
+ workers = Workers,
+ waiting = Waiting2,
+ callers = NewCallers1
+ },
+ NewState;
+ false ->
+ State#state{callers = NewCallers0}
+ end.
-ifdef(TEST).
@@ -210,4 +213,4 @@ format_status_test_() ->
ok
end).
--endif. \ No newline at end of file
+-endif.
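
The get_worker handling above boils down to a bounded checkout: once `limit` workers are lent out, callers are queued until a release. Below is a minimal sketch of that decision, using a map in place of #state{} and make_ref() in place of a real connection; both are assumptions made for illustration.

```
%% Bounded-checkout sketch of handle_call(get_worker, ...) above.
-module(httpc_pool_example).
-export([checkout/2]).

checkout(#{workers := Workers, limit := Limit, waiting := Waiting} = Pool, From) ->
    case length(Workers) >= Limit of
        true ->
            %% At the limit: park the caller until a worker is released.
            {wait, Pool#{waiting := queue:in(From, Waiting)}};
        false ->
            Worker = make_ref(),
            {ok, Worker, Pool#{workers := [Worker | Workers]}}
    end.
```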
diff --git a/src/couch_replicator/src/couch_replicator_httpd.erl b/src/couch_replicator/src/couch_replicator_httpd.erl
index 6efd49335..77c78efe2 100644
--- a/src/couch_replicator/src/couch_replicator_httpd.erl
+++ b/src/couch_replicator/src/couch_replicator_httpd.erl
@@ -30,50 +30,68 @@
to_binary/1
]).
-
-define(DEFAULT_TASK_LIMIT, 100).
-define(REPDB, <<"_replicator">>).
% This is a macro so it can be used as a guard
--define(ISREPDB(X), X =:= ?REPDB orelse binary_part(X, {byte_size(X), -12})
- =:= <<"/_replicator">>).
-
-
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"jobs">>]}=Req) ->
- Limit = couch_replicator_httpd_util:parse_int_param(Req, "limit",
- ?DEFAULT_TASK_LIMIT, 0, infinity),
- Skip = couch_replicator_httpd_util:parse_int_param(Req, "skip", 0, 0,
- infinity),
+-define(ISREPDB(X),
+ X =:= ?REPDB orelse
+ binary_part(X, {byte_size(X), -12}) =:=
+ <<"/_replicator">>
+).
+
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"jobs">>]} = Req) ->
+ Limit = couch_replicator_httpd_util:parse_int_param(
+ Req,
+ "limit",
+ ?DEFAULT_TASK_LIMIT,
+ 0,
+ infinity
+ ),
+ Skip = couch_replicator_httpd_util:parse_int_param(
+ Req,
+ "skip",
+ 0,
+ 0,
+ infinity
+ ),
{Replies, _BadNodes} = rpc:multicall(couch_replicator_scheduler, jobs, []),
Flatlist = lists:concat(Replies),
% couch_replicator_scheduler:job_ejson/1 guarantees {id, Id} to be the
    % first item in the list
- Sorted = lists:sort(fun({[{id,A}|_]},{[{id,B}|_]}) -> A =< B end, Flatlist),
+ Sorted = lists:sort(fun({[{id, A} | _]}, {[{id, B} | _]}) -> A =< B end, Flatlist),
Total = length(Sorted),
Offset = min(Skip, Total),
- Sublist = lists:sublist(Sorted, Offset+1, Limit),
- Sublist1 = [couch_replicator_httpd_util:update_db_name(Task)
- || Task <- Sublist],
+ Sublist = lists:sublist(Sorted, Offset + 1, Limit),
+ Sublist1 = [
+ couch_replicator_httpd_util:update_db_name(Task)
+ || Task <- Sublist
+ ],
send_json(Req, {[{total_rows, Total}, {offset, Offset}, {jobs, Sublist1}]});
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"jobs">>,JobId]}=Req) ->
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"jobs">>, JobId]} = Req) ->
case couch_replicator:job(JobId) of
{ok, JobInfo} ->
send_json(Req, couch_replicator_httpd_util:update_db_name(JobInfo));
{error, not_found} ->
throw(not_found)
end;
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>]}=Req) ->
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"docs">>]} = Req) ->
handle_scheduler_docs(?REPDB, Req);
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>,Db]}=Req)
- when ?ISREPDB(Db) ->
+handle_scheduler_req(#httpd{method = 'GET', path_parts = [_, <<"docs">>, Db]} = Req) when
+ ?ISREPDB(Db)
+->
handle_scheduler_docs(Db, Req);
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>,Db,DocId]}
- = Req) when ?ISREPDB(Db) ->
+handle_scheduler_req(
+ #httpd{method = 'GET', path_parts = [_, <<"docs">>, Db, DocId]} =
+ Req
+) when ?ISREPDB(Db) ->
handle_scheduler_doc(Db, DocId, Req);
% Allow users to pass in unencoded _replicator database names (/ are not
% escaped). This is possible here because _replicator is not a valid document
% ID, so we can disambiguate between an element of a db path and the document ID.
-handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>|Unquoted]}
- = Req) ->
+handle_scheduler_req(
+ #httpd{method = 'GET', path_parts = [_, <<"docs">> | Unquoted]} =
+ Req
+) ->
case parse_unquoted_docs_path(Unquoted) of
{db_only, Db} ->
handle_scheduler_docs(Db, Req);
@@ -82,38 +100,37 @@ handle_scheduler_req(#httpd{method='GET', path_parts=[_,<<"docs">>|Unquoted]}
{error, invalid} ->
throw(bad_request)
end;
-handle_scheduler_req(#httpd{method='GET'} = Req) ->
+handle_scheduler_req(#httpd{method = 'GET'} = Req) ->
send_json(Req, 404, {[{error, <<"not found">>}]});
handle_scheduler_req(Req) ->
send_method_not_allowed(Req, "GET,HEAD").
-
handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
RepDoc = {Props} = couch_httpd:json_body_obj(Req),
couch_replicator_httpd_util:validate_rep_props(Props),
case couch_replicator:replicate(RepDoc, UserCtx) of
- {error, {Error, Reason}} ->
- send_json(
- Req, 500,
- {[{error, to_binary(Error)}, {reason, to_binary(Reason)}]});
- {error, not_found} ->
- % Tried to cancel a replication that didn't exist.
- send_json(Req, 404, {[{error, <<"not found">>}]});
- {error, Reason} ->
- send_json(Req, 500, {[{error, to_binary(Reason)}]});
- {ok, {cancelled, RepId}} ->
- send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {continuous, RepId}} ->
- send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
- {ok, {HistoryResults}} ->
- send_json(Req, {[{ok, true} | HistoryResults]})
+ {error, {Error, Reason}} ->
+ send_json(
+ Req,
+ 500,
+ {[{error, to_binary(Error)}, {reason, to_binary(Reason)}]}
+ );
+ {error, not_found} ->
+ % Tried to cancel a replication that didn't exist.
+ send_json(Req, 404, {[{error, <<"not found">>}]});
+ {error, Reason} ->
+ send_json(Req, 500, {[{error, to_binary(Reason)}]});
+ {ok, {cancelled, RepId}} ->
+ send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
+ {ok, {continuous, RepId}} ->
+ send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
+ {ok, {HistoryResults}} ->
+ send_json(Req, {[{ok, true} | HistoryResults]})
end;
-
handle_req(Req) ->
send_method_not_allowed(Req, "POST").
-
handle_scheduler_docs(Db, Req) when is_binary(Db) ->
VArgs0 = couch_mrview_http:parse_params(Req, undefined),
StatesQs = chttpd:qs_value(Req, "states"),
@@ -130,8 +147,7 @@ handle_scheduler_docs(Db, Req) when is_binary(Db) ->
Acc = couch_replicator_httpd_util:docs_acc_new(Req, Db, Max),
Cb = fun couch_replicator_httpd_util:docs_cb/2,
{ok, RAcc} = couch_replicator_fabric:docs(Db, Opts, VArgs2, Cb, Acc),
- {ok, couch_replicator_httpd_util:docs_acc_response(RAcc)}.
-
+ {ok, couch_replicator_httpd_util:docs_acc_response(RAcc)}.
handle_scheduler_doc(Db, DocId, Req) when is_binary(Db), is_binary(DocId) ->
UserCtx = Req#httpd.user_ctx,
@@ -142,7 +158,6 @@ handle_scheduler_doc(Db, DocId, Req) when is_binary(Db), is_binary(DocId) ->
throw(not_found)
end.
-
parse_unquoted_docs_path([_, _ | _] = Unquoted) ->
DbAndAfter = lists:dropwhile(fun(E) -> E =/= ?REPDB end, Unquoted),
BeforeRDb = lists:takewhile(fun(E) -> E =/= ?REPDB end, Unquoted),
@@ -155,19 +170,21 @@ parse_unquoted_docs_path([_, _ | _] = Unquoted) ->
{db_and_doc, filename:join(BeforeRDb ++ [?REPDB]), DocId}
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
unquoted_scheduler_docs_path_test_() ->
- [?_assertEqual(Res, parse_unquoted_docs_path(Path)) || {Res, Path} <- [
- {{error, invalid}, [<<"a">>,<< "b">>]},
- {{db_only, <<"a/_replicator">>}, [<<"a">>, ?REPDB]},
- {{db_only, <<"a/b/_replicator">>}, [<<"a">>, <<"b">>, ?REPDB]},
- {{db_and_doc, <<"_replicator">>, <<"x">>}, [?REPDB, <<"x">>]},
- {{db_and_doc, <<"a/_replicator">>, <<"x">>}, [<<"a">>, ?REPDB, <<"x">>]},
- {{error, invalid}, [<<"a/_replicator">>,<<"x">>]}
- ]].
+ [
+ ?_assertEqual(Res, parse_unquoted_docs_path(Path))
+ || {Res, Path} <- [
+ {{error, invalid}, [<<"a">>, <<"b">>]},
+ {{db_only, <<"a/_replicator">>}, [<<"a">>, ?REPDB]},
+ {{db_only, <<"a/b/_replicator">>}, [<<"a">>, <<"b">>, ?REPDB]},
+ {{db_and_doc, <<"_replicator">>, <<"x">>}, [?REPDB, <<"x">>]},
+ {{db_and_doc, <<"a/_replicator">>, <<"x">>}, [<<"a">>, ?REPDB, <<"x">>]},
+ {{error, invalid}, [<<"a/_replicator">>, <<"x">>]}
+ ]
+ ].
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_httpd_util.erl b/src/couch_replicator/src/couch_replicator_httpd_util.erl
index 624eddd2f..ddcc179d4 100644
--- a/src/couch_replicator/src/couch_replicator_httpd_util.erl
+++ b/src/couch_replicator/src/couch_replicator_httpd_util.erl
@@ -35,61 +35,64 @@
to_binary/1
]).
-
parse_replication_state_filter(undefined) ->
- []; % This is the default (wildcard) filter
+ % This is the default (wildcard) filter
+ [];
parse_replication_state_filter(States) when is_list(States) ->
AllStates = couch_replicator:replication_states(),
StrStates = [string:to_lower(S) || S <- string:tokens(States, ",")],
- AtomStates = try
- [list_to_existing_atom(S) || S <- StrStates]
- catch error:badarg ->
- Msg1 = io_lib:format("States must be one or more of ~w", [AllStates]),
- throw({query_parse_error, ?l2b(Msg1)})
- end,
+ AtomStates =
+ try
+ [list_to_existing_atom(S) || S <- StrStates]
+ catch
+ error:badarg ->
+ Msg1 = io_lib:format("States must be one or more of ~w", [AllStates]),
+ throw({query_parse_error, ?l2b(Msg1)})
+ end,
AllSet = sets:from_list(AllStates),
StatesSet = sets:from_list(AtomStates),
Diff = sets:to_list(sets:subtract(StatesSet, AllSet)),
case Diff of
- [] ->
- AtomStates;
- _ ->
- Args = [Diff, AllStates],
- Msg2 = io_lib:format("Unknown states ~w. Choose from: ~w", Args),
- throw({query_parse_error, ?l2b(Msg2)})
+ [] ->
+ AtomStates;
+ _ ->
+ Args = [Diff, AllStates],
+ Msg2 = io_lib:format("Unknown states ~w. Choose from: ~w", Args),
+ throw({query_parse_error, ?l2b(Msg2)})
end.
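% Illustrative sketch (editor's note, not from the original source): a request
% such as GET /_scheduler/docs?states=running,pending is parsed here to
% [running, pending], assuming both names are returned by
% couch_replicator:replication_states(); any unknown state name causes a
% query_parse_error to be thrown.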
-
parse_int_param(Req, Param, Default, Min, Max) ->
- IntVal = try
- list_to_integer(chttpd:qs_value(Req, Param, integer_to_list(Default)))
- catch error:badarg ->
- Msg1 = io_lib:format("~s must be an integer", [Param]),
- throw({query_parse_error, ?l2b(Msg1)})
- end,
+ IntVal =
+ try
+ list_to_integer(chttpd:qs_value(Req, Param, integer_to_list(Default)))
+ catch
+ error:badarg ->
+ Msg1 = io_lib:format("~s must be an integer", [Param]),
+ throw({query_parse_error, ?l2b(Msg1)})
+ end,
case IntVal >= Min andalso IntVal =< Max of
- true ->
- IntVal;
- false ->
- Msg2 = io_lib:format("~s not in range of [~w,~w]", [Param, Min, Max]),
- throw({query_parse_error, ?l2b(Msg2)})
+ true ->
+ IntVal;
+ false ->
+ Msg2 = io_lib:format("~s not in range of [~w,~w]", [Param, Min, Max]),
+ throw({query_parse_error, ?l2b(Msg2)})
end.
-
validate_rep_props([]) ->
ok;
-validate_rep_props([{<<"query_params">>, {Params}}|Rest]) ->
- lists:foreach(fun
- ({_,V}) when is_binary(V) -> ok;
- ({K,_}) -> throw({bad_request,
- <<K/binary," value must be a string.">>})
- end, Params),
+validate_rep_props([{<<"query_params">>, {Params}} | Rest]) ->
+ lists:foreach(
+ fun
+ ({_, V}) when is_binary(V) -> ok;
+ ({K, _}) -> throw({bad_request, <<K/binary, " value must be a string.">>})
+ end,
+ Params
+ ),
validate_rep_props(Rest);
-validate_rep_props([_|Rest]) ->
+validate_rep_props([_ | Rest]) ->
validate_rep_props(Rest).
-
-prepend_val(#vacc{prepend=Prepend}) ->
+prepend_val(#vacc{prepend = Prepend}) ->
case Prepend of
undefined ->
"";
@@ -97,9 +100,9 @@ prepend_val(#vacc{prepend=Prepend}) ->
Prepend
end.
-
-maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
+maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
+ Size > 0 andalso (Size + Len) > Max
+->
#vacc{buffer = Buffer, resp = Resp} = Acc,
{ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
{ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
@@ -113,71 +116,69 @@ maybe_flush_response(Acc0, Data, Len) ->
{ok, Acc}.
docs_acc_new(Req, Db, Threshold) ->
- #vacc{db=Db, req=Req, threshold=Threshold}.
+ #vacc{db = Db, req = Req, threshold = Threshold}.
docs_acc_response(#vacc{resp = Resp}) ->
Resp.
-docs_cb({error, Reason}, #vacc{resp=undefined}=Acc) ->
+docs_cb({error, Reason}, #vacc{resp = undefined} = Acc) ->
{ok, Resp} = chttpd:send_error(Acc#vacc.req, Reason),
- {ok, Acc#vacc{resp=Resp}};
-
-docs_cb(complete, #vacc{resp=undefined}=Acc) ->
+ {ok, Acc#vacc{resp = Resp}};
+docs_cb(complete, #vacc{resp = undefined} = Acc) ->
% Nothing in view
{ok, Resp} = chttpd:send_json(Acc#vacc.req, 200, {[{rows, []}]}),
- {ok, Acc#vacc{resp=Resp}};
-
-docs_cb(Msg, #vacc{resp=undefined}=Acc) ->
+ {ok, Acc#vacc{resp = Resp}};
+docs_cb(Msg, #vacc{resp = undefined} = Acc) ->
%% Start response
Headers = [],
{ok, Resp} = chttpd:start_delayed_json_response(Acc#vacc.req, 200, Headers),
- docs_cb(Msg, Acc#vacc{resp=Resp, should_close=true});
-
-docs_cb({error, Reason}, #vacc{resp=Resp}=Acc) ->
+ docs_cb(Msg, Acc#vacc{resp = Resp, should_close = true});
+docs_cb({error, Reason}, #vacc{resp = Resp} = Acc) ->
{ok, Resp1} = chttpd:send_delayed_error(Resp, Reason),
- {ok, Acc#vacc{resp=Resp1}};
-
-docs_cb(complete, #vacc{resp=Resp, buffer=Buf, threshold=Max}=Acc) ->
+ {ok, Acc#vacc{resp = Resp1}};
+docs_cb(complete, #vacc{resp = Resp, buffer = Buf, threshold = Max} = Acc) ->
% Finish view output and possibly end the response
{ok, Resp1} = chttpd:close_delayed_json_object(Resp, Buf, "\r\n]}", Max),
case Acc#vacc.should_close of
true ->
{ok, Resp2} = chttpd:end_delayed_json_response(Resp1),
- {ok, Acc#vacc{resp=Resp2}};
+ {ok, Acc#vacc{resp = Resp2}};
_ ->
- {ok, Acc#vacc{resp=Resp1, meta_sent=false, row_sent=false,
- prepend=",\r\n", buffer=[], bufsize=0}}
+ {ok, Acc#vacc{
+ resp = Resp1,
+ meta_sent = false,
+ row_sent = false,
+ prepend = ",\r\n",
+ buffer = [],
+ bufsize = 0
+ }}
end;
-
-docs_cb({meta, Meta}, #vacc{meta_sent=false, row_sent=false}=Acc) ->
+docs_cb({meta, Meta}, #vacc{meta_sent = false, row_sent = false} = Acc) ->
% Sending metadata as we've not sent it or any row yet
- Parts = case couch_util:get_value(total, Meta) of
- undefined -> [];
- Total -> [io_lib:format("\"total_rows\":~p", [adjust_total(Total)])]
- end ++ case couch_util:get_value(offset, Meta) of
- undefined -> [];
- Offset -> [io_lib:format("\"offset\":~p", [Offset])]
- end ++ ["\"docs\":["],
+ Parts =
+ case couch_util:get_value(total, Meta) of
+ undefined -> [];
+ Total -> [io_lib:format("\"total_rows\":~p", [adjust_total(Total)])]
+ end ++
+ case couch_util:get_value(offset, Meta) of
+ undefined -> [];
+ Offset -> [io_lib:format("\"offset\":~p", [Offset])]
+ end ++ ["\"docs\":["],
Chunk = [prepend_val(Acc), "{", string:join(Parts, ","), "\r\n"],
{ok, AccOut} = maybe_flush_response(Acc, Chunk, iolist_size(Chunk)),
- {ok, AccOut#vacc{prepend="", meta_sent=true}};
-
-
-docs_cb({meta, _Meta}, #vacc{}=Acc) ->
+ {ok, AccOut#vacc{prepend = "", meta_sent = true}};
+docs_cb({meta, _Meta}, #vacc{} = Acc) ->
%% ignore metadata
{ok, Acc};
-
-docs_cb({row, Row}, #vacc{meta_sent=false}=Acc) ->
+docs_cb({row, Row}, #vacc{meta_sent = false} = Acc) ->
%% sorted=false and row arrived before meta
% Adding another row
Chunk = [prepend_val(Acc), "{\"docs\":[\r\n", row_to_json(Row)],
- maybe_flush_response(Acc#vacc{meta_sent=true, row_sent=true}, Chunk, iolist_size(Chunk));
-
-docs_cb({row, Row}, #vacc{meta_sent=true}=Acc) ->
+ maybe_flush_response(Acc#vacc{meta_sent = true, row_sent = true}, Chunk, iolist_size(Chunk));
+docs_cb({row, Row}, #vacc{meta_sent = true} = Acc) ->
% Adding another row
Chunk = [prepend_val(Acc), row_to_json(Row)],
- maybe_flush_response(Acc#vacc{row_sent=true}, Chunk, iolist_size(Chunk)).
-
+ maybe_flush_response(Acc#vacc{row_sent = true}, Chunk, iolist_size(Chunk)).
update_db_name({Props}) ->
{value, {database, DbName}, Props1} = lists:keytake(database, 1, Props),
@@ -193,7 +194,6 @@ row_to_json(Row) ->
Doc1 = update_db_name(Doc0),
?JSON_ENCODE(Doc1).
-
%% Adjust Total as there is an automatically created validation design doc
adjust_total(Total) when is_integer(Total), Total > 0 ->
Total - 1;
diff --git a/src/couch_replicator/src/couch_replicator_ids.erl b/src/couch_replicator/src/couch_replicator_ids.erl
index 316e6a28a..6543cf069 100644
--- a/src/couch_replicator/src/couch_replicator_ids.erl
+++ b/src/couch_replicator/src/couch_replicator_ids.erl
@@ -34,7 +34,6 @@ replication_id(#rep{options = Options} = Rep) ->
BaseId = replication_id(Rep, ?REP_ID_VERSION),
{BaseId, maybe_append_options([continuous, create_target], Options)}.
-
% Versioned clauses for generating replication IDs.
% If a change is made to how replications are identified,
% please add a new clause and increase ?REP_ID_VERSION.
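% Illustrative sketch (editor's note; the concrete value is hypothetical): the
% resulting pair is a hex md5 of the endpoint and filter terms plus an options
% suffix, e.g. {"f1ac...9b", "+continuous+create_target"} for a continuous
% replication that also creates its target (see maybe_append_options/2 below).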
@@ -44,37 +43,34 @@ replication_id(#rep{} = Rep, 4) ->
SrcInfo = get_v4_endpoint(Rep#rep.source),
TgtInfo = get_v4_endpoint(Rep#rep.target),
maybe_append_filters([UUID, SrcInfo, TgtInfo], Rep);
-
replication_id(#rep{} = Rep, 3) ->
UUID = couch_server:get_uuid(),
Src = get_rep_endpoint(Rep#rep.source),
Tgt = get_rep_endpoint(Rep#rep.target),
maybe_append_filters([UUID, Src, Tgt], Rep);
-
replication_id(#rep{} = Rep, 2) ->
{ok, HostName} = inet:gethostname(),
- Port = case (catch mochiweb_socket_server:get(couch_httpd, port)) of
- P when is_number(P) ->
- P;
- _ ->
- % On restart we might be called before the couch_httpd process is
- % started.
- % TODO: we might be under an SSL socket server only, or both under
- % SSL and a non-SSL socket.
- % ... mochiweb_socket_server:get(https, port)
- config:get_integer("httpd", "port", 5984)
- end,
+ Port =
+ case (catch mochiweb_socket_server:get(couch_httpd, port)) of
+ P when is_number(P) ->
+ P;
+ _ ->
+ % On restart we might be called before the couch_httpd process is
+ % started.
+ % TODO: we might be under an SSL socket server only, or both under
+ % SSL and a non-SSL socket.
+ % ... mochiweb_socket_server:get(https, port)
+ config:get_integer("httpd", "port", 5984)
+ end,
Src = get_rep_endpoint(Rep#rep.source),
Tgt = get_rep_endpoint(Rep#rep.target),
maybe_append_filters([HostName, Port, Src, Tgt], Rep);
-
replication_id(#rep{} = Rep, 1) ->
{ok, HostName} = inet:gethostname(),
Src = get_rep_endpoint(Rep#rep.source),
Tgt = get_rep_endpoint(Rep#rep.target),
maybe_append_filters([HostName, Src, Tgt], Rep).
-
-spec convert([_] | binary() | {string(), string()}) -> {string(), string()}.
convert(Id) when is_list(Id) ->
convert(?l2b(Id));
@@ -87,59 +83,62 @@ convert(Id0) when is_binary(Id0) ->
convert({BaseId, Ext} = Id) when is_list(BaseId), is_list(Ext) ->
Id.
-
% Private functions
-maybe_append_filters(Base,
- #rep{source = Source, options = Options}) ->
- Base2 = Base ++
- case couch_replicator_filters:parse(Options) of
- {ok, nil} ->
- [];
- {ok, {view, Filter, QueryParams}} ->
- [Filter, QueryParams];
- {ok, {user, {Doc, Filter}, QueryParams}} ->
- case couch_replicator_filters:fetch(Doc, Filter, Source) of
- {ok, Code} ->
- [Code, QueryParams];
- {error, Error} ->
- throw({filter_fetch_error, Error})
- end;
- {ok, {docids, DocIds}} ->
- [DocIds];
- {ok, {mango, Selector}} ->
- [Selector];
- {error, FilterParseError} ->
- throw({error, FilterParseError})
- end,
+maybe_append_filters(
+ Base,
+ #rep{source = Source, options = Options}
+) ->
+ Base2 =
+ Base ++
+ case couch_replicator_filters:parse(Options) of
+ {ok, nil} ->
+ [];
+ {ok, {view, Filter, QueryParams}} ->
+ [Filter, QueryParams];
+ {ok, {user, {Doc, Filter}, QueryParams}} ->
+ case couch_replicator_filters:fetch(Doc, Filter, Source) of
+ {ok, Code} ->
+ [Code, QueryParams];
+ {error, Error} ->
+ throw({filter_fetch_error, Error})
+ end;
+ {ok, {docids, DocIds}} ->
+ [DocIds];
+ {ok, {mango, Selector}} ->
+ [Selector];
+ {error, FilterParseError} ->
+ throw({error, FilterParseError})
+ end,
couch_util:to_hex(couch_hash:md5_hash(term_to_binary(Base2))).
-
maybe_append_options(Options, RepOptions) ->
- lists:foldl(fun(Option, Acc) ->
- Acc ++
- case couch_util:get_value(Option, RepOptions, false) of
- true ->
- "+" ++ atom_to_list(Option);
- false ->
- ""
- end
- end, [], Options).
-
+ lists:foldl(
+ fun(Option, Acc) ->
+ Acc ++
+ case couch_util:get_value(Option, RepOptions, false) of
+ true ->
+ "+" ++ atom_to_list(Option);
+ false ->
+ ""
+ end
+ end,
+ [],
+ Options
+ ).
-get_rep_endpoint(#httpdb{url=Url, headers=Headers}) ->
+get_rep_endpoint(#httpdb{url = Url, headers = Headers}) ->
DefaultHeaders = (#httpdb{})#httpdb.headers,
{remote, Url, Headers -- DefaultHeaders}.
-
get_v4_endpoint(#httpdb{} = HttpDb) ->
{remote, Url, Headers} = get_rep_endpoint(HttpDb),
{User, _} = couch_replicator_utils:get_basic_auth_creds(HttpDb),
{Host, NonDefaultPort, Path} = get_v4_url_info(Url),
- OAuth = undefined, % Keep this to ensure checkpoints don't change
+ % Keep this to ensure checkpoints don't change
+ OAuth = undefined,
{remote, User, Host, NonDefaultPort, Path, Headers, OAuth}.
-
get_v4_url_info(Url) when is_binary(Url) ->
get_v4_url_info(binary_to_list(Url));
get_v4_url_info(Url) ->
@@ -158,7 +157,6 @@ get_v4_url_info(Url) ->
{Host, NonDefaultPort, Path}
end.
-
get_non_default_port(https, 443) ->
default;
get_non_default_port(http, 80) ->
@@ -168,112 +166,117 @@ get_non_default_port(http, 5984) ->
get_non_default_port(_Schema, Port) ->
Port.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
replication_id_convert_test_() ->
- [?_assertEqual(Expected, convert(Id)) || {Expected, Id} <- [
- {{"abc", ""}, "abc"},
- {{"abc", ""}, <<"abc">>},
- {{"abc", "+x+y"}, <<"abc+x+y">>},
- {{"abc", "+x+y"}, {"abc", "+x+y"}},
- {{"abc", "+x+y"}, <<"abc x y">>}
- ]].
+ [
+ ?_assertEqual(Expected, convert(Id))
+ || {Expected, Id} <- [
+ {{"abc", ""}, "abc"},
+ {{"abc", ""}, <<"abc">>},
+ {{"abc", "+x+y"}, <<"abc+x+y">>},
+ {{"abc", "+x+y"}, {"abc", "+x+y"}},
+ {{"abc", "+x+y"}, <<"abc x y">>}
+ ]
+ ].
http_v4_endpoint_test_() ->
- [?_assertMatch({remote, User, Host, Port, Path, HeadersNoAuth, undefined},
- begin
- HttpDb = #httpdb{url = Url, headers = Headers, auth_props = Auth},
- HttpDb1 = couch_replicator_utils:normalize_basic_auth(HttpDb),
- get_v4_endpoint(HttpDb1)
- end) ||
- {{User, Host, Port, Path, HeadersNoAuth}, {Url, Headers, Auth}} <- [
- {
- {undefined, "host", default, "/", []},
- {"http://host", [], []}
- },
- {
- {undefined, "host", default, "/", []},
- {"https://host", [], []}
- },
- {
- {undefined, "host", default, "/", []},
- {"http://host:5984", [], []}
- },
- {
- {undefined, "host", 1, "/", []},
- {"http://host:1", [], []}
- },
- {
- {undefined, "host", 2, "/", []},
- {"https://host:2", [], []}
- },
- {
- {undefined, "host", default, "/", [{"h","v"}]},
- {"http://host", [{"h","v"}], []}
- },
- {
- {undefined, "host", default, "/a/b", []},
- {"http://host/a/b", [], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://user:pass@host", [], []}
- },
- {
- {"user", "host", 3, "/", []},
- {"http://user:pass@host:3", [], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://user:newpass@host", [], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://host", [basic_auth("user","pass")], []}
- },
- {
- {"user", "host", default, "/", []},
- {"http://host", [basic_auth("user","newpass")], []}
- },
- {
- {"user3", "host", default, "/", []},
- {"http://user1:pass1@host", [basic_auth("user2","pass2")],
- auth_props("user3", "pass3")}
- },
- {
- {"user2", "host", default, "/", [{"h", "v"}]},
- {"http://host", [{"h", "v"}, basic_auth("user","pass")],
- auth_props("user2", "pass2")}
- },
- {
- {"user", "host", default, "/", [{"h", "v"}]},
- {"http://host", [{"h", "v"}], auth_props("user", "pass")}
- },
- {
- {undefined, "random_junk", undefined, undefined},
- {"random_junk", [], []}
- },
- {
- {undefined, "host", default, "/", []},
- {"http://host", [{"Authorization", "Basic bad"}], []}
- }
+ [
+ ?_assertMatch(
+ {remote, User, Host, Port, Path, HeadersNoAuth, undefined},
+ begin
+ HttpDb = #httpdb{url = Url, headers = Headers, auth_props = Auth},
+ HttpDb1 = couch_replicator_utils:normalize_basic_auth(HttpDb),
+ get_v4_endpoint(HttpDb1)
+ end
+ )
+ || {{User, Host, Port, Path, HeadersNoAuth}, {Url, Headers, Auth}} <- [
+ {
+ {undefined, "host", default, "/", []},
+ {"http://host", [], []}
+ },
+ {
+ {undefined, "host", default, "/", []},
+ {"https://host", [], []}
+ },
+ {
+ {undefined, "host", default, "/", []},
+ {"http://host:5984", [], []}
+ },
+ {
+ {undefined, "host", 1, "/", []},
+ {"http://host:1", [], []}
+ },
+ {
+ {undefined, "host", 2, "/", []},
+ {"https://host:2", [], []}
+ },
+ {
+ {undefined, "host", default, "/", [{"h", "v"}]},
+ {"http://host", [{"h", "v"}], []}
+ },
+ {
+ {undefined, "host", default, "/a/b", []},
+ {"http://host/a/b", [], []}
+ },
+ {
+ {"user", "host", default, "/", []},
+ {"http://user:pass@host", [], []}
+ },
+ {
+ {"user", "host", 3, "/", []},
+ {"http://user:pass@host:3", [], []}
+ },
+ {
+ {"user", "host", default, "/", []},
+ {"http://user:newpass@host", [], []}
+ },
+ {
+ {"user", "host", default, "/", []},
+ {"http://host", [basic_auth("user", "pass")], []}
+ },
+ {
+ {"user", "host", default, "/", []},
+ {"http://host", [basic_auth("user", "newpass")], []}
+ },
+ {
+ {"user3", "host", default, "/", []},
+ {"http://user1:pass1@host", [basic_auth("user2", "pass2")],
+ auth_props("user3", "pass3")}
+ },
+ {
+ {"user2", "host", default, "/", [{"h", "v"}]},
+ {"http://host", [{"h", "v"}, basic_auth("user", "pass")],
+ auth_props("user2", "pass2")}
+ },
+ {
+ {"user", "host", default, "/", [{"h", "v"}]},
+ {"http://host", [{"h", "v"}], auth_props("user", "pass")}
+ },
+ {
+ {undefined, "random_junk", undefined, undefined},
+ {"random_junk", [], []}
+ },
+ {
+ {undefined, "host", default, "/", []},
+ {"http://host", [{"Authorization", "Basic bad"}], []}
+ }
]
].
-
basic_auth(User, Pass) ->
B64Auth = base64:encode_to_string(User ++ ":" ++ Pass),
{"Authorization", "Basic " ++ B64Auth}.
-
auth_props(User, Pass) when is_list(User), is_list(Pass) ->
- [{<<"basic">>, {[
- {<<"username">>, list_to_binary(User)},
- {<<"password">>, list_to_binary(Pass)}
- ]}}].
+ [
+ {<<"basic">>,
+ {[
+ {<<"username">>, list_to_binary(User)},
+ {<<"password">>, list_to_binary(Pass)}
+ ]}}
+ ].
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_job_sup.erl b/src/couch_replicator/src/couch_replicator_job_sup.erl
index 9ea65e85f..e3d15c041 100644
--- a/src/couch_replicator/src/couch_replicator_job_sup.erl
+++ b/src/couch_replicator/src/couch_replicator_job_sup.erl
@@ -20,7 +20,7 @@
]).
start_link() ->
- supervisor:start_link({local,?MODULE}, ?MODULE, []).
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%%=============================================================================
%% supervisor callbacks
diff --git a/src/couch_replicator/src/couch_replicator_notifier.erl b/src/couch_replicator/src/couch_replicator_notifier.erl
index f7640a349..451ec5de7 100644
--- a/src/couch_replicator/src/couch_replicator_notifier.erl
+++ b/src/couch_replicator/src/couch_replicator_notifier.erl
@@ -25,8 +25,11 @@
-include_lib("couch/include/couch_db.hrl").
start_link(FunAcc) ->
- couch_event_sup:start_link(couch_replication,
- {couch_replicator_notifier, make_ref()}, FunAcc).
+ couch_event_sup:start_link(
+ couch_replication,
+ {couch_replicator_notifier, make_ref()},
+ FunAcc
+ ).
notify(Event) ->
gen_event:notify(couch_replication, Event).
@@ -34,7 +37,6 @@ notify(Event) ->
stop(Pid) ->
couch_event_sup:stop(Pid).
-
init(FunAcc) ->
{ok, FunAcc}.
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter.erl b/src/couch_replicator/src/couch_replicator_rate_limiter.erl
index b7b70945c..5d2c184b8 100644
--- a/src/couch_replicator/src/couch_replicator_rate_limiter.erl
+++ b/src/couch_replicator/src/couch_replicator_rate_limiter.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
% This module implements rate limiting based on a variation of the additive
% increase / multiplicative decrease feedback control algorithm.
%
@@ -36,29 +35,28 @@
% function is the current period value. Caller then might decide to sleep for
% that amount of time before or after each request.
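% Illustrative usage sketch (editor's note; the key shape and do_request/1 are
% assumptions made only for this example):
%
%   Key = {"http://host:5984/db/_changes", get},
%   timer:sleep(couch_replicator_rate_limiter:interval(Key)),
%   case do_request(Key) of
%       {ok, _} -> couch_replicator_rate_limiter:success(Key);
%       {error, too_many_requests} -> couch_replicator_rate_limiter:failure(Key)
%   end.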
-
-module(couch_replicator_rate_limiter).
-behaviour(gen_server).
-export([
- start_link/0
+ start_link/0
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3
]).
-export([
- interval/1,
- max_interval/0,
- failure/1,
- success/1
+ interval/1,
+ max_interval/0,
+ failure/1,
+ success/1
]).
% Types
@@ -66,7 +64,6 @@
-type interval() :: non_neg_integer().
-type msec() :: non_neg_integer().
-
% Definitions
% Main parameters of the algorithm. The factor is the multiplicative part and
@@ -98,79 +95,67 @@
% use something similar to solve the ACK compression problem).
-define(SENSITIVITY_TIME_WINDOW, 80).
-
-record(state, {timer}).
-record(rec, {id, backoff, ts}).
-
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-spec interval(key()) -> interval().
interval(Key) ->
{Interval, _Timestamp} = interval_and_timestamp(Key),
Interval.
-
-spec max_interval() -> interval().
max_interval() ->
?MAX_INTERVAL.
-
-spec failure(key()) -> interval().
failure(Key) ->
{Interval, Timestamp} = interval_and_timestamp(Key),
update_failure(Key, Interval, Timestamp, now_msec()).
-
-spec success(key()) -> interval().
success(Key) ->
{Interval, Timestamp} = interval_and_timestamp(Key),
update_success(Key, Interval, Timestamp, now_msec()).
-
% gen_server callbacks
init([]) ->
couch_replicator_rate_limiter_tables:create(#rec.id),
{ok, #state{timer = new_timer()}}.
-
terminate(_Reason, _State) ->
ok.
-
handle_call(_Msg, _From, State) ->
{reply, invalid, State}.
-
handle_cast(_, State) ->
{noreply, State}.
-
handle_info(cleanup, #state{timer = Timer}) ->
erlang:cancel_timer(Timer),
TIds = couch_replicator_rate_limiter_tables:tids(),
[cleanup_table(TId, now_msec() - ?MAX_INTERVAL) || TId <- TIds],
{noreply, #state{timer = new_timer()}}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
% Private functions
-spec update_success(any(), interval(), msec(), msec()) -> interval().
update_success(_Key, _Interval, _Timestamp = 0, _Now) ->
- 0; % No ets entry. Keep it that way and don't insert a new one.
-
-update_success(_Key, Interval, Timestamp, Now)
- when Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW ->
- Interval; % Ignore too frequent updates.
-
+ % No ets entry. Keep it that way and don't insert a new one.
+ 0;
+update_success(_Key, Interval, Timestamp, Now) when
+ Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW
+->
+ % Ignore too frequent updates.
+ Interval;
update_success(Key, Interval, Timestamp, Now) ->
DecayedInterval = time_decay(Now - Timestamp, Interval),
AdditiveFactor = additive_factor(DecayedInterval),
@@ -186,19 +171,18 @@ update_success(Key, Interval, Timestamp, Now) ->
insert(Key, NewInterval, Now)
end.
-
-spec update_failure(any(), interval(), msec(), msec()) -> interval().
-update_failure(_Key, Interval, Timestamp, Now)
- when Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW ->
- Interval; % Ignore too frequent updates.
-
+update_failure(_Key, Interval, Timestamp, Now) when
+ Now - Timestamp =< ?SENSITIVITY_TIME_WINDOW
+->
+ % Ignore too frequent updates.
+ Interval;
update_failure(Key, Interval, _Timestamp, Now) ->
Interval1 = erlang:max(Interval, ?BASE_INTERVAL),
Interval2 = round(Interval1 * ?BACKOFF_FACTOR),
Interval3 = erlang:min(Interval2, ?MAX_INTERVAL),
insert(Key, Interval3, Now).
-
-spec insert(any(), interval(), msec()) -> interval().
insert(Key, Interval, Timestamp) ->
Entry = #rec{id = Key, backoff = Interval, ts = Timestamp},
@@ -206,7 +190,6 @@ insert(Key, Interval, Timestamp) ->
ets:insert(Table, Entry),
Interval.
-
-spec interval_and_timestamp(key()) -> {interval(), msec()}.
interval_and_timestamp(Key) ->
Table = couch_replicator_rate_limiter_tables:term_to_table(Key),
@@ -217,16 +200,13 @@ interval_and_timestamp(Key) ->
{Interval, Timestamp}
end.
-
-spec time_decay(msec(), interval()) -> interval().
time_decay(Dt, Interval) when Dt > ?TIME_DECAY_THRESHOLD ->
DecayedInterval = Interval - ?TIME_DECAY_FACTOR * Dt,
erlang:max(round(DecayedInterval), 0);
-
time_decay(_Dt, Interval) ->
Interval.
-
% Calculate additive factor. Ideally it would be a constant but in this case
% it is a step function to help handle larger values as they are approaching
% the backoff limit. Large success values closer to the limit add some
@@ -243,18 +223,15 @@ additive_factor(Interval) when Interval > 100 ->
additive_factor(_Interval) ->
?BASE_INTERVAL.
-
-spec new_timer() -> reference().
new_timer() ->
erlang:send_after(?MAX_INTERVAL * 2, self(), cleanup).
-
-spec now_msec() -> msec().
now_msec() ->
{Mega, Sec, Micro} = os:timestamp(),
((Mega * 1000000) + Sec) * 1000 + Micro div 1000.
-
-spec cleanup_table(atom(), msec()) -> non_neg_integer().
cleanup_table(Tid, LimitMSec) ->
Head = #rec{ts = '$1', _ = '_'},
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl b/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
index 72892b410..2e2556888 100644
--- a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
+++ b/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
% Maintain the set of public ETS tables used by the replicator rate limiter.
% Terms are hashed across a fixed number of shard tables (?SHARDS_N) to reduce
% contention; see create/1, tids/0 and term_to_table/1 below.
@@ -26,37 +25,32 @@
-module(couch_replicator_rate_limiter_tables).
-export([
- create/1,
- tids/0,
- term_to_table/1
+ create/1,
+ tids/0,
+ term_to_table/1
]).
-define(SHARDS_N, 16).
-
-spec create(non_neg_integer()) -> ok.
create(KeyPos) ->
Opts = [named_table, public, {keypos, KeyPos}, {read_concurrency, true}],
[ets:new(list_to_atom(TableName), Opts) || TableName <- table_names()],
ok.
-
-spec tids() -> [atom()].
tids() ->
[list_to_existing_atom(TableName) || TableName <- table_names()].
-
-spec term_to_table(any()) -> atom().
term_to_table(Term) ->
PHash = erlang:phash2(Term),
list_to_existing_atom(table_name(PHash rem ?SHARDS_N)).
-
-spec table_names() -> [string()].
table_names() ->
[table_name(N) || N <- lists:seq(0, ?SHARDS_N - 1)].
-
-spec table_name(non_neg_integer()) -> string().
table_name(Id) when is_integer(Id), Id >= 0 andalso Id < ?SHARDS_N ->
atom_to_list(?MODULE) ++ "_" ++ integer_to_list(Id).
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index d3b5b71a4..f544865af 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -20,28 +20,28 @@
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3,
- format_status/2
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3,
+ format_status/2
]).
-export([
- add_job/1,
- remove_job/1,
- reschedule/0,
- rep_state/1,
- find_jobs_by_dbname/1,
- find_jobs_by_doc/2,
- job_summary/2,
- health_threshold/0,
- jobs/0,
- job/1,
- restart_job/1,
- update_job_stats/2
+ add_job/1,
+ remove_job/1,
+ reschedule/0,
+ rep_state/1,
+ find_jobs_by_dbname/1,
+ find_jobs_by_doc/2,
+ job_summary/2,
+ health_threshold/0,
+ jobs/0,
+ job/1,
+ restart_job/1,
+ update_job_stats/2
]).
%% config_listener callbacks
@@ -59,7 +59,6 @@
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-include_lib("couch/include/couch_db.hrl").
-
%% definitions
-define(MAX_BACKOFF_EXPONENT, 10).
-define(BACKOFF_INTERVAL_MICROS, 30 * 1000 * 1000).
@@ -72,7 +71,6 @@
-define(DEFAULT_MAX_HISTORY, 20).
-define(DEFAULT_SCHEDULER_INTERVAL, 60000).
-
-record(state, {
interval = ?DEFAULT_SCHEDULER_INTERVAL,
timer,
@@ -88,14 +86,12 @@
crashed_n = 0 :: non_neg_integer()
}).
-
%% public functions
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-spec add_job(#rep{}) -> ok.
add_job(#rep{} = Rep) when Rep#rep.id /= undefined ->
case existing_replication(Rep) of
@@ -110,47 +106,44 @@ add_job(#rep{} = Rep) when Rep#rep.id /= undefined ->
ok
end.
-
-spec remove_job(job_id()) -> ok.
remove_job(Id) ->
gen_server:call(?MODULE, {remove_job, Id}, infinity).
-
-spec reschedule() -> ok.
% Trigger a manual reschedule. Used for testing and/or ops.
reschedule() ->
gen_server:call(?MODULE, reschedule, infinity).
-
-spec rep_state(rep_id()) -> #rep{} | nil.
rep_state(RepId) ->
case (catch ets:lookup_element(?MODULE, RepId, #job.rep)) of
- {'EXIT',{badarg, _}} ->
+ {'EXIT', {badarg, _}} ->
nil;
Rep ->
Rep
end.
-
-spec job_summary(job_id(), non_neg_integer()) -> [_] | nil.
job_summary(JobId, HealthThreshold) ->
case job_by_id(JobId) of
{ok, #job{pid = Pid, history = History, rep = Rep}} ->
ErrorCount = consecutive_crashes(History, HealthThreshold),
- {State, Info} = case {Pid, ErrorCount} of
- {undefined, 0} ->
- case History of
- [{{crashed, Error}, _When} | _] ->
- {crashing, crash_reason_json(Error)};
- [_ | _] ->
- {pending, Rep#rep.stats}
- end;
- {undefined, ErrorCount} when ErrorCount > 0 ->
- [{{crashed, Error}, _When} | _] = History,
- {crashing, crash_reason_json(Error)};
- {Pid, ErrorCount} when is_pid(Pid) ->
- {running, Rep#rep.stats}
- end,
+ {State, Info} =
+ case {Pid, ErrorCount} of
+ {undefined, 0} ->
+ case History of
+ [{{crashed, Error}, _When} | _] ->
+ {crashing, crash_reason_json(Error)};
+ [_ | _] ->
+ {pending, Rep#rep.stats}
+ end;
+ {undefined, ErrorCount} when ErrorCount > 0 ->
+ [{{crashed, Error}, _When} | _] = History,
+ {crashing, crash_reason_json(Error)};
+ {Pid, ErrorCount} when is_pid(Pid) ->
+ {running, Rep#rep.stats}
+ end,
[
{source, iolist_to_binary(ejson_url(Rep#rep.source))},
{target, iolist_to_binary(ejson_url(Rep#rep.target))},
@@ -158,22 +151,20 @@ job_summary(JobId, HealthThreshold) ->
{info, couch_replicator_utils:ejson_state_info(Info)},
{error_count, ErrorCount},
{last_updated, last_updated(History)},
- {start_time,
- couch_replicator_utils:iso8601(Rep#rep.start_time)},
+ {start_time, couch_replicator_utils:iso8601(Rep#rep.start_time)},
{source_proxy, job_proxy_url(Rep#rep.source)},
{target_proxy, job_proxy_url(Rep#rep.target)}
];
{error, not_found} ->
- nil % Job might have just completed
+ % Job might have just completed
+ nil
end.
-
job_proxy_url(#httpdb{proxy_url = ProxyUrl}) when is_list(ProxyUrl) ->
list_to_binary(couch_util:url_strip_password(ProxyUrl));
job_proxy_url(_Endpoint) ->
null.
-
% Health threshold is the minimum amount of time an unhealthy job should run
% crashing before it is considered to be healthy again. HealthThreshold should
% not be 0 as jobs could start and immediately crash, and it shouldn't be
@@ -181,9 +172,11 @@ job_proxy_url(_Endpoint) ->
% job is back to normal.
-spec health_threshold() -> non_neg_integer().
health_threshold() ->
- config:get_integer("replicator", "health_threshold",
- ?DEFAULT_HEALTH_THRESHOLD_SEC).
-
+ config:get_integer(
+ "replicator",
+ "health_threshold",
+ ?DEFAULT_HEALTH_THRESHOLD_SEC
+ ).
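% Illustrative configuration (editor's sketch): the threshold is read from the
% "replicator" section of the ini files, e.g.
%   [replicator]
%   health_threshold = 120
% with ?DEFAULT_HEALTH_THRESHOLD_SEC used when the key is not set.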
-spec find_jobs_by_dbname(binary()) -> list(#rep{}).
find_jobs_by_dbname(DbName) ->
@@ -191,14 +184,12 @@ find_jobs_by_dbname(DbName) ->
MatchSpec = #job{id = '$1', rep = Rep, _ = '_'},
[RepId || [RepId] <- ets:match(?MODULE, MatchSpec)].
-
-spec find_jobs_by_doc(binary(), binary()) -> list(#rep{}).
find_jobs_by_doc(DbName, DocId) ->
- Rep = #rep{db_name = DbName, doc_id = DocId, _ = '_'},
+ Rep = #rep{db_name = DbName, doc_id = DocId, _ = '_'},
MatchSpec = #job{id = '$1', rep = Rep, _ = '_'},
[RepId || [RepId] <- ets:match(?MODULE, MatchSpec)].
-
-spec restart_job(binary() | list() | rep_id()) ->
{ok, {[_]}} | {error, not_found}.
restart_job(JobId) ->
@@ -211,28 +202,39 @@ restart_job(JobId) ->
job(JobId)
end.
-
-spec update_job_stats(job_id(), term()) -> ok.
update_job_stats(JobId, Stats) ->
gen_server:cast(?MODULE, {update_job_stats, JobId, Stats}).
-
%% gen_server functions
init(_) ->
config:enable_feature('scheduler'),
- EtsOpts = [named_table, {keypos, #job.id}, {read_concurrency, true},
- {write_concurrency, true}],
+ EtsOpts = [
+ named_table,
+ {keypos, #job.id},
+ {read_concurrency, true},
+ {write_concurrency, true}
+ ],
?MODULE = ets:new(?MODULE, EtsOpts),
ok = couch_replicator_share:init(),
ok = config:listen_for_changes(?MODULE, nil),
- Interval = config:get_integer("replicator", "interval",
- ?DEFAULT_SCHEDULER_INTERVAL),
+ Interval = config:get_integer(
+ "replicator",
+ "interval",
+ ?DEFAULT_SCHEDULER_INTERVAL
+ ),
MaxJobs = config:get_integer("replicator", "max_jobs", ?DEFAULT_MAX_JOBS),
- MaxChurn = config:get_integer("replicator", "max_churn",
- ?DEFAULT_MAX_CHURN),
- MaxHistory = config:get_integer("replicator", "max_history",
- ?DEFAULT_MAX_HISTORY),
+ MaxChurn = config:get_integer(
+ "replicator",
+ "max_churn",
+ ?DEFAULT_MAX_CHURN
+ ),
+ MaxHistory = config:get_integer(
+ "replicator",
+ "max_history",
+ ?DEFAULT_MAX_HISTORY
+ ),
Timer = erlang:send_after(Interval, self(), reschedule),
State = #state{
interval = Interval,
@@ -244,7 +246,6 @@ init(_) ->
},
{ok, State}.
-
handle_call({add_job, Job}, _From, State) ->
ok = maybe_remove_job_int(Job#job.id, State),
true = add_job_int(Job),
@@ -253,50 +254,51 @@ handle_call({add_job, Job}, _From, State) ->
TotalJobs = ets:info(?MODULE, size),
couch_stats:update_gauge([couch_replicator, jobs, total], TotalJobs),
{reply, ok, State};
-
handle_call({remove_job, Id}, _From, State) ->
ok = maybe_remove_job_int(Id, State),
{reply, ok, State};
-
handle_call(reschedule, _From, State) ->
ok = reschedule(State),
{reply, ok, State};
-
handle_call(_, _From, State) ->
{noreply, State}.
-
-handle_cast({set_max_jobs, MaxJobs}, State) when is_integer(MaxJobs),
- MaxJobs >= 0 ->
+handle_cast({set_max_jobs, MaxJobs}, State) when
+ is_integer(MaxJobs),
+ MaxJobs >= 0
+->
couch_log:notice("~p: max_jobs set to ~B", [?MODULE, MaxJobs]),
{noreply, State#state{max_jobs = MaxJobs}};
-
-handle_cast({set_max_churn, MaxChurn}, State) when is_integer(MaxChurn),
- MaxChurn > 0 ->
+handle_cast({set_max_churn, MaxChurn}, State) when
+ is_integer(MaxChurn),
+ MaxChurn > 0
+->
couch_log:notice("~p: max_churn set to ~B", [?MODULE, MaxChurn]),
{noreply, State#state{max_churn = MaxChurn}};
-
-handle_cast({set_max_history, MaxHistory}, State) when is_integer(MaxHistory),
- MaxHistory > 0 ->
+handle_cast({set_max_history, MaxHistory}, State) when
+ is_integer(MaxHistory),
+ MaxHistory > 0
+->
couch_log:notice("~p: max_history set to ~B", [?MODULE, MaxHistory]),
{noreply, State#state{max_history = MaxHistory}};
-
-handle_cast({set_interval, Interval}, State) when is_integer(Interval),
- Interval > 0 ->
+handle_cast({set_interval, Interval}, State) when
+ is_integer(Interval),
+ Interval > 0
+->
couch_log:notice("~p: interval set to ~B", [?MODULE, Interval]),
{noreply, State#state{interval = Interval}};
-
-handle_cast({update_shares, Key, Shares}, State) when is_binary(Key),
- is_integer(Shares), Shares >= 0 ->
+handle_cast({update_shares, Key, Shares}, State) when
+ is_binary(Key),
+ is_integer(Shares),
+ Shares >= 0
+->
couch_log:notice("~p: shares for ~s set to ~B", [?MODULE, Key, Shares]),
couch_replicator_share:update_shares(Key, Shares),
{noreply, State};
-
-handle_cast({reset_shares, Key}, State) when is_binary(Key) ->
+handle_cast({reset_shares, Key}, State) when is_binary(Key) ->
couch_log:notice("~p: shares for ~s reset to default", [?MODULE, Key]),
couch_replicator_share:reset_shares(Key),
{noreply, State};
-
handle_cast({update_job_stats, JobId, Stats}, State) ->
case rep_state(JobId) of
nil ->
@@ -306,18 +308,15 @@ handle_cast({update_job_stats, JobId, Stats}, State) ->
true = ets:update_element(?MODULE, JobId, {#job.rep, NewRep})
end,
{noreply, State};
-
handle_cast(UnexpectedMsg, State) ->
couch_log:error("~p: received un-expected cast ~p", [?MODULE, UnexpectedMsg]),
{noreply, State}.
-
handle_info(reschedule, State) ->
ok = reschedule(State),
erlang:cancel_timer(State#state.timer),
Timer = erlang:send_after(State#state.interval, self(), reschedule),
{noreply, State#state{timer = Timer}};
-
handle_info({'DOWN', _Ref, process, Pid, normal}, State) ->
{ok, Job} = job_by_pid(Pid),
couch_log:notice("~p: Job ~p completed normally", [?MODULE, Job#job.id]),
@@ -326,82 +325,66 @@ handle_info({'DOWN', _Ref, process, Pid, normal}, State) ->
remove_job_int(Job),
update_running_jobs_stats(State#state.stats_pid),
{noreply, State};
-
handle_info({'DOWN', _Ref, process, Pid, Reason0}, State) ->
{ok, Job} = job_by_pid(Pid),
- Reason = case Reason0 of
- {shutdown, ShutdownReason} -> ShutdownReason;
- Other -> Other
- end,
+ Reason =
+ case Reason0 of
+ {shutdown, ShutdownReason} -> ShutdownReason;
+ Other -> Other
+ end,
Interval = State#state.interval,
couch_replicator_share:charge(Job, Interval, os:timestamp()),
ok = handle_crashed_job(Job, Reason, State),
{noreply, State};
-
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State};
-
handle_info(_, State) ->
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
terminate(_Reason, _State) ->
couch_replicator_share:clear(),
ok.
-
format_status(_Opt, [_PDict, State]) ->
[
- {max_jobs, State#state.max_jobs},
- {running_jobs, running_job_count()},
- {pending_jobs, pending_job_count()}
+ {max_jobs, State#state.max_jobs},
+ {running_jobs, running_job_count()},
+ {pending_jobs, pending_job_count()}
].
-
%% config listener functions
handle_config_change("replicator", "max_jobs", V, _, S) ->
ok = gen_server:cast(?MODULE, {set_max_jobs, list_to_integer(V)}),
{ok, S};
-
handle_config_change("replicator", "max_churn", V, _, S) ->
ok = gen_server:cast(?MODULE, {set_max_churn, list_to_integer(V)}),
{ok, S};
-
handle_config_change("replicator", "interval", V, _, S) ->
ok = gen_server:cast(?MODULE, {set_interval, list_to_integer(V)}),
{ok, S};
-
handle_config_change("replicator", "max_history", V, _, S) ->
ok = gen_server:cast(?MODULE, {set_max_history, list_to_integer(V)}),
{ok, S};
-
handle_config_change("replicator.shares", Key, deleted, _, S) ->
ok = gen_server:cast(?MODULE, {reset_shares, list_to_binary(Key)}),
{ok, S};
-
handle_config_change("replicator.shares", Key, V, _, S) ->
- ok = gen_server:cast(?MODULE, {update_shares, list_to_binary(Key),
- list_to_integer(V)}),
+ ok = gen_server:cast(?MODULE, {update_shares, list_to_binary(Key), list_to_integer(V)}),
{ok, S};
-
handle_config_change(_, _, _, _, S) ->
{ok, S}.
-
handle_config_terminate(_, stop, _) ->
ok;
-
handle_config_terminate(_, _, _) ->
Pid = whereis(?MODULE),
erlang:send_after(?RELISTEN_DELAY, Pid, restart_config_listener).
-
%% Private functions
% Handle crashed jobs. Handling differs between transient and permanent jobs.
@@ -420,7 +403,6 @@ handle_crashed_job(#job{rep = #rep{db_name = null}} = Job, Reason, State) ->
remove_job_int(Job),
update_running_jobs_stats(State#state.stats_pid),
ok;
-
handle_crashed_job(Job, Reason, State) ->
ok = update_state_crashed(Job, Reason, State),
case couch_replicator_doc_processor:update_docs() of
@@ -442,7 +424,6 @@ handle_crashed_job(Job, Reason, State) ->
ok
end.
-
% Attempt to start a newly added job. First quickly check if total jobs
% already exceed max jobs, then do a more expensive check which runs a
% select (an O(n) operation) to check pending jobs specifically.
@@ -459,7 +440,6 @@ maybe_start_newly_added_job(Job, State) ->
ok
end.
-
% Return up to a given number of oldest, not recently crashed jobs. Try to be
% memory efficient and use ets:foldl to accumulate jobs.
-spec pending_jobs(non_neg_integer()) -> [#job{}].
@@ -468,36 +448,34 @@ pending_jobs(0) ->
% other function clause it will crash as gb_sets:largest assumes set is not
% empty.
[];
-
pending_jobs(Count) when is_integer(Count), Count > 0 ->
- Set0 = gb_sets:new(), % [{{Priority, LastStart}, Job},...]
+ % [{{Priority, LastStart}, Job},...]
+ Set0 = gb_sets:new(),
Now = os:timestamp(),
Acc0 = {Set0, Now, Count, health_threshold()},
{Set1, _, _, _} = ets:foldl(fun pending_fold/2, Acc0, ?MODULE),
[Job || {_PriorityKey, Job} <- gb_sets:to_list(Set1)].
-
pending_fold(#job{pid = Pid}, Acc) when is_pid(Pid) ->
Acc;
-
pending_fold(Job, {Set, Now, Count, HealthThreshold}) ->
Healthy = not_recently_crashed(Job, Now, HealthThreshold),
- Set1 = case {Healthy, gb_sets:size(Set) >= Count} of
- {true, true} ->
- % Job is healthy but already reached accumulated limit, so might
- % have to replace one of the accumulated jobs
- pending_maybe_replace(Job, Set);
- {true, false} ->
- % Job is healthy and we haven't reached the limit, so add job
- % to accumulator
- gb_sets:add_element({start_priority_key(Job), Job}, Set);
- {false, _} ->
- % This job is not healthy (has crashed too recently), so skip it.
- Set
- end,
+ Set1 =
+ case {Healthy, gb_sets:size(Set) >= Count} of
+ {true, true} ->
+ % Job is healthy but already reached accumulated limit, so might
+ % have to replace one of the accumulated jobs
+ pending_maybe_replace(Job, Set);
+ {true, false} ->
+ % Job is healthy and we haven't reached the limit, so add job
+ % to accumulator
+ gb_sets:add_element({start_priority_key(Job), Job}, Set);
+ {false, _} ->
+ % This job is not healthy (has crashed too recently), so skip it.
+ Set
+ end,
{Set1, Now, Count, HealthThreshold}.
-
% Replace Job in the accumulator if it has a higher priority (lower priority
% value) than the lowest priority there. Job priority is indexed by
% {FairSharePriority, LastStarted} tuples. If the FairSharePriority is the same
@@ -526,16 +504,13 @@ pending_maybe_replace(Job, Set) ->
start_priority_key(#job{} = Job) ->
{couch_replicator_share:priority(Job#job.id), last_started(Job)}.
-
start_jobs(Count, State) ->
[start_job_int(Job, State) || Job <- pending_jobs(Count)],
ok.
-
-spec stop_jobs(non_neg_integer(), boolean(), #state{}) -> non_neg_integer().
stop_jobs(Count, _, _) when is_integer(Count), Count =< 0 ->
0;
-
stop_jobs(Count, IsContinuous, State) when is_integer(Count) ->
Running0 = running_jobs(),
ContinuousPred = fun(Job) -> is_continuous(Job) =:= IsContinuous end,
@@ -544,7 +519,6 @@ stop_jobs(Count, IsContinuous, State) when is_integer(Count) ->
Running3 = lists:sublist(lists:sort(Running2), Count),
length([stop_job_int(Job, State) || {_SortKey, Job} <- Running3]).
-
% Lower priority jobs have higher priority values, so we negate them, that way
% when sorted, they'll come up first. If priorities are equal, jobs are sorted
% by the lowest starting times as jobs with lowest start time have been running
@@ -553,7 +527,6 @@ stop_jobs(Count, IsContinuous, State) when is_integer(Count) ->
stop_priority_key(#job{} = Job) ->
{-couch_replicator_share:priority(Job#job.id), last_started(Job)}.
-
not_recently_crashed(#job{history = History}, Now, HealthThreshold) ->
case History of
[{added, _When}] ->
@@ -566,7 +539,6 @@ not_recently_crashed(#job{history = History}, Now, HealthThreshold) ->
timer:now_diff(Now, LatestCrashT) >= backoff_micros(CrashCount)
end.
-
% Count consecutive crashes. A crash happens when there is a `crashed` event
% within a short period of time (configurable) after any other event. It could
% be `crashed, started` for jobs crashing quickly after starting, `crashed,
@@ -583,40 +555,39 @@ not_recently_crashed(#job{history = History}, Now, HealthThreshold) ->
consecutive_crashes(History, HealthThreshold) when is_list(History) ->
consecutive_crashes(History, HealthThreshold, 0).
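% Illustrative example (editor's note): with a threshold of 120 seconds, a
% newest-first history such as
%   [{{crashed, R2}, T3}, {{crashed, R1}, T2}, {started, T1}]
% where each event follows the previous one within a few seconds counts as 2
% consecutive crashes; a {stopped, _}, {started, _} pair, or any gap larger
% than the threshold, ends the streak.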
-
-spec consecutive_crashes(history(), non_neg_integer(), non_neg_integer()) ->
- non_neg_integer().
+ non_neg_integer().
consecutive_crashes([], _HealthThreashold, Count) ->
Count;
-
-consecutive_crashes([{{crashed, _}, CrashT}, {_, PrevT} = PrevEvent | Rest],
- HealthThreshold, Count) ->
+consecutive_crashes(
+ [{{crashed, _}, CrashT}, {_, PrevT} = PrevEvent | Rest],
+ HealthThreshold,
+ Count
+) ->
case timer:now_diff(CrashT, PrevT) > HealthThreshold * 1000000 of
true ->
Count;
false ->
consecutive_crashes([PrevEvent | Rest], HealthThreshold, Count + 1)
end;
-
-consecutive_crashes([{stopped, _}, {started, _} | _], _HealthThreshold,
- Count) ->
+consecutive_crashes(
+ [{stopped, _}, {started, _} | _],
+ _HealthThreshold,
+ Count
+) ->
Count;
-
consecutive_crashes([_ | Rest], HealthThreshold, Count) ->
consecutive_crashes(Rest, HealthThreshold, Count).
-
-spec latest_crash_timestamp(history()) -> erlang:timestamp().
latest_crash_timestamp([]) ->
- {0, 0, 0}; % Used to avoid special-casing "no crash" when doing now_diff
-
+ % Used to avoid special-casing "no crash" when doing now_diff
+ {0, 0, 0};
latest_crash_timestamp([{{crashed, _Reason}, When} | _]) ->
When;
-
latest_crash_timestamp([_Event | Rest]) ->
latest_crash_timestamp(Rest).
-
-spec backoff_micros(non_neg_integer()) -> non_neg_integer().
backoff_micros(CrashCount) ->
% When calculating the backoff interval treat consecutive crash count as the
@@ -626,13 +597,11 @@ backoff_micros(CrashCount) ->
BackoffExp = erlang:min(CrashCount - 1, ?MAX_BACKOFF_EXPONENT),
(1 bsl BackoffExp) * ?BACKOFF_INTERVAL_MICROS.
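% Worked example (editor's illustration): with ?BACKOFF_INTERVAL_MICROS of 30
% seconds and ?MAX_BACKOFF_EXPONENT of 10, one consecutive crash backs off for
% 30 seconds, three crashes for 2^2 * 30s = 120 seconds, and eleven or more
% crashes cap out at 2^10 * 30s, roughly 8.5 hours.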
-
-spec add_job_int(#job{}) -> boolean().
add_job_int(#job{} = Job) ->
couch_replicator_share:job_added(Job),
ets:insert_new(?MODULE, Job).
-
-spec maybe_remove_job_int(job_id(), #state{}) -> ok.
maybe_remove_job_int(JobId, State) ->
case job_by_id(JobId) of
@@ -644,98 +613,99 @@ maybe_remove_job_int(JobId, State) ->
true = remove_job_int(Job),
couch_stats:increment_counter([couch_replicator, jobs, removes]),
TotalJobs = ets:info(?MODULE, size),
- couch_stats:update_gauge([couch_replicator, jobs, total],
- TotalJobs),
+ couch_stats:update_gauge(
+ [couch_replicator, jobs, total],
+ TotalJobs
+ ),
update_running_jobs_stats(State#state.stats_pid),
ok;
{error, not_found} ->
ok
end.
-
start_job_int(#job{pid = Pid}, _State) when Pid /= undefined ->
ok;
-
start_job_int(#job{} = Job0, State) ->
Job = maybe_optimize_job_for_rate_limiting(Job0),
case couch_replicator_scheduler_sup:start_child(Job#job.rep) of
{ok, Child} ->
Ref = monitor(process, Child),
ok = update_state_started(Job, Child, Ref, State),
- couch_log:notice("~p: Job ~p started as ~p",
- [?MODULE, Job#job.id, Child]);
+ couch_log:notice(
+ "~p: Job ~p started as ~p",
+ [?MODULE, Job#job.id, Child]
+ );
{error, {already_started, OtherPid}} when node(OtherPid) =:= node() ->
Ref = monitor(process, OtherPid),
ok = update_state_started(Job, OtherPid, Ref, State),
- couch_log:notice("~p: Job ~p already running as ~p. Most likely"
+ couch_log:notice(
+ "~p: Job ~p already running as ~p. Most likely"
" because replicator scheduler was restarted",
- [?MODULE, Job#job.id, OtherPid]);
+ [?MODULE, Job#job.id, OtherPid]
+ );
{error, {already_started, OtherPid}} when node(OtherPid) =/= node() ->
CrashMsg = "Duplicate replication running on another node",
- couch_log:notice("~p: Job ~p already running as ~p. Most likely"
+ couch_log:notice(
+ "~p: Job ~p already running as ~p. Most likely"
" because a duplicate replication is running on another node",
- [?MODULE, Job#job.id, OtherPid]),
+ [?MODULE, Job#job.id, OtherPid]
+ ),
ok = update_state_crashed(Job, CrashMsg, State);
{error, Reason} ->
- couch_log:notice("~p: Job ~p failed to start for reason ~p",
- [?MODULE, Job, Reason]),
+ couch_log:notice(
+ "~p: Job ~p failed to start for reason ~p",
+ [?MODULE, Job, Reason]
+ ),
ok = update_state_crashed(Job, Reason, State)
end.
-
-spec stop_job_int(#job{}, #state{}) -> ok | {error, term()}.
stop_job_int(#job{pid = undefined}, _State) ->
ok;
-
stop_job_int(#job{} = Job, State) ->
ok = couch_replicator_scheduler_sup:terminate_child(Job#job.pid),
demonitor(Job#job.monitor, [flush]),
ok = update_state_stopped(Job, State),
- couch_log:notice("~p: Job ~p stopped as ~p",
- [?MODULE, Job#job.id, Job#job.pid]).
-
+ couch_log:notice(
+ "~p: Job ~p stopped as ~p",
+ [?MODULE, Job#job.id, Job#job.pid]
+ ).
-spec remove_job_int(#job{}) -> true.
remove_job_int(#job{} = Job) ->
couch_replicator_share:job_removed(Job),
ets:delete(?MODULE, Job#job.id).
-
-spec running_job_count() -> non_neg_integer().
running_job_count() ->
ets:info(?MODULE, size) - pending_job_count().
-
-spec running_jobs() -> [#job{}].
running_jobs() ->
- ets:select(?MODULE, [{#job{pid = '$1', _='_'}, [{is_pid, '$1'}], ['$_']}]).
-
+ ets:select(?MODULE, [{#job{pid = '$1', _ = '_'}, [{is_pid, '$1'}], ['$_']}]).
-spec pending_job_count() -> non_neg_integer().
pending_job_count() ->
- ets:select_count(?MODULE, [{#job{pid=undefined, _='_'}, [], [true]}]).
-
+ ets:select_count(?MODULE, [{#job{pid = undefined, _ = '_'}, [], [true]}]).
-spec job_by_pid(pid()) -> {ok, #job{}} | {error, not_found}.
job_by_pid(Pid) when is_pid(Pid) ->
- case ets:match_object(?MODULE, #job{pid=Pid, _='_'}) of
+ case ets:match_object(?MODULE, #job{pid = Pid, _ = '_'}) of
[] ->
{error, not_found};
- [#job{}=Job] ->
+ [#job{} = Job] ->
{ok, Job}
end.
-
-spec job_by_id(job_id()) -> {ok, #job{}} | {error, not_found}.
job_by_id(Id) ->
case ets:lookup(?MODULE, Id) of
[] ->
{error, not_found};
- [#job{}=Job] ->
+ [#job{} = Job] ->
{ok, Job}
end.
-
-spec update_state_stopped(#job{}, #state{}) -> ok.
update_state_stopped(Job, State) ->
Job1 = reset_job_process(Job),
@@ -744,7 +714,6 @@ update_state_stopped(Job, State) ->
couch_stats:increment_counter([couch_replicator, jobs, stops]),
ok.
-
-spec update_state_started(#job{}, pid(), reference(), #state{}) -> ok.
update_state_started(Job, Pid, Ref, State) ->
Job1 = set_job_process(Job, Pid, Ref),
@@ -753,7 +722,6 @@ update_state_started(Job, Pid, Ref, State) ->
couch_stats:increment_counter([couch_replicator, jobs, starts]),
ok.
-
-spec update_state_crashed(#job{}, any(), #state{}) -> ok.
update_state_crashed(Job, Reason, State) ->
Job1 = reset_job_process(Job),
@@ -762,17 +730,14 @@ update_state_crashed(Job, Reason, State) ->
couch_stats:increment_counter([couch_replicator, jobs, crashes]),
ok.
-
-spec set_job_process(#job{}, pid(), reference()) -> #job{}.
set_job_process(#job{} = Job, Pid, Ref) when is_pid(Pid), is_reference(Ref) ->
Job#job{pid = Pid, monitor = Ref}.
-
-spec reset_job_process(#job{}) -> #job{}.
reset_job_process(#job{} = Job) ->
Job#job{pid = undefined, monitor = undefined}.
-
-spec reschedule(#state{}) -> ok.
reschedule(#state{interval = Interval} = State) ->
couch_replicator_share:update(running_jobs(), Interval, os:timestamp()),
@@ -780,50 +745,48 @@ reschedule(#state{interval = Interval} = State) ->
rotate_jobs(State, StopCount),
update_running_jobs_stats(State#state.stats_pid).
-
-spec stop_excess_jobs(#state{}, non_neg_integer()) -> non_neg_integer().
stop_excess_jobs(State, Running) ->
- #state{max_jobs=MaxJobs} = State,
+ #state{max_jobs = MaxJobs} = State,
StopCount = max(0, Running - MaxJobs),
Stopped = stop_jobs(StopCount, true, State),
OneshotLeft = StopCount - Stopped,
stop_jobs(OneshotLeft, false, State),
StopCount.
-
start_pending_jobs(State) ->
- #state{max_jobs=MaxJobs} = State,
+ #state{max_jobs = MaxJobs} = State,
Running = running_job_count(),
Pending = pending_job_count(),
- if Running < MaxJobs, Pending > 0 ->
- start_jobs(MaxJobs - Running, State);
- true ->
- ok
+ if
+ Running < MaxJobs, Pending > 0 ->
+ start_jobs(MaxJobs - Running, State);
+ true ->
+ ok
end.
-
-spec rotate_jobs(#state{}, non_neg_integer()) -> ok.
rotate_jobs(State, ChurnSoFar) ->
- #state{max_jobs=MaxJobs, max_churn=MaxChurn} = State,
+ #state{max_jobs = MaxJobs, max_churn = MaxChurn} = State,
Running = running_job_count(),
Pending = pending_job_count(),
% Reduce MaxChurn by the number of already stopped jobs in the
% current rescheduling cycle.
Churn = max(0, MaxChurn - ChurnSoFar),
SlotsAvailable = MaxJobs - Running,
- if SlotsAvailable >= 0 ->
-        % If there are enough SlotsAvailable, reduce StopCount to avoid
-        % unnecessarily stopping jobs. `stop_jobs/3` ignores 0 or negative
- % values so we don't worry about that here.
- StopCount = lists:min([Pending - SlotsAvailable, Running, Churn]),
- stop_jobs(StopCount, true, State),
- StartCount = max(0, MaxJobs - running_job_count()),
- start_jobs(StartCount, State);
- true ->
- ok
+ if
+ SlotsAvailable >= 0 ->
+            % If there are enough SlotsAvailable, reduce StopCount to avoid
+            % unnecessarily stopping jobs. `stop_jobs/3` ignores 0 or negative
+ % values so we don't worry about that here.
+ StopCount = lists:min([Pending - SlotsAvailable, Running, Churn]),
+ stop_jobs(StopCount, true, State),
+ StartCount = max(0, MaxJobs - running_job_count()),
+ start_jobs(StartCount, State);
+ true ->
+ ok
end.
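% Worked example (editor's illustration, with assumed settings of max_jobs =
% 500 and max_churn = 20): with 500 jobs running, 30 pending and nothing
% stopped earlier in this cycle, SlotsAvailable is 0, so
% StopCount = min([30, 500, 20]) = 20; twenty continuous jobs are stopped and
% up to twenty pending ones are started in their place.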
-
-spec last_started(#job{}) -> erlang:timestamp().
last_started(#job{} = Job) ->
case lists:keyfind(started, 1, Job#job.history) of
@@ -833,7 +796,6 @@ last_started(#job{} = Job) ->
When
end.
-
-spec update_history(#job{}, event_type(), erlang:timestamp(), #state{}) ->
#job{}.
update_history(Job, Type, When, State) ->
@@ -841,35 +803,38 @@ update_history(Job, Type, When, State) ->
History1 = lists:sublist(History0, State#state.max_history),
Job#job{history = History1}.
-
-spec ejson_url(#httpdb{} | binary()) -> binary().
-ejson_url(#httpdb{}=Httpdb) ->
+ejson_url(#httpdb{} = Httpdb) ->
couch_util:url_strip_password(Httpdb#httpdb.url);
ejson_url(DbName) when is_binary(DbName) ->
DbName.
-
-spec job_ejson(#job{}) -> {[_ | _]}.
job_ejson(Job) ->
Rep = Job#job.rep,
Source = ejson_url(Rep#rep.source),
Target = ejson_url(Rep#rep.target),
- History = lists:map(fun({Type, When}) ->
- EventProps = case Type of
- {crashed, Reason} ->
- [{type, crashed}, {reason, crash_reason_json(Reason)}];
- Type ->
- [{type, Type}]
+ History = lists:map(
+ fun({Type, When}) ->
+ EventProps =
+ case Type of
+ {crashed, Reason} ->
+ [{type, crashed}, {reason, crash_reason_json(Reason)}];
+ Type ->
+ [{type, Type}]
+ end,
+ {[{timestamp, couch_replicator_utils:iso8601(When)} | EventProps]}
end,
- {[{timestamp, couch_replicator_utils:iso8601(When)} | EventProps]}
- end, Job#job.history),
+ Job#job.history
+ ),
{BaseID, Ext} = Job#job.id,
- Pid = case Job#job.pid of
- undefined ->
- null;
- P when is_pid(P) ->
- ?l2b(pid_to_list(P))
- end,
+ Pid =
+ case Job#job.pid of
+ undefined ->
+ null;
+ P when is_pid(P) ->
+ ?l2b(pid_to_list(P))
+ end,
{[
{id, iolist_to_binary([BaseID, Ext])},
{pid, Pid},
@@ -884,12 +849,10 @@ job_ejson(Job) ->
{start_time, couch_replicator_utils:iso8601(Rep#rep.start_time)}
]}.
-
-spec jobs() -> [[tuple()]].
jobs() ->
ets:foldl(fun(Job, Acc) -> [job_ejson(Job) | Acc] end, [], ?MODULE).
-
-spec job(job_id()) -> {ok, {[_ | _]}} | {error, not_found}.
job(JobId) ->
case job_by_id(JobId) of
@@ -899,7 +862,6 @@ job(JobId) ->
Error
end.
-
crash_reason_json({_CrashType, Info}) when is_binary(Info) ->
Info;
crash_reason_json(Reason) when is_binary(Reason) ->
@@ -907,22 +869,23 @@ crash_reason_json(Reason) when is_binary(Reason) ->
crash_reason_json(Error) ->
couch_replicator_utils:rep_error_to_binary(Error).
-
-spec last_updated([_]) -> binary().
last_updated([{_Type, When} | _]) ->
couch_replicator_utils:iso8601(When).
-
-spec is_continuous(#job{}) -> boolean().
is_continuous(#job{rep = Rep}) ->
couch_util:get_value(continuous, Rep#rep.options, false).
-
% If job crashed last time because it was rate limited, try to
% optimize some options to help the job make progress.
-spec maybe_optimize_job_for_rate_limiting(#job{}) -> #job{}.
-maybe_optimize_job_for_rate_limiting(Job = #job{history =
- [{{crashed, max_backoff}, _} | _]}) ->
+maybe_optimize_job_for_rate_limiting(
+ Job = #job{
+ history =
+ [{{crashed, max_backoff}, _} | _]
+ }
+) ->
Opts = [
{checkpoint_interval, 5000},
{worker_processes, 2},
@@ -934,7 +897,6 @@ maybe_optimize_job_for_rate_limiting(Job = #job{history =
maybe_optimize_job_for_rate_limiting(Job) ->
Job.
-
-spec optimize_int_option({atom(), any()}, #rep{}) -> #rep{}.
optimize_int_option({Key, Val}, #rep{options = Options} = Rep) ->
case couch_util:get_value(Key, Options) of
@@ -947,7 +909,6 @@ optimize_int_option({Key, Val}, #rep{options = Options} = Rep) ->
Rep
end.
-
% Updater is a separate process. It receives `update_stats` messages and
% updates scheduler stats from the scheduler jobs table. Updates are
% performed no more frequently than once per ?STATS_UPDATE_WAIT milliseconds.
@@ -956,11 +917,9 @@ update_running_jobs_stats(StatsPid) when is_pid(StatsPid) ->
StatsPid ! update_stats,
ok.
-
start_stats_updater() ->
erlang:spawn_link(?MODULE, stats_updater_loop, [undefined]).
-
stats_updater_loop(Timer) ->
receive
update_stats when Timer == undefined ->
@@ -975,31 +934,28 @@ stats_updater_loop(Timer) ->
erlang:exit({stats_updater_bad_msg, Else})
end.
-
-spec stats_updater_refresh() -> ok.
stats_updater_refresh() ->
#stats_acc{
- pending_n = PendingN,
- running_n = RunningN,
- crashed_n = CrashedN
- } = ets:foldl(fun stats_fold/2, #stats_acc{}, ?MODULE),
+ pending_n = PendingN,
+ running_n = RunningN,
+ crashed_n = CrashedN
+ } = ets:foldl(fun stats_fold/2, #stats_acc{}, ?MODULE),
couch_stats:update_gauge([couch_replicator, jobs, pending], PendingN),
couch_stats:update_gauge([couch_replicator, jobs, running], RunningN),
couch_stats:update_gauge([couch_replicator, jobs, crashed], CrashedN),
ok.
-
-spec stats_fold(#job{}, #stats_acc{}) -> #stats_acc{}.
stats_fold(#job{pid = undefined, history = [{added, _}]}, Acc) ->
Acc#stats_acc{pending_n = Acc#stats_acc.pending_n + 1};
stats_fold(#job{pid = undefined, history = [{stopped, _} | _]}, Acc) ->
Acc#stats_acc{pending_n = Acc#stats_acc.pending_n + 1};
stats_fold(#job{pid = undefined, history = [{{crashed, _}, _} | _]}, Acc) ->
- Acc#stats_acc{crashed_n =Acc#stats_acc.crashed_n + 1};
+ Acc#stats_acc{crashed_n = Acc#stats_acc.crashed_n + 1};
stats_fold(#job{pid = P, history = [{started, _} | _]}, Acc) when is_pid(P) ->
Acc#stats_acc{running_n = Acc#stats_acc.running_n + 1}.
-
-spec existing_replication(#rep{}) -> boolean().
existing_replication(#rep{} = NewRep) ->
case job_by_id(NewRep#rep.id) of
@@ -1011,67 +967,85 @@ existing_replication(#rep{} = NewRep) ->
false
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
backoff_micros_test_() ->
BaseInterval = ?BACKOFF_INTERVAL_MICROS,
- [?_assertEqual(R * BaseInterval, backoff_micros(N)) || {R, N} <- [
- {1, 1}, {2, 2}, {4, 3}, {8, 4}, {16, 5}, {32, 6}, {64, 7}, {128, 8},
- {256, 9}, {512, 10}, {1024, 11}, {1024, 12}
- ]].
-
+ [
+ ?_assertEqual(R * BaseInterval, backoff_micros(N))
+ || {R, N} <- [
+ {1, 1},
+ {2, 2},
+ {4, 3},
+ {8, 4},
+ {16, 5},
+ {32, 6},
+ {64, 7},
+ {128, 8},
+ {256, 9},
+ {512, 10},
+ {1024, 11},
+ {1024, 12}
+ ]
+ ].
consecutive_crashes_test_() ->
Threshold = ?DEFAULT_HEALTH_THRESHOLD_SEC,
- [?_assertEqual(R, consecutive_crashes(H, Threshold)) || {R, H} <- [
- {0, []},
- {0, [added()]},
- {0, [stopped()]},
- {0, [crashed()]},
- {1, [crashed(), added()]},
- {1, [crashed(), crashed()]},
- {1, [crashed(), stopped()]},
- {3, [crashed(), crashed(), crashed(), added()]},
- {2, [crashed(), crashed(), stopped()]},
- {1, [crashed(), started(), added()]},
- {2, [crashed(3), started(2), crashed(1), started(0)]},
- {0, [stopped(3), started(2), crashed(1), started(0)]},
- {1, [crashed(3), started(2), stopped(1), started(0)]},
- {0, [crashed(999), started(0)]},
- {1, [crashed(999), started(998), crashed(997), started(0)]}
- ]].
-
+ [
+ ?_assertEqual(R, consecutive_crashes(H, Threshold))
+ || {R, H} <- [
+ {0, []},
+ {0, [added()]},
+ {0, [stopped()]},
+ {0, [crashed()]},
+ {1, [crashed(), added()]},
+ {1, [crashed(), crashed()]},
+ {1, [crashed(), stopped()]},
+ {3, [crashed(), crashed(), crashed(), added()]},
+ {2, [crashed(), crashed(), stopped()]},
+ {1, [crashed(), started(), added()]},
+ {2, [crashed(3), started(2), crashed(1), started(0)]},
+ {0, [stopped(3), started(2), crashed(1), started(0)]},
+ {1, [crashed(3), started(2), stopped(1), started(0)]},
+ {0, [crashed(999), started(0)]},
+ {1, [crashed(999), started(998), crashed(997), started(0)]}
+ ]
+ ].
consecutive_crashes_non_default_threshold_test_() ->
- [?_assertEqual(R, consecutive_crashes(H, T)) || {R, H, T} <- [
- {0, [crashed(11), started(0)], 10},
- {1, [crashed(10), started(0)], 10}
- ]].
-
+ [
+ ?_assertEqual(R, consecutive_crashes(H, T))
+ || {R, H, T} <- [
+ {0, [crashed(11), started(0)], 10},
+ {1, [crashed(10), started(0)], 10}
+ ]
+ ].
latest_crash_timestamp_test_() ->
- [?_assertEqual({0, R, 0}, latest_crash_timestamp(H)) || {R, H} <- [
- {0, [added()]},
- {1, [crashed(1)]},
- {3, [crashed(3), started(2), crashed(1), started(0)]},
- {1, [started(3), stopped(2), crashed(1), started(0)]}
- ]].
-
+ [
+ ?_assertEqual({0, R, 0}, latest_crash_timestamp(H))
+ || {R, H} <- [
+ {0, [added()]},
+ {1, [crashed(1)]},
+ {3, [crashed(3), started(2), crashed(1), started(0)]},
+ {1, [started(3), stopped(2), crashed(1), started(0)]}
+ ]
+ ].
last_started_test_() ->
- [?_assertEqual({0, R, 0}, last_started(testjob(H))) || {R, H} <- [
- {0, [added()]},
- {0, [crashed(1)]},
- {1, [started(1)]},
- {1, [added(), started(1)]},
- {2, [started(2), started(1)]},
- {2, [crashed(3), started(2), started(1)]}
- ]].
-
+ [
+ ?_assertEqual({0, R, 0}, last_started(testjob(H)))
+ || {R, H} <- [
+ {0, [added()]},
+ {0, [crashed(1)]},
+ {1, [started(1)]},
+ {1, [added(), started(1)]},
+ {2, [started(2), started(1)]},
+ {2, [crashed(3), started(2), started(1)]}
+ ]
+ ].
longest_running_test() ->
J0 = testjob([crashed()]),
@@ -1084,7 +1058,6 @@ longest_running_test() ->
?assertEqual([J1, J2], Sort([J2, J1])),
?assertEqual([J0, J1, J2], Sort([J2, J1, J0])).
-
scheduler_test_() ->
{
setup,
@@ -1131,9 +1104,8 @@ scheduler_test_() ->
}
}.
-
t_pending_jobs_simple() ->
- ?_test(begin
+ ?_test(begin
Job1 = oneshot(1),
Job2 = oneshot(2),
setup_jobs([Job2, Job1]),
@@ -1143,9 +1115,8 @@ t_pending_jobs_simple() ->
?assertEqual([Job1, Job2], pending_jobs(3))
end).
-
t_pending_jobs_skip_crashed() ->
- ?_test(begin
+ ?_test(begin
Job = oneshot(1),
Ts = os:timestamp(),
History = [crashed(Ts), started(Ts) | Job#job.history],
@@ -1158,9 +1129,8 @@ t_pending_jobs_skip_crashed() ->
?assertEqual([Job2, Job3], pending_jobs(3))
end).
-
t_pending_jobs_skip_running() ->
- ?_test(begin
+ ?_test(begin
Job1 = continuous(1),
Job2 = continuous_running(2),
Job3 = oneshot(3),
@@ -1170,7 +1140,6 @@ t_pending_jobs_skip_running() ->
?assertEqual([Job1, Job3], pending_jobs(4))
end).
-
t_one_job_starts() ->
?_test(begin
setup_jobs([oneshot(1)]),
@@ -1179,7 +1148,6 @@ t_one_job_starts() ->
?assertEqual({1, 0}, run_stop_count())
end).
-
t_no_jobs_start_if_max_is_0() ->
?_test(begin
setup_jobs([oneshot(1)]),
@@ -1187,7 +1155,6 @@ t_no_jobs_start_if_max_is_0() ->
?assertEqual({0, 1}, run_stop_count())
end).
-
t_one_job_starts_if_max_is_1() ->
?_test(begin
setup_jobs([oneshot(1), oneshot(2)]),
@@ -1195,7 +1162,6 @@ t_one_job_starts_if_max_is_1() ->
?assertEqual({1, 1}, run_stop_count())
end).
-
t_max_churn_does_not_throttle_initial_start() ->
?_test(begin
setup_jobs([oneshot(1), oneshot(2)]),
@@ -1203,7 +1169,6 @@ t_max_churn_does_not_throttle_initial_start() ->
?assertEqual({2, 0}, run_stop_count())
end).
-
t_excess_oneshot_only_jobs() ->
?_test(begin
setup_jobs([oneshot_running(1), oneshot_running(2)]),
@@ -1214,7 +1179,6 @@ t_excess_oneshot_only_jobs() ->
?assertEqual({0, 2}, run_stop_count())
end).
-
t_excess_continuous_only_jobs() ->
?_test(begin
setup_jobs([continuous_running(1), continuous_running(2)]),
@@ -1225,7 +1189,6 @@ t_excess_continuous_only_jobs() ->
?assertEqual({0, 2}, run_stop_count())
end).
-
t_excess_prefer_continuous_first() ->
?_test(begin
Jobs = [
@@ -1245,7 +1208,6 @@ t_excess_prefer_continuous_first() ->
?assertEqual({0, 1}, oneshot_run_stop_count())
end).
-
t_stop_oldest_first() ->
?_test(begin
Jobs = [
@@ -1261,7 +1223,6 @@ t_stop_oldest_first() ->
?assertEqual([7], jobs_running())
end).
-
t_start_oldest_first() ->
?_test(begin
setup_jobs([continuous(7), continuous(2), continuous(5)]),
@@ -1275,7 +1236,6 @@ t_start_oldest_first() ->
?assertEqual([2], jobs_stopped())
end).
-
t_jobs_churn_even_if_not_all_max_jobs_are_running() ->
?_test(begin
setup_jobs([
@@ -1288,9 +1248,8 @@ t_jobs_churn_even_if_not_all_max_jobs_are_running() ->
?assertEqual([7], jobs_stopped())
end).
-
t_jobs_dont_churn_if_there_are_available_running_slots() ->
- ?_test(begin
+ ?_test(begin
setup_jobs([
continuous_running(1),
continuous_running(2)
@@ -1301,9 +1260,8 @@ t_jobs_dont_churn_if_there_are_available_running_slots() ->
?assertEqual(0, meck:num_calls(couch_replicator_scheduler_sup, start_child, 1))
end).
-
t_start_only_pending_jobs_do_not_churn_existing_ones() ->
- ?_test(begin
+ ?_test(begin
setup_jobs([
continuous(1),
continuous_running(2)
@@ -1314,7 +1272,6 @@ t_start_only_pending_jobs_do_not_churn_existing_ones() ->
?assertEqual({2, 0}, run_stop_count())
end).
-
t_dont_stop_if_nothing_pending() ->
?_test(begin
setup_jobs([continuous_running(1), continuous_running(2)]),
@@ -1322,7 +1279,6 @@ t_dont_stop_if_nothing_pending() ->
?assertEqual({2, 0}, run_stop_count())
end).
-
t_max_churn_limits_number_of_rotated_jobs() ->
?_test(begin
Jobs = [
@@ -1336,7 +1292,6 @@ t_max_churn_limits_number_of_rotated_jobs() ->
?assertEqual([2, 3], jobs_stopped())
end).
-
t_if_pending_less_than_running_start_all_pending() ->
?_test(begin
Jobs = [
@@ -1351,7 +1306,6 @@ t_if_pending_less_than_running_start_all_pending() ->
?assertEqual([1, 2, 5], jobs_running())
end).
-
t_running_less_than_pending_swap_all_running() ->
?_test(begin
Jobs = [
@@ -1366,7 +1320,6 @@ t_running_less_than_pending_swap_all_running() ->
?assertEqual([3, 4, 5], jobs_stopped())
end).
-
t_oneshot_dont_get_rotated() ->
?_test(begin
setup_jobs([oneshot_running(1), continuous(2)]),
@@ -1374,7 +1327,6 @@ t_oneshot_dont_get_rotated() ->
?assertEqual([1], jobs_running())
end).
-
t_rotate_continuous_only_if_mixed() ->
?_test(begin
setup_jobs([continuous(1), oneshot_running(2), continuous_running(3)]),
@@ -1382,7 +1334,6 @@ t_rotate_continuous_only_if_mixed() ->
?assertEqual([1, 2], jobs_running())
end).
-
t_oneshot_dont_get_starting_priority() ->
?_test(begin
setup_jobs([continuous(1), oneshot(2), continuous_running(3)]),
@@ -1390,7 +1341,6 @@ t_oneshot_dont_get_starting_priority() ->
?assertEqual([1], jobs_running())
end).
-
% This is tested in other test cases; it is here mainly to make explicit a property
% of one-shot replications -- they can starve other jobs if they "take control"
% of all the available scheduler slots.
@@ -1407,7 +1357,6 @@ t_oneshot_will_hog_the_scheduler() ->
?assertEqual([1, 2], jobs_running())
end).
-
t_if_excess_is_trimmed_rotation_still_happens() ->
?_test(begin
Jobs = [
@@ -1420,12 +1369,11 @@ t_if_excess_is_trimmed_rotation_still_happens() ->
?assertEqual([1], jobs_running())
end).
-
t_if_transient_job_crashes_it_gets_removed() ->
?_test(begin
Pid = mock_pid(),
Rep = continuous_rep(),
- Job = #job{
+ Job = #job{
id = job1,
pid = Pid,
history = [added()],
@@ -1434,17 +1382,18 @@ t_if_transient_job_crashes_it_gets_removed() ->
setup_jobs([Job]),
?assertEqual(1, ets:info(?MODULE, size)),
State = #state{max_history = 3, stats_pid = self()},
- {noreply, State} = handle_info({'DOWN', r1, process, Pid, failed},
- State),
+ {noreply, State} = handle_info(
+ {'DOWN', r1, process, Pid, failed},
+ State
+ ),
?assertEqual(0, ets:info(?MODULE, size))
- end).
-
+ end).
t_if_permanent_job_crashes_it_stays_in_ets() ->
?_test(begin
Pid = mock_pid(),
Rep = continuous_rep(),
- Job = #job{
+ Job = #job{
id = job1,
pid = Pid,
history = [added()],
@@ -1457,14 +1406,15 @@ t_if_permanent_job_crashes_it_stays_in_ets() ->
max_history = 3,
stats_pid = self()
},
- {noreply, State} = handle_info({'DOWN', r1, process, Pid, failed},
- State),
+ {noreply, State} = handle_info(
+ {'DOWN', r1, process, Pid, failed},
+ State
+ ),
?assertEqual(1, ets:info(?MODULE, size)),
[Job1] = ets:lookup(?MODULE, job1),
[Latest | _] = Job1#job.history,
?assertMatch({{crashed, failed}, _}, Latest)
- end).
-
+ end).
t_existing_jobs() ->
?_test(begin
@@ -1479,11 +1429,10 @@ t_existing_jobs() ->
?assertNot(existing_replication(NewRep#rep{options = []}))
end).
-
t_job_summary_running() ->
?_test(begin
Rep = rep(<<"s">>, <<"t">>),
- Job = #job{
+ Job = #job{
id = job1,
pid = mock_pid(),
history = [added()],
@@ -1501,10 +1450,9 @@ t_job_summary_running() ->
?assertEqual({Stats}, proplists:get_value(info, Summary1))
end).
-
t_job_summary_pending() ->
?_test(begin
- Job = #job{
+ Job = #job{
id = job1,
pid = undefined,
history = [stopped(20), started(10), added()],
@@ -1522,10 +1470,9 @@ t_job_summary_pending() ->
?assertEqual({Stats}, proplists:get_value(info, Summary1))
end).
-
t_job_summary_crashing_once() ->
?_test(begin
- Job = #job{
+ Job = #job{
id = job1,
history = [crashed(?DEFAULT_HEALTH_THRESHOLD_SEC + 1), started(0)],
rep = rep(<<"s">>, <<"t">>)
@@ -1538,10 +1485,9 @@ t_job_summary_crashing_once() ->
?assertEqual(0, proplists:get_value(error_count, Summary))
end).
-
t_job_summary_crashing_many_times() ->
?_test(begin
- Job = #job{
+ Job = #job{
id = job1,
history = [crashed(4), started(3), crashed(2), started(1)],
rep = rep(<<"s">>, <<"t">>)
@@ -1554,7 +1500,6 @@ t_job_summary_crashing_many_times() ->
?assertEqual(2, proplists:get_value(error_count, Summary))
end).
-
t_job_summary_proxy_fields() ->
?_test(begin
Src = #httpdb{
@@ -1565,20 +1510,23 @@ t_job_summary_proxy_fields() ->
url = "http://t",
proxy_url = "socks5://u:p@tproxy:34"
},
- Job = #job{
+ Job = #job{
id = job1,
history = [started(10), added()],
rep = rep(Src, Tgt)
},
setup_jobs([Job]),
Summary = job_summary(job1, ?DEFAULT_HEALTH_THRESHOLD_SEC),
- ?assertEqual(<<"http://u:*****@sproxy:12">>,
- proplists:get_value(source_proxy, Summary)),
- ?assertEqual(<<"socks5://u:*****@tproxy:34">>,
- proplists:get_value(target_proxy, Summary))
+ ?assertEqual(
+ <<"http://u:*****@sproxy:12">>,
+ proplists:get_value(source_proxy, Summary)
+ ),
+ ?assertEqual(
+ <<"socks5://u:*****@tproxy:34">>,
+ proplists:get_value(target_proxy, Summary)
+ )
end).
-
% Test helper functions
setup_all() ->
@@ -1595,14 +1543,11 @@ setup_all() ->
meck:expect(couch_replicator_scheduler_sup, start_child, 1, {ok, Pid}),
couch_replicator_share:init().
-
-
teardown_all(_) ->
couch_replicator_share:clear(),
catch ets:delete(?MODULE),
meck:unload().
-
setup() ->
meck:reset([
couch_log,
@@ -1611,40 +1556,40 @@ setup() ->
config
]).
-
teardown(_) ->
ok.
-
setup_jobs(Jobs) when is_list(Jobs) ->
?MODULE = ets:new(?MODULE, [named_table, {keypos, #job.id}]),
ets:insert(?MODULE, Jobs).
-
all_jobs() ->
lists:usort(ets:tab2list(?MODULE)).
-
jobs_stopped() ->
[Job#job.id || Job <- all_jobs(), Job#job.pid =:= undefined].
-
jobs_running() ->
[Job#job.id || Job <- all_jobs(), Job#job.pid =/= undefined].
-
run_stop_count() ->
{length(jobs_running()), length(jobs_stopped())}.
-
oneshot_run_stop_count() ->
- Running = [Job#job.id || Job <- all_jobs(), Job#job.pid =/= undefined,
- not is_continuous(Job)],
- Stopped = [Job#job.id || Job <- all_jobs(), Job#job.pid =:= undefined,
- not is_continuous(Job)],
+ Running = [
+ Job#job.id
+ || Job <- all_jobs(),
+ Job#job.pid =/= undefined,
+ not is_continuous(Job)
+ ],
+ Stopped = [
+ Job#job.id
+ || Job <- all_jobs(),
+ Job#job.pid =:= undefined,
+ not is_continuous(Job)
+ ],
{length(Running), length(Stopped)}.
-
mock_state(MaxJobs) ->
#state{
max_jobs = MaxJobs,
@@ -1661,35 +1606,29 @@ mock_state(MaxJobs, MaxChurn) ->
stats_pid = self()
}.
-
rep() ->
#rep{options = [], user_ctx = #user_ctx{}}.
-
rep(Src, Tgt) ->
Rep = rep(),
Rep#rep{source = Src, target = Tgt}.
-
continuous_rep() ->
#rep{options = [{continuous, true}], user_ctx = #user_ctx{}}.
-
continuous_rep(Src, Tgt) ->
Rep = continuous_rep(),
Rep#rep{source = Src, target = Tgt}.
-
continuous(Id) when is_integer(Id) ->
Started = Id,
- Hist = [stopped(Started+1), started(Started), added()],
+ Hist = [stopped(Started + 1), started(Started), added()],
#job{
id = Id,
history = Hist,
rep = continuous_rep()
}.
-
continuous_running(Id) when is_integer(Id) ->
Started = Id,
Pid = mock_pid(),
@@ -1701,13 +1640,11 @@ continuous_running(Id) when is_integer(Id) ->
monitor = monitor(process, Pid)
}.
-
oneshot(Id) when is_integer(Id) ->
Started = Id,
Hist = [stopped(Started + 1), started(Started), added()],
#job{id = Id, history = Hist, rep = rep()}.
-
oneshot_running(Id) when is_integer(Id) ->
Started = Id,
Pid = mock_pid(),
@@ -1719,43 +1656,34 @@ oneshot_running(Id) when is_integer(Id) ->
monitor = monitor(process, Pid)
}.
-
testjob(Hist) when is_list(Hist) ->
#job{history = Hist}.
-
mock_pid() ->
- list_to_pid("<0.999.999>").
+ list_to_pid("<0.999.999>").
crashed() ->
crashed(0).
-
-crashed(WhenSec) when is_integer(WhenSec)->
+crashed(WhenSec) when is_integer(WhenSec) ->
{{crashed, some_reason}, {0, WhenSec, 0}};
crashed({MSec, Sec, USec}) ->
{{crashed, some_reason}, {MSec, Sec, USec}}.
-
started() ->
started(0).
-
-started(WhenSec) when is_integer(WhenSec)->
+started(WhenSec) when is_integer(WhenSec) ->
{started, {0, WhenSec, 0}};
-
started({MSec, Sec, USec}) ->
{started, {MSec, Sec, USec}}.
-
stopped() ->
stopped(0).
-
stopped(WhenSec) ->
{stopped, {0, WhenSec, 0}}.
-
added() ->
{added, {0, 0, 0}}.
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
index db8edfbef..f300dd44e 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl
@@ -15,17 +15,17 @@
-behaviour(gen_server).
-export([
- start_link/1
+ start_link/1
]).
-export([
- init/1,
- terminate/2,
- handle_call/3,
- handle_info/2,
- handle_cast/2,
- code_change/3,
- format_status/2
+ init/1,
+ terminate/2,
+ handle_call/3,
+ handle_info/2,
+ handle_cast/2,
+ code_change/3,
+ format_status/2
]).
-include_lib("couch/include/couch_db.hrl").
@@ -42,7 +42,6 @@
pp_rep_id/1
]).
-
-define(LOWEST_SEQ, 0).
-define(DEFAULT_CHECKPOINT_INTERVAL, 30000).
-define(STARTUP_JITTER_DEFAULT, 5000).
@@ -65,7 +64,8 @@
rep_starttime,
src_starttime,
tgt_starttime,
- timer, % checkpoint timer
+ % checkpoint timer
+ timer,
changes_queue,
changes_manager,
changes_reader,
@@ -79,7 +79,6 @@
view = nil
}).
-
start_link(#rep{id = {BaseId, Ext}, source = Src, target = Tgt} = Rep) ->
RepChildId = BaseId ++ Ext,
Source = couch_replicator_api_wrap:db_uri(Src),
@@ -90,17 +89,17 @@ start_link(#rep{id = {BaseId, Ext}, source = Src, target = Tgt} = Rep) ->
{ok, Pid} ->
{ok, Pid};
{error, Reason} ->
- couch_log:warning("failed to start replication `~s` (`~s` -> `~s`)",
- [RepChildId, Source, Target]),
+ couch_log:warning(
+ "failed to start replication `~s` (`~s` -> `~s`)",
+ [RepChildId, Source, Target]
+ ),
{error, Reason}
end.
-
init(InitArgs) ->
{ok, InitArgs, 0}.
-
-do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
+do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx = UserCtx} = Rep) ->
process_flag(trap_exit, true),
timer:sleep(startup_jitter()),
@@ -137,23 +136,27 @@ do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
fun(_) ->
couch_stats:increment_counter([couch_replicator, workers_started]),
{ok, Pid} = couch_replicator_worker:start_link(
- self(), Source, Target, ChangesManager, MaxConns),
+ self(), Source, Target, ChangesManager, MaxConns
+ ),
Pid
end,
- lists:seq(1, NumWorkers)),
-
- couch_task_status:add_task([
- {type, replication},
- {user, UserCtx#user_ctx.name},
- {replication_id, ?l2b(BaseId ++ Ext)},
- {database, Rep#rep.db_name},
- {doc_id, Rep#rep.doc_id},
- {source, ?l2b(SourceName)},
- {target, ?l2b(TargetName)},
- {continuous, get_value(continuous, Options, false)},
- {source_seq, HighestSeq},
- {checkpoint_interval, CheckpointInterval}
- ] ++ rep_stats(State)),
+ lists:seq(1, NumWorkers)
+ ),
+
+ couch_task_status:add_task(
+ [
+ {type, replication},
+ {user, UserCtx#user_ctx.name},
+ {replication_id, ?l2b(BaseId ++ Ext)},
+ {database, Rep#rep.db_name},
+ {doc_id, Rep#rep.doc_id},
+ {source, ?l2b(SourceName)},
+ {target, ?l2b(TargetName)},
+ {continuous, get_value(continuous, Options, false)},
+ {source_seq, HighestSeq},
+ {checkpoint_interval, CheckpointInterval}
+ ] ++ rep_stats(State)
+ ),
couch_task_status:set_update_frequency(1000),
% Until OTP R14B03:
@@ -172,44 +175,59 @@ do_init(#rep{options = Options, id = {BaseId, Ext}, user_ctx=UserCtx} = Rep) ->
doc_update_triggered(Rep),
{ok, State#rep_state{
- changes_queue = ChangesQueue,
- changes_manager = ChangesManager,
- changes_reader = ChangesReader,
- workers = Workers
- }
- }.
-
+ changes_queue = ChangesQueue,
+ changes_manager = ChangesManager,
+ changes_reader = ChangesReader,
+ workers = Workers
+ }}.
handle_call({add_stats, Stats}, From, State) ->
gen_server:reply(From, ok),
NewStats = couch_replicator_utils:sum_stats(State#rep_state.stats, Stats),
{noreply, State#rep_state{stats = NewStats}};
-
-handle_call({report_seq_done, Seq, StatsInc}, From,
- #rep_state{seqs_in_progress = SeqsInProgress, highest_seq_done = HighestDone,
- current_through_seq = ThroughSeq, stats = Stats} = State) ->
+handle_call(
+ {report_seq_done, Seq, StatsInc},
+ From,
+ #rep_state{
+ seqs_in_progress = SeqsInProgress,
+ highest_seq_done = HighestDone,
+ current_through_seq = ThroughSeq,
+ stats = Stats
+ } = State
+) ->
gen_server:reply(From, ok),
- {NewThroughSeq0, NewSeqsInProgress} = case SeqsInProgress of
- [] ->
- {Seq, []};
- [Seq | Rest] ->
- {Seq, Rest};
- [_ | _] ->
- {ThroughSeq, ordsets:del_element(Seq, SeqsInProgress)}
- end,
+ {NewThroughSeq0, NewSeqsInProgress} =
+ case SeqsInProgress of
+ [] ->
+ {Seq, []};
+ [Seq | Rest] ->
+ {Seq, Rest};
+ [_ | _] ->
+ {ThroughSeq, ordsets:del_element(Seq, SeqsInProgress)}
+ end,
NewHighestDone = lists:max([HighestDone, Seq]),
- NewThroughSeq = case NewSeqsInProgress of
- [] ->
- lists:max([NewThroughSeq0, NewHighestDone]);
- _ ->
- NewThroughSeq0
- end,
- couch_log:debug("Worker reported seq ~p, through seq was ~p, "
+ NewThroughSeq =
+ case NewSeqsInProgress of
+ [] ->
+ lists:max([NewThroughSeq0, NewHighestDone]);
+ _ ->
+ NewThroughSeq0
+ end,
+ couch_log:debug(
+ "Worker reported seq ~p, through seq was ~p, "
"new through seq is ~p, highest seq done was ~p, "
"new highest seq done is ~p~n"
"Seqs in progress were: ~p~nSeqs in progress are now: ~p",
- [Seq, ThroughSeq, NewThroughSeq, HighestDone,
- NewHighestDone, SeqsInProgress, NewSeqsInProgress]),
+ [
+ Seq,
+ ThroughSeq,
+ NewThroughSeq,
+ HighestDone,
+ NewHighestDone,
+ SeqsInProgress,
+ NewSeqsInProgress
+ ]
+ ),
NewState = State#rep_state{
stats = couch_replicator_utils:sum_stats(Stats, StatsInc),
current_through_seq = NewThroughSeq,
@@ -219,23 +237,22 @@ handle_call({report_seq_done, Seq, StatsInc}, From,
update_task(NewState),
{noreply, NewState}.
-
handle_cast(checkpoint, State) ->
case do_checkpoint(State) of
- {ok, NewState} ->
- couch_stats:increment_counter([couch_replicator, checkpoints, success]),
- {noreply, NewState#rep_state{timer = start_timer(State)}};
- Error ->
- couch_stats:increment_counter([couch_replicator, checkpoints, failure]),
- {stop, Error, State}
+ {ok, NewState} ->
+ couch_stats:increment_counter([couch_replicator, checkpoints, success]),
+ {noreply, NewState#rep_state{timer = start_timer(State)}};
+ Error ->
+ couch_stats:increment_counter([couch_replicator, checkpoints, failure]),
+ {stop, Error, State}
end;
-
-handle_cast({report_seq, Seq},
- #rep_state{seqs_in_progress = SeqsInProgress} = State) ->
+handle_cast(
+ {report_seq, Seq},
+ #rep_state{seqs_in_progress = SeqsInProgress} = State
+) ->
NewSeqsInProgress = ordsets:add_element(Seq, SeqsInProgress),
{noreply, State#rep_state{seqs_in_progress = NewSeqsInProgress}}.
-
handle_info(shutdown, St) ->
{stop, shutdown, St};
@@ -329,34 +346,36 @@ handle_info(timeout, InitArgs) ->
{stop, {shutdown, ShutdownReason}, ShutdownState}
end.
-
-terminate(normal, #rep_state{rep_details = #rep{id = RepId} = Rep,
- checkpoint_history = CheckpointHistory} = State) ->
+terminate(
+ normal,
+ #rep_state{
+ rep_details = #rep{id = RepId} = Rep,
+ checkpoint_history = CheckpointHistory
+ } = State
+) ->
terminate_cleanup(State),
couch_replicator_notifier:notify({finished, RepId, CheckpointHistory}),
doc_update_completed(Rep, rep_stats(State));
-
terminate(shutdown, #rep_state{rep_details = #rep{id = RepId}} = State) ->
    % Replication stopped via _scheduler_sup:terminate_child/1, which can
    % occur during regular scheduler operation or when a job is removed from
% the scheduler.
- State1 = case do_checkpoint(State) of
- {ok, NewState} ->
- NewState;
- Error ->
- LogMsg = "~p : Failed last checkpoint. Job: ~p Error: ~p",
- couch_log:error(LogMsg, [?MODULE, RepId, Error]),
- State
- end,
+ State1 =
+ case do_checkpoint(State) of
+ {ok, NewState} ->
+ NewState;
+ Error ->
+ LogMsg = "~p : Failed last checkpoint. Job: ~p Error: ~p",
+ couch_log:error(LogMsg, [?MODULE, RepId, Error]),
+ State
+ end,
couch_replicator_notifier:notify({stopped, RepId, <<"stopped">>}),
terminate_cleanup(State1);
-
terminate({shutdown, max_backoff}, {error, InitArgs}) ->
#rep{id = {BaseId, Ext} = RepId} = InitArgs,
couch_stats:increment_counter([couch_replicator, failed_starts]),
couch_log:warning("Replication `~s` reached max backoff ", [BaseId ++ Ext]),
couch_replicator_notifier:notify({error, RepId, max_backoff});
-
terminate({shutdown, {error, Error}}, {error, Class, Stack, InitArgs}) ->
#rep{
id = {BaseId, Ext} = RepId,
@@ -369,35 +388,44 @@ terminate({shutdown, {error, Error}}, {error, Class, Stack, InitArgs}) ->
Target = couch_replicator_api_wrap:db_uri(Target0),
RepIdStr = BaseId ++ Ext,
Msg = "~p:~p: Replication ~s failed to start ~p -> ~p doc ~p:~p stack:~p",
- couch_log:error(Msg, [Class, Error, RepIdStr, Source, Target, DbName,
- DocId, Stack]),
+ couch_log:error(Msg, [
+ Class,
+ Error,
+ RepIdStr,
+ Source,
+ Target,
+ DbName,
+ DocId,
+ Stack
+ ]),
couch_stats:increment_counter([couch_replicator, failed_starts]),
couch_replicator_notifier:notify({error, RepId, Error});
-
terminate({shutdown, max_backoff}, State) ->
#rep_state{
source_name = Source,
target_name = Target,
rep_details = #rep{id = {BaseId, Ext} = RepId}
} = State,
- couch_log:error("Replication `~s` (`~s` -> `~s`) reached max backoff",
- [BaseId ++ Ext, Source, Target]),
+ couch_log:error(
+ "Replication `~s` (`~s` -> `~s`) reached max backoff",
+ [BaseId ++ Ext, Source, Target]
+ ),
terminate_cleanup(State),
couch_replicator_notifier:notify({error, RepId, max_backoff});
-
terminate({shutdown, Reason}, State) ->
% Unwrap so when reporting we don't have an extra {shutdown, ...} tuple
% wrapped around the message
terminate(Reason, State);
-
terminate(Reason, State) ->
-#rep_state{
+ #rep_state{
source_name = Source,
target_name = Target,
rep_details = #rep{id = {BaseId, Ext} = RepId}
} = State,
- couch_log:error("Replication `~s` (`~s` -> `~s`) failed: ~s",
- [BaseId ++ Ext, Source, Target, to_binary(Reason)]),
+ couch_log:error(
+ "Replication `~s` (`~s` -> `~s`) failed: ~s",
+ [BaseId ++ Ext, Source, Target, to_binary(Reason)]
+ ),
terminate_cleanup(State),
couch_replicator_notifier:notify({error, RepId, Reason}).
@@ -406,28 +434,26 @@ terminate_cleanup(State) ->
couch_replicator_api_wrap:db_close(State#rep_state.source),
couch_replicator_api_wrap:db_close(State#rep_state.target).
-
-code_change(_OldVsn, #rep_state{}=State, _Extra) ->
+code_change(_OldVsn, #rep_state{} = State, _Extra) ->
{ok, State}.
-
format_status(_Opt, [_PDict, State]) ->
#rep_state{
- source = Source,
- target = Target,
- rep_details = RepDetails,
- start_seq = StartSeq,
- source_seq = SourceSeq,
- committed_seq = CommitedSeq,
- current_through_seq = ThroughSeq,
- highest_seq_done = HighestSeqDone,
- session_id = SessionId
+ source = Source,
+ target = Target,
+ rep_details = RepDetails,
+ start_seq = StartSeq,
+ source_seq = SourceSeq,
+ committed_seq = CommitedSeq,
+ current_through_seq = ThroughSeq,
+ highest_seq_done = HighestSeqDone,
+ session_id = SessionId
} = state_strip_creds(State),
#rep{
- id = RepId,
- options = Options,
- doc_id = DocId,
- db_name = DbName
+ id = RepId,
+ options = Options,
+ doc_id = DocId,
+ db_name = DbName
} = RepDetails,
[
{rep_id, RepId},
@@ -444,25 +470,26 @@ format_status(_Opt, [_PDict, State]) ->
{highest_seq_done, HighestSeqDone}
].
-
startup_jitter() ->
- Jitter = config:get_integer("replicator", "startup_jitter",
- ?STARTUP_JITTER_DEFAULT),
+ Jitter = config:get_integer(
+ "replicator",
+ "startup_jitter",
+ ?STARTUP_JITTER_DEFAULT
+ ),
couch_rand:uniform(erlang:max(1, Jitter)).
-
headers_strip_creds([], Acc) ->
lists:reverse(Acc);
headers_strip_creds([{Key, Value0} | Rest], Acc) ->
- Value = case string:to_lower(Key) of
- "authorization" ->
- "****";
- _ ->
- Value0
- end,
+ Value =
+ case string:to_lower(Key) of
+ "authorization" ->
+ "****";
+ _ ->
+ Value0
+ end,
headers_strip_creds(Rest, [{Key, Value} | Acc]).
-
httpdb_strip_creds(#httpdb{url = Url, headers = Headers} = HttpDb) ->
HttpDb#httpdb{
url = couch_util:url_strip_password(Url),
@@ -471,14 +498,12 @@ httpdb_strip_creds(#httpdb{url = Url, headers = Headers} = HttpDb) ->
httpdb_strip_creds(LocalDb) ->
LocalDb.
-
rep_strip_creds(#rep{source = Source, target = Target} = Rep) ->
Rep#rep{
source = httpdb_strip_creds(Source),
target = httpdb_strip_creds(Target)
}.
-
state_strip_creds(#rep_state{rep_details = Rep, source = Source, target = Target} = State) ->
% #rep_state contains the source and target at the top level and also
% in the nested #rep_details record
@@ -488,7 +513,6 @@ state_strip_creds(#rep_state{rep_details = Rep, source = Source, target = Target
target = httpdb_strip_creds(Target)
}.
-
adjust_maxconn(Src = #httpdb{http_connections = 1}, RepId) ->
Msg = "Adjusting minimum number of HTTP source connections to 2 for ~p",
couch_log:notice(Msg, [RepId]),
@@ -496,7 +520,6 @@ adjust_maxconn(Src = #httpdb{http_connections = 1}, RepId) ->
adjust_maxconn(Src, _RepId) ->
Src.
-
-spec doc_update_triggered(#rep{}) -> ok.
doc_update_triggered(#rep{db_name = null}) ->
ok;
@@ -507,62 +530,78 @@ doc_update_triggered(#rep{id = RepId, doc_id = DocId} = Rep) ->
false ->
ok
end,
- couch_log:notice("Document `~s` triggered replication `~s`",
- [DocId, pp_rep_id(RepId)]),
+ couch_log:notice(
+ "Document `~s` triggered replication `~s`",
+ [DocId, pp_rep_id(RepId)]
+ ),
ok.
-
-spec doc_update_completed(#rep{}, list()) -> ok.
doc_update_completed(#rep{db_name = null}, _Stats) ->
ok;
-doc_update_completed(#rep{id = RepId, doc_id = DocId, db_name = DbName,
- start_time = StartTime}, Stats0) ->
+doc_update_completed(
+ #rep{
+ id = RepId,
+ doc_id = DocId,
+ db_name = DbName,
+ start_time = StartTime
+ },
+ Stats0
+) ->
Stats = Stats0 ++ [{start_time, couch_replicator_utils:iso8601(StartTime)}],
couch_replicator_docs:update_doc_completed(DbName, DocId, Stats),
- couch_log:notice("Replication `~s` completed (triggered by `~s`)",
- [pp_rep_id(RepId), DocId]),
+ couch_log:notice(
+ "Replication `~s` completed (triggered by `~s`)",
+ [pp_rep_id(RepId), DocId]
+ ),
ok.
-
-do_last_checkpoint(#rep_state{seqs_in_progress = [],
- highest_seq_done = {_Ts, ?LOWEST_SEQ}} = State) ->
+do_last_checkpoint(
+ #rep_state{
+ seqs_in_progress = [],
+ highest_seq_done = {_Ts, ?LOWEST_SEQ}
+ } = State
+) ->
{stop, normal, cancel_timer(State)};
-do_last_checkpoint(#rep_state{seqs_in_progress = [],
- highest_seq_done = Seq} = State) ->
+do_last_checkpoint(
+ #rep_state{
+ seqs_in_progress = [],
+ highest_seq_done = Seq
+ } = State
+) ->
case do_checkpoint(State#rep_state{current_through_seq = Seq}) of
- {ok, NewState} ->
- couch_stats:increment_counter([couch_replicator, checkpoints, success]),
- {stop, normal, cancel_timer(NewState)};
- Error ->
- couch_stats:increment_counter([couch_replicator, checkpoints, failure]),
- {stop, Error, State}
+ {ok, NewState} ->
+ couch_stats:increment_counter([couch_replicator, checkpoints, success]),
+ {stop, normal, cancel_timer(NewState)};
+ Error ->
+ couch_stats:increment_counter([couch_replicator, checkpoints, failure]),
+ {stop, Error, State}
end.
-
start_timer(State) ->
After = State#rep_state.checkpoint_interval,
case timer:apply_after(After, gen_server, cast, [self(), checkpoint]) of
- {ok, Ref} ->
- Ref;
- Error ->
- couch_log:error("Replicator, error scheduling checkpoint: ~p", [Error]),
- nil
+ {ok, Ref} ->
+ Ref;
+ Error ->
+ couch_log:error("Replicator, error scheduling checkpoint: ~p", [Error]),
+ nil
end.
-
cancel_timer(#rep_state{timer = nil} = State) ->
State;
cancel_timer(#rep_state{timer = Timer} = State) ->
{ok, cancel} = timer:cancel(Timer),
State#rep_state{timer = nil}.
-
init_state(Rep) ->
#rep{
id = {BaseId, _Ext},
- source = Src0, target = Tgt,
+ source = Src0,
+ target = Tgt,
options = Options,
- type = Type, view = View,
+ type = Type,
+ view = View,
start_time = StartTime,
stats = ArgStats0
} = Rep,
@@ -570,8 +609,11 @@ init_state(Rep) ->
Src = adjust_maxconn(Src0, BaseId),
{ok, Source} = couch_replicator_api_wrap:db_open(Src),
{CreateTargetParams} = get_value(create_target_params, Options, {[]}),
- {ok, Target} = couch_replicator_api_wrap:db_open(Tgt,
- get_value(create_target, Options, false), CreateTargetParams),
+ {ok, Target} = couch_replicator_api_wrap:db_open(
+ Tgt,
+ get_value(create_target, Options, false),
+ CreateTargetParams
+ ),
{ok, SourceInfo} = couch_replicator_api_wrap:get_db_info(Source),
{ok, TargetInfo} = couch_replicator_api_wrap:get_db_info(Target),
@@ -581,10 +623,11 @@ init_state(Rep) ->
{StartSeq0, History} = compare_replication_logs(SourceLog, TargetLog),
ArgStats1 = couch_replicator_stats:new(ArgStats0),
- HistoryStats = case History of
- [{[_ | _] = HProps} | _] -> couch_replicator_stats:new(HProps);
- _ -> couch_replicator_stats:new()
- end,
+ HistoryStats =
+ case History of
+ [{[_ | _] = HProps} | _] -> couch_replicator_stats:new(HProps);
+ _ -> couch_replicator_stats:new()
+ end,
Stats = couch_replicator_stats:max_stats(ArgStats1, HistoryStats),
StartSeq1 = get_value(since_seq, Options, StartSeq0),
@@ -592,7 +635,7 @@ init_state(Rep) ->
SourceSeq = get_value(<<"update_seq">>, SourceInfo, ?LOWEST_SEQ),
- #doc{body={CheckpointHistory}} = SourceLog,
+ #doc{body = {CheckpointHistory}} = SourceLog,
State = #rep_state{
rep_details = Rep,
source_name = couch_replicator_api_wrap:db_uri(Source),
@@ -600,7 +643,7 @@ init_state(Rep) ->
source = Source,
target = Target,
history = History,
- checkpoint_history = {[{<<"no_changes">>, true}| CheckpointHistory]},
+ checkpoint_history = {[{<<"no_changes">>, true} | CheckpointHistory]},
start_seq = StartSeq,
current_through_seq = StartSeq,
committed_seq = StartSeq,
@@ -612,43 +655,51 @@ init_state(Rep) ->
session_id = couch_uuids:random(),
source_seq = SourceSeq,
use_checkpoints = get_value(use_checkpoints, Options, true),
- checkpoint_interval = get_value(checkpoint_interval, Options,
- ?DEFAULT_CHECKPOINT_INTERVAL),
+ checkpoint_interval = get_value(
+ checkpoint_interval,
+ Options,
+ ?DEFAULT_CHECKPOINT_INTERVAL
+ ),
type = Type,
view = View,
stats = Stats
},
State#rep_state{timer = start_timer(State)}.
-
find_and_migrate_logs(DbList, #rep{id = {BaseId, _}} = Rep) ->
LogId = ?l2b(?LOCAL_DOC_PREFIX ++ BaseId),
fold_replication_logs(DbList, ?REP_ID_VERSION, LogId, LogId, Rep, []).
-
fold_replication_logs([], _Vsn, _LogId, _NewId, _Rep, Acc) ->
lists:reverse(Acc);
-
fold_replication_logs([Db | Rest] = Dbs, Vsn, LogId, NewId, Rep, Acc) ->
case couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body]) of
- {error, <<"not_found">>} when Vsn > 1 ->
- OldRepId = couch_replicator_utils:replication_id(Rep, Vsn - 1),
- fold_replication_logs(Dbs, Vsn - 1,
- ?l2b(?LOCAL_DOC_PREFIX ++ OldRepId), NewId, Rep, Acc);
- {error, <<"not_found">>} ->
- fold_replication_logs(
- Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [#doc{id = NewId} | Acc]);
- {ok, Doc} when LogId =:= NewId ->
- fold_replication_logs(
- Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [Doc | Acc]);
- {ok, Doc} ->
- MigratedLog = #doc{id = NewId, body = Doc#doc.body},
- maybe_save_migrated_log(Rep, Db, MigratedLog, Doc#doc.id),
- fold_replication_logs(
- Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [MigratedLog | Acc])
+ {error, <<"not_found">>} when Vsn > 1 ->
+ OldRepId = couch_replicator_utils:replication_id(Rep, Vsn - 1),
+ fold_replication_logs(
+ Dbs,
+ Vsn - 1,
+ ?l2b(?LOCAL_DOC_PREFIX ++ OldRepId),
+ NewId,
+ Rep,
+ Acc
+ );
+ {error, <<"not_found">>} ->
+ fold_replication_logs(
+ Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [#doc{id = NewId} | Acc]
+ );
+ {ok, Doc} when LogId =:= NewId ->
+ fold_replication_logs(
+ Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [Doc | Acc]
+ );
+ {ok, Doc} ->
+ MigratedLog = #doc{id = NewId, body = Doc#doc.body},
+ maybe_save_migrated_log(Rep, Db, MigratedLog, Doc#doc.id),
+ fold_replication_logs(
+ Rest, ?REP_ID_VERSION, NewId, NewId, Rep, [MigratedLog | Acc]
+ )
end.
-
maybe_save_migrated_log(Rep, Db, #doc{} = Doc, OldId) ->
case get_value(use_checkpoints, Rep#rep.options, true) of
true ->
@@ -659,49 +710,50 @@ maybe_save_migrated_log(Rep, Db, #doc{} = Doc, OldId) ->
ok
end.
-
spawn_changes_manager(Parent, ChangesQueue, BatchSize) ->
spawn_link(fun() ->
changes_manager_loop_open(Parent, ChangesQueue, BatchSize, 1)
end).
-
changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts) ->
receive
- {get_changes, From} ->
- case couch_work_queue:dequeue(ChangesQueue, BatchSize) of
- closed ->
- From ! {closed, self()};
- {ok, ChangesOrLastSeqs} ->
- ReportSeq = case lists:last(ChangesOrLastSeqs) of
- {last_seq, Seq} ->
- {Ts, Seq};
- #doc_info{high_seq = Seq} ->
- {Ts, Seq}
+ {get_changes, From} ->
+ case couch_work_queue:dequeue(ChangesQueue, BatchSize) of
+ closed ->
+ From ! {closed, self()};
+ {ok, ChangesOrLastSeqs} ->
+ ReportSeq =
+ case lists:last(ChangesOrLastSeqs) of
+ {last_seq, Seq} ->
+ {Ts, Seq};
+ #doc_info{high_seq = Seq} ->
+ {Ts, Seq}
+ end,
+ Changes = lists:filter(
+ fun
+ (#doc_info{}) ->
+ true;
+ ({last_seq, _Seq}) ->
+ false
+ end,
+ ChangesOrLastSeqs
+ ),
+ ok = gen_server:cast(Parent, {report_seq, ReportSeq}),
+ From ! {changes, self(), Changes, ReportSeq}
end,
- Changes = lists:filter(
- fun(#doc_info{}) ->
- true;
- ({last_seq, _Seq}) ->
- false
- end, ChangesOrLastSeqs),
- ok = gen_server:cast(Parent, {report_seq, ReportSeq}),
- From ! {changes, self(), Changes, ReportSeq}
- end,
- changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts + 1)
+ changes_manager_loop_open(Parent, ChangesQueue, BatchSize, Ts + 1)
end.
-
-do_checkpoint(#rep_state{use_checkpoints=false} = State) ->
- NewState = State#rep_state{checkpoint_history = {[{<<"use_checkpoints">>, false}]} },
+do_checkpoint(#rep_state{use_checkpoints = false} = State) ->
+ NewState = State#rep_state{checkpoint_history = {[{<<"use_checkpoints">>, false}]}},
{ok, NewState};
-do_checkpoint(#rep_state{current_through_seq=Seq, committed_seq=Seq} = State) ->
+do_checkpoint(#rep_state{current_through_seq = Seq, committed_seq = Seq} = State) ->
update_task(State),
{ok, State};
do_checkpoint(State) ->
#rep_state{
- source_name=SourceName,
- target_name=TargetName,
+ source_name = SourceName,
+ target_name = TargetName,
source = Source,
target = Target,
history = OldHistory,
@@ -717,119 +769,135 @@ do_checkpoint(State) ->
session_id = SessionId
} = State,
case commit_to_both(Source, Target) of
- {source_error, Reason} ->
- {checkpoint_commit_failure,
- <<"Failure on source commit: ", (to_binary(Reason))/binary>>};
- {target_error, Reason} ->
- {checkpoint_commit_failure,
- <<"Failure on target commit: ", (to_binary(Reason))/binary>>};
- {SrcInstanceStartTime, TgtInstanceStartTime} ->
- couch_log:notice("recording a checkpoint for `~s` -> `~s` at source update_seq ~p",
- [SourceName, TargetName, NewSeq]),
- LocalStartTime = calendar:now_to_local_time(ReplicationStartTime),
- StartTime = ?l2b(httpd_util:rfc1123_date(LocalStartTime)),
- EndTime = ?l2b(httpd_util:rfc1123_date()),
- NewHistoryEntry = {[
- {<<"session_id">>, SessionId},
- {<<"start_time">>, StartTime},
- {<<"end_time">>, EndTime},
- {<<"start_last_seq">>, StartSeq},
- {<<"end_last_seq">>, NewSeq},
- {<<"recorded_seq">>, NewSeq},
- {<<"missing_checked">>, couch_replicator_stats:missing_checked(Stats)},
- {<<"missing_found">>, couch_replicator_stats:missing_found(Stats)},
- {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
- {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
- {<<"doc_write_failures">>, couch_replicator_stats:doc_write_failures(Stats)}
- ]},
- BaseHistory = [
- {<<"session_id">>, SessionId},
- {<<"source_last_seq">>, NewSeq},
- {<<"replication_id_version">>, ?REP_ID_VERSION}
- ] ++ case get_value(doc_ids, Options) of
- undefined ->
- [];
- _DocIds ->
- % backwards compatibility with the result of a replication by
- % doc IDs in versions 0.11.x and 1.0.x
- % TODO: deprecate (use same history format, simplify code)
- [
- {<<"start_time">>, StartTime},
- {<<"end_time">>, EndTime},
- {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
- {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
- {<<"doc_write_failures">>, couch_replicator_stats:doc_write_failures(Stats)}
- ]
- end,
- % limit history to 50 entries
- NewRepHistory = {
- BaseHistory ++
- [{<<"history">>, lists:sublist([NewHistoryEntry | OldHistory], 50)}]
- },
-
- try
- {SrcRevPos, SrcRevId} = update_checkpoint(
- Source, SourceLog#doc{body = NewRepHistory}, source),
- {TgtRevPos, TgtRevId} = update_checkpoint(
- Target, TargetLog#doc{body = NewRepHistory}, target),
- NewState = State#rep_state{
- checkpoint_history = NewRepHistory,
- committed_seq = NewTsSeq,
- source_log = SourceLog#doc{revs={SrcRevPos, [SrcRevId]}},
- target_log = TargetLog#doc{revs={TgtRevPos, [TgtRevId]}}
+ {source_error, Reason} ->
+ {checkpoint_commit_failure,
+ <<"Failure on source commit: ", (to_binary(Reason))/binary>>};
+ {target_error, Reason} ->
+ {checkpoint_commit_failure,
+ <<"Failure on target commit: ", (to_binary(Reason))/binary>>};
+ {SrcInstanceStartTime, TgtInstanceStartTime} ->
+ couch_log:notice(
+ "recording a checkpoint for `~s` -> `~s` at source update_seq ~p",
+ [SourceName, TargetName, NewSeq]
+ ),
+ LocalStartTime = calendar:now_to_local_time(ReplicationStartTime),
+ StartTime = ?l2b(httpd_util:rfc1123_date(LocalStartTime)),
+ EndTime = ?l2b(httpd_util:rfc1123_date()),
+ NewHistoryEntry =
+ {[
+ {<<"session_id">>, SessionId},
+ {<<"start_time">>, StartTime},
+ {<<"end_time">>, EndTime},
+ {<<"start_last_seq">>, StartSeq},
+ {<<"end_last_seq">>, NewSeq},
+ {<<"recorded_seq">>, NewSeq},
+ {<<"missing_checked">>, couch_replicator_stats:missing_checked(Stats)},
+ {<<"missing_found">>, couch_replicator_stats:missing_found(Stats)},
+ {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
+ {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
+ {<<"doc_write_failures">>, couch_replicator_stats:doc_write_failures(Stats)}
+ ]},
+ BaseHistory =
+ [
+ {<<"session_id">>, SessionId},
+ {<<"source_last_seq">>, NewSeq},
+ {<<"replication_id_version">>, ?REP_ID_VERSION}
+ ] ++
+ case get_value(doc_ids, Options) of
+ undefined ->
+ [];
+ _DocIds ->
+ % backwards compatibility with the result of a replication by
+ % doc IDs in versions 0.11.x and 1.0.x
+ % TODO: deprecate (use same history format, simplify code)
+ [
+ {<<"start_time">>, StartTime},
+ {<<"end_time">>, EndTime},
+ {<<"docs_read">>, couch_replicator_stats:docs_read(Stats)},
+ {<<"docs_written">>, couch_replicator_stats:docs_written(Stats)},
+ {<<"doc_write_failures">>,
+ couch_replicator_stats:doc_write_failures(Stats)}
+ ]
+ end,
+ % limit history to 50 entries
+ NewRepHistory = {
+ BaseHistory ++
+ [{<<"history">>, lists:sublist([NewHistoryEntry | OldHistory], 50)}]
},
- update_task(NewState),
- {ok, NewState}
- catch throw:{checkpoint_commit_failure, _} = Failure ->
- Failure
- end;
- {SrcInstanceStartTime, _NewTgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<"Target database out of sync. "
- "Try to increase max_dbs_open at the target's server.">>};
- {_NewSrcInstanceStartTime, TgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<"Source database out of sync. "
- "Try to increase max_dbs_open at the source's server.">>};
- {_NewSrcInstanceStartTime, _NewTgtInstanceStartTime} ->
- {checkpoint_commit_failure, <<"Source and target databases out of "
- "sync. Try to increase max_dbs_open at both servers.">>}
- end.
+ try
+ {SrcRevPos, SrcRevId} = update_checkpoint(
+ Source, SourceLog#doc{body = NewRepHistory}, source
+ ),
+ {TgtRevPos, TgtRevId} = update_checkpoint(
+ Target, TargetLog#doc{body = NewRepHistory}, target
+ ),
+ NewState = State#rep_state{
+ checkpoint_history = NewRepHistory,
+ committed_seq = NewTsSeq,
+ source_log = SourceLog#doc{revs = {SrcRevPos, [SrcRevId]}},
+ target_log = TargetLog#doc{revs = {TgtRevPos, [TgtRevId]}}
+ },
+ update_task(NewState),
+ {ok, NewState}
+ catch
+ throw:{checkpoint_commit_failure, _} = Failure ->
+ Failure
+ end;
+ {SrcInstanceStartTime, _NewTgtInstanceStartTime} ->
+ {checkpoint_commit_failure, <<
+ "Target database out of sync. "
+ "Try to increase max_dbs_open at the target's server."
+ >>};
+ {_NewSrcInstanceStartTime, TgtInstanceStartTime} ->
+ {checkpoint_commit_failure, <<
+ "Source database out of sync. "
+ "Try to increase max_dbs_open at the source's server."
+ >>};
+ {_NewSrcInstanceStartTime, _NewTgtInstanceStartTime} ->
+ {checkpoint_commit_failure, <<
+ "Source and target databases out of "
+ "sync. Try to increase max_dbs_open at both servers."
+ >>}
+ end.
update_checkpoint(Db, Doc, DbType) ->
try
update_checkpoint(Db, Doc)
- catch throw:{checkpoint_commit_failure, Reason} ->
- throw({checkpoint_commit_failure,
- <<"Error updating the ", (to_binary(DbType))/binary,
- " checkpoint document: ", (to_binary(Reason))/binary>>})
+ catch
+ throw:{checkpoint_commit_failure, Reason} ->
+ throw(
+ {checkpoint_commit_failure,
+ <<"Error updating the ", (to_binary(DbType))/binary, " checkpoint document: ",
+ (to_binary(Reason))/binary>>}
+ )
end.
-
update_checkpoint(Db, #doc{id = LogId, body = LogBody} = Doc) ->
try
case couch_replicator_api_wrap:update_doc(Db, Doc, [delay_commit]) of
- {ok, PosRevId} ->
- PosRevId;
- {error, Reason} ->
- throw({checkpoint_commit_failure, Reason})
- end
- catch throw:conflict ->
- case (catch couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body])) of
- {ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} ->
- % This means that we were able to update successfully the
- % checkpoint doc in a previous attempt but we got a connection
- % error (timeout for e.g.) before receiving the success response.
- % Therefore the request was retried and we got a conflict, as the
- % revision we sent is not the current one.
- % We confirm this by verifying the doc body we just got is the same
- % that we have just sent.
- {Pos, RevId};
- _ ->
- throw({checkpoint_commit_failure, conflict})
+ {ok, PosRevId} ->
+ PosRevId;
+ {error, Reason} ->
+ throw({checkpoint_commit_failure, Reason})
end
+ catch
+ throw:conflict ->
+ case (catch couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body])) of
+ {ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} ->
+                % This means that we were able to successfully update the
+                % checkpoint doc in a previous attempt but got a connection
+                % error (e.g. a timeout) before receiving the success response.
+                % Therefore the request was retried and we got a conflict, as the
+                % revision we sent is not the current one.
+                % We confirm this by verifying that the doc body we just got is
+                % the same as the one we just sent.
+ {Pos, RevId};
+ _ ->
+ throw({checkpoint_commit_failure, conflict})
+ end
end.
-
commit_to_both(Source, Target) ->
% commit the src async
ParentPid = self(),
@@ -837,90 +905,102 @@ commit_to_both(Source, Target) ->
fun() ->
Result = (catch couch_replicator_api_wrap:ensure_full_commit(Source)),
ParentPid ! {self(), Result}
- end),
+ end
+ ),
% commit tgt sync
TargetResult = (catch couch_replicator_api_wrap:ensure_full_commit(Target)),
- SourceResult = receive
- {SrcCommitPid, Result} ->
- unlink(SrcCommitPid),
- receive {'EXIT', SrcCommitPid, _} -> ok after 0 -> ok end,
- Result;
- {'EXIT', SrcCommitPid, Reason} ->
- {error, Reason}
- end,
+ SourceResult =
+ receive
+ {SrcCommitPid, Result} ->
+ unlink(SrcCommitPid),
+ receive
+ {'EXIT', SrcCommitPid, _} -> ok
+ after 0 -> ok
+ end,
+ Result;
+ {'EXIT', SrcCommitPid, Reason} ->
+ {error, Reason}
+ end,
case TargetResult of
- {ok, TargetStartTime} ->
- case SourceResult of
- {ok, SourceStartTime} ->
- {SourceStartTime, TargetStartTime};
- SourceError ->
- {source_error, SourceError}
- end;
- TargetError ->
- {target_error, TargetError}
+ {ok, TargetStartTime} ->
+ case SourceResult of
+ {ok, SourceStartTime} ->
+ {SourceStartTime, TargetStartTime};
+ SourceError ->
+ {source_error, SourceError}
+ end;
+ TargetError ->
+ {target_error, TargetError}
end.
-
compare_replication_logs(SrcDoc, TgtDoc) ->
- #doc{body={RepRecProps}} = SrcDoc,
- #doc{body={RepRecPropsTgt}} = TgtDoc,
- case get_value(<<"session_id">>, RepRecProps) ==
- get_value(<<"session_id">>, RepRecPropsTgt) of
- true ->
- % if the records have the same session id,
- % then we have a valid replication history
- OldSeqNum = get_value(<<"source_last_seq">>, RepRecProps, ?LOWEST_SEQ),
- OldHistory = get_value(<<"history">>, RepRecProps, []),
- {OldSeqNum, OldHistory};
- false ->
- SourceHistory = get_value(<<"history">>, RepRecProps, []),
- TargetHistory = get_value(<<"history">>, RepRecPropsTgt, []),
- couch_log:notice("Replication records differ. "
- "Scanning histories to find a common ancestor.", []),
- couch_log:debug("Record on source:~p~nRecord on target:~p~n",
- [RepRecProps, RepRecPropsTgt]),
- compare_rep_history(SourceHistory, TargetHistory)
+ #doc{body = {RepRecProps}} = SrcDoc,
+ #doc{body = {RepRecPropsTgt}} = TgtDoc,
+ case
+ get_value(<<"session_id">>, RepRecProps) ==
+ get_value(<<"session_id">>, RepRecPropsTgt)
+ of
+ true ->
+ % if the records have the same session id,
+ % then we have a valid replication history
+ OldSeqNum = get_value(<<"source_last_seq">>, RepRecProps, ?LOWEST_SEQ),
+ OldHistory = get_value(<<"history">>, RepRecProps, []),
+ {OldSeqNum, OldHistory};
+ false ->
+ SourceHistory = get_value(<<"history">>, RepRecProps, []),
+ TargetHistory = get_value(<<"history">>, RepRecPropsTgt, []),
+ couch_log:notice(
+ "Replication records differ. "
+ "Scanning histories to find a common ancestor.",
+ []
+ ),
+ couch_log:debug(
+ "Record on source:~p~nRecord on target:~p~n",
+ [RepRecProps, RepRecPropsTgt]
+ ),
+ compare_rep_history(SourceHistory, TargetHistory)
end.
-
compare_rep_history(S, T) when S =:= [] orelse T =:= [] ->
couch_log:notice("no common ancestry -- performing full replication", []),
{?LOWEST_SEQ, []};
compare_rep_history([{S} | SourceRest], [{T} | TargetRest] = Target) ->
SourceId = get_value(<<"session_id">>, S),
case has_session_id(SourceId, Target) of
- true ->
- RecordSeqNum = get_value(<<"recorded_seq">>, S, ?LOWEST_SEQ),
- couch_log:notice("found a common replication record with source_seq ~p",
- [RecordSeqNum]),
- {RecordSeqNum, SourceRest};
- false ->
- TargetId = get_value(<<"session_id">>, T),
- case has_session_id(TargetId, SourceRest) of
true ->
- RecordSeqNum = get_value(<<"recorded_seq">>, T, ?LOWEST_SEQ),
- couch_log:notice("found a common replication record with source_seq ~p",
- [RecordSeqNum]),
- {RecordSeqNum, TargetRest};
+ RecordSeqNum = get_value(<<"recorded_seq">>, S, ?LOWEST_SEQ),
+ couch_log:notice(
+ "found a common replication record with source_seq ~p",
+ [RecordSeqNum]
+ ),
+ {RecordSeqNum, SourceRest};
false ->
- compare_rep_history(SourceRest, TargetRest)
- end
+ TargetId = get_value(<<"session_id">>, T),
+ case has_session_id(TargetId, SourceRest) of
+ true ->
+ RecordSeqNum = get_value(<<"recorded_seq">>, T, ?LOWEST_SEQ),
+ couch_log:notice(
+ "found a common replication record with source_seq ~p",
+ [RecordSeqNum]
+ ),
+ {RecordSeqNum, TargetRest};
+ false ->
+ compare_rep_history(SourceRest, TargetRest)
+ end
end.
-
has_session_id(_SessionId, []) ->
false;
has_session_id(SessionId, [{Props} | Rest]) ->
case get_value(<<"session_id">>, Props, nil) of
- SessionId ->
- true;
- _Else ->
- has_session_id(SessionId, Rest)
+ SessionId ->
+ true;
+ _Else ->
+ has_session_id(SessionId, Rest)
end.
-
get_pending_count(St) ->
Rep = St#rep_state.rep_details,
Timeout = get_value(connection_timeout, Rep#rep.options),
@@ -941,36 +1021,35 @@ get_pending_count(St) ->
NewPendingCount
end.
-
-get_pending_count_int(#rep_state{source = #httpdb{} = Db0}=St) ->
+get_pending_count_int(#rep_state{source = #httpdb{} = Db0} = St) ->
{_, Seq} = St#rep_state.highest_seq_done,
Db = Db0#httpdb{retries = 3},
case (catch couch_replicator_api_wrap:get_pending_count(Db, Seq)) of
- {ok, Pending} ->
- Pending;
- _ ->
- null
+ {ok, Pending} ->
+ Pending;
+ _ ->
+ null
end;
-get_pending_count_int(#rep_state{source = Db}=St) ->
+get_pending_count_int(#rep_state{source = Db} = St) ->
{_, Seq} = St#rep_state.highest_seq_done,
{ok, Pending} = couch_replicator_api_wrap:get_pending_count(Db, Seq),
Pending.
-
update_task(State) ->
#rep_state{
rep_details = #rep{id = JobId},
current_through_seq = {_, ThroughSeq},
highest_seq_done = {_, HighestSeq}
} = State,
- Status = rep_stats(State) ++ [
- {source_seq, HighestSeq},
- {through_seq, ThroughSeq}
- ],
+ Status =
+ rep_stats(State) ++
+ [
+ {source_seq, HighestSeq},
+ {through_seq, ThroughSeq}
+ ],
couch_replicator_scheduler:update_job_stats(JobId, Status),
couch_task_status:update(Status).
-
rep_stats(State) ->
#rep_state{
committed_seq = {_, CommittedSeq},
@@ -986,68 +1065,80 @@ rep_stats(State) ->
{checkpointed_source_seq, CommittedSeq}
].
-
replication_start_error({unauthorized, DbUri}) ->
{unauthorized, <<"unauthorized to access or create database ", DbUri/binary>>};
replication_start_error({db_not_found, DbUri}) ->
{db_not_found, <<"could not open ", DbUri/binary>>};
-replication_start_error({http_request_failed, _Method, Url0,
- {error, {error, {conn_failed, {error, nxdomain}}}}}) ->
+replication_start_error(
+ {http_request_failed, _Method, Url0, {error, {error, {conn_failed, {error, nxdomain}}}}}
+) ->
Url = ?l2b(couch_util:url_strip_password(Url0)),
{nxdomain, <<"could not resolve ", Url/binary>>};
-replication_start_error({http_request_failed, Method0, Url0,
- {error, {code, Code}}}) when is_integer(Code) ->
+replication_start_error({http_request_failed, Method0, Url0, {error, {code, Code}}}) when
+ is_integer(Code)
+->
Url = ?l2b(couch_util:url_strip_password(Url0)),
Method = ?l2b(Method0),
{http_error_code, Code, <<Method/binary, " ", Url/binary>>};
replication_start_error(Error) ->
Error.
-
log_replication_start(#rep_state{rep_details = Rep} = RepState) ->
#rep{
- id = {BaseId, Ext},
- doc_id = DocId,
- db_name = DbName,
- options = Options
+ id = {BaseId, Ext},
+ doc_id = DocId,
+ db_name = DbName,
+ options = Options
} = Rep,
Id = BaseId ++ Ext,
Workers = get_value(worker_processes, Options),
BatchSize = get_value(worker_batch_size, Options),
#rep_state{
- source_name = Source, % credentials already stripped
- target_name = Target, % credentials already stripped
- session_id = Sid
+ % credentials already stripped
+ source_name = Source,
+ % credentials already stripped
+ target_name = Target,
+ session_id = Sid
} = RepState,
- From = case DbName of
- ShardName when is_binary(ShardName) ->
- io_lib:format("from doc ~s:~s", [mem3:dbname(ShardName), DocId]);
- _ ->
- "from _replicate endpoint"
- end,
- Msg = "Starting replication ~s (~s -> ~s) ~s worker_procesess:~p"
+ From =
+ case DbName of
+ ShardName when is_binary(ShardName) ->
+ io_lib:format("from doc ~s:~s", [mem3:dbname(ShardName), DocId]);
+ _ ->
+ "from _replicate endpoint"
+ end,
+ Msg =
+ "Starting replication ~s (~s -> ~s) ~s worker_procesess:~p"
" worker_batch_size:~p session_id:~s",
couch_log:notice(Msg, [Id, Source, Target, From, Workers, BatchSize, Sid]).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
replication_start_error_test() ->
- ?assertEqual({unauthorized, <<"unauthorized to access or create database"
- " http://x/y">>}, replication_start_error({unauthorized,
- <<"http://x/y">>})),
- ?assertEqual({db_not_found, <<"could not open http://x/y">>},
- replication_start_error({db_not_found, <<"http://x/y">>})),
- ?assertEqual({nxdomain,<<"could not resolve http://x/y">>},
- replication_start_error({http_request_failed, "GET", "http://x/y",
- {error, {error, {conn_failed, {error, nxdomain}}}}})),
- ?assertEqual({http_error_code,503,<<"GET http://x/y">>},
- replication_start_error({http_request_failed, "GET", "http://x/y",
- {error, {code, 503}}})).
-
+ ?assertEqual(
+ {unauthorized, <<
+ "unauthorized to access or create database"
+ " http://x/y"
+ >>},
+ replication_start_error({unauthorized, <<"http://x/y">>})
+ ),
+ ?assertEqual(
+ {db_not_found, <<"could not open http://x/y">>},
+ replication_start_error({db_not_found, <<"http://x/y">>})
+ ),
+ ?assertEqual(
+ {nxdomain, <<"could not resolve http://x/y">>},
+ replication_start_error(
+ {http_request_failed, "GET", "http://x/y",
+ {error, {error, {conn_failed, {error, nxdomain}}}}}
+ )
+ ),
+ ?assertEqual(
+ {http_error_code, 503, <<"GET http://x/y">>},
+ replication_start_error({http_request_failed, "GET", "http://x/y", {error, {code, 503}}})
+ ).
scheduler_job_format_status_test() ->
Source = <<"http://u:p@h1/d1">>,
@@ -1085,5 +1176,4 @@ scheduler_job_format_status_test() ->
?assertEqual(<<"4">>, proplists:get_value(current_through_seq, Format)),
?assertEqual(<<"5">>, proplists:get_value(highest_seq_done, Format)).
-
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_sup.erl b/src/couch_replicator/src/couch_replicator_scheduler_sup.erl
index 8ab55f838..1d5104312 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler_sup.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler_sup.erl
@@ -23,24 +23,20 @@
%% supervisor api
-export([
- init/1
+ init/1
]).
-
%% includes
-include("couch_replicator.hrl").
-
%% public functions
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
start_child(#rep{} = Rep) ->
supervisor:start_child(?MODULE, [Rep]).
-
terminate_child(Pid) ->
supervisor:terminate_child(?MODULE, Pid).
@@ -48,7 +44,8 @@ terminate_child(Pid) ->
init(_Args) ->
Start = {couch_replicator_scheduler_job, start_link, []},
- Restart = temporary, % A crashed job is not entitled to immediate restart.
+ % A crashed job is not entitled to immediate restart.
+ Restart = temporary,
Shutdown = 5000,
Type = worker,
Modules = [couch_replicator_scheduler_job],
diff --git a/src/couch_replicator/src/couch_replicator_share.erl b/src/couch_replicator/src/couch_replicator_share.erl
index c1e52b2b8..8c9fa029a 100644
--- a/src/couch_replicator/src/couch_replicator_share.erl
+++ b/src/couch_replicator/src/couch_replicator_share.erl
@@ -53,7 +53,6 @@
%
% [1] : https://proteusmaster.urcf.drexel.edu/urcfwiki/images/KayLauderFairShare.pdf
-
-module(couch_replicator_share).
-export([
@@ -68,11 +67,9 @@
charge/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
-
% Usage coefficient decays historic usage every scheduling cycle. For example,
% the usage value for a job running 1 minute is 60000000 (i.e. microseconds /
% minute), then if the job stops running it will take about 26 cycles (minutes)
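A quick sanity check of the "about 26 cycles" figure in the comment above; illustrative only, assuming the default usage coefficient is 0.5 (the constant itself is outside this hunk) and mirroring the trunc/1 rounding that decay/2 later in this file applies via ets:select_replace/2:

```
%% Illustrative only: how many 0.5-coefficient decay cycles it takes for a
%% usage value to be truncated down to zero.
decay_cycles(Usage, Coeff) -> decay_cycles(Usage, Coeff, 0).

decay_cycles(0, _Coeff, N) -> N;
decay_cycles(Usage, Coeff, N) -> decay_cycles(trunc(Usage * Coeff), Coeff, N + 1).

%% decay_cycles(60000000, 0.5) evaluates to 26, matching the comment above.
```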
@@ -101,7 +98,6 @@
%
-define(DEFAULT_PRIORITY_COEFF, 0.98).
-
-define(MIN_SHARES, 1).
-define(MAX_SHARES, 1000).
-define(DEFAULT_SHARES, 100).
@@ -112,34 +108,34 @@
-define(CHARGES, couch_replicator_stopped_usage).
-define(NUM_JOBS, couch_replicator_num_jobs).
-
init() ->
EtsOpts = [named_table, public],
- ?SHARES = ets:new(?SHARES, EtsOpts), % {Key, Shares}
- ?PRIORITIES = ets:new(?PRIORITIES, EtsOpts), % {JobId, Priority}
- ?USAGE = ets:new(?USAGE, EtsOpts), % {Key, Usage}
- ?CHARGES = ets:new(?CHARGES, EtsOpts), % {Key, Charges}
- ?NUM_JOBS = ets:new(?NUM_JOBS, EtsOpts), % {Key, NumJobs}
+ % {Key, Shares}
+ ?SHARES = ets:new(?SHARES, EtsOpts),
+ % {JobId, Priority}
+ ?PRIORITIES = ets:new(?PRIORITIES, EtsOpts),
+ % {Key, Usage}
+ ?USAGE = ets:new(?USAGE, EtsOpts),
+ % {Key, Charges}
+ ?CHARGES = ets:new(?CHARGES, EtsOpts),
+ % {Key, NumJobs}
+ ?NUM_JOBS = ets:new(?NUM_JOBS, EtsOpts),
lists:foreach(fun({K, V}) -> update_shares(K, V) end, get_config_shares()).
-
clear() ->
Tables = [?SHARES, ?PRIORITIES, ?USAGE, ?CHARGES, ?NUM_JOBS],
lists:foreach(fun(T) -> catch ets:delete(T) end, Tables).
-
% This should be called when a user updates the replicator.shares config section
%
update_shares(Key, Shares) when is_integer(Shares) ->
ets:insert(?SHARES, {Key, bounded(Shares, ?MIN_SHARES, ?MAX_SHARES)}).
-
% Called when the config value is deleted and shares are reset to the default
% value.
reset_shares(Key) ->
ets:delete(?SHARES, Key).
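As a hedged illustration of the two functions above, shares can be driven through the same config:set/4 / config:delete/3 calls the test helpers later in this file use; that a config listener forwards "replicator.shares" changes to update_shares/2 and reset_shares/1 is taken from the comments above, not shown in this hunk:

```
%% Hedged sketch: weight replications keyed on "db1" at 400 shares instead of
%% the default 100; update_shares/2 clamps values into [1, 1000].
config:set("replicator.shares", "db1", "400", _Persist = false).

%% Deleting the key resets the weight back to ?DEFAULT_SHARES (100).
config:delete("replicator.shares", "db1", _Persist = false).
```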
-
job_added(#job{} = Job) ->
Key = key(Job),
% If the entry is not present {Key, 0} is used as the default
@@ -148,7 +144,6 @@ job_added(#job{} = Job) ->
% new jobs don't get to be at priority 0 (highest).
update_priority(Job).
-
job_removed(#job{} = Job) ->
Key = key(Job),
ets:delete(?PRIORITIES, Job#job.id),
@@ -160,7 +155,6 @@ job_removed(#job{} = Job) ->
end,
ok.
-
% This is the main algorithm update function. It should be called during each
% rescheduling cycle with a list of running jobs, the interval from the
% scheduler (in milliseconds), and the current timestamp.
@@ -180,7 +174,6 @@ update(RunningJobs, Interval, {_, _, _} = Now) ->
decay_priorities(),
lists:foreach(fun(Job) -> update_priority(Job) end, RunningJobs).
-
priority(JobId) ->
% Not found means it was removed because its value was 0
case ets:lookup(?PRIORITIES, JobId) of
@@ -188,38 +181,32 @@ priority(JobId) ->
[] -> 0
end.
-
charge(#job{pid = undefined}, _, _) ->
0;
-
charge(#job{} = Job, Interval, {_, _, _} = Now) when is_integer(Interval) ->
Key = key(Job),
Charges = job_charges(Job, Interval, Now),
% If the entry is not present {Key, 0} is used as the default
ets:update_counter(?CHARGES, Key, Charges, {Key, 0}).
-
usage(Key) ->
case ets:lookup(?USAGE, Key) of
[{_, Usage}] -> Usage;
[] -> 0
end.
-
num_jobs(Key) ->
case ets:lookup(?NUM_JOBS, Key) of
[{_, NumJobs}] -> NumJobs;
[] -> 0
end.
-
shares(Key) ->
case ets:lookup(?SHARES, Key) of
[{_, Shares}] -> Shares;
[] -> ?DEFAULT_SHARES
end.
-
% In [1] this is described in the "Decay of Process Priorities" section
%
decay_priorities() ->
@@ -228,7 +215,6 @@ decay_priorities() ->
% is missing we assume it is 0
clear_zero(?PRIORITIES).
-
% This is the main part of the algorithm. In [1] it is described in the
% "Priority Adjustment" section.
%
@@ -240,20 +226,22 @@ update_priority(#job{} = Job) ->
% If the entry is not present {Id, 0} is used as the default
ets:update_counter(?PRIORITIES, Id, trunc(Priority), {Id, 0}).
-
% This is the "User-Level Scheduling" part from [1]
%
update_usage() ->
decay(?USAGE, usage_coeff()),
clear_zero(?USAGE),
- ets:foldl(fun({Key, Charges}, _) ->
- % If the entry is not present {Key, 0} is used as the default
- ets:update_counter(?USAGE, Key, Charges, {Key, 0})
- end, 0, ?CHARGES),
+ ets:foldl(
+ fun({Key, Charges}, _) ->
+ % If the entry is not present {Key, 0} is used as the default
+ ets:update_counter(?USAGE, Key, Charges, {Key, 0})
+ end,
+ 0,
+ ?CHARGES
+ ),
% Start each interval with a fresh charges table
ets:delete_all_objects(?CHARGES).
-
% Private helper functions
decay(Ets, Coeff) when is_atom(Ets) ->
@@ -263,11 +251,9 @@ decay(Ets, Coeff) when is_atom(Ets) ->
Result = {{'$1', {trunc, {'*', '$2', {const, Coeff}}}}},
ets:select_replace(Ets, [{Head, [], [Result]}]).
-
clear_zero(Ets) when is_atom(Ets) ->
ets:select_delete(Ets, [{{'_', '$1'}, [{'=<', '$1', 0}], [true]}]).
-
key(#job{} = Job) ->
Rep = Job#job.rep,
case is_binary(Rep#rep.db_name) of
@@ -275,7 +261,6 @@ key(#job{} = Job) ->
false -> (Rep#rep.user_ctx)#user_ctx.name
end.
-
% Jobs are charged based on the amount of time the job was running during the
% last scheduling interval. The time units used are microseconds in order to
% have large enough usage values so that when priority is calculated the
@@ -294,25 +279,25 @@ job_charges(#job{} = Job, IntervalMSec, {_, _, _} = Now) ->
IntervalUSec = IntervalMSec * 1000,
bounded(TimeRunning, 0, IntervalUSec).
-
last_started(#job{} = Job) ->
case lists:keyfind(started, 1, Job#job.history) of
- false -> {0, 0, 0}; % In case user set too low of a max history
+ % In case user set too low of a max history
+ false -> {0, 0, 0};
{started, When} -> When
end.
-
bounded(Val, Min, Max) ->
max(Min, min(Max, Val)).
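For illustration, the bounded/3 clamp used by job_charges/3 above means a job is never charged more than one full scheduling interval and never a negative amount; a minimal sketch with a 60-second interval:

```
%% Illustrative only: with a 60s (60000 ms) interval the charge is capped at
%% one interval's worth of microseconds and floored at zero.
bounded_example() ->
    IntervalUSec = 60000 * 1000,
    60000000 = bounded(75000000, 0, IntervalUSec),
    0 = bounded(-5, 0, IntervalUSec),
    ok.
```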
-
% Config helper functions
get_config_shares() ->
- lists:map(fun({K, V}) ->
- {list_to_binary(K), int_val(V, ?DEFAULT_SHARES)}
- end, config:get("replicator.shares")).
-
+ lists:map(
+ fun({K, V}) ->
+ {list_to_binary(K), int_val(V, ?DEFAULT_SHARES)}
+ end,
+ config:get("replicator.shares")
+ ).
priority_coeff() ->
% This is the K2 coefficient from [1]
@@ -320,14 +305,12 @@ priority_coeff() ->
Val = float_val(config:get("replicator", "priority_coeff"), Default),
bounded(Val, 0.0, 1.0).
-
usage_coeff() ->
% This is the K1 coefficient from [1]
Default = ?DEFAULT_USAGE_COEFF,
Val = float_val(config:get("replicator", "usage_coeff"), Default),
bounded(Val, 0.0, 1.0).
-
int_val(Str, Default) when is_list(Str) ->
try list_to_integer(Str) of
Val -> Val
@@ -336,10 +319,8 @@ int_val(Str, Default) when is_list(Str) ->
Default
end.
-
float_val(undefined, Default) ->
Default;
-
float_val(Str, Default) when is_list(Str) ->
try list_to_float(Str) of
Val -> Val
@@ -348,14 +329,12 @@ float_val(Str, Default) when is_list(Str) ->
Default
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch_replicator/test/eunit/couch_replicator_test.hrl").
-
-define(DB1, <<"db1">>).
-define(DB2, <<"db2">>).
-define(DB3, <<"db3">>).
@@ -363,7 +342,6 @@ float_val(Str, Default) when is_list(Str) ->
-define(J2, <<"j2">>).
-define(J3, <<"j3">>).
-
fair_share_test_() ->
{
setup,
@@ -395,31 +373,26 @@ fair_share_test_() ->
}
}.
-
setup_all() ->
test_util:start_couch().
-
teardown_all(Ctx) ->
config_delete("priority_coeff"),
config_delete("usage_coeff"),
config_shares_delete(),
test_util:stop_couch(Ctx).
-
setup() ->
init(),
ok.
-
teardown(_) ->
clear(),
config_delete("priority_coeff"),
config_delete("usage_coeff"),
config_shares_delete().
-
-init_works(_)->
+init_works(_) ->
Tables = [?SHARES, ?PRIORITIES, ?USAGE, ?CHARGES, ?NUM_JOBS],
[?assert(is_list(ets:info(T))) || T <- Tables],
?assertEqual(#{}, tab2map(?SHARES)),
@@ -432,7 +405,6 @@ init_works(_)->
?assertEqual(200, shares(?DB1)),
?assertEqual(#{?DB1 => 200}, tab2map(?SHARES)).
-
shares_are_updated_and_reset(_) ->
?assertEqual(#{}, tab2map(?SHARES)),
@@ -451,7 +423,6 @@ shares_are_updated_and_reset(_) ->
update_shares(?DB1, 1001),
?assertEqual(1000, shares(?DB1)).
-
jobs_are_added_and_removed(_) ->
job_added(job(?J1, ?DB1)),
?assertEqual(1, num_jobs(?DB1)),
@@ -479,7 +450,6 @@ jobs_are_added_and_removed(_) ->
?assertEqual(0, priority(?J2)),
?assertEqual(#{}, tab2map(?PRIORITIES)).
-
can_fetch_job_priority(_) ->
job_added(job(?J1, ?DB1)),
?assertEqual(0, priority(?J1)),
@@ -490,7 +460,6 @@ can_fetch_job_priority(_) ->
ets:delete(?PRIORITIES, ?J1),
?assertEqual(0, priority(?J1)).
-
jobs_are_charged(_) ->
Job1 = running_job(?J1, ?DB1),
job_added(Job1),
@@ -518,7 +487,6 @@ jobs_are_charged(_) ->
job_removed(Job2),
?assertEqual(#{?DB1 => 2000001}, tab2map(?CHARGES)).
-
usage_is_updated(_) ->
Job = running_job(?J1, ?DB1),
job_added(Job),
@@ -549,7 +517,6 @@ usage_is_updated(_) ->
?assertEqual(0, usage(?DB1)),
?assertEqual(#{}, tab2map(?USAGE)).
-
priority_coefficient_works(_) ->
job_added(job(?J1, ?DB1)),
ets:insert(?PRIORITIES, {?J1, 1000}),
@@ -578,7 +545,6 @@ priority_coefficient_works(_) ->
?assertEqual(0, priority(?J1)),
?assertEqual(#{}, tab2map(?PRIORITIES)).
-
priority_decays_when_jobs_stop_running(_) ->
Job = running_job(?J1, ?DB1),
job_added(Job),
@@ -593,7 +559,6 @@ priority_decays_when_jobs_stop_running(_) ->
[reschedule(0, {[], Pending}) || _ <- lists:seq(1, 500)],
?assertEqual(0, priority(?J1)).
-
priority_increases_when_jobs_run(_) ->
Job = running_job(?J1, ?DB1),
job_added(Job),
@@ -617,7 +582,6 @@ priority_increases_when_jobs_run(_) ->
Pm = priority(?J1),
?assertEqual(Pn, Pm).
-
two_dbs_equal_shares_equal_number_of_jobs(_) ->
update_shares(?DB1, 100),
update_shares(?DB2, 100),
@@ -626,7 +590,6 @@ two_dbs_equal_shares_equal_number_of_jobs(_) ->
?assert(49 =< Db1 andalso Db1 =< 51),
?assert(49 =< Db2 andalso Db2 =< 51).
-
two_dbs_unequal_shares_equal_number_of_jobs(_) ->
update_shares(?DB1, 100),
update_shares(?DB1, 900),
@@ -635,7 +598,6 @@ two_dbs_unequal_shares_equal_number_of_jobs(_) ->
?assert(89 =< Db1 andalso Db1 =< 91),
?assert(9 =< Db2 andalso Db2 =< 11).
-
two_dbs_equal_shares_unequal_number_of_jobs(_) ->
update_shares(?DB1, 100),
update_shares(?DB2, 100),
@@ -644,7 +606,6 @@ two_dbs_equal_shares_unequal_number_of_jobs(_) ->
?assert(49 =< Db1 andalso Db1 =< 51),
?assert(49 =< Db2 andalso Db2 =< 51).
-
two_dbs_unequal_shares_unequal_number_of_jobs(_) ->
update_shares(?DB1, 1),
update_shares(?DB2, 100),
@@ -653,7 +614,6 @@ two_dbs_unequal_shares_unequal_number_of_jobs(_) ->
?assert(0 =< Db1 andalso Db1 =< 2),
?assert(98 =< Db2 andalso Db2 =< 100).
-
three_dbs_equal_shares_equal_number_of_jobs(_) ->
update_shares(?DB1, 100),
update_shares(?DB2, 100),
@@ -664,18 +624,16 @@ three_dbs_equal_shares_equal_number_of_jobs(_) ->
?assert(32 =< Db2 andalso Db2 =< 34),
?assert(32 =< Db3 andalso Db3 =< 34).
-
three_dbs_unequal_shares_equal_number_of_jobs(_) ->
update_shares(?DB1, 100),
update_shares(?DB2, 700),
update_shares(?DB3, 200),
Jobs = jobs(#{?DB1 => {25, 75}, ?DB2 => {25, 75}, ?DB3 => {25, 75}}),
#{?DB1 := Db1, ?DB2 := Db2, ?DB3 := Db3} = run_scheduler(1000, 10, Jobs),
- ?assert(9 =< Db1 andalso Db1 =< 11),
+ ?assert(9 =< Db1 andalso Db1 =< 11),
?assert(69 =< Db2 andalso Db2 =< 71),
?assert(19 =< Db3 andalso Db3 =< 21).
-
three_dbs_equal_shares_unequal_number_of_jobs(_) ->
update_shares(?DB1, 100),
update_shares(?DB2, 100),
@@ -686,7 +644,6 @@ three_dbs_equal_shares_unequal_number_of_jobs(_) ->
?assert(32 =< Db2 andalso Db2 =< 34),
?assert(32 =< Db3 andalso Db3 =< 34).
-
three_dbs_unequal_shares_unequal_number_of_jobs(_) ->
update_shares(?DB1, 1000),
update_shares(?DB2, 100),
@@ -697,31 +654,26 @@ three_dbs_unequal_shares_unequal_number_of_jobs(_) ->
?assert(9 =< Db2 andalso Db2 =< 11),
?assert(2 =< Db3 andalso Db3 =< 4).
-
config_set(K, V) ->
config:set("replicator", K, V, _Persist = false).
-
config_delete(K) ->
config:delete("replicator", K, _Persist = false).
-
config_share_set(K, V) ->
config:set("replicator.shares", K, V, _Persist = false).
-
config_shares_delete() ->
- [config:delete("replicator.shares", K, _Persist = false) ||
- {K, _} <- config:get("replicator.shares")].
-
+ [
+ config:delete("replicator.shares", K, _Persist = false)
+ || {K, _} <- config:get("replicator.shares")
+ ].
tab2map(T) when is_atom(T) ->
maps:from_list(ets:tab2list(T)).
-
job(rand, Db) ->
job(rand:uniform(1 bsl 59), Db);
-
job(Id, Db) ->
Job = #job{
id = Id,
@@ -732,25 +684,21 @@ job(Id, Db) ->
},
stop(Job).
-
running_job(Id, Db) ->
run(job(Id, Db)).
-
run(#job{} = Job) ->
Job#job{
pid = list_to_pid("<0.9999.999>"),
history = [{started, {0, 0, 0}}, {added, {0, 0, 0}}]
}.
-
stop(#job{} = Job) ->
Job#job{
pid = undefined,
history = [{added, {0, 0, 0}}]
}.
-
% Simple scheduler simulator. Start and stop N jobs and do the
% accounting steps. Return a new list of running and pending jobs. If
% N is 0 then jobs which were running stay running and jobs were
@@ -773,33 +721,42 @@ reschedule(N, {Running, Pending}) ->
{Running2, Pending2}.
-
% Run a few scheduling cycles and calculate usage percentage for each db
%
run_scheduler(Cycles, Churn, Jobs0) ->
Acc0 = {#{}, Jobs0},
- {Sum, _} = lists:foldl(fun(_CycleCnt, {UsageAcc, {Running, _} = Jobs}) ->
- UsageAcc1 = lists:foldl(fun(#job{} = Job, Acc) ->
- Db = Job#job.rep#rep.db_name,
- maps:update_with(Db, fun(V) -> V + 1 end, 0, Acc)
- end, UsageAcc, Running),
- {UsageAcc1, reschedule(Churn, Jobs)}
- end, Acc0, lists:seq(1, Cycles)),
+ {Sum, _} = lists:foldl(
+ fun(_CycleCnt, {UsageAcc, {Running, _} = Jobs}) ->
+ UsageAcc1 = lists:foldl(
+ fun(#job{} = Job, Acc) ->
+ Db = Job#job.rep#rep.db_name,
+ maps:update_with(Db, fun(V) -> V + 1 end, 0, Acc)
+ end,
+ UsageAcc,
+ Running
+ ),
+ {UsageAcc1, reschedule(Churn, Jobs)}
+ end,
+ Acc0,
+ lists:seq(1, Cycles)
+ ),
Total = maps:fold(fun(_, V, Acc) -> Acc + V end, 0, Sum),
maps:map(fun(_Db, V) -> round(V / Total * 100) end, Sum).
-
% Dbs = #{Db => {RunningCount, PendingCount}}
%
jobs(#{} = Dbs) ->
- maps:fold(fun(Db, {RCnt, PCnt}, {Running, Pending}) ->
- RJobs = [running_job(rand, Db) || _ <- lists:seq(1, RCnt)],
- PJobs = [job(rand, Db) || _ <- lists:seq(1, PCnt)],
- [job_added(Job) || Job <- RJobs ++ PJobs],
- {Running ++ RJobs, Pending ++ PJobs}
- end, {[], []}, Dbs).
-
+ maps:fold(
+ fun(Db, {RCnt, PCnt}, {Running, Pending}) ->
+ RJobs = [running_job(rand, Db) || _ <- lists:seq(1, RCnt)],
+ PJobs = [job(rand, Db) || _ <- lists:seq(1, PCnt)],
+ [job_added(Job) || Job <- RJobs ++ PJobs],
+ {Running ++ RJobs, Pending ++ PJobs}
+ end,
+ {[], []},
+ Dbs
+ ).
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_stats.erl b/src/couch_replicator/src/couch_replicator_stats.erl
index 37848b3ee..e1f23a1bc 100644
--- a/src/couch_replicator/src/couch_replicator_stats.erl
+++ b/src/couch_replicator/src/couch_replicator_stats.erl
@@ -63,31 +63,25 @@ increment(Field, Stats) ->
orddict:update_counter(Field, 1, Stats).
sum_stats(S1, S2) ->
- orddict:merge(fun(_, V1, V2) -> V1+V2 end, S1, S2).
+ orddict:merge(fun(_, V1, V2) -> V1 + V2 end, S1, S2).
max_stats(S1, S2) ->
orddict:merge(fun(_, V1, V2) -> max(V1, V2) end, S1, S2).
-
% Handle initializing from a status object, which uses the same values but
% different field names, as well as from ejson props from the checkpoint
% history
%
-fmap({missing_found, _}) -> true;
-fmap({missing_revisions_found, V}) -> {true, {missing_found, V}};
-fmap({<<"missing_found">>, V}) -> {true, {missing_found, V}};
-
-fmap({missing_checked, _}) -> true;
-fmap({revisions_checked, V}) -> {true, {missing_checked, V}};
-fmap({<<"missing_checked">>, V}) -> {true, {missing_checked, V}};
-
-fmap({docs_read, _}) -> true;
-fmap({<<"docs_read">>, V}) -> {true, {docs_read, V}};
-
-fmap({docs_written, _}) -> true;
-fmap({<<"docs_written">>, V}) -> {true, {docs_written, V}};
-
-fmap({doc_write_failures, _}) -> true;
-fmap({<<"doc_write_failures">>, V}) -> {true, {doc_write_failures, V}};
-
-fmap({_, _}) -> false.
+fmap({missing_found, _}) -> true;
+fmap({missing_revisions_found, V}) -> {true, {missing_found, V}};
+fmap({<<"missing_found">>, V}) -> {true, {missing_found, V}};
+fmap({missing_checked, _}) -> true;
+fmap({revisions_checked, V}) -> {true, {missing_checked, V}};
+fmap({<<"missing_checked">>, V}) -> {true, {missing_checked, V}};
+fmap({docs_read, _}) -> true;
+fmap({<<"docs_read">>, V}) -> {true, {docs_read, V}};
+fmap({docs_written, _}) -> true;
+fmap({<<"docs_written">>, V}) -> {true, {docs_written, V}};
+fmap({doc_write_failures, _}) -> true;
+fmap({<<"doc_write_failures">>, V}) -> {true, {doc_write_failures, V}};
+fmap({_, _}) -> false.
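The fmap/1 clauses above act as a rename-and-filter table over stats proplists; a hedged sketch of how they would typically be applied with lists:filtermap/2 (the actual call site is outside this hunk, so the usage shown is an assumption):

```
%% Hypothetical usage sketch: normalize a mixed proplist of status and
%% checkpoint-history fields into the canonical stat names.
fmap_example() ->
    Props = [
        % renamed to missing_checked
        {revisions_checked, 10},
        % binary key mapped to its atom form
        {<<"docs_read">>, 7},
        % dropped by the fmap({_, _}) -> false clause
        {unrelated, ignored}
    ],
    [{missing_checked, 10}, {docs_read, 7}] = lists:filtermap(fun fmap/1, Props),
    ok.
```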
diff --git a/src/couch_replicator/src/couch_replicator_sup.erl b/src/couch_replicator/src/couch_replicator_sup.erl
index cd4512c54..33eee8659 100644
--- a/src/couch_replicator/src/couch_replicator_sup.erl
+++ b/src/couch_replicator/src/couch_replicator_sup.erl
@@ -20,53 +20,21 @@ start_link() ->
init(_Args) ->
Children = [
- {couch_replication_event,
- {gen_event, start_link, [{local, couch_replication}]},
- permanent,
- brutal_kill,
- worker,
- dynamic},
- {couch_replicator_clustering,
- {couch_replicator_clustering, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_replicator_clustering]},
- {couch_replicator_connection,
- {couch_replicator_connection, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_replicator_connection]},
- {couch_replicator_rate_limiter,
- {couch_replicator_rate_limiter, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_replicator_rate_limiter]},
- {couch_replicator_scheduler_sup,
- {couch_replicator_scheduler_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [couch_replicator_scheduler_sup]},
- {couch_replicator_scheduler,
- {couch_replicator_scheduler, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_replicator_scheduler]},
- {couch_replicator_doc_processor,
- {couch_replicator_doc_processor, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_replicator_doc_processor]},
- {couch_replicator_db_changes,
- {couch_replicator_db_changes, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [couch_multidb_changes]}
+ {couch_replication_event, {gen_event, start_link, [{local, couch_replication}]}, permanent,
+ brutal_kill, worker, dynamic},
+ {couch_replicator_clustering, {couch_replicator_clustering, start_link, []}, permanent,
+ brutal_kill, worker, [couch_replicator_clustering]},
+ {couch_replicator_connection, {couch_replicator_connection, start_link, []}, permanent,
+ brutal_kill, worker, [couch_replicator_connection]},
+ {couch_replicator_rate_limiter, {couch_replicator_rate_limiter, start_link, []}, permanent,
+ brutal_kill, worker, [couch_replicator_rate_limiter]},
+ {couch_replicator_scheduler_sup, {couch_replicator_scheduler_sup, start_link, []},
+ permanent, infinity, supervisor, [couch_replicator_scheduler_sup]},
+ {couch_replicator_scheduler, {couch_replicator_scheduler, start_link, []}, permanent,
+ brutal_kill, worker, [couch_replicator_scheduler]},
+ {couch_replicator_doc_processor, {couch_replicator_doc_processor, start_link, []},
+ permanent, brutal_kill, worker, [couch_replicator_doc_processor]},
+ {couch_replicator_db_changes, {couch_replicator_db_changes, start_link, []}, permanent,
+ brutal_kill, worker, [couch_multidb_changes]}
],
- {ok, {{rest_for_one,10,1}, Children}}.
+ {ok, {{rest_for_one, 10, 1}, Children}}.
diff --git a/src/couch_replicator/src/couch_replicator_utils.erl b/src/couch_replicator/src/couch_replicator_utils.erl
index dbadb3787..b2bc34078 100644
--- a/src/couch_replicator/src/couch_replicator_utils.erl
+++ b/src/couch_replicator/src/couch_replicator_utils.erl
@@ -13,24 +13,23 @@
-module(couch_replicator_utils).
-export([
- parse_rep_doc/2,
- replication_id/2,
- sum_stats/2,
- is_deleted/1,
- rep_error_to_binary/1,
- get_json_value/2,
- get_json_value/3,
- pp_rep_id/1,
- iso8601/1,
- filter_state/3,
- normalize_rep/1,
- ejson_state_info/1,
- get_basic_auth_creds/1,
- remove_basic_auth_creds/1,
- normalize_basic_auth/1
+ parse_rep_doc/2,
+ replication_id/2,
+ sum_stats/2,
+ is_deleted/1,
+ rep_error_to_binary/1,
+ get_json_value/2,
+ get_json_value/3,
+ pp_rep_id/1,
+ iso8601/1,
+ filter_state/3,
+ normalize_rep/1,
+ ejson_state_info/1,
+ get_basic_auth_creds/1,
+ remove_basic_auth_creds/1,
+ normalize_basic_auth/1
]).
-
-include_lib("ibrowse/include/ibrowse.hrl").
-include_lib("couch/include/couch_db.hrl").
-include("couch_replicator.hrl").
@@ -41,22 +40,20 @@
get_value/3
]).
-
rep_error_to_binary(Error) ->
couch_util:to_binary(error_reason(Error)).
-
error_reason({shutdown, Error}) ->
error_reason(Error);
-error_reason({error, {Error, Reason}})
- when is_atom(Error), is_binary(Reason) ->
+error_reason({error, {Error, Reason}}) when
+ is_atom(Error), is_binary(Reason)
+->
io_lib:format("~s: ~s", [Error, Reason]);
error_reason({error, Reason}) ->
Reason;
error_reason(Reason) ->
Reason.
-
get_json_value(Key, Props) ->
get_json_value(Key, Props, undefined).
@@ -77,7 +74,6 @@ get_json_value(Key, Props, Default) when is_binary(Key) ->
Else
end.
-
% pretty-print replication id
-spec pp_rep_id(#rep{} | rep_id()) -> string().
pp_rep_id(#rep{id = RepId}) ->
@@ -85,34 +81,28 @@ pp_rep_id(#rep{id = RepId}) ->
pp_rep_id({Base, Extension}) ->
Base ++ Extension.
-
% NV: TODO: this function is not used outside api wrap module
% consider moving it there during final cleanup
is_deleted(Change) ->
get_json_value(<<"deleted">>, Change, false).
-
% NV: TODO: proxy some functions which used to be here, later remove
% these and replace calls to their respective modules
replication_id(Rep, Version) ->
couch_replicator_ids:replication_id(Rep, Version).
-
sum_stats(S1, S2) ->
couch_replicator_stats:sum_stats(S1, S2).
-
parse_rep_doc(Props, UserCtx) ->
couch_replicator_docs:parse_rep_doc(Props, UserCtx).
-
-spec iso8601(erlang:timestamp()) -> binary().
iso8601({_Mega, _Sec, _Micro} = Timestamp) ->
{{Y, Mon, D}, {H, Min, S}} = calendar:now_to_universal_time(Timestamp),
Format = "~B-~2..0B-~2..0BT~2..0B:~2..0B:~2..0BZ",
iolist_to_binary(io_lib:format(Format, [Y, Mon, D, H, Min, S])).
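A small worked example of the format string above (illustrative only):

```
%% 1500000000 seconds after the Unix epoch is 2017-07-14 02:40:00 UTC.
iso8601_example() ->
    <<"2017-07-14T02:40:00Z">> = iso8601({1500, 0, 0}),
    ok.
```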
-
%% Filter replication info ejson by state provided. If it matches, return
%% the input value; if it doesn't, return 'skip'. This is used from replicator
%% fabric coordinator and worker.
@@ -129,7 +119,6 @@ filter_state(State, States, Info) ->
skip
end.
-
remove_basic_auth_from_headers(Headers) ->
Headers1 = mochiweb_headers:make(Headers),
case mochiweb_headers:get_value("Authorization", Headers1) of
@@ -140,14 +129,12 @@ remove_basic_auth_from_headers(Headers) ->
maybe_remove_basic_auth(string:to_lower(Basic), Base64, Headers1)
end.
-
maybe_remove_basic_auth("basic", " " ++ Base64, Headers) ->
Headers1 = mochiweb_headers:delete_any("Authorization", Headers),
{decode_basic_creds(Base64), mochiweb_headers:to_list(Headers1)};
maybe_remove_basic_auth(_, _, Headers) ->
{{undefined, undefined}, mochiweb_headers:to_list(Headers)}.
-
decode_basic_creds(Base64) ->
try re:split(base64:decode(Base64), ":", [{return, list}, {parts, 2}]) of
[User, Pass] ->
@@ -160,47 +147,45 @@ decode_basic_creds(Base64) ->
{undefined, undefined}
end.
-
% Normalize a #rep{} record such that it doesn't contain time-dependent fields
% or pids (like httpc pools), and options / props are sorted. This function is
% used during comparisons.
-spec normalize_rep(#rep{} | nil) -> #rep{} | nil.
normalize_rep(nil) ->
nil;
-
-normalize_rep(#rep{} = Rep)->
+normalize_rep(#rep{} = Rep) ->
#rep{
source = couch_replicator_api_wrap:normalize_db(Rep#rep.source),
target = couch_replicator_api_wrap:normalize_db(Rep#rep.target),
- options = Rep#rep.options, % already sorted in make_options/1
+ % already sorted in make_options/1
+ options = Rep#rep.options,
type = Rep#rep.type,
view = Rep#rep.view,
doc_id = Rep#rep.doc_id,
db_name = Rep#rep.db_name
}.
-
-spec ejson_state_info(binary() | nil) -> binary() | null.
ejson_state_info(nil) ->
null;
ejson_state_info(Info) when is_binary(Info) ->
{[{<<"error">>, Info}]};
ejson_state_info([]) ->
- null; % Status not set yet => null for compatibility reasons
+ % Status not set yet => null for compatibility reasons
+ null;
ejson_state_info([{_, _} | _] = Info) ->
{Info};
ejson_state_info(Info) ->
ErrMsg = couch_replicator_utils:rep_error_to_binary(Info),
{[{<<"error">>, ErrMsg}]}.
-
-spec get_basic_auth_creds(#httpdb{}) ->
{string(), string()} | {undefined, undefined}.
get_basic_auth_creds(#httpdb{auth_props = AuthProps}) ->
case couch_util:get_value(<<"basic">>, AuthProps) of
undefined ->
{undefined, undefined};
- {UserPass} when is_list(UserPass) ->
+ {UserPass} when is_list(UserPass) ->
User = couch_util:get_value(<<"username">>, UserPass),
Pass = couch_util:get_value(<<"password">>, UserPass),
case {User, Pass} of
@@ -213,31 +198,30 @@ get_basic_auth_creds(#httpdb{auth_props = AuthProps}) ->
{undefined, undefined}
end.
-
-spec remove_basic_auth_creds(#httpd{}) -> #httpdb{}.
remove_basic_auth_creds(#httpdb{auth_props = Props} = HttpDb) ->
Props1 = lists:keydelete(<<"basic">>, 1, Props),
HttpDb#httpdb{auth_props = Props1}.
-
-spec set_basic_auth_creds(string(), string(), #httpd{}) -> #httpdb{}.
set_basic_auth_creds(undefined, undefined, #httpdb{} = HttpDb) ->
HttpDb;
-set_basic_auth_creds(User, Pass, #httpdb{} = HttpDb)
- when is_list(User), is_list(Pass) ->
+set_basic_auth_creds(User, Pass, #httpdb{} = HttpDb) when
+ is_list(User), is_list(Pass)
+->
HttpDb1 = remove_basic_auth_creds(HttpDb),
Props = HttpDb1#httpdb.auth_props,
- UserPass = {[
- {<<"username">>, list_to_binary(User)},
- {<<"password">>, list_to_binary(Pass)}
- ]},
+ UserPass =
+ {[
+ {<<"username">>, list_to_binary(User)},
+ {<<"password">>, list_to_binary(Pass)}
+ ]},
Props1 = lists:keystore(<<"basic">>, 1, Props, {<<"basic">>, UserPass}),
HttpDb1#httpdb{auth_props = Props1}.
-
-spec extract_creds_from_url(string()) ->
- {ok, {string() | undefined, string() | undefined}, string()} |
- {error, term()}.
+ {ok, {string() | undefined, string() | undefined}, string()}
+ | {error, term()}.
extract_creds_from_url(Url) ->
case ibrowse_lib:parse_url(Url) of
{error, Error} ->
@@ -253,7 +237,6 @@ extract_creds_from_url(Url) ->
{ok, {User, Pass}, NoCreds}
end.
-
% Normalize basic auth credentials so they are set only in the auth props
% object. If multiple basic auth credentials are provided, the resulting
% credentials are picked in the following order.
@@ -265,35 +248,39 @@ extract_creds_from_url(Url) ->
normalize_basic_auth(#httpdb{} = HttpDb) ->
#httpdb{url = Url, headers = Headers} = HttpDb,
{HeaderCreds, HeadersNoCreds} = remove_basic_auth_from_headers(Headers),
- {UrlCreds, UrlWithoutCreds} = case extract_creds_from_url(Url) of
- {ok, Creds = {_, _}, UrlNoCreds} ->
- {Creds, UrlNoCreds};
- {error, _Error} ->
- % Don't crash replicator if user provided an invalid
- % userinfo part
- {undefined, undefined}
- end,
+ {UrlCreds, UrlWithoutCreds} =
+ case extract_creds_from_url(Url) of
+ {ok, Creds = {_, _}, UrlNoCreds} ->
+ {Creds, UrlNoCreds};
+ {error, _Error} ->
+ % Don't crash replicator if user provided an invalid
+ % userinfo part
+ {undefined, undefined}
+ end,
AuthCreds = {_, _} = get_basic_auth_creds(HttpDb),
HttpDb1 = remove_basic_auth_creds(HttpDb#httpdb{
url = UrlWithoutCreds,
headers = HeadersNoCreds
}),
- {User, Pass} = case {AuthCreds, UrlCreds, HeaderCreds} of
- {{U, P}, {_, _}, {_, _}} when is_list(U), is_list(P) -> {U, P};
- {{_, _}, {U, P}, {_, _}} when is_list(U), is_list(P) -> {U, P};
- {{_, _}, {_, _}, {U, P}} -> {U, P}
- end,
+ {User, Pass} =
+ case {AuthCreds, UrlCreds, HeaderCreds} of
+ {{U, P}, {_, _}, {_, _}} when is_list(U), is_list(P) -> {U, P};
+ {{_, _}, {U, P}, {_, _}} when is_list(U), is_list(P) -> {U, P};
+ {{_, _}, {_, _}, {U, P}} -> {U, P}
+ end,
set_basic_auth_creds(User, Pass, HttpDb1).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
remove_basic_auth_from_headers_test_() ->
- [?_assertMatch({{User, Pass}, NoAuthHeaders},
- remove_basic_auth_from_headers(Headers)) ||
- {{User, Pass, NoAuthHeaders}, Headers} <- [
+ [
+ ?_assertMatch(
+ {{User, Pass}, NoAuthHeaders},
+ remove_basic_auth_from_headers(Headers)
+ )
+ || {{User, Pass, NoAuthHeaders}, Headers} <- [
{
{undefined, undefined, []},
[]
@@ -332,39 +319,42 @@ remove_basic_auth_from_headers_test_() ->
]
].
-
b64creds(User, Pass) ->
base64:encode_to_string(User ++ ":" ++ Pass).
-
normalize_rep_test_() ->
{
setup,
- fun() -> meck:expect(config, get,
- fun(_, _, Default) -> Default end)
+ fun() ->
+ meck:expect(
+ config,
+ get,
+ fun(_, _, Default) -> Default end
+ )
end,
fun(_) -> meck:unload() end,
?_test(begin
- EJson1 = {[
- {<<"source">>, <<"http://host.com/source_db">>},
- {<<"target">>, <<"http://target.local/db">>},
- {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]},
- {<<"other_field">>, <<"some_value">>}
- ]},
+ EJson1 =
+ {[
+ {<<"source">>, <<"http://host.com/source_db">>},
+ {<<"target">>, <<"http://target.local/db">>},
+ {<<"doc_ids">>, [<<"a">>, <<"c">>, <<"b">>]},
+ {<<"other_field">>, <<"some_value">>}
+ ]},
Rep1 = couch_replicator_docs:parse_rep_doc_without_id(EJson1),
- EJson2 = {[
- {<<"other_field">>, <<"unrelated">>},
- {<<"target">>, <<"http://target.local/db">>},
- {<<"source">>, <<"http://host.com/source_db">>},
- {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]},
- {<<"other_field2">>, <<"unrelated2">>}
- ]},
+ EJson2 =
+ {[
+ {<<"other_field">>, <<"unrelated">>},
+ {<<"target">>, <<"http://target.local/db">>},
+ {<<"source">>, <<"http://host.com/source_db">>},
+ {<<"doc_ids">>, [<<"c">>, <<"a">>, <<"b">>]},
+ {<<"other_field2">>, <<"unrelated2">>}
+ ]},
Rep2 = couch_replicator_docs:parse_rep_doc_without_id(EJson2),
?assertEqual(normalize_rep(Rep1), normalize_rep(Rep2))
end)
}.
-
get_basic_auth_creds_test() ->
Check = fun(Props) ->
get_basic_auth_creds(#httpdb{auth_props = Props})
@@ -384,7 +374,6 @@ get_basic_auth_creds_test() ->
UserPass3 = {[{<<"username">>, <<"u">>}, {<<"password">>, null}]},
?assertEqual({undefined, undefined}, Check([{<<"basic">>, UserPass3}])).
-
remove_basic_auth_creds_test() ->
Check = fun(Props) ->
HttpDb = remove_basic_auth_creds(#httpdb{auth_props = Props}),
@@ -395,21 +384,28 @@ remove_basic_auth_creds_test() ->
?assertEqual([{<<"other">>, {[]}}], Check([{<<"other">>, {[]}}])),
- ?assertEqual([], Check([
- {<<"basic">>, {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}}
- ])),
-
- ?assertEqual([{<<"other">>, {[]}}], Check([
- {<<"basic">>, {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}},
- {<<"other">>, {[]}}
- ])).
-
+ ?assertEqual(
+ [],
+ Check([
+ {<<"basic">>,
+ {[
+ {<<"username">>, <<"u">>},
+ {<<"password">>, <<"p">>}
+ ]}}
+ ])
+ ),
+
+ ?assertEqual(
+ [{<<"other">>, {[]}}],
+ Check([
+ {<<"basic">>,
+ {[
+ {<<"username">>, <<"u">>},
+ {<<"password">>, <<"p">>}
+ ]}},
+ {<<"other">>, {[]}}
+ ])
+ ).
set_basic_auth_creds_test() ->
Check = fun(User, Pass, Props) ->
@@ -419,121 +415,158 @@ set_basic_auth_creds_test() ->
?assertEqual([], Check(undefined, undefined, [])),
- ?assertEqual([{<<"other">>, {[]}}], Check(undefined, undefined,
- [{<<"other">>, {[]}}])),
-
- ?assertEqual([
- {<<"basic">>, {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}}
- ], Check("u", "p", [])),
-
- ?assertEqual([
- {<<"other">>, {[]}},
- {<<"basic">>, {[
- {<<"username">>, <<"u">>},
- {<<"password">>, <<"p">>}
- ]}}
- ], Check("u", "p", [{<<"other">>, {[]}}])).
-
+ ?assertEqual(
+ [{<<"other">>, {[]}}],
+ Check(
+ undefined,
+ undefined,
+ [{<<"other">>, {[]}}]
+ )
+ ),
+
+ ?assertEqual(
+ [
+ {<<"basic">>,
+ {[
+ {<<"username">>, <<"u">>},
+ {<<"password">>, <<"p">>}
+ ]}}
+ ],
+ Check("u", "p", [])
+ ),
+
+ ?assertEqual(
+ [
+ {<<"other">>, {[]}},
+ {<<"basic">>,
+ {[
+ {<<"username">>, <<"u">>},
+ {<<"password">>, <<"p">>}
+ ]}}
+ ],
+ Check("u", "p", [{<<"other">>, {[]}}])
+ ).
normalize_basic_creds_test_() ->
DefaultHeaders = (#httpdb{})#httpdb.headers,
- [?_assertEqual(Expect, normalize_basic_auth(Input)) || {Input, Expect} <- [
- {
- #httpdb{url = "http://u:p@x.y/db"},
- #httpdb{url = "http://x.y/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{url = "http://u:p@h:80/db"},
- #httpdb{url = "http://h:80/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{url = "https://u:p@h/db"},
- #httpdb{url = "https://h/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{url = "http://u:p@[2001:db8:a1b:12f9::1]/db"},
- #httpdb{url = "http://[2001:db8:a1b:12f9::1]/db",
- auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{
- url = "http://h/db",
- headers = DefaultHeaders ++ [
- {"Authorization", "Basic " ++ b64creds("u", "p")}
- ]
+ [
+ ?_assertEqual(Expect, normalize_basic_auth(Input))
+ || {Input, Expect} <- [
+ {
+ #httpdb{url = "http://u:p@x.y/db"},
+ #httpdb{url = "http://x.y/db", auth_props = auth_props("u", "p")}
},
- #httpdb{url = "http://h/db", auth_props = auth_props("u", "p")}
- },
- {
- #httpdb{
- url = "http://h/db",
- headers = DefaultHeaders ++ [
- {"Authorization", "Basic " ++ b64creds("u", "p@")}
- ]
+ {
+ #httpdb{url = "http://u:p@h:80/db"},
+ #httpdb{url = "http://h:80/db", auth_props = auth_props("u", "p")}
},
- #httpdb{url = "http://h/db", auth_props = auth_props("u", "p@")}
- },
- {
- #httpdb{
- url = "http://h/db",
- headers = DefaultHeaders ++ [
- {"Authorization", "Basic " ++ b64creds("u", "p@%40")}
- ]
+ {
+ #httpdb{url = "https://u:p@h/db"},
+ #httpdb{url = "https://h/db", auth_props = auth_props("u", "p")}
},
- #httpdb{url = "http://h/db", auth_props = auth_props("u", "p@%40")}
- },
- {
- #httpdb{
- url = "http://h/db",
- headers = DefaultHeaders ++ [
- {"aUthoriZation", "bASIC " ++ b64creds("U", "p")}
- ]
+ {
+ #httpdb{url = "http://u:p@[2001:db8:a1b:12f9::1]/db"},
+ #httpdb{
+ url = "http://[2001:db8:a1b:12f9::1]/db",
+ auth_props = auth_props("u", "p")
+ }
},
- #httpdb{url = "http://h/db", auth_props = auth_props("U", "p")}
- },
- {
- #httpdb{
- url = "http://u1:p1@h/db",
- headers = DefaultHeaders ++ [
- {"Authorization", "Basic " ++ b64creds("u2", "p2")}
- ]
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers =
+ DefaultHeaders ++
+ [
+ {"Authorization", "Basic " ++ b64creds("u", "p")}
+ ]
+ },
+ #httpdb{url = "http://h/db", auth_props = auth_props("u", "p")}
},
- #httpdb{url ="http://h/db", auth_props = auth_props("u1", "p1")}
- },
- {
- #httpdb{
- url = "http://u1:p1@h/db",
- auth_props = [{<<"basic">>, {[
- {<<"username">>, <<"u2">>},
- {<<"password">>, <<"p2">>}
- ]}}]
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers =
+ DefaultHeaders ++
+ [
+ {"Authorization", "Basic " ++ b64creds("u", "p@")}
+ ]
+ },
+ #httpdb{url = "http://h/db", auth_props = auth_props("u", "p@")}
},
- #httpdb{url = "http://h/db", auth_props = auth_props("u2", "p2")}
- },
- {
- #httpdb{
- url = "http://u1:p1@h/db",
- auth_props = [{<<"basic">>, {[
- {<<"username">>, <<"u2">>},
- {<<"password">>, <<"p2">>}
- ]}}],
- headers = DefaultHeaders ++ [
- {"Authorization", "Basic " ++ b64creds("u3", "p3")}
- ]
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers =
+ DefaultHeaders ++
+ [
+ {"Authorization", "Basic " ++ b64creds("u", "p@%40")}
+ ]
+ },
+ #httpdb{url = "http://h/db", auth_props = auth_props("u", "p@%40")}
},
- #httpdb{url = "http://h/db", auth_props = auth_props("u2", "p2")}
- }
- ]].
-
+ {
+ #httpdb{
+ url = "http://h/db",
+ headers =
+ DefaultHeaders ++
+ [
+ {"aUthoriZation", "bASIC " ++ b64creds("U", "p")}
+ ]
+ },
+ #httpdb{url = "http://h/db", auth_props = auth_props("U", "p")}
+ },
+ {
+ #httpdb{
+ url = "http://u1:p1@h/db",
+ headers =
+ DefaultHeaders ++
+ [
+ {"Authorization", "Basic " ++ b64creds("u2", "p2")}
+ ]
+ },
+ #httpdb{url = "http://h/db", auth_props = auth_props("u1", "p1")}
+ },
+ {
+ #httpdb{
+ url = "http://u1:p1@h/db",
+ auth_props = [
+ {<<"basic">>,
+ {[
+ {<<"username">>, <<"u2">>},
+ {<<"password">>, <<"p2">>}
+ ]}}
+ ]
+ },
+ #httpdb{url = "http://h/db", auth_props = auth_props("u2", "p2")}
+ },
+ {
+ #httpdb{
+ url = "http://u1:p1@h/db",
+ auth_props = [
+ {<<"basic">>,
+ {[
+ {<<"username">>, <<"u2">>},
+ {<<"password">>, <<"p2">>}
+ ]}}
+ ],
+ headers =
+ DefaultHeaders ++
+ [
+ {"Authorization", "Basic " ++ b64creds("u3", "p3")}
+ ]
+ },
+ #httpdb{url = "http://h/db", auth_props = auth_props("u2", "p2")}
+ }
+ ]
+ ].
auth_props(User, Pass) when is_list(User), is_list(Pass) ->
- [{<<"basic">>, {[
- {<<"username">>, list_to_binary(User)},
- {<<"password">>, list_to_binary(Pass)}
- ]}}].
-
+ [
+ {<<"basic">>,
+ {[
+ {<<"username">>, list_to_binary(User)},
+ {<<"password">>, list_to_binary(Pass)}
+ ]}}
+ ].
-endif.
diff --git a/src/couch_replicator/src/couch_replicator_worker.erl b/src/couch_replicator/src/couch_replicator_worker.erl
index eb8beaaa9..f66a019e2 100644
--- a/src/couch_replicator/src/couch_replicator_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_worker.erl
@@ -27,8 +27,11 @@
-include("couch_replicator.hrl").
% TODO: maybe make both buffer max sizes configurable
--define(DOC_BUFFER_BYTE_SIZE, 512 * 1024). % for remote targets
--define(STATS_DELAY, 10000000). % 10 seconds (in microseconds)
+
+% for remote targets
+-define(DOC_BUFFER_BYTE_SIZE, 512 * 1024).
+% 10 seconds (in microseconds)
+-define(STATS_DELAY, 10000000).
-define(MISSING_DOC_RETRY_MSEC, 2000).
-import(couch_util, [
@@ -36,7 +39,6 @@
get_value/3
]).
-
-record(batch, {
docs = [],
size = 0
@@ -56,12 +58,10 @@
batch = #batch{}
}).
-
-
start_link(Cp, #httpdb{} = Source, Target, ChangesManager, MaxConns) ->
gen_server:start_link(
- ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []).
-
+ ?MODULE, {Cp, Source, Target, ChangesManager, MaxConns}, []
+ ).
init({Cp, Source, Target, ChangesManager, MaxConns}) ->
process_flag(trap_exit, true),
@@ -79,108 +79,122 @@ init({Cp, Source, Target, ChangesManager, MaxConns}) ->
},
{ok, State}.
-
-handle_call({fetch_doc, {_Id, _Revs, _PAs} = Params}, {Pid, _} = From,
- #state{loop = Pid, readers = Readers, pending_fetch = nil,
- source = Src, target = Tgt, max_parallel_conns = MaxConns} = State) ->
+handle_call(
+ {fetch_doc, {_Id, _Revs, _PAs} = Params},
+ {Pid, _} = From,
+ #state{
+ loop = Pid,
+ readers = Readers,
+ pending_fetch = nil,
+ source = Src,
+ target = Tgt,
+ max_parallel_conns = MaxConns
+ } = State
+) ->
case length(Readers) of
- Size when Size < MaxConns ->
- Reader = spawn_doc_reader(Src, Tgt, Params),
- NewState = State#state{
- readers = [Reader | Readers]
- },
- {reply, ok, NewState};
- _ ->
- NewState = State#state{
- pending_fetch = {From, Params}
- },
- {noreply, NewState}
+ Size when Size < MaxConns ->
+ Reader = spawn_doc_reader(Src, Tgt, Params),
+ NewState = State#state{
+ readers = [Reader | Readers]
+ },
+ {reply, ok, NewState};
+ _ ->
+ NewState = State#state{
+ pending_fetch = {From, Params}
+ },
+ {noreply, NewState}
end;
-
handle_call({batch_doc, Doc}, From, State) ->
gen_server:reply(From, ok),
{noreply, maybe_flush_docs(Doc, State)};
-
handle_call({add_stats, IncStats}, From, #state{stats = Stats} = State) ->
gen_server:reply(From, ok),
NewStats = couch_replicator_utils:sum_stats(Stats, IncStats),
NewStats2 = maybe_report_stats(State#state.cp, NewStats),
{noreply, State#state{stats = NewStats2}};
-
-handle_call(flush, {Pid, _} = From,
- #state{loop = Pid, writer = nil, flush_waiter = nil,
- target = Target, batch = Batch} = State) ->
- State2 = case State#state.readers of
- [] ->
- State#state{writer = spawn_writer(Target, Batch)};
- _ ->
- State
- end,
+handle_call(
+ flush,
+ {Pid, _} = From,
+ #state{
+ loop = Pid,
+ writer = nil,
+ flush_waiter = nil,
+ target = Target,
+ batch = Batch
+ } = State
+) ->
+ State2 =
+ case State#state.readers of
+ [] ->
+ State#state{writer = spawn_writer(Target, Batch)};
+ _ ->
+ State
+ end,
{noreply, State2#state{flush_waiter = From}}.
-
handle_cast(Msg, State) ->
{stop, {unexpected_async_call, Msg}, State}.
-
handle_info({'EXIT', Pid, normal}, #state{loop = Pid} = State) ->
#state{
- batch = #batch{docs = []}, readers = [], writer = nil,
- pending_fetch = nil, flush_waiter = nil
+ batch = #batch{docs = []},
+ readers = [],
+ writer = nil,
+ pending_fetch = nil,
+ flush_waiter = nil
} = State,
{stop, normal, State};
-
handle_info({'EXIT', Pid, normal}, #state{writer = Pid} = State) ->
{noreply, after_full_flush(State)};
-
handle_info({'EXIT', Pid, normal}, #state{writer = nil} = State) ->
#state{
- readers = Readers, writer = Writer, batch = Batch,
- source = Source, target = Target,
- pending_fetch = Fetch, flush_waiter = FlushWaiter
+ readers = Readers,
+ writer = Writer,
+ batch = Batch,
+ source = Source,
+ target = Target,
+ pending_fetch = Fetch,
+ flush_waiter = FlushWaiter
} = State,
case Readers -- [Pid] of
- Readers ->
- {noreply, State};
- Readers2 ->
- State2 = case Fetch of
- nil ->
- case (FlushWaiter =/= nil) andalso (Writer =:= nil) andalso
- (Readers2 =:= []) of
- true ->
- State#state{
- readers = Readers2,
- writer = spawn_writer(Target, Batch)
- };
- false ->
- State#state{readers = Readers2}
- end;
- {From, FetchParams} ->
- Reader = spawn_doc_reader(Source, Target, FetchParams),
- gen_server:reply(From, ok),
- State#state{
- readers = [Reader | Readers2],
- pending_fetch = nil
- }
- end,
- {noreply, State2}
+ Readers ->
+ {noreply, State};
+ Readers2 ->
+ State2 =
+ case Fetch of
+ nil ->
+ case
+ (FlushWaiter =/= nil) andalso (Writer =:= nil) andalso
+ (Readers2 =:= [])
+ of
+ true ->
+ State#state{
+ readers = Readers2,
+ writer = spawn_writer(Target, Batch)
+ };
+ false ->
+ State#state{readers = Readers2}
+ end;
+ {From, FetchParams} ->
+ Reader = spawn_doc_reader(Source, Target, FetchParams),
+ gen_server:reply(From, ok),
+ State#state{
+ readers = [Reader | Readers2],
+ pending_fetch = nil
+ }
+ end,
+ {noreply, State2}
end;
-
handle_info({'EXIT', _Pid, max_backoff}, State) ->
{stop, {shutdown, max_backoff}, State};
-
handle_info({'EXIT', _Pid, {bulk_docs_failed, _, _} = Err}, State) ->
{stop, {shutdown, Err}, State};
-
handle_info({'EXIT', _Pid, {revs_diff_failed, _, _} = Err}, State) ->
{stop, {shutdown, Err}, State};
-
handle_info({'EXIT', _Pid, {http_request_failed, _, _, _} = Err}, State) ->
{stop, {shutdown, Err}, State};
-
handle_info({'EXIT', Pid, Reason}, State) ->
- {stop, {process_died, Pid, Reason}, State}.
-
+ {stop, {process_died, Pid, Reason}, State}.
terminate(_Reason, _State) ->
ok.
@@ -208,31 +222,28 @@ format_status(_Opt, [_PDict, State]) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager) ->
ChangesManager ! {get_changes, self()},
receive
- {closed, ChangesManager} ->
- ok;
- {changes, ChangesManager, [], ReportSeq} ->
- Stats = couch_replicator_stats:new(),
- ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
- queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager);
- {changes, ChangesManager, Changes, ReportSeq} ->
- {IdRevs, Stats0} = find_missing(Changes, Target),
- ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
- remote_process_batch(IdRevs, Parent),
- {ok, Stats} = gen_server:call(Parent, flush, infinity),
- ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
- erlang:put(last_stats_report, os:timestamp()),
- couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
- queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
+ {closed, ChangesManager} ->
+ ok;
+ {changes, ChangesManager, [], ReportSeq} ->
+ Stats = couch_replicator_stats:new(),
+ ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
+ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager);
+ {changes, ChangesManager, Changes, ReportSeq} ->
+ {IdRevs, Stats0} = find_missing(Changes, Target),
+ ok = gen_server:call(Parent, {add_stats, Stats0}, infinity),
+ remote_process_batch(IdRevs, Parent),
+ {ok, Stats} = gen_server:call(Parent, flush, infinity),
+ ok = gen_server:call(Cp, {report_seq_done, ReportSeq, Stats}, infinity),
+ erlang:put(last_stats_report, os:timestamp()),
+ couch_log:debug("Worker reported completion of seq ~p", [ReportSeq]),
+ queue_fetch_loop(Source, Target, Parent, Cp, ChangesManager)
end.
-
remote_process_batch([], _Parent) ->
ok;
-
remote_process_batch([{Id, Revs, PAs} | Rest], Parent) ->
% When the source is a remote database, we fetch a single document revision
% per HTTP request. This is mostly to facilitate retrying of HTTP requests
@@ -242,44 +253,56 @@ remote_process_batch([{Id, Revs, PAs} | Rest], Parent) ->
fun(Rev) ->
ok = gen_server:call(Parent, {fetch_doc, {Id, [Rev], PAs}}, infinity)
end,
- Revs),
+ Revs
+ ),
remote_process_batch(Rest, Parent).
-
spawn_doc_reader(Source, Target, FetchParams) ->
Parent = self(),
spawn_link(fun() ->
fetch_doc(
- Source, FetchParams, fun remote_doc_handler/2, {Parent, Target})
+ Source, FetchParams, fun remote_doc_handler/2, {Parent, Target}
+ )
end).
-
fetch_doc(Source, {Id, Revs, PAs}, DocHandler, Acc) ->
try
couch_replicator_api_wrap:open_doc_revs(
- Source, Id, Revs, [{atts_since, PAs}, latest], DocHandler, Acc)
+ Source, Id, Revs, [{atts_since, PAs}, latest], DocHandler, Acc
+ )
catch
- throw:missing_doc ->
- couch_log:error("Retrying fetch and update of document `~s` as it is "
- "unexpectedly missing. Missing revisions are: ~s",
- [Id, couch_doc:revs_to_strs(Revs)]),
- WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
- ?MISSING_DOC_RETRY_MSEC),
- timer:sleep(WaitMSec),
- couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc);
- throw:{missing_stub, _} ->
- couch_log:error("Retrying fetch and update of document `~s` due to out of "
- "sync attachment stubs. Missing revisions are: ~s",
- [Id, couch_doc:revs_to_strs(Revs)]),
- WaitMSec = config:get_integer("replicator", "missing_doc_retry_msec",
- ?MISSING_DOC_RETRY_MSEC),
- timer:sleep(WaitMSec),
- couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
+ throw:missing_doc ->
+ couch_log:error(
+ "Retrying fetch and update of document `~s` as it is "
+ "unexpectedly missing. Missing revisions are: ~s",
+ [Id, couch_doc:revs_to_strs(Revs)]
+ ),
+ WaitMSec = config:get_integer(
+ "replicator",
+ "missing_doc_retry_msec",
+ ?MISSING_DOC_RETRY_MSEC
+ ),
+ timer:sleep(WaitMSec),
+ couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc);
+ throw:{missing_stub, _} ->
+ couch_log:error(
+ "Retrying fetch and update of document `~s` due to out of "
+ "sync attachment stubs. Missing revisions are: ~s",
+ [Id, couch_doc:revs_to_strs(Revs)]
+ ),
+ WaitMSec = config:get_integer(
+ "replicator",
+ "missing_doc_retry_msec",
+ ?MISSING_DOC_RETRY_MSEC
+ ),
+ timer:sleep(WaitMSec),
+ couch_replicator_api_wrap:open_doc_revs(Source, Id, Revs, [latest], DocHandler, Acc)
end.
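The retry delay in both catch clauses above is re-read from config on every retry, so it can be tuned at runtime; a hedged example using the config:set/4 form the test helpers elsewhere in this patch already use (the 5000 value is made up):

```
%% Hedged example: raise the missing-doc retry delay from the 2000 ms default
%% to 5 seconds without persisting to the ini files.
config:set("replicator", "missing_doc_retry_msec", "5000", _Persist = false).
```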
-
-remote_doc_handler({ok, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc},
- Acc) ->
+remote_doc_handler(
+ {ok, #doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc},
+ Acc
+) ->
% Flush design docs in their own PUT requests to correctly process
% authorization failures for design doc updates.
couch_log:debug("Worker flushing design doc", []),
@@ -297,34 +320,33 @@ remote_doc_handler({ok, #doc{atts = []} = Doc}, {Parent, _} = Acc) ->
remote_doc_handler({{not_found, missing}, _}, _Acc) ->
throw(missing_doc).
-
doc_handler_flush_doc(#doc{} = Doc, {Parent, Target} = Acc) ->
Stats = couch_replicator_stats:new([{docs_read, 1}]),
Success = (flush_doc(Target, Doc) =:= ok),
- {Result, Stats2} = case Success of
- true ->
- {{ok, Acc}, couch_replicator_stats:increment(docs_written, Stats)};
- false ->
- {{skip, Acc}, couch_replicator_stats:increment(doc_write_failures, Stats)}
- end,
+ {Result, Stats2} =
+ case Success of
+ true ->
+ {{ok, Acc}, couch_replicator_stats:increment(docs_written, Stats)};
+ false ->
+ {{skip, Acc}, couch_replicator_stats:increment(doc_write_failures, Stats)}
+ end,
ok = gen_server:call(Parent, {add_stats, Stats2}, infinity),
Result.
-
spawn_writer(Target, #batch{docs = DocList, size = Size}) ->
case {Target, Size > 0} of
- {#httpdb{}, true} ->
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
- _ ->
- ok
+ {#httpdb{}, true} ->
+ couch_log:debug("Worker flushing doc batch of size ~p bytes", [Size]);
+ _ ->
+ ok
end,
Parent = self(),
spawn_link(
fun() ->
Stats = flush_docs(Target, DocList),
ok = gen_server:call(Parent, {add_stats, Stats}, infinity)
- end).
-
+ end
+ ).
after_full_flush(#state{stats = Stats, flush_waiter = Waiter} = State) ->
gen_server:reply(Waiter, {ok, Stats}),
@@ -336,11 +358,12 @@ after_full_flush(#state{stats = Stats, flush_waiter = Waiter} = State) ->
batch = #batch{}
}.
-
-maybe_flush_docs(Doc,State) ->
+maybe_flush_docs(Doc, State) ->
#state{
- target = Target, batch = Batch,
- stats = Stats, cp = Cp
+ target = Target,
+ batch = Batch,
+ stats = Stats,
+ cp = Cp
} = State,
{Batch2, WStats} = maybe_flush_docs(Target, Batch, Doc),
Stats2 = couch_replicator_stats:sum_stats(Stats, WStats),
@@ -348,39 +371,47 @@ maybe_flush_docs(Doc,State) ->
Stats4 = maybe_report_stats(Cp, Stats3),
State#state{stats = Stats4, batch = Batch2}.
-
maybe_flush_docs(#httpdb{} = Target, Batch, Doc) ->
#batch{docs = DocAcc, size = SizeAcc} = Batch,
JsonDoc = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
case SizeAcc + iolist_size(JsonDoc) of
- SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
- couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
- Stats = flush_docs(Target, [JsonDoc | DocAcc]),
- {#batch{}, Stats};
- SizeAcc2 ->
- Stats = couch_replicator_stats:new(),
- {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
+ SizeAcc2 when SizeAcc2 > ?DOC_BUFFER_BYTE_SIZE ->
+ couch_log:debug("Worker flushing doc batch of size ~p bytes", [SizeAcc2]),
+ Stats = flush_docs(Target, [JsonDoc | DocAcc]),
+ {#batch{}, Stats};
+ SizeAcc2 ->
+ Stats = couch_replicator_stats:new(),
+ {#batch{docs = [JsonDoc | DocAcc], size = SizeAcc2}, Stats}
end.
-
flush_docs(_Target, []) ->
couch_replicator_stats:new();
flush_docs(Target, DocList) ->
- FlushResult = couch_replicator_api_wrap:update_docs(Target, DocList,
- [delay_commit], replicated_changes),
+ FlushResult = couch_replicator_api_wrap:update_docs(
+ Target,
+ DocList,
+ [delay_commit],
+ replicated_changes
+ ),
handle_flush_docs_result(FlushResult, Target, DocList).
-
handle_flush_docs_result({error, request_body_too_large}, _Target, [Doc]) ->
couch_log:error("Replicator: failed to write doc ~p. Too large", [Doc]),
couch_replicator_stats:new([{doc_write_failures, 1}]);
handle_flush_docs_result({error, request_body_too_large}, Target, DocList) ->
Len = length(DocList),
{DocList1, DocList2} = lists:split(Len div 2, DocList),
- couch_log:notice("Replicator: couldn't write batch of size ~p to ~p because"
+ couch_log:notice(
+ "Replicator: couldn't write batch of size ~p to ~p because"
" request body is too large. Splitting batch into 2 separate batches of"
- " sizes ~p and ~p", [Len, couch_replicator_api_wrap:db_uri(Target),
- length(DocList1), length(DocList2)]),
+ " sizes ~p and ~p",
+ [
+ Len,
+ couch_replicator_api_wrap:db_uri(Target),
+ length(DocList1),
+ length(DocList2)
+ ]
+ ),
Stats1 = flush_docs(Target, DocList1),
Stats2 = flush_docs(Target, DocList2),
couch_replicator_stats:sum_stats(Stats1, Stats2);
@@ -388,11 +419,20 @@ handle_flush_docs_result({ok, Errors}, Target, DocList) ->
DbUri = couch_replicator_api_wrap:db_uri(Target),
lists:foreach(
fun({Props}) ->
- couch_log:error("Replicator: couldn't write document `~s`, revision"
- " `~s`, to target database `~s`. Error: `~s`, reason: `~s`.", [
- get_value(id, Props, ""), get_value(rev, Props, ""), DbUri,
- get_value(error, Props, ""), get_value(reason, Props, "")])
- end, Errors),
+ couch_log:error(
+ "Replicator: couldn't write document `~s`, revision"
+ " `~s`, to target database `~s`. Error: `~s`, reason: `~s`.",
+ [
+ get_value(id, Props, ""),
+ get_value(rev, Props, ""),
+ DbUri,
+ get_value(error, Props, ""),
+ get_value(reason, Props, "")
+ ]
+ )
+ end,
+ Errors
+ ),
couch_replicator_stats:new([
{docs_written, length(DocList) - length(Errors)},
{doc_write_failures, length(Errors)}
@@ -400,74 +440,90 @@ handle_flush_docs_result({ok, Errors}, Target, DocList) ->
handle_flush_docs_result({error, {bulk_docs_failed, _, _} = Err}, _, _) ->
exit(Err).
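The request_body_too_large clause above keeps halving the batch, so a batch that is rejected at every size degenerates into single-document flushes after roughly log2(N) splits; a self-contained sketch of that recursion depth (no HTTP involved):

```
%% Illustrative only: depth of the halving recursion for a batch of Len docs,
%% following the lists:split(Len div 2, DocList) shape used above; the larger
%% half drives the depth.
split_depth(1) -> 0;
split_depth(Len) when Len > 1 -> 1 + split_depth(Len - Len div 2).

%% split_depth(8) =:= 3, split_depth(100) =:= 7.
```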
-
flush_doc(Target, #doc{id = Id, revs = {Pos, [RevId | _]}} = Doc) ->
try couch_replicator_api_wrap:update_doc(Target, Doc, [], replicated_changes) of
- {ok, _} ->
- ok;
- Error ->
- couch_log:error("Replicator: error writing document `~s` to `~s`: ~s",
- [Id, couch_replicator_api_wrap:db_uri(Target), couch_util:to_binary(Error)]),
- Error
+ {ok, _} ->
+ ok;
+ Error ->
+ couch_log:error(
+ "Replicator: error writing document `~s` to `~s`: ~s",
+ [Id, couch_replicator_api_wrap:db_uri(Target), couch_util:to_binary(Error)]
+ ),
+ Error
catch
- throw:{missing_stub, _} = MissingStub ->
- throw(MissingStub);
- throw:{Error, Reason} ->
- couch_log:error("Replicator: couldn't write document `~s`, revision `~s`,"
- " to target database `~s`. Error: `~s`, reason: `~s`.",
- [Id, couch_doc:rev_to_str({Pos, RevId}),
- couch_replicator_api_wrap:db_uri(Target), to_binary(Error), to_binary(Reason)]),
- {error, Error};
- throw:Err ->
- couch_log:error("Replicator: couldn't write document `~s`, revision `~s`,"
- " to target database `~s`. Error: `~s`.",
- [Id, couch_doc:rev_to_str({Pos, RevId}),
- couch_replicator_api_wrap:db_uri(Target), to_binary(Err)]),
- {error, Err}
+ throw:{missing_stub, _} = MissingStub ->
+ throw(MissingStub);
+ throw:{Error, Reason} ->
+ couch_log:error(
+ "Replicator: couldn't write document `~s`, revision `~s`,"
+ " to target database `~s`. Error: `~s`, reason: `~s`.",
+ [
+ Id,
+ couch_doc:rev_to_str({Pos, RevId}),
+ couch_replicator_api_wrap:db_uri(Target),
+ to_binary(Error),
+ to_binary(Reason)
+ ]
+ ),
+ {error, Error};
+ throw:Err ->
+ couch_log:error(
+ "Replicator: couldn't write document `~s`, revision `~s`,"
+ " to target database `~s`. Error: `~s`.",
+ [
+ Id,
+ couch_doc:rev_to_str({Pos, RevId}),
+ couch_replicator_api_wrap:db_uri(Target),
+ to_binary(Err)
+ ]
+ ),
+ {error, Err}
end.
-
find_missing(DocInfos, Target) ->
- {IdRevs, AllRevsCount} = lists:foldr(fun
- (#doc_info{revs = []}, {IdRevAcc, CountAcc}) ->
- {IdRevAcc, CountAcc};
- (#doc_info{id = Id, revs = RevsInfo}, {IdRevAcc, CountAcc}) ->
- Revs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
- {[{Id, Revs} | IdRevAcc], CountAcc + length(Revs)}
- end, {[], 0}, DocInfos),
-
-
- Missing = case couch_replicator_api_wrap:get_missing_revs(Target, IdRevs) of
- {ok, Result} -> Result;
- {error, Error} -> exit(Error)
- end,
+ {IdRevs, AllRevsCount} = lists:foldr(
+ fun
+ (#doc_info{revs = []}, {IdRevAcc, CountAcc}) ->
+ {IdRevAcc, CountAcc};
+ (#doc_info{id = Id, revs = RevsInfo}, {IdRevAcc, CountAcc}) ->
+ Revs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
+ {[{Id, Revs} | IdRevAcc], CountAcc + length(Revs)}
+ end,
+ {[], 0},
+ DocInfos
+ ),
+
+ Missing =
+ case couch_replicator_api_wrap:get_missing_revs(Target, IdRevs) of
+ {ok, Result} -> Result;
+ {error, Error} -> exit(Error)
+ end,
MissingRevsCount = lists:foldl(
fun({_Id, MissingRevs, _PAs}, Acc) -> Acc + length(MissingRevs) end,
- 0, Missing),
+ 0,
+ Missing
+ ),
Stats = couch_replicator_stats:new([
{missing_checked, AllRevsCount},
{missing_found, MissingRevsCount}
]),
{Missing, Stats}.
-
maybe_report_stats(Cp, Stats) ->
Now = os:timestamp(),
case timer:now_diff(erlang:get(last_stats_report), Now) >= ?STATS_DELAY of
- true ->
- ok = gen_server:call(Cp, {add_stats, Stats}, infinity),
- erlang:put(last_stats_report, Now),
- couch_replicator_stats:new();
- false ->
- Stats
+ true ->
+ ok = gen_server:call(Cp, {add_stats, Stats}, infinity),
+ erlang:put(last_stats_report, Now),
+ couch_replicator_stats:new();
+ false ->
+ Stats
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
replication_worker_format_status_test() ->
State = #state{
cp = self(),
diff --git a/src/couch_replicator/src/json_stream_parse.erl b/src/couch_replicator/src/json_stream_parse.erl
index b63e01152..3478b9830 100644
--- a/src/couch_replicator/src/json_stream_parse.erl
+++ b/src/couch_replicator/src/json_stream_parse.erl
@@ -12,15 +12,12 @@
-module(json_stream_parse).
-
-export([events/2, to_ejson/1, collect_object/2]).
-define(IS_WS(X), (X == $\ orelse X == $\t orelse X == $\n orelse X == $\r)).
-define(IS_DELIM(X), (X == $} orelse X == $] orelse X == $,)).
-define(IS_DIGIT(X), (X >= $0 andalso X =< $9)).
-
-
% Parses the json into events.
%
% The DataFun param is a function that produces the data for parsing. When
@@ -49,11 +46,11 @@
% {key, KeyString}, and the following event is the value, or start of the
% value (array_start, object_start).
%
-events(Data,EventFun) when is_list(Data)->
- events(list_to_binary(Data),EventFun);
-events(Data,EventFun) when is_binary(Data)->
- events(fun() -> {Data, fun() -> done end} end,EventFun);
-events(DataFun,EventFun) ->
+events(Data, EventFun) when is_list(Data) ->
+ events(list_to_binary(Data), EventFun);
+events(Data, EventFun) when is_binary(Data) ->
+ events(fun() -> {Data, fun() -> done end} end, EventFun);
+events(DataFun, EventFun) ->
parse_one(DataFun, EventFun, <<>>).
% converts the JSON directly to the erlang represention of Json
@@ -62,7 +59,6 @@ to_ejson(DF) ->
[[EJson]] = make_ejson(EF(get_results), [[]]),
EJson.
-
% This function is used to return complete objects while parsing streams.
%
% Return this function from inside an event function right after getting an
@@ -98,228 +94,224 @@ to_ejson(DF) ->
collect_object(Ev, ReturnControl) ->
collect_object(Ev, 0, ReturnControl, [object_start]).
-
-
% internal methods
-parse_one(DF,EF,Acc) ->
+parse_one(DF, EF, Acc) ->
case toke(DF, Acc) of
- none ->
- none;
- {Token, DF2, Rest} ->
- case Token of
- "{" ->
- EF2 = EF(object_start),
- {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
- {DF3, EF3(object_end), Rest2};
- "[" ->
- EF2 = EF(array_start),
- {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
- {DF3, EF3(array_end), Rest2};
- Int when is_integer(Int)->
- {DF2, EF(Int), Rest};
- Float when is_float(Float)->
- {DF2, EF(Float), Rest};
- Atom when is_atom(Atom)->
- {DF2, EF(Atom), Rest};
- String when is_binary(String)->
- {DF2, EF(String), Rest};
- _OtherToken ->
- err(unexpected_token)
- end
+ none ->
+ none;
+ {Token, DF2, Rest} ->
+ case Token of
+ "{" ->
+ EF2 = EF(object_start),
+ {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
+ {DF3, EF3(object_end), Rest2};
+ "[" ->
+ EF2 = EF(array_start),
+ {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
+ {DF3, EF3(array_end), Rest2};
+ Int when is_integer(Int) ->
+ {DF2, EF(Int), Rest};
+ Float when is_float(Float) ->
+ {DF2, EF(Float), Rest};
+ Atom when is_atom(Atom) ->
+ {DF2, EF(Atom), Rest};
+ String when is_binary(String) ->
+ {DF2, EF(String), Rest};
+ _OtherToken ->
+ err(unexpected_token)
+ end
end.
-must_parse_one(DF,EF,Acc,Error)->
+must_parse_one(DF, EF, Acc, Error) ->
case parse_one(DF, EF, Acc) of
- none ->
- err(Error);
- Else ->
- Else
+ none ->
+ err(Error);
+ Else ->
+ Else
end.
must_toke(DF, Data, Error) ->
case toke(DF, Data) of
- none ->
- err(Error);
- Result ->
- Result
+ none ->
+ err(Error);
+ Result ->
+ Result
end.
toke(DF, <<>>) ->
case DF() of
- done ->
- none;
- {Data, DF2} ->
- toke(DF2, Data)
+ done ->
+ none;
+ {Data, DF2} ->
+ toke(DF2, Data)
end;
-toke(DF, <<C,Rest/binary>>) when ?IS_WS(C)->
+toke(DF, <<C, Rest/binary>>) when ?IS_WS(C) ->
toke(DF, Rest);
-toke(DF, <<${,Rest/binary>>) ->
+toke(DF, <<${, Rest/binary>>) ->
{"{", DF, Rest};
-toke(DF, <<$},Rest/binary>>) ->
+toke(DF, <<$}, Rest/binary>>) ->
{"}", DF, Rest};
-toke(DF, <<$[,Rest/binary>>) ->
+toke(DF, <<$[, Rest/binary>>) ->
{"[", DF, Rest};
-toke(DF, <<$],Rest/binary>>) ->
+toke(DF, <<$], Rest/binary>>) ->
{"]", DF, Rest};
-toke(DF, <<$",Rest/binary>>) ->
- toke_string(DF,Rest,[]);
-toke(DF, <<$,,Rest/binary>>) ->
+toke(DF, <<$", Rest/binary>>) ->
+ toke_string(DF, Rest, []);
+toke(DF, <<$,, Rest/binary>>) ->
{",", DF, Rest};
-toke(DF, <<$:,Rest/binary>>) ->
+toke(DF, <<$:, Rest/binary>>) ->
{":", DF, Rest};
-toke(DF, <<$-,Rest/binary>>) ->
- {<<C,_/binary>> = Data, DF2} = must_df(DF,1,Rest,expected_number),
+toke(DF, <<$-, Rest/binary>>) ->
+ {<<C, _/binary>> = Data, DF2} = must_df(DF, 1, Rest, expected_number),
case ?IS_DIGIT(C) of
- true ->
- toke_number_leading(DF2, Data, "-");
- false ->
- err(expected_number)
+ true ->
+ toke_number_leading(DF2, Data, "-");
+ false ->
+ err(expected_number)
end;
-toke(DF, <<C,_/binary>> = Data) when ?IS_DIGIT(C) ->
+toke(DF, <<C, _/binary>> = Data) when ?IS_DIGIT(C) ->
toke_number_leading(DF, Data, []);
-toke(DF, <<$t,Rest/binary>>) ->
+toke(DF, <<$t, Rest/binary>>) ->
{Data, DF2} = must_match(<<"rue">>, DF, Rest),
{true, DF2, Data};
-toke(DF, <<$f,Rest/binary>>) ->
+toke(DF, <<$f, Rest/binary>>) ->
{Data, DF2} = must_match(<<"alse">>, DF, Rest),
{false, DF2, Data};
-toke(DF, <<$n,Rest/binary>>) ->
+toke(DF, <<$n, Rest/binary>>) ->
{Data, DF2} = must_match(<<"ull">>, DF, Rest),
{null, DF2, Data};
toke(_, _) ->
err(bad_token).
-
must_match(Pattern, DF, Data) ->
Size = size(Pattern),
case must_df(DF, Size, Data, bad_token) of
- {<<Pattern:Size/binary,Data2/binary>>, DF2} ->
- {Data2, DF2};
- {_, _} ->
- err(bad_token)
+ {<<Pattern:Size/binary, Data2/binary>>, DF2} ->
+ {Data2, DF2};
+ {_, _} ->
+ err(bad_token)
end.
-must_df(DF,Error)->
+must_df(DF, Error) ->
case DF() of
- done ->
- err(Error);
- {Data, DF2} ->
- {Data, DF2}
- end.
-
-
-must_df(DF,NeedLen,Acc,Error)->
- if size(Acc) >= NeedLen ->
- {Acc, DF};
- true ->
- case DF() of
done ->
err(Error);
{Data, DF2} ->
- must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
- end
+ {Data, DF2}
end.
+must_df(DF, NeedLen, Acc, Error) ->
+ if
+ size(Acc) >= NeedLen ->
+ {Acc, DF};
+ true ->
+ case DF() of
+ done ->
+ err(Error);
+ {Data, DF2} ->
+ must_df(DF2, NeedLen, <<Acc/binary, Data/binary>>, Error)
+ end
+ end.
-parse_object(DF,EF,Acc) ->
+parse_object(DF, EF, Acc) ->
case must_toke(DF, Acc, unterminated_object) of
- {String, DF2, Rest} when is_binary(String)->
- EF2 = EF({key,String}),
- case must_toke(DF2,Rest,unterminated_object) of
- {":", DF3, Rest2} ->
- {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
- case must_toke(DF4,Rest3, unterminated_object) of
- {",", DF5, Rest4} ->
- parse_object(DF5, EF3, Rest4);
- {"}", DF5, Rest4} ->
- {DF5, EF3, Rest4};
- {_, _, _} ->
- err(unexpected_token)
+ {String, DF2, Rest} when is_binary(String) ->
+ EF2 = EF({key, String}),
+ case must_toke(DF2, Rest, unterminated_object) of
+ {":", DF3, Rest2} ->
+ {DF4, EF3, Rest3} = must_parse_one(DF3, EF2, Rest2, expected_value),
+ case must_toke(DF4, Rest3, unterminated_object) of
+ {",", DF5, Rest4} ->
+ parse_object(DF5, EF3, Rest4);
+ {"}", DF5, Rest4} ->
+ {DF5, EF3, Rest4};
+ {_, _, _} ->
+ err(unexpected_token)
+ end;
+ _Else ->
+ err(expected_colon)
end;
- _Else ->
- err(expected_colon)
- end;
- {"}", DF2, Rest} ->
- {DF2, EF, Rest};
- {_, _, _} ->
- err(unexpected_token)
- end.
-
-parse_array0(DF,EF,Acc) ->
- case toke(DF, Acc) of
- none ->
- err(unterminated_array);
- {",", DF2, Rest} ->
- parse_array(DF2,EF,Rest);
- {"]", DF2, Rest} ->
- {DF2,EF,Rest};
- _ ->
- err(unexpected_token)
+ {"}", DF2, Rest} ->
+ {DF2, EF, Rest};
+ {_, _, _} ->
+ err(unexpected_token)
end.
-parse_array(DF,EF,Acc) ->
+parse_array0(DF, EF, Acc) ->
case toke(DF, Acc) of
- none ->
- err(unterminated_array);
- {Token, DF2, Rest} ->
- case Token of
- "{" ->
- EF2 = EF(object_start),
- {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
- parse_array0(DF3, EF3(object_end), Rest2);
- "[" ->
- EF2 = EF(array_start),
- {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
- parse_array0(DF3, EF3(array_end), Rest2);
- Int when is_integer(Int)->
- parse_array0(DF2, EF(Int), Rest);
- Float when is_float(Float)->
- parse_array0(DF2, EF(Float), Rest);
- Atom when is_atom(Atom)->
- parse_array0(DF2, EF(Atom), Rest);
- String when is_binary(String)->
- parse_array0(DF2, EF(String), Rest);
- "]" ->
+ none ->
+ err(unterminated_array);
+ {",", DF2, Rest} ->
+ parse_array(DF2, EF, Rest);
+ {"]", DF2, Rest} ->
{DF2, EF, Rest};
_ ->
err(unexpected_token)
- end
end.
+parse_array(DF, EF, Acc) ->
+ case toke(DF, Acc) of
+ none ->
+ err(unterminated_array);
+ {Token, DF2, Rest} ->
+ case Token of
+ "{" ->
+ EF2 = EF(object_start),
+ {DF3, EF3, Rest2} = parse_object(DF2, EF2, Rest),
+ parse_array0(DF3, EF3(object_end), Rest2);
+ "[" ->
+ EF2 = EF(array_start),
+ {DF3, EF3, Rest2} = parse_array(DF2, EF2, Rest),
+ parse_array0(DF3, EF3(array_end), Rest2);
+ Int when is_integer(Int) ->
+ parse_array0(DF2, EF(Int), Rest);
+ Float when is_float(Float) ->
+ parse_array0(DF2, EF(Float), Rest);
+ Atom when is_atom(Atom) ->
+ parse_array0(DF2, EF(Atom), Rest);
+ String when is_binary(String) ->
+ parse_array0(DF2, EF(String), Rest);
+ "]" ->
+ {DF2, EF, Rest};
+ _ ->
+ err(unexpected_token)
+ end
+ end.
toke_string(DF, <<>>, Acc) ->
{Data, DF2} = must_df(DF, unterminated_string),
toke_string(DF2, Data, Acc);
-toke_string(DF, <<$\\,$",Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $", Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$" | Acc]);
-toke_string(DF, <<$\\,$\\,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $\\, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\\ | Acc]);
-toke_string(DF, <<$\\,$/,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $/, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$/ | Acc]);
-toke_string(DF, <<$\\,$b,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $b, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\b | Acc]);
-toke_string(DF, <<$\\,$f,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $f, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\f | Acc]);
-toke_string(DF, <<$\\,$n,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $n, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\n | Acc]);
-toke_string(DF, <<$\\,$r,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $r, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\r | Acc]);
-toke_string(DF, <<$\\,$t,Rest/binary>>, Acc) ->
+toke_string(DF, <<$\\, $t, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [$\t | Acc]);
-toke_string(DF, <<$\\,$u,Rest/binary>>, Acc) ->
- {<<A,B,C,D,Data/binary>>, DF2} = must_df(DF,4,Rest,missing_hex),
+toke_string(DF, <<$\\, $u, Rest/binary>>, Acc) ->
+ {<<A, B, C, D, Data/binary>>, DF2} = must_df(DF, 4, Rest, missing_hex),
UTFChar = erlang:list_to_integer([A, B, C, D], 16),
- if UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
- err(invalid_utf_char);
- true ->
- ok
+ if
+ UTFChar == 16#FFFF orelse UTFChar == 16#FFFE ->
+ err(invalid_utf_char);
+ true ->
+ ok
end,
Chars = xmerl_ucs:to_utf8(UTFChar),
toke_string(DF2, Data, lists:reverse(Chars) ++ Acc);
toke_string(DF, <<$\\>>, Acc) ->
{Data, DF2} = must_df(DF, unterminated_string),
- toke_string(DF2, <<$\\,Data/binary>>, Acc);
+ toke_string(DF2, <<$\\, Data/binary>>, Acc);
toke_string(_DF, <<$\\, _/binary>>, _Acc) ->
err(bad_escape);
toke_string(DF, <<$", Rest/binary>>, Acc) ->
@@ -327,72 +319,74 @@ toke_string(DF, <<$", Rest/binary>>, Acc) ->
toke_string(DF, <<C, Rest/binary>>, Acc) ->
toke_string(DF, Rest, [C | Acc]).
-
-toke_number_leading(DF, <<Digit,Rest/binary>>, Acc)
- when ?IS_DIGIT(Digit) ->
+toke_number_leading(DF, <<Digit, Rest/binary>>, Acc) when
+ ?IS_DIGIT(Digit)
+->
toke_number_leading(DF, Rest, [Digit | Acc]);
-toke_number_leading(DF, <<C,_/binary>>=Rest, Acc)
- when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+toke_number_leading(DF, <<C, _/binary>> = Rest, Acc) when
+ ?IS_WS(C) orelse ?IS_DELIM(C)
+->
{list_to_integer(lists:reverse(Acc)), DF, Rest};
toke_number_leading(DF, <<>>, Acc) ->
case DF() of
- done ->
- {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_leading(DF2, Data, Acc)
+ done ->
+ {list_to_integer(lists:reverse(Acc)), fun() -> done end, <<>>};
+ {Data, DF2} ->
+ toke_number_leading(DF2, Data, Acc)
end;
toke_number_leading(DF, <<$., Rest/binary>>, Acc) ->
- toke_number_trailing(DF, Rest, [$.|Acc]);
+ toke_number_trailing(DF, Rest, [$. | Acc]);
toke_number_leading(DF, <<$e, Rest/binary>>, Acc) ->
- toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
+ toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
toke_number_leading(DF, <<$E, Rest/binary>>, Acc) ->
- toke_number_exponent(DF, Rest, [$e, $0, $.|Acc]);
+ toke_number_exponent(DF, Rest, [$e, $0, $. | Acc]);
toke_number_leading(_, _, _) ->
err(unexpected_character_in_number).
-toke_number_trailing(DF, <<Digit,Rest/binary>>, Acc)
- when ?IS_DIGIT(Digit) ->
+toke_number_trailing(DF, <<Digit, Rest/binary>>, Acc) when
+ ?IS_DIGIT(Digit)
+->
toke_number_trailing(DF, Rest, [Digit | Acc]);
-toke_number_trailing(DF, <<C,_/binary>>=Rest, Acc)
- when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+toke_number_trailing(DF, <<C, _/binary>> = Rest, Acc) when
+ ?IS_WS(C) orelse ?IS_DELIM(C)
+->
{list_to_float(lists:reverse(Acc)), DF, Rest};
toke_number_trailing(DF, <<>>, Acc) ->
case DF() of
- done ->
- {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_trailing(DF2, Data, Acc)
+ done ->
+ {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
+ {Data, DF2} ->
+ toke_number_trailing(DF2, Data, Acc)
end;
-toke_number_trailing(DF, <<"e", Rest/binary>>, [C|_]=Acc) when C /= $. ->
- toke_number_exponent(DF, Rest, [$e|Acc]);
-toke_number_trailing(DF, <<"E", Rest/binary>>, [C|_]=Acc) when C /= $. ->
- toke_number_exponent(DF, Rest, [$e|Acc]);
+toke_number_trailing(DF, <<"e", Rest/binary>>, [C | _] = Acc) when C /= $. ->
+ toke_number_exponent(DF, Rest, [$e | Acc]);
+toke_number_trailing(DF, <<"E", Rest/binary>>, [C | _] = Acc) when C /= $. ->
+ toke_number_exponent(DF, Rest, [$e | Acc]);
toke_number_trailing(_, _, _) ->
err(unexpected_character_in_number).
-
-toke_number_exponent(DF, <<Digit,Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
+toke_number_exponent(DF, <<Digit, Rest/binary>>, Acc) when ?IS_DIGIT(Digit) ->
toke_number_exponent(DF, Rest, [Digit | Acc]);
-toke_number_exponent(DF, <<Sign,Rest/binary>>, [$e|_]=Acc)
- when Sign == $+ orelse Sign == $- ->
+toke_number_exponent(DF, <<Sign, Rest/binary>>, [$e | _] = Acc) when
+ Sign == $+ orelse Sign == $-
+->
toke_number_exponent(DF, Rest, [Sign | Acc]);
-toke_number_exponent(DF, <<C,_/binary>>=Rest, Acc)
- when ?IS_WS(C) orelse ?IS_DELIM(C) ->
+toke_number_exponent(DF, <<C, _/binary>> = Rest, Acc) when
+ ?IS_WS(C) orelse ?IS_DELIM(C)
+->
{list_to_float(lists:reverse(Acc)), DF, Rest};
toke_number_exponent(DF, <<>>, Acc) ->
case DF() of
- done ->
- {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
- {Data, DF2} ->
- toke_number_exponent(DF2, Data, Acc)
+ done ->
+ {list_to_float(lists:reverse(Acc)), fun() -> done end, <<>>};
+ {Data, DF2} ->
+ toke_number_exponent(DF2, Data, Acc)
end;
toke_number_exponent(_, _, _) ->
- err(unexpected_character_in_number).
-
-
-err(Error)->
- throw({parse_error,Error}).
+ err(unexpected_character_in_number).
+err(Error) ->
+ throw({parse_error, Error}).
make_ejson([], Stack) ->
Stack;
@@ -404,8 +398,8 @@ make_ejson([object_start | RevEvs], [ObjValues, PrevValues | RestStack]) ->
make_ejson(RevEvs, [[{ObjValues} | PrevValues] | RestStack]);
make_ejson([object_end | RevEvs], Stack) ->
make_ejson(RevEvs, [[] | Stack]);
-make_ejson([{key, String} | RevEvs], [[PrevValue|RestObject] | RestStack] = _Stack) ->
- make_ejson(RevEvs, [[{String, PrevValue}|RestObject] | RestStack]);
+make_ejson([{key, String} | RevEvs], [[PrevValue | RestObject] | RestStack] = _Stack) ->
+ make_ejson(RevEvs, [[{String, PrevValue} | RestObject] | RestStack]);
make_ejson([Value | RevEvs], [Vals | RestStack] = _Stack) ->
make_ejson(RevEvs, [[Value | Vals] | RestStack]).
@@ -414,7 +408,6 @@ collect_events(get_results, Acc) ->
collect_events(Ev, Acc) ->
fun(NextEv) -> collect_events(NextEv, [Ev | Acc]) end.
-
collect_object(object_end, 0, ReturnControl, Acc) ->
[[Obj]] = make_ejson([object_end | Acc], [[]]),
ReturnControl(Obj);
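The hunks above only reflow json_stream_parse; the event-callback protocol its comments describe (each event fun returns the fun that receives the next event) is unchanged. A minimal usage sketch, not part of this commit, assuming the module is on the code path and that `to_ejson/1` accepts the same binary/list/fun inputs as `events/2`:

```erlang
%% Illustrative only -- not part of the erlfmt commit.
demo() ->
    Json = <<"{\"a\": [1, true]}">>,

    %% Expected EJSON shape: {[{<<"a">>, [1, true]}]}
    EJson = json_stream_parse:to_ejson(Json),

    %% Drive events/2 directly. Each event fun must return the fun for
    %% the next event; a named fun that returns itself prints the raw
    %% stream: object_start, {key,<<"a">>}, array_start, 1, true,
    %% array_end, object_end.
    Printer = fun P(Ev) ->
        io:format("event: ~p~n", [Ev]),
        P
    end,
    json_stream_parse:events(Json, Printer),
    EJson.
```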
diff --git a/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl
index ac4bb84f3..2d58f847e 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_attachments_too_large.erl
@@ -16,7 +16,6 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/src/couch_replicator.hrl").
-
setup(_) ->
Ctx = test_util:start_couch([couch_replicator]),
Source = create_db(),
@@ -24,47 +23,47 @@ setup(_) ->
Target = create_db(),
{Ctx, {Source, Target}}.
-
teardown(_, {Ctx, {Source, Target}}) ->
delete_db(Source),
delete_db(Target),
config:delete("couchdb", "max_attachment_size"),
ok = test_util:stop_couch(Ctx).
-
attachment_too_large_replication_test_() ->
Pairs = [{remote, remote}],
{
"Attachment size too large replication tests",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{Pair, fun should_succeed/2} || Pair <- Pairs] ++
- [{Pair, fun should_fail/2} || Pair <- Pairs]
+ [{Pair, fun should_fail/2} || Pair <- Pairs]
}
}.
-
should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)}
+ ]},
config:set("couchdb", "max_attachment_size", "1000", _Persist = false),
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)).
-
should_fail({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)}
+ ]},
config:set("couchdb", "max_attachment_size", "999", _Persist = false),
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- ?_assertError({badmatch, {not_found, missing}},
- couch_replicator_test_helper:compare_dbs(Source, Target)).
-
+ ?_assertError(
+ {badmatch, {not_found, missing}},
+ couch_replicator_test_helper:compare_dbs(Source, Target)
+ ).
create_db() ->
DbName = ?tempdb(),
@@ -72,7 +71,6 @@ create_db() ->
ok = couch_db:close(Db),
DbName.
-
create_doc_with_attachment(DbName, DocId, AttSize) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
Doc = #doc{id = DocId, atts = att(AttSize)},
@@ -80,22 +78,21 @@ create_doc_with_attachment(DbName, DocId, AttSize) ->
couch_db:close(Db),
ok.
-
att(Size) when is_integer(Size), Size >= 1 ->
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(_Bytes) ->
- << <<"x">> || _ <- lists:seq(1, Size) >>
- end}
- ])].
-
+ [
+ couch_att:new([
+ {name, <<"att">>},
+ {type, <<"app/binary">>},
+ {att_len, Size},
+ {data, fun(_Bytes) ->
+ <<<<"x">> || _ <- lists:seq(1, Size)>>
+ end}
+ ])
+ ].
delete_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
db_url(remote, DbName) ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
index 997c84863..1c093d58c 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_compact_tests.erl
@@ -60,25 +60,29 @@ compact_test_() ->
"Compaction during replication tests",
{
foreachx,
- fun setup/1, fun teardown/2,
- [{Pair, fun should_populate_replicate_compact/2}
- || Pair <- Pairs]
+ fun setup/1,
+ fun teardown/2,
+ [
+ {Pair, fun should_populate_replicate_compact/2}
+ || Pair <- Pairs
+ ]
}
}.
-
should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) ->
{ok, RepPid, RepId} = replicate(Source, Target),
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_run_replication(RepPid, RepId, Source, Target),
- should_all_processes_be_alive(RepPid, Source, Target),
- should_populate_and_compact(RepPid, Source, Target, 50, 3),
- should_wait_target_in_sync(Source, Target),
- should_ensure_replication_still_running(RepPid, RepId, Source, Target),
- should_cancel_replication(RepId, RepPid),
- should_compare_databases(Source, Target)
- ]}}.
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_run_replication(RepPid, RepId, Source, Target),
+ should_all_processes_be_alive(RepPid, Source, Target),
+ should_populate_and_compact(RepPid, Source, Target, 50, 3),
+ should_wait_target_in_sync(Source, Target),
+ should_ensure_replication_still_running(RepPid, RepId, Source, Target),
+ should_cancel_replication(RepId, RepPid),
+ should_compare_databases(Source, Target)
+ ]}
+ }.
should_all_processes_be_alive(RepPid, Source, Target) ->
?_test(begin
@@ -96,18 +100,20 @@ should_ensure_replication_still_running(RepPid, RepId, Source, Target) ->
?_test(check_active_tasks(RepPid, RepId, Source, Target)).
check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
- Source = case Src of
- {remote, NameSrc} ->
- <<(db_url(NameSrc))/binary, $/>>;
- _ ->
- Src
- end,
- Target = case Tgt of
- {remote, NameTgt} ->
- <<(db_url(NameTgt))/binary, $/>>;
- _ ->
- Tgt
- end,
+ Source =
+ case Src of
+ {remote, NameSrc} ->
+ <<(db_url(NameSrc))/binary, $/>>;
+ _ ->
+ Src
+ end,
+ Target =
+ case Tgt of
+ {remote, NameTgt} ->
+ <<(db_url(NameTgt))/binary, $/>>;
+ _ ->
+ Tgt
+ end,
FullRepId = ?l2b(BaseId ++ Ext),
Pid = ?l2b(pid_to_list(RepPid)),
RepTasks = wait_for_task_status(),
@@ -129,10 +135,12 @@ check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
?assert(is_integer(Pending)).
replication_tasks() ->
- lists:filter(fun(P) ->
- couch_util:get_value(type, P) =:= replication
- end, couch_task_status:all()).
-
+ lists:filter(
+ fun(P) ->
+ couch_util:get_value(type, P) =:= replication
+ end,
+ couch_task_status:all()
+ ).
wait_for_task_status() ->
test_util:wait(fun() ->
@@ -151,66 +159,73 @@ should_cancel_replication(RepId, RepPid) ->
end).
should_populate_and_compact(RepPid, Source, Target, BatchSize, Rounds) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, SourceDb0} = reopen_db(Source),
- Writer = spawn_writer(SourceDb0),
- lists:foreach(
- fun(N) ->
- {ok, SourceDb} = reopen_db(Source),
- {ok, TargetDb} = reopen_db(Target),
- pause_writer(Writer),
-
- compact_db("source", SourceDb),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
- wait_for_compaction("source", SourceDb),
-
- compact_db("target", TargetDb),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(TargetDb))),
- wait_for_compaction("target", TargetDb),
-
- {ok, SourceDb2} = reopen_db(SourceDb),
- {ok, TargetDb2} = reopen_db(TargetDb),
-
- resume_writer(Writer),
- wait_writer(Writer, BatchSize * N),
-
- compact_db("source", SourceDb2),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(SourceDb2))),
- pause_writer(Writer),
- wait_for_compaction("source", SourceDb2),
- resume_writer(Writer),
-
- compact_db("target", TargetDb2),
- ?assert(is_process_alive(RepPid)),
- ?assert(is_process_alive(couch_db:get_pid(TargetDb2))),
- pause_writer(Writer),
- wait_for_compaction("target", TargetDb2),
- resume_writer(Writer)
- end, lists:seq(1, Rounds)),
- stop_writer(Writer)
- end)}.
+ {timeout, ?TIMEOUT_EUNIT,
+ ?_test(begin
+ {ok, SourceDb0} = reopen_db(Source),
+ Writer = spawn_writer(SourceDb0),
+ lists:foreach(
+ fun(N) ->
+ {ok, SourceDb} = reopen_db(Source),
+ {ok, TargetDb} = reopen_db(Target),
+ pause_writer(Writer),
+
+ compact_db("source", SourceDb),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(couch_db:get_pid(SourceDb))),
+ wait_for_compaction("source", SourceDb),
+
+ compact_db("target", TargetDb),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(couch_db:get_pid(TargetDb))),
+ wait_for_compaction("target", TargetDb),
+
+ {ok, SourceDb2} = reopen_db(SourceDb),
+ {ok, TargetDb2} = reopen_db(TargetDb),
+
+ resume_writer(Writer),
+ wait_writer(Writer, BatchSize * N),
+
+ compact_db("source", SourceDb2),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(couch_db:get_pid(SourceDb2))),
+ pause_writer(Writer),
+ wait_for_compaction("source", SourceDb2),
+ resume_writer(Writer),
+
+ compact_db("target", TargetDb2),
+ ?assert(is_process_alive(RepPid)),
+ ?assert(is_process_alive(couch_db:get_pid(TargetDb2))),
+ pause_writer(Writer),
+ wait_for_compaction("target", TargetDb2),
+ resume_writer(Writer)
+ end,
+ lists:seq(1, Rounds)
+ ),
+ stop_writer(Writer)
+ end)}.
should_wait_target_in_sync({remote, Source}, Target) ->
should_wait_target_in_sync(Source, Target);
should_wait_target_in_sync(Source, {remote, Target}) ->
should_wait_target_in_sync(Source, Target);
should_wait_target_in_sync(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_assert(begin
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
- ok = couch_db:close(SourceDb),
- SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
- wait_target_in_sync_loop(SourceDocCount, Target, 300)
- end)}.
+ {timeout, ?TIMEOUT_EUNIT,
+ ?_assert(begin
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, SourceInfo} = couch_db:get_db_info(SourceDb),
+ ok = couch_db:close(SourceDb),
+ SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
+ wait_target_in_sync_loop(SourceDocCount, Target, 300)
+ end)}.
wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, "Could not get source and target databases in sync"}]});
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Could not get source and target databases in sync"}
+ ]}
+ );
wait_target_in_sync_loop(DocCount, {remote, TargetName}, RetriesLeft) ->
wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft);
wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
@@ -231,33 +246,41 @@ should_compare_databases({remote, Source}, Target) ->
should_compare_databases(Source, {remote, Target}) ->
should_compare_databases(Source, Target);
should_compare_databases(Source, Target) ->
- {timeout, 35, ?_test(begin
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
- Fun = fun(FullDocInfo, Acc) ->
- {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
- {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
- DocId = couch_util:get_value(<<"_id">>, Props),
- DocTarget = case couch_db:open_doc(TargetDb, DocId) of
- {ok, DocT} ->
- DocT;
- Error ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, lists:concat(["Error opening document '",
- ?b2l(DocId), "' from target: ",
- couch_util:to_list(Error)])}]})
+ {timeout, 35,
+ ?_test(begin
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, TargetDb} = couch_db:open_int(Target, []),
+ Fun = fun(FullDocInfo, Acc) ->
+ {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+ {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+ DocId = couch_util:get_value(<<"_id">>, Props),
+ DocTarget =
+ case couch_db:open_doc(TargetDb, DocId) of
+ {ok, DocT} ->
+ DocT;
+ Error ->
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ lists:concat([
+ "Error opening document '",
+ ?b2l(DocId),
+ "' from target: ",
+ couch_util:to_list(Error)
+ ])}
+ ]}
+ )
+ end,
+ DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+ ?assertEqual(DocJson, DocTargetJson),
+ {ok, Acc}
end,
- DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
- ?assertEqual(DocJson, DocTargetJson),
- {ok, Acc}
- end,
- {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb)
- end)}.
-
+ {ok, _} = couch_db:fold_docs(SourceDb, Fun, [], []),
+ ok = couch_db:close(SourceDb),
+ ok = couch_db:close(TargetDb)
+ end)}.
reopen_db({remote, Db}) ->
reopen_db(Db);
@@ -268,7 +291,6 @@ reopen_db(DbName) when is_binary(DbName) ->
reopen_db(Db) ->
reopen_db(couch_db:name(Db)).
-
compact_db(Type, Db0) ->
Name = couch_db:name(Db0),
{ok, Db} = couch_db:open_int(Name, []),
@@ -281,18 +303,35 @@ compact_db(Type, Db0) ->
ok;
{'DOWN', MonRef, process, CompactPid, Reason} ->
erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason,
- lists:concat(["Error compacting ", Type, " database ",
- ?b2l(Name), ": ",
- couch_util:to_list(Reason)])}]})
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ lists:concat([
+ "Error compacting ",
+ Type,
+ " database ",
+ ?b2l(Name),
+ ": ",
+ couch_util:to_list(Reason)
+ ])}
+ ]}
+ )
after ?TIMEOUT ->
erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, lists:concat(["Compaction for ", Type, " database ",
- ?b2l(Name), " didn't finish"])}]})
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ lists:concat([
+ "Compaction for ",
+ Type,
+ " database ",
+ ?b2l(Name),
+ " didn't finish"
+ ])}
+ ]}
+ )
end,
ok = couch_db:close(Db).
@@ -304,31 +343,37 @@ wait_for_compaction(Type, Db) ->
ok;
{error, Reason} ->
erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, lists:concat(["Compaction of ", Type,
- " database failed with: ", Reason])}]})
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ lists:concat([
+ "Compaction of ",
+ Type,
+ " database failed with: ",
+ Reason
+ ])}
+ ]}
+ )
end.
replicate({remote, Db}, Target) ->
replicate(db_url(Db), Target);
-
replicate(Source, {remote, Db}) ->
replicate(Source, db_url(Db));
-
replicate(Source, Target) ->
- RepObject = {[
- {<<"source">>, Source},
- {<<"target">>, Target},
- {<<"continuous">>, true}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, Source},
+ {<<"target">>, Target},
+ {<<"continuous">>, true}
+ ]},
{ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
ok = couch_replicator_scheduler:add_job(Rep),
couch_replicator_scheduler:reschedule(),
Pid = get_pid(Rep#rep.id),
{ok, Pid, Rep#rep.id}.
-
wait_writer(Pid, NumDocs) ->
case get_writer_num_docs_written(Pid) of
N when N >= NumDocs ->
@@ -342,7 +387,6 @@ spawn_writer(Db) ->
Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
Pid.
-
pause_writer(Pid) ->
Ref = make_ref(),
Pid ! {pause, Ref},
@@ -350,10 +394,13 @@ pause_writer(Pid) ->
{paused, Ref} ->
ok
after ?TIMEOUT_WRITER ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Failed to pause source database writer"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Failed to pause source database writer"}
+ ]}
+ )
end.
resume_writer(Pid) ->
@@ -363,10 +410,13 @@ resume_writer(Pid) ->
{ok, Ref} ->
ok
after ?TIMEOUT_WRITER ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Failed to pause source database writer"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Failed to pause source database writer"}
+ ]}
+ )
end.
get_writer_num_docs_written(Pid) ->
@@ -376,11 +426,15 @@ get_writer_num_docs_written(Pid) ->
{count, Ref, Count} ->
Count
after ?TIMEOUT_WRITER ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout getting number of documents written"
- " from source database writer"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ "Timeout getting number of documents written"
+ " from source database writer"}
+ ]}
+ )
end.
stop_writer(Pid) ->
@@ -393,38 +447,52 @@ stop_writer(Pid) ->
{'DOWN', MonRef, process, Pid, _Reason} ->
DocsWritten
after ?TIMEOUT ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout stopping source database writer"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout stopping source database writer"}
+ ]}
+ )
end
after ?TIMEOUT_WRITER ->
- erlang:error({assertion_failed,
- [{module, ?MODULE},
- {line, ?LINE},
- {reason, "Timeout stopping source database writer"}]})
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout stopping source database writer"}
+ ]}
+ )
end.
writer_loop(Db0, Parent, Counter) ->
DbName = couch_db:name(Db0),
{ok, Data} = file:read_file(?ATTFILE),
maybe_pause(Parent, Counter),
- Docs = lists:map(fun(I) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, ?l2b(integer_to_list(Counter + I))},
- {<<"value">>, Counter + I},
- {<<"_attachments">>, {[
- {<<"icon1.png">>, {[
- {<<"data">>, base64:encode(Data)},
- {<<"content_type">>, <<"image/png">>}
- ]}},
- {<<"icon2.png">>, {[
- {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
- {<<"content_type">>, <<"image/png">>}
- ]}}
- ]}}
- ]})
- end, lists:seq(1, ?WRITE_BATCH_SIZE)),
+ Docs = lists:map(
+ fun(I) ->
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, ?l2b(integer_to_list(Counter + I))},
+ {<<"value">>, Counter + I},
+ {<<"_attachments">>,
+ {[
+ {<<"icon1.png">>,
+ {[
+ {<<"data">>, base64:encode(Data)},
+ {<<"content_type">>, <<"image/png">>}
+ ]}},
+ {<<"icon2.png">>,
+ {[
+ {<<"data">>, base64:encode(iolist_to_binary([Data, Data]))},
+ {<<"content_type">>, <<"image/png">>}
+ ]}}
+ ]}}
+ ]}
+ )
+ end,
+ lists:seq(1, ?WRITE_BATCH_SIZE)
+ ),
maybe_pause(Parent, Counter),
{ok, Db} = couch_db:open_int(DbName, []),
{ok, _} = couch_db:update_docs(Db, Docs, []),
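The same mechanical rewrite recurs through these test files: erlfmt moves a `case` used as an expression onto the line below the `=`, and gives every argument of a multi-line call its own line. A before/after sketch of that pattern, illustrative rather than taken from the tree:

```erlang
%% Before erlfmt (hand-wrapped; case on the same line as the `=`):
%%
%%     Source = case Src of
%%         {remote, Name} -> Name;
%%         _ -> Src
%%     end,
%%     lists:foldl(fun(X, Acc) -> Acc + X end,
%%         0, Xs)
%%
%% After erlfmt (assumed default settings), matching the hunks above:
reformat_example(Src, Xs) ->
    Source =
        case Src of
            {remote, Name} -> Name;
            _ -> Src
        end,
    Sum = lists:foldl(
        fun(X, Acc) -> Acc + X end,
        0,
        Xs
    ),
    {Source, Sum}.
```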
diff --git a/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
index e75cc5a63..7adbb6852 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_connection_tests.erl
@@ -17,7 +17,6 @@
-define(TIMEOUT, 1000).
-
setup() ->
Host = config:get("httpd", "bind_address", "127.0.0.1"),
Port = config:get("httpd", "port", "5984"),
@@ -26,16 +25,17 @@ setup() ->
teardown(_) ->
ok.
-
httpc_pool_test_() ->
{
"replicator connection sharing tests",
{
setup,
- fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1,
+ fun() -> test_util:start_couch([couch_replicator]) end,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun connections_shared_after_release/1,
fun connections_not_shared_after_owner_death/1,
@@ -50,7 +50,6 @@ httpc_pool_test_() ->
}
}.
-
connections_shared_after_release({Host, Port}) ->
?_test(begin
URL = "http://" ++ Host ++ ":" ++ Port,
@@ -66,7 +65,6 @@ connections_shared_after_release({Host, Port}) ->
end
end).
-
connections_not_shared_after_owner_death({Host, Port}) ->
?_test(begin
URL = "http://" ++ Host ++ ":" ++ Port,
@@ -80,14 +78,15 @@ connections_not_shared_after_owner_death({Host, Port}) ->
{ok, Pid2} = couch_replicator_connection:acquire(URL),
?assertNotEqual(Pid, Pid2),
MRef = monitor(process, Pid),
- receive {'DOWN', MRef, process, Pid, _Reason} ->
- ?assert(not is_process_alive(Pid));
- Other -> throw(Other)
+ receive
+ {'DOWN', MRef, process, Pid, _Reason} ->
+ ?assert(not is_process_alive(Pid));
+ Other ->
+ throw(Other)
end
end
end).
-
idle_connections_closed({Host, Port}) ->
?_test(begin
URL = "http://" ++ Host ++ ":" ++ Port,
@@ -103,7 +102,6 @@ idle_connections_closed({Host, Port}) ->
?assert(not ets:member(couch_replicator_connection, Pid))
end).
-
test_owner_monitors({Host, Port}) ->
?_test(begin
URL = "http://" ++ Host ++ ":" ++ Port,
@@ -111,21 +109,28 @@ test_owner_monitors({Host, Port}) ->
assert_monitors_equal([{process, self()}]),
couch_replicator_connection:release(Worker0),
assert_monitors_equal([]),
- {Workers, Monitors} = lists:foldl(fun(_, {WAcc, MAcc}) ->
- {ok, Worker1} = couch_replicator_connection:acquire(URL),
- MAcc1 = [{process, self()} | MAcc],
- assert_monitors_equal(MAcc1),
- {[Worker1 | WAcc], MAcc1}
- end, {[], []}, lists:seq(1,5)),
- lists:foldl(fun(Worker2, Acc) ->
- [_ | NewAcc] = Acc,
- couch_replicator_connection:release(Worker2),
- assert_monitors_equal(NewAcc),
- NewAcc
- end, Monitors, Workers)
+ {Workers, Monitors} = lists:foldl(
+ fun(_, {WAcc, MAcc}) ->
+ {ok, Worker1} = couch_replicator_connection:acquire(URL),
+ MAcc1 = [{process, self()} | MAcc],
+ assert_monitors_equal(MAcc1),
+ {[Worker1 | WAcc], MAcc1}
+ end,
+ {[], []},
+ lists:seq(1, 5)
+ ),
+ lists:foldl(
+ fun(Worker2, Acc) ->
+ [_ | NewAcc] = Acc,
+ couch_replicator_connection:release(Worker2),
+ assert_monitors_equal(NewAcc),
+ NewAcc
+ end,
+ Monitors,
+ Workers
+ )
end).
-
worker_discards_creds_on_create({Host, Port}) ->
?_test(begin
{User, Pass, B64Auth} = user_pass(),
@@ -136,89 +141,81 @@ worker_discards_creds_on_create({Host, Port}) ->
?assert(string:str(Internals, Pass) =:= 0)
end).
-
worker_discards_url_creds_after_request({Host, _}) ->
?_test(begin
- {User, Pass, B64Auth} = user_pass(),
- {Port, ServerPid} = server(),
- PortStr = integer_to_list(Port),
- URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ PortStr,
- {ok, WPid} = couch_replicator_connection:acquire(URL),
- ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], [])),
- Internals = worker_internals(WPid),
- ?assert(string:str(Internals, B64Auth) =:= 0),
- ?assert(string:str(Internals, Pass) =:= 0),
- couch_replicator_connection:release(WPid),
- unlink(ServerPid),
- exit(ServerPid, kill)
+ {User, Pass, B64Auth} = user_pass(),
+ {Port, ServerPid} = server(),
+ PortStr = integer_to_list(Port),
+ URL = "http://" ++ User ++ ":" ++ Pass ++ "@" ++ Host ++ ":" ++ PortStr,
+ {ok, WPid} = couch_replicator_connection:acquire(URL),
+ ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], [])),
+ Internals = worker_internals(WPid),
+ ?assert(string:str(Internals, B64Auth) =:= 0),
+ ?assert(string:str(Internals, Pass) =:= 0),
+ couch_replicator_connection:release(WPid),
+ unlink(ServerPid),
+ exit(ServerPid, kill)
end).
-
worker_discards_creds_in_headers_after_request({Host, _}) ->
?_test(begin
- {_User, Pass, B64Auth} = user_pass(),
- {Port, ServerPid} = server(),
- PortStr = integer_to_list(Port),
- URL = "http://" ++ Host ++ ":" ++ PortStr,
- {ok, WPid} = couch_replicator_connection:acquire(URL),
- Headers = [{"Authorization", "Basic " ++ B64Auth}],
- ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, Headers, [])),
- Internals = worker_internals(WPid),
- ?assert(string:str(Internals, B64Auth) =:= 0),
- ?assert(string:str(Internals, Pass) =:= 0),
- couch_replicator_connection:release(WPid),
- unlink(ServerPid),
- exit(ServerPid, kill)
+ {_User, Pass, B64Auth} = user_pass(),
+ {Port, ServerPid} = server(),
+ PortStr = integer_to_list(Port),
+ URL = "http://" ++ Host ++ ":" ++ PortStr,
+ {ok, WPid} = couch_replicator_connection:acquire(URL),
+ Headers = [{"Authorization", "Basic " ++ B64Auth}],
+ ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, Headers, [])),
+ Internals = worker_internals(WPid),
+ ?assert(string:str(Internals, B64Auth) =:= 0),
+ ?assert(string:str(Internals, Pass) =:= 0),
+ couch_replicator_connection:release(WPid),
+ unlink(ServerPid),
+ exit(ServerPid, kill)
end).
-
worker_discards_proxy_creds_after_request({Host, _}) ->
?_test(begin
- {User, Pass, B64Auth} = user_pass(),
- {Port, ServerPid} = server(),
- PortStr = integer_to_list(Port),
- URL = "http://" ++ Host ++ ":" ++ PortStr,
- {ok, WPid} = couch_replicator_connection:acquire(URL),
- Opts = [
- {proxy_host, Host},
- {proxy_port, Port},
- {proxy_user, User},
- {proxy_pass, Pass}
- ],
- ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], Opts)),
- Internals = worker_internals(WPid),
- ?assert(string:str(Internals, B64Auth) =:= 0),
- ?assert(string:str(Internals, Pass) =:= 0),
- couch_replicator_connection:release(WPid),
- unlink(ServerPid),
- exit(ServerPid, kill)
+ {User, Pass, B64Auth} = user_pass(),
+ {Port, ServerPid} = server(),
+ PortStr = integer_to_list(Port),
+ URL = "http://" ++ Host ++ ":" ++ PortStr,
+ {ok, WPid} = couch_replicator_connection:acquire(URL),
+ Opts = [
+ {proxy_host, Host},
+ {proxy_port, Port},
+ {proxy_user, User},
+ {proxy_pass, Pass}
+ ],
+ ?assertMatch({ok, "200", _, _}, send_req(WPid, URL, [], Opts)),
+ Internals = worker_internals(WPid),
+ ?assert(string:str(Internals, B64Auth) =:= 0),
+ ?assert(string:str(Internals, Pass) =:= 0),
+ couch_replicator_connection:release(WPid),
+ unlink(ServerPid),
+ exit(ServerPid, kill)
end).
-
send_req(WPid, URL, Headers, Opts) ->
ibrowse:send_req_direct(WPid, URL, Headers, get, [], Opts).
-
user_pass() ->
User = "specialuser",
Pass = "averysecretpassword",
B64Auth = ibrowse_lib:encode_base64(User ++ ":" ++ Pass),
{User, Pass, B64Auth}.
-
worker_internals(Pid) ->
Dict = io_lib:format("~p", [erlang:process_info(Pid, dictionary)]),
State = io_lib:format("~p", [sys:get_state(Pid)]),
lists:flatten([Dict, State]).
-
server() ->
{ok, LSock} = gen_tcp:listen(0, [{recbuf, 256}, {active, false}]),
{ok, LPort} = inet:port(LSock),
SPid = spawn_link(fun() -> server_responder(LSock) end),
{LPort, SPid}.
-
server_responder(LSock) ->
{ok, Sock} = gen_tcp:accept(LSock),
case gen_tcp:recv(Sock, 0) of
@@ -234,7 +231,6 @@ server_responder(LSock) ->
end,
server_responder(LSock).
-
assert_monitors_equal(ShouldBe) ->
sys:get_status(couch_replicator_connection),
{monitors, Monitors} = process_info(whereis(couch_replicator_connection), monitors),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
index dae4cd122..8adcd25bd 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_create_target_with_options_tests.erl
@@ -22,23 +22,22 @@
setup() ->
Ctx = test_util:start_couch([fabric, mem3, couch_replicator, chttpd]),
Hashed = couch_passwords:hash_admin_password(?PASSWORD),
- ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USERNAME, ?b2l(Hashed), _Persist = false),
Source = ?tempdb(),
Target = ?tempdb(),
{Ctx, {Source, Target}}.
-
teardown({Ctx, {_Source, _Target}}) ->
config:delete("admins", ?USERNAME),
ok = test_util:stop_couch(Ctx).
-
create_target_with_options_replication_test_() ->
{
"Create target with range partitions tests",
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_create_target_with_q_4/1,
fun should_create_target_with_q_2_n_1/1,
@@ -48,14 +47,14 @@ create_target_with_options_replication_test_() ->
}
}.
-
should_create_target_with_q_4({_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, true},
- {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, true},
+ {<<"create_target_params">>, {[{<<"q">>, <<"4">>}]}}
+ ]},
create_db(Source),
create_doc(Source),
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
@@ -66,15 +65,14 @@ should_create_target_with_q_4({_Ctx, {Source, Target}}) ->
delete_db(Target),
?_assertEqual(4, couch_util:get_value(q, ClusterInfo)).
-
should_create_target_with_q_2_n_1({_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, true},
- {<<"create_target_params">>,
- {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, true},
+ {<<"create_target_params">>, {[{<<"q">>, <<"2">>}, {<<"n">>, <<"1">>}]}}
+ ]},
create_db(Source),
create_doc(Source),
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
@@ -88,13 +86,13 @@ should_create_target_with_q_2_n_1({_Ctx, {Source, Target}}) ->
?_assertEqual(1, couch_util:get_value(n, ClusterInfo))
].
-
should_create_target_with_default({_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, true}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, true}
+ ]},
create_db(Source),
create_doc(Source),
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
@@ -106,14 +104,14 @@ should_create_target_with_default({_Ctx, {Source, Target}}) ->
delete_db(Target),
?_assertEqual(Q, couch_util:get_value(q, ClusterInfo)).
-
should_not_create_target_with_q_any({_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(Source)},
- {<<"target">>, db_url(Target)},
- {<<"create_target">>, false},
- {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(Source)},
+ {<<"target">>, db_url(Target)},
+ {<<"create_target">>, false},
+ {<<"create_target_params">>, {[{<<"q">>, <<"1">>}]}}
+ ]},
create_db(Source),
create_doc(Source),
{error, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
@@ -121,23 +119,26 @@ should_not_create_target_with_q_any({_Ctx, {Source, Target}}) ->
delete_db(Source),
?_assertEqual(false, DbExist).
-
create_doc(DbName) ->
Body = {[{<<"foo">>, <<"bar">>}]},
NewDoc = #doc{body = Body},
{ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
-
create_db(DbName) ->
ok = fabric:create_db(DbName, [?ADMIN_CTX]).
-
delete_db(DbName) ->
ok = fabric:delete_db(DbName, [?ADMIN_CTX]).
-
db_url(DbName) ->
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
- ?l2b(io_lib:format("http://~s:~s@~s:~b/~s", [?USERNAME, ?PASSWORD, Addr,
- Port, DbName])).
+ ?l2b(
+ io_lib:format("http://~s:~s@~s:~b/~s", [
+ ?USERNAME,
+ ?PASSWORD,
+ Addr,
+ Port,
+ DbName
+ ])
+ ).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl
index 6b4f95c25..7778bd77d 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl
@@ -16,29 +16,24 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/src/couch_replicator.hrl").
-
setup_all() ->
test_util:start_couch([couch_replicator, chttpd, mem3, fabric]).
-
teardown_all(Ctx) ->
ok = test_util:stop_couch(Ctx).
-
setup() ->
meck:unload(),
Source = setup_db(),
Target = setup_db(),
{Source, Target}.
-
teardown({Source, Target}) ->
meck:unload(),
teardown_db(Source),
teardown_db(Target),
ok.
-
error_reporting_test_() ->
{
setup,
@@ -59,7 +54,6 @@ error_reporting_test_() ->
}
}.
-
t_fail_bulk_docs({Source, Target}) ->
?_test(begin
populate_db(Source, 1, 5),
@@ -76,7 +70,6 @@ t_fail_bulk_docs({Source, Target}) ->
couch_replicator_notifier:stop(Listener)
end).
-
t_fail_changes_reader({Source, Target}) ->
?_test(begin
populate_db(Source, 1, 5),
@@ -93,7 +86,6 @@ t_fail_changes_reader({Source, Target}) ->
couch_replicator_notifier:stop(Listener)
end).
-
t_fail_revs_diff({Source, Target}) ->
?_test(begin
populate_db(Source, 1, 5),
@@ -110,7 +102,6 @@ t_fail_revs_diff({Source, Target}) ->
couch_replicator_notifier:stop(Listener)
end).
-
t_fail_changes_queue({Source, Target}) ->
?_test(begin
populate_db(Source, 1, 5),
@@ -130,7 +121,6 @@ t_fail_changes_queue({Source, Target}) ->
couch_replicator_notifier:stop(Listener)
end).
-
t_fail_changes_manager({Source, Target}) ->
?_test(begin
populate_db(Source, 1, 5),
@@ -150,7 +140,6 @@ t_fail_changes_manager({Source, Target}) ->
couch_replicator_notifier:stop(Listener)
end).
-
t_fail_changes_reader_proc({Source, Target}) ->
?_test(begin
populate_db(Source, 1, 5),
@@ -170,9 +159,10 @@ t_fail_changes_reader_proc({Source, Target}) ->
couch_replicator_notifier:stop(Listener)
end).
-
mock_fail_req(Path, Return) ->
- meck:expect(ibrowse, send_req_direct,
+ meck:expect(
+ ibrowse,
+ send_req_direct,
fun(W, Url, Headers, Meth, Body, Opts, TOut) ->
Args = [W, Url, Headers, Meth, Body, Opts, TOut],
{ok, {_, _, _, _, UPath, _}} = http_uri:parse(Url),
@@ -180,18 +170,19 @@ mock_fail_req(Path, Return) ->
true -> Return;
false -> meck:passthrough(Args)
end
- end).
-
+ end
+ ).
rep_result_listener(RepId) ->
ReplyTo = self(),
{ok, _Listener} = couch_replicator_notifier:start_link(
- fun({_, RepId2, _} = Ev) when RepId2 =:= RepId ->
+ fun
+ ({_, RepId2, _} = Ev) when RepId2 =:= RepId ->
ReplyTo ! Ev;
(_) ->
ok
- end).
-
+ end
+ ).
wait_rep_result(RepId) ->
receive
@@ -199,19 +190,15 @@ wait_rep_result(RepId) ->
{error, RepId, Reason} -> {error, Reason}
end.
-
-
setup_db() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
ok = couch_db:close(Db),
DbName.
-
teardown_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
populate_db(DbName, Start, End) ->
{ok, Db} = couch_db:open_int(DbName, []),
Docs = lists:foldl(
@@ -220,11 +207,12 @@ populate_db(DbName, Start, End) ->
Doc = #doc{id = Id, body = {[]}},
[Doc | Acc]
end,
- [], lists:seq(Start, End)),
+ [],
+ lists:seq(Start, End)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs, []),
ok = couch_db:close(Db).
-
wait_target_in_sync(Source, Target) ->
{ok, SourceDb} = couch_db:open_int(Source, []),
{ok, SourceInfo} = couch_db:get_db_info(SourceDb),
@@ -232,13 +220,14 @@ wait_target_in_sync(Source, Target) ->
SourceDocCount = couch_util:get_value(doc_count, SourceInfo),
wait_target_in_sync_loop(SourceDocCount, Target, 300).
-
wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
- erlang:error({assertion_failed, [
- {module, ?MODULE}, {line, ?LINE},
- {reason, "Could not get source and target databases in sync"}
- ]});
-
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Could not get source and target databases in sync"}
+ ]}
+ );
wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
{ok, Target} = couch_db:open_int(TargetName, []),
{ok, TargetInfo} = couch_db:get_db_info(Target),
@@ -252,19 +241,19 @@ wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
end.
-
replicate(Source, Target) ->
SrcUrl = couch_replicator_test_helper:db_url(Source),
TgtUrl = couch_replicator_test_helper:db_url(Target),
- RepObject = {[
- {<<"source">>, SrcUrl},
- {<<"target">>, TgtUrl},
- {<<"continuous">>, true},
- {<<"worker_processes">>, 1},
- {<<"retries_per_request">>, 1},
- % Low connection timeout so _changes feed gets restarted quicker
- {<<"connection_timeout">>, 3000}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, SrcUrl},
+ {<<"target">>, TgtUrl},
+ {<<"continuous">>, true},
+ {<<"worker_processes">>, 1},
+ {<<"retries_per_request">>, 1},
+ % Low connection timeout so _changes feed gets restarted quicker
+ {<<"connection_timeout">>, 3000}
+ ]},
{ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
ok = couch_replicator_scheduler:add_job(Rep),
couch_replicator_scheduler:reschedule(),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl
index 7ac9a4d71..41f1772e6 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_filtered_tests.erl
@@ -16,35 +16,43 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/src/couch_replicator.hrl").
--define(DDOC, {[
- {<<"_id">>, <<"_design/filter_ddoc">>},
- {<<"filters">>, {[
- {<<"testfilter">>, <<"
- function(doc, req){if (doc.class == 'mammal') return true;}
- ">>},
- {<<"queryfilter">>, <<"
- function(doc, req) {
- if (doc.class && req.query.starts) {
- return doc.class.indexOf(req.query.starts) === 0;
- }
- else {
- return false;
- }
- }
- ">>}
- ]}},
- {<<"views">>, {[
- {<<"mammals">>, {[
- {<<"map">>, <<"
- function(doc) {
- if (doc.class == 'mammal') {
- emit(doc._id, null);
- }
- }
- ">>}
- ]}}
- ]}}
-]}).
+-define(DDOC,
+ {[
+ {<<"_id">>, <<"_design/filter_ddoc">>},
+ {<<"filters">>,
+ {[
+ {<<"testfilter">>,
+ <<"\n"
+ " function(doc, req){if (doc.class == 'mammal') return true;}\n"
+ " ">>},
+ {<<"queryfilter">>,
+ <<"\n"
+ " function(doc, req) {\n"
+ " if (doc.class && req.query.starts) {\n"
+ " return doc.class.indexOf(req.query.starts) === 0;\n"
+ " }\n"
+ " else {\n"
+ " return false;\n"
+ " }\n"
+ " }\n"
+ " ">>}
+ ]}},
+ {<<"views">>,
+ {[
+ {<<"mammals">>,
+ {[
+ {<<"map">>,
+ <<"\n"
+ " function(doc) {\n"
+ " if (doc.class == 'mammal') {\n"
+ " emit(doc._id, null);\n"
+ " }\n"
+ " }\n"
+ " ">>}
+ ]}}
+ ]}}
+ ]}
+).
setup(_) ->
Ctx = test_util:start_couch([couch_replicator]),
@@ -65,7 +73,8 @@ filtered_replication_test_() ->
"Filtered replication tests",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{Pair, fun should_succeed/2} || Pair <- Pairs]
}
}.
@@ -76,7 +85,8 @@ query_filtered_replication_test_() ->
"Filtered with query replication tests",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{Pair, fun should_succeed_with_query/2} || Pair <- Pairs]
}
}.
@@ -87,17 +97,19 @@ view_filtered_replication_test_() ->
"Filtered with a view replication tests",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{Pair, fun should_succeed_with_view/2} || Pair <- Pairs]
}
}.
should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"filter">>, <<"filter_ddoc/testfilter">>}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)},
+ {<<"filter">>, <<"filter_ddoc/testfilter">>}
+ ]},
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
%% FilteredFun is an Erlang version of following JS function
%% function(doc, req){if (doc.class == 'mammal') return true;}
@@ -107,22 +119,24 @@ should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
{ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
{lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
{"Target DB has proper number of docs",
- ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
+ ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
{"Target DB doesn't have deleted docs",
- ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
+ ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
{"All the docs filtered as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
+ ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
]}.
should_succeed_with_query({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"filter">>, <<"filter_ddoc/queryfilter">>},
- {<<"query_params">>, {[
- {<<"starts">>, <<"a">>}
- ]}}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)},
+ {<<"filter">>, <<"filter_ddoc/queryfilter">>},
+ {<<"query_params">>,
+ {[
+ {<<"starts">>, <<"a">>}
+ ]}}
+ ]},
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
FilterFun = fun(_DocId, {Props}) ->
case couch_util:get_value(<<"class">>, Props) of
@@ -133,22 +147,24 @@ should_succeed_with_query({From, To}, {_Ctx, {Source, Target}}) ->
{ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
{lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
{"Target DB has proper number of docs",
- ?_assertEqual(2, proplists:get_value(doc_count, TargetDbInfo))},
+ ?_assertEqual(2, proplists:get_value(doc_count, TargetDbInfo))},
{"Target DB doesn't have deleted docs",
- ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
+ ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
{"All the docs filtered as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
+ ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
]}.
should_succeed_with_view({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"filter">>, <<"_view">>},
- {<<"query_params">>, {[
- {<<"view">>, <<"filter_ddoc/mammals">>}
- ]}}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)},
+ {<<"filter">>, <<"_view">>},
+ {<<"query_params">>,
+ {[
+ {<<"view">>, <<"filter_ddoc/mammals">>}
+ ]}}
+ ]},
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
FilterFun = fun(_DocId, {Props}) ->
couch_util:get_value(<<"class">>, Props) == <<"mammal">>
@@ -156,11 +172,11 @@ should_succeed_with_view({From, To}, {_Ctx, {Source, Target}}) ->
{ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
{lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
{"Target DB has proper number of docs",
- ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
+ ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
{"Target DB doesn't have deleted docs",
- ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
+ ?_assertEqual(0, proplists:get_value(doc_del_count, TargetDbInfo))},
{"All the docs filtered as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
+ ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
]}.
compare_dbs(Source, Target, FilterFun) ->
@@ -173,10 +189,10 @@ compare_dbs(Source, Target, FilterFun) ->
case FilterFun(DocId, SourceDoc) of
true ->
ValidReply = {ok, DocId, SourceDoc} == TargetReply,
- {ok, [ValidReply|Acc]};
+ {ok, [ValidReply | Acc]};
false ->
ValidReply = {not_found, missing} == TargetReply,
- {ok, [ValidReply|Acc]}
+ {ok, [ValidReply | Acc]}
end
end,
{ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []),
@@ -203,30 +219,34 @@ create_db() ->
create_docs(DbName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
DDoc = couch_doc:from_json_obj(?DDOC),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>},
- {<<"class">>, <<"mammal">>},
- {<<"value">>, 1}
-
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>},
- {<<"class">>, <<"amphibians">>},
- {<<"value">>, 2}
-
- ]}),
- Doc3 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc3">>},
- {<<"class">>, <<"reptiles">>},
- {<<"value">>, 3}
-
- ]}),
- Doc4 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc4">>},
- {<<"class">>, <<"arthropods">>},
- {<<"value">>, 2}
-
- ]}),
+ Doc1 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc1">>},
+ {<<"class">>, <<"mammal">>},
+ {<<"value">>, 1}
+ ]}
+ ),
+ Doc2 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc2">>},
+ {<<"class">>, <<"amphibians">>},
+ {<<"value">>, 2}
+ ]}
+ ),
+ Doc3 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc3">>},
+ {<<"class">>, <<"reptiles">>},
+ {<<"value">>, 3}
+ ]}
+ ),
+ Doc4 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc4">>},
+ {<<"class">>, <<"arthropods">>},
+ {<<"value">>, 2}
+ ]}
+ ),
{ok, _} = couch_db:update_docs(Db, [DDoc, Doc1, Doc2, Doc3, Doc4]),
couch_db:close(Db).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl
index c4ad4e9b6..31f1da48e 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_httpc_pool_tests.erl
@@ -17,23 +17,23 @@
-define(TIMEOUT, 1000).
-
setup() ->
spawn_pool().
teardown(Pool) ->
stop_pool(Pool).
-
httpc_pool_test_() ->
{
"httpc pool tests",
{
setup,
- fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1,
+ fun() -> test_util:start_couch([couch_replicator]) end,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun should_block_new_clients_when_full/1,
fun should_replace_worker_on_death/1
@@ -42,7 +42,6 @@ httpc_pool_test_() ->
}
}.
-
should_block_new_clients_when_full(Pool) ->
?_test(begin
Client1 = spawn_client(Pool),
@@ -77,7 +76,9 @@ should_block_new_clients_when_full(Pool) ->
lists:foreach(
fun(C) ->
?assertEqual(ok, stop_client(C))
- end, [Client2, Client3, Client4])
+ end,
+ [Client2, Client3, Client4]
+ )
end).
should_replace_worker_on_death(Pool) ->
@@ -100,7 +101,6 @@ should_replace_worker_on_death(Pool) ->
?assertEqual(ok, stop_client(Client2))
end).
-
spawn_client(Pool) ->
Parent = self(),
Ref = make_ref(),
@@ -126,9 +126,12 @@ get_client_worker({Pid, Ref}, ClientName) ->
Worker
after ?TIMEOUT ->
erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, "Timeout getting client " ++ ClientName ++ " worker"}]})
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Timeout getting client " ++ ClientName ++ " worker"}
+ ]}
+ )
end.
stop_client({Pid, Ref}) ->
@@ -155,7 +158,7 @@ loop(Parent, Ref, Worker, Pool) ->
ping ->
Parent ! {pong, Ref},
loop(Parent, Ref, Worker, Pool);
- get_worker ->
+ get_worker ->
Parent ! {worker, Ref, Worker},
loop(Parent, Ref, Worker, Pool);
stop ->
@@ -167,7 +170,8 @@ spawn_pool() ->
Host = config:get("httpd", "bind_address", "127.0.0.1"),
Port = config:get("httpd", "port", "5984"),
{ok, Pool} = couch_replicator_httpc_pool:start_link(
- "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]),
+ "http://" ++ Host ++ ":" ++ Port, [{max_connections, 3}]
+ ),
Pool.
stop_pool(Pool) ->
diff --git a/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl
index a4696c4b8..9ed415a29 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_id_too_long_tests.erl
@@ -16,7 +16,6 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/src/couch_replicator.hrl").
-
setup(_) ->
Ctx = test_util:start_couch([couch_replicator]),
Source = create_db(),
@@ -24,47 +23,47 @@ setup(_) ->
Target = create_db(),
{Ctx, {Source, Target}}.
-
teardown(_, {Ctx, {Source, Target}}) ->
delete_db(Source),
delete_db(Target),
config:set("replicator", "max_document_id_length", "infinity"),
ok = test_util:stop_couch(Ctx).
-
id_too_long_replication_test_() ->
Pairs = [{remote, remote}],
{
"Doc id too long tests",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{Pair, fun should_succeed/2} || Pair <- Pairs] ++
- [{Pair, fun should_fail/2} || Pair <- Pairs]
+ [{Pair, fun should_fail/2} || Pair <- Pairs]
}
}.
-
should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)}
+ ]},
config:set("replicator", "max_document_id_length", "5"),
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
?_assertEqual(ok, couch_replicator_test_helper:compare_dbs(Source, Target)).
-
should_fail({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)}
+ ]},
config:set("replicator", "max_document_id_length", "4"),
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
- ?_assertError({badmatch, {not_found, missing}},
- couch_replicator_test_helper:compare_dbs(Source, Target)).
-
+ ?_assertError(
+ {badmatch, {not_found, missing}},
+ couch_replicator_test_helper:compare_dbs(Source, Target)
+ ).
create_db() ->
DbName = ?tempdb(),
@@ -72,18 +71,15 @@ create_db() ->
ok = couch_db:close(Db),
DbName.
-
create_doc(DbName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
Doc = couch_doc:from_json_obj({[{<<"_id">>, <<"12345">>}]}),
{ok, _} = couch_db:update_doc(Db, Doc, []),
couch_db:close(Db).
-
delete_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]).
-
db_url(remote, DbName) ->
Addr = config:get("httpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(couch_httpd, port),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl
index 27c89a0cd..2f0e2a1f0 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_large_atts_tests.erl
@@ -26,7 +26,6 @@
-define(DOCS_COUNT, 11).
-define(TIMEOUT_EUNIT, 120).
-
setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
@@ -61,18 +60,24 @@ large_atts_test_() ->
"Replicate docs with large attachments",
{
foreachx,
- fun setup/1, fun teardown/2,
- [{Pair, fun should_populate_replicate_compact/2}
- || Pair <- Pairs]
+ fun setup/1,
+ fun teardown/2,
+ [
+ {Pair, fun should_populate_replicate_compact/2}
+ || Pair <- Pairs
+ ]
}
}.
-
should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) ->
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [should_populate_source(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target)]}}.
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target)
+ ]}
+ }.
should_populate_source({remote, Source}) ->
should_populate_source(Source);
@@ -93,7 +98,6 @@ should_compare_databases(Source, {remote, Target}) ->
should_compare_databases(Source, Target) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
-
populate_db(DbName, DocCount) ->
{ok, Db} = couch_db:open_int(DbName, []),
Docs = lists:foldl(
@@ -108,7 +112,9 @@ populate_db(DbName, DocCount) ->
},
[Doc | Acc]
end,
- [], lists:seq(1, DocCount)),
+ [],
+ lists:seq(1, DocCount)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs, []),
couch_db:close(Db).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
index c7933b472..86daa808f 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_many_leaves_tests.erl
@@ -38,7 +38,6 @@ setup() ->
ok = couch_db:close(Db),
DbName.
-
setup(remote) ->
{remote, setup()};
setup({A, B}) ->
@@ -65,23 +64,27 @@ docs_with_many_leaves_test_() ->
"Replicate documents with many leaves",
{
foreachx,
- fun setup/1, fun teardown/2,
- [{Pair, fun should_populate_replicate_compact/2}
- || Pair <- Pairs]
+ fun setup/1,
+ fun teardown/2,
+ [
+ {Pair, fun should_populate_replicate_compact/2}
+ || Pair <- Pairs
+ ]
}
}.
-
should_populate_replicate_compact({From, To}, {_Ctx, {Source, Target}}) ->
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source),
- should_replicate(Source, Target),
- should_verify_target(Source, Target),
- should_add_attachments_to_source(Source),
- should_replicate(Source, Target),
- should_verify_target(Source, Target)
- ]}}.
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source),
+ should_replicate(Source, Target),
+ should_verify_target(Source, Target),
+ should_add_attachments_to_source(Source),
+ should_replicate(Source, Target),
+ should_verify_target(Source, Target)
+ ]}
+ }.
should_populate_source({remote, Source}) ->
should_populate_source(Source);
@@ -100,35 +103,39 @@ should_verify_target({remote, Source}, Target) ->
should_verify_target(Source, {remote, Target}) ->
should_verify_target(Source, Target);
should_verify_target(Source, Target) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, SourceDb} = couch_db:open_int(Source, []),
- {ok, TargetDb} = couch_db:open_int(Target, []),
- verify_target(SourceDb, TargetDb, ?DOCS_CONFLICTS),
- ok = couch_db:close(SourceDb),
- ok = couch_db:close(TargetDb)
- end)}.
+ {timeout, ?TIMEOUT_EUNIT,
+ ?_test(begin
+ {ok, SourceDb} = couch_db:open_int(Source, []),
+ {ok, TargetDb} = couch_db:open_int(Target, []),
+ verify_target(SourceDb, TargetDb, ?DOCS_CONFLICTS),
+ ok = couch_db:close(SourceDb),
+ ok = couch_db:close(TargetDb)
+ end)}.
should_add_attachments_to_source({remote, Source}) ->
should_add_attachments_to_source(Source);
should_add_attachments_to_source(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(begin
- {ok, SourceDb} = couch_db:open_int(Source, [?ADMIN_CTX]),
- add_attachments(SourceDb, ?NUM_ATTS, ?DOCS_CONFLICTS),
- ok = couch_db:close(SourceDb)
- end)}.
+ {timeout, ?TIMEOUT_EUNIT,
+ ?_test(begin
+ {ok, SourceDb} = couch_db:open_int(Source, [?ADMIN_CTX]),
+ add_attachments(SourceDb, ?NUM_ATTS, ?DOCS_CONFLICTS),
+ ok = couch_db:close(SourceDb)
+ end)}.
populate_db(DbName) ->
{ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]),
lists:foreach(
- fun({DocId, NumConflicts}) ->
+ fun({DocId, NumConflicts}) ->
Value = <<"0">>,
Doc = #doc{
id = DocId,
- body = {[ {<<"value">>, Value} ]}
+ body = {[{<<"value">>, Value}]}
},
{ok, _} = couch_db:update_doc(Db, Doc, [?ADMIN_CTX]),
{ok, _} = add_doc_siblings(Db, DocId, NumConflicts)
- end, ?DOCS_CONFLICTS),
+ end,
+ ?DOCS_CONFLICTS
+ ),
couch_db:close(Db).
add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
@@ -137,17 +144,21 @@ add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
{ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
{ok, AccRevs};
-
add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
Value = ?l2b(?i2l(NumLeaves)),
Rev = couch_hash:md5_hash(Value),
Doc = #doc{
id = DocId,
revs = {1, [Rev]},
- body = {[ {<<"value">>, Value} ]}
+ body = {[{<<"value">>, Value}]}
},
- add_doc_siblings(Db, DocId, NumLeaves - 1,
- [Doc | AccDocs], [{1, Rev} | AccRevs]).
+ add_doc_siblings(
+ Db,
+ DocId,
+ NumLeaves - 1,
+ [Doc | AccDocs],
+ [{1, Rev} | AccRevs]
+ ).
verify_target(_SourceDb, _TargetDb, []) ->
ok;
@@ -156,12 +167,14 @@ verify_target(SourceDb, TargetDb, [{DocId, NumConflicts} | Rest]) ->
SourceDb,
DocId,
all,
- [conflicts, deleted_conflicts]),
+ [conflicts, deleted_conflicts]
+ ),
{ok, TargetLookups} = couch_db:open_doc_revs(
TargetDb,
DocId,
all,
- [conflicts, deleted_conflicts]),
+ [conflicts, deleted_conflicts]
+ ),
SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
TargetDocs = [Doc || {ok, Doc} <- TargetLookups],
Total = NumConflicts + 1,
@@ -172,34 +185,45 @@ verify_target(SourceDb, TargetDb, [{DocId, NumConflicts} | Rest]) ->
TargetJson = couch_doc:to_json_obj(TargetDoc, [attachments]),
?assertEqual(SourceJson, TargetJson)
end,
- lists:zip(SourceDocs, TargetDocs)),
+ lists:zip(SourceDocs, TargetDocs)
+ ),
verify_target(SourceDb, TargetDb, Rest).
-add_attachments(_SourceDb, _NumAtts, []) ->
+add_attachments(_SourceDb, _NumAtts, []) ->
ok;
-add_attachments(SourceDb, NumAtts, [{DocId, NumConflicts} | Rest]) ->
+add_attachments(SourceDb, NumAtts, [{DocId, NumConflicts} | Rest]) ->
{ok, SourceLookups} = couch_db:open_doc_revs(SourceDb, DocId, all, []),
SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
Total = NumConflicts + 1,
?assertEqual(Total, length(SourceDocs)),
NewDocs = lists:foldl(
fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
- NewAtts = lists:foldl(fun(I, AttAcc) ->
- AttData = crypto:strong_rand_bytes(100),
- NewAtt = couch_att:new([
- {name, ?io2b(["att_", ?i2l(I), "_",
- couch_doc:rev_to_str({Pos, Rev})])},
- {type, <<"application/foobar">>},
- {att_len, byte_size(AttData)},
- {data, AttData}
- ]),
- [NewAtt | AttAcc]
- end, [], lists:seq(1, NumAtts)),
+ NewAtts = lists:foldl(
+ fun(I, AttAcc) ->
+ AttData = crypto:strong_rand_bytes(100),
+ NewAtt = couch_att:new([
+ {name,
+ ?io2b([
+ "att_",
+ ?i2l(I),
+ "_",
+ couch_doc:rev_to_str({Pos, Rev})
+ ])},
+ {type, <<"application/foobar">>},
+ {att_len, byte_size(AttData)},
+ {data, AttData}
+ ]),
+ [NewAtt | AttAcc]
+ end,
+ [],
+ lists:seq(1, NumAtts)
+ ),
[Doc#doc{atts = Atts ++ NewAtts} | Acc]
end,
- [], SourceDocs),
+ [],
+ SourceDocs
+ ),
{ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
NewRevs = [R || {ok, R} <- UpdateResults],
?assertEqual(length(NewDocs), length(NewRevs)),
add_attachments(SourceDb, NumAtts, Rest).
-
diff --git a/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl
index ff08b5ee5..ff3b5ee98 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_missing_stubs_tests.erl
@@ -56,24 +56,28 @@ missing_stubs_test_() ->
"Replicate docs with missing stubs (COUCHDB-1365)",
{
foreachx,
- fun setup/1, fun teardown/2,
- [{Pair, fun should_replicate_docs_with_missed_att_stubs/2}
- || Pair <- Pairs]
+ fun setup/1,
+ fun teardown/2,
+ [
+ {Pair, fun should_replicate_docs_with_missed_att_stubs/2}
+ || Pair <- Pairs
+ ]
}
}.
-
should_replicate_docs_with_missed_att_stubs({From, To}, {_Ctx, {Source, Target}}) ->
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source),
- should_set_target_revs_limit(Target, ?REVS_LIMIT),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target),
- should_update_source_docs(Source, ?REVS_LIMIT * 2),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target)
- ]}}.
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source),
+ should_set_target_revs_limit(Target, ?REVS_LIMIT),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target),
+ should_update_source_docs(Source, ?REVS_LIMIT * 2),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target)
+ ]}
+ }.
should_populate_source({remote, Source}) ->
should_populate_source(Source);
@@ -108,7 +112,6 @@ should_update_source_docs({remote, Source}, Times) ->
should_update_source_docs(Source, Times) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(update_db_docs(Source, Times))}.
-
populate_db(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
AttData = crypto:strong_rand_bytes(6000),
@@ -120,7 +123,7 @@ populate_db(DbName) ->
{type, <<"application/foobar">>},
{att_len, byte_size(AttData)},
{data, AttData}
- ])
+ ])
]
},
{ok, _} = couch_db:update_doc(Db, Doc, []),
@@ -132,7 +135,8 @@ update_db_docs(DbName, Times) ->
Db,
fun(FDI, Acc) -> db_fold_fun(FDI, Acc) end,
{DbName, Times},
- []),
+ []
+ ),
ok = couch_db:close(Db).
db_fold_fun(FullDocInfo, {DbName, Times}) ->
@@ -149,6 +153,7 @@ db_fold_fun(FullDocInfo, {DbName, Times}) ->
NewRev
end,
{element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
- lists:seq(1, Times)),
+ lists:seq(1, Times)
+ ),
ok = couch_db:close(Db),
{ok, {DbName, Times}}.
diff --git a/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl
index da46b8a26..ca1816b33 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_proxy_tests.erl
@@ -16,24 +16,23 @@
-include_lib("couch_replicator/src/couch_replicator.hrl").
-include_lib("couch_replicator/include/couch_replicator_api_wrap.hrl").
-
setup() ->
ok.
-
teardown(_) ->
ok.
-
replicator_proxy_test_() ->
{
"replicator proxy tests",
{
setup,
- fun() -> test_util:start_couch([couch_replicator]) end, fun test_util:stop_couch/1,
+ fun() -> test_util:start_couch([couch_replicator]) end,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun parse_rep_doc_without_proxy/1,
fun parse_rep_doc_with_proxy/1,
@@ -45,72 +44,80 @@ replicator_proxy_test_() ->
}
}.
-
parse_rep_doc_without_proxy(_) ->
?_test(begin
- NoProxyDoc = {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>}
- ]},
+ NoProxyDoc =
+ {[
+ {<<"source">>, <<"http://unproxied.com">>},
+ {<<"target">>, <<"http://otherunproxied.com">>}
+ ]},
Rep = couch_replicator_docs:parse_rep_doc(NoProxyDoc),
?assertEqual((Rep#rep.source)#httpdb.proxy_url, undefined),
?assertEqual((Rep#rep.target)#httpdb.proxy_url, undefined)
end).
-
parse_rep_doc_with_proxy(_) ->
?_test(begin
ProxyURL = <<"http://myproxy.com">>,
- ProxyDoc = {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"proxy">>, ProxyURL}
- ]},
+ ProxyDoc =
+ {[
+ {<<"source">>, <<"http://unproxied.com">>},
+ {<<"target">>, <<"http://otherunproxied.com">>},
+ {<<"proxy">>, ProxyURL}
+ ]},
Rep = couch_replicator_docs:parse_rep_doc(ProxyDoc),
?assertEqual((Rep#rep.source)#httpdb.proxy_url, binary_to_list(ProxyURL)),
?assertEqual((Rep#rep.target)#httpdb.proxy_url, binary_to_list(ProxyURL))
end).
-
parse_rep_source_target_proxy(_) ->
?_test(begin
SrcProxyURL = <<"http://mysrcproxy.com">>,
TgtProxyURL = <<"http://mytgtproxy.com:9999">>,
- ProxyDoc = {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"source_proxy">>, SrcProxyURL},
- {<<"target_proxy">>, TgtProxyURL}
- ]},
+ ProxyDoc =
+ {[
+ {<<"source">>, <<"http://unproxied.com">>},
+ {<<"target">>, <<"http://otherunproxied.com">>},
+ {<<"source_proxy">>, SrcProxyURL},
+ {<<"target_proxy">>, TgtProxyURL}
+ ]},
Rep = couch_replicator_docs:parse_rep_doc(ProxyDoc),
- ?assertEqual((Rep#rep.source)#httpdb.proxy_url,
- binary_to_list(SrcProxyURL)),
- ?assertEqual((Rep#rep.target)#httpdb.proxy_url,
- binary_to_list(TgtProxyURL))
+ ?assertEqual(
+ (Rep#rep.source)#httpdb.proxy_url,
+ binary_to_list(SrcProxyURL)
+ ),
+ ?assertEqual(
+ (Rep#rep.target)#httpdb.proxy_url,
+ binary_to_list(TgtProxyURL)
+ )
end).
-
mutually_exclusive_proxy_and_source_proxy(_) ->
?_test(begin
- ProxyDoc = {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"proxy">>, <<"oldstyleproxy.local">>},
- {<<"source_proxy">>, <<"sourceproxy.local">>}
- ]},
- ?assertThrow({bad_rep_doc, _},
- couch_replicator_docs:parse_rep_doc(ProxyDoc))
+ ProxyDoc =
+ {[
+ {<<"source">>, <<"http://unproxied.com">>},
+ {<<"target">>, <<"http://otherunproxied.com">>},
+ {<<"proxy">>, <<"oldstyleproxy.local">>},
+ {<<"source_proxy">>, <<"sourceproxy.local">>}
+ ]},
+ ?assertThrow(
+ {bad_rep_doc, _},
+ couch_replicator_docs:parse_rep_doc(ProxyDoc)
+ )
end).
-
mutually_exclusive_proxy_and_target_proxy(_) ->
?_test(begin
- ProxyDoc = {[
- {<<"source">>, <<"http://unproxied.com">>},
- {<<"target">>, <<"http://otherunproxied.com">>},
- {<<"proxy">>, <<"oldstyleproxy.local">>},
- {<<"target_proxy">>, <<"targetproxy.local">>}
- ]},
- ?assertThrow({bad_rep_doc, _},
- couch_replicator_docs:parse_rep_doc(ProxyDoc))
+ ProxyDoc =
+ {[
+ {<<"source">>, <<"http://unproxied.com">>},
+ {<<"target">>, <<"http://otherunproxied.com">>},
+ {<<"proxy">>, <<"oldstyleproxy.local">>},
+ {<<"target_proxy">>, <<"targetproxy.local">>}
+ ]},
+ ?assertThrow(
+ {bad_rep_doc, _},
+ couch_replicator_docs:parse_rep_doc(ProxyDoc)
+ )
end).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl
index 034550aec..a214d4607 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_rate_limiter_tests.erl
@@ -2,7 +2,6 @@
-include_lib("couch/include/couch_eunit.hrl").
-
rate_limiter_test_() ->
{
foreach,
@@ -18,19 +17,16 @@ rate_limiter_test_() ->
]
}.
-
t_new_key() ->
?_test(begin
?assertEqual(0, couch_replicator_rate_limiter:interval({"foo", get}))
end).
-
t_1_failure() ->
?_test(begin
?assertEqual(24, couch_replicator_rate_limiter:failure({"foo", get}))
end).
-
t_2_failures() ->
?_test(begin
couch_replicator_rate_limiter:failure({"foo", get}),
@@ -39,7 +35,6 @@ t_2_failures() ->
?assertEqual(29, Interval)
end).
-
t_2_failures_back_to_back() ->
?_test(begin
couch_replicator_rate_limiter:failure({"foo", get}),
@@ -47,7 +42,6 @@ t_2_failures_back_to_back() ->
?assertEqual(24, Interval)
end).
-
t_success_threshold() ->
?_test(begin
Interval = couch_replicator_rate_limiter:success({"foo", get}),
@@ -56,7 +50,6 @@ t_success_threshold() ->
?assertEqual(0, Interval)
end).
-
t_1_failure_2_successes() ->
?_test(begin
couch_replicator_rate_limiter:failure({"foo", get}),
@@ -68,16 +61,13 @@ t_1_failure_2_successes() ->
?assertEqual(0, Succ2)
end).
-
low_pass_filter_delay() ->
timer:sleep(100).
-
setup() ->
{ok, Pid} = couch_replicator_rate_limiter:start_link(),
Pid.
-
teardown(Pid) ->
Ref = erlang:monitor(process, Pid),
unlink(Pid),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
index a9a0fc943..ba29a3c68 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_retain_stats_between_job_runs.erl
@@ -19,27 +19,22 @@
-define(DELAY, 500).
-define(TIMEOUT, 60000).
-
setup_all() ->
test_util:start_couch([couch_replicator, chttpd, mem3, fabric]).
-
teardown_all(Ctx) ->
ok = test_util:stop_couch(Ctx).
-
setup() ->
Source = setup_db(),
Target = setup_db(),
{Source, Target}.
-
teardown({Source, Target}) ->
teardown_db(Source),
teardown_db(Target),
ok.
-
stats_retained_test_() ->
{
setup,
@@ -56,7 +51,6 @@ stats_retained_test_() ->
}
}.
-
t_stats_retained_by_scheduler({Source, Target}) ->
?_test(begin
{ok, _} = add_vdu(Target),
@@ -76,13 +70,13 @@ t_stats_retained_by_scheduler({Source, Target}) ->
couch_replicator_scheduler:remove_job(RepId)
end).
-
t_stats_retained_on_job_removal({Source, Target}) ->
?_test(begin
{ok, _} = add_vdu(Target),
populate_db_reject_even_docs(Source, 1, 10),
{ok, _, RepId} = replicate(Source, Target),
- wait_target_in_sync(6, Target), % 5 + 1 vdu
+ % 5 + 1 vdu
+ wait_target_in_sync(6, Target),
check_active_tasks(10, 5, 5),
check_scheduler_jobs(10, 5, 5),
@@ -91,7 +85,8 @@ t_stats_retained_on_job_removal({Source, Target}) ->
populate_db_reject_even_docs(Source, 11, 20),
{ok, _, RepId} = replicate(Source, Target),
- wait_target_in_sync(11, Target), % 6 + 5
+ % 6 + 5
+ wait_target_in_sync(11, Target),
check_scheduler_jobs(20, 10, 10),
check_active_tasks(20, 10, 10),
@@ -100,7 +95,8 @@ t_stats_retained_on_job_removal({Source, Target}) ->
populate_db_reject_even_docs(Source, 21, 30),
{ok, _, RepId} = replicate(Source, Target),
- wait_target_in_sync(16, Target), % 11 + 5
+ % 11 + 5
+ wait_target_in_sync(16, Target),
check_scheduler_jobs(30, 15, 15),
check_active_tasks(30, 15, 15),
@@ -108,19 +104,16 @@ t_stats_retained_on_job_removal({Source, Target}) ->
couch_replicator_scheduler:remove_job(RepId)
end).
-
setup_db() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
ok = couch_db:close(Db),
DbName.
-
teardown_db(DbName) ->
ok = couch_server:delete(DbName, [?ADMIN_CTX]),
ok.
-
stop_job(RepPid) ->
Ref = erlang:monitor(process, RepPid),
gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 0}),
@@ -131,20 +124,22 @@ stop_job(RepPid) ->
erlang:error(timeout)
end.
-
start_job() ->
gen_server:cast(couch_replicator_scheduler, {set_max_jobs, 500}),
couch_replicator_scheduler:reschedule().
-
check_active_tasks(DocsRead, DocsWritten, DocsFailed) ->
RepTask = wait_for_task_status(DocsWritten),
?assertNotEqual(timeout, RepTask),
?assertEqual(DocsRead, couch_util:get_value(docs_read, RepTask)),
?assertEqual(DocsWritten, couch_util:get_value(docs_written, RepTask)),
- ?assertEqual(DocsFailed, couch_util:get_value(doc_write_failures,
- RepTask)).
-
+ ?assertEqual(
+ DocsFailed,
+ couch_util:get_value(
+ doc_write_failures,
+ RepTask
+ )
+ ).
check_scheduler_jobs(DocsRead, DocsWritten, DocFailed) ->
Info = wait_scheduler_info(DocsWritten),
@@ -160,17 +155,19 @@ check_scheduler_jobs(DocsRead, DocsWritten, DocFailed) ->
?assertMatch(#{<<"docs_written">> := DocsWritten}, Info),
?assertMatch(#{<<"doc_write_failures">> := DocFailed}, Info).
-
replication_tasks() ->
- lists:filter(fun(P) ->
- couch_util:get_value(type, P) =:= replication
- end, couch_task_status:all()).
-
+ lists:filter(
+ fun(P) ->
+ couch_util:get_value(type, P) =:= replication
+ end,
+ couch_task_status:all()
+ ).
wait_for_task_status(DocsWritten) ->
test_util:wait(fun() ->
case replication_tasks() of
- [] -> wait;
+ [] ->
+ wait;
[RepTask] ->
case couch_util:get_value(docs_written, RepTask) of
DocsWritten -> RepTask;
@@ -179,12 +176,13 @@ wait_for_task_status(DocsWritten) ->
end
end).
-
wait_scheduler_info(DocsWritten) ->
test_util:wait(fun() ->
case scheduler_jobs() of
- [] -> wait;
- [#{<<"info">> := null}] -> wait;
+ [] ->
+ wait;
+ [#{<<"info">> := null}] ->
+ wait;
[#{<<"info">> := Info}] ->
case maps:get(<<"docs_written">>, Info, undefined) of
DocsWritten -> Info;
@@ -193,7 +191,6 @@ wait_scheduler_info(DocsWritten) ->
end
end).
-
populate_db_reject_even_docs(DbName, Start, End) ->
BodyFun = fun(Id) ->
case Id rem 2 == 0 of
@@ -203,7 +200,6 @@ populate_db_reject_even_docs(DbName, Start, End) ->
end,
populate_db(DbName, Start, End, BodyFun).
-
populate_db(DbName, Start, End, BodyFun) when is_function(BodyFun, 1) ->
{ok, Db} = couch_db:open_int(DbName, []),
Docs = lists:foldl(
@@ -212,27 +208,29 @@ populate_db(DbName, Start, End, BodyFun) when is_function(BodyFun, 1) ->
Doc = #doc{id = Id, body = BodyFun(DocIdCounter)},
[Doc | Acc]
end,
- [], lists:seq(Start, End)),
+ [],
+ lists:seq(Start, End)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs, []),
ok = couch_db:close(Db).
-
wait_target_in_sync(DocCount, Target) when is_integer(DocCount) ->
wait_target_in_sync_loop(DocCount, Target, 300).
-
wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
- erlang:error({assertion_failed, [
- {module, ?MODULE}, {line, ?LINE},
- {reason, "Could not get source and target databases in sync"}
- ]});
-
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason, "Could not get source and target databases in sync"}
+ ]}
+ );
wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
{ok, Target} = couch_db:open_int(TargetName, []),
{ok, TargetInfo} = couch_db:get_db_info(Target),
ok = couch_db:close(Target),
TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
- case TargetDocCount == DocCount of
+ case TargetDocCount == DocCount of
true ->
true;
false ->
@@ -240,22 +238,21 @@ wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
end.
-
replicate(Source, Target) ->
SrcUrl = couch_replicator_test_helper:db_url(Source),
TgtUrl = couch_replicator_test_helper:db_url(Target),
- RepObject = {[
- {<<"source">>, SrcUrl},
- {<<"target">>, TgtUrl},
- {<<"continuous">>, true}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, SrcUrl},
+ {<<"target">>, TgtUrl},
+ {<<"continuous">>, true}
+ ]},
{ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
ok = couch_replicator_scheduler:add_job(Rep),
couch_replicator_scheduler:reschedule(),
Pid = couch_replicator_test_helper:get_pid(Rep#rep.id),
{ok, Pid, Rep#rep.id}.
-
scheduler_jobs() ->
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -264,16 +261,14 @@ scheduler_jobs() ->
Json = jiffy:decode(Body, [return_maps]),
maps:get(<<"jobs">>, Json).
-
vdu() ->
- <<"function(newDoc, oldDoc, userCtx) {
- if(newDoc.nope === true) {
- throw({forbidden: 'nope'});
- } else {
- return;
- }
- }">>.
-
+ <<"function(newDoc, oldDoc, userCtx) {\n"
+ " if(newDoc.nope === true) {\n"
+ " throw({forbidden: 'nope'});\n"
+ " } else {\n"
+ " return;\n"
+ " }\n"
+ " }">>.
add_vdu(DbName) ->
DocProps = [
diff --git a/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl
index 5026c1435..8f61a638c 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_selector_tests.erl
@@ -16,7 +16,6 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_replicator/src/couch_replicator.hrl").
-
setup(_) ->
Ctx = test_util:start_couch([couch_replicator]),
Source = create_db(),
@@ -36,17 +35,19 @@ selector_replication_test_() ->
"Selector filtered replication tests",
{
foreachx,
- fun setup/1, fun teardown/2,
+ fun setup/1,
+ fun teardown/2,
[{Pair, fun should_succeed/2} || Pair <- Pairs]
}
}.
should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
- RepObject = {[
- {<<"source">>, db_url(From, Source)},
- {<<"target">>, db_url(To, Target)},
- {<<"selector">>, {[{<<"_id">>, <<"doc2">>}]}}
- ]},
+ RepObject =
+ {[
+ {<<"source">>, db_url(From, Source)},
+ {<<"target">>, db_url(To, Target)},
+ {<<"selector">>, {[{<<"_id">>, <<"doc2">>}]}}
+ ]},
{ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
%% FilteredFun is an Erlang version of following mango selector
FilterFun = fun(_DocId, {Props}) ->
@@ -55,9 +56,9 @@ should_succeed({From, To}, {_Ctx, {Source, Target}}) ->
{ok, TargetDbInfo, AllReplies} = compare_dbs(Source, Target, FilterFun),
{lists:flatten(io_lib:format("~p -> ~p", [From, To])), [
{"Target DB has proper number of docs",
- ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
+ ?_assertEqual(1, proplists:get_value(doc_count, TargetDbInfo))},
{"All the docs selected as expected",
- ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
+ ?_assert(lists:all(fun(Valid) -> Valid end, AllReplies))}
]}.
compare_dbs(Source, Target, FilterFun) ->
@@ -70,10 +71,10 @@ compare_dbs(Source, Target, FilterFun) ->
case FilterFun(DocId, SourceDoc) of
true ->
ValidReply = {ok, DocId, SourceDoc} == TargetReply,
- {ok, [ValidReply|Acc]};
+ {ok, [ValidReply | Acc]};
false ->
ValidReply = {not_found, missing} == TargetReply,
- {ok, [ValidReply|Acc]}
+ {ok, [ValidReply | Acc]}
end
end,
{ok, AllReplies} = couch_db:fold_docs(SourceDb, Fun, [], []),
@@ -99,12 +100,16 @@ create_db() ->
create_docs(DbName) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
- Doc1 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc1">>}
- ]}),
- Doc2 = couch_doc:from_json_obj({[
- {<<"_id">>, <<"doc2">>}
- ]}),
+ Doc1 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc1">>}
+ ]}
+ ),
+ Doc2 = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"doc2">>}
+ ]}
+ ),
{ok, _} = couch_db:update_docs(Db, [Doc1, Doc2]),
couch_db:close(Db).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
index 91bda33fa..3b020927d 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_small_max_request_size_target.erl
@@ -11,17 +11,14 @@
-define(TIMEOUT_EUNIT, 360).
-
setup() ->
DbName = ?tempdb(),
{ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]),
ok = couch_db:close(Db),
DbName.
-
setup(remote) ->
{remote, setup()};
-
setup({A, B}) ->
Ctx = test_util:start_couch([couch_replicator]),
config:set("chttpd", "max_http_request_size", "10000", false),
@@ -29,7 +26,6 @@ setup({A, B}) ->
Target = setup(B),
{Ctx, {Source, Target}}.
-
teardown({remote, DbName}) ->
teardown(DbName);
teardown(DbName) ->
@@ -42,144 +38,147 @@ teardown(_, {Ctx, {Source, Target}}) ->
ok = application:stop(couch_replicator),
ok = test_util:stop_couch(Ctx).
-
reduce_max_request_size_test_() ->
Pairs = [{remote, remote}],
{
"Replicate docs when target has a small max_http_request_size",
{
foreachx,
- fun setup/1, fun teardown/2,
- [{Pair, fun should_replicate_all_docs/2}
- || Pair <- Pairs]
- ++ [{Pair, fun should_replicate_one/2}
- || Pair <- Pairs]
- % Disabled. See issue 574. Sometimes PUTs with a doc and
- % attachment which exceed maximum request size are simply
- % closed instead of returning a 413 request. That makes these
- % tests flaky.
- ++ [{Pair, fun should_replicate_one_with_attachment/2}
- || Pair <- Pairs]
+ fun setup/1,
+ fun teardown/2,
+ [
+ {Pair, fun should_replicate_all_docs/2}
+ || Pair <- Pairs
+ ] ++
+ [
+ {Pair, fun should_replicate_one/2}
+ || Pair <- Pairs
+ ] ++
+ % Disabled. See issue 574. Sometimes PUTs with a doc and
+ % attachment which exceed maximum request size are simply
+ % closed instead of returning a 413 request. That makes these
+ % tests flaky.
+ [
+ {Pair, fun should_replicate_one_with_attachment/2}
+ || Pair <- Pairs
+ ]
}
}.
-
% Test documents which are below max_http_request_size but when batched, batch size
% will be greater than max_http_request_size. Replicator could automatically split
% the batch into smaller batches and POST those separately.
should_replicate_all_docs({From, To}, {_Ctx, {Source, Target}}) ->
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [should_populate_source(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target, [])]}}.
-
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target, [])
+ ]}
+ }.
% If a document is too large to post as a single request, that document is
% skipped but replication overall will make progress and not crash.
should_replicate_one({From, To}, {_Ctx, {Source, Target}}) ->
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [should_populate_source_one_large_one_small(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target, [<<"doc0">>])]}}.
-
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source_one_large_one_small(Source),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target, [<<"doc0">>])
+ ]}
+ }.
% If a document has an attachment > 64 * 1024 bytes, replicator will switch to
% POST-ing individual documents directly and skip bulk_docs. Test that case
% separately
% See note in main test function why this was disabled.
should_replicate_one_with_attachment({From, To}, {_Ctx, {Source, Target}}) ->
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [should_populate_source_one_large_attachment(Source),
- should_populate_source(Source),
- should_replicate(Source, Target),
- should_compare_databases(Source, Target, [<<"doc0">>])]}}.
-
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source_one_large_attachment(Source),
+ should_populate_source(Source),
+ should_replicate(Source, Target),
+ should_compare_databases(Source, Target, [<<"doc0">>])
+ ]}
+ }.
should_populate_source({remote, Source}) ->
should_populate_source(Source);
-
should_populate_source(Source) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(add_docs(Source, 5, 3000, 0))}.
-
should_populate_source_one_large_one_small({remote, Source}) ->
should_populate_source_one_large_one_small(Source);
-
should_populate_source_one_large_one_small(Source) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(one_large_one_small(Source, 12000, 3000))}.
-
should_populate_source_one_large_attachment({remote, Source}) ->
- should_populate_source_one_large_attachment(Source);
-
+ should_populate_source_one_large_attachment(Source);
should_populate_source_one_large_attachment(Source) ->
- {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
-
+ {timeout, ?TIMEOUT_EUNIT, ?_test(one_large_attachment(Source, 70000, 70000))}.
should_replicate({remote, Source}, Target) ->
should_replicate(db_url(Source), Target);
-
should_replicate(Source, {remote, Target}) ->
should_replicate(Source, db_url(Target));
-
should_replicate(Source, Target) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(replicate(Source, Target))}.
-
should_compare_databases({remote, Source}, Target, ExceptIds) ->
should_compare_databases(Source, Target, ExceptIds);
-
should_compare_databases(Source, {remote, Target}, ExceptIds) ->
should_compare_databases(Source, Target, ExceptIds);
-
should_compare_databases(Source, Target, ExceptIds) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target, ExceptIds))}.
-
binary_chunk(Size) when is_integer(Size), Size > 0 ->
- << <<"x">> || _ <- lists:seq(1, Size) >>.
-
+ <<<<"x">> || _ <- lists:seq(1, Size)>>.
add_docs(DbName, DocCount, DocSize, AttSize) ->
- [begin
- DocId = iolist_to_binary(["doc", integer_to_list(Id)]),
- add_doc(DbName, DocId, DocSize, AttSize)
- end || Id <- lists:seq(1, DocCount)],
+ [
+ begin
+ DocId = iolist_to_binary(["doc", integer_to_list(Id)]),
+ add_doc(DbName, DocId, DocSize, AttSize)
+ end
+ || Id <- lists:seq(1, DocCount)
+ ],
ok.
-
one_large_one_small(DbName, Large, Small) ->
add_doc(DbName, <<"doc0">>, Large, 0),
add_doc(DbName, <<"doc1">>, Small, 0).
-
one_large_attachment(DbName, Size, AttSize) ->
- add_doc(DbName, <<"doc0">>, Size, AttSize).
-
+ add_doc(DbName, <<"doc0">>, Size, AttSize).
add_doc(DbName, DocId, Size, AttSize) when is_binary(DocId) ->
- {ok, Db} = couch_db:open_int(DbName, []),
- Doc0 = #doc{id = DocId, body = {[{<<"x">>, binary_chunk(Size)}]}},
- Doc = Doc0#doc{atts = atts(AttSize)},
- {ok, _} = couch_db:update_doc(Db, Doc, []),
- couch_db:close(Db).
-
+ {ok, Db} = couch_db:open_int(DbName, []),
+ Doc0 = #doc{id = DocId, body = {[{<<"x">>, binary_chunk(Size)}]}},
+ Doc = Doc0#doc{atts = atts(AttSize)},
+ {ok, _} = couch_db:update_doc(Db, Doc, []),
+ couch_db:close(Db).
atts(0) ->
[];
-
atts(Size) ->
- [couch_att:new([
- {name, <<"att1">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, fun(Bytes) -> binary_chunk(Bytes) end}
- ])].
-
+ [
+ couch_att:new([
+ {name, <<"att1">>},
+ {type, <<"app/binary">>},
+ {att_len, Size},
+ {data, fun(Bytes) -> binary_chunk(Bytes) end}
+ ])
+ ].
replicate(Source, Target) ->
- replicate({[
- {<<"source">>, Source},
- {<<"target">>, Target},
- {<<"worker_processes">>, "1"} % This make batch_size predictable
- ]}).
+ replicate(
+ {[
+ {<<"source">>, Source},
+ {<<"target">>, Target},
+ % This make batch_size predictable
+ {<<"worker_processes">>, "1"}
+ ]}
+ ).
diff --git a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
index fd0409164..4044e7c72 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_test_helper.erl
@@ -13,11 +13,9 @@
replicate/2
]).
-
compare_dbs(Source, Target) ->
compare_dbs(Source, Target, []).
-
compare_dbs(Source, Target, ExceptIds) ->
{ok, SourceDb} = couch_db:open_int(Source, []),
{ok, TargetDb} = couch_db:open_int(Target, []),
@@ -39,13 +37,14 @@ compare_dbs(Source, Target, ExceptIds) ->
ok = couch_db:close(SourceDb),
ok = couch_db:close(TargetDb).
-
compare_docs(Doc1, Doc2) ->
?assertEqual(Doc1#doc.body, Doc2#doc.body),
#doc{atts = Atts1} = Doc1,
#doc{atts = Atts2} = Doc2,
- ?assertEqual(lists:sort([couch_att:fetch(name, Att) || Att <- Atts1]),
- lists:sort([couch_att:fetch(name, Att) || Att <- Atts2])),
+ ?assertEqual(
+ lists:sort([couch_att:fetch(name, Att) || Att <- Atts1]),
+ lists:sort([couch_att:fetch(name, Att) || Att <- Atts2])
+ ),
FunCompareAtts = fun(Att) ->
AttName = couch_att:fetch(name, Att),
{ok, AttTarget} = find_att(Atts2, AttName),
@@ -67,18 +66,25 @@ compare_docs(Doc1, Doc2) ->
?assert(is_integer(couch_att:fetch(att_len, Att))),
?assert(is_integer(couch_att:fetch(disk_len, AttTarget))),
?assert(is_integer(couch_att:fetch(att_len, AttTarget))),
- ?assertEqual(couch_att:fetch(disk_len, Att),
- couch_att:fetch(disk_len, AttTarget)),
- ?assertEqual(couch_att:fetch(att_len, Att),
- couch_att:fetch(att_len, AttTarget)),
- ?assertEqual(couch_att:fetch(type, Att),
- couch_att:fetch(type, AttTarget)),
- ?assertEqual(couch_att:fetch(md5, Att),
- couch_att:fetch(md5, AttTarget))
+ ?assertEqual(
+ couch_att:fetch(disk_len, Att),
+ couch_att:fetch(disk_len, AttTarget)
+ ),
+ ?assertEqual(
+ couch_att:fetch(att_len, Att),
+ couch_att:fetch(att_len, AttTarget)
+ ),
+ ?assertEqual(
+ couch_att:fetch(type, Att),
+ couch_att:fetch(type, AttTarget)
+ ),
+ ?assertEqual(
+ couch_att:fetch(md5, Att),
+ couch_att:fetch(md5, AttTarget)
+ )
end,
lists:foreach(FunCompareAtts, Atts1).
-
find_att([], _Name) ->
nil;
find_att([Att | Rest], Name) ->
@@ -89,38 +95,44 @@ find_att([Att | Rest], Name) ->
find_att(Rest, Name)
end.
-
att_md5(Att) ->
Md50 = couch_att:foldl(
Att,
fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
- couch_hash:md5_hash_init()),
+ couch_hash:md5_hash_init()
+ ),
couch_hash:md5_hash_final(Md50).
att_decoded_md5(Att) ->
Md50 = couch_att:foldl_decode(
Att,
fun(Chunk, Acc) -> couch_hash:md5_hash_update(Acc, Chunk) end,
- couch_hash:md5_hash_init()),
+ couch_hash:md5_hash_init()
+ ),
couch_hash:md5_hash_final(Md50).
db_url(DbName) ->
iolist_to_binary([
- "http://", config:get("httpd", "bind_address", "127.0.0.1"),
- ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
- "/", DbName
+ "http://",
+ config:get("httpd", "bind_address", "127.0.0.1"),
+ ":",
+ integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+ "/",
+ DbName
]).
get_pid(RepId) ->
- Pid = global:whereis_name({couch_replicator_scheduler_job,RepId}),
+ Pid = global:whereis_name({couch_replicator_scheduler_job, RepId}),
?assert(is_pid(Pid)),
Pid.
replicate(Source, Target) ->
- replicate({[
- {<<"source">>, Source},
- {<<"target">>, Target}
- ]}).
+ replicate(
+ {[
+ {<<"source">>, Source},
+ {<<"target">>, Target}
+ ]}
+ ).
replicate({[_ | _]} = RepObject) ->
{ok, Rep} = couch_replicator_utils:parse_rep_doc(RepObject, ?ADMIN_USER),
diff --git a/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
index 8e4a21dbb..a23f415c0 100644
--- a/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
+++ b/src/couch_replicator/test/eunit/couch_replicator_use_checkpoints_tests.erl
@@ -25,19 +25,24 @@
-define(i2l(I), integer_to_list(I)).
-define(io2b(Io), iolist_to_binary(Io)).
-
start(false) ->
fun
({finished, _, {CheckpointHistory}}) ->
- ?assertEqual([{<<"use_checkpoints">>,false}], CheckpointHistory);
+ ?assertEqual([{<<"use_checkpoints">>, false}], CheckpointHistory);
(_) ->
ok
end;
start(true) ->
fun
({finished, _, {CheckpointHistory}}) ->
- ?assertNotEqual(false, lists:keyfind(<<"session_id">>,
- 1, CheckpointHistory));
+ ?assertNotEqual(
+ false,
+ lists:keyfind(
+ <<"session_id">>,
+ 1,
+ CheckpointHistory
+ )
+ );
(_) ->
ok
end.
@@ -79,9 +84,12 @@ use_checkpoints_test_() ->
"Replication use_checkpoints feature tests",
{
foreachx,
- fun start/1, fun stop/2,
- [{UseCheckpoints, fun use_checkpoints_tests/2}
- || UseCheckpoints <- [false, true]]
+ fun start/1,
+ fun stop/2,
+ [
+ {UseCheckpoints, fun use_checkpoints_tests/2}
+ || UseCheckpoints <- [false, true]
+ ]
}
}.
@@ -91,21 +99,26 @@ use_checkpoints_tests(UseCheckpoints, Fun) ->
"use_checkpoints: " ++ atom_to_list(UseCheckpoints),
{
foreachx,
- fun setup/1, fun teardown/2,
- [{{UseCheckpoints, Fun, Pair}, fun should_test_checkpoints/2}
- || Pair <- Pairs]
+ fun setup/1,
+ fun teardown/2,
+ [
+ {{UseCheckpoints, Fun, Pair}, fun should_test_checkpoints/2}
+ || Pair <- Pairs
+ ]
}
}.
should_test_checkpoints({UseCheckpoints, _, {From, To}}, {_Ctx, {Source, Target, _}}) ->
should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}).
should_test_checkpoints(UseCheckpoints, {From, To}, {Source, Target}) ->
- {lists:flatten(io_lib:format("~p -> ~p", [From, To])),
- {inorder, [
- should_populate_source(Source, ?DOCS_COUNT),
- should_replicate(Source, Target, UseCheckpoints),
- should_compare_databases(Source, Target)
- ]}}.
+ {
+ lists:flatten(io_lib:format("~p -> ~p", [From, To])),
+ {inorder, [
+ should_populate_source(Source, ?DOCS_COUNT),
+ should_replicate(Source, Target, UseCheckpoints),
+ should_compare_databases(Source, Target)
+ ]}
+ }.
should_populate_source({remote, Source}, DocCount) ->
should_populate_source(Source, DocCount);
@@ -126,7 +139,6 @@ should_compare_databases(Source, {remote, Target}) ->
should_compare_databases(Source, Target) ->
{timeout, ?TIMEOUT_EUNIT, ?_test(compare_dbs(Source, Target))}.
-
populate_db(DbName, DocCount) ->
{ok, Db} = couch_db:open_int(DbName, []),
Docs = lists:foldl(
@@ -135,11 +147,13 @@ populate_db(DbName, DocCount) ->
Value = ?io2b(["val", ?i2l(DocIdCounter)]),
Doc = #doc{
id = Id,
- body = {[ {<<"value">>, Value} ]}
+ body = {[{<<"value">>, Value}]}
},
[Doc | Acc]
end,
- [], lists:seq(1, DocCount)),
+ [],
+ lists:seq(1, DocCount)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs, []),
ok = couch_db:close(Db).
@@ -150,16 +164,24 @@ compare_dbs(Source, Target) ->
{ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
{Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
DocId = couch_util:get_value(<<"_id">>, Props),
- DocTarget = case couch_db:open_doc(TargetDb, DocId) of
- {ok, DocT} ->
- DocT;
- Error ->
- erlang:error(
- {assertion_failed,
- [{module, ?MODULE}, {line, ?LINE},
- {reason, lists:concat(["Error opening document '",
- ?b2l(DocId), "' from target: ",
- couch_util:to_list(Error)])}]})
+ DocTarget =
+ case couch_db:open_doc(TargetDb, DocId) of
+ {ok, DocT} ->
+ DocT;
+ Error ->
+ erlang:error(
+ {assertion_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {reason,
+ lists:concat([
+ "Error opening document '",
+ ?b2l(DocId),
+ "' from target: ",
+ couch_util:to_list(Error)
+ ])}
+ ]}
+ )
end,
DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
?assertEqual(DocJson, DocTargetJson),
@@ -170,9 +192,10 @@ compare_dbs(Source, Target) ->
ok = couch_db:close(TargetDb).
replicate(Source, Target, UseCheckpoints) ->
- replicate({[
- {<<"source">>, Source},
- {<<"target">>, Target},
- {<<"use_checkpoints">>, UseCheckpoints}
- ]}).
-
+ replicate(
+ {[
+ {<<"source">>, Source},
+ {<<"target">>, Target},
+ {<<"use_checkpoints">>, UseCheckpoints}
+ ]}
+ ).
diff --git a/src/couch_stats/src/couch_stats.erl b/src/couch_stats/src/couch_stats.erl
index 4fde14acb..e0303fc0f 100644
--- a/src/couch_stats/src/couch_stats.erl
+++ b/src/couch_stats/src/couch_stats.erl
@@ -29,10 +29,8 @@
update_gauge/2
]).
-
-include("couch_stats.hrl").
-
-type response() :: ok | {error, unknown_metric}.
-type stat() :: {any(), [{atom(), any()}]}.
@@ -95,8 +93,9 @@ decrement_counter(Name) ->
decrement_counter(Name, Value) ->
notify_existing_metric(Name, {dec, Value}, counter).
--spec update_histogram(any(), number()) -> response();
- (any(), function()) -> any().
+-spec update_histogram
+ (any(), number()) -> response();
+ (any(), function()) -> any().
update_histogram(Name, Fun) when is_function(Fun, 0) ->
Begin = os:timestamp(),
Result = Fun(),
@@ -118,9 +117,10 @@ update_gauge(Name, Value) ->
notify_existing_metric(Name, Op, Type) ->
try
ok = folsom_metrics:notify_existing_metric(Name, Op, Type)
- catch _:_ ->
- error_logger:error_msg("unknown metric: ~p", [Name]),
- {error, unknown_metric}
+ catch
+ _:_ ->
+ error_logger:error_msg("unknown metric: ~p", [Name]),
+ {error, unknown_metric}
end.
-spec sample_type(any(), atom()) -> stat().
diff --git a/src/couch_stats/src/couch_stats_aggregator.erl b/src/couch_stats/src/couch_stats_aggregator.erl
index 0416636c9..34b28bfd6 100644
--- a/src/couch_stats/src/couch_stats_aggregator.erl
+++ b/src/couch_stats/src/couch_stats_aggregator.erl
@@ -30,7 +30,6 @@
terminate/2
]).
-
-include("couch_stats.hrl").
-record(st, {
@@ -57,17 +56,17 @@ init([]) ->
{ok, Descs} = reload_metrics(),
CT = erlang:send_after(get_interval(collect), self(), collect),
RT = erlang:send_after(get_interval(reload), self(), reload),
- {ok, #st{descriptions=Descs, stats=[], collect_timer=CT, reload_timer=RT}}.
+ {ok, #st{descriptions = Descs, stats = [], collect_timer = CT, reload_timer = RT}}.
-handle_call(fetch, _from, #st{stats = Stats}=State) ->
+handle_call(fetch, _from, #st{stats = Stats} = State) ->
{reply, {ok, Stats}, State};
handle_call(flush, _From, State) ->
{reply, ok, collect(State)};
-handle_call(reload, _from, #st{reload_timer=OldRT} = State) ->
+handle_call(reload, _from, #st{reload_timer = OldRT} = State) ->
timer:cancel(OldRT),
{ok, Descriptions} = reload_metrics(),
RT = update_timer(reload),
- {reply, ok, State#st{descriptions=Descriptions, reload_timer=RT}};
+ {reply, ok, State#st{descriptions = Descriptions, reload_timer = RT}};
handle_call(Msg, _From, State) ->
{stop, {unknown_call, Msg}, error, State}.
@@ -78,7 +77,7 @@ handle_info(collect, State) ->
{noreply, collect(State)};
handle_info(reload, State) ->
{ok, Descriptions} = reload_metrics(),
- {noreply, State#st{descriptions=Descriptions}};
+ {noreply, State#st{descriptions = Descriptions}};
handle_info(Msg, State) ->
{stop, {unknown_info, Msg}, State}.
@@ -101,7 +100,10 @@ reload_metrics() ->
ToDelete = sets:subtract(ExistingSet, CurrentSet),
ToCreate = sets:subtract(CurrentSet, ExistingSet),
sets:fold(
- fun({Name, _}, _) -> couch_stats:delete(Name), nil end,
+ fun({Name, _}, _) ->
+ couch_stats:delete(Name),
+ nil
+ end,
nil,
ToDelete
),
@@ -141,16 +143,16 @@ load_metrics_for_application(AppName) ->
end
end.
-collect(#st{collect_timer=OldCT} = State) ->
+collect(#st{collect_timer = OldCT} = State) ->
timer:cancel(OldCT),
Stats = lists:map(
fun({Name, Props}) ->
- {Name, [{value, couch_stats:sample(Name)}|Props]}
+ {Name, [{value, couch_stats:sample(Name)} | Props]}
end,
State#st.descriptions
),
CT = update_timer(collect),
- State#st{stats=Stats, collect_timer=CT}.
+ State#st{stats = Stats, collect_timer = CT}.
update_timer(Type) ->
Interval = get_interval(Type),
diff --git a/src/couch_stats/src/couch_stats_httpd.erl b/src/couch_stats/src/couch_stats_httpd.erl
index 0c24d8856..b40ba6094 100644
--- a/src/couch_stats/src/couch_stats_httpd.erl
+++ b/src/couch_stats/src/couch_stats_httpd.erl
@@ -18,7 +18,7 @@
%% exported for use by chttpd_misc
-export([transform_stats/1, nest/1, to_ejson/1, extract_path/2]).
-handle_stats_req(#httpd{method='GET', path_parts=[_ | Path]}=Req) ->
+handle_stats_req(#httpd{method = 'GET', path_parts = [_ | Path]} = Req) ->
flush(Req),
Stats0 = couch_stats:fetch(),
Stats = transform_stats(Stats0),
@@ -27,7 +27,6 @@ handle_stats_req(#httpd{method='GET', path_parts=[_ | Path]}=Req) ->
EJSON1 = extract_path(Path, EJSON0),
couch_httpd:send_json(Req, EJSON1).
-
transform_stats(Stats) ->
transform_stats(Stats, []).
@@ -37,51 +36,55 @@ transform_stats([{Key, Props} | Rest], Acc) ->
{_, Type} = proplists:lookup(type, Props),
transform_stats(Rest, [{Key, transform_stat(Type, Props)} | Acc]).
-
transform_stat(counter, Props) ->
Props;
transform_stat(gauge, Props) ->
Props;
transform_stat(histogram, Props) ->
- lists:map(fun
- ({value, Value}) ->
- {value, lists:map(fun
- ({Key, List}) when Key == percentile; Key == histogram ->
- {Key, [tuple_to_list(Item) || Item <- List]};
- (Else) ->
- Else
- end, Value)};
- (Else) ->
- Else
- end, Props).
-
+ lists:map(
+ fun
+ ({value, Value}) ->
+ {value,
+ lists:map(
+ fun
+ ({Key, List}) when Key == percentile; Key == histogram ->
+ {Key, [tuple_to_list(Item) || Item <- List]};
+ (Else) ->
+ Else
+ end,
+ Value
+ )};
+ (Else) ->
+ Else
+ end,
+ Props
+ ).
nest(Proplist) ->
nest(Proplist, []).
nest([], Acc) ->
Acc;
-nest([{[Key|Keys], Value}|Rest], Acc) ->
- Acc1 = case proplists:lookup(Key, Acc) of
- {Key, Old} ->
- [{Key, nest([{Keys, Value}], Old)}|proplists:delete(Key, Acc)];
- none ->
- Term = lists:foldr(fun(K, A) -> [{K, A}] end, Value, Keys),
- [{Key, Term}|Acc]
- end,
+nest([{[Key | Keys], Value} | Rest], Acc) ->
+ Acc1 =
+ case proplists:lookup(Key, Acc) of
+ {Key, Old} ->
+ [{Key, nest([{Keys, Value}], Old)} | proplists:delete(Key, Acc)];
+ none ->
+ Term = lists:foldr(fun(K, A) -> [{K, A}] end, Value, Keys),
+ [{Key, Term} | Acc]
+ end,
nest(Rest, Acc1).
-
-to_ejson([{_, _}|_]=Proplist) ->
+to_ejson([{_, _} | _] = Proplist) ->
EJSONProps = lists:map(
- fun({Key, Value}) -> {maybe_format_key(Key), to_ejson(Value)} end,
- Proplist
+ fun({Key, Value}) -> {maybe_format_key(Key), to_ejson(Value)} end,
+ Proplist
),
{EJSONProps};
to_ejson(NotAProplist) ->
NotAProplist.
-
extract_path([], EJSON) ->
EJSON;
extract_path([Key | Rest], {Props}) ->
@@ -94,7 +97,6 @@ extract_path([Key | Rest], {Props}) ->
extract_path([_ | _], _NotAnObject) ->
null.
-
maybe_format_key(Key) when is_list(Key) ->
list_to_binary(Key);
maybe_format_key(Key) when is_atom(Key) ->
diff --git a/src/couch_stats/src/couch_stats_process_tracker.erl b/src/couch_stats/src/couch_stats_process_tracker.erl
index fef955efd..c53f0f887 100644
--- a/src/couch_stats/src/couch_stats_process_tracker.erl
+++ b/src/couch_stats/src/couch_stats_process_tracker.erl
@@ -28,9 +28,7 @@
terminate/2
]).
--record(st, {
-
-}).
+-record(st, {}).
-spec track(any()) -> ok.
track(Name) ->
@@ -60,7 +58,7 @@ handle_cast(Msg, State) ->
error_logger:error_msg("~p received unknown cast ~p", [?MODULE, Msg]),
{noreply, State}.
-handle_info({'DOWN', Ref, _, _, _}=Msg, State) ->
+handle_info({'DOWN', Ref, _, _, _} = Msg, State) ->
case ets:lookup(?MODULE, Ref) of
[] ->
error_logger:error_msg(
diff --git a/src/couch_stats/src/couch_stats_sup.erl b/src/couch_stats/src/couch_stats_sup.erl
index 55755bb83..2a92ac69c 100644
--- a/src/couch_stats/src/couch_stats_sup.erl
+++ b/src/couch_stats/src/couch_stats_sup.erl
@@ -24,12 +24,11 @@
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
- {ok, {
- {one_for_one, 5, 10}, [
- ?CHILD(couch_stats_aggregator, worker),
- ?CHILD(couch_stats_process_tracker, worker)
- ]
- }}.
-
+ {ok,
+ {
+ {one_for_one, 5, 10}, [
+ ?CHILD(couch_stats_aggregator, worker),
+ ?CHILD(couch_stats_process_tracker, worker)
+ ]
+ }}.
diff --git a/src/couch_tests/setups/couch_epi_dispatch.erl b/src/couch_tests/setups/couch_epi_dispatch.erl
index 9c0b6b0b0..0094780d4 100644
--- a/src/couch_tests/setups/couch_epi_dispatch.erl
+++ b/src/couch_tests/setups/couch_epi_dispatch.erl
@@ -27,14 +27,17 @@
notify/3
]).
-
%% ------------------------------------------------------------------
%% API functions definitions
%% ------------------------------------------------------------------
dispatch(ServiceId, CallbackModule) ->
- couch_tests:new(?MODULE, dispatch,
- setup_dispatch(ServiceId, CallbackModule), teardown_dispatch()).
+ couch_tests:new(
+ ?MODULE,
+ dispatch,
+ setup_dispatch(ServiceId, CallbackModule),
+ teardown_dispatch()
+ ).
%% ------------------------------------------------------------------
%% setups and teardowns
diff --git a/src/couch_tests/src/couch_tests.erl b/src/couch_tests/src/couch_tests.erl
index 5dff3c5e1..de80addf5 100644
--- a/src/couch_tests/src/couch_tests.erl
+++ b/src/couch_tests/src/couch_tests.erl
@@ -116,19 +116,22 @@ validate_fixture(#couch_tests_fixture{} = Fixture0, Args, Opts) ->
StartedAppsAfterTeardown = Ctx1#couch_tests_ctx.started_apps,
validate_and_report([
- {equal, "Expected applications before calling fixture (~p) "
- "to be equal to applications after its calling",
- AppsBefore, AppsAfter},
- {equal, "Expected list of started applications (~p) "
- "to be equal to #couch_tests_fixture.apps (~p)",
- AppsStarted, FixtureApps},
- {equal, "Expected list of started applications (~p) "
- "to be equal to #couch_tests_ctx.started_apps (~p)",
- AppsStarted, StartedAppsBeforeTeardown},
- {equal, "Expected list of stopped applications (~p) "
- "to be equal to #couch_tests_ctx.stopped_apps (~p)",
- AppsStarted, StoppedAppsAfterTeardown},
- {equal, "Expected empty list ~i of #couch_tests_ctx.started_apps (~p) "
+ {equal,
+ "Expected applications before calling fixture (~p) "
+ "to be equal to applications after its calling", AppsBefore, AppsAfter},
+ {equal,
+ "Expected list of started applications (~p) "
+ "to be equal to #couch_tests_fixture.apps (~p)", AppsStarted, FixtureApps},
+ {equal,
+ "Expected list of started applications (~p) "
+ "to be equal to #couch_tests_ctx.started_apps (~p)", AppsStarted,
+ StartedAppsBeforeTeardown},
+ {equal,
+ "Expected list of stopped applications (~p) "
+ "to be equal to #couch_tests_ctx.stopped_apps (~p)", AppsStarted,
+ StoppedAppsAfterTeardown},
+ {equal,
+ "Expected empty list ~i of #couch_tests_ctx.started_apps (~p) "
"after teardown", [], StartedAppsAfterTeardown}
]).
@@ -151,16 +154,19 @@ validate_and_report(Sheet) ->
%% Helper functions definitions
%% ------------------------------------------------------------------
-
do_setup([#couch_tests_fixture{setup = Setup} = Fixture | Rest], Ctx0, Acc) ->
Ctx1 = Ctx0#couch_tests_ctx{started_apps = []},
#couch_tests_ctx{started_apps = Apps} = Ctx2 = Setup(Fixture, Ctx1),
Ctx3 = Ctx2#couch_tests_ctx{started_apps = []},
do_setup(Rest, Ctx3, [Fixture#couch_tests_fixture{apps = Apps} | Acc]);
do_setup([], Ctx, Acc) ->
- Apps = lists:foldl(fun(#couch_tests_fixture{apps = A}, AppsAcc) ->
- A ++ AppsAcc
- end, [], Acc),
+ Apps = lists:foldl(
+ fun(#couch_tests_fixture{apps = A}, AppsAcc) ->
+ A ++ AppsAcc
+ end,
+ [],
+ Acc
+ ),
Ctx#couch_tests_ctx{chain = lists:reverse(Acc), started_apps = Apps}.
do_teardown(Fixture, Ctx0) ->
@@ -175,14 +181,14 @@ do_start_applications([], Acc) ->
lists:reverse(Acc);
do_start_applications([App | Apps], Acc) ->
case application:start(App) of
- {error, {already_started, _}} ->
- do_start_applications(Apps, Acc);
- {error, {not_started, Dep}} ->
- do_start_applications([Dep, App | Apps], Acc);
- {error, {not_running, Dep}} ->
- do_start_applications([Dep, App | Apps], Acc);
- ok ->
- do_start_applications(Apps, [App | Acc])
+ {error, {already_started, _}} ->
+ do_start_applications(Apps, Acc);
+ {error, {not_started, Dep}} ->
+ do_start_applications([Dep, App | Apps], Acc);
+ {error, {not_running, Dep}} ->
+ do_start_applications([Dep, App | Apps], Acc);
+ ok ->
+ do_start_applications(Apps, [App | Acc])
end.
stop_applications(Apps) ->
@@ -192,26 +198,25 @@ do_stop_applications([], Acc) ->
lists:reverse(Acc);
do_stop_applications([App | Apps], Acc) ->
case application:stop(App) of
- {error, _} ->
- do_stop_applications(Apps, Acc);
- ok ->
- do_stop_applications(Apps, [App | Acc])
+ {error, _} ->
+ do_stop_applications(Apps, Acc);
+ ok ->
+ do_stop_applications(Apps, [App | Acc])
end.
-remove_duplicates([]) ->
+remove_duplicates([]) ->
[];
remove_duplicates([H | T]) ->
[H | [X || X <- remove_duplicates(T), X /= H]].
applications() ->
- lists:usort([App || {App, _, _} <-application:which_applications()]).
+ lists:usort([App || {App, _, _} <- application:which_applications()]).
do_validate({equal, _Message, Arg, Arg}, Acc) ->
Acc;
do_validate({equal, Message, Arg1, Arg2}, Acc) ->
[io_lib:format(Message, [Arg1, Arg2]) | Acc].
-
%% ------------------------------------------------------------------
%% Tests
%% ------------------------------------------------------------------
diff --git a/src/couch_tests/src/couch_tests_combinatorics.erl b/src/couch_tests/src/couch_tests_combinatorics.erl
index 343336277..f1ee6dd2e 100644
--- a/src/couch_tests/src/couch_tests_combinatorics.erl
+++ b/src/couch_tests/src/couch_tests_combinatorics.erl
@@ -65,7 +65,7 @@ powerset(X, [H | T], Acc) ->
permutations([]) ->
[[]];
-permutations(L) ->
+permutations(L) ->
[[H | T] || H <- L, T <- permutations(L -- [H])].
%% @doc product({Items1, Items2, ..., ItemsN})
@@ -83,7 +83,7 @@ permutations(L) ->
%% ]
-spec product(Elements :: list()) -> [list()].
-product([H]) ->
+product([H]) ->
[[A] || A <- H];
product([H | T]) ->
[[A | B] || A <- H, B <- product(T)].
@@ -109,7 +109,6 @@ product([H | T]) ->
binary_combinations(NBits) ->
product(lists:duplicate(NBits, [true, false])).
-
%% @doc combinations(N, Items).
%% Generate all combinations by choosing N values from a given list of Items
%% in sorted order. Each combination is sorted and the entire table is sorted.
diff --git a/src/couch_tests/test/couch_tests_app_tests.erl b/src/couch_tests/test/couch_tests_app_tests.erl
index 6f9c7e419..97f5c1750 100644
--- a/src/couch_tests/test/couch_tests_app_tests.erl
+++ b/src/couch_tests/test/couch_tests_app_tests.erl
@@ -25,52 +25,64 @@ teardown(Mocks) ->
%% ------------------------------------------------------------------
dummy_setup() ->
- couch_tests:new(?MODULE, dummy_setup,
+ couch_tests:new(
+ ?MODULE,
+ dummy_setup,
fun(_Fixture, Ctx) -> Ctx end,
- fun(_Fixture, Ctx) -> Ctx end).
-
+ fun(_Fixture, Ctx) -> Ctx end
+ ).
setup1(Arg1) ->
- couch_tests:new(?MODULE, setup1,
+ couch_tests:new(
+ ?MODULE,
+ setup1,
fun(Fixture, Ctx0) ->
- Ctx1 = couch_tests:start_applications([asn1], Ctx0),
- couch_tests:set_state(Fixture, Ctx1, {Arg1})
+ Ctx1 = couch_tests:start_applications([asn1], Ctx0),
+ couch_tests:set_state(Fixture, Ctx1, {Arg1})
end,
fun(_Fixture, Ctx) ->
- couch_tests:stop_applications([asn1], Ctx)
- end).
+ couch_tests:stop_applications([asn1], Ctx)
+ end
+ ).
setup2(Arg1, Arg2) ->
- couch_tests:new(?MODULE, setup2,
+ couch_tests:new(
+ ?MODULE,
+ setup2,
fun(Fixture, Ctx0) ->
- Ctx1 = couch_tests:start_applications([public_key], Ctx0),
- couch_tests:set_state(Fixture, Ctx1, {Arg1, Arg2})
+ Ctx1 = couch_tests:start_applications([public_key], Ctx0),
+ couch_tests:set_state(Fixture, Ctx1, {Arg1, Arg2})
end,
fun(_Fixture, Ctx) ->
- Ctx
- end).
-
+ Ctx
+ end
+ ).
couch_tests_test_() ->
{
"couch_tests tests",
{
- foreach, fun setup/0, fun teardown/1,
+ foreach,
+ fun setup/0,
+ fun teardown/1,
[
{"chained setup", fun chained_setup/0}
]
}
}.
-
chained_setup() ->
?assert(meck:validate(application)),
?assertEqual([], history(application, start)),
- Ctx0 = couch_tests:setup([
- setup1(foo),
- dummy_setup(),
- setup2(bar, baz)
- ], [], []),
+ Ctx0 = couch_tests:setup(
+ [
+ setup1(foo),
+ dummy_setup(),
+ setup2(bar, baz)
+ ],
+ [],
+ []
+ ),
?assertEqual([asn1, public_key], history(application, start)),
?assertEqual([asn1, public_key], couch_tests:get(started_apps, Ctx0)),
@@ -96,7 +108,10 @@ unmock(application) ->
history(Module, Function) ->
Self = self(),
- [A || {Pid, {M, F, [A]}, _Result} <- meck:history(Module)
- , Pid =:= Self
- , M =:= Module
- , F =:= Function].
+ [
+ A
+ || {Pid, {M, F, [A]}, _Result} <- meck:history(Module),
+ Pid =:= Self,
+ M =:= Module,
+ F =:= Function
+ ].
diff --git a/src/custodian/src/custodian.erl b/src/custodian/src/custodian.erl
index a16c925b5..5cb7d930c 100644
--- a/src/custodian/src/custodian.erl
+++ b/src/custodian/src/custodian.erl
@@ -18,4 +18,4 @@ report() ->
custodian_util:report().
summary() ->
- custodian_util:summary().
+ custodian_util:summary().
diff --git a/src/custodian/src/custodian_db_checker.erl b/src/custodian/src/custodian_db_checker.erl
index f9ab8c85e..96cf24a30 100644
--- a/src/custodian/src/custodian_db_checker.erl
+++ b/src/custodian/src/custodian_db_checker.erl
@@ -14,10 +14,8 @@
-behaviour(gen_server).
-vsn(1).
-
-export([start_link/0]).
-
-export([
init/1,
terminate/2,
@@ -31,75 +29,64 @@
check_dbs/0
]).
-
-record(st, {
checker
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init(_) ->
process_flag(trap_exit, true),
net_kernel:monitor_nodes(true),
{ok, restart_checker(#st{})}.
-
terminate(_Reason, St) ->
couch_util:shutdown_sync(St#st.checker),
ok.
-
handle_call(Msg, _From, St) ->
{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
handle_cast(refresh, St) ->
{noreply, restart_checker(St)};
-
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
-
handle_info({nodeup, _}, St) ->
{noreply, restart_checker(St)};
-
handle_info({nodedown, _}, St) ->
{noreply, restart_checker(St)};
-
-handle_info({'EXIT', Pid, normal}, #st{checker=Pid}=St) ->
- {noreply, St#st{checker=undefined}};
-
-handle_info({'EXIT', Pid, Reason}, #st{checker=Pid}=St) ->
+handle_info({'EXIT', Pid, normal}, #st{checker = Pid} = St) ->
+ {noreply, St#st{checker = undefined}};
+handle_info({'EXIT', Pid, Reason}, #st{checker = Pid} = St) ->
couch_log:notice("custodian db checker died ~p", [Reason]),
- {noreply, restart_checker(St#st{checker=undefined})};
-
+ {noreply, restart_checker(St#st{checker = undefined})};
handle_info(Msg, St) ->
{stop, {invalid_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
-restart_checker(#st{checker=undefined}=St) ->
+restart_checker(#st{checker = undefined} = St) ->
Pid = spawn_link(fun ?MODULE:check_dbs/0),
- St#st{checker=Pid};
-restart_checker(#st{checker=Pid}=St) when is_pid(Pid) ->
+ St#st{checker = Pid};
+restart_checker(#st{checker = Pid} = St) when is_pid(Pid) ->
St.
-
check_dbs() ->
{ok, DbsDb} = custodian_util:ensure_dbs_exists(),
try
- Missing = lists:foldl(fun(DbName, Count) ->
- case check_db(DbsDb, DbName) of
- ok -> Count;
- missing -> Count + 1
- end
- end, 0, get_dbs()),
+ Missing = lists:foldl(
+ fun(DbName, Count) ->
+ case check_db(DbsDb, DbName) of
+ ok -> Count;
+ missing -> Count + 1
+ end
+ end,
+ 0,
+ get_dbs()
+ ),
case Missing == 0 of
true -> clear_missing_dbs_alert();
false -> ok
@@ -108,7 +95,6 @@ check_dbs() ->
couch_db:close(DbsDb)
end.
-
check_db(DbsDb, DbName) when is_binary(DbName) ->
try
case couch_db:open_doc(DbsDb, DbName, []) of
@@ -118,24 +104,22 @@ check_db(DbsDb, DbName) when is_binary(DbName) ->
send_missing_db_alert(DbName),
missing
end
- catch _:_ ->
- send_missing_db_alert(DbName),
- missing
+ catch
+ _:_ ->
+ send_missing_db_alert(DbName),
+ missing
end.
-
get_dbs() ->
lists:flatten([
get_users_db(),
get_stats_db()
]).
-
get_users_db() ->
UsersDb = chttpd_auth_cache:dbname(),
[list_to_binary(UsersDb)].
-
get_stats_db() ->
case application:get_env(ioq, stats_db) of
{ok, DbName} when is_binary(DbName) ->
@@ -146,12 +130,10 @@ get_stats_db() ->
[]
end.
-
send_missing_db_alert(DbName) ->
couch_log:notice("Missing system database ~s", [DbName]),
?CUSTODIAN_MONITOR:send_missing_db_alert(DbName).
-
clear_missing_dbs_alert() ->
couch_log:notice("All system databases exist.", []),
?CUSTODIAN_MONITOR:clear_missing_dbs_alert().
diff --git a/src/custodian/src/custodian_monitor.erl b/src/custodian/src/custodian_monitor.erl
index 3cca046ed..29a347374 100644
--- a/src/custodian/src/custodian_monitor.erl
+++ b/src/custodian/src/custodian_monitor.erl
@@ -12,17 +12,15 @@
-module(custodian_monitor).
-
% N.B. that callback return values are ignored
-callback send_missing_db_alert(DbName :: binary()) ->
Ignored :: any().
-
-callback clear_missing_dbs_alert() ->
Ignored :: any().
-
-callback send_event(
- Name :: string(), Count :: non_neg_integer(), Description :: string()) ->
+ Name :: string(), Count :: non_neg_integer(), Description :: string()
+) ->
Ignored :: any().
diff --git a/src/custodian/src/custodian_noop_monitor.erl b/src/custodian/src/custodian_noop_monitor.erl
index 5c793aeca..4cdd6d1d3 100644
--- a/src/custodian/src/custodian_noop_monitor.erl
+++ b/src/custodian/src/custodian_noop_monitor.erl
@@ -12,24 +12,19 @@
-module(custodian_noop_monitor).
-
-behaviour(custodian_monitor).
-
-export([
send_missing_db_alert/1,
clear_missing_dbs_alert/0,
send_event/3
]).
-
send_missing_db_alert(_DbName) ->
false.
-
clear_missing_dbs_alert() ->
false.
-
send_event(_Name, _Count, _Description) ->
false.
diff --git a/src/custodian/src/custodian_server.erl b/src/custodian/src/custodian_server.erl
index 0c8b87e87..e8bdc13c9 100644
--- a/src/custodian/src/custodian_server.erl
+++ b/src/custodian/src/custodian_server.erl
@@ -19,8 +19,14 @@
-export([start_link/0]).
% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ terminate/2
+]).
% exported for callback.
-export([
@@ -35,7 +41,7 @@
-record(state, {
event_listener,
shard_checker,
- rescan=false
+ rescan = false
}).
-define(VSN_0_2_7, 184129240591641721395874905059581858099).
@@ -46,7 +52,6 @@
-define(RELISTEN_DELAY, 5000).
-endif.
-
% public functions.
start_link() ->
@@ -69,9 +74,10 @@ init(_) ->
net_kernel:monitor_nodes(true),
ok = config:listen_for_changes(?MODULE, nil),
{ok, LisPid} = start_event_listener(),
- {ok, start_shard_checker(#state{
- event_listener=LisPid
- })}.
+ {ok,
+ start_shard_checker(#state{
+ event_listener = LisPid
+ })}.
handle_call(_Msg, _From, State) ->
{noreply, State}.
@@ -81,29 +87,24 @@ handle_cast(refresh, State) ->
handle_info({nodeup, _}, State) ->
{noreply, start_shard_checker(State)};
-
handle_info({nodedown, _}, State) ->
{noreply, start_shard_checker(State)};
-
-handle_info({'EXIT', Pid, normal}, #state{shard_checker=Pid}=State) ->
- NewState = State#state{shard_checker=undefined},
+handle_info({'EXIT', Pid, normal}, #state{shard_checker = Pid} = State) ->
+ NewState = State#state{shard_checker = undefined},
case State#state.rescan of
true ->
{noreply, start_shard_checker(NewState)};
false ->
{noreply, NewState}
end;
-
-handle_info({'EXIT', Pid, Reason}, #state{shard_checker=Pid}=State) ->
+handle_info({'EXIT', Pid, Reason}, #state{shard_checker = Pid} = State) ->
couch_log:notice("custodian shard checker died ~p", [Reason]),
- NewState = State#state{shard_checker=undefined},
+ NewState = State#state{shard_checker = undefined},
{noreply, start_shard_checker(NewState)};
-
-handle_info({'EXIT', Pid, Reason}, #state{event_listener=Pid}=State) ->
+handle_info({'EXIT', Pid, Reason}, #state{event_listener = Pid} = State) ->
couch_log:notice("custodian update notifier died ~p", [Reason]),
{ok, Pid1} = start_event_listener(),
- {noreply, State#state{event_listener=Pid1}};
-
+ {noreply, State#state{event_listener = Pid1}};
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State}.
@@ -116,26 +117,24 @@ terminate(_Reason, State) ->
code_change(?VSN_0_2_7, State, _Extra) ->
ok = config:listen_for_changes(?MODULE, nil),
{ok, State};
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
% private functions
-
-start_shard_checker(#state{shard_checker=undefined}=State) ->
+start_shard_checker(#state{shard_checker = undefined} = State) ->
State#state{
- shard_checker=spawn_link(fun ?MODULE:check_shards/0),
- rescan=false
+ shard_checker = spawn_link(fun ?MODULE:check_shards/0),
+ rescan = false
};
-start_shard_checker(#state{shard_checker=Pid}=State) when is_pid(Pid) ->
- State#state{rescan=true}.
-
+start_shard_checker(#state{shard_checker = Pid} = State) when is_pid(Pid) ->
+ State#state{rescan = true}.
start_event_listener() ->
DbName = mem3_sync:shards_db(),
couch_event:link_listener(
- ?MODULE, handle_db_event, nil, [{dbname, DbName}]
- ).
+ ?MODULE, handle_db_event, nil, [{dbname, DbName}]
+ ).
handle_db_event(_DbName, updated, _St) ->
gen_server:cast(?MODULE, refresh),
@@ -146,7 +145,6 @@ handle_db_event(_DbName, _Event, _St) ->
check_shards() ->
[send_event(Item) || Item <- custodian:summary()].
-
send_event({_, Count} = Item) ->
Description = describe(Item),
Name = check_name(Item),
@@ -160,13 +158,28 @@ send_event({_, Count} = Item) ->
end,
?CUSTODIAN_MONITOR:send_event(Name, Count, Description).
-
describe({{safe, N}, Count}) ->
- lists:concat([Count, " ", shards(Count), " in cluster with only ", N,
- " ", copies(N), " on nodes that are currently up"]);
+ lists:concat([
+ Count,
+ " ",
+ shards(Count),
+ " in cluster with only ",
+ N,
+ " ",
+ copies(N),
+ " on nodes that are currently up"
+ ]);
describe({{live, N}, Count}) ->
- lists:concat([Count, " ", shards(Count), " in cluster with only ",
- N, " ", copies(N), " on nodes not in maintenance mode"]);
+ lists:concat([
+ Count,
+ " ",
+ shards(Count),
+ " in cluster with only ",
+ N,
+ " ",
+ copies(N),
+ " on nodes not in maintenance mode"
+ ]);
describe({conflicted, Count}) ->
lists:concat([Count, " conflicted ", shards(Count), " in cluster"]).
@@ -185,7 +198,6 @@ copies(1) ->
copies(_) ->
"copies".
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -200,7 +212,7 @@ config_update_test_() ->
fun t_restart_config_listener/1
]
}
-}.
+ }.
t_restart_config_listener(_) ->
?_test(begin
diff --git a/src/custodian/src/custodian_sup.erl b/src/custodian/src/custodian_sup.erl
index 97dbd2321..c2be7c861 100644
--- a/src/custodian/src/custodian_sup.erl
+++ b/src/custodian/src/custodian_sup.erl
@@ -35,11 +35,11 @@ start_link() ->
%% ===================================================================
init([]) ->
- {ok, {
- {one_for_one, 5, 10},
- [
- ?CHILD(custodian_server, worker),
- ?CHILD(custodian_db_checker, worker)
- ]
- }}.
-
+ {ok,
+ {
+ {one_for_one, 5, 10},
+ [
+ ?CHILD(custodian_server, worker),
+ ?CHILD(custodian_db_checker, worker)
+ ]
+ }}.
diff --git a/src/custodian/src/custodian_util.erl b/src/custodian/src/custodian_util.erl
index 6d5a56093..866bcacb1 100644
--- a/src/custodian/src/custodian_util.erl
+++ b/src/custodian/src/custodian_util.erl
@@ -26,22 +26,26 @@
%% public functions.
summary() ->
- Dict0 = dict:from_list([{conflicted, 0}] ++
- [{{live, N}, 0} || N <- lists:seq(0, cluster_n() - 1)] ++
- [{{safe, N}, 0} || N <- lists:seq(0, cluster_n() - 1)]),
- Fun = fun(_Id, _Range, {conflicted, _N}, Dict) ->
- dict:update_counter(conflicted, 1, Dict);
- (_Id, _Range, Item, Dict) ->
- dict:update_counter(Item, 1, Dict)
+ Dict0 = dict:from_list(
+ [{conflicted, 0}] ++
+ [{{live, N}, 0} || N <- lists:seq(0, cluster_n() - 1)] ++
+ [{{safe, N}, 0} || N <- lists:seq(0, cluster_n() - 1)]
+ ),
+ Fun = fun
+ (_Id, _Range, {conflicted, _N}, Dict) ->
+ dict:update_counter(conflicted, 1, Dict);
+ (_Id, _Range, Item, Dict) ->
+ dict:update_counter(Item, 1, Dict)
end,
dict:to_list(fold_dbs(Dict0, Fun)).
report() ->
- Fun = fun(Id, _Range, {conflicted, N}, Acc) ->
- [{Id, {conflicted, N}} | Acc];
- (Id, Range, Item, Acc) ->
- [{Id, Range, Item} | Acc]
- end,
+ Fun = fun
+ (Id, _Range, {conflicted, N}, Acc) ->
+ [{Id, {conflicted, N}} | Acc];
+ (Id, Range, Item, Acc) ->
+ [{Id, Range, Item} | Acc]
+ end,
fold_dbs([], Fun).
ensure_dbs_exists() ->
@@ -57,7 +61,7 @@ fold_dbs(Acc, Fun) ->
Live = Safe -- maintenance_nodes(Safe),
{ok, Db} = ensure_dbs_exists(),
try
- State0 = #state{live=Live, safe=Safe, callback=Fun, db=Db, acc=Acc},
+ State0 = #state{live = Live, safe = Safe, callback = Fun, db = Db, acc = Acc},
{ok, State1} = couch_db:fold_docs(Db, fun fold_dbs1/2, State0, []),
State1#state.acc
after
@@ -66,17 +70,17 @@ fold_dbs(Acc, Fun) ->
fold_dbs1(#full_doc_info{id = <<"_design/", _/binary>>}, Acc) ->
{ok, Acc};
-fold_dbs1(#full_doc_info{deleted=true}, Acc) ->
+fold_dbs1(#full_doc_info{deleted = true}, Acc) ->
{ok, Acc};
fold_dbs1(#full_doc_info{id = Id} = FDI, State) ->
- InternalAcc = case count_conflicts(FDI) of
- 0 ->
- State#state.acc;
- ConflictCount ->
- (State#state.callback)(Id, null, {conflicted, ConflictCount}, State#state.acc)
- end,
- fold_dbs(Id, load_shards(State#state.db, FDI), State#state{acc=InternalAcc}).
-
+ InternalAcc =
+ case count_conflicts(FDI) of
+ 0 ->
+ State#state.acc;
+ ConflictCount ->
+ (State#state.callback)(Id, null, {conflicted, ConflictCount}, State#state.acc)
+ end,
+ fold_dbs(Id, load_shards(State#state.db, FDI), State#state{acc = InternalAcc}).
fold_dbs(Id, Shards, State) ->
IsSafe = fun(#shard{node = N}) -> lists:member(N, State#state.safe) end,
@@ -85,27 +89,36 @@ fold_dbs(Id, Shards, State) ->
SafeShards = lists:filter(IsSafe, Shards),
TargetN = mem3_util:calculate_max_n(Shards),
Acc0 = State#state.acc,
- Acc1 = case mem3_util:calculate_max_n(LiveShards) of
- LiveN when LiveN < TargetN ->
- LiveRanges = get_range_counts(LiveN, LiveShards, Shards),
- lists:foldl(fun({Range, N}, FAcc) ->
- (State#state.callback)(Id, Range, {live, N}, FAcc)
- end, Acc0, LiveRanges);
- _ ->
- Acc0
- end,
- Acc2 = case mem3_util:calculate_max_n(SafeShards) of
- SafeN when SafeN < TargetN ->
- SafeRanges = get_range_counts(SafeN, SafeShards, Shards),
- lists:foldl(fun({Range, N}, FAcc) ->
- (State#state.callback)(Id, Range, {safe, N}, FAcc)
- end, Acc1, SafeRanges);
- _ ->
- Acc1
- end,
+ Acc1 =
+ case mem3_util:calculate_max_n(LiveShards) of
+ LiveN when LiveN < TargetN ->
+ LiveRanges = get_range_counts(LiveN, LiveShards, Shards),
+ lists:foldl(
+ fun({Range, N}, FAcc) ->
+ (State#state.callback)(Id, Range, {live, N}, FAcc)
+ end,
+ Acc0,
+ LiveRanges
+ );
+ _ ->
+ Acc0
+ end,
+ Acc2 =
+ case mem3_util:calculate_max_n(SafeShards) of
+ SafeN when SafeN < TargetN ->
+ SafeRanges = get_range_counts(SafeN, SafeShards, Shards),
+ lists:foldl(
+ fun({Range, N}, FAcc) ->
+ (State#state.callback)(Id, Range, {safe, N}, FAcc)
+ end,
+ Acc1,
+ SafeRanges
+ );
+ _ ->
+ Acc1
+ end,
{ok, State#state{acc = Acc2}}.
-
get_range_counts(MaxN, Shards, AllShards) ->
Ranges = ranges(Shards),
AllRanges = ranges(AllShards),
@@ -131,16 +144,23 @@ get_range_counts(MaxN, Shards, AllShards) ->
RangeCounts1 = maps:filter(fun(_, N) -> N =< MaxN end, RangeCounts),
lists:sort(maps:to_list(RangeCounts1)).
-
update_counts(Ranges, Acc0, Init, UpdateFun) ->
- lists:foldl(fun({B, E}, Acc) ->
- maps:update_with({B, E}, UpdateFun, Init, Acc)
- end, Acc0, Ranges).
-
+ lists:foldl(
+ fun({B, E}, Acc) ->
+ maps:update_with({B, E}, UpdateFun, Init, Acc)
+ end,
+ Acc0,
+ Ranges
+ ).
ranges(Shards) ->
- lists:map(fun(S) -> [B, E] = mem3:range(S), {B, E} end, Shards).
-
+ lists:map(
+ fun(S) ->
+ [B, E] = mem3:range(S),
+ {B, E}
+ end,
+ Shards
+ ).
get_n_rings(N, Ranges, Rings) when N =< 0 ->
{Ranges, Rings};
@@ -148,7 +168,6 @@ get_n_rings(N, Ranges, Rings) ->
Ring = mem3_util:get_ring(Ranges),
get_n_rings(N - 1, Ranges -- Ring, Rings ++ Ring).
-
cluster_n() ->
config:get_integer("cluster", "n", 3).
@@ -169,19 +188,18 @@ maybe_redirect(Nodes) ->
maybe_redirect([], Acc) ->
Acc;
-maybe_redirect([Node|Rest], Acc) ->
+maybe_redirect([Node | Rest], Acc) ->
case config:get("mem3.redirects", atom_to_list(Node)) of
undefined ->
- maybe_redirect(Rest, [Node|Acc]);
+ maybe_redirect(Rest, [Node | Acc]);
Redirect ->
- maybe_redirect(Rest, [list_to_atom(Redirect)|Acc])
+ maybe_redirect(Rest, [list_to_atom(Redirect) | Acc])
end.
count_conflicts(#full_doc_info{rev_tree = T}) ->
- Leafs = [1 || {#leaf{deleted=false}, _} <- couch_key_tree:get_all_leafs(T)],
+ Leafs = [1 || {#leaf{deleted = false}, _} <- couch_key_tree:get_all_leafs(T)],
length(Leafs) - 1.
-
% Ensure the design doc which was added in 3.2.0 is deleted as we switched to using a BDU
% function instead. After a few releases this function could be removed as well
%
@@ -203,58 +221,63 @@ ensure_custodian_ddoc_is_deleted(Db) ->
end
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
get_range_counts_test_() ->
- [?_assertEqual(Res, get_range_counts(N, Shards, AllShards)) || {N, Shards,
- AllShards, Res} <- [
- % No shards are present. There is a full range shard that would
- % fit. Report that range as missing.
- {0, [], [full()], [{{0, ?RING_END}, 0}]},
-
-        % Can't complete the ring. But would complete it if it had the
- % {2, ?RING_END} interval available.
- {0, [sh(0, 1)], [sh(0, 1), sh(2, ?RING_END)], [{{2, ?RING_END}, 0}]},
-
- % Can complete the ring only 1 time. Report that range as the
- % one available with a count of 1
- {1, [full()], [full(), full()], [{{0, ?RING_END}, 1}]},
-
- % Can complete the ring only 1 time with a full range shard, but
-        % there is also {2, ?RING_END} that would complete another
-        % ring as well if {0, 1} was present.
- {1, [sh(2, ?RING_END), full()], [sh(0, 1), sh(2, ?RING_END), full()],
- [
+ [
+ ?_assertEqual(Res, get_range_counts(N, Shards, AllShards))
+ || {N, Shards, AllShards, Res} <- [
+ % No shards are present. There is a full range shard that would
+ % fit. Report that range as missing.
+ {0, [], [full()], [{{0, ?RING_END}, 0}]},
+
+            % Can't complete the ring. But would complete it if it had the
+ % {2, ?RING_END} interval available.
+ {0, [sh(0, 1)], [sh(0, 1), sh(2, ?RING_END)], [{{2, ?RING_END}, 0}]},
+
+ % Can complete the ring only 1 time. Report that range as the
+ % one available with a count of 1
+ {1, [full()], [full(), full()], [{{0, ?RING_END}, 1}]},
+
+ % Can complete the ring only 1 time with a full range shard, but
+            % there is also {2, ?RING_END} that would complete another
+            % ring as well if {0, 1} was present.
+ {1, [sh(2, ?RING_END), full()], [sh(0, 1), sh(2, ?RING_END), full()], [
{{0, 1}, 0},
{{0, ?RING_END}, 1},
{{2, ?RING_END}, 1}
- ]
- },
-
-        % Can complete the ring 2 times [{0, 2},{3, ?RING_END}] and full(),
-        % and there is a remnant of a 5, 9 range that would complete the ring
- % as well if {0, 4} and {10, ?RING_END} were present. So report
- {2, [sh(0, 2), sh(3, ?RING_END), sh(5, 9), full()], [sh(0, 2), sh(3,
- ?RING_END), full(), sh(0, 4), sh(5, 9), sh(10, ?RING_END)],
- [
- {{0, 2}, 1},
- {{0, 4}, 0},
- {{0, ?RING_END}, 1},
- {{3, ?RING_END}, 1},
- {{5, 9}, 1},
- {{10, ?RING_END}, 0}
- ]
- }
- ]].
-
+ ]},
+
+            % Can complete the ring 2 times [{0, 2},{3, ?RING_END}] and full(),
+            % and there is a remnant of a 5, 9 range that would complete the ring
+ % as well if {0, 4} and {10, ?RING_END} were present. So report
+ {2, [sh(0, 2), sh(3, ?RING_END), sh(5, 9), full()],
+ [
+ sh(0, 2),
+ sh(
+ 3,
+ ?RING_END
+ ),
+ full(),
+ sh(0, 4),
+ sh(5, 9),
+ sh(10, ?RING_END)
+ ],
+ [
+ {{0, 2}, 1},
+ {{0, 4}, 0},
+ {{0, ?RING_END}, 1},
+ {{3, ?RING_END}, 1},
+ {{5, 9}, 1},
+ {{10, ?RING_END}, 0}
+ ]}
+ ]
+ ].
full() ->
#shard{range = [0, ?RING_END]}.
-
sh(B, E) ->
#shard{range = [B, E]}.
diff --git a/src/ddoc_cache/src/ddoc_cache.erl b/src/ddoc_cache/src/ddoc_cache.erl
index 50cac3039..747abc753 100644
--- a/src/ddoc_cache/src/ddoc_cache.erl
+++ b/src/ddoc_cache/src/ddoc_cache.erl
@@ -12,7 +12,6 @@
-module(ddoc_cache).
-
-export([
open_doc/2,
open_doc/3,
@@ -24,37 +23,31 @@
open/2
]).
-
open_doc(DbName, DocId) ->
Key = {ddoc_cache_entry_ddocid, {DbName, DocId}},
ddoc_cache_lru:open(Key).
-
open_doc(DbName, DocId, RevId) ->
Key = {ddoc_cache_entry_ddocid_rev, {DbName, DocId, RevId}},
ddoc_cache_lru:open(Key).
-
open_validation_funs(DbName) ->
Key = {ddoc_cache_entry_validation_funs, DbName},
ddoc_cache_lru:open(Key).
-
open_custom(DbName, Mod) ->
Key = {ddoc_cache_entry_custom, {DbName, Mod}},
ddoc_cache_lru:open(Key).
-
refresh(ShardDbName, DDocIds) when is_list(DDocIds) ->
DbName = mem3:dbname(ShardDbName),
ddoc_cache_lru:refresh(DbName, DDocIds).
-
open(DbName, validation_funs) ->
open_validation_funs(DbName);
open(DbName, Module) when is_atom(Module) ->
open_custom(DbName, Module);
-open(DbName, <<"_design/", _/binary>>=DDocId) when is_binary(DbName) ->
+open(DbName, <<"_design/", _/binary>> = DDocId) when is_binary(DbName) ->
open_doc(DbName, DDocId);
open(DbName, DDocId) when is_binary(DDocId) ->
open_doc(DbName, <<"_design/", DDocId/binary>>).
diff --git a/src/ddoc_cache/src/ddoc_cache_app.erl b/src/ddoc_cache/src/ddoc_cache_app.erl
index 5afa7ac95..3f2f02d5d 100644
--- a/src/ddoc_cache/src/ddoc_cache_app.erl
+++ b/src/ddoc_cache/src/ddoc_cache_app.erl
@@ -13,13 +13,10 @@
-module(ddoc_cache_app).
-behaviour(application).
-
-export([start/2, stop/1]).
-
start(_StartType, _StartArgs) ->
ddoc_cache_sup:start_link().
-
stop(_State) ->
ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry.erl b/src/ddoc_cache/src/ddoc_cache_entry.erl
index 32d3ec1a7..5a1711dd8 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry.erl
@@ -43,17 +43,14 @@
do_open/1
]).
-
-include("ddoc_cache.hrl").
-
-ifndef(TEST).
-define(ENTRY_SHUTDOWN_TIMEOUT, 5000).
-else.
-define(ENTRY_SHUTDOWN_TIMEOUT, 500).
-endif.
-
-record(st, {
key,
val,
@@ -63,28 +60,22 @@
accessed
}).
-
dbname({Mod, Arg}) ->
Mod:dbname(Arg).
-
ddocid({Mod, Arg}) ->
Mod:ddocid(Arg).
-
recover({Mod, Arg}) ->
Mod:recover(Arg).
-
insert({Mod, Arg}, Value) ->
Mod:insert(Arg, Value).
-
start_link(Key, Default) ->
Pid = proc_lib:spawn_link(?MODULE, init, [{Key, Default}]),
{ok, Pid}.
-
shutdown(Pid) ->
Ref = erlang:monitor(process, Pid),
ok = gen_server:cast(Pid, shutdown),
@@ -98,7 +89,6 @@ shutdown(Pid) ->
erlang:exit({timeout, {entry_shutdown, Pid}})
end.
-
open(Pid, Key) ->
try
Resp = gen_server:call(Pid, open),
@@ -118,15 +108,12 @@ open(Pid, Key) ->
recover(Key)
end.
-
accessed(Pid) ->
gen_server:cast(Pid, accessed).
-
refresh(Pid) ->
gen_server:cast(Pid, force_refresh).
-
init({Key, undefined}) ->
true = ets:update_element(?CACHE, Key, {#entry.pid, self()}),
St = #st{
@@ -137,7 +124,6 @@ init({Key, undefined}) ->
},
?EVENT(started, Key),
gen_server:enter_loop(?MODULE, [], St);
-
init({Key, Wrapped}) ->
Default = ddoc_cache_value:unwrap(Wrapped),
Updates = [
@@ -158,7 +144,6 @@ init({Key, Wrapped}) ->
?EVENT(default_started, Key),
gen_server:enter_loop(?MODULE, [], St, hibernate).
-
terminate(_Reason, St) ->
#st{
key = Key,
@@ -172,30 +157,30 @@ terminate(_Reason, St) ->
true = ets:select_delete(?CACHE, CacheMSpec) < 2,
% We may have already deleted our LRU entry
% during shutdown
- if Ts == undefined -> ok; true ->
- LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
- true = ets:select_delete(?LRU, LruMSpec) < 2
+ if
+ Ts == undefined ->
+ ok;
+ true ->
+ LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+ true = ets:select_delete(?LRU, LruMSpec) < 2
end,
% Blow away any current opener if it exists
- if not is_pid(Pid) -> ok; true ->
- catch exit(Pid, kill)
+ if
+ not is_pid(Pid) -> ok;
+ true -> catch exit(Pid, kill)
end,
ok.
-
handle_call(open, From, #st{opener = Pid} = St) when is_pid(Pid) ->
NewSt = St#st{
waiters = [From | St#st.waiters]
},
{noreply, NewSt};
-
handle_call(open, _From, St) ->
{reply, St#st.val, St};
-
handle_call(Msg, _From, St) ->
{stop, {bad_call, Msg}, {bad_call, Msg}, St}.
-
handle_cast(accessed, St) ->
?EVENT(accessed, St#st.key),
drain_accessed(),
@@ -203,7 +188,6 @@ handle_cast(accessed, St) ->
accessed = St#st.accessed + 1
},
{noreply, update_lru(NewSt)};
-
handle_cast(force_refresh, St) ->
% If we had frequent design document updates
% they could end up racing accessed events and
@@ -211,18 +195,18 @@ handle_cast(force_refresh, St) ->
% cache. To prevent this we just make sure that
% accessed is set to at least 1 before we
% execute a refresh.
- NewSt = if St#st.accessed > 0 -> St; true ->
- St#st{accessed = 1}
- end,
+ NewSt =
+ if
+ St#st.accessed > 0 -> St;
+ true -> St#st{accessed = 1}
+ end,
% We remove the cache entry value so that any
% new client comes to us for the refreshed
% value.
true = ets:update_element(?CACHE, St#st.key, {#entry.val, undefined}),
handle_cast(refresh, NewSt);
-
handle_cast(refresh, #st{accessed = 0} = St) ->
{stop, normal, St};
-
handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
#st{
key = Key
@@ -233,7 +217,6 @@ handle_cast(refresh, #st{opener = Ref} = St) when is_reference(Ref) ->
accessed = 0
},
{noreply, NewSt};
-
handle_cast(refresh, #st{opener = Pid} = St) when is_pid(Pid) ->
catch exit(Pid, kill),
receive
@@ -244,15 +227,12 @@ handle_cast(refresh, #st{opener = Pid} = St) when is_pid(Pid) ->
accessed = 0
},
{noreply, NewSt};
-
handle_cast(shutdown, St) ->
remove_from_cache(St),
{stop, normal, St};
-
handle_cast(Msg, St) ->
{stop, {bad_cast, Msg}, St}.
-
handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
case Resp of
{open_ok, Key, {ok, Val}} ->
@@ -275,26 +255,22 @@ handle_info({'DOWN', _, _, Pid, Resp}, #st{key = Key, opener = Pid} = St) ->
respond(St#st.waiters, {Status, Other}),
{stop, normal, NewSt}
end;
-
handle_info(Msg, St) ->
{stop, {bad_info, Msg}, St}.
-
code_change(_, St, _) ->
{ok, St}.
-
spawn_opener(Key) ->
{Pid, _} = erlang:spawn_monitor(?MODULE, do_open, [Key]),
Pid.
-
start_timer() ->
TimeOut = config:get_integer(
- "ddoc_cache", "refresh_timeout", ?REFRESH_TIMEOUT),
+ "ddoc_cache", "refresh_timeout", ?REFRESH_TIMEOUT
+ ),
erlang:send_after(TimeOut, self(), {'$gen_cast', refresh}).
-
do_open(Key) ->
try recover(Key) of
Resp ->
@@ -303,26 +279,21 @@ do_open(Key) ->
erlang:exit({open_error, Key, {T, R, S}})
end.
-
update_lru(#st{key = Key, ts = Ts} = St) ->
remove_from_lru(Ts, Key),
NewTs = os:timestamp(),
true = ets:insert(?LRU, {{NewTs, Key, self()}}),
St#st{ts = NewTs}.
-
update_cache(#st{val = undefined} = St, Val) ->
true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
?EVENT(inserted, St#st.key);
-
update_cache(#st{val = V1} = _St, V2) when {open_ok, {ok, V2}} == V1 ->
?EVENT(update_noop, _St#st.key);
-
update_cache(St, Val) ->
true = ets:update_element(?CACHE, St#st.key, {#entry.val, Val}),
?EVENT(updated, {St#st.key, Val}).
-
remove_from_cache(St) ->
#st{
key = Key,
@@ -335,14 +306,15 @@ remove_from_cache(St) ->
?EVENT(removed, St#st.key),
ok.
-
remove_from_lru(Ts, Key) ->
- if Ts == undefined -> ok; true ->
- LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
- 1 = ets:select_delete(?LRU, LruMSpec)
+ if
+ Ts == undefined ->
+ ok;
+ true ->
+ LruMSpec = [{{{Ts, Key, self()}}, [], [true]}],
+ 1 = ets:select_delete(?LRU, LruMSpec)
end.
-
drain_accessed() ->
receive
{'$gen_cast', accessed} ->
@@ -351,6 +323,5 @@ drain_accessed() ->
ok
end.
-
respond(Waiters, Resp) ->
[gen_server:reply(W, Resp) || W <- Waiters].
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
index 9eaf16f34..8747b46bc 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_custom.erl
@@ -12,7 +12,6 @@
-module(ddoc_cache_entry_custom).
-
-export([
dbname/1,
ddocid/1,
@@ -20,18 +19,14 @@
insert/2
]).
-
dbname({DbName, _}) ->
DbName.
-
ddocid(_) ->
no_ddocid.
-
recover({DbName, Mod}) ->
Mod:recover(DbName).
-
insert(_, _) ->
ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
index 5248469fb..cf40725e4 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl
@@ -12,7 +12,6 @@
-module(ddoc_cache_entry_ddocid).
-
-export([
dbname/1,
ddocid/1,
@@ -20,27 +19,21 @@
insert/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-
dbname({DbName, _}) ->
DbName.
-
ddocid({_, DDocId}) ->
DDocId.
-
recover({DbName, DDocId}) ->
fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]).
-
insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) ->
{Depth, [RevId | _]} = Revs,
Rev = {Depth, RevId},
Key = {ddoc_cache_entry_ddocid_rev, {DbName, DDocId, Rev}},
spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
-
insert(_, _) ->
ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
index 868fa7789..5126f5210 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl
@@ -12,7 +12,6 @@
-module(ddoc_cache_entry_ddocid_rev).
-
-export([
dbname/1,
ddocid/1,
@@ -20,28 +19,21 @@
insert/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-
dbname({DbName, _, _}) ->
DbName.
-
ddocid({_, DDocId, _}) ->
DDocId.
-
recover({DbName, DDocId, Rev}) ->
Opts = [ejson_body, ?ADMIN_CTX],
{ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts),
Resp.
-
insert({DbName, DDocId, _Rev}, {ok, #doc{} = DDoc}) ->
Key = {ddoc_cache_entry_ddocid, {DbName, DDocId}},
spawn(fun() -> ddoc_cache_lru:insert(Key, DDoc) end);
-
insert(_, _) ->
ok.
-
diff --git a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
index 2182dead6..bcd122252 100644
--- a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
+++ b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl
@@ -12,7 +12,6 @@
-module(ddoc_cache_entry_validation_funs).
-
-export([
dbname/1,
ddocid/1,
@@ -20,25 +19,24 @@
insert/2
]).
-
dbname(DbName) ->
DbName.
-
ddocid(_) ->
no_ddocid.
-
recover(DbName) ->
{ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)),
- Funs = lists:flatmap(fun(DDoc) ->
- case couch_doc:get_validate_doc_fun(DDoc) of
- nil -> [];
- Fun -> [Fun]
- end
- end, DDocs),
+ Funs = lists:flatmap(
+ fun(DDoc) ->
+ case couch_doc:get_validate_doc_fun(DDoc) of
+ nil -> [];
+ Fun -> [Fun]
+ end
+ end,
+ DDocs
+ ),
{ok, Funs}.
-
insert(_, _) ->
ok.
diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 28a8a64c4..10b4aa0f0 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0,
open/1,
@@ -35,24 +34,21 @@
handle_db_event/3
]).
-
-include("ddoc_cache.hrl").
-
-define(OPENER, ddoc_cache_opener).
-
-record(st, {
- pids, % pid -> key
- dbs, % dbname -> docid -> key -> pid
+ % pid -> key
+ pids,
+ % dbname -> docid -> key -> pid
+ dbs,
evictor
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
open(Key) ->
try ets:lookup(?CACHE, Key) of
[] ->
@@ -66,12 +62,12 @@ open(Key) ->
couch_stats:increment_counter([ddoc_cache, hit]),
ddoc_cache_entry:accessed(Pid),
{ok, Val}
- catch _:_ ->
- couch_stats:increment_counter([ddoc_cache, recovery]),
- ddoc_cache_entry:recover(Key)
+ catch
+ _:_ ->
+ couch_stats:increment_counter([ddoc_cache, recovery]),
+ ddoc_cache_entry:recover(Key)
end.
-
insert(Key, Value) ->
case ets:lookup(?CACHE, Key) of
[] ->
@@ -81,27 +77,26 @@ insert(Key, Value) ->
ok
end.
-
refresh(DbName, DDocIds) ->
gen_server:cast(?MODULE, {refresh, DbName, DDocIds}).
-
init(_) ->
couch_util:set_mqd_off_heap(?MODULE),
process_flag(trap_exit, true),
BaseOpts = [public, named_table],
- CacheOpts = [
- set,
- {read_concurrency, true},
- {keypos, #entry.key}
- ] ++ BaseOpts,
+ CacheOpts =
+ [
+ set,
+ {read_concurrency, true},
+ {keypos, #entry.key}
+ ] ++ BaseOpts,
ets:new(?CACHE, CacheOpts),
ets:new(?LRU, [ordered_set, {write_concurrency, true}] ++ BaseOpts),
{ok, Pids} = khash:new(),
{ok, Dbs} = khash:new(),
{ok, Evictor} = couch_event:link_listener(
- ?MODULE, handle_db_event, nil, [all_dbs]
- ),
+ ?MODULE, handle_db_event, nil, [all_dbs]
+ ),
?EVENT(lru_init, nil),
{ok, #st{
pids = Pids,
@@ -109,7 +104,6 @@ init(_) ->
evictor = Evictor
}}.
-
terminate(_Reason, St) ->
case is_pid(St#st.evictor) of
true -> exit(St#st.evictor, kill);
@@ -117,7 +111,6 @@ terminate(_Reason, St) ->
end,
ok.
-
handle_call({start, Key, Default}, _From, St) ->
#st{
pids = Pids,
@@ -141,40 +134,43 @@ handle_call({start, Key, Default}, _From, St) ->
[#entry{pid = Pid}] ->
{reply, {ok, Pid}, St}
end;
-
handle_call(Msg, _From, St) ->
{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
handle_cast({evict, DbName}, St) ->
gen_server:abcast(mem3:nodes(), ?OPENER, {do_evict, DbName}),
{noreply, St};
-
handle_cast({refresh, DbName, DDocIds}, St) ->
gen_server:abcast(mem3:nodes(), ?OPENER, {do_evict, DbName, DDocIds}),
{noreply, St};
-
handle_cast({do_evict, DbName}, St) ->
#st{
dbs = Dbs
} = St,
- ToRem = case khash:lookup(Dbs, DbName) of
- {value, DDocIds} ->
- AccOut = khash:fold(DDocIds, fun(_, Keys, Acc1) ->
- khash:to_list(Keys) ++ Acc1
- end, []),
- ?EVENT(evicted, DbName),
- AccOut;
- not_found ->
- ?EVENT(evict_noop, DbName),
- []
- end,
- lists:foreach(fun({Key, Pid}) ->
- remove_entry(St, Key, Pid)
- end, ToRem),
+ ToRem =
+ case khash:lookup(Dbs, DbName) of
+ {value, DDocIds} ->
+ AccOut = khash:fold(
+ DDocIds,
+ fun(_, Keys, Acc1) ->
+ khash:to_list(Keys) ++ Acc1
+ end,
+ []
+ ),
+ ?EVENT(evicted, DbName),
+ AccOut;
+ not_found ->
+ ?EVENT(evict_noop, DbName),
+ []
+ end,
+ lists:foreach(
+ fun({Key, Pid}) ->
+ remove_entry(St, Key, Pid)
+ end,
+ ToRem
+ ),
khash:del(Dbs, DbName),
{noreply, St};
-
handle_cast({do_refresh, DbName, DDocIdList}, St) ->
#st{
dbs = Dbs
@@ -185,28 +181,32 @@ handle_cast({do_refresh, DbName, DDocIdList}, St) ->
% design documents.
case khash:lookup(Dbs, DbName) of
{value, DDocIds} ->
- lists:foreach(fun(DDocId) ->
- case khash:lookup(DDocIds, DDocId) of
- {value, Keys} ->
- khash:fold(Keys, fun(_, Pid, _) ->
- ddoc_cache_entry:refresh(Pid)
- end, nil);
- not_found ->
- ok
- end
- end, [no_ddocid | DDocIdList]);
+ lists:foreach(
+ fun(DDocId) ->
+ case khash:lookup(DDocIds, DDocId) of
+ {value, Keys} ->
+ khash:fold(
+ Keys,
+ fun(_, Pid, _) ->
+ ddoc_cache_entry:refresh(Pid)
+ end,
+ nil
+ );
+ not_found ->
+ ok
+ end
+ end,
+ [no_ddocid | DDocIdList]
+ );
not_found ->
ok
end,
{noreply, St};
-
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
-
handle_info({'EXIT', Pid, Reason}, #st{evictor = Pid} = St) ->
{stop, Reason, St};
-
handle_info({'EXIT', Pid, normal}, St) ->
% This clause handles when an entry starts
% up but encounters an error or uncacheable
@@ -218,34 +218,29 @@ handle_info({'EXIT', Pid, normal}, St) ->
khash:del(Pids, Pid),
remove_key(St, Key),
{noreply, St};
-
handle_info(Msg, St) ->
{stop, {invalid_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
handle_db_event(ShardDbName, created, St) ->
gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
{ok, St};
-
handle_db_event(ShardDbName, deleted, St) ->
gen_server:cast(?MODULE, {evict, mem3:dbname(ShardDbName)}),
{ok, St};
-
handle_db_event(_DbName, _Event, St) ->
{ok, St}.
-
lru_start(Key, DoInsert) ->
case gen_server:call(?MODULE, {start, Key, undefined}, infinity) of
{ok, Pid} ->
couch_stats:increment_counter([ddoc_cache, miss]),
Resp = ddoc_cache_entry:open(Pid, Key),
- if not DoInsert -> ok; true ->
- ddoc_cache_entry:insert(Key, Resp)
+ if
+ not DoInsert -> ok;
+ true -> ddoc_cache_entry:insert(Key, Resp)
end,
Resp;
full ->
@@ -253,23 +248,23 @@ lru_start(Key, DoInsert) ->
ddoc_cache_entry:recover(Key)
end.
-
trim(_, 0) ->
full;
-
trim(St, MaxSize) ->
CurSize = ets:info(?CACHE, memory) * erlang:system_info(wordsize),
- if CurSize =< MaxSize -> ok; true ->
- case ets:first(?LRU) of
- {_Ts, Key, Pid} ->
- remove_entry(St, Key, Pid),
- trim(St, MaxSize);
- '$end_of_table' ->
- full
- end
+ if
+ CurSize =< MaxSize ->
+ ok;
+ true ->
+ case ets:first(?LRU) of
+ {_Ts, Key, Pid} ->
+ remove_entry(St, Key, Pid),
+ trim(St, MaxSize);
+ '$end_of_table' ->
+ full
+ end
end.
-
remove_entry(St, Key, Pid) ->
#st{
pids = Pids
@@ -279,7 +274,6 @@ remove_entry(St, Key, Pid) ->
khash:del(Pids, Pid),
remove_key(St, Key).
-
store_key(Dbs, Key, Pid) ->
DbName = ddoc_cache_entry:dbname(Key),
DDocId = ddoc_cache_entry:ddocid(Key),
@@ -298,7 +292,6 @@ store_key(Dbs, Key, Pid) ->
khash:put(Dbs, DbName, DDocIds)
end.
-
remove_key(St, Key) ->
#st{
dbs = Dbs
@@ -317,7 +310,6 @@ remove_key(St, Key) ->
_ -> ok
end.
-
unlink_and_flush(Pid) ->
erlang:unlink(Pid),
    % It's possible that the entry process has already exited before
diff --git a/src/ddoc_cache/src/ddoc_cache_opener.erl b/src/ddoc_cache/src/ddoc_cache_opener.erl
index 52de54217..b6b3dc9d5 100644
--- a/src/ddoc_cache/src/ddoc_cache_opener.erl
+++ b/src/ddoc_cache/src/ddoc_cache_opener.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0
]).
@@ -28,39 +27,31 @@
code_change/3
]).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init(_) ->
{ok, nil}.
terminate(_Reason, _St) ->
ok.
-
handle_call(Msg, _From, St) ->
{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
% The do_evict clauses are upgrades while we're
% in a rolling reboot.
handle_cast({do_evict, _} = Msg, St) ->
gen_server:cast(ddoc_cache_lru, Msg),
{noreply, St};
-
handle_cast({do_evict, DbName, DDocIds}, St) ->
gen_server:cast(ddoc_cache_lru, {do_refresh, DbName, DDocIds}),
{noreply, St};
-
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
-
handle_info(Msg, St) ->
{stop, {invalid_info, Msg}, St}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
diff --git a/src/ddoc_cache/src/ddoc_cache_sup.erl b/src/ddoc_cache/src/ddoc_cache_sup.erl
index 6fff9ef4f..cf1a1aafb 100644
--- a/src/ddoc_cache/src/ddoc_cache_sup.erl
+++ b/src/ddoc_cache/src/ddoc_cache_sup.erl
@@ -13,17 +13,14 @@
-module(ddoc_cache_sup).
-behaviour(supervisor).
-
-export([
start_link/0,
init/1
]).
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
Children = [
{
diff --git a/src/ddoc_cache/src/ddoc_cache_value.erl b/src/ddoc_cache/src/ddoc_cache_value.erl
index 21a5bb549..59585ee58 100644
--- a/src/ddoc_cache/src/ddoc_cache_value.erl
+++ b/src/ddoc_cache/src/ddoc_cache_value.erl
@@ -12,16 +12,13 @@
-module(ddoc_cache_value).
-
-export([
wrap/1,
unwrap/1
]).
-
wrap(Value) ->
{?MODULE, term_to_binary(Value)}.
-
unwrap({?MODULE, Bin}) when is_binary(Bin) ->
binary_to_term(Bin).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
index b576d88bb..54c8c585b 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_basic_test.erl
@@ -12,32 +12,26 @@
-module(ddoc_cache_basic_test).
-
-export([
recover/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
recover(DbName) ->
{ok, {DbName, totes_custom}}.
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:new(ddoc_cache_ev, [passthrough]),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_basic_test_() ->
{
setup,
@@ -54,7 +48,6 @@ check_basic_test_() ->
])
}.
-
check_no_vdu_test_() ->
{
setup,
@@ -66,7 +59,6 @@ check_no_vdu_test_() ->
])
}.
-
cache_ddoc({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -80,7 +72,6 @@ cache_ddoc({DbName, _}) ->
?assertEqual(Resp1, Resp2),
?assertEqual(2, ets:info(?CACHE, size)).
-
cache_ddoc_rev({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -100,7 +91,6 @@ cache_ddoc_rev({DbName, _}) ->
?assertMatch({ok, #doc{id = ?FOOBAR}}, Resp3),
?assertEqual(2, ets:info(?CACHE, size)).
-
cache_vdu({DbName, _}) ->
ddoc_cache_tutil:clear(),
?assertEqual(0, ets:info(?CACHE, size)),
@@ -111,7 +101,6 @@ cache_vdu({DbName, _}) ->
?assertEqual(Resp1, Resp2),
?assertEqual(1, ets:info(?CACHE, size)).
-
cache_custom({DbName, _}) ->
ddoc_cache_tutil:clear(),
?assertEqual(0, ets:info(?CACHE, size)),
@@ -122,7 +111,6 @@ cache_custom({DbName, _}) ->
?assertEqual(Resp1, Resp2),
?assertEqual(1, ets:info(?CACHE, size)).
-
cache_ddoc_refresher_unchanged({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -136,7 +124,6 @@ cache_ddoc_refresher_unchanged({DbName, _}) ->
Tab2 = lists:sort(ets:tab2list(?CACHE)),
?assertEqual(Tab2, Tab1).
-
dont_cache_not_found({DbName, _}) ->
DDocId = <<"_design/not_found">>,
ddoc_cache_tutil:clear(),
@@ -145,7 +132,6 @@ dont_cache_not_found({DbName, _}) ->
?assertEqual(0, ets:info(?CACHE, size)),
?assertEqual(0, ets:info(?LRU, size)).
-
deprecated_api_works({DbName, _}) ->
ddoc_cache_tutil:clear(),
{ok, _} = ddoc_cache:open(DbName, ?FOOBAR),
@@ -153,7 +139,6 @@ deprecated_api_works({DbName, _}) ->
{ok, _} = ddoc_cache:open(DbName, ?MODULE),
{ok, _} = ddoc_cache:open(DbName, validation_funs).
-
cache_no_vdu_no_ddoc({DbName, _}) ->
ddoc_cache_tutil:clear(),
Resp = ddoc_cache:open_validation_funs(DbName),
@@ -161,7 +146,6 @@ cache_no_vdu_no_ddoc({DbName, _}) ->
?assertEqual(1, ets:info(?CACHE, size)),
?assertEqual(1, ets:info(?LRU, size)).
-
cache_no_vdu_empty_ddoc({DbName, _}) ->
ddoc_cache_tutil:clear(),
DDoc = #doc{
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
index b1a185bdc..d2d0559c6 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_coverage_test.erl
@@ -12,12 +12,10 @@
-module(ddoc_cache_coverage_test).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
coverage_test_() ->
{
setup,
@@ -29,13 +27,11 @@ coverage_test_() ->
]
}.
-
restart_lru() ->
send_bad_messages(ddoc_cache_lru),
?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})),
?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).
-
stop_on_evictor_death() ->
meck:new(ddoc_cache_ev, [passthrough]),
try
@@ -54,7 +50,6 @@ stop_on_evictor_death() ->
meck:unload()
end.
-
send_bad_messages(Name) ->
wait_for_restart(Name, fun() ->
?assertEqual({invalid_call, foo}, gen_server:call(Name, foo))
@@ -66,7 +61,6 @@ send_bad_messages(Name) ->
whereis(Name) ! foo
end).
-
wait_for_restart(Server, Fun) ->
Ref = erlang:monitor(process, whereis(Server)),
Fun(),
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
index d46bdde32..d6538e4a3 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_disabled_test.erl
@@ -12,18 +12,15 @@
-module(ddoc_cache_disabled_test).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
config:set("ddoc_cache", "max_size", "0", false),
Ctx.
-
check_disabled_test_() ->
{
setup,
@@ -36,7 +33,6 @@ check_disabled_test_() ->
])
}.
-
resp_ok({DbName, _}) ->
ddoc_cache_tutil:clear(),
Resp = ddoc_cache:open_doc(DbName, ?FOOBAR),
@@ -44,7 +40,6 @@ resp_ok({DbName, _}) ->
?assertEqual(0, ets:info(?CACHE, size)),
?assertEqual(0, ets:info(?LRU, size)).
-
resp_not_found({DbName, _}) ->
ddoc_cache_tutil:clear(),
Resp = ddoc_cache:open_doc(DbName, <<"_design/not_found">>),
@@ -52,7 +47,6 @@ resp_not_found({DbName, _}) ->
?assertEqual(0, ets:info(?CACHE, size)),
?assertEqual(0, ets:info(?LRU, size)).
-
check_effectively_disabled({DbName, _}) ->
config:set("ddoc_cache", "max_size", "1", false),
ddoc_cache_tutil:clear(),
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
index c992bea8d..fdba0f030 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_entry_test.erl
@@ -12,35 +12,28 @@
-module(ddoc_cache_entry_test).
-
-export([
recover/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
recover(<<"foo">>) ->
timer:sleep(30000);
-
recover(DbName) ->
{ok, {DbName, such_custom}}.
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:new(ddoc_cache_ev, [passthrough]),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_entry_test_() ->
{
setup,
@@ -57,7 +50,6 @@ check_entry_test_() ->
])
}.
-
cancel_and_replace_opener(_) ->
Key = {ddoc_cache_entry_custom, {<<"foo">>, ?MODULE}},
true = ets:insert_new(?CACHE, #entry{key = Key}),
@@ -65,7 +57,9 @@ cancel_and_replace_opener(_) ->
Opener1 = element(4, sys:get_state(Entry)),
Ref1 = erlang:monitor(process, Opener1),
gen_server:cast(Entry, force_refresh),
- receive {'DOWN', Ref1, _, _, _} -> ok end,
+ receive
+ {'DOWN', Ref1, _, _, _} -> ok
+ end,
Opener2 = element(4, sys:get_state(Entry)),
?assert(Opener2 /= Opener1),
?assert(is_process_alive(Opener2)),
@@ -73,34 +67,38 @@ cancel_and_replace_opener(_) ->
unlink(Entry),
ddoc_cache_entry:shutdown(Entry).
-
condenses_access_messages({DbName, _}) ->
meck:reset(ddoc_cache_ev),
Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
true = ets:insert(?CACHE, #entry{key = Key}),
{ok, Entry} = ddoc_cache_entry:start_link(Key, undefined),
erlang:suspend_process(Entry),
- lists:foreach(fun(_) ->
- gen_server:cast(Entry, accessed)
- end, lists:seq(1, 100)),
+ lists:foreach(
+ fun(_) ->
+ gen_server:cast(Entry, accessed)
+ end,
+ lists:seq(1, 100)
+ ),
erlang:resume_process(Entry),
meck:wait(1, ddoc_cache_ev, event, [accessed, Key], 1000),
?assertError(
- timeout,
- meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100)
- ),
+ timeout,
+ meck:wait(2, ddoc_cache_ev, event, [accessed, Key], 100)
+ ),
unlink(Entry),
ddoc_cache_entry:shutdown(Entry).
-
kill_opener_on_terminate(_) ->
- Pid = spawn(fun() -> receive _ -> ok end end),
+ Pid = spawn(fun() ->
+ receive
+ _ -> ok
+ end
+ end),
?assert(is_process_alive(Pid)),
St = {st, key, val, Pid, waiters, ts, accessed},
?assertEqual(ok, ddoc_cache_entry:terminate(normal, St)),
?assert(not is_process_alive(Pid)).
-
evict_when_not_accessed(_) ->
meck:reset(ddoc_cache_ev),
Key = {ddoc_cache_entry_custom, {<<"bar">>, ?MODULE}},
@@ -116,17 +114,17 @@ evict_when_not_accessed(_) ->
AccessCount2 = element(7, sys:get_state(Entry)),
?assertEqual(0, AccessCount2),
ok = gen_server:cast(Entry, refresh),
- receive {'DOWN', Ref, _, _, Reason} -> Reason end,
+ receive
+ {'DOWN', Ref, _, _, Reason} -> Reason
+ end,
?assertEqual(normal, Reason),
?assertEqual(0, ets:info(?CACHE, size)).
-
open_dead_entry({DbName, _}) ->
Pid = spawn(fun() -> ok end),
Key = {ddoc_cache_entry_custom, {DbName, ?MODULE}},
?assertEqual(recover(DbName), ddoc_cache_entry:open(Pid, Key)).
-
handles_bad_messages(_) ->
CallExpect = {stop, {bad_call, foo}, {bad_call, foo}, baz},
CastExpect = {stop, {bad_cast, foo}, bar},
@@ -135,25 +133,24 @@ handles_bad_messages(_) ->
?assertEqual(CastExpect, ddoc_cache_entry:handle_cast(foo, bar)),
?assertEqual(InfoExpect, ddoc_cache_entry:handle_info(foo, bar)).
-
handles_code_change(_) ->
CCExpect = {ok, bar},
?assertEqual(CCExpect, ddoc_cache_entry:code_change(foo, bar, baz)).
-
handles_bad_shutdown_test_() ->
- {timeout, 10, ?_test(begin
- ErrorPid = spawn(fun() ->
- receive
- _ -> exit(bad_shutdown)
- end
- end),
- ?assertExit(bad_shutdown, ddoc_cache_entry:shutdown(ErrorPid)),
- NotDeadYetPid = spawn(fun() ->
- timer:sleep(infinity)
- end),
- ?assertExit(
+ {timeout, 10,
+ ?_test(begin
+ ErrorPid = spawn(fun() ->
+ receive
+ _ -> exit(bad_shutdown)
+ end
+ end),
+ ?assertExit(bad_shutdown, ddoc_cache_entry:shutdown(ErrorPid)),
+ NotDeadYetPid = spawn(fun() ->
+ timer:sleep(infinity)
+ end),
+ ?assertExit(
{timeout, {entry_shutdown, NotDeadYetPid}},
ddoc_cache_entry:shutdown(NotDeadYetPid)
)
- end)}.
+ end)}.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
index a451342cf..ded2469a4 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_ev.erl
@@ -16,6 +16,5 @@
event/2
]).
-
event(Name, Arg) ->
couch_log:error("~s :: ~s :: ~p", [?MODULE, Name, Arg]).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
index bd61afc37..68f9f2efe 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_eviction_test.erl
@@ -12,33 +12,27 @@
-module(ddoc_cache_eviction_test).
-
-export([
recover/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("mem3/include/mem3.hrl").
-include("ddoc_cache_test.hrl").
-
recover(DbName) ->
{ok, {DbName, totes_custom}}.
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:new(ddoc_cache_ev, [passthrough]),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_eviction_test_() ->
{
setup,
@@ -51,7 +45,6 @@ check_eviction_test_() ->
])
}.
-
evict_all({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -67,7 +60,6 @@ evict_all({DbName, _}) ->
meck:wait(4, ddoc_cache_ev, event, [removed, '_'], 1000),
?assertEqual(0, ets:info(?CACHE, size)).
-
dont_evict_all_unrelated({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -82,7 +74,6 @@ dont_evict_all_unrelated({DbName, _}) ->
meck:wait(ddoc_cache_ev, event, [evict_noop, <<"test">>], 1000),
?assertEqual(4, ets:info(?CACHE, size)).
-
check_upgrade_clause({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
index 9a5391587..d1dac869a 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_lru_test.erl
@@ -12,39 +12,33 @@
-module(ddoc_cache_lru_test).
-
-export([
recover/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
recover(<<"pause", _/binary>>) ->
- receive go -> ok end,
+ receive
+ go -> ok
+ end,
{ok, paused};
-
recover(<<"big", _/binary>>) ->
{ok, [couch_rand:uniform() || _ <- lists:seq(1, 8192)]};
-
recover(DbName) ->
{ok, DbName}.
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:new(ddoc_cache_ev, [passthrough]),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_not_started_test() ->
% Starting couch, but not ddoc_cache
{
@@ -73,33 +67,42 @@ check_lru_test_() ->
])
}.
-
check_multi_start(_) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
Key = {ddoc_cache_entry_custom, {<<"pause">>, ?MODULE}},
% These will all get sent through ddoc_cache_lru
- Clients = lists:map(fun(_) ->
- spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end)
- end, lists:seq(1, 10)),
+ Clients = lists:map(
+ fun(_) ->
+ spawn_monitor(fun() ->
+ ddoc_cache_lru:open(Key)
+ end)
+ end,
+ lists:seq(1, 10)
+ ),
meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
- lists:foreach(fun({Pid, _Ref}) ->
- ?assert(is_process_alive(Pid))
- end, Clients),
+ lists:foreach(
+ fun({Pid, _Ref}) ->
+ ?assert(is_process_alive(Pid))
+ end,
+ Clients
+ ),
[#entry{pid = Pid}] = ets:tab2list(?CACHE),
Opener = element(4, sys:get_state(Pid)),
OpenerRef = erlang:monitor(process, Opener),
?assert(is_process_alive(Opener)),
Opener ! go,
- receive {'DOWN', OpenerRef, _, _, _} -> ok end,
- lists:foreach(fun({_, Ref}) ->
- receive
- {'DOWN', Ref, _, _, normal} -> ok
- end
- end, Clients).
-
+ receive
+ {'DOWN', OpenerRef, _, _, _} -> ok
+ end,
+ lists:foreach(
+ fun({_, Ref}) ->
+ receive
+ {'DOWN', Ref, _, _, normal} -> ok
+ end
+ end,
+ Clients
+ ).
check_multi_open(_) ->
ddoc_cache_tutil:clear(),
@@ -112,24 +115,38 @@ check_multi_open(_) ->
ddoc_cache_lru:open(Key)
end),
meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
- Clients = [Client1] ++ lists:map(fun(_) ->
- spawn_monitor(fun() ->
- ddoc_cache_lru:open(Key)
- end)
- end, lists:seq(1, 9)),
- lists:foreach(fun({Pid, _Ref}) ->
- ?assert(is_process_alive(Pid))
- end, Clients),
+ Clients =
+ [Client1] ++
+ lists:map(
+ fun(_) ->
+ spawn_monitor(fun() ->
+ ddoc_cache_lru:open(Key)
+ end)
+ end,
+ lists:seq(1, 9)
+ ),
+ lists:foreach(
+ fun({Pid, _Ref}) ->
+ ?assert(is_process_alive(Pid))
+ end,
+ Clients
+ ),
[#entry{pid = Pid}] = ets:tab2list(?CACHE),
Opener = element(4, sys:get_state(Pid)),
OpenerRef = erlang:monitor(process, Opener),
?assert(is_process_alive(Opener)),
Opener ! go,
- receive {'DOWN', OpenerRef, _, _, _} -> ok end,
- lists:foreach(fun({_, Ref}) ->
- receive {'DOWN', Ref, _, _, normal} -> ok end
- end, Clients).
-
+ receive
+ {'DOWN', OpenerRef, _, _, _} -> ok
+ end,
+ lists:foreach(
+ fun({_, Ref}) ->
+ receive
+ {'DOWN', Ref, _, _, normal} -> ok
+ end
+ end,
+ Clients
+ ).
check_capped_size(_) ->
% The extra factor of two in the size checks is
@@ -141,19 +158,24 @@ check_capped_size(_) ->
MaxSize = 1048576,
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
- lists:foreach(fun(I) ->
- DbName = list_to_binary("big_" ++ integer_to_list(I)),
- ddoc_cache:open_custom(DbName, ?MODULE),
- meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
- ?assert(cache_size() < MaxSize * 2)
- end, lists:seq(1, 25)),
- lists:foreach(fun(I) ->
- DbName = list_to_binary("big_" ++ integer_to_list(I)),
- ddoc_cache:open_custom(DbName, ?MODULE),
- meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
- ?assert(cache_size() < MaxSize * 2)
- end, lists:seq(26, 100)).
-
+ lists:foreach(
+ fun(I) ->
+ DbName = list_to_binary("big_" ++ integer_to_list(I)),
+ ddoc_cache:open_custom(DbName, ?MODULE),
+ meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
+ ?assert(cache_size() < MaxSize * 2)
+ end,
+ lists:seq(1, 25)
+ ),
+ lists:foreach(
+ fun(I) ->
+ DbName = list_to_binary("big_" ++ integer_to_list(I)),
+ ddoc_cache:open_custom(DbName, ?MODULE),
+ meck:wait(I, ddoc_cache_ev, event, [started, '_'], 1000),
+ ?assert(cache_size() < MaxSize * 2)
+ end,
+ lists:seq(26, 100)
+ ).
check_cache_refill({DbName, _}) ->
ddoc_cache_tutil:clear(),
@@ -168,10 +190,13 @@ check_cache_refill({DbName, _}) ->
{ddoc_cache_entry_ddocid, {DbName, DDocId}}
end,
- lists:foreach(fun(I) ->
- Key = InitDDoc(I),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
- end, lists:seq(1, 5)),
+ lists:foreach(
+ fun(I) ->
+ Key = InitDDoc(I),
+ meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
+ end,
+ lists:seq(1, 5)
+ ),
ShardName = mem3:name(hd(mem3:shards(DbName))),
{ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
@@ -179,11 +204,13 @@ check_cache_refill({DbName, _}) ->
meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000),
?assertEqual(0, ets:info(?CACHE, size)),
- lists:foreach(fun(I) ->
- Key = InitDDoc(I),
- meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
- end, lists:seq(6, 10)).
-
+ lists:foreach(
+ fun(I) ->
+ Key = InitDDoc(I),
+ meck:wait(ddoc_cache_ev, event, [started, Key], 1000)
+ end,
+ lists:seq(6, 10)
+ ).
check_evict_and_exit(_) ->
ddoc_cache_tutil:clear(),
@@ -214,6 +241,5 @@ check_evict_and_exit(_) ->
timer:sleep(500),
?assertEqual({messages, []}, process_info(self(), messages)).
-
cache_size() ->
ets:info(?CACHE, memory) * erlang:system_info(wordsize).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
index 96682910c..8da535294 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_no_cache_test.erl
@@ -12,29 +12,25 @@
-module(ddoc_cache_no_cache_test).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-
ddoc(DDocId) ->
{ok, #doc{
id = DDocId,
revs = {1, [<<"deadbeefdeadbeef">>]},
- body = {[
- {<<"ohai">>, null}
- ]}
+ body =
+ {[
+ {<<"ohai">>, null}
+ ]}
}}.
-
not_found(_DDocId) ->
{not_found, missing}.
-
return_error(_DDocId) ->
{error, timeout}.
-
no_cache_test_() ->
{
"ddoc_cache no cache test",
@@ -76,12 +72,10 @@ no_cache_open_ok_test(_, _) ->
Resp = ddoc_cache:open_doc(<<"foo">>, <<"bar">>),
?_assertEqual(ddoc(<<"bar">>), Resp).
-
no_cache_open_not_found_test(_, _) ->
Resp = ddoc_cache:open_doc(<<"foo">>, <<"baz">>),
?_assertEqual(not_found(<<"baz">>), Resp).
-
no_cache_open_error_test(_, _) ->
Resp = ddoc_cache:open_doc(<<"foo">>, <<"bif">>),
?_assertEqual(return_error(<<"bif">>), Resp).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
index c7379d26a..8e71b1270 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_open_error_test.erl
@@ -12,12 +12,10 @@
-module(ddoc_cache_open_error_test).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:expect(fabric, open_doc, fun(_, ?FOOBAR, _) ->
@@ -25,12 +23,10 @@ start_couch() ->
end),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_open_error_test_() ->
{
setup,
@@ -41,6 +37,5 @@ check_open_error_test_() ->
])
}.
-
handle_open_error({DbName, _}) ->
?assertError(test_kaboom, ddoc_cache:open_doc(DbName, ?FOOBAR)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
index 73d644f71..f9a9460e7 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_open_test.erl
@@ -23,85 +23,80 @@
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
%% behaviour callbacks
dbname(DbName) ->
DbName.
-
ddocid(_) ->
no_ddocid.
-
recover({deleted, _DbName}) ->
erlang:error(database_does_not_exist);
recover(DbName) ->
ddoc_cache_entry_validation_funs:recover(DbName).
-
insert(_, _) ->
ok.
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:new(ddoc_cache_entry_validation_funs, [passthrough]),
- meck:expect(ddoc_cache_entry_validation_funs, recover,
- ['_'], meck:passthrough()),
+ meck:expect(
+ ddoc_cache_entry_validation_funs,
+ recover,
+ ['_'],
+ meck:passthrough()
+ ),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_open_error_test_() ->
- {
- setup,
- fun start_couch/0,
- fun stop_couch/1,
- ddoc_cache_tutil:with([
- {"should_return_database_does_not_exist",
- fun should_return_database_does_not_exist/1},
- {"should_not_call_recover_when_database_does_not_exist",
- fun should_not_call_recover_when_database_does_not_exist/1},
- {"should_call_recover_when_needed",
- fun should_call_recover_when_needed/1},
- {"should_call_recover_when_needed",
- fun should_not_crash_lru_process/1}
- ])
+ {
+ setup,
+ fun start_couch/0,
+ fun stop_couch/1,
+ ddoc_cache_tutil:with([
+ {"should_return_database_does_not_exist", fun should_return_database_does_not_exist/1},
+ {"should_not_call_recover_when_database_does_not_exist",
+ fun should_not_call_recover_when_database_does_not_exist/1},
+ {"should_call_recover_when_needed", fun should_call_recover_when_needed/1},
+ {"should_call_recover_when_needed", fun should_not_crash_lru_process/1}
+ ])
}.
-
should_return_database_does_not_exist({DbName, _}) ->
?assertError(
database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})).
-
+ ddoc_cache_lru:open({?MODULE, {deleted, DbName}})
+ ).
should_not_call_recover_when_database_does_not_exist({DbName, _}) ->
meck:reset(ddoc_cache_entry_validation_funs),
?assertError(
- database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})),
+ database_does_not_exist,
+ ddoc_cache_lru:open({?MODULE, {deleted, DbName}})
+ ),
?assertError(
timeout,
- meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 100)).
-
+ meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 100)
+ ).
should_call_recover_when_needed({DbName, _}) ->
meck:reset(ddoc_cache_entry_validation_funs),
ddoc_cache_lru:open({?MODULE, DbName}),
?assertEqual(
ok,
- meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 500)).
-
+ meck:wait(1, ddoc_cache_entry_validation_funs, recover, '_', 500)
+ ).
should_not_crash_lru_process({DbName, _}) ->
LRUPid = whereis(ddoc_cache_lru),
?assert(is_process_alive(LRUPid)),
?assertError(
database_does_not_exist,
- ddoc_cache_lru:open({?MODULE, {deleted, DbName}})),
+ ddoc_cache_lru:open({?MODULE, {deleted, DbName}})
+ ),
?assert(is_process_alive(LRUPid)).
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
index c3846360c..ba5bff057 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_opener_test.erl
@@ -12,12 +12,10 @@
-module(ddoc_cache_opener_test).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
empty_hull_test() ->
InitExpect = {ok, nil},
TermExpect = ok,
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
index 24ae346d4..2db5b6cd2 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_refresh_test.erl
@@ -12,32 +12,26 @@
-module(ddoc_cache_refresh_test).
-
-export([
recover/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
recover(DbName) ->
{ok, {DbName, rand_string()}}.
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:new(ddoc_cache_ev, [passthrough]),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_refresh_test_() ->
{
setup,
@@ -53,7 +47,6 @@ check_refresh_test_() ->
])
}.
-
refresh_ddoc({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -74,7 +67,6 @@ refresh_ddoc({DbName, _}) ->
?assertMatch({ok, Expect}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
?assertEqual(2, ets:info(?CACHE, size)).
-
refresh_ddoc_rev({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -95,7 +87,6 @@ refresh_ddoc_rev({DbName, _}) ->
?assertMatch({ok, RevDDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
?assertEqual(2, ets:info(?CACHE, size)).
-
refresh_vdu({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -107,7 +98,6 @@ refresh_vdu({DbName, _}) ->
?assertMatch({ok, []}, ddoc_cache:open_validation_funs(DbName)),
?assertEqual(1, ets:info(?CACHE, size)).
-
refresh_custom({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -118,7 +108,6 @@ refresh_custom({DbName, _}) ->
?assertNotEqual({ok, Resp1}, ddoc_cache:open_custom(DbName, ?MODULE)),
?assertEqual(1, ets:info(?CACHE, size)).
-
refresh_multiple({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -147,7 +136,6 @@ refresh_multiple({DbName, _}) ->
?assertEqual({ok, DDoc}, ddoc_cache:open_doc(DbName, ?FOOBAR, Rev)),
?assertEqual(2, ets:info(?CACHE, size)).
-
check_upgrade_clause({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -156,18 +144,15 @@ check_upgrade_clause({DbName, _}) ->
gen_server:cast(ddoc_cache_opener, {do_evict, DbName, [?FOOBAR]}),
meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000).
-
rand_string() ->
Bin = crypto:strong_rand_bytes(8),
to_hex(Bin, []).
-
to_hex(<<>>, Acc) ->
list_to_binary(lists:reverse(Acc));
to_hex(<<C1:4, C2:4, Rest/binary>>, Acc) ->
to_hex(Rest, [hexdig(C1), hexdig(C2) | Acc]).
-
hexdig(C) when C >= 0, C =< 9 ->
C + $0;
hexdig(C) when C >= 10, C =< 15 ->
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
index e40518529..f974dd804 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_remove_test.erl
@@ -12,18 +12,15 @@
-module(ddoc_cache_remove_test).
-
-export([
recover/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
-include_lib("eunit/include/eunit.hrl").
-include("ddoc_cache_test.hrl").
-
recover(DbName) ->
{ok, #doc{body = {Body}}} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
case couch_util:get_value(<<"status">>, Body) of
@@ -35,18 +32,15 @@ recover(DbName) ->
erlang:error(thpppt)
end.
-
start_couch() ->
Ctx = ddoc_cache_tutil:start_couch(),
meck:new(ddoc_cache_ev, [passthrough]),
Ctx.
-
stop_couch(Ctx) ->
meck:unload(),
ddoc_cache_tutil:stop_couch(Ctx).
-
check_refresh_test_() ->
{
setup,
@@ -61,7 +55,6 @@ check_refresh_test_() ->
])
}.
-
remove_ddoc({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -88,7 +81,6 @@ remove_ddoc({DbName, _}) ->
?assertMatch({not_found, deleted}, ddoc_cache:open_doc(DbName, ?FOOBAR)),
?assertEqual(1, ets:info(?CACHE, size)).
-
remove_ddoc_rev({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -100,8 +92,8 @@ remove_ddoc_rev({DbName, _}) ->
% Notice the sort so that we know we're getting the
% revid version second.
- [_, #entry{key = Key, val = DDoc, pid = Pid}]
- = lists:sort(ets:tab2list(?CACHE)),
+ [_, #entry{key = Key, val = DDoc, pid = Pid}] =
+ lists:sort(ets:tab2list(?CACHE)),
NewDDoc = DDoc#doc{
body = {[{<<"an">>, <<"update">>}]}
@@ -109,19 +101,21 @@ remove_ddoc_rev({DbName, _}) ->
{ok, _} = fabric:update_doc(DbName, NewDDoc, [?ADMIN_CTX]),
meck:wait(ddoc_cache_ev, event, [update_noop, Key], 1000),
% Compact the database so that the old rev is removed
- lists:foreach(fun(Shard) ->
- do_compact(Shard#shard.name)
- end, mem3:local_shards(DbName)),
+ lists:foreach(
+ fun(Shard) ->
+ do_compact(Shard#shard.name)
+ end,
+ mem3:local_shards(DbName)
+ ),
% Trigger a refresh rather than wait for the timeout
ddoc_cache_entry:refresh(Pid),
meck:wait(ddoc_cache_ev, event, [removed, Key], 1000),
?assertMatch(
- {{not_found, missing}, _},
- ddoc_cache:open_doc(DbName, ?VDU, Rev)
- ),
+ {{not_found, missing}, _},
+ ddoc_cache:open_doc(DbName, ?VDU, Rev)
+ ),
?assertEqual(1, ets:info(?CACHE, size)).
-
remove_ddoc_rev_only({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -141,9 +135,12 @@ remove_ddoc_rev_only({DbName, _}) ->
meck:wait(ddoc_cache_ev, event, [updated, '_'], 1000),
meck:wait(ddoc_cache_ev, event, [update_noop, RevKey], 1000),
% Compact the database so that the old rev is removed
- lists:foreach(fun(Shard) ->
- do_compact(Shard#shard.name)
- end, mem3:local_shards(DbName)),
+ lists:foreach(
+ fun(Shard) ->
+ do_compact(Shard#shard.name)
+ end,
+ mem3:local_shards(DbName)
+ ),
% Trigger a refresh rather than wait for the timeout
ddoc_cache_entry:refresh(NoRevPid),
ddoc_cache_entry:refresh(RevPid),
@@ -151,9 +148,9 @@ remove_ddoc_rev_only({DbName, _}) ->
meck:wait(ddoc_cache_ev, event, [removed, RevKey], 1000),
?assertMatch({ok, _}, ddoc_cache:open_doc(DbName, ?VDU)),
?assertMatch(
- {{not_found, missing}, _},
- ddoc_cache:open_doc(DbName, ?VDU, Rev)
- ),
+ {{not_found, missing}, _},
+ ddoc_cache:open_doc(DbName, ?VDU, Rev)
+ ),
?assertEqual(1, ets:info(?CACHE, size)).
remove_custom_not_ok({DbName, _}) ->
@@ -171,7 +168,6 @@ remove_custom_not_ok({DbName, _}) ->
?assertEqual({ruh, roh}, ddoc_cache:open_custom(DbName, ?MODULE)),
?assertEqual(0, ets:info(?CACHE, size)).
-
remove_custom_error({DbName, _}) ->
ddoc_cache_tutil:clear(),
meck:reset(ddoc_cache_ev),
@@ -187,14 +183,12 @@ remove_custom_error({DbName, _}) ->
?assertError(thpppt, ddoc_cache:open_custom(DbName, ?MODULE)),
?assertEqual(0, ets:info(?CACHE, size)).
-
init_custom_ddoc(DbName) ->
Body = {[{<<"status">>, <<"ok">>}]},
{ok, Doc} = fabric:open_doc(DbName, ?CUSTOM, [?ADMIN_CTX]),
NewDoc = Doc#doc{body = Body},
{ok, _} = fabric:update_doc(DbName, NewDoc, [?ADMIN_CTX]).
-
do_compact(ShardName) ->
{ok, Db} = couch_db:open_int(ShardName, []),
try
@@ -209,16 +203,19 @@ do_compact(ShardName) ->
end,
wait_for_compaction(ShardName).
-
wait_for_compaction(ShardName) ->
{ok, Db} = couch_db:open_int(ShardName, []),
- CompactRunning = try
- {ok, Info} = couch_db:get_db_info(Db),
- couch_util:get_value(compact_running, Info)
- after
- couch_db:close(Db)
- end,
- if not CompactRunning -> ok; true ->
- timer:sleep(100),
- wait_for_compaction(ShardName)
-    end.
\ No newline at end of file
+ CompactRunning =
+ try
+ {ok, Info} = couch_db:get_db_info(Db),
+ couch_util:get_value(compact_running, Info)
+ after
+ couch_db:close(Db)
+ end,
+ if
+ not CompactRunning ->
+ ok;
+ true ->
+ timer:sleep(100),
+ wait_for_compaction(ShardName)
+ end.
diff --git a/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
index b34d4b163..cc7d694f0 100644
--- a/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
+++ b/src/ddoc_cache/test/eunit/ddoc_cache_tutil.erl
@@ -12,7 +12,6 @@
-module(ddoc_cache_tutil).
-
-export([
start_couch/0,
start_couch/1,
@@ -24,36 +23,31 @@
with/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch/include/couch_eunit.hrl").
-
start_couch() ->
start_couch([{write_ddocs, true}]).
-
start_couch(Options) ->
WriteDDocs = couch_util:get_value(write_ddocs, Options, true),
purge_modules(),
Ctx = test_util:start_couch(?CONFIG_CHAIN, [chttpd, ddoc_cache]),
TmpDb = ?tempdb(),
ok = fabric:create_db(TmpDb, [{q, "1"}, {n, "1"}]),
- if not WriteDDocs -> ok; true ->
- {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX])
+ if
+ not WriteDDocs -> ok;
+ true -> {ok, _} = fabric:update_docs(TmpDb, ddocs(), [?ADMIN_CTX])
end,
{TmpDb, Ctx}.
-
stop_couch({_TmpDb, Ctx}) ->
test_util:stop_couch(Ctx).
-
clear() ->
application:stop(ddoc_cache),
application:start(ddoc_cache).
-
get_rev(DbName, DDocId) ->
{_, Ref} = erlang:spawn_monitor(fun() ->
{ok, #doc{revs = Revs}} = fabric:open_doc(DbName, DDocId, [?ADMIN_CTX]),
@@ -64,48 +58,52 @@ get_rev(DbName, DDocId) ->
{'DOWN', Ref, _, _, Rev} -> Rev
end.
-
ddocs() ->
FooBar = #doc{
id = <<"_design/foobar">>,
- body = {[
- {<<"foo">>, <<"bar">>}
- ]}
+ body =
+ {[
+ {<<"foo">>, <<"bar">>}
+ ]}
},
VDU = #doc{
id = <<"_design/vdu">>,
- body = {[
- {<<"validate_doc_update">>, <<"function(doc) {return;}">>}
- ]}
+ body =
+ {[
+ {<<"validate_doc_update">>, <<"function(doc) {return;}">>}
+ ]}
},
Custom = #doc{
id = <<"_design/custom">>,
- body = {[
- {<<"status">>, <<"ok">>},
- {<<"custom">>, <<"hotrod">>}
- ]}
+ body =
+ {[
+ {<<"status">>, <<"ok">>},
+ {<<"custom">>, <<"hotrod">>}
+ ]}
},
[FooBar, VDU, Custom].
-
purge_modules() ->
case application:get_key(ddoc_cache, modules) of
{ok, Mods} ->
- lists:foreach(fun(Mod) ->
- case code:which(Mod) of
- cover_compiled ->
- ok;
- _ ->
- code:delete(Mod),
- code:purge(Mod)
- end
- end, Mods);
+ lists:foreach(
+ fun(Mod) ->
+ case code:which(Mod) of
+ cover_compiled ->
+ ok;
+ _ ->
+ code:delete(Mod),
+ code:purge(Mod)
+ end
+ end,
+ Mods
+ );
undefined ->
ok
end.
%% eunit implementation of {with, Tests} doesn't detect test name correctly
with(Tests) ->
- fun(ArgsTuple) ->
- [{Name, ?_test(Fun(ArgsTuple))} || {Name, Fun} <- Tests]
- end.
+ fun(ArgsTuple) ->
+ [{Name, ?_test(Fun(ArgsTuple))} || {Name, Fun} <- Tests]
+ end.
diff --git a/src/dreyfus/src/clouseau_rpc.erl b/src/dreyfus/src/clouseau_rpc.erl
index b8190b32c..908182793 100644
--- a/src/dreyfus/src/clouseau_rpc.erl
+++ b/src/dreyfus/src/clouseau_rpc.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(clouseau_rpc).
@@ -55,12 +54,12 @@ search(Ref, Args) ->
case rpc(Ref, {search, Args}) of
{ok, Response} when is_list(Response) ->
{ok, #top_docs{
- update_seq = couch_util:get_value(update_seq, Response),
- total_hits = couch_util:get_value(total_hits, Response),
- hits = couch_util:get_value(hits, Response),
- counts = couch_util:get_value(counts, Response),
- ranges = couch_util:get_value(ranges, Response)
- }};
+ update_seq = couch_util:get_value(update_seq, Response),
+ total_hits = couch_util:get_value(total_hits, Response),
+ hits = couch_util:get_value(hits, Response),
+ counts = couch_util:get_value(counts, Response),
+ ranges = couch_util:get_value(ranges, Response)
+ }};
Else ->
Else
end.
@@ -81,7 +80,7 @@ cleanup(DbName) ->
gen_server:cast({cleanup, clouseau()}, {cleanup, DbName}).
rename(DbName) ->
- gen_server:cast({cleanup, clouseau()}, {rename, DbName}).
+ gen_server:cast({cleanup, clouseau()}, {rename, DbName}).
cleanup(DbName, ActiveSigs) ->
gen_server:cast({cleanup, clouseau()}, {cleanup, DbName, ActiveSigs}).
diff --git a/src/dreyfus/src/dreyfus_app.erl b/src/dreyfus/src/dreyfus_app.erl
index 7cd7f4a31..3f2fc5086 100644
--- a/src/dreyfus/src/dreyfus_app.erl
+++ b/src/dreyfus/src/dreyfus_app.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_app).
diff --git a/src/dreyfus/src/dreyfus_bookmark.erl b/src/dreyfus/src/dreyfus_bookmark.erl
index 9a2979b25..e33087a27 100644
--- a/src/dreyfus/src/dreyfus_bookmark.erl
+++ b/src/dreyfus/src/dreyfus_bookmark.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_bookmark).
@@ -25,7 +24,6 @@
add_missing_shards/2
]).
-
update(_Sort, Bookmark, []) ->
Bookmark;
update(relevance, Bookmark, [#sortable{} = Sortable | Rest]) ->
@@ -45,46 +43,46 @@ update(Sort, Bookmark, [#sortable{} = Sortable | Rest]) ->
B2 = fabric_view:remove_overlapping_shards(Shard, B1),
update(Sort, B2, Rest).
-
-unpack(DbName, #index_query_args{bookmark=nil} = Args) ->
+unpack(DbName, #index_query_args{bookmark = nil} = Args) ->
fabric_dict:init(dreyfus_util:get_shards(DbName, Args), nil);
unpack(DbName, #index_query_args{} = Args) ->
unpack(DbName, Args#index_query_args.bookmark);
unpack(DbName, Packed) when is_binary(Packed) ->
- lists:map(fun({Node, Range, After}) ->
- case mem3:get_shard(DbName, Node, Range) of
- {ok, Shard} ->
- {Shard, After};
- {error, not_found} ->
- PlaceHolder = #shard{
- node = Node,
- range = Range,
- dbname = DbName,
- _='_'
- },
- {PlaceHolder, After}
- end
- end, binary_to_term(couch_util:decodeBase64Url(Packed))).
-
+ lists:map(
+ fun({Node, Range, After}) ->
+ case mem3:get_shard(DbName, Node, Range) of
+ {ok, Shard} ->
+ {Shard, After};
+ {error, not_found} ->
+ PlaceHolder = #shard{
+ node = Node,
+ range = Range,
+ dbname = DbName,
+ _ = '_'
+ },
+ {PlaceHolder, After}
+ end
+ end,
+ binary_to_term(couch_util:decodeBase64Url(Packed))
+ ).
pack(nil) ->
null;
pack(Workers) ->
- Workers1 = [{N,R,A} || {#shard{node=N, range=R}, A} <- Workers, A =/= nil],
- Bin = term_to_binary(Workers1, [compressed, {minor_version,1}]),
+ Workers1 = [{N, R, A} || {#shard{node = N, range = R}, A} <- Workers, A =/= nil],
+ Bin = term_to_binary(Workers1, [compressed, {minor_version, 1}]),
couch_util:encodeBase64Url(Bin).
-
add_missing_shards(Bookmark, LiveShards) ->
{BookmarkShards, _} = lists:unzip(Bookmark),
add_missing_shards(Bookmark, BookmarkShards, LiveShards).
-
add_missing_shards(Bookmark, _, []) ->
Bookmark;
add_missing_shards(Bookmark, BMShards, [H | T]) ->
- Bookmark1 = case lists:keymember(H#shard.range, #shard.range, BMShards) of
- true -> Bookmark;
- false -> fabric_dict:store(H, nil, Bookmark)
- end,
+ Bookmark1 =
+ case lists:keymember(H#shard.range, #shard.range, BMShards) of
+ true -> Bookmark;
+ false -> fabric_dict:store(H, nil, Bookmark)
+ end,
add_missing_shards(Bookmark1, BMShards, T).
diff --git a/src/dreyfus/src/dreyfus_config.erl b/src/dreyfus/src/dreyfus_config.erl
index b7555c1d0..df138f35d 100644
--- a/src/dreyfus/src/dreyfus_config.erl
+++ b/src/dreyfus/src/dreyfus_config.erl
@@ -1,13 +1,14 @@
- -module(dreyfus_config).
+-module(dreyfus_config).
- -export([data/0, get/1]).
+-export([data/0, get/1]).
data() ->
try
config:get("dreyfus_blacklist")
- catch error:badarg ->
- % lazy workaround to address issue with epi invocation on startup
- []
+ catch
+ error:badarg ->
+ % lazy workaround to address issue with epi invocation on startup
+ []
end.
get(Key) ->
diff --git a/src/dreyfus/src/dreyfus_epi.erl b/src/dreyfus/src/dreyfus_epi.erl
index cb07f8a34..22c2c90a8 100644
--- a/src/dreyfus/src/dreyfus_epi.erl
+++ b/src/dreyfus/src/dreyfus_epi.erl
@@ -23,7 +23,6 @@ providers() ->
{chttpd_handlers, dreyfus_httpd_handlers}
].
-
services() ->
[].
@@ -32,8 +31,7 @@ data_subscriptions() ->
data_providers() ->
[
- {{dreyfus, black_list}, {callback_module, dreyfus_config},
- [{interval, ?DATA_INTERVAL}]}
+ {{dreyfus, black_list}, {callback_module, dreyfus_config}, [{interval, ?DATA_INTERVAL}]}
].
processes() ->
@@ -41,6 +39,9 @@ processes() ->
notify(_Key, _Old, _New) ->
Listeners = application:get_env(dreyfus, config_listeners, []),
- lists:foreach(fun(L) ->
- L ! dreyfus_config_change_finished
- end, Listeners).
+ lists:foreach(
+ fun(L) ->
+ L ! dreyfus_config_change_finished
+ end,
+ Listeners
+ ).
diff --git a/src/dreyfus/src/dreyfus_fabric.erl b/src/dreyfus/src/dreyfus_fabric.erl
index 0b25a6cc6..5689c1d4e 100644
--- a/src/dreyfus/src/dreyfus_fabric.erl
+++ b/src/dreyfus/src/dreyfus_fabric.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_fabric).
@@ -23,98 +22,167 @@
get_json_docs(DbName, DocIds) ->
fabric:all_docs(DbName, fun callback/2, [], [{keys, DocIds}, {include_docs, true}]).
-callback({meta,_}, Acc) ->
+callback({meta, _}, Acc) ->
{ok, Acc};
callback({error, Reason}, _Acc) ->
{error, Reason};
callback({row, Row}, Acc) ->
{id, Id} = lists:keyfind(id, 1, Row),
- {ok, [{Id, lists:keyfind(doc, 1, Row)}|Acc]};
+ {ok, [{Id, lists:keyfind(doc, 1, Row)} | Acc]};
callback(complete, Acc) ->
{ok, lists:reverse(Acc)};
callback(timeout, _Acc) ->
{error, timeout}.
-handle_error_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
+handle_error_message(
+ {rexi_DOWN, _, {_, NodeRef}, _},
+ _Worker,
+ Counters,
+ _Replacements,
+ _StartFun,
+ _StartArgs,
+ RingOpts
+) ->
case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of
- {ok, NewCounters} ->
- {ok, NewCounters};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
+ {ok, NewCounters} ->
+ {ok, NewCounters};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
end;
-handle_error_message({rexi_EXIT, {maintenance_mode, _}}, Worker,
- Counters, Replacements, StartFun, StartArgs, RingOpts) ->
- handle_replacement(Worker, Counters, Replacements, StartFun, StartArgs,
- RingOpts);
-handle_error_message({rexi_EXIT, Reason}, Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
+handle_error_message(
+ {rexi_EXIT, {maintenance_mode, _}},
+ Worker,
+ Counters,
+ Replacements,
+ StartFun,
+ StartArgs,
+ RingOpts
+) ->
+ handle_replacement(
+ Worker,
+ Counters,
+ Replacements,
+ StartFun,
+ StartArgs,
+ RingOpts
+ );
+handle_error_message(
+ {rexi_EXIT, Reason},
+ Worker,
+ Counters,
+ _Replacements,
+ _StartFun,
+ _StartArgs,
+ RingOpts
+) ->
handle_error(Reason, Worker, Counters, RingOpts);
-handle_error_message({error, Reason}, Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
+handle_error_message(
+ {error, Reason},
+ Worker,
+ Counters,
+ _Replacements,
+ _StartFun,
+ _StartArgs,
+ RingOpts
+) ->
handle_error(Reason, Worker, Counters, RingOpts);
-handle_error_message({'EXIT', Reason}, Worker,
- Counters, _Replacements, _StartFun, _StartArgs, RingOpts) ->
+handle_error_message(
+ {'EXIT', Reason},
+ Worker,
+ Counters,
+ _Replacements,
+ _StartFun,
+ _StartArgs,
+ RingOpts
+) ->
handle_error({exit, Reason}, Worker, Counters, RingOpts);
-handle_error_message(Reason, Worker, Counters,
- _Replacements, _StartFun, _StartArgs, RingOpts) ->
+handle_error_message(
+ Reason,
+ Worker,
+ Counters,
+ _Replacements,
+ _StartFun,
+ _StartArgs,
+ RingOpts
+) ->
couch_log:error("Unexpected error during request: ~p", [Reason]),
handle_error(Reason, Worker, Counters, RingOpts).
handle_error(Reason, Worker, Counters0, RingOpts) ->
Counters = fabric_dict:erase(Worker, Counters0),
case fabric_ring:is_progress_possible(Counters, RingOpts) of
- true ->
- {ok, Counters};
- false ->
- {error, Reason}
+ true ->
+ {ok, Counters};
+ false ->
+ {error, Reason}
end.
-handle_replacement(Worker, OldCntrs0, OldReplacements, StartFun, StartArgs,
- RingOpts) ->
- OldCounters = lists:filter(fun({#shard{ref=R}, _}) ->
- R /= Worker#shard.ref
- end, OldCntrs0),
+handle_replacement(
+ Worker,
+ OldCntrs0,
+ OldReplacements,
+ StartFun,
+ StartArgs,
+ RingOpts
+) ->
+ OldCounters = lists:filter(
+ fun({#shard{ref = R}, _}) ->
+ R /= Worker#shard.ref
+ end,
+ OldCntrs0
+ ),
case lists:keytake(Worker#shard.range, 1, OldReplacements) of
{value, {_Range, Replacements}, NewReplacements} ->
- NewCounters = lists:foldl(fun(Repl, CounterAcc) ->
- NewCounter = start_replacement(StartFun, StartArgs, Repl),
- fabric_dict:store(NewCounter, nil, CounterAcc)
- end, OldCounters, Replacements),
+ NewCounters = lists:foldl(
+ fun(Repl, CounterAcc) ->
+ NewCounter = start_replacement(StartFun, StartArgs, Repl),
+ fabric_dict:store(NewCounter, nil, CounterAcc)
+ end,
+ OldCounters,
+ Replacements
+ ),
true = fabric_ring:is_progress_possible(NewCounters, RingOpts),
NewRefs = fabric_dict:fetch_keys(NewCounters),
{new_refs, NewRefs, NewCounters, NewReplacements};
false ->
- handle_error({nodedown, <<"progress not possible">>},
- Worker, OldCounters, RingOpts)
+ handle_error(
+ {nodedown, <<"progress not possible">>},
+ Worker,
+ OldCounters,
+ RingOpts
+ )
end.
start_replacement(StartFun, StartArgs, Shard) ->
[DDoc, IndexName, QueryArgs] = StartArgs,
- After = case QueryArgs#index_query_args.bookmark of
- Bookmark when is_list(Bookmark) ->
- lists:foldl(fun({#shard{range=R0}, After0}, Acc) ->
- case R0 == Shard#shard.range of
- true -> After0;
- false -> Acc
- end
- end, nil, Bookmark);
- _ ->
- nil
- end,
- QueryArgs1 = QueryArgs#index_query_args{bookmark=After},
+ After =
+ case QueryArgs#index_query_args.bookmark of
+ Bookmark when is_list(Bookmark) ->
+ lists:foldl(
+ fun({#shard{range = R0}, After0}, Acc) ->
+ case R0 == Shard#shard.range of
+ true -> After0;
+ false -> Acc
+ end
+ end,
+ nil,
+ Bookmark
+ );
+ _ ->
+ nil
+ end,
+ QueryArgs1 = QueryArgs#index_query_args{bookmark = After},
StartArgs1 = [DDoc, IndexName, QueryArgs1],
- Ref = rexi:cast(Shard#shard.node,
- {dreyfus_rpc, StartFun,
- [Shard#shard.name|StartArgs1]}),
+ Ref = rexi:cast(
+ Shard#shard.node,
+ {dreyfus_rpc, StartFun, [Shard#shard.name | StartArgs1]}
+ ),
Shard#shard{ref = Ref}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
node_down_test() ->
[S1, S2, S3] = [
mk_shard("n1", [0, 4]),
@@ -122,24 +190,23 @@ node_down_test() ->
mk_shard("n2", [0, ?RING_END])
],
[W1, W2, W3] = [
- S1#shard{ref = make_ref()},
- S2#shard{ref = make_ref()},
- S3#shard{ref = make_ref()}
+ S1#shard{ref = make_ref()},
+ S2#shard{ref = make_ref()},
+ S3#shard{ref = make_ref()}
],
Counters1 = fabric_dict:init([W1, W2, W3], nil),
N1 = S1#shard.node,
Msg1 = {rexi_DOWN, nil, {nil, N1}, nil},
- Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, []),
+ Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, []),
?assertEqual({ok, [{W3, nil}]}, Res1),
{ok, Counters2} = Res1,
N2 = S3#shard.node,
Msg2 = {rexi_DOWN, nil, {nil, N2}, nil},
- Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, []),
+ Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, []),
?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2).
-
worker_error_test() ->
[S1, S2] = [
mk_shard("n1", [0, ?RING_END]),
@@ -154,7 +221,6 @@ worker_error_test() ->
{ok, Counters2} = Res1,
?assertEqual({error, boom}, handle_error(boom, W2, Counters2, [])).
-
node_down_with_partitions_test() ->
[S1, S2] = [
mk_shard("n1", [0, 4]),
@@ -169,20 +235,20 @@ node_down_with_partitions_test() ->
N1 = S1#shard.node,
Msg1 = {rexi_DOWN, nil, {nil, N1}, nil},
- Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, RingOpts),
+ Res1 = handle_error_message(Msg1, nil, Counters1, nil, nil, nil, RingOpts),
?assertEqual({ok, [{W2, nil}]}, Res1),
{ok, Counters2} = Res1,
N2 = S2#shard.node,
Msg2 = {rexi_DOWN, nil, {nil, N2}, nil},
- Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, RingOpts),
+ Res2 = handle_error_message(Msg2, nil, Counters2, nil, nil, nil, RingOpts),
?assertEqual({error, {nodedown, <<"progress not possible">>}}, Res2).
-
worker_error_with_partitions_test() ->
[S1, S2] = [
mk_shard("n1", [0, 4]),
- mk_shard("n2", [0, 8])],
+ mk_shard("n2", [0, 8])
+ ],
[W1, W2] = [
S1#shard{ref = make_ref()},
S2#shard{ref = make_ref()}
@@ -196,7 +262,6 @@ worker_error_with_partitions_test() ->
{ok, Counters2} = Res1,
?assertEqual({error, boom}, handle_error(boom, W2, Counters2, RingOpts)).
-
mk_shard(Name, Range) ->
Node = list_to_atom(Name),
BName = list_to_binary(Name),
diff --git a/src/dreyfus/src/dreyfus_fabric_cleanup.erl b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
index 681712748..e2710744d 100644
--- a/src/dreyfus/src/dreyfus_fabric_cleanup.erl
+++ b/src/dreyfus/src/dreyfus_fabric_cleanup.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_fabric_cleanup).
@@ -23,22 +22,30 @@
go(DbName) ->
{ok, DesignDocs} = fabric:design_docs(DbName),
- ActiveSigs = lists:usort(lists:flatmap(fun active_sigs/1,
- [couch_doc:from_json_obj(DD) || DD <- DesignDocs])),
+ ActiveSigs = lists:usort(
+ lists:flatmap(
+ fun active_sigs/1,
+ [couch_doc:from_json_obj(DD) || DD <- DesignDocs]
+ )
+ ),
cleanup_local_purge_doc(DbName, ActiveSigs),
clouseau_rpc:cleanup(DbName, ActiveSigs),
ok.
-active_sigs(#doc{body={Fields}}=Doc) ->
+active_sigs(#doc{body = {Fields}} = Doc) ->
try
{RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
{IndexNames, _} = lists:unzip(RawIndexes),
- [begin
- {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
- Index#index.sig
- end || IndexName <- IndexNames]
- catch error:{badmatch, _Error} ->
- []
+ [
+ begin
+ {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
+ Index#index.sig
+ end
+ || IndexName <- IndexNames
+ ]
+ catch
+ error:{badmatch, _Error} ->
+ []
end.
cleanup_local_purge_doc(DbName, ActiveSigs) ->
@@ -49,30 +56,50 @@ cleanup_local_purge_doc(DbName, ActiveSigs) ->
DirListStrs = filelib:wildcard(Pattern),
DirList = [iolist_to_binary(DL) || DL <- DirListStrs],
LocalShards = mem3:local_shards(DbName),
- ActiveDirs = lists:foldl(fun(LS, AccOuter) ->
- lists:foldl(fun(Sig, AccInner) ->
- DirName = filename:join([BaseDir, LS#shard.name, Sig]),
- [DirName | AccInner]
- end, AccOuter, ActiveSigs)
- end, [], LocalShards),
+ ActiveDirs = lists:foldl(
+ fun(LS, AccOuter) ->
+ lists:foldl(
+ fun(Sig, AccInner) ->
+ DirName = filename:join([BaseDir, LS#shard.name, Sig]),
+ [DirName | AccInner]
+ end,
+ AccOuter,
+ ActiveSigs
+ )
+ end,
+ [],
+ LocalShards
+ ),
DeadDirs = DirList -- ActiveDirs,
- lists:foreach(fun(IdxDir) ->
- Sig = dreyfus_util:get_signature_from_idxdir(IdxDir),
- case Sig of undefined -> ok; _ ->
- DocId = dreyfus_util:get_local_purge_doc_id(Sig),
- LocalShards = mem3:local_shards(DbName),
- lists:foreach(fun(LS) ->
- ShardDbName = LS#shard.name,
- {ok, ShardDb} = couch_db:open_int(ShardDbName, []),
- case couch_db:open_doc(ShardDb, DocId, []) of
- {ok, LocalPurgeDoc} ->
- couch_db:update_doc(ShardDb,
- LocalPurgeDoc#doc{deleted=true}, [?ADMIN_CTX]);
- {not_found, _} ->
- ok
- end,
- couch_db:close(ShardDb)
- end, LocalShards)
- end
- end, DeadDirs).
+ lists:foreach(
+ fun(IdxDir) ->
+ Sig = dreyfus_util:get_signature_from_idxdir(IdxDir),
+ case Sig of
+ undefined ->
+ ok;
+ _ ->
+ DocId = dreyfus_util:get_local_purge_doc_id(Sig),
+ LocalShards = mem3:local_shards(DbName),
+ lists:foreach(
+ fun(LS) ->
+ ShardDbName = LS#shard.name,
+ {ok, ShardDb} = couch_db:open_int(ShardDbName, []),
+ case couch_db:open_doc(ShardDb, DocId, []) of
+ {ok, LocalPurgeDoc} ->
+ couch_db:update_doc(
+ ShardDb,
+ LocalPurgeDoc#doc{deleted = true},
+ [?ADMIN_CTX]
+ );
+ {not_found, _} ->
+ ok
+ end,
+ couch_db:close(ShardDb)
+ end,
+ LocalShards
+ )
+ end
+ end,
+ DeadDirs
+ ).
diff --git a/src/dreyfus/src/dreyfus_fabric_group1.erl b/src/dreyfus/src/dreyfus_fabric_group1.erl
index bdae6f040..1edfd653f 100644
--- a/src/dreyfus/src/dreyfus_fabric_group1.erl
+++ b/src/dreyfus/src/dreyfus_fabric_group1.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_fabric_group1).
@@ -35,14 +34,16 @@ go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
{ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
go(DbName, DDoc, IndexName, QueryArgs);
-
-go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
+go(DbName, DDoc, IndexName, #index_query_args{} = QueryArgs) ->
DesignName = dreyfus_util:get_design_docid(DDoc),
dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
Shards = dreyfus_util:get_shards(DbName, QueryArgs),
RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [DDoc,
- IndexName, dreyfus_util:export(QueryArgs)]),
+ Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [
+ DDoc,
+ IndexName,
+ dreyfus_util:export(QueryArgs)
+ ]),
Replacements = fabric_view:get_shard_replacements(DbName, Workers),
Counters = fabric_dict:init(Workers, nil),
RexiMon = fabric_util:create_monitors(Workers),
@@ -56,8 +57,14 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
ring_opts = RingOpts
},
try
- rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60)
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ infinity,
+ 1000 * 60 * 60
+ )
after
rexi_monitor:stop(RexiMon),
fabric_util:cleanup(Workers)
@@ -67,34 +74,43 @@ go(DbName, DDoc, IndexName, OldArgs) ->
handle_message({ok, NewTopGroups}, Shard, State0) ->
State = upgrade_state(State0),
- #state{top_groups=TopGroups, limit=Limit, sort=Sort} = State,
+ #state{top_groups = TopGroups, limit = Limit, sort = Sort} = State,
case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- MergedTopGroups = merge_top_groups(TopGroups, make_sortable(Shard, NewTopGroups), Limit, Sort),
- State1 = State#state{
- counters=C2,
- top_groups=MergedTopGroups
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, remove_sortable(MergedTopGroups)}
- end
+ undefined ->
+ %% already heard from someone else in this range
+ {ok, State};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, State#state.counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ MergedTopGroups = merge_top_groups(
+ TopGroups, make_sortable(Shard, NewTopGroups), Limit, Sort
+ ),
+ State1 = State#state{
+ counters = C2,
+ top_groups = MergedTopGroups
+ },
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, State1};
+ false ->
+ {stop, remove_sortable(MergedTopGroups)}
+ end
end;
-
handle_message(Error, Worker, State0) ->
State = upgrade_state(State0),
- case dreyfus_fabric:handle_error_message(Error, Worker,
- State#state.counters, State#state.replacements,
- group1, State#state.start_args, State#state.ring_opts) of
+ case
+ dreyfus_fabric:handle_error_message(
+ Error,
+ Worker,
+ State#state.counters,
+ State#state.replacements,
+ group1,
+ State#state.start_args,
+ State#state.ring_opts
+ )
+ of
{ok, Counters} ->
- {ok, State#state{counters=Counters}};
+ {ok, State#state{counters = Counters}};
{new_refs, NewRefs, NewCounters, NewReplacements} ->
NewState = State#state{
counters = NewCounters,
@@ -107,8 +123,11 @@ handle_message(Error, Worker, State0) ->
merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
MergedGroups0 = TopGroupsA ++ TopGroupsB,
- GNs = lists:usort([N || #sortable{item={N,_}} <- MergedGroups0]),
- MergedGroups = [merge_top_group(Sort, [S || #sortable{item={N,_}}=S <- MergedGroups0, N =:= GN]) || GN <- GNs],
+ GNs = lists:usort([N || #sortable{item = {N, _}} <- MergedGroups0]),
+ MergedGroups = [
+ merge_top_group(Sort, [S || #sortable{item = {N, _}} = S <- MergedGroups0, N =:= GN])
+ || GN <- GNs
+ ],
lists:sublist(dreyfus_util:sort(Sort, MergedGroups), Limit).
merge_top_group(_Sort, [Group]) ->
@@ -117,13 +136,18 @@ merge_top_group(Sort, [_, _] = Groups) ->
hd(dreyfus_util:sort(Sort, Groups)).
make_sortable(Shard, TopGroups) ->
- [#sortable{item=G, order=Order, shard=Shard} || {_Name, Order}=G <- TopGroups].
+ [#sortable{item = G, order = Order, shard = Shard} || {_Name, Order} = G <- TopGroups].
remove_sortable(Sortables) ->
- [Item || #sortable{item=Item} <- Sortables].
+ [Item || #sortable{item = Item} <- Sortables].
upgrade_state({state, Limit, Sort, TopGroups, Counters}) ->
- #state{limit=Limit, sort=Sort, top_groups=TopGroups, counters=Counters,
- replacements=[]};
-upgrade_state(#state{}=State) ->
+ #state{
+ limit = Limit,
+ sort = Sort,
+ top_groups = TopGroups,
+ counters = Counters,
+ replacements = []
+ };
+upgrade_state(#state{} = State) ->
State.
diff --git a/src/dreyfus/src/dreyfus_fabric_group2.erl b/src/dreyfus/src/dreyfus_fabric_group2.erl
index 8d864dd0c..e962c7e4a 100644
--- a/src/dreyfus/src/dreyfus_fabric_group2.erl
+++ b/src/dreyfus/src/dreyfus_fabric_group2.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_fabric_group2).
@@ -37,14 +36,17 @@ go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
{ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
go(DbName, DDoc, IndexName, QueryArgs);
-
-go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
+go(DbName, DDoc, IndexName, #index_query_args{} = QueryArgs) ->
DesignName = dreyfus_util:get_design_docid(DDoc),
dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
Shards = dreyfus_util:get_shards(DbName, QueryArgs),
RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group2,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
+ Workers = fabric_util:submit_jobs(
+ Shards,
+ dreyfus_rpc,
+ group2,
+ [DDoc, IndexName, dreyfus_util:export(QueryArgs)]
+ ),
Replacements = fabric_view:get_shard_replacements(DbName, Workers),
Counters = fabric_dict:init(Workers, nil),
RexiMon = fabric_util:create_monitors(Workers),
@@ -60,8 +62,14 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
ring_opts = RingOpts
},
try
- rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60)
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ infinity,
+ 1000 * 60 * 60
+ )
after
rexi_monitor:stop(RexiMon),
fabric_util:cleanup(Workers)
@@ -69,45 +77,59 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
go(DbName, DDoc, IndexName, OldArgs) ->
go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)).
-
-handle_message({ok, NewTotalHits, NewTotalGroupedHits, NewTopGroups},
- Shard, State0) ->
+handle_message(
+ {ok, NewTotalHits, NewTotalGroupedHits, NewTopGroups},
+ Shard,
+ State0
+) ->
State = upgrade_state(State0),
- #state{total_hits=TotalHits, total_grouped_hits=TotalGroupedHits,
- top_groups=TopGroups, limit=Limit, sort=Sort} = State,
+ #state{
+ total_hits = TotalHits,
+ total_grouped_hits = TotalGroupedHits,
+ top_groups = TopGroups,
+ limit = Limit,
+ sort = Sort
+ } = State,
case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- MergedTotalHits = NewTotalHits + TotalHits,
- MergedTotalGroupedHits = NewTotalGroupedHits + TotalGroupedHits,
- Sortable = make_sortable(Shard, NewTopGroups),
- MergedTopGroups = merge_top_groups(TopGroups, Sortable, Limit, Sort),
- State1 = State#state{
- counters=C2,
- total_hits=MergedTotalHits,
- total_grouped_hits=MergedTotalGroupedHits,
- top_groups=MergedTopGroups
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, {MergedTotalHits, MergedTotalGroupedHits,
- remove_sortable(MergedTopGroups)}}
- end
+ undefined ->
+ %% already heard from someone else in this range
+ {ok, State};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, State#state.counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ MergedTotalHits = NewTotalHits + TotalHits,
+ MergedTotalGroupedHits = NewTotalGroupedHits + TotalGroupedHits,
+ Sortable = make_sortable(Shard, NewTopGroups),
+ MergedTopGroups = merge_top_groups(TopGroups, Sortable, Limit, Sort),
+ State1 = State#state{
+ counters = C2,
+ total_hits = MergedTotalHits,
+ total_grouped_hits = MergedTotalGroupedHits,
+ top_groups = MergedTopGroups
+ },
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, State1};
+ false ->
+ {stop,
+ {MergedTotalHits, MergedTotalGroupedHits, remove_sortable(MergedTopGroups)}}
+ end
end;
-
handle_message(Error, Worker, State0) ->
State = upgrade_state(State0),
- case dreyfus_fabric:handle_error_message(Error, Worker,
- State#state.counters, State#state.replacements,
- group2, State#state.start_args, State#state.ring_opts) of
+ case
+ dreyfus_fabric:handle_error_message(
+ Error,
+ Worker,
+ State#state.counters,
+ State#state.replacements,
+ group2,
+ State#state.start_args,
+ State#state.ring_opts
+ )
+ of
{ok, Counters} ->
- {ok, State#state{counters=Counters}};
+ {ok, State#state{counters = Counters}};
{new_refs, NewRefs, NewCounters, NewReplacements} ->
NewState = State#state{
counters = NewCounters,
@@ -121,15 +143,16 @@ handle_message(Error, Worker, State0) ->
merge_top_groups([], TopGroups, _Limit, _Sort) ->
TopGroups;
merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) ->
- lists:zipwith(fun(A,B) -> merge_top_group(A, B, Limit, Sort) end,
- TopGroupsA,
- TopGroupsB).
+ lists:zipwith(
+ fun(A, B) -> merge_top_group(A, B, Limit, Sort) end,
+ TopGroupsA,
+ TopGroupsB
+ ).
merge_top_group({Name, TotalA, HitsA}, {Name, TotalB, HitsB}, Limit, Sort) ->
MergedHits = lists:sublist(dreyfus_util:sort(Sort, HitsA ++ HitsB), Limit),
{Name, TotalA + TotalB, MergedHits}.
-
make_sortable(Shard, TopGroups) ->
[make_sortable_group(Shard, TopGroup) || TopGroup <- TopGroups].
@@ -137,7 +160,7 @@ make_sortable_group(Shard, {Name, TotalHits, Hits}) ->
{Name, TotalHits, [make_sortable_hit(Shard, Hit) || Hit <- Hits]}.
make_sortable_hit(Shard, Hit) ->
- #sortable{item=Hit, order=Hit#hit.order, shard=Shard}.
+ #sortable{item = Hit, order = Hit#hit.order, shard = Shard}.
remove_sortable(SortableGroups) ->
[remove_sortable_group(G) || G <- SortableGroups].
@@ -148,11 +171,15 @@ remove_sortable_group({Name, TotalHits, SortableHits}) ->
remove_sortable_hit(SortableHit) ->
SortableHit#sortable.item.
-upgrade_state({state, Limit, Sort, TotalHits, TotalGroupedHits,
- TopGroups, Counters}) ->
- #state{limit = Limit, sort = Sort, total_hits = TotalHits,
- total_grouped_hits = TotalGroupedHits,
- top_groups = TopGroups, counters = Counters,
- replacements = []};
+upgrade_state({state, Limit, Sort, TotalHits, TotalGroupedHits, TopGroups, Counters}) ->
+ #state{
+ limit = Limit,
+ sort = Sort,
+ total_hits = TotalHits,
+ total_grouped_hits = TotalGroupedHits,
+ top_groups = TopGroups,
+ counters = Counters,
+ replacements = []
+ };
upgrade_state(#state{} = State) ->
State.
diff --git a/src/dreyfus/src/dreyfus_fabric_info.erl b/src/dreyfus/src/dreyfus_fabric_info.erl
index e217bc0ef..5ca589c1d 100644
--- a/src/dreyfus/src/dreyfus_fabric_info.erl
+++ b/src/dreyfus/src/dreyfus_fabric_info.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_fabric_info).
@@ -25,7 +24,6 @@ go(DbName, DDocId, IndexName, InfoLevel) when is_binary(DDocId) ->
{ok, DDoc} = fabric:open_doc(DbName, <<"_design/", DDocId/binary>>, []),
dreyfus_util:maybe_deny_index(DbName, DDocId, IndexName),
go(DbName, DDoc, IndexName, InfoLevel);
-
go(DbName, DDoc, IndexName, InfoLevel) ->
DesignName = dreyfus_util:get_design_docid(DDoc),
dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
@@ -39,70 +37,74 @@ go(DbName, DDoc, IndexName, InfoLevel) ->
rexi_monitor:stop(RexiMon)
end.
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, {Counters, Acc}) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, {Counters, Acc}) ->
case fabric_util:remove_down_workers(Counters, NodeRef) of
- {ok, NewCounters} ->
- {ok, {NewCounters, Acc}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
+ {ok, NewCounters} ->
+ {ok, {NewCounters, Acc}};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
end;
-
handle_message({rexi_EXIT, Reason}, Worker, {Counters, Acc}) ->
NewCounters = fabric_dict:erase(Worker, Counters),
case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, Reason}
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, Reason}
end;
-
handle_message({ok, Info}, Worker, {Counters, Acc}) ->
case fabric_dict:lookup_element(Worker, Counters) of
- undefined ->
- % already heard from someone else in this range
- {ok, {Counters, Acc}};
- nil ->
- C1 = fabric_dict:store(Worker, ok, Counters),
- C2 = fabric_view:remove_overlapping_shards(Worker, C1),
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, {C2, [Info|Acc]}};
- false ->
- {stop, merge_results(lists:flatten([Info|Acc]))}
- end
+ undefined ->
+ % already heard from someone else in this range
+ {ok, {Counters, Acc}};
+ nil ->
+ C1 = fabric_dict:store(Worker, ok, Counters),
+ C2 = fabric_view:remove_overlapping_shards(Worker, C1),
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, {C2, [Info | Acc]}};
+ false ->
+ {stop, merge_results(lists:flatten([Info | Acc]))}
+ end
end;
-
handle_message({error, Reason}, Worker, {Counters, Acc}) ->
NewCounters = fabric_dict:erase(Worker, Counters),
case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, Reason}
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, Reason}
end;
handle_message({'EXIT', _}, Worker, {Counters, Acc}) ->
NewCounters = fabric_dict:erase(Worker, Counters),
case fabric_ring:is_progress_possible(NewCounters) of
- true ->
- {ok, {NewCounters, Acc}};
- false ->
- {error, {nodedown, <<"progress not possible">>}}
+ true ->
+ {ok, {NewCounters, Acc}};
+ false ->
+ {error, {nodedown, <<"progress not possible">>}}
end.
merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (disk_size, X, Acc) ->
- [{disk_size, lists:sum(X)} | Acc];
- (doc_count, X, Acc) ->
- [{doc_count, lists:sum(X)} | Acc];
- (doc_del_count, X, Acc) ->
- [{doc_del_count, lists:sum(X)} | Acc];
- (committed_seq, X, Acc) ->
- [{committed_seq, lists:sum(X)} | Acc];
- (pending_seq, X, Acc) ->
- [{pending_seq, lists:sum(X)} | Acc];
- (_, _, Acc) ->
- Acc
- end, [], Dict).
+ Dict = lists:foldl(
+ fun({K, V}, D0) -> orddict:append(K, V, D0) end,
+ orddict:new(),
+ Info
+ ),
+ orddict:fold(
+ fun
+ (disk_size, X, Acc) ->
+ [{disk_size, lists:sum(X)} | Acc];
+ (doc_count, X, Acc) ->
+ [{doc_count, lists:sum(X)} | Acc];
+ (doc_del_count, X, Acc) ->
+ [{doc_del_count, lists:sum(X)} | Acc];
+ (committed_seq, X, Acc) ->
+ [{committed_seq, lists:sum(X)} | Acc];
+ (pending_seq, X, Acc) ->
+ [{pending_seq, lists:sum(X)} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end,
+ [],
+ Dict
+ ).
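The hunk above is representative of how the reformatted code lays out higher-order calls: each argument of `lists:foldl/3` and `orddict:fold/3` moves onto its own line, and a multi-clause fun keeps one pattern per clause. A minimal standalone sketch of that same layout, using a hypothetical module and key names that are not part of this patch:

```
%% Illustrative only: hypothetical module, not from the CouchDB tree.
-module(fmt_fold_example).
-export([sum_sizes/1]).

%% Sum the disk_size values from a proplist, ignoring every other key.
sum_sizes(Props) ->
    lists:foldl(
        fun
            ({disk_size, N}, Acc) when is_integer(N) ->
                Acc + N;
            (_, Acc) ->
                Acc
        end,
        0,
        Props
    ).
```

For example, `fmt_fold_example:sum_sizes([{disk_size, 10}, {doc_count, 3}, {disk_size, 5}])` returns `15`.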
diff --git a/src/dreyfus/src/dreyfus_fabric_search.erl b/src/dreyfus/src/dreyfus_fabric_search.erl
index 8edaa385a..7e78e5fc3 100644
--- a/src/dreyfus/src/dreyfus_fabric_search.erl
+++ b/src/dreyfus/src/dreyfus_fabric_search.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_fabric_search).
@@ -32,48 +31,61 @@
}).
go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
- {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>,
- [ejson_body]),
+ {ok, DDoc} = fabric:open_doc(
+ DbName,
+ <<"_design/", GroupId/binary>>,
+ [ejson_body]
+ ),
dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName),
go(DbName, DDoc, IndexName, QueryArgs);
-
-go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) ->
+go(DbName, DDoc, IndexName, #index_query_args{bookmark = nil} = QueryArgs) ->
DesignName = dreyfus_util:get_design_docid(DDoc),
dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName),
Shards = dreyfus_util:get_shards(DbName, QueryArgs),
RingOpts = dreyfus_util:get_ring_opts(QueryArgs, Shards),
- Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
- [DDoc, IndexName, dreyfus_util:export(QueryArgs)]),
+ Workers = fabric_util:submit_jobs(
+ Shards,
+ dreyfus_rpc,
+ search,
+ [DDoc, IndexName, dreyfus_util:export(QueryArgs)]
+ ),
Counters = fabric_dict:init(Workers, nil),
go(DbName, DDoc, IndexName, QueryArgs, Counters, Counters, RingOpts);
-
-go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) ->
- Bookmark0 = try dreyfus_bookmark:unpack(DbName, QueryArgs)
- catch
- _:_ ->
- throw({bad_request, "Invalid bookmark parameter supplied"})
- end,
+go(DbName, DDoc, IndexName, #index_query_args{} = QueryArgs) ->
+ Bookmark0 =
+ try
+ dreyfus_bookmark:unpack(DbName, QueryArgs)
+ catch
+ _:_ ->
+ throw({bad_request, "Invalid bookmark parameter supplied"})
+ end,
Shards = dreyfus_util:get_shards(DbName, QueryArgs),
LiveNodes = [node() | nodes()],
- LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, LiveNodes)],
+ LiveShards = [S || #shard{node = Node} = S <- Shards, lists:member(Node, LiveNodes)],
Bookmark1 = dreyfus_bookmark:add_missing_shards(Bookmark0, LiveShards),
- Counters0 = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, After}) ->
- QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{
- bookmark = After
- }),
- case lists:member(Shard, LiveShards) of
- true ->
- Ref = rexi:cast(N, {dreyfus_rpc, search,
- [Name, DDoc, IndexName, QueryArgs1]}),
- [Shard#shard{ref = Ref}];
- false ->
- lists:map(fun(#shard{name=Name2, node=N2} = NewShard) ->
- Ref = rexi:cast(N2, {dreyfus_rpc, search,
- [Name2, DDoc, IndexName, QueryArgs1]}),
- NewShard#shard{ref = Ref}
- end, find_replacement_shards(Shard, LiveShards))
- end
- end, Bookmark1),
+ Counters0 = lists:flatmap(
+ fun({#shard{name = Name, node = N} = Shard, After}) ->
+ QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{
+ bookmark = After
+ }),
+ case lists:member(Shard, LiveShards) of
+ true ->
+ Ref = rexi:cast(N, {dreyfus_rpc, search, [Name, DDoc, IndexName, QueryArgs1]}),
+ [Shard#shard{ref = Ref}];
+ false ->
+ lists:map(
+ fun(#shard{name = Name2, node = N2} = NewShard) ->
+ Ref = rexi:cast(
+ N2, {dreyfus_rpc, search, [Name2, DDoc, IndexName, QueryArgs1]}
+ ),
+ NewShard#shard{ref = Ref}
+ end,
+ find_replacement_shards(Shard, LiveShards)
+ )
+ end
+ end,
+ Bookmark1
+ ),
Counters = fabric_dict:init(Counters0, nil),
WorkerShards = fabric_dict:fetch_keys(Counters),
RingOpts = dreyfus_util:get_ring_opts(QueryArgs, WorkerShards),
@@ -95,73 +107,92 @@ go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark, RingOpts) ->
State = #state{
limit = Limit,
sort = Sort,
- top_docs = #top_docs{total_hits=0,hits=[]},
+ top_docs = #top_docs{total_hits = 0, hits = []},
counters = Counters,
start_args = [DDoc, IndexName, QueryArgs],
replacements = Replacements,
ring_opts = RingOpts
- },
+ },
RexiMon = fabric_util:create_monitors(Workers),
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, infinity, 1000 * 60 * 60) of
- {ok, Result} ->
- #state{top_docs=TopDocs} = Result,
- #top_docs{total_hits=TotalHits, hits=Hits,
- counts=Counts, ranges=Ranges} = TopDocs,
- case RawBookmark of
- true ->
- {ok, Bookmark, TotalHits, Hits, Counts, Ranges};
- false ->
- Bookmark1 = dreyfus_bookmark:update(Sort, Bookmark, Hits),
- Hits1 = remove_sortable(Hits),
- {ok, Bookmark1, TotalHits, Hits1, Counts, Ranges}
- end;
- {error, Reason} ->
- {error, Reason}
+ try
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ infinity,
+ 1000 * 60 * 60
+ )
+ of
+ {ok, Result} ->
+ #state{top_docs = TopDocs} = Result,
+ #top_docs{
+ total_hits = TotalHits,
+ hits = Hits,
+ counts = Counts,
+ ranges = Ranges
+ } = TopDocs,
+ case RawBookmark of
+ true ->
+ {ok, Bookmark, TotalHits, Hits, Counts, Ranges};
+ false ->
+ Bookmark1 = dreyfus_bookmark:update(Sort, Bookmark, Hits),
+ Hits1 = remove_sortable(Hits),
+ {ok, Bookmark1, TotalHits, Hits1, Counts, Ranges}
+ end;
+ {error, Reason} ->
+ {error, Reason}
after
rexi_monitor:stop(RexiMon),
fabric_util:cleanup(Workers)
end.
-handle_message({ok, #top_docs{}=NewTopDocs}, Shard, State0) ->
+handle_message({ok, #top_docs{} = NewTopDocs}, Shard, State0) ->
State = upgrade_state(State0),
- #state{top_docs=TopDocs, limit=Limit, sort=Sort} = State,
+ #state{top_docs = TopDocs, limit = Limit, sort = Sort} = State,
case fabric_dict:lookup_element(Shard, State#state.counters) of
- undefined ->
- %% already heard from someone else in this range
- {ok, State};
- nil ->
- C1 = fabric_dict:store(Shard, ok, State#state.counters),
- C2 = fabric_view:remove_overlapping_shards(Shard, C1),
- Sortable = make_sortable(Shard, NewTopDocs),
- MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort),
- State1 = State#state{
- counters=C2,
- top_docs=MergedTopDocs
- },
- case fabric_dict:any(nil, C2) of
- true ->
- {ok, State1};
- false ->
- {stop, State1}
- end
+ undefined ->
+ %% already heard from someone else in this range
+ {ok, State};
+ nil ->
+ C1 = fabric_dict:store(Shard, ok, State#state.counters),
+ C2 = fabric_view:remove_overlapping_shards(Shard, C1),
+ Sortable = make_sortable(Shard, NewTopDocs),
+ MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort),
+ State1 = State#state{
+ counters = C2,
+ top_docs = MergedTopDocs
+ },
+ case fabric_dict:any(nil, C2) of
+ true ->
+ {ok, State1};
+ false ->
+ {stop, State1}
+ end
end;
-
% upgrade clause
handle_message({ok, {top_docs, UpdateSeq, TotalHits, Hits}}, Shard, State) ->
TopDocs = #top_docs{
- update_seq = UpdateSeq,
- total_hits = TotalHits,
- hits = Hits},
+ update_seq = UpdateSeq,
+ total_hits = TotalHits,
+ hits = Hits
+ },
handle_message({ok, TopDocs}, Shard, State);
-
handle_message(Error, Worker, State0) ->
State = upgrade_state(State0),
- case dreyfus_fabric:handle_error_message(Error, Worker,
- State#state.counters, State#state.replacements,
- search, State#state.start_args, State#state.ring_opts) of
+ case
+ dreyfus_fabric:handle_error_message(
+ Error,
+ Worker,
+ State#state.counters,
+ State#state.replacements,
+ search,
+ State#state.start_args,
+ State#state.ring_opts
+ )
+ of
{ok, Counters} ->
- {ok, State#state{counters=Counters}};
+ {ok, State#state{counters = Counters}};
{new_refs, NewRefs, NewCounters, NewReplacements} ->
NewState = State#state{
counters = NewCounters,
@@ -172,36 +203,45 @@ handle_message(Error, Worker, State0) ->
Else
end.
-find_replacement_shards(#shard{range=Range}, AllShards) ->
+find_replacement_shards(#shard{range = Range}, AllShards) ->
[Shard || Shard <- AllShards, Shard#shard.range =:= Range].
-make_sortable(Shard, #top_docs{}=TopDocs) ->
+make_sortable(Shard, #top_docs{} = TopDocs) ->
Hits = make_sortable(Shard, TopDocs#top_docs.hits),
- TopDocs#top_docs{hits=Hits};
+ TopDocs#top_docs{hits = Hits};
make_sortable(Shard, List) when is_list(List) ->
make_sortable(Shard, List, []).
make_sortable(_, [], Acc) ->
lists:reverse(Acc);
-make_sortable(Shard, [#hit{}=Hit|Rest], Acc) ->
- make_sortable(Shard, Rest, [#sortable{item=Hit, order=Hit#hit.order, shard=Shard} | Acc]).
+make_sortable(Shard, [#hit{} = Hit | Rest], Acc) ->
+ make_sortable(Shard, Rest, [#sortable{item = Hit, order = Hit#hit.order, shard = Shard} | Acc]).
remove_sortable(List) ->
remove_sortable(List, []).
remove_sortable([], Acc) ->
lists:reverse(Acc);
-remove_sortable([#sortable{item=Item} | Rest], Acc) ->
+remove_sortable([#sortable{item = Item} | Rest], Acc) ->
remove_sortable(Rest, [Item | Acc]).
-merge_top_docs(#top_docs{}=TopDocsA, #top_docs{}=TopDocsB, Limit, Sort) ->
+merge_top_docs(#top_docs{} = TopDocsA, #top_docs{} = TopDocsB, Limit, Sort) ->
MergedTotal = sum_element(#top_docs.total_hits, TopDocsA, TopDocsB),
- MergedHits = lists:sublist(dreyfus_util:sort(Sort,
- TopDocsA#top_docs.hits ++ TopDocsB#top_docs.hits), Limit),
+ MergedHits = lists:sublist(
+ dreyfus_util:sort(
+ Sort,
+ TopDocsA#top_docs.hits ++ TopDocsB#top_docs.hits
+ ),
+ Limit
+ ),
MergedCounts = merge_facets(TopDocsA#top_docs.counts, TopDocsB#top_docs.counts),
MergedRanges = merge_facets(TopDocsA#top_docs.ranges, TopDocsB#top_docs.ranges),
- #top_docs{total_hits=MergedTotal, hits=MergedHits,
- counts=MergedCounts, ranges=MergedRanges}.
+ #top_docs{
+ total_hits = MergedTotal,
+ hits = MergedHits,
+ counts = MergedCounts,
+ ranges = MergedRanges
+ }.
merge_facets(undefined, undefined) ->
undefined;
@@ -218,26 +258,35 @@ merge_facets_int(FacetsA, []) ->
FacetsA;
merge_facets_int([], FacetsB) ->
FacetsB;
-merge_facets_int([{KA, _, _}=A | RA], [{KB, _, _} | _]=FB) when KA < KB ->
+merge_facets_int([{KA, _, _} = A | RA], [{KB, _, _} | _] = FB) when KA < KB ->
[A | merge_facets_int(RA, FB)];
merge_facets_int([{KA, VA, CA} | RA], [{KB, VB, CB} | RB]) when KA =:= KB ->
- [{KA, VA+VB, merge_facets_int(CA, CB)} | merge_facets_int(RA, RB)];
-merge_facets_int([{KA, _, _} | _]=FA, [{KB, _, _}=B | RB]) when KA > KB ->
+ [{KA, VA + VB, merge_facets_int(CA, CB)} | merge_facets_int(RA, RB)];
+merge_facets_int([{KA, _, _} | _] = FA, [{KB, _, _} = B | RB]) when KA > KB ->
[B | merge_facets_int(FA, RB)].
sort_facets([]) ->
[];
sort_facets(Facets) ->
- lists:sort(lists:map(fun({K, V, C}) -> {K, V, sort_facets(C)} end,
- Facets)).
+ lists:sort(
+ lists:map(
+ fun({K, V, C}) -> {K, V, sort_facets(C)} end,
+ Facets
+ )
+ ).
sum_element(N, T1, T2) ->
element(N, T1) + element(N, T2).
upgrade_state({state, Limit, Sort, TopDocs, Counters}) ->
- #state{limit=Limit, sort=Sort, top_docs=TopDocs, counters=Counters,
- replacements=[]};
-upgrade_state(#state{}=State) ->
+ #state{
+ limit = Limit,
+ sort = Sort,
+ top_docs = TopDocs,
+ counters = Counters,
+ replacements = []
+ };
+upgrade_state(#state{} = State) ->
State.
-ifdef(TEST).
@@ -248,23 +297,38 @@ merge_facets_test() ->
?assertEqual([{foo, 1.0, []}], merge_facets([{foo, 1.0, []}], [])),
% one level, one key
- ?assertEqual([{foo, 3.0, []}],
- merge_facets([{foo, 1.0, []}],
- [{foo, 2.0, []}])),
+ ?assertEqual(
+ [{foo, 3.0, []}],
+ merge_facets(
+ [{foo, 1.0, []}],
+ [{foo, 2.0, []}]
+ )
+ ),
% one level, two keys
- ?assertEqual([{bar, 6.0, []}, {foo, 9.0, []}],
- merge_facets([{foo, 1.0, []}, {bar, 2.0, []}],
- [{bar, 4.0, []}, {foo, 8.0, []}])),
+ ?assertEqual(
+ [{bar, 6.0, []}, {foo, 9.0, []}],
+ merge_facets(
+ [{foo, 1.0, []}, {bar, 2.0, []}],
+ [{bar, 4.0, []}, {foo, 8.0, []}]
+ )
+ ),
% multi level, multi keys
- ?assertEqual([{foo, 2.0, [{bar, 2.0, []}]}],
- merge_facets([{foo, 1.0, [{bar, 1.0, []}]}],
- [{foo, 1.0, [{bar, 1.0, []}]}])),
-
- ?assertEqual([{foo, 5.0, [{bar, 7.0, [{bar, 1.0, []}, {baz, 3.0, []}, {foo, 6.5, []}]}]}],
- merge_facets([{foo, 1.0, [{bar, 2.0, [{baz, 3.0, []}, {foo, 0.5, []}]}]}],
- [{foo, 4.0, [{bar, 5.0, [{foo, 6.0, []}, {bar, 1.0, []}]}]}])).
-
+ ?assertEqual(
+ [{foo, 2.0, [{bar, 2.0, []}]}],
+ merge_facets(
+ [{foo, 1.0, [{bar, 1.0, []}]}],
+ [{foo, 1.0, [{bar, 1.0, []}]}]
+ )
+ ),
+
+ ?assertEqual(
+ [{foo, 5.0, [{bar, 7.0, [{bar, 1.0, []}, {baz, 3.0, []}, {foo, 6.5, []}]}]}],
+ merge_facets(
+ [{foo, 1.0, [{bar, 2.0, [{baz, 3.0, []}, {foo, 0.5, []}]}]}],
+ [{foo, 4.0, [{bar, 5.0, [{foo, 6.0, []}, {bar, 1.0, []}]}]}]
+ )
+ ).
-endif.
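The search path above also shows the `try ... of ... catch ... after` layout that results when the protected call itself has to be wrapped across lines. A self-contained sketch in the same style, with a hypothetical module and timeout value (not from this tree):

```
%% Illustrative only: hypothetical module, not from the CouchDB tree.
-module(fmt_try_example).
-export([timed_call/2]).

%% Issue a gen_server call with a fixed timeout and always report how long
%% it took, whether the call replied or exited.
timed_call(Server, Request) ->
    Start = erlang:monotonic_time(millisecond),
    try
        gen_server:call(
            Server,
            Request,
            5000
        )
    of
        Reply ->
            {ok, Reply}
    catch
        exit:Reason ->
            {error, Reason}
    after
        Elapsed = erlang:monotonic_time(millisecond) - Start,
        io:format("call took ~p ms~n", [Elapsed])
    end.
```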
diff --git a/src/dreyfus/src/dreyfus_httpd.erl b/src/dreyfus/src/dreyfus_httpd.erl
index 007dace8f..39d205b95 100644
--- a/src/dreyfus/src/dreyfus_httpd.erl
+++ b/src/dreyfus/src/dreyfus_httpd.erl
@@ -10,139 +10,188 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_httpd).
--export([handle_search_req/3, handle_info_req/3, handle_disk_size_req/3,
- handle_cleanup_req/2, handle_analyze_req/1]).
+-export([
+ handle_search_req/3,
+ handle_info_req/3,
+ handle_disk_size_req/3,
+ handle_cleanup_req/2,
+ handle_analyze_req/1
+]).
-include("dreyfus.hrl").
-include_lib("couch/include/couch_db.hrl").
--import(chttpd, [send_method_not_allowed/2, send_json/2, send_json/3,
- send_error/2]).
+-import(chttpd, [
+ send_method_not_allowed/2,
+ send_json/2, send_json/3,
+ send_error/2
+]).
handle_search_req(Req, Db, DDoc) ->
handle_search_req(Req, Db, DDoc, 0, 500).
-handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req
- ,Db, DDoc, RetryCount, RetryPause)
- when Method == 'GET'; Method == 'POST' ->
+handle_search_req(
+ #httpd{method = Method, path_parts = [_, _, _, _, IndexName]} = Req,
+ Db,
+ DDoc,
+ RetryCount,
+ RetryPause
+) when
+ Method == 'GET'; Method == 'POST'
+->
DbName = couch_db:name(Db),
Start = os:timestamp(),
- QueryArgs = #index_query_args{
- include_docs = IncludeDocs,
- grouping = Grouping
- } = parse_index_params(Req, Db),
+ QueryArgs =
+ #index_query_args{
+ include_docs = IncludeDocs,
+ grouping = Grouping
+ } = parse_index_params(Req, Db),
validate_search_restrictions(Db, DDoc, QueryArgs),
- Response = case Grouping#grouping.by of
- nil ->
- case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of
- {ok, Bookmark0, TotalHits, Hits0} -> % legacy clause
- Hits = hits_to_json(DbName, IncludeDocs, Hits0),
- Bookmark = dreyfus_bookmark:pack(Bookmark0),
- send_json(Req, 200, {[
- {total_rows, TotalHits},
- {bookmark, Bookmark},
- {rows, Hits}
- ]});
- {ok, Bookmark0, TotalHits, Hits0, Counts0, Ranges0} ->
- Hits = hits_to_json(DbName, IncludeDocs, Hits0),
- Bookmark = dreyfus_bookmark:pack(Bookmark0),
- Counts = case Counts0 of
- undefined ->
- [];
- _ ->
- [{counts, facets_to_json(Counts0)}]
- end,
- Ranges = case Ranges0 of
- undefined ->
- [];
- _ ->
- [{ranges, facets_to_json(Ranges0)}]
- end,
- send_json(Req, 200, {[
- {total_rows, TotalHits},
- {bookmark, Bookmark},
- {rows, Hits}
- ] ++ Counts ++ Ranges
- });
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end;
- _ ->
- % ensure limit in group query >0
- UseNewApi = Grouping#grouping.new_api,
- case dreyfus_fabric_group1:go(DbName, DDoc, IndexName, QueryArgs) of
- {ok, []} ->
- send_grouped_response(Req, {0, 0, []}, UseNewApi);
- {ok, TopGroups} ->
- QueryArgs1 = QueryArgs#index_query_args{grouping=Grouping#grouping{groups=TopGroups}},
- case dreyfus_fabric_group2:go(DbName, DDoc,
- IndexName, QueryArgs1) of
- {ok, {TotalHits, TotalGroupedHits, Groups0}} ->
- Groups = [group_to_json(DbName, IncludeDocs, Group, UseNewApi) || Group <- Groups0],
- send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi);
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end;
- {error, Reason} ->
- handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
- end
- end,
+ Response =
+ case Grouping#grouping.by of
+ nil ->
+ case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of
+ % legacy clause
+ {ok, Bookmark0, TotalHits, Hits0} ->
+ Hits = hits_to_json(DbName, IncludeDocs, Hits0),
+ Bookmark = dreyfus_bookmark:pack(Bookmark0),
+ send_json(
+ Req,
+ 200,
+ {[
+ {total_rows, TotalHits},
+ {bookmark, Bookmark},
+ {rows, Hits}
+ ]}
+ );
+ {ok, Bookmark0, TotalHits, Hits0, Counts0, Ranges0} ->
+ Hits = hits_to_json(DbName, IncludeDocs, Hits0),
+ Bookmark = dreyfus_bookmark:pack(Bookmark0),
+ Counts =
+ case Counts0 of
+ undefined ->
+ [];
+ _ ->
+ [{counts, facets_to_json(Counts0)}]
+ end,
+ Ranges =
+ case Ranges0 of
+ undefined ->
+ [];
+ _ ->
+ [{ranges, facets_to_json(Ranges0)}]
+ end,
+ send_json(Req, 200, {
+ [
+ {total_rows, TotalHits},
+ {bookmark, Bookmark},
+ {rows, Hits}
+ ] ++ Counts ++ Ranges
+ });
+ {error, Reason} ->
+ handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+ end;
+ _ ->
+ % ensure limit in group query >0
+ UseNewApi = Grouping#grouping.new_api,
+ case dreyfus_fabric_group1:go(DbName, DDoc, IndexName, QueryArgs) of
+ {ok, []} ->
+ send_grouped_response(Req, {0, 0, []}, UseNewApi);
+ {ok, TopGroups} ->
+ QueryArgs1 = QueryArgs#index_query_args{
+ grouping = Grouping#grouping{groups = TopGroups}
+ },
+ case
+ dreyfus_fabric_group2:go(
+ DbName,
+ DDoc,
+ IndexName,
+ QueryArgs1
+ )
+ of
+ {ok, {TotalHits, TotalGroupedHits, Groups0}} ->
+ Groups = [
+ group_to_json(DbName, IncludeDocs, Group, UseNewApi)
+ || Group <- Groups0
+ ],
+ send_grouped_response(
+ Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi
+ );
+ {error, Reason} ->
+ handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+ end;
+ {error, Reason} ->
+ handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+ end
+ end,
RequestTime = timer:now_diff(os:timestamp(), Start) div 1000,
couch_stats:update_histogram([dreyfus, httpd, search], RequestTime),
Response;
-handle_search_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
+handle_search_req(#httpd{path_parts = [_, _, _, _, _]} = Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
send_method_not_allowed(Req, "GET,POST");
handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
send_error(Req, {bad_request, "path not recognized"}).
-handle_info_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req
- ,Db, #doc{id=Id}=DDoc) ->
+handle_info_req(
+ #httpd{method = 'GET', path_parts = [_, _, _, _, IndexName]} = Req,
+ Db,
+ #doc{id = Id} = DDoc
+) ->
DbName = couch_db:name(Db),
case dreyfus_fabric_info:go(DbName, DDoc, IndexName, info) of
{ok, IndexInfoList} ->
- send_json(Req, 200, {[
- {name, <<Id/binary,"/",IndexName/binary>>},
- {search_index, {IndexInfoList}}
- ]});
+ send_json(
+ Req,
+ 200,
+ {[
+ {name, <<Id/binary, "/", IndexName/binary>>},
+ {search_index, {IndexInfoList}}
+ ]}
+ );
{error, Reason} ->
send_error(Req, Reason)
end;
-handle_info_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) ->
+handle_info_req(#httpd{path_parts = [_, _, _, _, _]} = Req, _Db, _DDoc) ->
send_method_not_allowed(Req, "GET");
handle_info_req(Req, _Db, _DDoc) ->
send_error(Req, {bad_request, "path not recognized"}).
-handle_disk_size_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req, Db, #doc{id=Id}=DDoc) ->
+handle_disk_size_req(
+ #httpd{method = 'GET', path_parts = [_, _, _, _, IndexName]} = Req, Db, #doc{id = Id} = DDoc
+) ->
DbName = couch_db:name(Db),
case dreyfus_fabric_info:go(DbName, DDoc, IndexName, disk_size) of
{ok, IndexInfoList} ->
- send_json(Req, 200, {[
- {name, <<Id/binary,"/",IndexName/binary>>},
- {search_index, {IndexInfoList}}
- ]});
+ send_json(
+ Req,
+ 200,
+ {[
+ {name, <<Id/binary, "/", IndexName/binary>>},
+ {search_index, {IndexInfoList}}
+ ]}
+ );
{error, Reason} ->
send_error(Req, Reason)
end;
-handle_disk_size_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) ->
+handle_disk_size_req(#httpd{path_parts = [_, _, _, _, _]} = Req, _Db, _DDoc) ->
send_method_not_allowed(Req, "GET");
handle_disk_size_req(Req, _Db, _DDoc) ->
send_error(Req, {bad_request, "path not recognized"}).
-handle_cleanup_req(#httpd{method='POST'}=Req, Db) ->
+handle_cleanup_req(#httpd{method = 'POST'} = Req, Db) ->
ok = dreyfus_fabric_cleanup:go(couch_db:name(Db)),
send_json(Req, 202, {[{ok, true}]});
handle_cleanup_req(Req, _Db) ->
send_method_not_allowed(Req, "POST").
-handle_analyze_req(#httpd{method='GET'}=Req) ->
+handle_analyze_req(#httpd{method = 'GET'} = Req) ->
Analyzer = couch_httpd:qs_value(Req, "analyzer"),
Text = couch_httpd:qs_value(Req, "text"),
analyze(Req, Analyzer, Text);
-handle_analyze_req(#httpd{method='POST'}=Req) ->
+handle_analyze_req(#httpd{method = 'POST'} = Req) ->
couch_httpd:validate_ctype(Req, "application/json"),
{Fields} = chttpd:json_body_obj(Req),
Analyzer = couch_util:get_value(<<"analyzer">>, Fields),
@@ -159,7 +208,7 @@ analyze(Req, Analyzer, Text) ->
ok;
_ when is_binary(Analyzer) ->
ok;
- {[_|_]} ->
+ {[_ | _]} ->
ok;
_ ->
throw({bad_request, "analyzer parameter must be a string or an object"})
@@ -174,86 +223,101 @@ analyze(Req, Analyzer, Text) ->
_ ->
throw({bad_request, "text parameter must be a string"})
end,
- case clouseau_rpc:analyze(couch_util:to_binary(Analyzer),
- couch_util:to_binary(Text)) of
+ case
+ clouseau_rpc:analyze(
+ couch_util:to_binary(Analyzer),
+ couch_util:to_binary(Text)
+ )
+ of
{ok, Tokens} ->
send_json(Req, 200, {[{tokens, Tokens}]});
{error, Reason} ->
send_error(Req, Reason)
end.
-parse_index_params(#httpd{method='GET'}=Req, Db) ->
- IndexParams = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end,
- chttpd:qs(Req)),
+parse_index_params(#httpd{method = 'GET'} = Req, Db) ->
+ IndexParams = lists:flatmap(
+ fun({K, V}) -> parse_index_param(K, V) end,
+ chttpd:qs(Req)
+ ),
parse_index_params(IndexParams, Db);
-parse_index_params(#httpd{method='POST'}=Req, Db) ->
+parse_index_params(#httpd{method = 'POST'} = Req, Db) ->
{JsonBody} = chttpd:json_body_obj(Req),
- QSEntry = case chttpd:qs_value(Req, "partition") of
- undefined -> [];
- StrVal -> [{<<"partition">>, ?l2b(StrVal)}]
- end,
- IndexParams = lists:flatmap(fun({K, V}) ->
- parse_json_index_param(K, V)
- end, QSEntry ++ JsonBody),
+ QSEntry =
+ case chttpd:qs_value(Req, "partition") of
+ undefined -> [];
+ StrVal -> [{<<"partition">>, ?l2b(StrVal)}]
+ end,
+ IndexParams = lists:flatmap(
+ fun({K, V}) ->
+ parse_json_index_param(K, V)
+ end,
+ QSEntry ++ JsonBody
+ ),
ensure_unique_partition(IndexParams),
parse_index_params(IndexParams, Db);
parse_index_params(IndexParams, Db) ->
- DefaultLimit = case fabric_util:is_partitioned(Db) of
- true ->
- list_to_integer(config:get("dreyfus", "limit_partitions", "2000"));
- false ->
- list_to_integer(config:get("dreyfus", "limit", "25"))
- end,
- Args = #index_query_args{limit=DefaultLimit},
- lists:foldl(fun({K, V}, Args2) ->
- validate_index_query(K, V, Args2)
- end, Args, IndexParams).
+ DefaultLimit =
+ case fabric_util:is_partitioned(Db) of
+ true ->
+ list_to_integer(config:get("dreyfus", "limit_partitions", "2000"));
+ false ->
+ list_to_integer(config:get("dreyfus", "limit", "25"))
+ end,
+ Args = #index_query_args{limit = DefaultLimit},
+ lists:foldl(
+ fun({K, V}, Args2) ->
+ validate_index_query(K, V, Args2)
+ end,
+ Args,
+ IndexParams
+ ).
validate_index_query(q, Value, Args) ->
- Args#index_query_args{q=Value};
+ Args#index_query_args{q = Value};
validate_index_query(partition, Value, Args) ->
- Args#index_query_args{partition=Value};
+ Args#index_query_args{partition = Value};
validate_index_query(stale, Value, Args) ->
- Args#index_query_args{stale=Value};
+ Args#index_query_args{stale = Value};
validate_index_query(limit, Value, Args) ->
- Args#index_query_args{limit=Value};
+ Args#index_query_args{limit = Value};
validate_index_query(include_docs, Value, Args) ->
- Args#index_query_args{include_docs=Value};
+ Args#index_query_args{include_docs = Value};
validate_index_query(include_fields, Value, Args) ->
- Args#index_query_args{include_fields=Value};
+ Args#index_query_args{include_fields = Value};
validate_index_query(bookmark, Value, Args) ->
- Args#index_query_args{bookmark=Value};
+ Args#index_query_args{bookmark = Value};
validate_index_query(sort, Value, Args) ->
- Args#index_query_args{sort=Value};
-validate_index_query(group_by, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=false}};
-validate_index_query(group_field, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=true}};
-validate_index_query(group_sort, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{sort=Value}};
-validate_index_query(group_limit, Value, #index_query_args{grouping=Grouping}=Args) ->
- Args#index_query_args{grouping=Grouping#grouping{limit=Value}};
+ Args#index_query_args{sort = Value};
+validate_index_query(group_by, Value, #index_query_args{grouping = Grouping} = Args) ->
+ Args#index_query_args{grouping = Grouping#grouping{by = Value, new_api = false}};
+validate_index_query(group_field, Value, #index_query_args{grouping = Grouping} = Args) ->
+ Args#index_query_args{grouping = Grouping#grouping{by = Value, new_api = true}};
+validate_index_query(group_sort, Value, #index_query_args{grouping = Grouping} = Args) ->
+ Args#index_query_args{grouping = Grouping#grouping{sort = Value}};
+validate_index_query(group_limit, Value, #index_query_args{grouping = Grouping} = Args) ->
+ Args#index_query_args{grouping = Grouping#grouping{limit = Value}};
validate_index_query(stable, Value, Args) ->
- Args#index_query_args{stable=Value};
+ Args#index_query_args{stable = Value};
validate_index_query(counts, Value, Args) ->
- Args#index_query_args{counts=Value};
+ Args#index_query_args{counts = Value};
validate_index_query(ranges, Value, Args) ->
- Args#index_query_args{ranges=Value};
-validate_index_query(drilldown, [[_|_]|_] = Value, Args) ->
- Args#index_query_args{drilldown=Value};
+ Args#index_query_args{ranges = Value};
+validate_index_query(drilldown, [[_ | _] | _] = Value, Args) ->
+ Args#index_query_args{drilldown = Value};
validate_index_query(drilldown, Value, Args) ->
DrillDown = Args#index_query_args.drilldown,
- Args#index_query_args{drilldown=[Value|DrillDown]};
+ Args#index_query_args{drilldown = [Value | DrillDown]};
validate_index_query(highlight_fields, Value, Args) ->
- Args#index_query_args{highlight_fields=Value};
+ Args#index_query_args{highlight_fields = Value};
validate_index_query(highlight_pre_tag, Value, Args) ->
- Args#index_query_args{highlight_pre_tag=Value};
+ Args#index_query_args{highlight_pre_tag = Value};
validate_index_query(highlight_post_tag, Value, Args) ->
- Args#index_query_args{highlight_post_tag=Value};
+ Args#index_query_args{highlight_post_tag = Value};
validate_index_query(highlight_number, Value, Args) ->
- Args#index_query_args{highlight_number=Value};
+ Args#index_query_args{highlight_number = Value};
validate_index_query(highlight_size, Value, Args) ->
- Args#index_query_args{highlight_size=Value};
+ Args#index_query_args{highlight_size = Value};
validate_index_query(extra, _Value, Args) ->
Args.
@@ -359,8 +423,10 @@ parse_json_index_param(Key, Value) ->
parse_bool_param(_, Val) when is_boolean(Val) ->
Val;
-parse_bool_param(_, "true") -> true;
-parse_bool_param(_, "false") -> false;
+parse_bool_param(_, "true") ->
+ true;
+parse_bool_param(_, "false") ->
+ false;
parse_bool_param(Name, Val) ->
Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
throw({query_parse_error, ?l2b(Msg)}).
@@ -369,72 +435,76 @@ parse_int_param(_, Val) when is_integer(Val) ->
Val;
parse_int_param(Name, Val) ->
case (catch list_to_integer(Val)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
end.
parse_positive_int_param(Name, Val, Prop, Default) ->
MaximumVal = list_to_integer(
- config:get("dreyfus", Prop, Default)),
+ config:get("dreyfus", Prop, Default)
+ ),
case parse_int_param(Name, Val) of
- IntVal when IntVal > MaximumVal ->
- Fmt = "Value for ~s is too large, must not exceed ~p",
- Msg = io_lib:format(Fmt, [Name, MaximumVal]),
- throw({query_parse_error, ?l2b(Msg)});
- IntVal when IntVal > 0 ->
- IntVal;
- IntVal when IntVal =< 0 ->
- Fmt = "~s must be greater than zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
+ IntVal when IntVal > MaximumVal ->
+ Fmt = "Value for ~s is too large, must not exceed ~p",
+ Msg = io_lib:format(Fmt, [Name, MaximumVal]),
+ throw({query_parse_error, ?l2b(Msg)});
+ IntVal when IntVal > 0 ->
+ IntVal;
+ IntVal when IntVal =< 0 ->
+ Fmt = "~s must be greater than zero",
+ Msg = io_lib:format(Fmt, [Name]),
+ throw({query_parse_error, ?l2b(Msg)});
+ _ ->
+ Fmt = "Invalid value for ~s: ~p",
+ Msg = io_lib:format(Fmt, [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
end.
parse_positive_int_param2(Name, Val) ->
case parse_int_param(Name, Val) of
- IntVal when IntVal > 0 ->
- IntVal;
- IntVal when IntVal =< 0 ->
- Fmt = "~s must be greater than zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
+ IntVal when IntVal > 0 ->
+ IntVal;
+ IntVal when IntVal =< 0 ->
+ Fmt = "~s must be greater than zero",
+ Msg = io_lib:format(Fmt, [Name]),
+ throw({query_parse_error, ?l2b(Msg)});
+ _ ->
+ Fmt = "Invalid value for ~s: ~p",
+ Msg = io_lib:format(Fmt, [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
end.
parse_non_negative_int_param(Name, Val, Prop, Default) ->
MaximumVal = list_to_integer(
- config:get("dreyfus", Prop, Default)),
+ config:get("dreyfus", Prop, Default)
+ ),
case parse_int_param(Name, Val) of
- IntVal when IntVal > MaximumVal ->
- Fmt = "Value for ~s is too large, must not exceed ~p",
- Msg = io_lib:format(Fmt, [Name, MaximumVal]),
- throw({query_parse_error, ?l2b(Msg)});
- IntVal when IntVal >= 0 ->
- IntVal;
- IntVal when IntVal < 0 ->
- Fmt = "~s must be greater than or equal to zero",
- Msg = io_lib:format(Fmt, [Name]),
- throw({query_parse_error, ?l2b(Msg)});
- _ ->
- Fmt = "Invalid value for ~s: ~p",
- Msg = io_lib:format(Fmt, [Name, Val]),
- throw({query_parse_error, ?l2b(Msg)})
+ IntVal when IntVal > MaximumVal ->
+ Fmt = "Value for ~s is too large, must not exceed ~p",
+ Msg = io_lib:format(Fmt, [Name, MaximumVal]),
+ throw({query_parse_error, ?l2b(Msg)});
+ IntVal when IntVal >= 0 ->
+ IntVal;
+ IntVal when IntVal < 0 ->
+ Fmt = "~s must be greater than or equal to zero",
+ Msg = io_lib:format(Fmt, [Name]),
+ throw({query_parse_error, ?l2b(Msg)});
+ _ ->
+ Fmt = "Invalid value for ~s: ~p",
+ Msg = io_lib:format(Fmt, [Name, Val]),
+ throw({query_parse_error, ?l2b(Msg)})
end.
-
ensure_unique_partition(IndexParams) ->
- Partitions = lists:filter(fun({Key, _Val}) ->
- Key == partition
- end, IndexParams),
+ Partitions = lists:filter(
+ fun({Key, _Val}) ->
+ Key == partition
+ end,
+ IndexParams
+ ),
case length(lists:usort(Partitions)) > 1 of
true ->
Msg = <<"Multiple conflicting values for `partition` provided">>,
@@ -443,7 +513,6 @@ ensure_unique_partition(IndexParams) ->
ok
end.
-
validate_search_restrictions(Db, DDoc, Args) ->
#index_query_args{
q = Query,
@@ -485,8 +554,10 @@ validate_search_restrictions(Db, DDoc, Args) ->
{true, true} ->
ok;
{true, false} ->
- Msg3 = <<"`partition` parameter is mandatory "
- "for queries to this index.">>,
+ Msg3 = <<
+ "`partition` parameter is mandatory "
+ "for queries to this index."
+ >>,
throw({bad_request, Msg3});
{false, true} ->
Msg4 = <<"`partition` not supported on this index">>,
@@ -497,7 +568,8 @@ validate_search_restrictions(Db, DDoc, Args) ->
true ->
MaxLimit = config:get("dreyfus", "max_limit", "2000"),
parse_non_negative_int_param(
- "limit", Limit, "max_limit_partitions", MaxLimit);
+ "limit", Limit, "max_limit_partitions", MaxLimit
+ );
false ->
MaxLimit = config:get("dreyfus", "max_limit", "200"),
parse_non_negative_int_param("limit", Limit, "max_limit", MaxLimit)
@@ -505,52 +577,69 @@ validate_search_restrictions(Db, DDoc, Args) ->
DefaultArgs = #index_query_args{},
- case is_binary(Partition) andalso (
- Counts /= DefaultArgs#index_query_args.counts
- orelse Drilldown /= DefaultArgs#index_query_args.drilldown
- orelse Ranges /= DefaultArgs#index_query_args.ranges
- orelse GroupSort /= DefaultArgs#index_query_args.grouping#grouping.sort
- orelse GroupBy /= DefaultArgs#index_query_args.grouping#grouping.by
- orelse GroupLimit /= DefaultArgs#index_query_args.grouping#grouping.limit
- ) of
+ case
+ is_binary(Partition) andalso
+ (Counts /= DefaultArgs#index_query_args.counts orelse
+ Drilldown /= DefaultArgs#index_query_args.drilldown orelse
+ Ranges /= DefaultArgs#index_query_args.ranges orelse
+ GroupSort /= DefaultArgs#index_query_args.grouping#grouping.sort orelse
+ GroupBy /= DefaultArgs#index_query_args.grouping#grouping.by orelse
+ GroupLimit /= DefaultArgs#index_query_args.grouping#grouping.limit)
+ of
true ->
- Msg5 = <<"`partition` and any of `drilldown`, `ranges`, `group_field`, `group_sort`, `group_limit` or `group_by` are incompatible">>,
+ Msg5 =
+ <<"`partition` and any of `drilldown`, `ranges`, `group_field`, `group_sort`, `group_limit` or `group_by` are incompatible">>,
throw({bad_request, Msg5});
false ->
ok
end.
-
get_view_partition_option(#doc{body = {Props}}, Default) ->
{Options} = couch_util:get_value(<<"options">>, Props, {[]}),
couch_util:get_value(<<"partitioned">>, Options, Default).
-
hits_to_json(DbName, IncludeDocs, Hits) ->
{Ids, HitData} = lists:unzip(lists:map(fun get_hit_data/1, Hits)),
chttpd_stats:incr_rows(length(Hits)),
- if IncludeDocs ->
- chttpd_stats:incr_reads(length(Hits)),
- {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
- lists:zipwith(fun(Hit, {Id, Doc}) ->
- case Hit of
- {Id, Order, Fields} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}, Doc]};
- {Id, Order, Fields, Highlights} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}},
- {highlights, {Highlights}}, Doc]}
- end
- end, HitData, JsonDocs);
-
- true ->
- lists:map(fun(Hit) ->
- case Hit of
- {Id, Order, Fields} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}]};
- {Id, Order, Fields, Highlights} ->
- {[{id, Id}, {order, Order}, {fields, {Fields}}, {highlights, {Highlights}}]}
- end
- end, HitData)
+ if
+ IncludeDocs ->
+ chttpd_stats:incr_reads(length(Hits)),
+ {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
+ lists:zipwith(
+ fun(Hit, {Id, Doc}) ->
+ case Hit of
+ {Id, Order, Fields} ->
+ {[{id, Id}, {order, Order}, {fields, {Fields}}, Doc]};
+ {Id, Order, Fields, Highlights} ->
+ {[
+ {id, Id},
+ {order, Order},
+ {fields, {Fields}},
+ {highlights, {Highlights}},
+ Doc
+ ]}
+ end
+ end,
+ HitData,
+ JsonDocs
+ );
+ true ->
+ lists:map(
+ fun(Hit) ->
+ case Hit of
+ {Id, Order, Fields} ->
+ {[{id, Id}, {order, Order}, {fields, {Fields}}]};
+ {Id, Order, Fields, Highlights} ->
+ {[
+ {id, Id},
+ {order, Order},
+ {fields, {Fields}},
+ {highlights, {Highlights}}
+ ]}
+ end
+ end,
+ HitData
+ )
end.
get_hit_data(Hit) ->
@@ -565,13 +654,16 @@ get_hit_data(Hit) ->
end.
group_to_json(DbName, IncludeDocs, {Name, TotalHits, Hits}, UseNewApi) ->
- {TotalHitsKey, HitsKey} = case UseNewApi of
- true -> {total_rows, rows};
- false -> {total_hits, hits}
- end,
- {[{by, Name},
- {TotalHitsKey, TotalHits},
- {HitsKey, hits_to_json(DbName, IncludeDocs, Hits)}]}.
+ {TotalHitsKey, HitsKey} =
+ case UseNewApi of
+ true -> {total_rows, rows};
+ false -> {total_hits, hits}
+ end,
+ {[
+ {by, Name},
+ {TotalHitsKey, TotalHits},
+ {HitsKey, hits_to_json(DbName, IncludeDocs, Hits)}
+ ]}.
facets_to_json(Facets) ->
{[facet_to_json(F) || F <- Facets]}.
@@ -583,10 +675,13 @@ facet_to_json({K0, _V0, C0}) ->
{hd(K0), facets_to_json(C2)}.
send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi) ->
- GroupResponsePairs = case UseNewApi of
- true -> [{total_rows, TotalHits}, {groups, Groups}];
- false -> [{total_hits, TotalHits}, {total_grouped_hits, TotalGroupedHits}, {groups, Groups}]
- end,
+ GroupResponsePairs =
+ case UseNewApi of
+ true ->
+ [{total_rows, TotalHits}, {groups, Groups}];
+ false ->
+ [{total_hits, TotalHits}, {total_grouped_hits, TotalGroupedHits}, {groups, Groups}]
+ end,
send_json(Req, 200, {GroupResponsePairs}).
handle_error(Req, Db, DDoc, RetryCount, RetryPause, {exit, _} = Err) ->
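In the dreyfus_httpd changes above, values such as `Response`, `Counts` and `Ranges` that are bound to a `case` now start the `case` on the line after the `=`, and optional response members are appended as small lists. A minimal sketch of that binding style with hypothetical names (not part of this patch):

```
%% Illustrative only: hypothetical module, not from the CouchDB tree.
-module(fmt_case_example).
-export([optional_counts/1]).

%% Build the optional counts member of a response body; `undefined`
%% contributes nothing to the final list.
optional_counts(Counts0) ->
    Counts =
        case Counts0 of
            undefined ->
                [];
            _ ->
                [{counts, Counts0}]
        end,
    [{total_rows, 0}] ++ Counts.
```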
diff --git a/src/dreyfus/src/dreyfus_httpd_handlers.erl b/src/dreyfus/src/dreyfus_httpd_handlers.erl
index bf2be23b1..05188c252 100644
--- a/src/dreyfus/src/dreyfus_httpd_handlers.erl
+++ b/src/dreyfus/src/dreyfus_httpd_handlers.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_httpd_handlers).
@@ -20,10 +19,10 @@
url_handler(<<"_search_analyze">>) -> fun dreyfus_httpd:handle_analyze_req/1;
url_handler(_) -> no_match.
-db_handler(<<"_search_cleanup">>) -> fun dreyfus_httpd:handle_cleanup_req/2;
+db_handler(<<"_search_cleanup">>) -> fun dreyfus_httpd:handle_cleanup_req/2;
db_handler(_) -> no_match.
-design_handler(<<"_search">>) -> fun dreyfus_httpd:handle_search_req/3;
+design_handler(<<"_search">>) -> fun dreyfus_httpd:handle_search_req/3;
design_handler(<<"_search_info">>) -> fun dreyfus_httpd:handle_info_req/3;
design_handler(<<"_search_disk_size">>) -> fun dreyfus_httpd:handle_disk_size_req/3;
design_handler(_) -> no_match.
diff --git a/src/dreyfus/src/dreyfus_index.erl b/src/dreyfus/src/dreyfus_index.erl
index 2bf560f37..df3e68f84 100644
--- a/src/dreyfus/src/dreyfus_index.erl
+++ b/src/dreyfus/src/dreyfus_index.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
%% A dreyfus_index gen_server is linked to its clouseau twin.
@@ -21,23 +20,35 @@
-include_lib("couch/include/couch_db.hrl").
-include("dreyfus.hrl").
-
% public api.
--export([start_link/2, design_doc_to_index/2, await/2, search/2, info/1,
- group1/2, group2/2,
- design_doc_to_indexes/1]).
+-export([
+ start_link/2,
+ design_doc_to_index/2,
+ await/2,
+ search/2,
+ info/1,
+ group1/2,
+ group2/2,
+ design_doc_to_indexes/1
+]).
% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
% private definitions.
-record(state, {
dbname,
index,
- updater_pid=nil,
- index_pid=nil,
- waiting_list=[]
+ updater_pid = nil,
+ index_pid = nil,
+ waiting_list = []
}).
% exported for callback.
@@ -75,11 +86,11 @@ info(Pid0) ->
%% clouseau pid.
to_index_pid(Pid) ->
case node(Pid) == node() of
- true -> gen_server:call(Pid, get_index_pid, infinity);
+ true -> gen_server:call(Pid, get_index_pid, infinity);
false -> Pid
end.
-design_doc_to_indexes(#doc{body={Fields}}=Doc) ->
+design_doc_to_indexes(#doc{body = {Fields}} = Doc) ->
RawIndexes = couch_util:get_value(<<"indexes">>, Fields, {[]}),
case RawIndexes of
{IndexList} when is_list(IndexList) ->
@@ -87,12 +98,14 @@ design_doc_to_indexes(#doc{body={Fields}}=Doc) ->
lists:flatmap(
fun(IndexName) ->
case (catch design_doc_to_index(Doc, IndexName)) of
- {ok, #index{}=Index} -> [Index];
+ {ok, #index{} = Index} -> [Index];
_ -> []
end
end,
- IndexNames);
- _ -> []
+ IndexNames
+ );
+ _ ->
+ []
end.
% gen_server functions.
@@ -101,14 +114,18 @@ init({DbName, Index}) ->
process_flag(trap_exit, true),
case open_index(DbName, Index) of
{ok, Pid, Seq} ->
- State=#state{
- dbname=DbName,
- index=Index#index{current_seq=Seq, dbname=DbName},
- index_pid=Pid
- },
+ State = #state{
+ dbname = DbName,
+ index = Index#index{current_seq = Seq, dbname = DbName},
+ index_pid = Pid
+ },
case couch_db:open_int(DbName, []) of
{ok, Db} ->
- try couch_db:monitor(Db) after couch_db:close(Db) end,
+ try
+ couch_db:monitor(Db)
+ after
+ couch_db:close(Db)
+ end,
dreyfus_util:maybe_create_local_purge_doc(Db, Pid, Index),
proc_lib:init_ack({ok, self()}),
gen_server:enter_loop(?MODULE, [], State);
@@ -119,122 +136,155 @@ init({DbName, Index}) ->
proc_lib:init_ack(Error)
end.
-handle_call({await, RequestSeq}, From,
- #state{
- index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId,current_seq=Seq}=Index,
- index_pid=IndexPid,
- updater_pid=nil,
- waiting_list=WaitList
- }=State) when RequestSeq > Seq ->
+handle_call(
+ {await, RequestSeq},
+ From,
+ #state{
+ index =
+ #index{dbname = DbName, name = IdxName, ddoc_id = DDocId, current_seq = Seq} = Index,
+ index_pid = IndexPid,
+ updater_pid = nil,
+ waiting_list = WaitList
+ } = State
+) when RequestSeq > Seq ->
DbName2 = mem3:dbname(DbName),
<<"_design/", GroupId/binary>> = DDocId,
- NewState = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
- false ->
- UpPid = spawn_link(fun() ->
- dreyfus_index_updater:update(IndexPid,Index)
- end),
- State#state{
- updater_pid=UpPid,
- waiting_list=[{From,RequestSeq}|WaitList]
- };
- _ ->
- couch_log:notice("Index Blocked from Updating - db: ~p,"
- " ddocid: ~p name: ~p", [DbName, DDocId, IdxName]),
- State
- end,
+ NewState =
+ case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
+ false ->
+ UpPid = spawn_link(fun() ->
+ dreyfus_index_updater:update(IndexPid, Index)
+ end),
+ State#state{
+ updater_pid = UpPid,
+ waiting_list = [{From, RequestSeq} | WaitList]
+ };
+ _ ->
+ couch_log:notice(
+ "Index Blocked from Updating - db: ~p,"
+ " ddocid: ~p name: ~p",
+ [DbName, DDocId, IdxName]
+ ),
+ State
+ end,
{noreply, NewState};
-handle_call({await, RequestSeq}, _From,
- #state{index=#index{current_seq=Seq}}=State) when RequestSeq =< Seq ->
+handle_call(
+ {await, RequestSeq},
+ _From,
+ #state{index = #index{current_seq = Seq}} = State
+) when RequestSeq =< Seq ->
{reply, {ok, State#state.index_pid, Seq}, State};
-handle_call({await, RequestSeq}, From, #state{waiting_list=WaitList}=State) ->
+handle_call({await, RequestSeq}, From, #state{waiting_list = WaitList} = State) ->
{noreply, State#state{
- waiting_list=[{From,RequestSeq}|WaitList]
+ waiting_list = [{From, RequestSeq} | WaitList]
}};
-
-handle_call(get_index_pid, _From, State) -> % upgrade
+% upgrade
+handle_call(get_index_pid, _From, State) ->
{reply, State#state.index_pid, State};
-
-handle_call({search, QueryArgs0}, _From, State) -> % obsolete
+% obsolete
+handle_call({search, QueryArgs0}, _From, State) ->
Reply = search_int(State#state.index_pid, QueryArgs0),
{reply, Reply, State};
-
-handle_call({group1, QueryArgs0}, _From, State) -> % obsolete
+% obsolete
+handle_call({group1, QueryArgs0}, _From, State) ->
Reply = group1_int(State#state.index_pid, QueryArgs0),
{reply, Reply, State};
-
-handle_call({group2, QueryArgs0}, _From, State) -> % obsolete
+% obsolete
+handle_call({group2, QueryArgs0}, _From, State) ->
Reply = group2_int(State#state.index_pid, QueryArgs0),
{reply, Reply, State};
-
-handle_call(info, _From, State) -> % obsolete
+% obsolete
+handle_call(info, _From, State) ->
Reply = info_int(State#state.index_pid),
{reply, Reply, State}.
handle_cast(_Msg, State) ->
{noreply, State}.
-handle_info({'EXIT', FromPid, {updated, NewSeq}},
- #state{
- index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId}=Index0,
- index_pid=IndexPid,
- updater_pid=UpPid,
- waiting_list=WaitList
- }=State) when UpPid == FromPid ->
- Index = Index0#index{current_seq=NewSeq},
+handle_info(
+ {'EXIT', FromPid, {updated, NewSeq}},
+ #state{
+ index = #index{dbname = DbName, name = IdxName, ddoc_id = DDocId} = Index0,
+ index_pid = IndexPid,
+ updater_pid = UpPid,
+ waiting_list = WaitList
+ } = State
+) when UpPid == FromPid ->
+ Index = Index0#index{current_seq = NewSeq},
case reply_with_index(IndexPid, Index, WaitList) of
- [] ->
- {noreply, State#state{index=Index,
- updater_pid=nil,
- waiting_list=[]
- }};
- StillWaiting ->
- DbName2 = mem3:dbname(DbName),
- <<"_design/", GroupId/binary>> = DDocId,
- Pid = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
- true ->
- couch_log:notice("Index Blocked from Updating - db: ~p, ddocid: ~p"
- " name: ~p", [DbName, GroupId, IdxName]),
- nil;
- false ->
- spawn_link(fun() ->
- dreyfus_index_updater:update(IndexPid, Index)
- end)
- end,
- {noreply, State#state{index=Index,
- updater_pid=Pid,
- waiting_list=StillWaiting
- }}
+ [] ->
+ {noreply, State#state{
+ index = Index,
+ updater_pid = nil,
+ waiting_list = []
+ }};
+ StillWaiting ->
+ DbName2 = mem3:dbname(DbName),
+ <<"_design/", GroupId/binary>> = DDocId,
+ Pid =
+ case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of
+ true ->
+ couch_log:notice(
+ "Index Blocked from Updating - db: ~p, ddocid: ~p"
+ " name: ~p",
+ [DbName, GroupId, IdxName]
+ ),
+ nil;
+ false ->
+ spawn_link(fun() ->
+ dreyfus_index_updater:update(IndexPid, Index)
+ end)
+ end,
+ {noreply, State#state{
+ index = Index,
+ updater_pid = Pid,
+ waiting_list = StillWaiting
+ }}
end;
handle_info({'EXIT', _, {updated, _}}, State) ->
{noreply, State};
-handle_info({'EXIT', FromPid, Reason}, #state{
- index=Index,
- index_pid=IndexPid,
- waiting_list=WaitList
- }=State) when FromPid == IndexPid ->
+handle_info(
+ {'EXIT', FromPid, Reason},
+ #state{
+ index = Index,
+ index_pid = IndexPid,
+ waiting_list = WaitList
+ } = State
+) when FromPid == IndexPid ->
couch_log:notice(
- "index for ~p closed with reason ~p", [index_name(Index), Reason]),
+ "index for ~p closed with reason ~p", [index_name(Index), Reason]
+ ),
[gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
{stop, normal, State};
-handle_info({'EXIT', FromPid, Reason}, #state{
- index=Index,
- updater_pid=UpPid,
- waiting_list=WaitList
- }=State) when FromPid == UpPid ->
- couch_log:info("Shutting down index server ~p, updater ~p closing w/ reason ~w",
- [index_name(Index), UpPid, Reason]),
+handle_info(
+ {'EXIT', FromPid, Reason},
+ #state{
+ index = Index,
+ updater_pid = UpPid,
+ waiting_list = WaitList
+ } = State
+) when FromPid == UpPid ->
+ couch_log:info(
+ "Shutting down index server ~p, updater ~p closing w/ reason ~w",
+ [index_name(Index), UpPid, Reason]
+ ),
[gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList],
{stop, normal, State};
handle_info({'EXIT', Pid, Reason}, State) ->
% probably dreyfus_index_manager.
couch_log:notice("Unknown pid ~p closed with reason ~p", [Pid, Reason]),
{stop, normal, State};
-handle_info({'DOWN',_,_,Pid,Reason}, #state{
- index=Index,
- waiting_list=WaitList
- }=State) ->
- couch_log:info("Shutting down index server ~p, db ~p closing w/ reason ~w",
- [index_name(Index), Pid, Reason]),
+handle_info(
+ {'DOWN', _, _, Pid, Reason},
+ #state{
+ index = Index,
+ waiting_list = WaitList
+ } = State
+) ->
+ couch_log:info(
+ "Shutting down index server ~p, db ~p closing w/ reason ~w",
+ [index_name(Index), Pid, Reason]
+ ),
[gen_server:reply(P, {error, Reason}) || {P, _} <- WaitList],
{stop, normal, State}.
@@ -246,8 +296,8 @@ code_change(_OldVsn, State, _Extra) ->
% private functions.
-open_index(DbName, #index{analyzer=Analyzer, sig=Sig}) ->
- Path = <<DbName/binary,"/",Sig/binary>>,
+open_index(DbName, #index{analyzer = Analyzer, sig = Sig}) ->
+ Path = <<DbName/binary, "/", Sig/binary>>,
case clouseau_rpc:open_index(self(), Path, Analyzer) of
{ok, Pid} ->
case clouseau_rpc:get_update_seq(Pid) of
@@ -260,11 +310,11 @@ open_index(DbName, #index{analyzer=Analyzer, sig=Sig}) ->
Error
end.
-design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) ->
+design_doc_to_index(#doc{id = Id, body = {Fields}}, IndexName) ->
Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
{RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
- InvalidDDocError = {invalid_design_doc,
- <<"index `", IndexName/binary, "` must have parameter `index`">>},
+ InvalidDDocError =
+ {invalid_design_doc, <<"index `", IndexName/binary, "` must have parameter `index`">>},
case lists:keyfind(IndexName, 1, RawIndexes) of
false ->
{error, {not_found, <<IndexName/binary, " not found.">>}};
@@ -274,15 +324,21 @@ design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) ->
undefined ->
{error, InvalidDDocError};
Def ->
- Sig = ?l2b(couch_util:to_hex(couch_hash:md5_hash(
- term_to_binary({Analyzer, Def})))),
+ Sig = ?l2b(
+ couch_util:to_hex(
+ couch_hash:md5_hash(
+ term_to_binary({Analyzer, Def})
+ )
+ )
+ ),
{ok, #index{
- analyzer=Analyzer,
- ddoc_id=Id,
- def=Def,
- def_lang=Language,
- name=IndexName,
- sig=Sig}}
+ analyzer = Analyzer,
+ ddoc_id = Id,
+ def = Def,
+ def_lang = Language,
+ name = IndexName,
+ sig = Sig
+ }}
end;
_ ->
{error, InvalidDDocError}
@@ -293,49 +349,51 @@ reply_with_index(IndexPid, Index, WaitList) ->
reply_with_index(_IndexPid, _Index, [], Acc) ->
Acc;
-reply_with_index(IndexPid, #index{current_seq=IndexSeq}=Index, [{Pid, Seq}|Rest], Acc) when Seq =< IndexSeq ->
+reply_with_index(IndexPid, #index{current_seq = IndexSeq} = Index, [{Pid, Seq} | Rest], Acc) when
+ Seq =< IndexSeq
+->
gen_server:reply(Pid, {ok, IndexPid, IndexSeq}),
reply_with_index(IndexPid, Index, Rest, Acc);
-reply_with_index(IndexPid, Index, [{Pid, Seq}|Rest], Acc) ->
- reply_with_index(IndexPid, Index, Rest, [{Pid, Seq}|Acc]).
+reply_with_index(IndexPid, Index, [{Pid, Seq} | Rest], Acc) ->
+ reply_with_index(IndexPid, Index, Rest, [{Pid, Seq} | Acc]).
-index_name(#index{dbname=DbName,ddoc_id=DDocId,name=IndexName}) ->
+index_name(#index{dbname = DbName, ddoc_id = DDocId, name = IndexName}) ->
<<DbName/binary, " ", DDocId/binary, " ", IndexName/binary>>.
args_to_proplist(#index_query_args{} = Args) ->
[
- {'query', Args#index_query_args.q},
- {partition, Args#index_query_args.partition},
- {limit, Args#index_query_args.limit},
- {refresh, Args#index_query_args.stale =:= false},
- {'after', Args#index_query_args.bookmark},
- {sort, Args#index_query_args.sort},
- {include_fields, Args#index_query_args.include_fields},
- {counts, Args#index_query_args.counts},
- {ranges, Args#index_query_args.ranges},
- {drilldown, Args#index_query_args.drilldown},
- {highlight_fields, Args#index_query_args.highlight_fields},
- {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
- {highlight_post_tag, Args#index_query_args.highlight_post_tag},
- {highlight_number, Args#index_query_args.highlight_number},
- {highlight_size, Args#index_query_args.highlight_size}
+ {'query', Args#index_query_args.q},
+ {partition, Args#index_query_args.partition},
+ {limit, Args#index_query_args.limit},
+ {refresh, Args#index_query_args.stale =:= false},
+ {'after', Args#index_query_args.bookmark},
+ {sort, Args#index_query_args.sort},
+ {include_fields, Args#index_query_args.include_fields},
+ {counts, Args#index_query_args.counts},
+ {ranges, Args#index_query_args.ranges},
+ {drilldown, Args#index_query_args.drilldown},
+ {highlight_fields, Args#index_query_args.highlight_fields},
+ {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
+ {highlight_post_tag, Args#index_query_args.highlight_post_tag},
+ {highlight_number, Args#index_query_args.highlight_number},
+ {highlight_size, Args#index_query_args.highlight_size}
].
args_to_proplist2(#index_query_args{} = Args) ->
[
- {'query', Args#index_query_args.q},
- {field, Args#index_query_args.grouping#grouping.by},
- {refresh, Args#index_query_args.stale =:= false},
- {groups, Args#index_query_args.grouping#grouping.groups},
- {group_sort, Args#index_query_args.grouping#grouping.sort},
- {sort, Args#index_query_args.sort},
- {limit, Args#index_query_args.limit},
- {include_fields, Args#index_query_args.include_fields},
- {highlight_fields, Args#index_query_args.highlight_fields},
- {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
- {highlight_post_tag, Args#index_query_args.highlight_post_tag},
- {highlight_number, Args#index_query_args.highlight_number},
- {highlight_size, Args#index_query_args.highlight_size}
+ {'query', Args#index_query_args.q},
+ {field, Args#index_query_args.grouping#grouping.by},
+ {refresh, Args#index_query_args.stale =:= false},
+ {groups, Args#index_query_args.grouping#grouping.groups},
+ {group_sort, Args#index_query_args.grouping#grouping.sort},
+ {sort, Args#index_query_args.sort},
+ {limit, Args#index_query_args.limit},
+ {include_fields, Args#index_query_args.include_fields},
+ {highlight_fields, Args#index_query_args.highlight_fields},
+ {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
+ {highlight_post_tag, Args#index_query_args.highlight_post_tag},
+ {highlight_number, Args#index_query_args.highlight_number},
+ {highlight_size, Args#index_query_args.highlight_size}
].
search_int(Pid, QueryArgs0) ->
@@ -355,8 +413,15 @@ group1_int(Pid, QueryArgs0) ->
sort = Sort
}
} = QueryArgs,
- clouseau_rpc:group1(Pid, Query, GroupBy, Stale =:= false, Sort,
- Offset, Limit).
+ clouseau_rpc:group1(
+ Pid,
+ Query,
+ GroupBy,
+ Stale =:= false,
+ Sort,
+ Offset,
+ Limit
+ ).
group2_int(Pid, QueryArgs0) ->
QueryArgs = dreyfus_util:upgrade(QueryArgs0),
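The dreyfus_index diff above illustrates two more conventions: vertical `-export([...])` lists with one function per line, and `field = Default` spacing in record definitions. A compact, hypothetical gen_server skeleton written in that style (illustration only, not from this tree):

```
%% Illustrative only: hypothetical module, not from the CouchDB tree.
-module(fmt_server_example).
-behaviour(gen_server).

-export([
    start_link/0,
    ping/0
]).

-export([
    init/1,
    handle_call/3,
    handle_cast/2,
    handle_info/2,
    terminate/2,
    code_change/3
]).

-record(state, {
    pings = 0,
    last_ping = nil
}).

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

ping() ->
    gen_server:call(?MODULE, ping).

init([]) ->
    {ok, #state{}}.

handle_call(ping, _From, #state{pings = N} = State) ->
    {reply, pong, State#state{pings = N + 1, last_ping = erlang:timestamp()}}.

handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
```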
diff --git a/src/dreyfus/src/dreyfus_index_manager.erl b/src/dreyfus/src/dreyfus_index_manager.erl
index 9c6cad2ae..f0dbbec64 100644
--- a/src/dreyfus/src/dreyfus_index_manager.erl
+++ b/src/dreyfus/src/dreyfus_index_manager.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_index_manager).
@@ -26,8 +25,14 @@
-export([start_link/0, get_index/2, get_disk_size/2]).
% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-export([handle_db_event/3]).
@@ -38,7 +43,7 @@ start_link() ->
get_index(DbName, Index) ->
gen_server:call(?MODULE, {get_index, DbName, Index}, infinity).
-get_disk_size(DbName, #index{sig=Sig}) ->
+get_disk_size(DbName, #index{sig = Sig}) ->
Path = <<DbName/binary, "/", Sig/binary>>,
clouseau_rpc:disk_size(Path).
@@ -52,20 +57,19 @@ init([]) ->
process_flag(trap_exit, true),
{ok, nil}.
-handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) ->
+handle_call({get_index, DbName, #index{sig = Sig} = Index}, From, State) ->
case ets:lookup(?BY_SIG, {DbName, Sig}) of
- [] ->
- Pid = spawn_link(fun() -> new_index(DbName, Index) end),
- ets:insert(?BY_PID, {Pid, opening, {DbName, Sig}}),
- ets:insert(?BY_SIG, {{DbName,Sig}, [From]}),
- {noreply, State};
- [{_, WaitList}] when is_list(WaitList) ->
- ets:insert(?BY_SIG, {{DbName, Sig}, [From | WaitList]}),
- {noreply, State};
- [{_, ExistingPid}] ->
- {reply, {ok, ExistingPid}, State}
+ [] ->
+ Pid = spawn_link(fun() -> new_index(DbName, Index) end),
+ ets:insert(?BY_PID, {Pid, opening, {DbName, Sig}}),
+ ets:insert(?BY_SIG, {{DbName, Sig}, [From]}),
+ {noreply, State};
+ [{_, WaitList}] when is_list(WaitList) ->
+ ets:insert(?BY_SIG, {{DbName, Sig}, [From | WaitList]}),
+ {noreply, State};
+ [{_, ExistingPid}] ->
+ {reply, {ok, ExistingPid}, State}
end;
-
handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) ->
link(NewPid),
[{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
@@ -73,7 +77,6 @@ handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) ->
ets:delete(?BY_PID, OpenerPid),
add_to_ets(NewPid, DbName, Sig),
{reply, ok, State};
-
handle_call({open_error, DbName, Sig, Error}, {OpenerPid, _}, State) ->
[{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),
[gen_server:reply(From, Error) || From <- WaitList],
@@ -84,27 +87,28 @@ handle_call({open_error, DbName, Sig, Error}, {OpenerPid, _}, State) ->
handle_cast({cleanup, DbName}, State) ->
clouseau_rpc:cleanup(DbName),
{noreply, State};
-
handle_cast({rename, DbName}, State) ->
clouseau_rpc:rename(DbName),
{noreply, State}.
handle_info({'EXIT', FromPid, Reason}, State) ->
case ets:lookup(?BY_PID, FromPid) of
- [] ->
- if Reason =/= normal ->
- couch_log:error("Exit on non-updater process: ~p", [Reason]),
- exit(Reason);
- true -> ok
- end;
- % Using Reason /= normal to force a match error
- % if we didn't delete the Pid in a handle_call
- % message for some reason.
- [{_, opening, {DbName, Sig}}] when Reason /= normal ->
- Msg = {open_error, DbName, Sig, Reason},
- {reply, ok, _} = handle_call(Msg, {FromPid, nil}, State);
- [{_, {DbName, Sig}}] ->
- delete_from_ets(FromPid, DbName, Sig)
+ [] ->
+ if
+ Reason =/= normal ->
+ couch_log:error("Exit on non-updater process: ~p", [Reason]),
+ exit(Reason);
+ true ->
+ ok
+ end;
+ % Using Reason /= normal to force a match error
+ % if we didn't delete the Pid in a handle_call
+ % message for some reason.
+ [{_, opening, {DbName, Sig}}] when Reason /= normal ->
+ Msg = {open_error, DbName, Sig, Reason},
+ {reply, ok, _} = handle_call(Msg, {FromPid, nil}, State);
+ [{_, {DbName, Sig}}] ->
+ delete_from_ets(FromPid, DbName, Sig)
end,
{noreply, State}.
@@ -120,8 +124,11 @@ handle_db_event(DbName, created, _St) ->
gen_server:cast(?MODULE, {cleanup, DbName}),
{ok, nil};
handle_db_event(DbName, deleted, _St) ->
- RecoveryEnabled = config:get_boolean("couchdb",
- "enable_database_recovery", false),
+ RecoveryEnabled = config:get_boolean(
+ "couchdb",
+ "enable_database_recovery",
+ false
+ ),
case RecoveryEnabled of
true ->
gen_server:cast(?MODULE, {rename, DbName});
@@ -133,15 +140,15 @@ handle_db_event(DbName, deleted, _St) ->
handle_db_event(_DbName, _Event, _St) ->
{ok, nil}.
-new_index(DbName, #index{sig=Sig}=Index) ->
+new_index(DbName, #index{sig = Sig} = Index) ->
case (catch dreyfus_index:start_link(DbName, Index)) of
- {ok, NewPid} ->
- Msg = {open_ok, DbName, Sig, NewPid},
- ok = gen_server:call(?MODULE, Msg, infinity),
- unlink(NewPid);
- Error ->
- Msg = {open_error, DbName, Sig, Error},
- ok = gen_server:call(?MODULE, Msg, infinity)
+ {ok, NewPid} ->
+ Msg = {open_ok, DbName, Sig, NewPid},
+ ok = gen_server:call(?MODULE, Msg, infinity),
+ unlink(NewPid);
+ Error ->
+ Msg = {open_error, DbName, Sig, Error},
+ ok = gen_server:call(?MODULE, Msg, infinity)
end.
add_to_ets(Pid, DbName, Sig) ->
@@ -151,4 +158,3 @@ add_to_ets(Pid, DbName, Sig) ->
delete_from_ets(Pid, DbName, Sig) ->
true = ets:delete(?BY_PID, Pid),
true = ets:delete(?BY_SIG, {DbName, Sig}).
-
diff --git a/src/dreyfus/src/dreyfus_index_updater.erl b/src/dreyfus/src/dreyfus_index_updater.erl
index 87edef0ad..6edc5a257 100644
--- a/src/dreyfus/src/dreyfus_index_updater.erl
+++ b/src/dreyfus/src/dreyfus_index_updater.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_index_updater).
@@ -70,10 +69,10 @@ update(IndexPid, Index) ->
couch_db:close(Db)
end.
-load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs}=Acc) ->
+load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs} = Acc) ->
couch_task_status:update([{changes_done, I}, {progress, (I * 100) div Total}]),
DI = couch_doc:to_doc_info(FDI),
- #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{rev=Rev}|_]} = DI,
+ #doc_info{id = Id, high_seq = Seq, revs = [#rev_info{rev = Rev} | _]} = DI,
%check if it is processed in purge_index to avoid update the index again.
case lists:member({Id, Rev}, ExcludeIdRevs) of
true -> ok;
@@ -83,9 +82,9 @@ load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs}=Acc
case timer:now_diff(Now = erlang:timestamp(), LastCommitTime) >= 60000000 of
true ->
ok = clouseau_rpc:commit(IndexPid, Seq),
- {ok, {I+1, IndexPid, Db, Proc, Total, Now, ExcludeIdRevs}};
+ {ok, {I + 1, IndexPid, Db, Proc, Total, Now, ExcludeIdRevs}};
false ->
- {ok, setelement(1, Acc, I+1)}
+ {ok, setelement(1, Acc, I + 1)}
end.
purge_index(Db, IndexPid, Index) ->
@@ -94,26 +93,29 @@ purge_index(Db, IndexPid, Index) ->
try
true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]),
FoldFun = fun({PurgeSeq, _UUID, Id, _Revs}, {Acc, _}) ->
- Acc0 = case couch_db:get_full_doc_info(Db, Id) of
- not_found ->
- ok = clouseau_rpc:delete(IndexPid, Id),
- Acc;
- FDI ->
- DI = couch_doc:to_doc_info(FDI),
- #doc_info{id=Id, revs=[#rev_info{rev=Rev}|_]} = DI,
- case lists:member({Id, Rev}, Acc) of
- true -> Acc;
- false ->
- update_or_delete_index(IndexPid, Db, DI, Proc),
- [{Id, Rev} | Acc]
- end
- end,
+ Acc0 =
+ case couch_db:get_full_doc_info(Db, Id) of
+ not_found ->
+ ok = clouseau_rpc:delete(IndexPid, Id),
+ Acc;
+ FDI ->
+ DI = couch_doc:to_doc_info(FDI),
+ #doc_info{id = Id, revs = [#rev_info{rev = Rev} | _]} = DI,
+ case lists:member({Id, Rev}, Acc) of
+ true ->
+ Acc;
+ false ->
+ update_or_delete_index(IndexPid, Db, DI, Proc),
+ [{Id, Rev} | Acc]
+ end
+ end,
update_task(1),
{ok, {Acc0, PurgeSeq}}
end,
{ok, {ExcludeList, NewPurgeSeq}} = couch_db:fold_purge_infos(
- Db, IdxPurgeSeq, FoldFun, {[], 0}, []),
+ Db, IdxPurgeSeq, FoldFun, {[], 0}, []
+ ),
clouseau_rpc:set_purge_seq(IndexPid, NewPurgeSeq),
update_local_doc(Db, Index, NewPurgeSeq),
{ok, ExcludeList}
@@ -127,7 +129,7 @@ count_pending_purged_docs_since(Db, IndexPid) ->
DbPurgeSeq - IdxPurgeSeq.
update_or_delete_index(IndexPid, Db, DI, Proc) ->
- #doc_info{id=Id, revs=[#rev_info{deleted=Del}|_]} = DI,
+ #doc_info{id = Id, revs = [#rev_info{deleted = Del} | _]} = DI,
case Del of
true ->
ok = clouseau_rpc:delete(IndexPid, Id);
@@ -138,12 +140,12 @@ update_or_delete_index(IndexPid, Db, DI, Proc) ->
false ->
{ok, Doc} = couch_db:open_doc(Db, DI, []),
Json = couch_doc:to_json_obj(Doc, []),
- [Fields|_] = proc_prompt(Proc, [<<"index_doc">>, Json]),
+ [Fields | _] = proc_prompt(Proc, [<<"index_doc">>, Json]),
Fields1 = [list_to_tuple(Field) || Field <- Fields],
Fields2 = maybe_add_partition(Db, Id, Fields1),
case Fields2 of
[] -> ok = clouseau_rpc:delete(IndexPid, Id);
- _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields2)
+ _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields2)
end
end
end.
@@ -156,12 +158,13 @@ update_local_doc(Db, Index, PurgeSeq) ->
update_task(NumChanges) ->
[Changes, Total] = couch_task_status:get([changes_done, total_changes]),
Changes2 = Changes + NumChanges,
- Progress = case Total of
- 0 ->
- 0;
- _ ->
- (Changes2 * 100) div Total
- end,
+ Progress =
+ case Total of
+ 0 ->
+ 0;
+ _ ->
+ (Changes2 * 100) div Total
+ end,
couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]).
maybe_skip_doc(Db, <<"_design/", _/binary>>) ->
diff --git a/src/dreyfus/src/dreyfus_plugin_couch_db.erl b/src/dreyfus/src/dreyfus_plugin_couch_db.erl
index b9f48ba74..a55c26373 100644
--- a/src/dreyfus/src/dreyfus_plugin_couch_db.erl
+++ b/src/dreyfus/src/dreyfus_plugin_couch_db.erl
@@ -17,10 +17,8 @@
on_compact/2
]).
-
is_valid_purge_client(DbName, Props) ->
dreyfus_util:verify_index_exists(DbName, Props).
-
on_compact(DbName, DDocs) ->
dreyfus_util:ensure_local_purge_docs(DbName, DDocs).
diff --git a/src/dreyfus/src/dreyfus_rpc.erl b/src/dreyfus/src/dreyfus_rpc.erl
index cc50d0999..08b719435 100644
--- a/src/dreyfus/src/dreyfus_rpc.erl
+++ b/src/dreyfus/src/dreyfus_rpc.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_rpc).
@@ -110,7 +109,6 @@ get_or_create_db(DbName, Options) ->
Else
end.
-
calculate_seqs(Db, Stale) ->
LastSeq = couch_db:get_update_seq(Db),
if
diff --git a/src/dreyfus/src/dreyfus_sup.erl b/src/dreyfus/src/dreyfus_sup.erl
index d855a822e..e19203af2 100644
--- a/src/dreyfus/src/dreyfus_sup.erl
+++ b/src/dreyfus/src/dreyfus_sup.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_sup).
@@ -25,8 +24,7 @@ init(_Args) ->
Children = [
child(dreyfus_index_manager)
],
- {ok, {{one_for_one,10,1},
- couch_epi:register_service(dreyfus_epi, Children)}}.
+ {ok, {{one_for_one, 10, 1}, couch_epi:register_service(dreyfus_epi, Children)}}.
child(Child) ->
{Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}.
diff --git a/src/dreyfus/src/dreyfus_util.erl b/src/dreyfus/src/dreyfus_util.erl
index 05ecdb621..301d3887a 100644
--- a/src/dreyfus/src/dreyfus_util.erl
+++ b/src/dreyfus/src/dreyfus_util.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*-
-module(dreyfus_util).
@@ -33,7 +32,6 @@
verify_index_exists/2
]).
-
get_shards(DbName, #index_query_args{partition = nil} = Args) ->
case use_ushards(Args) of
true ->
@@ -52,20 +50,22 @@ get_shards(DbName, #index_query_args{partition = Partition} = Args) ->
get_shards(DbName, Args) ->
get_shards(DbName, upgrade(Args)).
-use_ushards(#index_query_args{stale=ok}) ->
+use_ushards(#index_query_args{stale = ok}) ->
true;
-use_ushards(#index_query_args{stable=true}) ->
+use_ushards(#index_query_args{stable = true}) ->
true;
use_ushards(#index_query_args{}) ->
false.
-
get_ring_opts(#index_query_args{partition = nil}, _Shards) ->
[];
get_ring_opts(#index_query_args{}, Shards) ->
- Shards1 = lists:map(fun(#shard{} = S) ->
- S#shard{ref = undefined}
- end, Shards),
+ Shards1 = lists:map(
+ fun(#shard{} = S) ->
+ S#shard{ref = undefined}
+ end,
+ Shards
+ ),
[{any, Shards1}].
-spec sort(Order :: relevance | [any()], [#sortable{}]) -> [#sortable{}].
@@ -79,70 +79,80 @@ stash_items(List) ->
stash_item(Item) ->
Ref = make_ref(),
- {Item#sortable{item=Ref}, {Ref, Item#sortable.item}}.
+ {Item#sortable{item = Ref}, {Ref, Item#sortable.item}}.
unstash_items(List, Stash) ->
[unstash_item(Item, Stash) || Item <- List].
unstash_item(Stashed, Stash) ->
{_, Item} = lists:keyfind(Stashed#sortable.item, 1, Stash),
- Stashed#sortable{item=Item}.
+ Stashed#sortable{item = Item}.
-spec sort(Order :: relevance | [any()], #sortable{}, #sortable{}) -> boolean().
-sort(relevance, #sortable{}=A, #sortable{}=B) ->
+sort(relevance, #sortable{} = A, #sortable{} = B) ->
sort2(pad([<<"-">>], <<"">>, length(A#sortable.order)), A, B);
-sort(Sort, #sortable{}=A, #sortable{}=B) when is_binary(Sort) ->
+sort(Sort, #sortable{} = A, #sortable{} = B) when is_binary(Sort) ->
sort2(pad([Sort], <<"">>, length(A#sortable.order)), A, B);
-sort(Sort, #sortable{}=A, #sortable{}=B) when is_list(Sort) ->
+sort(Sort, #sortable{} = A, #sortable{} = B) when is_list(Sort) ->
sort2(pad(Sort, <<"">>, length(A#sortable.order)), A, B).
-spec sort2([any()], #sortable{}, #sortable{}) -> boolean().
-sort2([<<"-",_/binary>>|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B ->
+sort2([<<"-", _/binary>> | _], #sortable{order = [A | _]}, #sortable{order = [B | _]}) when
+ A =/= B
+->
A > B;
-sort2([_|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B ->
+sort2([_ | _], #sortable{order = [A | _]}, #sortable{order = [B | _]}) when A =/= B ->
A < B;
-sort2([], #sortable{shard=#shard{range=A}}, #sortable{shard=#shard{range=B}}) ->
+sort2([], #sortable{shard = #shard{range = A}}, #sortable{shard = #shard{range = B}}) ->
% arbitrary tie-breaker
A =< B;
-sort2([_|Rest], #sortable{order=[_|RestA]}=SortableA, #sortable{order=[_|RestB]}=SortableB) ->
- sort2(Rest, SortableA#sortable{order=RestA}, SortableB#sortable{order=RestB}).
+sort2(
+ [_ | Rest],
+ #sortable{order = [_ | RestA]} = SortableA,
+ #sortable{order = [_ | RestB]} = SortableB
+) ->
+ sort2(Rest, SortableA#sortable{order = RestA}, SortableB#sortable{order = RestB}).
pad(List, _Padding, Length) when length(List) >= Length ->
List;
pad(List, Padding, Length) ->
pad(List ++ [Padding], Padding, Length).
-upgrade(#index_query_args{}=Args) ->
+upgrade(#index_query_args{} = Args) ->
Args;
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable}) ->
+upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable}) ->
#index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable};
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable, Counts, Ranges, Drilldown}) ->
+ q = Query,
+ limit = Limit,
+ stale = Stale,
+ include_docs = IncludeDocs,
+ bookmark = Bookmark,
+ sort = Sort,
+ grouping = Grouping,
+ stable = Stable
+ };
+upgrade(
+ {index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable, Counts,
+ Ranges, Drilldown}
+) ->
#index_query_args{
- q = Query,
- limit = Limit,
- stale = Stale,
- include_docs = IncludeDocs,
- bookmark = Bookmark,
- sort = Sort,
- grouping = Grouping,
- stable = Stable,
- counts=Counts,
- ranges = Ranges,
- drilldown = Drilldown};
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable, Counts, Ranges, Drilldown,
- IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
- HighlightNumber, HighlightSize}) ->
+ q = Query,
+ limit = Limit,
+ stale = Stale,
+ include_docs = IncludeDocs,
+ bookmark = Bookmark,
+ sort = Sort,
+ grouping = Grouping,
+ stable = Stable,
+ counts = Counts,
+ ranges = Ranges,
+ drilldown = Drilldown
+ };
+upgrade(
+ {index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable, Counts,
+ Ranges, Drilldown, IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
+ HighlightNumber, HighlightSize}
+) ->
#index_query_args{
q = Query,
limit = Limit,
@@ -150,7 +160,7 @@ upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
include_docs = IncludeDocs,
bookmark = Bookmark,
sort = Sort,
- grouping = Grouping,
+ grouping = Grouping,
stable = Stable,
counts = Counts,
ranges = Ranges,
@@ -162,10 +172,11 @@ upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
highlight_number = HighlightNumber,
highlight_size = HighlightSize
};
-upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
- Sort, Grouping, Stable, Counts, Ranges, Drilldown,
- IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
- HighlightNumber, HighlightSize, RawBookmark}) ->
+upgrade(
+ {index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, Sort, Grouping, Stable, Counts,
+ Ranges, Drilldown, IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag,
+ HighlightNumber, HighlightSize, RawBookmark}
+) ->
#index_query_args{
q = Query,
limit = Limit,
@@ -173,7 +184,7 @@ upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
include_docs = IncludeDocs,
bookmark = Bookmark,
sort = Sort,
- grouping = Grouping,
+ grouping = Grouping,
stable = Stable,
counts = Counts,
ranges = Ranges,
@@ -187,56 +198,43 @@ upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark,
raw_bookmark = RawBookmark
}.
-export(#index_query_args{partition = nil, counts = nil, ranges = nil,
- drilldown = [], include_fields = nil, highlight_fields = nil} = Args) ->
+export(
+ #index_query_args{
+ partition = nil,
+ counts = nil,
+ ranges = nil,
+ drilldown = [],
+ include_fields = nil,
+ highlight_fields = nil
+ } = Args
+) ->
% Ensure existing searches work during the upgrade by creating an
% #index_query_args record in the old format
- {index_query_args,
- Args#index_query_args.q,
- Args#index_query_args.limit,
- Args#index_query_args.stale,
- Args#index_query_args.include_docs,
- Args#index_query_args.bookmark,
- Args#index_query_args.sort,
- Args#index_query_args.grouping,
- Args#index_query_args.stable
- };
-export(#index_query_args{partition = nil, include_fields = nil,
- highlight_fields = nil} = Args) ->
- {index_query_args,
- Args#index_query_args.q,
- Args#index_query_args.limit,
- Args#index_query_args.stale,
- Args#index_query_args.include_docs,
- Args#index_query_args.bookmark,
- Args#index_query_args.sort,
- Args#index_query_args.grouping,
- Args#index_query_args.stable,
- Args#index_query_args.counts,
- Args#index_query_args.ranges,
- Args#index_query_args.drilldown
- };
+ {index_query_args, Args#index_query_args.q, Args#index_query_args.limit,
+ Args#index_query_args.stale, Args#index_query_args.include_docs,
+ Args#index_query_args.bookmark, Args#index_query_args.sort, Args#index_query_args.grouping,
+ Args#index_query_args.stable};
+export(
+ #index_query_args{
+ partition = nil,
+ include_fields = nil,
+ highlight_fields = nil
+ } = Args
+) ->
+ {index_query_args, Args#index_query_args.q, Args#index_query_args.limit,
+ Args#index_query_args.stale, Args#index_query_args.include_docs,
+ Args#index_query_args.bookmark, Args#index_query_args.sort, Args#index_query_args.grouping,
+ Args#index_query_args.stable, Args#index_query_args.counts, Args#index_query_args.ranges,
+ Args#index_query_args.drilldown};
export(#index_query_args{partition = nil} = Args) ->
- {index_query_args,
- Args#index_query_args.q,
- Args#index_query_args.limit,
- Args#index_query_args.stale,
- Args#index_query_args.include_docs,
- Args#index_query_args.bookmark,
- Args#index_query_args.sort,
- Args#index_query_args.grouping,
- Args#index_query_args.stable,
- Args#index_query_args.counts,
- Args#index_query_args.ranges,
- Args#index_query_args.drilldown,
- Args#index_query_args.include_fields,
- Args#index_query_args.highlight_fields,
- Args#index_query_args.highlight_pre_tag,
- Args#index_query_args.highlight_post_tag,
- Args#index_query_args.highlight_number,
- Args#index_query_args.highlight_size,
- Args#index_query_args.raw_bookmark
- };
+ {index_query_args, Args#index_query_args.q, Args#index_query_args.limit,
+ Args#index_query_args.stale, Args#index_query_args.include_docs,
+ Args#index_query_args.bookmark, Args#index_query_args.sort, Args#index_query_args.grouping,
+ Args#index_query_args.stable, Args#index_query_args.counts, Args#index_query_args.ranges,
+ Args#index_query_args.drilldown, Args#index_query_args.include_fields,
+ Args#index_query_args.highlight_fields, Args#index_query_args.highlight_pre_tag,
+ Args#index_query_args.highlight_post_tag, Args#index_query_args.highlight_number,
+ Args#index_query_args.highlight_size, Args#index_query_args.raw_bookmark};
export(QueryArgs) ->
QueryArgs.
@@ -246,14 +244,20 @@ time(Metric, {M, F, A}) when is_list(Metric) ->
erlang:apply(M, F, A)
after
Length = timer:now_diff(os:timestamp(), Start) / 1000,
- couch_stats:update_histogram([dreyfus | Metric], Length)
+ couch_stats:update_histogram([dreyfus | Metric], Length)
end.
-in_black_list(DbName, GroupId, IndexName) when is_binary(DbName),
- is_binary(GroupId), is_binary(IndexName) ->
+in_black_list(DbName, GroupId, IndexName) when
+ is_binary(DbName),
+ is_binary(GroupId),
+ is_binary(IndexName)
+->
in_black_list(?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName));
-in_black_list(DbName, GroupId, IndexName) when is_list(DbName),
- is_list(GroupId), is_list(IndexName) ->
+in_black_list(DbName, GroupId, IndexName) when
+ is_list(DbName),
+ is_list(GroupId),
+ is_list(IndexName)
+->
in_black_list(lists:flatten([DbName, ".", GroupId, ".", IndexName]));
in_black_list(_DbName, _GroupId, _IndexName) ->
false.
@@ -269,9 +273,13 @@ in_black_list(_IndexEntry) ->
maybe_deny_index(DbName, GroupId, IndexName) ->
case in_black_list(DbName, GroupId, IndexName) of
true ->
- Reason = ?l2b(io_lib:format("Index <~s, ~s, ~s>, is BlackListed",
- [?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName)])),
- throw ({bad_request, Reason});
+ Reason = ?l2b(
+ io_lib:format(
+ "Index <~s, ~s, ~s>, is BlackListed",
+ [?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName)]
+ )
+ ),
+ throw({bad_request, Reason});
_ ->
ok
end.
@@ -284,31 +292,43 @@ get_value_from_options(Key, Options) ->
undefined ->
Reason = binary_to_list(Key) ++ " must exist in Options.",
throw({bad_request, Reason});
- Value -> Value
+ Value ->
+ Value
end.
ensure_local_purge_docs(DbName, DDocs) ->
couch_util:with_db(DbName, fun(Db) ->
- lists:foreach(fun(DDoc) ->
- #doc{body = {Props}} = DDoc,
- case couch_util:get_value(<<"indexes">>, Props) of
- undefined -> false;
- _ ->
- try dreyfus_index:design_doc_to_indexes(DDoc) of
- SIndexes -> ensure_local_purge_doc(Db, SIndexes)
- catch _:_ ->
- ok
- end
- end
- end, DDocs)
+ lists:foreach(
+ fun(DDoc) ->
+ #doc{body = {Props}} = DDoc,
+ case couch_util:get_value(<<"indexes">>, Props) of
+ undefined ->
+ false;
+ _ ->
+ try dreyfus_index:design_doc_to_indexes(DDoc) of
+ SIndexes -> ensure_local_purge_doc(Db, SIndexes)
+ catch
+ _:_ ->
+ ok
+ end
+ end
+ end,
+ DDocs
+ )
end).
ensure_local_purge_doc(Db, SIndexes) ->
- if SIndexes =/= [] ->
- lists:map(fun(SIndex) ->
- maybe_create_local_purge_doc(Db, SIndex)
- end, SIndexes);
- true -> ok end.
+ if
+ SIndexes =/= [] ->
+ lists:map(
+ fun(SIndex) ->
+ maybe_create_local_purge_doc(Db, SIndex)
+ end,
+ SIndexes
+ );
+ true ->
+ ok
+ end.
maybe_create_local_purge_doc(Db, Index) ->
DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig),
@@ -316,7 +336,8 @@ maybe_create_local_purge_doc(Db, Index) ->
{not_found, _} ->
DbPurgeSeq = couch_db:get_purge_seq(Db),
DocContent = dreyfus_util:get_local_purge_doc_body(
- Db, DocId, DbPurgeSeq, Index),
+ Db, DocId, DbPurgeSeq, Index
+ ),
couch_db:update_doc(Db, DocContent, []);
_ ->
ok
@@ -329,7 +350,8 @@ maybe_create_local_purge_doc(Db, IndexPid, Index) ->
DbPurgeSeq = couch_db:get_purge_seq(Db),
clouseau_rpc:set_purge_seq(IndexPid, DbPurgeSeq),
DocContent = dreyfus_util:get_local_purge_doc_body(
- Db, DocId, DbPurgeSeq, Index),
+ Db, DocId, DbPurgeSeq, Index
+ ),
couch_db:update_doc(Db, DocContent, []);
_ ->
ok
@@ -341,12 +363,20 @@ get_local_purge_doc_id(Sig) ->
get_signature_from_idxdir(IdxDir) ->
IdxDirList = filename:split(IdxDir),
Sig = lists:last(IdxDirList),
- Sig2 = if not is_binary(Sig) -> Sig; true ->
- binary_to_list(Sig)
- end,
- case [Ch || Ch <- Sig2, not (((Ch >= $0) and (Ch =< $9))
- orelse ((Ch >= $a) and (Ch =< $f))
- orelse ((Ch >= $A) and (Ch =< $F)))] == [] of
+ Sig2 =
+ if
+ not is_binary(Sig) -> Sig;
+ true -> binary_to_list(Sig)
+ end,
+ case
+ [
+ Ch
+ || Ch <- Sig2,
+ not (((Ch >= $0) and (Ch =< $9)) orelse
+ ((Ch >= $a) and (Ch =< $f)) orelse
+ ((Ch >= $A) and (Ch =< $F)))
+ ] == []
+ of
true -> Sig;
false -> undefined
end.
@@ -359,37 +389,43 @@ get_local_purge_doc_body(_, LocalDocId, PurgeSeq, Index) ->
} = Index,
{Mega, Secs, _} = os:timestamp(),
NowSecs = Mega * 1000000 + Secs,
- JsonList = {[
- {<<"_id">>, LocalDocId},
- {<<"purge_seq">>, PurgeSeq},
- {<<"updated_on">>, NowSecs},
- {<<"indexname">>, IdxName},
- {<<"ddoc_id">>, DDocId},
- {<<"signature">>, Sig},
- {<<"type">>, <<"dreyfus">>}
- ]},
+ JsonList =
+ {[
+ {<<"_id">>, LocalDocId},
+ {<<"purge_seq">>, PurgeSeq},
+ {<<"updated_on">>, NowSecs},
+ {<<"indexname">>, IdxName},
+ {<<"ddoc_id">>, DDocId},
+ {<<"signature">>, Sig},
+ {<<"type">>, <<"dreyfus">>}
+ ]},
couch_doc:from_json_obj(JsonList).
verify_index_exists(DbName, Props) ->
try
Type = couch_util:get_value(<<"type">>, Props),
- if Type =/= <<"dreyfus">> -> false; true ->
- DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
- IndexName = couch_util:get_value(<<"indexname">>, Props),
- Sig = couch_util:get_value(<<"signature">>, Props),
- couch_util:with_db(DbName, fun(Db) ->
- case couch_db:get_design_doc(Db, DDocId) of
- {ok, #doc{} = DDoc} ->
- {ok, IdxState} = dreyfus_index:design_doc_to_index(
- DDoc, IndexName),
- IdxState#index.sig == Sig;
- {not_found, _} ->
- false
- end
- end)
+ if
+ Type =/= <<"dreyfus">> ->
+ false;
+ true ->
+ DDocId = couch_util:get_value(<<"ddoc_id">>, Props),
+ IndexName = couch_util:get_value(<<"indexname">>, Props),
+ Sig = couch_util:get_value(<<"signature">>, Props),
+ couch_util:with_db(DbName, fun(Db) ->
+ case couch_db:get_design_doc(Db, DDocId) of
+ {ok, #doc{} = DDoc} ->
+ {ok, IdxState} = dreyfus_index:design_doc_to_index(
+ DDoc, IndexName
+ ),
+ IdxState#index.sig == Sig;
+ {not_found, _} ->
+ false
+ end
+ end)
end
- catch _:_ ->
- false
+ catch
+ _:_ ->
+ false
end.
-ifdef(TEST).
@@ -405,29 +441,36 @@ empty_test() ->
?assertEqual([], ?SORT([], [])).
primary_asc_test() ->
- ?assertMatch([#sortable{order=[1]}, #sortable{order=[2]}],
- ?SORT([?ASC], [#sortable{order=[2]}, #sortable{order=[1]}])).
+ ?assertMatch(
+ [#sortable{order = [1]}, #sortable{order = [2]}],
+ ?SORT([?ASC], [#sortable{order = [2]}, #sortable{order = [1]}])
+ ).
primary_desc_test() ->
- ?assertMatch([#sortable{order=[2]}, #sortable{order=[1]}],
- ?SORT([?DESC], [#sortable{order=[1]}, #sortable{order=[2]}])).
+ ?assertMatch(
+ [#sortable{order = [2]}, #sortable{order = [1]}],
+ ?SORT([?DESC], [#sortable{order = [1]}, #sortable{order = [2]}])
+ ).
secondary_asc_test() ->
- ?assertMatch([#sortable{order=[1, 1]}, #sortable{order=[1, 2]}],
- ?SORT([?ASC, ?ASC], [#sortable{order=[1, 2]}, #sortable{order=[1, 1]}])).
+ ?assertMatch(
+ [#sortable{order = [1, 1]}, #sortable{order = [1, 2]}],
+ ?SORT([?ASC, ?ASC], [#sortable{order = [1, 2]}, #sortable{order = [1, 1]}])
+ ).
secondary_desc_test() ->
- ?assertMatch([#sortable{order=[1, 2]}, #sortable{order=[1, 1]}],
- ?SORT([?DESC, ?DESC], [#sortable{order=[1, 1]}, #sortable{order=[1, 2]}])).
+ ?assertMatch(
+ [#sortable{order = [1, 2]}, #sortable{order = [1, 1]}],
+ ?SORT([?DESC, ?DESC], [#sortable{order = [1, 1]}, #sortable{order = [1, 2]}])
+ ).
stash_test() ->
- {Stashed, Stash} = stash_items([#sortable{order=foo, item=bar}]),
+ {Stashed, Stash} = stash_items([#sortable{order = foo, item = bar}]),
First = hd(Stashed),
?assert(is_reference(First#sortable.item)),
Unstashed = hd(unstash_items(Stashed, Stash)),
?assertEqual(Unstashed#sortable.item, bar).
-
ring_opts_test() ->
Shards = [#shard{name = foo, ref = make_ref()}],
@@ -435,7 +478,9 @@ ring_opts_test() ->
?assertEqual([], get_ring_opts(QArgs1, Shards)),
QArgs2 = #index_query_args{partition = <<"x">>},
- ?assertMatch([{any, [#shard{name = foo, ref = undefined}]}],
- get_ring_opts(QArgs2, Shards)).
+ ?assertMatch(
+ [{any, [#shard{name = foo, ref = undefined}]}],
+ get_ring_opts(QArgs2, Shards)
+ ).
-endif.
diff --git a/src/dreyfus/test/dreyfus_blacklist_await_test.erl b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
index 82665eb02..de3d629e5 100644
--- a/src/dreyfus/test/dreyfus_blacklist_await_test.erl
+++ b/src/dreyfus/test/dreyfus_blacklist_await_test.erl
@@ -41,10 +41,12 @@ dreyfus_blacklist_await_test_() ->
"dreyfus black_list_doc await tests",
{
setup,
- fun start/0, fun stop/1,
+ fun start/0,
+ fun stop/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun do_not_await_1/0
]
@@ -54,16 +56,24 @@ dreyfus_blacklist_await_test_() ->
do_not_await_1() ->
ok = meck:new(dreyfus_index, [passthrough]),
- Denied = lists:flatten([?b2l(?DBNAME), ".", "black_list_doc", ".",
- "my_index"]),
+ Denied = lists:flatten([
+ ?b2l(?DBNAME),
+ ".",
+ "black_list_doc",
+ ".",
+ "my_index"
+ ]),
config:set("dreyfus_blacklist", Denied, "true"),
dreyfus_test_util:wait_config_change(Denied, "true"),
- Index = #index{dbname=?DBNAME, name=?INDEX_NAME, ddoc_id=?DDOC_ID},
+ Index = #index{dbname = ?DBNAME, name = ?INDEX_NAME, ddoc_id = ?DDOC_ID},
State = create_state(?DBNAME, Index, nil, nil, []),
Msg = "Index Blocked from Updating - db: ~p, ddocid: ~p name: ~p",
Return = wait_log_message(Msg, fun() ->
- {noreply, _NewState} = dreyfus_index:handle_call({await, 1},
- self(), State)
+ {noreply, _NewState} = dreyfus_index:handle_call(
+ {await, 1},
+ self(),
+ State
+ )
end),
?assertEqual(Return, ok).
diff --git a/src/dreyfus/test/dreyfus_blacklist_request_test.erl b/src/dreyfus/test/dreyfus_blacklist_request_test.erl
index 8e5598ae1..3e466327d 100644
--- a/src/dreyfus/test/dreyfus_blacklist_request_test.erl
+++ b/src/dreyfus/test/dreyfus_blacklist_request_test.erl
@@ -41,10 +41,12 @@ dreyfus_blacklist_request_test_() ->
"dreyfus blacklist request tests",
{
setup,
- fun start/0, fun stop/1,
+ fun start/0,
+ fun stop/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun deny_fabric_requests/0,
fun allow_fabric_request/0
@@ -61,29 +63,88 @@ deny_fabric_requests() ->
Denied = "mydb.myddocid.myindexname",
config:set("dreyfus_blacklist", Denied, "true"),
dreyfus_test_util:wait_config_change(Denied, "true"),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>,
- <<"myddocid">>, <<"myindexname">>, QueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)),
- ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>,
- DDoc, <<"myindexname">>, IndexQueryArgs)).
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_search:go(
+ <<"mydb">>,
+ <<"myddocid">>,
+ <<"myindexname">>,
+ QueryArgs
+ )
+ ),
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_group1:go(
+ <<"mydb">>,
+ <<"myddocid">>,
+ <<"myindexname">>,
+ QueryArgs
+ )
+ ),
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_group2:go(
+ <<"mydb">>,
+ <<"myddocid">>,
+ <<"myindexname">>,
+ QueryArgs
+ )
+ ),
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_info:go(
+ <<"mydb">>,
+ <<"myddocid">>,
+ <<"myindexname">>,
+ QueryArgs
+ )
+ ),
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_search:go(
+ <<"mydb">>,
+ DDoc,
+ <<"myindexname">>,
+ IndexQueryArgs
+ )
+ ),
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_group1:go(
+ <<"mydb">>,
+ DDoc,
+ <<"myindexname">>,
+ IndexQueryArgs
+ )
+ ),
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_group2:go(
+ <<"mydb">>,
+ DDoc,
+ <<"myindexname">>,
+ IndexQueryArgs
+ )
+ ),
+ ?assertThrow(
+ {bad_request, Reason},
+ dreyfus_fabric_info:go(
+ <<"mydb">>,
+ DDoc,
+ <<"myindexname">>,
+ IndexQueryArgs
+ )
+ ).
allow_fabric_request() ->
ok = meck:new(dreyfus_fabric_search, [passthrough]),
- ok = meck:expect(dreyfus_fabric_search, go,
- fun(A, GroupId, B, C) when is_binary(GroupId) ->
+ ok = meck:expect(
+ dreyfus_fabric_search,
+ go,
+ fun(A, GroupId, B, C) when is_binary(GroupId) ->
meck:passthrough([A, GroupId, B, C])
- end),
+ end
+ ),
ok = meck:expect(dreyfus_fabric_search, go, fun(_, _, _, _) ->
ok
end),
@@ -91,6 +152,13 @@ allow_fabric_request() ->
QueryArgs = #index_query_args{},
config:set("dreyfus_blacklist", Denied, "true"),
dreyfus_test_util:wait_config_change(Denied, "true"),
- ?assertEqual(ok, dreyfus_fabric_search:go(<<"mydb">>,
- <<"myddocid">>, <<"indexnotthere">>, QueryArgs)),
+ ?assertEqual(
+ ok,
+ dreyfus_fabric_search:go(
+ <<"mydb">>,
+ <<"myddocid">>,
+ <<"indexnotthere">>,
+ QueryArgs
+ )
+ ),
ok = meck:unload(dreyfus_fabric_search).
diff --git a/src/dreyfus/test/dreyfus_config_test.erl b/src/dreyfus/test/dreyfus_config_test.erl
index 775e49d7f..9ae0e56e0 100644
--- a/src/dreyfus/test/dreyfus_config_test.erl
+++ b/src/dreyfus/test/dreyfus_config_test.erl
@@ -12,13 +12,11 @@
-module(dreyfus_config_test).
-
-include_lib("couch_log/include/couch_log.hrl").
-include_lib("eunit/include/eunit.hrl").
-define(TIMEOUT, 1000).
-
start() ->
test_util:start_couch([dreyfus]).
@@ -33,10 +31,12 @@ dreyfus_config_test_() ->
"dreyfus config tests",
{
setup,
- fun start/0, fun test_util:stop_couch/1,
+ fun start/0,
+ fun test_util:stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun check_black_list/0,
fun check_delete_from_blacklist/0
@@ -54,9 +54,12 @@ check_black_list() ->
ok = config:set("dreyfus_blacklist", Index3, "true"),
dreyfus_test_util:wait_config_change(Index3, "true"),
FinalBl = [Index3, Index2, Index],
- lists:foreach(fun (I) ->
- ?assertEqual("true", dreyfus_config:get(I))
- end, FinalBl).
+ lists:foreach(
+ fun(I) ->
+ ?assertEqual("true", dreyfus_config:get(I))
+ end,
+ FinalBl
+ ).
check_delete_from_blacklist() ->
Index = "mydb.myddocid.myindexname",
diff --git a/src/dreyfus/test/dreyfus_purge_test.erl b/src/dreyfus/test/dreyfus_purge_test.erl
index 9b24d6f64..e64a046a7 100644
--- a/src/dreyfus/test/dreyfus_purge_test.erl
+++ b/src/dreyfus/test/dreyfus_purge_test.erl
@@ -17,19 +17,33 @@
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("mem3/include/mem3.hrl").
-
--export([test_purge_single/0, test_purge_multiple/0, test_purge_multiple2/0,
- test_purge_conflict/0, test_purge_conflict2/0, test_purge_conflict3/0, test_purge_conflict4/0,
- test_purge_update/0, test_purge_update2/0,
- test_delete/0, test_delete_purge_conflict/0, test_delete_conflict/0,
- test_all/0]).
--export([test_verify_index_exists1/0, test_verify_index_exists2/0, test_verify_index_exists_failed/0,
- test_local_doc/0, test_delete_local_doc/0, test_purge_search/0]).
+-export([
+ test_purge_single/0,
+ test_purge_multiple/0,
+ test_purge_multiple2/0,
+ test_purge_conflict/0,
+ test_purge_conflict2/0,
+ test_purge_conflict3/0,
+ test_purge_conflict4/0,
+ test_purge_update/0,
+ test_purge_update2/0,
+ test_delete/0,
+ test_delete_purge_conflict/0,
+ test_delete_conflict/0,
+ test_all/0
+]).
+-export([
+ test_verify_index_exists1/0,
+ test_verify_index_exists2/0,
+ test_verify_index_exists_failed/0,
+ test_local_doc/0,
+ test_delete_local_doc/0,
+ test_purge_search/0
+]).
-compile(export_all).
-compile(nowarn_export_all).
-
test_all() ->
test_purge_single(),
test_purge_multiple(),
@@ -75,8 +89,13 @@ test_purge_multiple() ->
?assertEqual(HitCount1, 5),
%purge 5 docs
- purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>,
- <<"strawberry">>]),
+ purge_docs(DbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%second search request
{ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query),
@@ -130,36 +149,64 @@ test_purge_conflict() ->
%first search
{ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount1 + GreenHitCount1),
%do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ <<"_replicator">>,
+ make_replicate_doc(
+ SourceDbName, TargetDbName
+ ),
+ [?ADMIN_CTX]
+ ),
%%check doc version
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
+ wait_for_replicate(
+ TargetDbName,
+ [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ],
+ 2,
+ 5
+ ),
%second search
{ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount2 + GreenHitCount2),
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>,
- <<"strawberry">>]),
+ purge_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%third search
- {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(TargetDbName,
- <<"color:red">>),
- {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(TargetDbName,
- <<"color:green">>),
+ {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+ TargetDbName,
+ <<"color:red">>
+ ),
+ {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+ TargetDbName,
+ <<"color:green">>
+ ),
?assertEqual(5, RedHitCount3 + GreenHitCount3),
?assertEqual(RedHitCount2, GreenHitCount3),
@@ -179,38 +226,71 @@ test_purge_conflict2() ->
create_db_docs(TargetDbName, <<"green">>),
%first search
- {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(TargetDbName,
- <<"color:red">>),
- {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(TargetDbName,
- <<"color:green">>),
+ {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+ TargetDbName,
+ <<"color:red">>
+ ),
+ {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+ TargetDbName,
+ <<"color:green">>
+ ),
?assertEqual(5, RedHitCount1 + GreenHitCount1),
%do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ <<"_replicator">>,
+ make_replicate_doc(
+ SourceDbName, TargetDbName
+ ),
+ [?ADMIN_CTX]
+ ),
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
+ wait_for_replicate(
+ TargetDbName,
+ [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ],
+ 2,
+ 5
+ ),
%second search
{ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount2 + GreenHitCount2),
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
+ purge_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
+ purge_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%third search
{ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(0, RedHitCount3 + GreenHitCount3),
@@ -218,7 +298,6 @@ test_purge_conflict2() ->
delete_db(TargetDbName),
ok.
-
test_purge_conflict3() ->
%create dbs and docs
SourceDbName = db_name(),
@@ -230,47 +309,80 @@ test_purge_conflict3() ->
%first search
{ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount1 + GreenHitCount1),
%do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ <<"_replicator">>,
+ make_replicate_doc(
+ SourceDbName, TargetDbName
+ ),
+ [?ADMIN_CTX]
+ ),
%%check doc version
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
+ wait_for_replicate(
+ TargetDbName,
+ [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ],
+ 2,
+ 5
+ ),
%second search
{ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount2 + GreenHitCount2),
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
+ purge_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%third search
{ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount3 + GreenHitCount3),
?assertEqual(RedHitCount2, GreenHitCount3),
?assertEqual(GreenHitCount2, RedHitCount3),
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
+ purge_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
{ok, _, RedHitCount4, _, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount4, _, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(0, RedHitCount4 + GreenHitCount4),
@@ -289,36 +401,62 @@ test_purge_conflict4() ->
%first search
{ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount1 + GreenHitCount1),
%do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ <<"_replicator">>,
+ make_replicate_doc(
+ SourceDbName, TargetDbName
+ ),
+ [?ADMIN_CTX]
+ ),
%%check doc version
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
+ wait_for_replicate(
+ TargetDbName,
+ [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ],
+ 2,
+ 5
+ ),
%second search
{ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount2 + GreenHitCount2),
- purge_docs_with_all_revs(TargetDbName, [<<"apple">>, <<"tomato">>,
- <<"cherry">>, <<"haw">>, <<"strawberry">>]),
+ purge_docs_with_all_revs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%third search
{ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(0, RedHitCount3 + GreenHitCount3),
@@ -341,12 +479,14 @@ test_purge_update() ->
%update doc
Rev = get_rev(DbName, <<"apple">>),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"apple">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"color">>, <<"green">>},
- {<<"size">>, 8}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"apple">>},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"color">>, <<"green">>},
+ {<<"size">>, 8}
+ ]}
+ ),
{ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
%second search request
@@ -433,36 +573,62 @@ test_delete_conflict() ->
%first search
{ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount1 + GreenHitCount1),
%do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ <<"_replicator">>,
+ make_replicate_doc(
+ SourceDbName, TargetDbName
+ ),
+ [?ADMIN_CTX]
+ ),
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
+ wait_for_replicate(
+ TargetDbName,
+ [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ],
+ 2,
+ 5
+ ),
%second search
{ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount2 + GreenHitCount2),
%delete docs
- delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
+ delete_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%third search
{ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount3 + GreenHitCount3),
?assertEqual(RedHitCount2, GreenHitCount3),
@@ -483,40 +649,71 @@ test_delete_purge_conflict() ->
%first search
{ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount1 + GreenHitCount1),
%do replicate and make conflicted docs
- {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
- SourceDbName, TargetDbName), [?ADMIN_CTX]),
+ {ok, _} = fabric:update_doc(
+ <<"_replicator">>,
+ make_replicate_doc(
+ SourceDbName, TargetDbName
+ ),
+ [?ADMIN_CTX]
+ ),
- wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>], 2, 5),
+ wait_for_replicate(
+ TargetDbName,
+ [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ],
+ 2,
+ 5
+ ),
%second search
{ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(5, RedHitCount2 + GreenHitCount2),
%purge docs
- purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
+ purge_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%delete docs
- delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"haw">>, <<"strawberry">>]),
+ delete_docs(TargetDbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"haw">>,
+ <<"strawberry">>
+ ]),
%third search
{ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:red">>),
+ TargetDbName, <<"color:red">>
+ ),
{ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
- TargetDbName, <<"color:green">>),
+ TargetDbName, <<"color:green">>
+ ),
?assertEqual(RedHitCount3, 0),
?assertEqual(GreenHitCount3, 0),
@@ -533,26 +730,33 @@ test_local_doc() ->
{ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
?assertEqual(HitCount1, 1),
- purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
- <<"strawberry">>]),
+ purge_docs(DbName, [
+ <<"apple">>,
+ <<"tomato">>,
+ <<"cherry">>,
+ <<"strawberry">>
+ ]),
{ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
?assertEqual(HitCount2, 0),
%get local doc
- [Sig|_] = get_sigs(DbName),
+ [Sig | _] = get_sigs(DbName),
LocalId = dreyfus_util:get_local_purge_doc_id(Sig),
LocalShards = mem3:local_shards(DbName),
- PurgeSeqs = lists:map(fun(Shard) ->
- {ok, Db} = couch_db:open_int(Shard#shard.name, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db, LocalId, []),
- {Props} = couch_doc:to_json_obj(LDoc, []),
- dreyfus_util:get_value_from_options(<<"updated_on">>, Props),
- PurgeSeq = dreyfus_util:get_value_from_options(<<"purge_seq">>, Props),
- Type = dreyfus_util:get_value_from_options(<<"type">>, Props),
- ?assertEqual(<<"dreyfus">>, Type),
- couch_db:close(Db),
- PurgeSeq
- end, LocalShards),
+ PurgeSeqs = lists:map(
+ fun(Shard) ->
+ {ok, Db} = couch_db:open_int(Shard#shard.name, [?ADMIN_CTX]),
+ {ok, LDoc} = couch_db:open_doc(Db, LocalId, []),
+ {Props} = couch_doc:to_json_obj(LDoc, []),
+ dreyfus_util:get_value_from_options(<<"updated_on">>, Props),
+ PurgeSeq = dreyfus_util:get_value_from_options(<<"purge_seq">>, Props),
+ Type = dreyfus_util:get_value_from_options(<<"type">>, Props),
+ ?assertEqual(<<"dreyfus">>, Type),
+ couch_db:close(Db),
+ PurgeSeq
+ end,
+ LocalShards
+ ),
?assertEqual(lists:sum(PurgeSeqs), 4),
delete_db(DbName),
@@ -571,11 +775,14 @@ test_verify_index_exists1() ->
?assertEqual(HitCount2, 0),
ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
+ [ShardDbName | _Rest] = ShardNames,
{ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db,
+ {ok, LDoc} = couch_db:open_doc(
+ Db,
dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>
+ ),
+ []
),
#doc{body = {Props}} = LDoc,
?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
@@ -590,11 +797,14 @@ test_verify_index_exists2() ->
?assertEqual(HitCount1, 1),
ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
+ [ShardDbName | _Rest] = ShardNames,
{ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db,
+ {ok, LDoc} = couch_db:open_doc(
+ Db,
dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>
+ ),
+ []
),
#doc{body = {Props}} = LDoc,
?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)),
@@ -610,55 +820,63 @@ test_verify_index_exists_failed() ->
?assertEqual(HitCount1, 1),
ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
+ [ShardDbName | _Rest] = ShardNames,
{ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
- {ok, LDoc} = couch_db:open_doc(Db,
+ {ok, LDoc} = couch_db:open_doc(
+ Db,
dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>
+ ),
+ []
),
#doc{body = {Options}} = LDoc,
OptionsDbErr = [
- {<<"indexname">>,
- dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>,
- dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>,
- dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+ {<<"indexname">>, dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+ {<<"ddoc_id">>, dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+ {<<"signature">>, dreyfus_util:get_value_from_options(<<"signature">>, Options)}
],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsDbErr)),
+ ?assertEqual(
+ false,
+ dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsDbErr
+ )
+ ),
OptionsIdxErr = [
{<<"indexname">>, <<"someindex">>},
- {<<"ddoc_id">>,
- dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>,
- dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+ {<<"ddoc_id">>, dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+ {<<"signature">>, dreyfus_util:get_value_from_options(<<"signature">>, Options)}
],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsIdxErr)),
+ ?assertEqual(
+ false,
+ dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsIdxErr
+ )
+ ),
OptionsDDocErr = [
- {<<"indexname">>,
- dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>,
- <<"somedesigndoc">>},
- {<<"signature">>,
- dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+ {<<"indexname">>, dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+ {<<"ddoc_id">>, <<"somedesigndoc">>},
+ {<<"signature">>, dreyfus_util:get_value_from_options(<<"signature">>, Options)}
],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsDDocErr)),
+ ?assertEqual(
+ false,
+ dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsDDocErr
+ )
+ ),
OptionsSigErr = [
- {<<"indexname">>,
- dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
- {<<"ddoc_id">>,
- dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
- {<<"signature">>,
- <<"12345678901234567890123456789012">>}
+ {<<"indexname">>, dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+ {<<"ddoc_id">>, dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+ {<<"signature">>, <<"12345678901234567890123456789012">>}
],
- ?assertEqual(false, dreyfus_util:verify_index_exists(
- ShardDbName, OptionsSigErr)),
+ ?assertEqual(
+ false,
+ dreyfus_util:verify_index_exists(
+ ShardDbName, OptionsSigErr
+ )
+ ),
delete_db(DbName),
ok.
@@ -676,9 +894,10 @@ test_delete_local_doc() ->
?assertEqual(HitCount2, 0),
LDocId = dreyfus_util:get_local_purge_doc_id(
- <<"49e82c2a910b1046b55cc45ad058a7ee">>),
+ <<"49e82c2a910b1046b55cc45ad058a7ee">>
+ ),
ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
- [ShardDbName | _Rest ] = ShardNames,
+ [ShardDbName | _Rest] = ShardNames,
{ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
{ok, _} = couch_db:open_doc(Db, LDocId, []),
@@ -686,7 +905,6 @@ test_delete_local_doc() ->
io:format("DbName ~p~n", [DbName]),
?debugFmt("Converting ... ~n~p~n", [DbName]),
-
dreyfus_fabric_cleanup:go(DbName),
{ok, Db2} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
{not_found, _} = couch_db:open_doc(Db2, LDocId, []),
@@ -706,8 +924,10 @@ test_purge_search() ->
%private API
db_name() ->
iolist_to_binary([
- "dreyfus-test-db-", [
- integer_to_list(I) || I <- [
+ "dreyfus-test-db-",
+ [
+ integer_to_list(I)
+ || I <- [
erlang:unique_integer([positive]),
rand:uniform(10000)
]
@@ -751,11 +971,13 @@ make_docs(Count, Color) ->
[make_doc(I, Color) || I <- lists:seq(1, Count)].
make_doc(Id, Color) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, get_value(Id)},
- {<<"color">>, Color},
- {<<"size">>, 1}
- ]}).
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, get_value(Id)},
+ {<<"color">>, Color},
+ {<<"size">>, 1}
+ ]}
+ ).
get_value(Key) ->
case Key of
@@ -772,34 +994,43 @@ get_value(Key) ->
end.
make_design_doc(dreyfus) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, <<"_design/search">>},
- {<<"language">>, <<"javascript">>},
- {<<"indexes">>, {[
- {<<"index">>, {[
- {<<"analyzer">>, <<"standard">>},
- {<<"index">>, <<
- "function (doc) { \n"
- " index(\"default\", doc._id);\n"
- " if(doc.color) {\n"
- " index(\"color\", doc.color);\n"
- " }\n"
- " if(doc.size) {\n"
- " index(\"size\", doc.size);\n"
- " }\n"
- "}"
- >>}
- ]}}
- ]}}
- ]}).
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"_design/search">>},
+ {<<"language">>, <<"javascript">>},
+ {<<"indexes">>,
+ {[
+ {<<"index">>,
+ {[
+ {<<"analyzer">>, <<"standard">>},
+ {<<"index">>, <<
+ "function (doc) { \n"
+ " index(\"default\", doc._id);\n"
+ " if(doc.color) {\n"
+ " index(\"color\", doc.color);\n"
+ " }\n"
+ " if(doc.size) {\n"
+ " index(\"size\", doc.size);\n"
+ " }\n"
+ "}"
+ >>}
+ ]}}
+ ]}}
+ ]}
+ ).
make_replicate_doc(SourceDbName, TargetDbName) ->
- couch_doc:from_json_obj({[
- {<<"_id">>, list_to_binary("replicate_fm_" ++
- binary_to_list(SourceDbName) ++ "_to_" ++ binary_to_list(TargetDbName))},
- {<<"source">>, list_to_binary("http://localhost:15984/" ++ SourceDbName)},
- {<<"target">>, list_to_binary("http://localhost:15984/" ++ TargetDbName)}
- ]}).
+ couch_doc:from_json_obj(
+ {[
+ {<<"_id">>,
+ list_to_binary(
+ "replicate_fm_" ++
+ binary_to_list(SourceDbName) ++ "_to_" ++ binary_to_list(TargetDbName)
+ )},
+ {<<"source">>, list_to_binary("http://localhost:15984/" ++ SourceDbName)},
+ {<<"target">>, list_to_binary("http://localhost:15984/" ++ TargetDbName)}
+ ]}
+ ).
get_rev(DbName, DocId) ->
FDI = fabric:get_full_doc_info(DbName, DocId, []),
@@ -808,20 +1039,22 @@ get_rev(DbName, DocId) ->
get_revs(DbName, DocId) ->
FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI),
+ #doc_info{revs = Revs} = couch_doc:to_doc_info(FDI),
[Rev#rev_info.rev || Rev <- Revs].
update_doc(_, _, 0) ->
ok;
update_doc(DbName, DocId, Times) ->
Rev = get_rev(DbName, DocId),
- Doc = couch_doc:from_json_obj({[
- {<<"_id">>, <<"apple">>},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"size">>, 1001 - Times}
- ]}),
+ Doc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, <<"apple">>},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"size">>, 1001 - Times}
+ ]}
+ ),
{ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]),
- update_doc(DbName, DocId, Times-1).
+ update_doc(DbName, DocId, Times - 1).
delete_docs(DbName, DocIds) ->
lists:foreach(
@@ -831,43 +1064,55 @@ delete_docs(DbName, DocIds) ->
delete_doc(DbName, DocId) ->
Rev = get_rev(DbName, DocId),
- DDoc = couch_doc:from_json_obj({[
- {<<"_id">>, DocId},
- {<<"_rev">>, couch_doc:rev_to_str(Rev)},
- {<<"_deleted">>, true}
- ]}),
+ DDoc = couch_doc:from_json_obj(
+ {[
+ {<<"_id">>, DocId},
+ {<<"_rev">>, couch_doc:rev_to_str(Rev)},
+ {<<"_deleted">>, true}
+ ]}
+ ),
{ok, _} = fabric:update_doc(DbName, DDoc, [?ADMIN_CTX]),
ok.
wait_for_replicate(_, _, _, 0) ->
couch_log:notice("[~p] wait time out", [?MODULE]),
ok;
-wait_for_replicate(DbName, DocIds, ExpectRevCount ,TimeOut)
- when is_list(DocIds) ->
- [wait_for_replicate(DbName, DocId, ExpectRevCount ,TimeOut) || DocId <- DocIds];
-wait_for_replicate(DbName, DocId, ExpectRevCount ,TimeOut) ->
+wait_for_replicate(DbName, DocIds, ExpectRevCount, TimeOut) when
+ is_list(DocIds)
+->
+ [wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) || DocId <- DocIds];
+wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) ->
FDI = fabric:get_full_doc_info(DbName, DocId, []),
- #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI),
+ #doc_info{revs = Revs} = couch_doc:to_doc_info(FDI),
case erlang:length(Revs) of
ExpectRevCount ->
- couch_log:notice("[~p] wait end by expect, time used:~p, DocId:~p",
- [?MODULE, 5-TimeOut, DocId]),
+ couch_log:notice(
+ "[~p] wait end by expect, time used:~p, DocId:~p",
+ [?MODULE, 5 - TimeOut, DocId]
+ ),
ok;
        _ ->
timer:sleep(1000),
- wait_for_replicate(DbName, DocId, ExpectRevCount ,TimeOut-1)
+ wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut - 1)
end,
ok.
get_sigs(DbName) ->
{ok, DesignDocs} = fabric:design_docs(DbName),
- lists:usort(lists:flatmap(fun active_sigs/1,
- [couch_doc:from_json_obj(DD) || DD <- DesignDocs])).
+ lists:usort(
+ lists:flatmap(
+ fun active_sigs/1,
+ [couch_doc:from_json_obj(DD) || DD <- DesignDocs]
+ )
+ ).
-active_sigs(#doc{body={Fields}}=Doc) ->
+active_sigs(#doc{body = {Fields}} = Doc) ->
{RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
{IndexNames, _} = lists:unzip(RawIndexes),
- [begin
- {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
- Index#index.sig
- end || IndexName <- IndexNames].
+ [
+ begin
+ {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
+ Index#index.sig
+ end
+ || IndexName <- IndexNames
+ ].
diff --git a/src/fabric/src/fabric.erl b/src/fabric/src/fabric.erl
index aaaa7e011..5840cd63c 100644
--- a/src/fabric/src/fabric.erl
+++ b/src/fabric/src/fabric.erl
@@ -17,27 +17,57 @@
-include_lib("couch_mrview/include/couch_mrview.hrl").
% DBs
--export([all_dbs/0, all_dbs/1, create_db/1, create_db/2, delete_db/1,
- delete_db/2, get_db_info/1, get_doc_count/1, get_doc_count/2,
- set_revs_limit/3, set_security/2, set_security/3,
- get_revs_limit/1, get_security/1, get_security/2,
+-export([
+ all_dbs/0, all_dbs/1,
+ create_db/1, create_db/2,
+ delete_db/1,
+ delete_db/2,
+ get_db_info/1,
+ get_doc_count/1, get_doc_count/2,
+ set_revs_limit/3,
+ set_security/2, set_security/3,
+ get_revs_limit/1,
+ get_security/1, get_security/2,
get_all_security/1, get_all_security/2,
- get_purge_infos_limit/1, set_purge_infos_limit/3,
- compact/1, compact/2, get_partition_info/2]).
+ get_purge_infos_limit/1,
+ set_purge_infos_limit/3,
+ compact/1, compact/2,
+ get_partition_info/2
+]).
% Documents
--export([open_doc/3, open_revs/4, get_doc_info/3, get_full_doc_info/3,
- get_missing_revs/2, get_missing_revs/3, update_doc/3, update_docs/3,
- purge_docs/3, att_receiver/3]).
+-export([
+ open_doc/3,
+ open_revs/4,
+ get_doc_info/3,
+ get_full_doc_info/3,
+ get_missing_revs/2, get_missing_revs/3,
+ update_doc/3,
+ update_docs/3,
+ purge_docs/3,
+ att_receiver/3
+]).
% Views
--export([all_docs/4, all_docs/5, changes/4, query_view/3, query_view/4,
- query_view/6, query_view/7, get_view_group_info/2, end_changes/0]).
+-export([
+ all_docs/4, all_docs/5,
+ changes/4,
+ query_view/3, query_view/4, query_view/6, query_view/7,
+ get_view_group_info/2,
+ end_changes/0
+]).
% miscellany
--export([design_docs/1, reset_validation_funs/1, cleanup_index_files/0,
- cleanup_index_files/1, cleanup_index_files_all_nodes/1, dbname/1,
- inactive_index_files/1, db_uuids/1]).
+-export([
+ design_docs/1,
+ reset_validation_funs/1,
+ cleanup_index_files/0,
+ cleanup_index_files/1,
+ cleanup_index_files_all_nodes/1,
+ dbname/1,
+ inactive_index_files/1,
+ db_uuids/1
+]).
-include_lib("fabric/include/fabric.hrl").
@@ -54,19 +84,21 @@ all_dbs() ->
all_dbs(<<>>).
%% @doc returns a list of all database names
--spec all_dbs(Prefix::iodata()) -> {ok, [binary()]}.
+-spec all_dbs(Prefix :: iodata()) -> {ok, [binary()]}.
all_dbs(Prefix) when is_binary(Prefix) ->
Length = byte_size(Prefix),
- MatchingDbs = mem3:fold_shards(fun(#shard{dbname=DbName}, Acc) ->
- case DbName of
- <<Prefix:Length/binary, _/binary>> ->
- [DbName | Acc];
- _ ->
- Acc
- end
- end, []),
+ MatchingDbs = mem3:fold_shards(
+ fun(#shard{dbname = DbName}, Acc) ->
+ case DbName of
+ <<Prefix:Length/binary, _/binary>> ->
+ [DbName | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ []
+ ),
{ok, lists:usort(MatchingDbs)};
-
%% @equiv all_dbs(list_to_binary(Prefix))
all_dbs(Prefix) when is_list(Prefix) ->
all_dbs(list_to_binary(Prefix)).
@@ -76,40 +108,39 @@ all_dbs(Prefix) when is_list(Prefix) ->
%% etc.
-spec get_db_info(dbname()) ->
{ok, [
- {instance_start_time, binary()} |
- {doc_count, non_neg_integer()} |
- {doc_del_count, non_neg_integer()} |
- {purge_seq, non_neg_integer()} |
- {compact_running, boolean()} |
- {disk_size, non_neg_integer()} |
- {disk_format_version, pos_integer()}
+ {instance_start_time, binary()}
+ | {doc_count, non_neg_integer()}
+ | {doc_del_count, non_neg_integer()}
+ | {purge_seq, non_neg_integer()}
+ | {compact_running, boolean()}
+ | {disk_size, non_neg_integer()}
+ | {disk_format_version, pos_integer()}
]}.
get_db_info(DbName) ->
fabric_db_info:go(dbname(DbName)).
%% @doc returns the size of a given partition
--spec get_partition_info(dbname(), Partition::binary()) ->
+-spec get_partition_info(dbname(), Partition :: binary()) ->
{ok, [
- {db_name, binary()} |
- {partition, binary()} |
- {doc_count, non_neg_integer()} |
- {doc_del_count, non_neg_integer()} |
- {sizes, json_obj()}
+ {db_name, binary()}
+ | {partition, binary()}
+ | {doc_count, non_neg_integer()}
+ | {doc_del_count, non_neg_integer()}
+ | {sizes, json_obj()}
]}.
get_partition_info(DbName, Partition) ->
fabric_db_partition_info:go(dbname(DbName), Partition).
-
%% @doc the number of docs in a database
%% @equiv get_doc_count(DbName, <<"_all_docs">>)
get_doc_count(DbName) ->
get_doc_count(DbName, <<"_all_docs">>).
%% @doc the number of docs in a database for a given namespace
--spec get_doc_count(dbname(), Namespace::binary()) ->
- {ok, non_neg_integer() | null} |
- {error, atom()} |
- {error, atom(), any()}.
+-spec get_doc_count(dbname(), Namespace :: binary()) ->
+ {ok, non_neg_integer() | null}
+ | {error, atom()}
+ | {error, atom(), any()}.
get_doc_count(DbName, <<"_all_docs">>) ->
fabric_db_doc_count:go(dbname(DbName));
get_doc_count(DbName, <<"_design">>) ->
@@ -150,29 +181,38 @@ set_revs_limit(DbName, Limit, Options) when is_integer(Limit), Limit > 0 ->
-spec get_revs_limit(dbname()) -> pos_integer() | no_return().
get_revs_limit(DbName) ->
{ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try couch_db:get_revs_limit(Db) after catch couch_db:close(Db) end.
+ try
+ couch_db:get_revs_limit(Db)
+ after
+ catch couch_db:close(Db)
+ end.
%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj()) -> ok.
+-spec set_security(dbname(), SecObj :: json_obj()) -> ok.
set_security(DbName, SecObj) ->
fabric_db_meta:set_security(dbname(DbName), SecObj, [?ADMIN_CTX]).
%% @doc sets the readers/writers/admin permissions for a database
--spec set_security(dbname(), SecObj::json_obj(), [option()]) -> ok.
+-spec set_security(dbname(), SecObj :: json_obj(), [option()]) -> ok.
set_security(DbName, SecObj, Options) ->
fabric_db_meta:set_security(dbname(DbName), SecObj, opts(Options)).
%% @doc sets the upper bound for the number of stored purge requests
-spec set_purge_infos_limit(dbname(), pos_integer(), [option()]) -> ok.
-set_purge_infos_limit(DbName, Limit, Options)
- when is_integer(Limit), Limit > 0 ->
+set_purge_infos_limit(DbName, Limit, Options) when
+ is_integer(Limit), Limit > 0
+->
fabric_db_meta:set_purge_infos_limit(dbname(DbName), Limit, opts(Options)).
%% @doc retrieves the upper bound for the number of stored purge requests
-spec get_purge_infos_limit(dbname()) -> pos_integer() | no_return().
get_purge_infos_limit(DbName) ->
{ok, Db} = fabric_util:get_db(dbname(DbName), [?ADMIN_CTX]),
- try couch_db:get_purge_infos_limit(Db) after catch couch_db:close(Db) end.
+ try
+ couch_db:get_purge_infos_limit(Db)
+ after
+ catch couch_db:close(Db)
+ end.
get_security(DbName) ->
get_security(DbName, [?ADMIN_CTX]).
@@ -181,80 +221,88 @@ get_security(DbName) ->
-spec get_security(dbname()) -> json_obj() | no_return().
get_security(DbName, Options) ->
{ok, Db} = fabric_util:get_db(dbname(DbName), opts(Options)),
- try couch_db:get_security(Db) after catch couch_db:close(Db) end.
+ try
+ couch_db:get_security(Db)
+ after
+ catch couch_db:close(Db)
+ end.
%% @doc retrieve the security object for all shards of a database
-spec get_all_security(dbname()) ->
- {ok, [{#shard{}, json_obj()}]} |
- {error, no_majority | timeout} |
- {error, atom(), any()}.
+ {ok, [{#shard{}, json_obj()}]}
+ | {error, no_majority | timeout}
+ | {error, atom(), any()}.
get_all_security(DbName) ->
get_all_security(DbName, []).
%% @doc retrieve the security object for all shards of a database
-spec get_all_security(dbname(), [option()]) ->
- {ok, [{#shard{}, json_obj()}]} |
- {error, no_majority | timeout} |
- {error, atom(), any()}.
+ {ok, [{#shard{}, json_obj()}]}
+ | {error, no_majority | timeout}
+ | {error, atom(), any()}.
get_all_security(DbName, Options) ->
fabric_db_meta:get_all_security(dbname(DbName), opts(Options)).
compact(DbName) ->
- [rexi:cast(Node, {fabric_rpc, compact, [Name]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(dbname(DbName))],
+ [
+ rexi:cast(Node, {fabric_rpc, compact, [Name]})
+ || #shard{node = Node, name = Name} <- mem3:shards(dbname(DbName))
+ ],
ok.
compact(DbName, DesignName) ->
- [rexi:cast(Node, {fabric_rpc, compact, [Name, DesignName]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(dbname(DbName))],
+ [
+ rexi:cast(Node, {fabric_rpc, compact, [Name, DesignName]})
+ || #shard{node = Node, name = Name} <- mem3:shards(dbname(DbName))
+ ],
ok.
% doc operations
%% @doc retrieve the doc with a given id
-spec open_doc(dbname(), docid(), [option()]) ->
- {ok, #doc{}} |
- {not_found, missing | deleted} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
+ {ok, #doc{}}
+ | {not_found, missing | deleted}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any() | any()}.
open_doc(DbName, Id, Options) ->
case proplists:get_value(doc_info, Options) of
- undefined ->
- fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options));
- Else ->
- {error, {invalid_option, {doc_info, Else}}}
+ undefined ->
+ fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options));
+ Else ->
+ {error, {invalid_option, {doc_info, Else}}}
end.
%% @doc retrieve a collection of revisions, possibly all
-spec open_revs(dbname(), docid(), [revision()] | all, [option()]) ->
- {ok, [{ok, #doc{}} | {{not_found,missing}, revision()}]} |
- {timeout, any()} |
- {error, any()} |
- {error, any(), any()}.
+ {ok, [{ok, #doc{}} | {{not_found, missing}, revision()}]}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any(), any()}.
open_revs(DbName, Id, Revs, Options) ->
fabric_doc_open_revs:go(dbname(DbName), docid(Id), Revs, opts(Options)).
%% @doc Retrieves information about a document with a given id
-spec get_doc_info(dbname(), docid(), [options()]) ->
- {ok, #doc_info{}} |
- {not_found, missing} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
+ {ok, #doc_info{}}
+ | {not_found, missing}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any() | any()}.
get_doc_info(DbName, Id, Options) ->
- Options1 = [doc_info|Options],
+ Options1 = [doc_info | Options],
fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
%% @doc Retrieves full information about a document with a given id
-spec get_full_doc_info(dbname(), docid(), [options()]) ->
- {ok, #full_doc_info{}} |
- {not_found, missing | deleted} |
- {timeout, any()} |
- {error, any()} |
- {error, any() | any()}.
+ {ok, #full_doc_info{}}
+ | {not_found, missing | deleted}
+ | {timeout, any()}
+ | {error, any()}
+ | {error, any() | any()}.
get_full_doc_info(DbName, Id, Options) ->
- Options1 = [{doc_info, full}|Options],
+ Options1 = [{doc_info, full} | Options],
fabric_doc_open:go(dbname(DbName), docid(Id), opts(Options1)).
%% @equiv get_missing_revs(DbName, IdsRevs, [])
@@ -262,7 +310,7 @@ get_missing_revs(DbName, IdsRevs) ->
get_missing_revs(DbName, IdsRevs, []).
%% @doc retrieve missing revisions for a list of `{Id, Revs}'
--spec get_missing_revs(dbname(),[{docid(), [revision()]}], [option()]) ->
+-spec get_missing_revs(dbname(), [{docid(), [revision()]}], [option()]) ->
{ok, [{docid(), any(), [any()]}]}.
get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
Sanitized = [idrevs(IdR) || IdR <- IdsRevs],
@@ -274,20 +322,20 @@ get_missing_revs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
{ok, any()} | any().
update_doc(DbName, Doc, Options) ->
case update_docs(DbName, [Doc], opts(Options)) of
- {ok, [{ok, NewRev}]} ->
- {ok, NewRev};
- {accepted, [{accepted, NewRev}]} ->
- {accepted, NewRev};
- {ok, [{{_Id, _Rev}, Error}]} ->
- throw(Error);
- {ok, [Error]} ->
- throw(Error);
- {ok, []} ->
- % replication success
- #doc{revs = {Pos, [RevId | _]}} = doc(DbName, Doc),
- {ok, {Pos, RevId}};
- {error, [Error]} ->
- throw(Error)
+ {ok, [{ok, NewRev}]} ->
+ {ok, NewRev};
+ {accepted, [{accepted, NewRev}]} ->
+ {accepted, NewRev};
+ {ok, [{{_Id, _Rev}, Error}]} ->
+ throw(Error);
+ {ok, [Error]} ->
+ throw(Error);
+ {ok, []} ->
+ % replication success
+ #doc{revs = {Pos, [RevId | _]}} = doc(DbName, Doc),
+ {ok, {Pos, RevId}};
+ {error, [Error]} ->
+ throw(Error)
end.
%% @doc update a list of docs
@@ -296,7 +344,8 @@ update_doc(DbName, Doc, Options) ->
update_docs(DbName, Docs0, Options) ->
try
Docs1 = docs(DbName, Docs0),
- fabric_doc_update:go(dbname(DbName), Docs1, opts(Options)) of
+ fabric_doc_update:go(dbname(DbName), Docs1, opts(Options))
+ of
{ok, Results} ->
{ok, Results};
{accepted, Results} ->
@@ -305,27 +354,34 @@ update_docs(DbName, Docs0, Options) ->
{error, Error};
Error ->
throw(Error)
- catch {aborted, PreCommitFailures} ->
- {aborted, PreCommitFailures}
+ catch
+ {aborted, PreCommitFailures} ->
+ {aborted, PreCommitFailures}
end.
-
%% @doc purge revisions for a list of `{Id, Revs}'
%% returns {ok, {PurgeSeq, Results}}
-spec purge_docs(dbname(), [{docid(), [revision()]}], [option()]) ->
- {ok, [{Health, [revision()]}] | {error, any()}} when
+ {ok, [{Health, [revision()]}] | {error, any()}}
+when
Health :: ok | accepted.
purge_docs(DbName, IdsRevs, Options) when is_list(IdsRevs) ->
IdsRevs2 = [idrevs(IdRs) || IdRs <- IdsRevs],
fabric_doc_purge:go(dbname(DbName), IdsRevs2, opts(Options)).
-
%% @doc spawns a process to upload attachment data and
%% returns a fabric attachment receiver context tuple
%% with the spawned middleman process, an empty binary,
%% or exits with an error tuple {Error, Arg}
--spec att_receiver(#httpd{}, dbname(), Length :: undefined | chunked | pos_integer() |
- {unknown_transfer_encoding, any()}) ->
+-spec att_receiver(
+ #httpd{},
+ dbname(),
+ Length ::
+ undefined
+ | chunked
+ | pos_integer()
+ | {unknown_transfer_encoding, any()}
+) ->
{fabric_attachment_receiver, pid(), chunked | pos_integer()} | binary().
att_receiver(Req, DbName, Length) ->
fabric_doc_atts:receiver(Req, DbName, Length).
@@ -340,26 +396,28 @@ all_docs(DbName, Callback, Acc, QueryArgs) ->
%% "http://wiki.apache.org/couchdb/HTTP_Document_API#All_Documents">
%% all_docs</a> for details
-spec all_docs(
- dbname(), [{atom(), any()}], callback(), [] | tuple(),
- #mrargs{} | [option()]) ->
+ dbname(),
+ [{atom(), any()}],
+ callback(),
+ [] | tuple(),
+ #mrargs{} | [option()]
+) ->
{ok, any()} | {error, Reason :: term()}.
all_docs(DbName, Options, Callback, Acc0, #mrargs{} = QueryArgs) when
- is_function(Callback, 2) ->
+ is_function(Callback, 2)
+->
fabric_view_all_docs:go(dbname(DbName), opts(Options), QueryArgs, Callback, Acc0);
-
%% @doc convenience function that takes a keylist rather than a record
%% @equiv all_docs(DbName, Callback, Acc0, kl_to_query_args(QueryArgs))
all_docs(DbName, Options, Callback, Acc0, QueryArgs) ->
all_docs(DbName, Options, Callback, Acc0, kl_to_query_args(QueryArgs)).
-
--spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(),any()}]) ->
+-spec changes(dbname(), callback(), any(), #changes_args{} | [{atom(), any()}]) ->
{ok, any()}.
-changes(DbName, Callback, Acc0, #changes_args{}=Options) ->
+changes(DbName, Callback, Acc0, #changes_args{} = Options) ->
Feed = Options#changes_args.feed,
fabric_view_changes:go(dbname(DbName), Feed, Options, Callback, Acc0);
-
%% @doc convenience function, takes keylist instead of record
%% @equiv changes(DbName, Callback, Acc0, kl_to_changes_args(Options))
changes(DbName, Callback, Acc0, Options) ->
@@ -375,22 +433,28 @@ query_view(DbName, DesignName, ViewName, QueryArgs) ->
Callback = fun default_callback/2,
query_view(DbName, DesignName, ViewName, Callback, [], QueryArgs).
-
%% @equiv query_view(DbName, DesignName, [],
%% ViewName, fun default_callback/2, [], QueryArgs)
query_view(DbName, DDoc, ViewName, Callback, Acc, QueryArgs) ->
query_view(DbName, [], DDoc, ViewName, Callback, Acc, QueryArgs).
-
%% @doc execute a given view.
%% There are many additional query args that can be passed to a view,
%% see <a href="http://wiki.apache.org/couchdb/HTTP_view_API#Querying_Options">
%% query args</a> for details.
--spec query_view(dbname(), [{atom(), any()}] | [],
- #doc{} | binary(), iodata(), callback(), any(), #mrargs{}) ->
+-spec query_view(
+ dbname(),
+ [{atom(), any()}] | [],
+ #doc{} | binary(),
+ iodata(),
+ callback(),
+ any(),
+ #mrargs{}
+) ->
any().
-query_view(Db, Options, GroupId, ViewName, Callback, Acc0, QueryArgs)
- when is_binary(GroupId) ->
+query_view(Db, Options, GroupId, ViewName, Callback, Acc0, QueryArgs) when
+ is_binary(GroupId)
+->
DbName = dbname(Db),
{ok, DDoc} = ddoc_cache:open(DbName, <<"_design/", GroupId/binary>>),
query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs);
@@ -398,13 +462,13 @@ query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs0) ->
DbName = dbname(Db),
View = name(ViewName),
case fabric_util:is_users_db(DbName) of
- true ->
- FakeDb = fabric_util:open_cluster_db(DbName, Options),
- couch_users_db:after_doc_read(DDoc, FakeDb);
- false ->
- ok
+ true ->
+ FakeDb = fabric_util:open_cluster_db(DbName, Options),
+ couch_users_db:after_doc_read(DDoc, FakeDb);
+ false ->
+ ok
end,
- {ok, #mrst{views=Views, language=Lang}} =
+ {ok, #mrst{views = Views, language = Lang}} =
couch_mrview_util:ddoc_to_mrst(DbName, DDoc),
QueryArgs1 = couch_mrview_util:set_view_type(QueryArgs0, View, Views),
QueryArgs2 = fabric_util:validate_args(Db, DDoc, QueryArgs1),
@@ -437,24 +501,24 @@ query_view(Db, Options, DDoc, ViewName, Callback, Acc0, QueryArgs0) ->
%% is running and so forth
-spec get_view_group_info(dbname(), #doc{} | docid()) ->
{ok, [
- {signature, binary()} |
- {language, binary()} |
- {disk_size, non_neg_integer()} |
- {compact_running, boolean()} |
- {updater_running, boolean()} |
- {waiting_commit, boolean()} |
- {waiting_clients, non_neg_integer()} |
- {update_seq, pos_integer()} |
- {purge_seq, non_neg_integer()} |
- {sizes, [
- {active, non_neg_integer()} |
- {external, non_neg_integer()} |
- {file, non_neg_integer()}
- ]} |
- {updates_pending, [
- {minimum, non_neg_integer()} |
- {preferred, non_neg_integer()} |
- {total, non_neg_integer()}
+ {signature, binary()}
+ | {language, binary()}
+ | {disk_size, non_neg_integer()}
+ | {compact_running, boolean()}
+ | {updater_running, boolean()}
+ | {waiting_commit, boolean()}
+ | {waiting_clients, non_neg_integer()}
+ | {update_seq, pos_integer()}
+ | {purge_seq, non_neg_integer()}
+ | {sizes, [
+ {active, non_neg_integer()}
+ | {external, non_neg_integer()}
+ | {file, non_neg_integer()}
+ ]}
+ | {updates_pending, [
+ {minimum, non_neg_integer()}
+ | {preferred, non_neg_integer()}
+ | {total, non_neg_integer()}
]}
]}.
get_view_group_info(DbName, DesignId) ->
@@ -467,23 +531,25 @@ end_changes() ->
%% @doc retrieve all the design docs from a database
-spec design_docs(dbname()) -> {ok, [json_obj()]} | {error, Reason :: term()}.
design_docs(DbName) ->
- Extra = case get(io_priority) of
- undefined -> [];
- Else -> [{io_priority, Else}]
- end,
+ Extra =
+ case get(io_priority) of
+ undefined -> [];
+ Else -> [{io_priority, Else}]
+ end,
QueryArgs0 = #mrargs{
- include_docs=true,
- extra=Extra
+ include_docs = true,
+ extra = Extra
},
QueryArgs = set_namespace(<<"_design">>, QueryArgs0),
- Callback = fun({meta, _}, []) ->
- {ok, []};
- ({row, Props}, Acc) ->
- {ok, [couch_util:get_value(doc, Props) | Acc]};
- (complete, Acc) ->
- {ok, lists:reverse(Acc)};
- ({error, Reason}, _Acc) ->
- {error, Reason}
+ Callback = fun
+ ({meta, _}, []) ->
+ {ok, []};
+ ({row, Props}, Acc) ->
+ {ok, [couch_util:get_value(doc, Props) | Acc]};
+ (complete, Acc) ->
+ {ok, lists:reverse(Acc)};
+ ({error, Reason}, _Acc) ->
+ {error, Reason}
end,
fabric:all_docs(dbname(DbName), [?ADMIN_CTX], Callback, [], QueryArgs).
@@ -492,8 +558,10 @@ design_docs(DbName) ->
%% NOTE: This function probably doesn't belong here as part of the API
-spec reset_validation_funs(dbname()) -> [reference()].
reset_validation_funs(DbName) ->
- [rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]}) ||
- #shard{node=Node, name=Name} <- mem3:shards(DbName)].
+ [
+ rexi:cast(Node, {fabric_rpc, reset_validation_funs, [Name]})
+ || #shard{node = Node, name = Name} <- mem3:shards(DbName)
+ ].
%% @doc clean up index files for all Dbs
-spec cleanup_index_files() -> [ok].
@@ -504,15 +572,19 @@ cleanup_index_files() ->
%% @doc clean up index files for a specific db
-spec cleanup_index_files(dbname()) -> ok.
cleanup_index_files(DbName) ->
- try lists:foreach(
- fun(File) ->
- file:delete(File)
- end, inactive_index_files(DbName))
+ try
+ lists:foreach(
+ fun(File) ->
+ file:delete(File)
+ end,
+ inactive_index_files(DbName)
+ )
catch
error:Error ->
couch_log:error(
"~p:cleanup_index_files. Error: ~p",
- [?MODULE, Error]),
+ [?MODULE, Error]
+ ),
ok
end.
@@ -521,31 +593,48 @@ cleanup_index_files(DbName) ->
inactive_index_files(DbName) ->
{ok, DesignDocs} = fabric:design_docs(DbName),
- ActiveSigs = maps:from_list(lists:map(fun(#doc{id = GroupId}) ->
- {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
- {binary_to_list(couch_util:get_value(signature, Info)), nil}
- end, [couch_doc:from_json_obj(DD) || DD <- DesignDocs])),
-
- FileList = lists:flatmap(fun(#shard{name = ShardName}) ->
- IndexDir = couch_index_util:index_dir(mrview, ShardName),
- filelib:wildcard([IndexDir, "/*"])
- end, mem3:local_shards(dbname(DbName))),
-
- if ActiveSigs =:= [] -> FileList; true ->
- %% <sig>.view and <sig>.compact.view where <sig> is in ActiveSigs
- %% will be excluded from FileList because they are active view
- %% files and should not be deleted.
- lists:filter(fun(FilePath) ->
- not maps:is_key(get_view_sig_from_filename(FilePath), ActiveSigs)
- end, FileList)
+ ActiveSigs = maps:from_list(
+ lists:map(
+ fun(#doc{id = GroupId}) ->
+ {ok, Info} = fabric:get_view_group_info(DbName, GroupId),
+ {binary_to_list(couch_util:get_value(signature, Info)), nil}
+ end,
+ [couch_doc:from_json_obj(DD) || DD <- DesignDocs]
+ )
+ ),
+
+ FileList = lists:flatmap(
+ fun(#shard{name = ShardName}) ->
+ IndexDir = couch_index_util:index_dir(mrview, ShardName),
+ filelib:wildcard([IndexDir, "/*"])
+ end,
+ mem3:local_shards(dbname(DbName))
+ ),
+
+ if
+ ActiveSigs =:= [] ->
+ FileList;
+ true ->
+ %% <sig>.view and <sig>.compact.view where <sig> is in ActiveSigs
+ %% will be excluded from FileList because they are active view
+ %% files and should not be deleted.
+ lists:filter(
+ fun(FilePath) ->
+ not maps:is_key(get_view_sig_from_filename(FilePath), ActiveSigs)
+ end,
+ FileList
+ )
end.
%% @doc clean up index files for a specific db on all nodes
-spec cleanup_index_files_all_nodes(dbname()) -> [reference()].
cleanup_index_files_all_nodes(DbName) ->
- lists:foreach(fun(Node) ->
- rexi:cast(Node, {?MODULE, cleanup_index_files, [DbName]})
- end, mem3:nodes()).
+ lists:foreach(
+ fun(Node) ->
+ rexi:cast(Node, {?MODULE, cleanup_index_files, [DbName]})
+ end,
+ mem3:nodes()
+ ).
%% some simple type validation and transcoding
dbname(DbName) when is_list(DbName) ->
@@ -555,12 +644,13 @@ dbname(DbName) when is_binary(DbName) ->
dbname(Db) ->
try
couch_db:name(Db)
- catch error:badarg ->
- erlang:error({illegal_database_name, Db})
+ catch
+ error:badarg ->
+ erlang:error({illegal_database_name, Db})
end.
%% @doc get db shard uuids
--spec db_uuids(dbname()) -> map().
+-spec db_uuids(dbname()) -> map().
db_uuids(DbName) ->
fabric_db_uuids:go(dbname(DbName)).
@@ -580,15 +670,16 @@ docs(_Db, Docs) ->
doc(_Db, #doc{} = Doc) ->
Doc;
doc(Db0, {_} = Doc) ->
- Db = case couch_db:is_db(Db0) of
- true ->
- Db0;
- false ->
- Shard = hd(mem3:shards(Db0)),
- Props = couch_util:get_value(props, Shard#shard.opts, []),
- {ok, Db1} = couch_db:clustered_db(Db0, [{props, Props}]),
- Db1
- end,
+ Db =
+ case couch_db:is_db(Db0) of
+ true ->
+ Db0;
+ false ->
+ Shard = hd(mem3:shards(Db0)),
+ Props = couch_util:get_value(props, Shard#shard.opts, []),
+ {ok, Db1} = couch_db:clustered_db(Db0, [{props, Props}]),
+ Db1
+ end,
couch_db:doc_from_json_obj_validate(Db, Doc);
doc(_Db, Doc) ->
erlang:error({illegal_doc_format, Doc}).
@@ -616,15 +707,15 @@ opts(Options) ->
add_option(Key, Options) ->
case couch_util:get_value(Key, Options) of
- undefined ->
- case erlang:get(Key) of
undefined ->
- Options;
- Value ->
- [{Key, Value} | Options]
- end;
- _ ->
- Options
+ case erlang:get(Key) of
+ undefined ->
+ Options;
+ Value ->
+ [{Key, Value} | Options]
+ end;
+ _ ->
+ Options
end.
default_callback(complete, Acc) ->
@@ -632,7 +723,7 @@ default_callback(complete, Acc) ->
default_callback(Row, Acc) ->
{ok, [Row | Acc]}.
-is_reduce_view(#mrargs{view_type=ViewType}) ->
+is_reduce_view(#mrargs{view_type = ViewType}) ->
ViewType =:= red;
is_reduce_view({Reduce, _, _}) ->
Reduce =:= red.
@@ -651,29 +742,38 @@ kl_to_query_args(KeyList) ->
%% note that record_info is only known at compile time
%% so the code must be written in this way. For each new
%% record type add a case clause
-lookup_index(Key,RecName) ->
+lookup_index(Key, RecName) ->
Indexes =
case RecName of
- changes_args ->
- lists:zip(record_info(fields, changes_args),
- lists:seq(2, record_info(size, changes_args)));
- mrargs ->
- lists:zip(record_info(fields, mrargs),
- lists:seq(2, record_info(size, mrargs)))
+ changes_args ->
+ lists:zip(
+ record_info(fields, changes_args),
+ lists:seq(2, record_info(size, changes_args))
+ );
+ mrargs ->
+ lists:zip(
+ record_info(fields, mrargs),
+ lists:seq(2, record_info(size, mrargs))
+ )
end,
couch_util:get_value(Key, Indexes).
%% @doc convert a keylist to record with given `RecName'
%% @see lookup_index
-kl_to_record(KeyList,RecName) ->
- Acc0 = case RecName of
- changes_args -> #changes_args{};
- mrargs -> #mrargs{}
- end,
- lists:foldl(fun({Key, Value}, Acc) ->
- Index = lookup_index(couch_util:to_existing_atom(Key),RecName),
- setelement(Index, Acc, Value)
- end, Acc0, KeyList).
+kl_to_record(KeyList, RecName) ->
+ Acc0 =
+ case RecName of
+ changes_args -> #changes_args{};
+ mrargs -> #mrargs{}
+ end,
+ lists:foldl(
+ fun({Key, Value}, Acc) ->
+ Index = lookup_index(couch_util:to_existing_atom(Key), RecName),
+ setelement(Index, Acc, Value)
+ end,
+ Acc0,
+ KeyList
+ ).
set_namespace(NS, #mrargs{extra = Extra} = Args) ->
Args#mrargs{extra = [{namespace, NS} | Extra]}.
@@ -686,11 +786,16 @@ get_view_sig_from_filename(FilePath) ->
update_doc_test_() ->
{
- "Update doc tests", {
- setup, fun setup/0, fun teardown/1,
- fun(Ctx) -> [
- should_throw_conflict(Ctx)
- ] end
+ "Update doc tests",
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ fun(Ctx) ->
+ [
+ should_throw_conflict(Ctx)
+ ]
+ end
}
}.
@@ -699,35 +804,39 @@ should_throw_conflict(Doc) ->
?assertThrow(conflict, update_doc(<<"test-db">>, Doc, []))
end).
-
setup() ->
Doc = #doc{
id = <<"test_doc">>,
- revs = {3, [<<5,68,252,180,43,161,216,223,26,119,71,219,212,229,
- 159,113>>]},
- body = {[{<<"foo">>,<<"asdf">>},{<<"author">>,<<"tom">>}]},
- atts = [], deleted = false, meta = []
+ revs = {3, [<<5, 68, 252, 180, 43, 161, 216, 223, 26, 119, 71, 219, 212, 229, 159, 113>>]},
+ body = {[{<<"foo">>, <<"asdf">>}, {<<"author">>, <<"tom">>}]},
+ atts = [],
+ deleted = false,
+ meta = []
},
ok = application:ensure_started(config),
ok = meck:expect(mem3, shards, fun(_, _) -> [] end),
ok = meck:expect(mem3, quorum, fun(_) -> 1 end),
ok = meck:expect(rexi, cast, fun(_, _) -> ok end),
- ok = meck:expect(rexi_utils, recv,
+ ok = meck:expect(
+ rexi_utils,
+ recv,
fun(_, _, _, _, _, _) ->
{ok, {error, [{Doc, conflict}]}}
- end),
- ok = meck:expect(couch_util, reorder_results,
+ end
+ ),
+ ok = meck:expect(
+ couch_util,
+ reorder_results,
fun(_, [{_, Res}]) ->
[Res]
- end),
+ end
+ ),
ok = meck:expect(fabric_util, create_monitors, fun(_) -> ok end),
ok = meck:expect(rexi_monitor, stop, fun(_) -> ok end),
Doc.
-
teardown(_) ->
meck:unload(),
ok = application:stop(config).
-
-endif.
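
The reformatted `update_doc/3` clauses and the `should_throw_conflict/1` test make the error contract easier to see: per-document failures such as `conflict` are thrown, while successes come back as `{ok, Rev}` or `{accepted, Rev}`. A minimal caller sketch, assuming a hypothetical wrapper module (`fabric_update_example` and its no-retry policy are not part of fabric):

```
-module(fabric_update_example).
-export([save/3]).

%% Save one document and convert a thrown conflict into a tagged
%% return value, so the caller can re-read the latest revision and retry.
save(DbName, Doc, Options) ->
    try fabric:update_doc(DbName, Doc, Options) of
        {ok, NewRev} ->
            {ok, NewRev};
        {accepted, NewRev} ->
            {accepted, NewRev}
    catch
        throw:conflict ->
            {error, conflict}
    end.
```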
diff --git a/src/fabric/src/fabric_db_create.erl b/src/fabric/src/fabric_db_create.erl
index 1048cb0bb..38770aea4 100644
--- a/src/fabric/src/fabric_db_create.erl
+++ b/src/fabric/src/fabric_db_create.erl
@@ -17,61 +17,60 @@
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
-
%% @doc Create a new database, and all its partition files across the cluster
%% Options is a proplist with user_ctx, n, q, validate_name
go(DbName, Options) ->
case validate_dbname(DbName, Options) of
- ok ->
- couch_partition:validate_dbname(DbName, Options),
- case db_exists(DbName) of
- true ->
- {error, file_exists};
- false ->
- {Shards, Doc} = generate_shard_map(DbName, Options),
- CreateShardResult = create_shard_files(Shards, Options),
- case CreateShardResult of
- enametoolong ->
- {error, {database_name_too_long, DbName}};
- _ ->
- case {CreateShardResult, create_shard_db_doc(Doc)} of
- {ok, {ok, Status}} ->
- Status;
- {ok, {error, conflict} = ShardDocError} ->
- % Check if it is just a race to create the shard doc
- case db_exists(DbName) of
- true -> {error, file_exists};
- false -> ShardDocError
- end;
- {file_exists, {ok, _}} ->
+ ok ->
+ couch_partition:validate_dbname(DbName, Options),
+ case db_exists(DbName) of
+ true ->
{error, file_exists};
- {_, Error} ->
- Error
- end
- end
- end;
- Error ->
- Error
+ false ->
+ {Shards, Doc} = generate_shard_map(DbName, Options),
+ CreateShardResult = create_shard_files(Shards, Options),
+ case CreateShardResult of
+ enametoolong ->
+ {error, {database_name_too_long, DbName}};
+ _ ->
+ case {CreateShardResult, create_shard_db_doc(Doc)} of
+ {ok, {ok, Status}} ->
+ Status;
+ {ok, {error, conflict} = ShardDocError} ->
+ % Check if it is just a race to create the shard doc
+ case db_exists(DbName) of
+ true -> {error, file_exists};
+ false -> ShardDocError
+ end;
+ {file_exists, {ok, _}} ->
+ {error, file_exists};
+ {_, Error} ->
+ Error
+ end
+ end
+ end;
+ Error ->
+ Error
end.
validate_dbname(DbName, Options) ->
case couch_util:get_value(validate_name, Options, true) of
- false ->
- ok;
- true ->
- couch_db:validate_dbname(DbName)
+ false ->
+ ok;
+ true ->
+ couch_db:validate_dbname(DbName)
end.
generate_shard_map(DbName, Options) ->
{MegaSecs, Secs, _} = os:timestamp(),
- Suffix = "." ++ integer_to_list(MegaSecs*1000000 + Secs),
- Shards = mem3:choose_shards(DbName, [{shard_suffix,Suffix} | Options]),
+ Suffix = "." ++ integer_to_list(MegaSecs * 1000000 + Secs),
+ Shards = mem3:choose_shards(DbName, [{shard_suffix, Suffix} | Options]),
case mem3_util:open_db_doc(DbName) of
- {ok, Doc} ->
- % the DB already exists, and may have a different Suffix
- ok;
- {not_found, _} ->
- Doc = make_document(Shards, Suffix, Options)
+ {ok, Doc} ->
+ % the DB already exists, and may have a different Suffix
+ ok;
+ {not_found, _} ->
+ Doc = make_document(Shards, Suffix, Options)
end,
{Shards, Doc}.
@@ -79,56 +78,53 @@ create_shard_files(Shards, Options) ->
Workers = fabric_util:submit_jobs(Shards, create_db, [Options]),
RexiMon = fabric_util:create_monitors(Shards),
try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Workers) of
- {error, file_exists} ->
- file_exists;
- {error, enametoolong} ->
- enametoolong;
- {timeout, DefunctWorkers} ->
- fabric_util:log_timeout(DefunctWorkers, "create_db"),
- {error, timeout};
- _ ->
- ok
+ {error, file_exists} ->
+ file_exists;
+ {error, enametoolong} ->
+ enametoolong;
+ {timeout, DefunctWorkers} ->
+ fabric_util:log_timeout(DefunctWorkers, "create_db"),
+ {error, timeout};
+ _ ->
+ ok
after
rexi_monitor:stop(RexiMon)
end.
handle_message({error, enametoolong}, _, _) ->
{error, enametoolong};
-
handle_message(file_exists, _, _) ->
{error, file_exists};
-
handle_message({rexi_DOWN, _, {_, Node}, _}, _, Workers) ->
case lists:filter(fun(S) -> S#shard.node =/= Node end, Workers) of
- [] ->
- {stop, ok};
- RemainingWorkers ->
- {ok, RemainingWorkers}
+ [] ->
+ {stop, ok};
+ RemainingWorkers ->
+ {ok, RemainingWorkers}
end;
-
handle_message(_, Worker, Workers) ->
case lists:delete(Worker, Workers) of
- [] ->
- {stop, ok};
- RemainingWorkers ->
- {ok, RemainingWorkers}
+ [] ->
+ {stop, ok};
+ RemainingWorkers ->
+ {ok, RemainingWorkers}
end.
create_shard_db_doc(Doc) ->
- Shards = [#shard{node=N} || N <- mem3:nodes()],
+ Shards = [#shard{node = N} || N <- mem3:nodes()],
RexiMon = fabric_util:create_monitors(Shards),
Workers = fabric_util:submit_jobs(Shards, create_shard_db_doc, [Doc]),
Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
- {timeout, {_, WorkersDict}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(
- DefunctWorkers,
- "create_shard_db_doc"
- ),
- {error, timeout};
- Else ->
- Else
+ {timeout, {_, WorkersDict}} ->
+ DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
+ fabric_util:log_timeout(
+ DefunctWorkers,
+ "create_shard_db_doc"
+ ),
+ {error, timeout};
+ Else ->
+ Else
after
rexi_monitor:stop(RexiMon)
end.
@@ -136,57 +132,68 @@ create_shard_db_doc(Doc) ->
handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
maybe_stop(W, New);
-
handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
maybe_stop(W, fabric_dict:erase(Worker, Counters));
-
handle_db_update(conflict, _, _) ->
% just fail when we get any conflicts
{error, conflict};
-
handle_db_update(Msg, Worker, {W, Counters}) ->
maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
maybe_stop(W, Counters) ->
case fabric_dict:any(nil, Counters) of
- true ->
- {ok, {W, Counters}};
- false ->
- case lists:sum([1 || {_, ok} <- Counters]) of
- NumOk when NumOk >= (W div 2 +1) ->
- {stop, ok};
- NumOk when NumOk > 0 ->
- {stop, accepted};
- _ ->
- {error, internal_server_error}
- end
+ true ->
+ {ok, {W, Counters}};
+ false ->
+ case lists:sum([1 || {_, ok} <- Counters]) of
+ NumOk when NumOk >= (W div 2 + 1) ->
+ {stop, ok};
+ NumOk when NumOk > 0 ->
+ {stop, accepted};
+ _ ->
+ {error, internal_server_error}
+ end
end.
-make_document([#shard{dbname=DbName}|_] = Shards, Suffix, Options) ->
+make_document([#shard{dbname = DbName} | _] = Shards, Suffix, Options) ->
{RawOut, ByNodeOut, ByRangeOut} =
- lists:foldl(fun(#shard{node=N, range=[B,E]}, {Raw, ByNode, ByRange}) ->
- Range = ?l2b([couch_util:to_hex(<<B:32/integer>>), "-",
- couch_util:to_hex(<<E:32/integer>>)]),
- Node = couch_util:to_binary(N),
- {[[<<"add">>, Range, Node] | Raw], orddict:append(Node, Range, ByNode),
- orddict:append(Range, Node, ByRange)}
- end, {[], [], []}, Shards),
- EngineProp = case couch_util:get_value(engine, Options) of
- E when is_binary(E) -> [{<<"engine">>, E}];
- _ -> []
- end,
- DbProps = case couch_util:get_value(props, Options) of
- Props when is_list(Props) -> [{<<"props">>, {Props}}];
- _ -> []
- end,
+ lists:foldl(
+ fun(#shard{node = N, range = [B, E]}, {Raw, ByNode, ByRange}) ->
+ Range = ?l2b([
+ couch_util:to_hex(<<B:32/integer>>),
+ "-",
+ couch_util:to_hex(<<E:32/integer>>)
+ ]),
+ Node = couch_util:to_binary(N),
+ {
+ [[<<"add">>, Range, Node] | Raw],
+ orddict:append(Node, Range, ByNode),
+ orddict:append(Range, Node, ByRange)
+ }
+ end,
+ {[], [], []},
+ Shards
+ ),
+ EngineProp =
+ case couch_util:get_value(engine, Options) of
+ E when is_binary(E) -> [{<<"engine">>, E}];
+ _ -> []
+ end,
+ DbProps =
+ case couch_util:get_value(props, Options) of
+ Props when is_list(Props) -> [{<<"props">>, {Props}}];
+ _ -> []
+ end,
#doc{
id = DbName,
- body = {[
- {<<"shard_suffix">>, Suffix},
- {<<"changelog">>, lists:sort(RawOut)},
- {<<"by_node">>, {[{K,lists:sort(V)} || {K,V} <- ByNodeOut]}},
- {<<"by_range">>, {[{K,lists:sort(V)} || {K,V} <- ByRangeOut]}}
- ] ++ EngineProp ++ DbProps}
+ body = {
+ [
+ {<<"shard_suffix">>, Suffix},
+ {<<"changelog">>, lists:sort(RawOut)},
+ {<<"by_node">>, {[{K, lists:sort(V)} || {K, V} <- ByNodeOut]}},
+ {<<"by_range">>, {[{K, lists:sort(V)} || {K, V} <- ByRangeOut]}}
+ ] ++ EngineProp ++ DbProps
+ }
}.
db_exists(DbName) -> is_list(catch mem3:shards(DbName)).
@@ -205,24 +212,20 @@ db_exists_test_() ->
]
}.
-
setup_all() ->
meck:new(mem3).
-
teardown_all(_) ->
meck:unload().
-
db_exists_for_existing_db() ->
Mock = fun(DbName) when is_binary(DbName) ->
- [#shard{dbname = DbName, range = [0,100]}]
+ [#shard{dbname = DbName, range = [0, 100]}]
end,
ok = meck:expect(mem3, shards, Mock),
?assertEqual(true, db_exists(<<"foobar">>)),
?assertEqual(true, meck:validate(mem3)).
-
db_exists_for_missing_db() ->
Mock = fun(DbName) ->
erlang:error(database_does_not_exist, DbName)
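
The reindented `maybe_stop/2` above spells out the shard-doc write semantics: a majority of the `W` writers replying `ok` yields `ok`, any success short of a majority yields `accepted`, and no successes is an internal error. A standalone sketch of just that classification (module and function names are illustrative):

```
-module(quorum_example).
-export([classify/2]).

%% NumOk is the number of `ok' replies collected from W workers.
classify(NumOk, W) when NumOk >= (W div 2 + 1) ->
    ok;
classify(NumOk, _W) when NumOk > 0 ->
    accepted;
classify(_NumOk, _W) ->
    internal_server_error.
```

For example, `classify(2, 3)` is `ok`, while `classify(1, 3)` is only `accepted`.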
diff --git a/src/fabric/src/fabric_db_delete.erl b/src/fabric/src/fabric_db_delete.erl
index c146cb6cd..a257b0d6e 100644
--- a/src/fabric/src/fabric_db_delete.erl
+++ b/src/fabric/src/fabric_db_delete.erl
@@ -24,34 +24,34 @@ go(DbName, _Options) ->
Shards = mem3:shards(DbName),
% delete doc from shard_db
try delete_shard_db_doc(DbName) of
- {ok, ok} ->
- ok;
- {ok, accepted} ->
- accepted;
- {ok, not_found} ->
- erlang:error(database_does_not_exist, DbName);
- Error ->
- Error
+ {ok, ok} ->
+ ok;
+ {ok, accepted} ->
+ accepted;
+ {ok, not_found} ->
+ erlang:error(database_does_not_exist, DbName);
+ Error ->
+ Error
after
% delete the shard files
fabric_util:submit_jobs(Shards, delete_db, [])
end.
delete_shard_db_doc(Doc) ->
- Shards = [#shard{node=N} || N <- mem3:nodes()],
+ Shards = [#shard{node = N} || N <- mem3:nodes()],
RexiMon = fabric_util:create_monitors(Shards),
Workers = fabric_util:submit_jobs(Shards, delete_shard_db_doc, [Doc]),
Acc0 = {length(Shards), fabric_dict:init(Workers, nil)},
try fabric_util:recv(Workers, #shard.ref, fun handle_db_update/3, Acc0) of
- {timeout, {_, WorkersDict}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(
- DefunctWorkers,
- "delete_shard_db_doc"
- ),
- {error, timeout};
- Else ->
- Else
+ {timeout, {_, WorkersDict}} ->
+ DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
+ fabric_util:log_timeout(
+ DefunctWorkers,
+ "delete_shard_db_doc"
+ ),
+ {error, timeout};
+ Else ->
+ Else
after
rexi_monitor:stop(RexiMon)
end.
@@ -59,40 +59,37 @@ delete_shard_db_doc(Doc) ->
handle_db_update({rexi_DOWN, _, {_, Node}, _}, _Worker, {W, Counters}) ->
New = fabric_dict:filter(fun(S, _) -> S#shard.node =/= Node end, Counters),
maybe_stop(W, New);
-
handle_db_update({rexi_EXIT, _Reason}, Worker, {W, Counters}) ->
maybe_stop(W, fabric_dict:erase(Worker, Counters));
-
handle_db_update(conflict, _, _) ->
% just fail when we get any conflicts
{error, conflict};
-
handle_db_update(Msg, Worker, {W, Counters}) ->
maybe_stop(W, fabric_dict:store(Worker, Msg, Counters)).
maybe_stop(W, Counters) ->
case fabric_dict:any(nil, Counters) of
- true ->
- {ok, {W, Counters}};
- false ->
- {Ok,NotFound} = fabric_dict:fold(fun count_replies/3, {0,0}, Counters),
- case {Ok + NotFound, Ok, NotFound} of
- {W, 0, W} ->
- {#shard{dbname=Name}, _} = hd(Counters),
- couch_log:warning("~p not_found ~d", [?MODULE, Name]),
- {stop, not_found};
- {W, _, _} ->
- {stop, ok};
- {_, M, _} when M > 0 ->
- {stop,accepted};
- _ ->
- {error, internal_server_error}
- end
+ true ->
+ {ok, {W, Counters}};
+ false ->
+ {Ok, NotFound} = fabric_dict:fold(fun count_replies/3, {0, 0}, Counters),
+ case {Ok + NotFound, Ok, NotFound} of
+ {W, 0, W} ->
+ {#shard{dbname = Name}, _} = hd(Counters),
+ couch_log:warning("~p not_found ~d", [?MODULE, Name]),
+ {stop, not_found};
+ {W, _, _} ->
+ {stop, ok};
+ {_, M, _} when M > 0 ->
+ {stop, accepted};
+ _ ->
+ {error, internal_server_error}
+ end
end.
count_replies(_, ok, {Ok, NotFound}) ->
- {Ok+1, NotFound};
+ {Ok + 1, NotFound};
count_replies(_, not_found, {Ok, NotFound}) ->
- {Ok, NotFound+1};
+ {Ok, NotFound + 1};
count_replies(_, _, Acc) ->
Acc.
diff --git a/src/fabric/src/fabric_db_doc_count.erl b/src/fabric/src/fabric_db_doc_count.erl
index a91014b7c..b2ab35b81 100644
--- a/src/fabric/src/fabric_db_doc_count.erl
+++ b/src/fabric/src/fabric_db_doc_count.erl
@@ -24,28 +24,26 @@ go(DbName) ->
RexiMon = fabric_util:create_monitors(Shards),
Acc0 = {fabric_dict:init(Workers, nil), []},
try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {WorkersDict, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "get_doc_count"),
- {error, timeout};
- Else ->
- Else
+ {timeout, {WorkersDict, _}} ->
+ DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
+ fabric_util:log_timeout(DefunctWorkers, "get_doc_count"),
+ {error, timeout};
+ Else ->
+ Else
after
rexi_monitor:stop(RexiMon)
end.
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Resps}) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {Counters, Resps}) ->
case fabric_ring:node_down(NodeRef, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps}};
error -> {error, {nodedown, <<"progress not possible">>}}
end;
-
handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps}};
error -> {error, Reason}
end;
-
handle_message({ok, Count}, Shard, {Counters, Resps}) ->
case fabric_ring:handle_response(Shard, Count, Counters, Resps) of
{ok, {Counters1, Resps1}} ->
@@ -54,7 +52,6 @@ handle_message({ok, Count}, Shard, {Counters, Resps}) ->
Total = fabric_dict:fold(fun(_, C, A) -> A + C end, 0, Resps1),
{stop, Total}
end;
-
handle_message(Reason, Shard, {Counters, Resps}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps}};
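
Once `fabric_ring:handle_response/4` reports a complete ring, the handler above folds the per-shard counts into one total. A reduced sketch, assuming the collected responses are available as a plain list of `{Shard, Count}` pairs (module and function names are illustrative):

```
-module(doc_count_example).
-export([total/1]).

%% Sum per-shard document counts once every range has responded,
%% e.g. total([{shard_a, 10}, {shard_b, 32}]) =:= 42.
total(ShardCounts) ->
    lists:foldl(fun({_Shard, Count}, Acc) -> Acc + Count end, 0, ShardCounts).
```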
diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl
index 586f282c2..2366420c8 100644
--- a/src/fabric/src/fabric_db_info.erl
+++ b/src/fabric/src/fabric_db_info.erl
@@ -23,11 +23,10 @@ go(DbName) ->
RexiMon = fabric_util:create_monitors(Shards),
Fun = fun handle_message/3,
{ok, ClusterInfo} = get_cluster_info(Shards),
- CInfo = [{cluster, ClusterInfo}],
+ CInfo = [{cluster, ClusterInfo}],
Acc0 = {fabric_dict:init(Workers, nil), [], CInfo},
try
case fabric_util:recv(Workers, #shard.ref, Fun, Acc0) of
-
{ok, Acc} ->
{ok, Acc};
{timeout, {WorkersDict, _, _}} ->
@@ -47,19 +46,16 @@ go(DbName) ->
rexi_monitor:stop(RexiMon)
end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _, {Counters, Resps, CInfo}) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, {Counters, Resps, CInfo}) ->
case fabric_ring:node_down(NodeRef, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
error -> {error, {nodedown, <<"progress not possible">>}}
end;
-
handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps, CInfo}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
error -> {error, Reason}
end;
-
handle_message({ok, Info}, Shard, {Counters, Resps, CInfo}) ->
case fabric_ring:handle_response(Shard, Info, Counters, Resps) of
{ok, {Counters1, Resps1}} ->
@@ -67,84 +63,102 @@ handle_message({ok, Info}, Shard, {Counters, Resps, CInfo}) ->
{stop, Resps1} ->
{stop, build_final_response(CInfo, Shard#shard.dbname, Resps1)}
end;
-
handle_message(Reason, Shard, {Counters, Resps, CInfo}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps, CInfo}};
error -> {error, Reason}
end.
-
build_final_response(CInfo, DbName, Responses) ->
- AccF = fabric_dict:fold(fun(Shard, Info, {Seqs, PSeqs, Infos}) ->
- Seq = build_seq(Shard, Info),
- PSeq = couch_util:get_value(purge_seq, Info),
- {[{Shard, Seq} | Seqs], [{Shard, PSeq} | PSeqs], [Info | Infos]}
- end, {[], [], []}, Responses),
+ AccF = fabric_dict:fold(
+ fun(Shard, Info, {Seqs, PSeqs, Infos}) ->
+ Seq = build_seq(Shard, Info),
+ PSeq = couch_util:get_value(purge_seq, Info),
+ {[{Shard, Seq} | Seqs], [{Shard, PSeq} | PSeqs], [Info | Infos]}
+ end,
+ {[], [], []},
+ Responses
+ ),
{Seqs, PSeqs, Infos} = AccF,
- PackedSeq = fabric_view_changes:pack_seqs(Seqs),
+ PackedSeq = fabric_view_changes:pack_seqs(Seqs),
PackedPSeq = fabric_view_changes:pack_seqs(PSeqs),
MergedInfos = merge_results(lists:flatten([CInfo | Infos])),
Sequences = [{purge_seq, PackedPSeq}, {update_seq, PackedSeq}],
[{db_name, DbName}] ++ Sequences ++ MergedInfos.
-
build_seq(#shard{node = Node}, Info) when is_list(Info) ->
Seq = couch_util:get_value(update_seq, Info),
Uuid = couch_util:get_value(uuid, Info),
PrefixLen = fabric_util:get_uuid_prefix_len(),
{Seq, binary:part(Uuid, {0, PrefixLen}), Node}.
-
merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (doc_count, X, Acc) ->
- [{doc_count, lists:sum(X)} | Acc];
- (doc_del_count, X, Acc) ->
- [{doc_del_count, lists:sum(X)} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (sizes, X, Acc) ->
- [{sizes, {merge_object(X)}} | Acc];
- (disk_format_version, X, Acc) ->
- [{disk_format_version, lists:max(X)} | Acc];
- (cluster, [X], Acc) ->
- [{cluster, {X}} | Acc];
- (props, Xs, Acc) ->
- [{props, {merge_object(Xs)}} | Acc];
- (_K, _V, Acc) ->
- Acc
- end, [{instance_start_time, <<"0">>}], Dict).
+ Dict = lists:foldl(
+ fun({K, V}, D0) -> orddict:append(K, V, D0) end,
+ orddict:new(),
+ Info
+ ),
+ orddict:fold(
+ fun
+ (doc_count, X, Acc) ->
+ [{doc_count, lists:sum(X)} | Acc];
+ (doc_del_count, X, Acc) ->
+ [{doc_del_count, lists:sum(X)} | Acc];
+ (compact_running, X, Acc) ->
+ [{compact_running, lists:member(true, X)} | Acc];
+ (sizes, X, Acc) ->
+ [{sizes, {merge_object(X)}} | Acc];
+ (disk_format_version, X, Acc) ->
+ [{disk_format_version, lists:max(X)} | Acc];
+ (cluster, [X], Acc) ->
+ [{cluster, {X}} | Acc];
+ (props, Xs, Acc) ->
+ [{props, {merge_object(Xs)}} | Acc];
+ (_K, _V, Acc) ->
+ Acc
+ end,
+ [{instance_start_time, <<"0">>}],
+ Dict
+ ).
merge_object(Objects) ->
- Dict = lists:foldl(fun({Props}, D) ->
- lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props)
- end, orddict:new(), Objects),
- orddict:fold(fun
- (Key, [X | _] = Xs, Acc) when is_integer(X) ->
- [{Key, lists:sum(Xs)} | Acc];
- (Key, [X | _] = Xs, Acc) when is_boolean(X) ->
- [{Key, lists:all(fun all_true/1, Xs)} | Acc];
- (_Key, _Xs, Acc) ->
- Acc
- end, [], Dict).
+ Dict = lists:foldl(
+ fun({Props}, D) ->
+ lists:foldl(fun({K, V}, D0) -> orddict:append(K, V, D0) end, D, Props)
+ end,
+ orddict:new(),
+ Objects
+ ),
+ orddict:fold(
+ fun
+ (Key, [X | _] = Xs, Acc) when is_integer(X) ->
+ [{Key, lists:sum(Xs)} | Acc];
+ (Key, [X | _] = Xs, Acc) when is_boolean(X) ->
+ [{Key, lists:all(fun all_true/1, Xs)} | Acc];
+ (_Key, _Xs, Acc) ->
+ Acc
+ end,
+ [],
+ Dict
+ ).
all_true(true) -> true;
all_true(_) -> false.
get_cluster_info(Shards) ->
- Dict = lists:foldl(fun(#shard{range = R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end, dict:new(), Shards),
+ Dict = lists:foldl(
+ fun(#shard{range = R}, Acc) ->
+ dict:update_counter(R, 1, Acc)
+ end,
+ dict:new(),
+ Shards
+ ),
Q = dict:size(Dict),
N = dict:fold(fun(_, X, Acc) -> max(X, Acc) end, 0, Dict),
%% defaults as per mem3:quorum/1
WR = N div 2 + 1,
{ok, [{q, Q}, {n, N}, {w, WR}, {r, WR}]}.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -155,7 +169,6 @@ get_cluster_info_test_() ->
fun get_cluster_info_test_generator/1
}.
-
setup() ->
Quorums = [1, 2, 3],
Shards = [1, 3, 5, 8, 12, 24],
@@ -164,8 +177,7 @@ setup() ->
get_cluster_info_test_generator([]) ->
[];
get_cluster_info_test_generator([{N, Q} | Rest]) ->
- {generator,
- fun() ->
+ {generator, fun() ->
Nodes = lists:seq(1, 8),
Shards = mem3_util:create_partition_map(<<"foo">>, N, Q, Nodes),
{ok, Info} = get_cluster_info(Shards),
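
`merge_results/1` above reduces the per-shard info proplists to one response: counters are summed, `compact_running` is true if any shard reports true, and `disk_format_version` takes the maximum. A reduced sketch of the same merge over plain proplists (the module name and the subset of keys handled are illustrative):

```
-module(info_merge_example).
-export([merge/1]).

%% Merge a list of per-shard info proplists into a single proplist.
merge(Infos) ->
    Grouped = lists:foldl(
        fun({K, V}, D) -> orddict:append(K, V, D) end,
        orddict:new(),
        lists:flatten(Infos)
    ),
    orddict:fold(
        fun
            (doc_count, Xs, Acc) -> [{doc_count, lists:sum(Xs)} | Acc];
            (doc_del_count, Xs, Acc) -> [{doc_del_count, lists:sum(Xs)} | Acc];
            (compact_running, Xs, Acc) -> [{compact_running, lists:member(true, Xs)} | Acc];
            (disk_format_version, Xs, Acc) -> [{disk_format_version, lists:max(Xs)} | Acc];
            (_K, _Xs, Acc) -> Acc
        end,
        [],
        Grouped
    ).
```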
diff --git a/src/fabric/src/fabric_db_meta.erl b/src/fabric/src/fabric_db_meta.erl
index 348b06d51..1013b958d 100644
--- a/src/fabric/src/fabric_db_meta.erl
+++ b/src/fabric/src/fabric_db_meta.erl
@@ -12,8 +12,12 @@
-module(fabric_db_meta).
--export([set_revs_limit/3, set_security/3, get_all_security/2,
- set_purge_infos_limit/3]).
+-export([
+ set_revs_limit/3,
+ set_security/3,
+ get_all_security/2,
+ set_purge_infos_limit/3
+]).
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
@@ -25,20 +29,19 @@
num_workers
}).
-
set_revs_limit(DbName, Limit, Options) ->
Shards = mem3:shards(DbName),
Workers = fabric_util:submit_jobs(Shards, set_revs_limit, [Limit, Options]),
Handler = fun handle_revs_message/3,
Acc0 = {Workers, length(Workers) - 1},
case fabric_util:recv(Workers, #shard.ref, Handler, Acc0) of
- {ok, ok} ->
- ok;
- {timeout, {DefunctWorkers, _}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_revs_limit"),
- {error, timeout};
- Error ->
- Error
+ {ok, ok} ->
+ ok;
+ {timeout, {DefunctWorkers, _}} ->
+ fabric_util:log_timeout(DefunctWorkers, "set_revs_limit"),
+ {error, timeout};
+ Error ->
+ Error
end.
handle_revs_message(ok, _, {_Workers, 0}) ->
@@ -48,7 +51,6 @@ handle_revs_message(ok, Worker, {Workers, Waiting}) ->
handle_revs_message(Error, _, _Acc) ->
{error, Error}.
-
set_purge_infos_limit(DbName, Limit, Options) ->
Shards = mem3:shards(DbName),
Workers = fabric_util:submit_jobs(Shards, set_purge_infos_limit, [Limit, Options]),
@@ -71,35 +73,34 @@ handle_purge_message(ok, Worker, {Workers, Waiting}) ->
handle_purge_message(Error, _, _Acc) ->
{error, Error}.
-
set_security(DbName, SecObj, Options) ->
Shards = mem3:shards(DbName),
RexiMon = fabric_util:create_monitors(Shards),
Workers = fabric_util:submit_jobs(Shards, set_security, [SecObj, Options]),
Handler = fun handle_set_message/3,
Acc = #acc{
- workers=Workers,
- finished=[],
- num_workers=length(Workers)
+ workers = Workers,
+ finished = [],
+ num_workers = length(Workers)
},
try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
- {ok, #acc{finished=Finished}} ->
- case check_sec_set(length(Workers), Finished) of
- ok -> ok;
- Error -> Error
- end;
- {timeout, #acc{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, "set_security"),
- {error, timeout};
- Error ->
- Error
+ {ok, #acc{finished = Finished}} ->
+ case check_sec_set(length(Workers), Finished) of
+ ok -> ok;
+ Error -> Error
+ end;
+ {timeout, #acc{workers = DefunctWorkers}} ->
+ fabric_util:log_timeout(DefunctWorkers, "set_security"),
+ {error, timeout};
+ Error ->
+ Error
after
rexi_monitor:stop(RexiMon)
end.
-handle_set_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers=Wrkrs}=Acc) ->
+handle_set_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers = Wrkrs} = Acc) ->
RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
- maybe_finish_set(Acc#acc{workers=RemWorkers});
+ maybe_finish_set(Acc#acc{workers = RemWorkers});
handle_set_message(ok, W, Acc) ->
NewAcc = Acc#acc{
workers = (Acc#acc.workers -- [W]),
@@ -115,9 +116,9 @@ handle_set_message(Error, W, Acc) ->
NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
maybe_finish_set(NewAcc).
-maybe_finish_set(#acc{workers=[]}=Acc) ->
+maybe_finish_set(#acc{workers = []} = Acc) ->
{stop, Acc};
-maybe_finish_set(#acc{finished=Finished, num_workers=NumWorkers}=Acc) ->
+maybe_finish_set(#acc{finished = Finished, num_workers = NumWorkers} = Acc) ->
case check_sec_set(NumWorkers, Finished) of
ok -> {stop, Acc};
_ -> {ok, Acc}
@@ -126,8 +127,9 @@ maybe_finish_set(#acc{finished=Finished, num_workers=NumWorkers}=Acc) ->
check_sec_set(NumWorkers, SetWorkers) ->
try
check_sec_set_int(NumWorkers, SetWorkers)
- catch throw:Reason ->
- {error, Reason}
+ catch
+ throw:Reason ->
+ {error, Reason}
end.
check_sec_set_int(NumWorkers, SetWorkers) ->
@@ -143,41 +145,41 @@ check_sec_set_int(NumWorkers, SetWorkers) ->
end,
ok.
-
get_all_security(DbName, Options) ->
- Shards = case proplists:get_value(shards, Options) of
- Shards0 when is_list(Shards0) -> Shards0;
- _ -> mem3:shards(DbName)
- end,
+ Shards =
+ case proplists:get_value(shards, Options) of
+ Shards0 when is_list(Shards0) -> Shards0;
+ _ -> mem3:shards(DbName)
+ end,
RexiMon = fabric_util:create_monitors(Shards),
Workers = fabric_util:submit_jobs(Shards, get_all_security, [[?ADMIN_CTX]]),
Handler = fun handle_get_message/3,
Acc = #acc{
- workers=Workers,
- finished=[],
- num_workers=length(Workers)
+ workers = Workers,
+ finished = [],
+ num_workers = length(Workers)
},
try fabric_util:recv(Workers, #shard.ref, Handler, Acc) of
- {ok, #acc{finished=SecObjs}} when length(SecObjs) > length(Workers) / 2 ->
- {ok, SecObjs};
- {ok, _} ->
- {error, no_majority};
- {timeout, #acc{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_all_security"
- ),
- {error, timeout};
- Error ->
- Error
+ {ok, #acc{finished = SecObjs}} when length(SecObjs) > length(Workers) / 2 ->
+ {ok, SecObjs};
+ {ok, _} ->
+ {error, no_majority};
+ {timeout, #acc{workers = DefunctWorkers}} ->
+ fabric_util:log_timeout(
+ DefunctWorkers,
+ "get_all_security"
+ ),
+ {error, timeout};
+ Error ->
+ Error
after
rexi_monitor:stop(RexiMon)
end.
-handle_get_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers=Wrkrs}=Acc) ->
+handle_get_message({rexi_DOWN, _, {_, Node}, _}, _, #acc{workers = Wrkrs} = Acc) ->
RemWorkers = lists:filter(fun(S) -> S#shard.node =/= Node end, Wrkrs),
- maybe_finish_get(Acc#acc{workers=RemWorkers});
-handle_get_message({Props}=SecObj, W, Acc) when is_list(Props) ->
+ maybe_finish_get(Acc#acc{workers = RemWorkers});
+handle_get_message({Props} = SecObj, W, Acc) when is_list(Props) ->
NewAcc = Acc#acc{
workers = (Acc#acc.workers -- [W]),
finished = [{W, SecObj} | Acc#acc.finished]
@@ -192,7 +194,7 @@ handle_get_message(Error, W, Acc) ->
NewAcc = Acc#acc{workers = (Acc#acc.workers -- [W])},
maybe_finish_get(NewAcc).
-maybe_finish_get(#acc{workers=[]}=Acc) ->
+maybe_finish_get(#acc{workers = []} = Acc) ->
{stop, Acc};
maybe_finish_get(Acc) ->
{ok, Acc}.
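
`get_all_security/2` above only returns the collected objects when more than half of the workers replied, and reports `no_majority` otherwise. The check reduces to a one-liner (module and function names are illustrative):

```
-module(sec_majority_example).
-export([check/2]).

%% Accept the per-shard security objects only if a strict majority of
%% the workers managed to reply.
check(SecObjs, Workers) ->
    case length(SecObjs) > length(Workers) / 2 of
        true -> {ok, SecObjs};
        false -> {error, no_majority}
    end.
```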
diff --git a/src/fabric/src/fabric_db_partition_info.erl b/src/fabric/src/fabric_db_partition_info.erl
index 954c52db2..efc895534 100644
--- a/src/fabric/src/fabric_db_partition_info.erl
+++ b/src/fabric/src/fabric_db_partition_info.erl
@@ -17,14 +17,12 @@
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-record(acc, {
counters,
replies,
ring_opts
}).
-
go(DbName, Partition) ->
Shards = mem3:shards(DbName, couch_partition:shard_key(Partition)),
Workers = fabric_util:submit_jobs(Shards, get_partition_info, [Partition]),
@@ -33,11 +31,12 @@ go(DbName, Partition) ->
Acc0 = #acc{
counters = fabric_dict:init(Workers, nil),
replies = [],
- ring_opts = [{any, Shards}]
+ ring_opts = [{any, Shards}]
},
try
case fabric_util:recv(Workers, #shard.ref, Fun, Acc0) of
- {ok, Res} -> {ok, Res};
+ {ok, Res} ->
+ {ok, Res};
{timeout, {WorkersDict, _}} ->
DefunctWorkers = fabric_util:remove_done_workers(
WorkersDict,
@@ -48,32 +47,31 @@ go(DbName, Partition) ->
"get_partition_info"
),
{error, timeout};
- {error, Error} -> throw(Error)
+ {error, Error} ->
+ throw(Error)
end
after
rexi_monitor:stop(RexiMon)
end.
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, #acc{} = Acc) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, #acc{} = Acc) ->
#acc{counters = Counters, ring_opts = RingOpts} = Acc,
case fabric_util:remove_down_workers(Counters, NodeRef, RingOpts) of
- {ok, NewCounters} ->
- {ok, Acc#acc{counters = NewCounters}};
- error ->
- {error, {nodedown, <<"progress not possible">>}}
+ {ok, NewCounters} ->
+ {ok, Acc#acc{counters = NewCounters}};
+ error ->
+ {error, {nodedown, <<"progress not possible">>}}
end;
-
handle_message({rexi_EXIT, Reason}, Shard, #acc{} = Acc) ->
#acc{counters = Counters, ring_opts = RingOpts} = Acc,
NewCounters = fabric_dict:erase(Shard, Counters),
case fabric_ring:is_progress_possible(NewCounters, RingOpts) of
- true ->
- {ok, Acc#acc{counters = NewCounters}};
- false ->
- {error, Reason}
+ true ->
+ {ok, Acc#acc{counters = NewCounters}};
+ false ->
+ {error, Reason}
end;
-
-handle_message({ok, Info}, #shard{dbname=Name} = Shard, #acc{} = Acc) ->
+handle_message({ok, Info}, #shard{dbname = Name} = Shard, #acc{} = Acc) ->
#acc{counters = Counters, replies = Replies} = Acc,
Replies1 = [Info | Replies],
Counters1 = fabric_dict:erase(Shard, Counters),
@@ -85,11 +83,9 @@ handle_message({ok, Info}, #shard{dbname=Name} = Shard, #acc{} = Acc) ->
false ->
{ok, Acc#acc{counters = Counters1, replies = Replies1}}
end;
-
handle_message(_, _, #acc{} = Acc) ->
{ok, Acc}.
-
get_max_partition_size(Max, []) ->
Max;
get_max_partition_size(MaxInfo, [NextInfo | Rest]) ->
@@ -105,21 +101,18 @@ get_max_partition_size(MaxInfo, [NextInfo | Rest]) ->
get_max_partition_size(MaxInfo, Rest)
end.
-
% for JS to work nicely we need to convert the size list
% to a jiffy object
format_partition(PartitionInfo) ->
{value, {sizes, Size}, PartitionInfo1} = lists:keytake(sizes, 1, PartitionInfo),
[{sizes, {Size}} | PartitionInfo1].
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
node_down_test() ->
- [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
+ [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
Acc1 = #acc{
counters = fabric_dict:init([S1, S2], nil),
ring_opts = [{any, [S1, S2]}]
@@ -130,12 +123,13 @@ node_down_test() ->
?assertEqual([{S2, nil}], Acc2#acc.counters),
N2 = S2#shard.node,
- ?assertEqual({error, {nodedown, <<"progress not possible">>}},
- handle_message({rexi_DOWN, nil, {nil, N2}, nil}, nil, Acc2)).
-
+ ?assertEqual(
+ {error, {nodedown, <<"progress not possible">>}},
+ handle_message({rexi_DOWN, nil, {nil, N2}, nil}, nil, Acc2)
+ ).
worker_exit_test() ->
- [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
+ [S1, S2] = [mk_shard("n1", [0, 4]), mk_shard("n2", [0, 8])],
Acc1 = #acc{
counters = fabric_dict:init([S1, S2], nil),
ring_opts = [{any, [S1, S2]}]
@@ -146,7 +140,6 @@ worker_exit_test() ->
?assertEqual({error, bam}, handle_message({rexi_EXIT, bam}, S2, Acc2)).
-
mk_shard(Name, Range) ->
Node = list_to_atom(Name),
BName = list_to_binary(Name),
diff --git a/src/fabric/src/fabric_db_update_listener.erl b/src/fabric/src/fabric_db_update_listener.erl
index fb2937be1..78ccf5a4d 100644
--- a/src/fabric/src/fabric_db_update_listener.erl
+++ b/src/fabric/src/fabric_db_update_listener.erl
@@ -45,18 +45,19 @@ go(Parent, ParentRef, DbName, Timeout) ->
%% This is not a common pattern for rexi but to enable the calling
%% process to communicate via handle_message/3 we "fake" it as a
%% a spawned worker.
- Workers = [#worker{ref=ParentRef, pid=Parent} | Notifiers],
+ Workers = [#worker{ref = ParentRef, pid = Parent} | Notifiers],
Acc = #acc{
parent = Parent,
state = unset,
shards = Shards
},
- Resp = try
- receive_results(Workers, Acc, Timeout)
- after
- rexi_monitor:stop(RexiMon),
- stop_cleanup_monitor(MonPid)
- end,
+ Resp =
+ try
+ receive_results(Workers, Acc, Timeout)
+ after
+ rexi_monitor:stop(RexiMon),
+ stop_cleanup_monitor(MonPid)
+ end,
case Resp of
{ok, _} -> ok;
{error, Error} -> erlang:error(Error);
@@ -64,13 +65,20 @@ go(Parent, ParentRef, DbName, Timeout) ->
end.
start_update_notifiers(Shards) ->
- EndPointDict = lists:foldl(fun(#shard{node=Node, name=Name}, Acc) ->
- dict:append(Node, Name, Acc)
- end, dict:new(), Shards),
- lists:map(fun({Node, DbNames}) ->
- Ref = rexi:cast(Node, {?MODULE, start_update_notifier, [DbNames]}),
- #worker{ref=Ref, node=Node}
- end, dict:to_list(EndPointDict)).
+ EndPointDict = lists:foldl(
+ fun(#shard{node = Node, name = Name}, Acc) ->
+ dict:append(Node, Name, Acc)
+ end,
+ dict:new(),
+ Shards
+ ),
+ lists:map(
+ fun({Node, DbNames}) ->
+ Ref = rexi:cast(Node, {?MODULE, start_update_notifier, [DbNames]}),
+ #worker{ref = Ref, node = Node}
+ end,
+ dict:to_list(EndPointDict)
+ ).
% rexi endpoint
start_update_notifier(DbNames) ->
@@ -132,41 +140,39 @@ wait_db_updated({Pid, Ref}) ->
receive_results(Workers, Acc0, Timeout) ->
Fun = fun handle_message/3,
case rexi_utils:recv(Workers, #worker.ref, Fun, Acc0, infinity, Timeout) of
- {timeout, #acc{state=updated}=Acc} ->
- receive_results(Workers, Acc, Timeout);
- {timeout, #acc{state=waiting}=Acc} ->
- erlang:send(Acc#acc.parent, {state, self(), timeout}),
- receive_results(Workers, Acc#acc{state=unset}, Timeout);
- {timeout, Acc} ->
- receive_results(Workers, Acc#acc{state=timeout}, Timeout);
- {_, Acc} ->
- {ok, Acc}
+ {timeout, #acc{state = updated} = Acc} ->
+ receive_results(Workers, Acc, Timeout);
+ {timeout, #acc{state = waiting} = Acc} ->
+ erlang:send(Acc#acc.parent, {state, self(), timeout}),
+ receive_results(Workers, Acc#acc{state = unset}, Timeout);
+ {timeout, Acc} ->
+ receive_results(Workers, Acc#acc{state = timeout}, Timeout);
+ {_, Acc} ->
+ {ok, Acc}
end.
-
handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
handle_error(Node, {nodedown, Node}, Acc);
handle_message({rexi_EXIT, _Reason}, Worker, Acc) ->
handle_error(Worker#worker.node, {worker_exit, Worker}, Acc);
handle_message({gen_event_EXIT, Node, Reason}, _Worker, Acc) ->
handle_error(Node, {gen_event_EXIT, Node, Reason}, Acc);
-handle_message(db_updated, _Worker, #acc{state=waiting}=Acc) ->
+handle_message(db_updated, _Worker, #acc{state = waiting} = Acc) ->
% propagate message to calling controller
erlang:send(Acc#acc.parent, {state, self(), updated}),
- {ok, Acc#acc{state=unset}};
+ {ok, Acc#acc{state = unset}};
handle_message(db_updated, _Worker, Acc) ->
- {ok, Acc#acc{state=updated}};
+ {ok, Acc#acc{state = updated}};
handle_message(db_deleted, _Worker, _Acc) ->
{stop, ok};
-handle_message(get_state, _Worker, #acc{state=unset}=Acc) ->
- {ok, Acc#acc{state=waiting}};
+handle_message(get_state, _Worker, #acc{state = unset} = Acc) ->
+ {ok, Acc#acc{state = waiting}};
handle_message(get_state, _Worker, Acc) ->
erlang:send(Acc#acc.parent, {state, self(), Acc#acc.state}),
- {ok, Acc#acc{state=unset}};
+ {ok, Acc#acc{state = unset}};
handle_message(done, _, _) ->
{stop, ok}.
-
handle_error(Node, Reason, #acc{shards = Shards} = Acc) ->
Rest = lists:filter(fun(#shard{node = N}) -> N /= Node end, Shards),
case fabric_ring:is_progress_possible([{R, nil} || R <- Rest]) of
diff --git a/src/fabric/src/fabric_db_uuids.erl b/src/fabric/src/fabric_db_uuids.erl
index a440d74c2..12931a3d1 100644
--- a/src/fabric/src/fabric_db_uuids.erl
+++ b/src/fabric/src/fabric_db_uuids.erl
@@ -12,14 +12,11 @@
-module(fabric_db_uuids).
-
-export([go/1]).
-
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-
go(DbName) when is_binary(DbName) ->
Shards = mem3:live_shards(DbName, [node() | nodes()]),
Workers = fabric_util:submit_jobs(Shards, get_uuid, []),
@@ -36,30 +33,30 @@ go(DbName) when is_binary(DbName) ->
rexi_monitor:stop(RexiMon)
end.
-
-handle_message({rexi_DOWN, _, {_, NodeRef},_}, _Shard, {Cntrs, Res}) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {Cntrs, Res}) ->
case fabric_ring:node_down(NodeRef, Cntrs, Res, [all]) of
{ok, Cntrs1} -> {ok, {Cntrs1, Res}};
error -> {error, {nodedown, <<"progress not possible">>}}
end;
-
handle_message({rexi_EXIT, Reason}, Shard, {Cntrs, Res}) ->
case fabric_ring:handle_error(Shard, Cntrs, Res, [all]) of
{ok, Cntrs1} -> {ok, {Cntrs1, Res}};
error -> {error, Reason}
end;
-
handle_message(Uuid, Shard, {Cntrs, Res}) when is_binary(Uuid) ->
case fabric_ring:handle_response(Shard, Uuid, Cntrs, Res, [all]) of
{ok, {Cntrs1, Res1}} ->
{ok, {Cntrs1, Res1}};
{stop, Res1} ->
- Uuids = fabric_dict:fold(fun(#shard{} = S, Id, #{} = Acc) ->
- Acc#{Id => S#shard{ref = undefined}}
- end, #{}, Res1),
+ Uuids = fabric_dict:fold(
+ fun(#shard{} = S, Id, #{} = Acc) ->
+ Acc#{Id => S#shard{ref = undefined}}
+ end,
+ #{},
+ Res1
+ ),
{stop, Uuids}
end;
-
handle_message(Reason, Shard, {Cntrs, Res}) ->
case fabric_ring:handle_error(Shard, Cntrs, Res, [all]) of
{ok, Cntrs1} -> {ok, {Cntrs1, Res}};
diff --git a/src/fabric/src/fabric_design_doc_count.erl b/src/fabric/src/fabric_design_doc_count.erl
index b0efc3007..f6f866a24 100644
--- a/src/fabric/src/fabric_design_doc_count.erl
+++ b/src/fabric/src/fabric_design_doc_count.erl
@@ -34,18 +34,16 @@ go(DbName) ->
rexi_monitor:stop(RexiMon)
end.
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {Counters, Resps}) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {Counters, Resps}) ->
case fabric_ring:node_down(NodeRef, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps}};
error -> {error, {nodedown, <<"progress not possible">>}}
end;
-
handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps}};
error -> {error, Reason}
end;
-
handle_message({ok, Count}, Shard, {Counters, Resps}) ->
case fabric_ring:handle_response(Shard, Count, Counters, Resps) of
{ok, {Counters1, Resps1}} ->
@@ -54,7 +52,6 @@ handle_message({ok, Count}, Shard, {Counters, Resps}) ->
Total = fabric_dict:fold(fun(_, C, A) -> A + C end, 0, Resps1),
{stop, Total}
end;
-
handle_message(Reason, Shard, {Counters, Resps}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps}};
diff --git a/src/fabric/src/fabric_dict.erl b/src/fabric/src/fabric_dict.erl
index b63ed2095..8395221b4 100644
--- a/src/fabric/src/fabric_dict.erl
+++ b/src/fabric/src/fabric_dict.erl
@@ -27,7 +27,7 @@ fetch_keys(Dict) ->
orddict:fetch_keys(Dict).
decrement_all(Dict) ->
- [{K,V-1} || {K,V} <- Dict].
+ [{K, V - 1} || {K, V} <- Dict].
store(Key, Value, Dict) ->
orddict:store(Key, Value, Dict).
@@ -38,7 +38,6 @@ erase(Key, Dict) ->
update_counter(Key, Incr, Dict0) ->
orddict:update_counter(Key, Incr, Dict0).
-
lookup_element(Key, Dict) ->
couch_util:get_value(Key, Dict).
diff --git a/src/fabric/src/fabric_doc_atts.erl b/src/fabric/src/fabric_doc_atts.erl
index bd0687b30..80a36ee51 100644
--- a/src/fabric/src/fabric_doc_atts.erl
+++ b/src/fabric/src/fabric_doc_atts.erl
@@ -22,7 +22,6 @@
receiver_callback/2
]).
-
receiver(_Req, _DbName, undefined) ->
<<"">>;
receiver(_Req, _DbName, {unknown_transfer_encoding, Unknown}) ->
@@ -39,7 +38,6 @@ receiver(Req, DbName, Length) when is_integer(Length) ->
receiver(_Req, _DbName, Length) ->
exit({length_not_integer, Length}).
-
receiver_callback(Middleman, chunked) ->
fun(4096, ChunkFun, State) ->
write_chunks(Middleman, ChunkFun, State)
@@ -57,36 +55,35 @@ receiver_callback(Middleman, Length) when is_integer(Length) ->
end
end.
-
%%
%% internal
%%
maybe_send_continue(#httpd{mochi_req = MochiReq} = Req) ->
case couch_httpd:header_value(Req, "expect") of
- undefined ->
- ok;
- Expect ->
- case string:to_lower(Expect) of
- "100-continue" ->
- MochiReq:start_raw_response({100, gb_trees:empty()});
- _ ->
- ok
- end
+ undefined ->
+ ok;
+ Expect ->
+ case string:to_lower(Expect) of
+ "100-continue" ->
+ MochiReq:start_raw_response({100, gb_trees:empty()});
+ _ ->
+ ok
+ end
end.
write_chunks(MiddleMan, ChunkFun, State) ->
MiddleMan ! {self(), gimme_data},
Timeout = fabric_util:attachments_timeout(),
receive
- {MiddleMan, ChunkRecordList} ->
- rexi:reply(attachment_chunk_received),
- case flush_chunks(ChunkRecordList, ChunkFun, State) of
- {continue, NewState} ->
- write_chunks(MiddleMan, ChunkFun, NewState);
- {done, NewState} ->
- NewState
- end
+ {MiddleMan, ChunkRecordList} ->
+ rexi:reply(attachment_chunk_received),
+ case flush_chunks(ChunkRecordList, ChunkFun, State) of
+ {continue, NewState} ->
+ write_chunks(MiddleMan, ChunkFun, NewState);
+ {done, NewState} ->
+ NewState
+ end
after Timeout ->
exit(timeout)
end.
@@ -102,24 +99,27 @@ flush_chunks([Chunk | Rest], ChunkFun, State) ->
receive_unchunked_attachment(_Req, 0) ->
ok;
receive_unchunked_attachment(Req, Length) ->
- receive {MiddleMan, go} ->
- Data = couch_httpd:recv(Req, 0),
- MiddleMan ! {self(), Data}
+ receive
+ {MiddleMan, go} ->
+ Data = couch_httpd:recv(Req, 0),
+ MiddleMan ! {self(), Data}
end,
receive_unchunked_attachment(Req, Length - size(Data)).
middleman(Req, DbName, chunked) ->
% spawn a process to actually receive the uploaded data
RcvFun = fun(ChunkRecord, ok) ->
- receive {From, go} -> From ! {self(), ChunkRecord} end, ok
+ receive
+ {From, go} -> From ! {self(), ChunkRecord}
+ end,
+ ok
end,
- Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req,4096,RcvFun,ok) end),
+ Receiver = spawn(fun() -> couch_httpd:recv_chunked(Req, 4096, RcvFun, ok) end),
% take requests from the DB writers and get data from the receiver
N = mem3:n(DbName),
Timeout = fabric_util:attachments_timeout(),
middleman_loop(Receiver, N, [], [], Timeout);
-
middleman(Req, DbName, Length) ->
Receiver = spawn(fun() -> receive_unchunked_attachment(Req, Length) end),
N = mem3:n(DbName),
@@ -127,43 +127,50 @@ middleman(Req, DbName, Length) ->
middleman_loop(Receiver, N, [], [], Timeout).
middleman_loop(Receiver, N, Counters0, ChunkList0, Timeout) ->
- receive {From, gimme_data} ->
- % Figure out how far along this writer (From) is in the list
- ListIndex = case fabric_dict:lookup_element(From, Counters0) of
- undefined -> 0;
- I -> I
- end,
-
- % Talk to the receiver to get another chunk if necessary
- ChunkList1 = if ListIndex == length(ChunkList0) ->
- Receiver ! {self(), go},
- receive
- {Receiver, ChunkRecord} ->
- ChunkList0 ++ [ChunkRecord]
- end;
- true -> ChunkList0 end,
-
- % reply to the writer
- Reply = lists:nthtail(ListIndex, ChunkList1),
- From ! {self(), Reply},
-
- % Update the counter for this writer
- Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
-
- % Drop any chunks that have been sent to all writers
- Size = fabric_dict:size(Counters1),
- NumToDrop = lists:min([I || {_, I} <- Counters1]),
-
- {ChunkList3, Counters3} =
- if Size == N andalso NumToDrop > 0 ->
- ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
- Counters2 = [{F, I-NumToDrop} || {F, I} <- Counters1],
- {ChunkList2, Counters2};
- true ->
- {ChunkList1, Counters1}
- end,
-
- middleman_loop(Receiver, N, Counters3, ChunkList3, Timeout)
+ receive
+ {From, gimme_data} ->
+ % Figure out how far along this writer (From) is in the list
+ ListIndex =
+ case fabric_dict:lookup_element(From, Counters0) of
+ undefined -> 0;
+ I -> I
+ end,
+
+ % Talk to the receiver to get another chunk if necessary
+ ChunkList1 =
+ if
+ ListIndex == length(ChunkList0) ->
+ Receiver ! {self(), go},
+ receive
+ {Receiver, ChunkRecord} ->
+ ChunkList0 ++ [ChunkRecord]
+ end;
+ true ->
+ ChunkList0
+ end,
+
+ % reply to the writer
+ Reply = lists:nthtail(ListIndex, ChunkList1),
+ From ! {self(), Reply},
+
+ % Update the counter for this writer
+ Counters1 = fabric_dict:update_counter(From, length(Reply), Counters0),
+
+ % Drop any chunks that have been sent to all writers
+ Size = fabric_dict:size(Counters1),
+ NumToDrop = lists:min([I || {_, I} <- Counters1]),
+
+ {ChunkList3, Counters3} =
+ if
+ Size == N andalso NumToDrop > 0 ->
+ ChunkList2 = lists:nthtail(NumToDrop, ChunkList1),
+ Counters2 = [{F, I - NumToDrop} || {F, I} <- Counters1],
+ {ChunkList2, Counters2};
+ true ->
+ {ChunkList1, Counters1}
+ end,
+
+ middleman_loop(Receiver, N, Counters3, ChunkList3, Timeout)
after Timeout ->
exit(Receiver, kill),
ok
diff --git a/src/fabric/src/fabric_doc_missing_revs.erl b/src/fabric/src/fabric_doc_missing_revs.erl
index 993c21dc2..ffd408f4e 100644
--- a/src/fabric/src/fabric_doc_missing_revs.erl
+++ b/src/fabric/src/fabric_doc_missing_revs.erl
@@ -23,48 +23,57 @@ go(DbName, AllIdsRevs) ->
go(_, [], _) ->
{ok, []};
go(DbName, AllIdsRevs, Options) ->
- Workers = lists:map(fun({#shard{name=Name, node=Node} = Shard, IdsRevs}) ->
- Ref = rexi:cast(Node, {fabric_rpc, get_missing_revs, [Name, IdsRevs,
- Options]}),
- Shard#shard{ref=Ref}
- end, group_idrevs_by_shard(DbName, AllIdsRevs)),
- ResultDict = dict:from_list([{Id, {{nil,Revs},[]}} || {Id, Revs} <- AllIdsRevs]),
+ Workers = lists:map(
+ fun({#shard{name = Name, node = Node} = Shard, IdsRevs}) ->
+ Ref = rexi:cast(
+ Node,
+ {fabric_rpc, get_missing_revs, [
+ Name,
+ IdsRevs,
+ Options
+ ]}
+ ),
+ Shard#shard{ref = Ref}
+ end,
+ group_idrevs_by_shard(DbName, AllIdsRevs)
+ ),
+ ResultDict = dict:from_list([{Id, {{nil, Revs}, []}} || {Id, Revs} <- AllIdsRevs]),
RexiMon = fabric_util:create_monitors(Workers),
Acc0 = {length(Workers), ResultDict, Workers},
try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {timeout, {_, _, DefunctWorkers}} ->
- fabric_util:log_timeout(
- DefunctWorkers,
- "get_missing_revs"
- ),
- {error, timeout};
- Else ->
- Else
+ {timeout, {_, _, DefunctWorkers}} ->
+ fabric_util:log_timeout(
+ DefunctWorkers,
+ "get_missing_revs"
+ ),
+ {error, timeout};
+ Else ->
+ Else
after
rexi_monitor:stop(RexiMon)
end.
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Shard, {_WorkerLen, ResultDict, Workers}) ->
- NewWorkers = [W || #shard{node=Node} = W <- Workers, Node =/= NodeRef],
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Shard, {_WorkerLen, ResultDict, Workers}) ->
+ NewWorkers = [W || #shard{node = Node} = W <- Workers, Node =/= NodeRef],
skip_message({fabric_dict:size(NewWorkers), ResultDict, NewWorkers});
handle_message({rexi_EXIT, _}, Worker, {W, D, Workers}) ->
- skip_message({W-1,D,lists:delete(Worker, Workers)});
+ skip_message({W - 1, D, lists:delete(Worker, Workers)});
handle_message({ok, Results}, _Worker, {1, D0, _}) ->
D = update_dict(D0, Results),
{stop, dict:fold(fun force_reply/3, [], D)};
handle_message({ok, Results}, Worker, {WaitingCount, D0, Workers}) ->
D = update_dict(D0, Results),
case dict:fold(fun maybe_reply/3, {stop, []}, D) of
- continue ->
- % still haven't heard about some Ids
- {ok, {WaitingCount - 1, D, lists:delete(Worker,Workers)}};
- {stop, FinalReply} ->
- % finished, stop the rest of the jobs
- fabric_util:cleanup(lists:delete(Worker,Workers)),
- {stop, FinalReply}
+ continue ->
+ % still haven't heard about some Ids
+ {ok, {WaitingCount - 1, D, lists:delete(Worker, Workers)}};
+ {stop, FinalReply} ->
+ % finished, stop the rest of the jobs
+ fabric_util:cleanup(lists:delete(Worker, Workers)),
+ {stop, FinalReply}
end.
-force_reply(Id, {{nil,Revs}, Anc}, Acc) ->
+force_reply(Id, {{nil, Revs}, Anc}, Acc) ->
% never heard about this ID, assume it's missing
[{Id, Revs, Anc} | Acc];
force_reply(_, {[], _}, Acc) ->
@@ -82,14 +91,24 @@ maybe_reply(Id, {Revs, Anc}, {stop, Acc}) ->
{stop, [{Id, Revs, Anc} | Acc]}.
group_idrevs_by_shard(DbName, IdsRevs) ->
- dict:to_list(lists:foldl(fun({Id, Revs}, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, {Id, Revs}, D1)
- end, D0, mem3:shards(DbName,Id))
- end, dict:new(), IdsRevs)).
+ dict:to_list(
+ lists:foldl(
+ fun({Id, Revs}, D0) ->
+ lists:foldl(
+ fun(Shard, D1) ->
+ dict:append(Shard, {Id, Revs}, D1)
+ end,
+ D0,
+ mem3:shards(DbName, Id)
+ )
+ end,
+ dict:new(),
+ IdsRevs
+ )
+ ).
update_dict(D0, KVs) ->
- lists:foldl(fun({K,V,A}, D1) -> dict:store(K, {V,A}, D1) end, D0, KVs).
+ lists:foldl(fun({K, V, A}, D1) -> dict:store(K, {V, A}, D1) end, D0, KVs).
skip_message({0, Dict, _Workers}) ->
{stop, dict:fold(fun force_reply/3, [], Dict)};
diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl
index 8ef604b60..ba348112c 100644
--- a/src/fabric/src/fabric_doc_open.erl
+++ b/src/fabric/src/fabric_doc_open.erl
@@ -18,7 +18,6 @@
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-record(acc, {
dbname,
workers,
@@ -29,15 +28,18 @@
q_reply
}).
-
go(DbName, Id, Options) ->
- Handler = case proplists:get_value(doc_info, Options) of
- true -> get_doc_info;
- full -> get_full_doc_info;
- undefined -> open_doc
- end,
- Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), Handler,
- [Id, [deleted|Options]]),
+ Handler =
+ case proplists:get_value(doc_info, Options) of
+ true -> get_doc_info;
+ full -> get_full_doc_info;
+ undefined -> open_doc
+ end,
+ Workers = fabric_util:submit_jobs(
+ mem3:shards(DbName, Id),
+ Handler,
+ [Id, [deleted | Options]]
+ ),
SuppressDeletedDoc = not lists:member(deleted, Options),
N = mem3:n(DbName),
R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
@@ -50,64 +52,65 @@ go(DbName, Id, Options) ->
},
RexiMon = fabric_util:create_monitors(Workers),
try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) of
- {ok, #acc{}=Acc} when Handler =:= open_doc ->
- Reply = handle_response(Acc),
- format_reply(Reply, SuppressDeletedDoc);
- {ok, #acc{state = r_not_met}} ->
- {error, quorum_not_met};
- {ok, #acc{q_reply = QuorumReply}} ->
- format_reply(QuorumReply, SuppressDeletedDoc);
- {timeout, #acc{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, atom_to_list(Handler)),
- {error, timeout};
- Error ->
- Error
+ {ok, #acc{} = Acc} when Handler =:= open_doc ->
+ Reply = handle_response(Acc),
+ format_reply(Reply, SuppressDeletedDoc);
+ {ok, #acc{state = r_not_met}} ->
+ {error, quorum_not_met};
+ {ok, #acc{q_reply = QuorumReply}} ->
+ format_reply(QuorumReply, SuppressDeletedDoc);
+ {timeout, #acc{workers = DefunctWorkers}} ->
+ fabric_util:log_timeout(DefunctWorkers, atom_to_list(Handler)),
+ {error, timeout};
+ Error ->
+ Error
after
rexi_monitor:stop(RexiMon)
end.
handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
- NewWorkers = [W || #shard{node=N}=W <- Acc#acc.workers, N /= Node],
+ NewWorkers = [W || #shard{node = N} = W <- Acc#acc.workers, N /= Node],
case NewWorkers of
- [] ->
- {stop, Acc#acc{workers=[]}};
- _ ->
- {ok, Acc#acc{workers=NewWorkers}}
+ [] ->
+ {stop, Acc#acc{workers = []}};
+ _ ->
+ {ok, Acc#acc{workers = NewWorkers}}
end;
handle_message({rexi_EXIT, _Reason}, Worker, Acc) ->
NewWorkers = lists:delete(Worker, Acc#acc.workers),
case NewWorkers of
- [] ->
- {stop, Acc#acc{workers=[]}};
- _ ->
- {ok, Acc#acc{workers=NewWorkers}}
+ [] ->
+ {stop, Acc#acc{workers = []}};
+ _ ->
+ {ok, Acc#acc{workers = NewWorkers}}
end;
handle_message(Reply, Worker, Acc) ->
NewReplies = fabric_util:update_counter(Reply, 1, Acc#acc.replies),
- NewNodeRevs = case Reply of
- {ok, #doc{revs = {Pos, [Rev | _]}}} ->
- [{Worker#shard.node, [{Pos, Rev}]} | Acc#acc.node_revs];
- _ ->
- Acc#acc.node_revs
- end,
+ NewNodeRevs =
+ case Reply of
+ {ok, #doc{revs = {Pos, [Rev | _]}}} ->
+ [{Worker#shard.node, [{Pos, Rev}]} | Acc#acc.node_revs];
+ _ ->
+ Acc#acc.node_revs
+ end,
NewAcc = Acc#acc{replies = NewReplies, node_revs = NewNodeRevs},
case is_r_met(Acc#acc.workers, NewReplies, Acc#acc.r) of
- {true, QuorumReply} ->
- fabric_util:cleanup(lists:delete(Worker, Acc#acc.workers)),
- {stop, NewAcc#acc{workers=[], state=r_met, q_reply=QuorumReply}};
- wait_for_more ->
- NewWorkers = lists:delete(Worker, Acc#acc.workers),
- {ok, NewAcc#acc{workers=NewWorkers}};
- no_more_workers ->
- {stop, NewAcc#acc{workers=[]}}
+ {true, QuorumReply} ->
+ fabric_util:cleanup(lists:delete(Worker, Acc#acc.workers)),
+ {stop, NewAcc#acc{workers = [], state = r_met, q_reply = QuorumReply}};
+ wait_for_more ->
+ NewWorkers = lists:delete(Worker, Acc#acc.workers),
+ {ok, NewAcc#acc{workers = NewWorkers}};
+ no_more_workers ->
+ {stop, NewAcc#acc{workers = []}}
end.
-handle_response(#acc{state=r_met, replies=Replies, q_reply=QuorumReply}=Acc) ->
+handle_response(#acc{state = r_met, replies = Replies, q_reply = QuorumReply} = Acc) ->
case {Replies, fabric_util:remove_ancestors(Replies, [])} of
{[_], [_]} ->
% Complete agreement amongst all copies
QuorumReply;
- {[_|_], [{_, {QuorumReply, _}}]} ->
+ {[_ | _], [{_, {QuorumReply, _}}]} ->
% Any divergent replies are ancestors of the QuorumReply,
% repair the document asynchronously
spawn(fun() -> read_repair(Acc) end),
@@ -120,68 +123,70 @@ handle_response(Acc) ->
read_repair(Acc).
is_r_met(Workers, Replies, R) ->
- case lists:dropwhile(fun({_,{_, Count}}) -> Count < R end, Replies) of
- [{_,{QuorumReply, _}} | _] ->
- {true, QuorumReply};
- [] when length(Workers) > 1 ->
- wait_for_more;
- [] ->
- no_more_workers
+ case lists:dropwhile(fun({_, {_, Count}}) -> Count < R end, Replies) of
+ [{_, {QuorumReply, _}} | _] ->
+ {true, QuorumReply};
+ [] when length(Workers) > 1 ->
+ wait_for_more;
+ [] ->
+ no_more_workers
end.
-read_repair(#acc{dbname=DbName, replies=Replies, node_revs=NodeRevs}) ->
- Docs = [Doc || {_, {{ok, #doc{}=Doc}, _}} <- Replies],
+read_repair(#acc{dbname = DbName, replies = Replies, node_revs = NodeRevs}) ->
+ Docs = [Doc || {_, {{ok, #doc{} = Doc}, _}} <- Replies],
case Docs of
- % omit local docs from read repair
- [#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} | _] ->
- choose_reply(Docs);
- [#doc{id=Id} | _] ->
- Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
- Res = fabric:update_docs(DbName, Docs, Opts),
- case Res of
- {ok, []} ->
- couch_stats:increment_counter([fabric, read_repairs, success]);
- _ ->
- couch_stats:increment_counter([fabric, read_repairs, failure]),
- couch_log:notice("read_repair ~s ~s ~p", [DbName, Id, Res])
- end,
- choose_reply(Docs);
- [] ->
- % Try hard to return some sort of information
- % to the client.
- Values = [V || {_, {V, _}} <- Replies],
- case lists:member({not_found, missing}, Values) of
- true ->
- {not_found, missing};
- false when length(Values) > 0 ->
- % Sort for stability in responses in
- % case we have some weird condition
- hd(lists:sort(Values));
- false ->
- {error, read_failure}
- end
+ % omit local docs from read repair
+ [#doc{id = <<?LOCAL_DOC_PREFIX, _/binary>>} | _] ->
+ choose_reply(Docs);
+ [#doc{id = Id} | _] ->
+ Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
+ Res = fabric:update_docs(DbName, Docs, Opts),
+ case Res of
+ {ok, []} ->
+ couch_stats:increment_counter([fabric, read_repairs, success]);
+ _ ->
+ couch_stats:increment_counter([fabric, read_repairs, failure]),
+ couch_log:notice("read_repair ~s ~s ~p", [DbName, Id, Res])
+ end,
+ choose_reply(Docs);
+ [] ->
+ % Try hard to return some sort of information
+ % to the client.
+ Values = [V || {_, {V, _}} <- Replies],
+ case lists:member({not_found, missing}, Values) of
+ true ->
+ {not_found, missing};
+ false when length(Values) > 0 ->
+ % Sort for stability in responses in
+ % case we have some weird condition
+ hd(lists:sort(Values));
+ false ->
+ {error, read_failure}
+ end
end.
choose_reply(Docs) ->
% Sort descending by {not deleted, rev}. This should match
% the logic of couch_doc:to_doc_info/1.
- [Winner | _] = lists:sort(fun(DocA, DocB) ->
- InfoA = {not DocA#doc.deleted, DocA#doc.revs},
- InfoB = {not DocB#doc.deleted, DocB#doc.revs},
- InfoA > InfoB
- end, Docs),
+ [Winner | _] = lists:sort(
+ fun(DocA, DocB) ->
+ InfoA = {not DocA#doc.deleted, DocA#doc.revs},
+ InfoB = {not DocB#doc.deleted, DocB#doc.revs},
+ InfoA > InfoB
+ end,
+ Docs
+ ),
{ok, Winner}.
-format_reply({ok, #full_doc_info{deleted=true}}, true) ->
+format_reply({ok, #full_doc_info{deleted = true}}, true) ->
{not_found, deleted};
-format_reply({ok, #doc{deleted=true}}, true) ->
+format_reply({ok, #doc{deleted = true}}, true) ->
{not_found, deleted};
format_reply(not_found, _) ->
{not_found, missing};
format_reply(Else, _) ->
Else.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -195,23 +200,18 @@ format_reply(Else, _) ->
rexi_monitor
]).
-
setup_all() ->
meck:new(?MECK_MODS, [passthrough]).
-
teardown_all(_) ->
meck:unload().
-
setup() ->
meck:reset(?MECK_MODS).
-
teardown(_) ->
ok.
-
open_doc_test_() ->
{
setup,
@@ -234,7 +234,6 @@ open_doc_test_() ->
}
}.
-
t_is_r_met() ->
?_test(begin
Workers0 = [],
@@ -249,18 +248,24 @@ t_is_r_met() ->
{{true, bar}, [fabric_util:kv(bar, 1), fabric_util:kv(bar, 2)], 2},
{{true, bar}, [fabric_util:kv(bar, 2), fabric_util:kv(foo, 1)], 2}
],
- lists:foreach(fun({Expect, Replies, Q}) ->
- ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
- end, SuccessCases),
+ lists:foreach(
+ fun({Expect, Replies, Q}) ->
+ ?assertEqual(Expect, is_r_met(Workers0, Replies, Q))
+ end,
+ SuccessCases
+ ),
WaitForMoreCases = [
{[fabric_util:kv(foo, 1)], 2},
{[fabric_util:kv(foo, 2)], 3},
{[fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2}
],
- lists:foreach(fun({Replies, Q}) ->
- ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
- end, WaitForMoreCases),
+ lists:foreach(
+ fun({Replies, Q}) ->
+ ?assertEqual(wait_for_more, is_r_met(Workers2, Replies, Q))
+ end,
+ WaitForMoreCases
+ ),
FailureCases = [
{Workers0, [fabric_util:kv(foo, 1)], 2},
@@ -268,73 +273,73 @@ t_is_r_met() ->
{Workers1, [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)], 2},
{Workers1, [fabric_util:kv(foo, 2)], 3}
],
- lists:foreach(fun({Workers, Replies, Q}) ->
- ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
- end, FailureCases)
+ lists:foreach(
+ fun({Workers, Replies, Q}) ->
+ ?assertEqual(no_more_workers, is_r_met(Workers, Replies, Q))
+ end,
+ FailureCases
+ )
end).
-
t_handle_message_down() ->
Node0 = 'foo@localhost',
Node1 = 'bar@localhost',
Down0 = {rexi_DOWN, nil, {nil, Node0}, nil},
Down1 = {rexi_DOWN, nil, {nil, Node1}, nil},
- Workers0 = [#shard{node=Node0} || _ <- [a, b]],
- Worker1 = #shard{node=Node1},
+ Workers0 = [#shard{node = Node0} || _ <- [a, b]],
+ Worker1 = #shard{node = Node1},
Workers1 = Workers0 ++ [Worker1],
?_test(begin
% Stop when no more workers are left
?assertEqual(
- {stop, #acc{workers=[]}},
- handle_message(Down0, nil, #acc{workers=Workers0})
+ {stop, #acc{workers = []}},
+ handle_message(Down0, nil, #acc{workers = Workers0})
),
% Continue when we have more workers
?assertEqual(
- {ok, #acc{workers=[Worker1]}},
- handle_message(Down0, nil, #acc{workers=Workers1})
+ {ok, #acc{workers = [Worker1]}},
+ handle_message(Down0, nil, #acc{workers = Workers1})
),
% A second DOWN removes the remaining workers
?assertEqual(
- {stop, #acc{workers=[]}},
- handle_message(Down1, nil, #acc{workers=[Worker1]})
+ {stop, #acc{workers = []}},
+ handle_message(Down1, nil, #acc{workers = [Worker1]})
)
end).
-
t_handle_message_exit() ->
Exit = {rexi_EXIT, nil},
- Worker0 = #shard{ref=erlang:make_ref()},
- Worker1 = #shard{ref=erlang:make_ref()},
+ Worker0 = #shard{ref = erlang:make_ref()},
+ Worker1 = #shard{ref = erlang:make_ref()},
?_test(begin
% Only removes the specified worker
?assertEqual(
- {ok, #acc{workers=[Worker1]}},
- handle_message(Exit, Worker0, #acc{workers=[Worker0, Worker1]})
+ {ok, #acc{workers = [Worker1]}},
+ handle_message(Exit, Worker0, #acc{workers = [Worker0, Worker1]})
),
?assertEqual(
- {ok, #acc{workers=[Worker0]}},
- handle_message(Exit, Worker1, #acc{workers=[Worker0, Worker1]})
+ {ok, #acc{workers = [Worker0]}},
+ handle_message(Exit, Worker1, #acc{workers = [Worker0, Worker1]})
),
% We bail if it was the last worker
?assertEqual(
- {stop, #acc{workers=[]}},
- handle_message(Exit, Worker0, #acc{workers=[Worker0]})
+ {stop, #acc{workers = []}},
+ handle_message(Exit, Worker0, #acc{workers = [Worker0]})
)
end).
-
t_handle_message_reply() ->
- Worker0 = #shard{ref=erlang:make_ref()},
- Worker1 = #shard{ref=erlang:make_ref()},
- Worker2 = #shard{ref=erlang:make_ref()},
+ Worker0 = #shard{ref = erlang:make_ref()},
+ Worker1 = #shard{ref = erlang:make_ref()},
+ Worker2 = #shard{ref = erlang:make_ref()},
Workers = [Worker0, Worker1, Worker2],
- Acc0 = #acc{workers=Workers, r=2, replies=[]},
+ Acc0 = #acc{workers = Workers, r = 2, replies = []},
?_test(begin
meck:expect(rexi, kill_all, fun(_) -> ok end),
@@ -342,19 +347,19 @@ t_handle_message_reply() ->
% Test that we continue when we haven't met R yet
?assertMatch(
{ok, #acc{
- workers=[Worker0, Worker1],
- replies=[{foo, {foo, 1}}]
+ workers = [Worker0, Worker1],
+ replies = [{foo, {foo, 1}}]
}},
handle_message(foo, Worker2, Acc0)
),
?assertMatch(
{ok, #acc{
- workers=[Worker0, Worker1],
- replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+ workers = [Worker0, Worker1],
+ replies = [{bar, {bar, 1}}, {foo, {foo, 1}}]
}},
handle_message(bar, Worker2, Acc0#acc{
- replies=[{foo, {foo, 1}}]
+ replies = [{foo, {foo, 1}}]
})
),
@@ -363,18 +368,18 @@ t_handle_message_reply() ->
% is returned. Bit subtle on the assertions here.
?assertMatch(
- {stop, #acc{workers=[], replies=[{foo, {foo, 1}}]}},
- handle_message(foo, Worker0, Acc0#acc{workers=[Worker0]})
+ {stop, #acc{workers = [], replies = [{foo, {foo, 1}}]}},
+ handle_message(foo, Worker0, Acc0#acc{workers = [Worker0]})
),
?assertMatch(
{stop, #acc{
- workers=[],
- replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+ workers = [],
+ replies = [{bar, {bar, 1}}, {foo, {foo, 1}}]
}},
handle_message(bar, Worker0, Acc0#acc{
- workers=[Worker0],
- replies=[{foo, {foo, 1}}]
+ workers = [Worker0],
+ replies = [{foo, {foo, 1}}]
})
),
@@ -383,43 +388,42 @@ t_handle_message_reply() ->
?assertMatch(
{stop, #acc{
- workers=[],
- replies=[{foo, {foo, 2}}],
- state=r_met,
- q_reply=foo
+ workers = [],
+ replies = [{foo, {foo, 2}}],
+ state = r_met,
+ q_reply = foo
}},
handle_message(foo, Worker1, Acc0#acc{
- workers=[Worker0, Worker1],
- replies=[{foo, {foo, 1}}]
+ workers = [Worker0, Worker1],
+ replies = [{foo, {foo, 1}}]
})
),
?assertEqual(
{stop, #acc{
- workers=[],
- r=1,
- replies=[{foo, {foo, 1}}],
- state=r_met,
- q_reply=foo
+ workers = [],
+ r = 1,
+ replies = [{foo, {foo, 1}}],
+ state = r_met,
+ q_reply = foo
}},
- handle_message(foo, Worker0, Acc0#acc{r=1})
+ handle_message(foo, Worker0, Acc0#acc{r = 1})
),
?assertMatch(
{stop, #acc{
- workers=[],
- replies=[{bar, {bar, 1}}, {foo, {foo, 2}}],
- state=r_met,
- q_reply=foo
+ workers = [],
+ replies = [{bar, {bar, 1}}, {foo, {foo, 2}}],
+ state = r_met,
+ q_reply = foo
}},
handle_message(foo, Worker0, Acc0#acc{
- workers=[Worker0],
- replies=[{bar, {bar, 1}}, {foo, {foo, 1}}]
+ workers = [Worker0],
+ replies = [{bar, {bar, 1}}, {foo, {foo, 1}}]
})
)
end).
-
t_store_node_revs() ->
W1 = #shard{node = w1, ref = erlang:make_ref()},
W2 = #shard{node = w2, ref = erlang:make_ref()},
@@ -449,9 +453,9 @@ t_store_node_revs() ->
Acc1 = InitAcc#acc{node_revs = [{w1, [{1, <<"foo">>}]}]},
{ok, #acc{node_revs = NodeRevs4}} = handle_message(Foo2, W2, Acc1),
?assertEqual(
- [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
- NodeRevs4
- ),
+ [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+ NodeRevs4
+ ),
% Make sure rexi_DOWN doesn't modify node_revs
Down = {rexi_DOWN, nil, {nil, w1}, nil},
@@ -473,15 +477,14 @@ t_store_node_revs() ->
{ok, Acc3} = handle_message(Foo2, W2, Acc2),
{stop, Acc4} = handle_message(NFM, W3, Acc3),
?assertEqual(
- [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
- Acc4#acc.node_revs
- )
+ [{w2, [{2, <<"foo2">>}]}, {w1, [{1, <<"foo">>}]}],
+ Acc4#acc.node_revs
+ )
end).
-
t_read_repair() ->
- Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
+ Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+ Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
NFM = {not_found, missing},
?_test(begin
@@ -492,33 +495,32 @@ t_read_repair() ->
meck:expect(fabric, update_docs, fun(_, [_], _) -> {ok, []} end),
Acc0 = #acc{
dbname = <<"name">>,
- replies = [fabric_util:kv(Foo1,1)]
+ replies = [fabric_util:kv(Foo1, 1)]
},
?assertEqual(Foo1, read_repair(Acc0)),
meck:expect(fabric, update_docs, fun(_, [_, _], _) -> {ok, []} end),
Acc1 = #acc{
dbname = <<"name">>,
- replies = [fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,1)]
+ replies = [fabric_util:kv(Foo1, 1), fabric_util:kv(Foo2, 1)]
},
?assertEqual(Foo2, read_repair(Acc1)),
% Test when we have nothing but errors
- Acc2 = #acc{replies=[fabric_util:kv(NFM, 1)]},
+ Acc2 = #acc{replies = [fabric_util:kv(NFM, 1)]},
?assertEqual(NFM, read_repair(Acc2)),
- Acc3 = #acc{replies=[fabric_util:kv(NFM,1), fabric_util:kv(foo,2)]},
+ Acc3 = #acc{replies = [fabric_util:kv(NFM, 1), fabric_util:kv(foo, 2)]},
?assertEqual(NFM, read_repair(Acc3)),
- Acc4 = #acc{replies=[fabric_util:kv(foo,1), fabric_util:kv(bar,1)]},
+ Acc4 = #acc{replies = [fabric_util:kv(foo, 1), fabric_util:kv(bar, 1)]},
?assertEqual(bar, read_repair(Acc4))
end).
-
t_handle_response_quorum_met() ->
- Foo1 = {ok, #doc{revs = {1,[<<"foo">>]}}},
- Foo2 = {ok, #doc{revs = {2,[<<"foo2">>,<<"foo">>]}}},
- Bar1 = {ok, #doc{revs = {1,[<<"bar">>]}}},
+ Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
+ Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
+ Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
?_test(begin
meck:expect(couch_log, notice, fun(_, _) -> ok end),
@@ -526,34 +528,34 @@ t_handle_response_quorum_met() ->
meck:expect(couch_stats, increment_counter, fun(_) -> ok end),
BasicOkAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,2)],
- q_reply=Foo1
+ state = r_met,
+ replies = [fabric_util:kv(Foo1, 2)],
+ q_reply = Foo1
},
?assertEqual(Foo1, handle_response(BasicOkAcc)),
WithAncestorsAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Foo2,2)],
- q_reply=Foo2
+ state = r_met,
+ replies = [fabric_util:kv(Foo1, 1), fabric_util:kv(Foo2, 2)],
+ q_reply = Foo2
},
?assertEqual(Foo2, handle_response(WithAncestorsAcc)),
% This also checks when the quorum isn't the most recent
% revision.
DeeperWinsAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,2), fabric_util:kv(Foo2,1)],
- q_reply=Foo1
+ state = r_met,
+ replies = [fabric_util:kv(Foo1, 2), fabric_util:kv(Foo2, 1)],
+ q_reply = Foo1
},
?assertEqual(Foo2, handle_response(DeeperWinsAcc)),
% Check that we return the proper doc based on rev
% (ie, pos is equal)
BiggerRevWinsAcc = #acc{
- state=r_met,
- replies=[fabric_util:kv(Foo1,1), fabric_util:kv(Bar1,2)],
- q_reply=Bar1
+ state = r_met,
+ replies = [fabric_util:kv(Foo1, 1), fabric_util:kv(Bar1, 2)],
+ q_reply = Bar1
},
?assertEqual(Foo1, handle_response(BiggerRevWinsAcc))
@@ -561,7 +563,6 @@ t_handle_response_quorum_met() ->
% read_repair_test for those conditions.
end).
-
t_get_doc_info() ->
?_test(begin
meck:expect(fabric, update_docs, fun(_, _, _) -> {ok, []} end),
@@ -603,7 +604,7 @@ t_get_doc_info() ->
end),
Rsp3 = fabric_doc_open:go("test", "one", [{doc_info, full}]),
?assertEqual({not_found, deleted}, Rsp3),
- {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full},deleted]),
+ {ok, Rec2} = fabric_doc_open:go("test", "one", [{doc_info, full}, deleted]),
?assert(is_record(Rec2, full_doc_info))
end).
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl
index 3d7b9dc3c..284187bff 100644
--- a/src/fabric/src/fabric_doc_open_revs.erl
+++ b/src/fabric/src/fabric_doc_open_revs.erl
@@ -34,8 +34,11 @@
}).
go(DbName, Id, Revs, Options) ->
- Workers = fabric_util:submit_jobs(mem3:shards(DbName,Id), open_revs,
- [Id, Revs, Options]),
+ Workers = fabric_util:submit_jobs(
+ mem3:shards(DbName, Id),
+ open_revs,
+ [Id, Revs, Options]
+ ),
R = couch_util:get_value(r, Options, integer_to_list(mem3:quorum(DbName))),
State = #state{
dbname = DbName,
@@ -48,34 +51,31 @@ go(DbName, Id, Revs, Options) ->
},
RexiMon = fabric_util:create_monitors(Workers),
try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, State) of
- {ok, all_workers_died} ->
- {error, all_workers_died};
- {ok, Replies} ->
- {ok, Replies};
- {timeout, #state{workers=DefunctWorkers}} ->
- fabric_util:log_timeout(DefunctWorkers, "open_revs"),
- {error, timeout};
- Else ->
- Else
+ {ok, all_workers_died} ->
+ {error, all_workers_died};
+ {ok, Replies} ->
+ {ok, Replies};
+ {timeout, #state{workers = DefunctWorkers}} ->
+ fabric_util:log_timeout(DefunctWorkers, "open_revs"),
+ {error, timeout};
+ Else ->
+ Else
after
rexi_monitor:stop(RexiMon)
end.
-
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, #state{workers=Workers}=State) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, #state{workers = Workers} = State) ->
NewState = State#state{
workers = lists:keydelete(NodeRef, #shard.node, Workers),
reply_error_count = State#state.reply_error_count + 1
},
handle_message({ok, []}, nil, NewState);
-
-handle_message({rexi_EXIT, _}, Worker, #state{workers=Workers}=State) ->
+handle_message({rexi_EXIT, _}, Worker, #state{workers = Workers} = State) ->
NewState = State#state{
workers = lists:delete(Worker, Workers),
reply_error_count = State#state.reply_error_count + 1
},
handle_message({ok, []}, nil, NewState);
-
handle_message({ok, RawReplies}, Worker, State) ->
#state{
dbname = DbName,
@@ -96,30 +96,40 @@ handle_message({ok, RawReplies}, Worker, State) ->
% Do not count error replies when checking quorum
RealReplyCount = ReplyCount + 1 - ReplyErrorCount,
QuorumReplies = RealReplyCount >= R,
- {NewReplies, QuorumMet, Repair} = case IsTree of
- true ->
- {NewReplies0, AllInternal, Repair0} =
+ {NewReplies, QuorumMet, Repair} =
+ case IsTree of
+ true ->
+ {NewReplies0, AllInternal, Repair0} =
tree_replies(PrevReplies, tree_sort(RawReplies)),
- NumLeafs = couch_key_tree:count_leafs(PrevReplies),
- SameNumRevs = length(RawReplies) == NumLeafs,
- QMet = AllInternal andalso SameNumRevs andalso QuorumReplies,
- % Don't set repair=true on the first reply
- {NewReplies0, QMet, (ReplyCount > 0) and Repair0};
- false ->
- {NewReplies0, MinCount} = dict_replies(PrevReplies, RawReplies),
- {NewReplies0, MinCount >= R, false}
- end,
- NewNodeRevs = if Worker == nil -> PrevNodeRevs; true ->
- IdRevs = lists:foldl(fun
- ({ok, #doc{revs = {Pos, [Rev | _]}}}, Acc) ->
- [{Pos, Rev} | Acc];
- (_, Acc) ->
- Acc
- end, [], RawReplies),
- if IdRevs == [] -> PrevNodeRevs; true ->
- [{Worker#shard.node, IdRevs} | PrevNodeRevs]
- end
- end,
+ NumLeafs = couch_key_tree:count_leafs(PrevReplies),
+ SameNumRevs = length(RawReplies) == NumLeafs,
+ QMet = AllInternal andalso SameNumRevs andalso QuorumReplies,
+ % Don't set repair=true on the first reply
+ {NewReplies0, QMet, (ReplyCount > 0) and Repair0};
+ false ->
+ {NewReplies0, MinCount} = dict_replies(PrevReplies, RawReplies),
+ {NewReplies0, MinCount >= R, false}
+ end,
+ NewNodeRevs =
+ if
+ Worker == nil ->
+ PrevNodeRevs;
+ true ->
+ IdRevs = lists:foldl(
+ fun
+ ({ok, #doc{revs = {Pos, [Rev | _]}}}, Acc) ->
+ [{Pos, Rev} | Acc];
+ (_, Acc) ->
+ Acc
+ end,
+ [],
+ RawReplies
+ ),
+ if
+ IdRevs == [] -> PrevNodeRevs;
+ true -> [{Worker#shard.node, IdRevs} | PrevNodeRevs]
+ end
+ end,
Complete = (ReplyCount =:= (WorkerCount - 1)),
@@ -127,13 +137,13 @@ handle_message({ok, RawReplies}, Worker, State) ->
true ->
fabric_util:cleanup(lists:delete(Worker, Workers)),
maybe_read_repair(
- DbName,
- IsTree,
- NewReplies,
- NewNodeRevs,
- ReplyCount + 1,
- InRepair orelse Repair
- ),
+ DbName,
+ IsTree,
+ NewReplies,
+ NewNodeRevs,
+ ReplyCount + 1,
+ InRepair orelse Repair
+ ),
{stop, format_reply(IsTree, NewReplies, RealReplyCount)};
false ->
{ok, State#state{
@@ -145,10 +155,8 @@ handle_message({ok, RawReplies}, Worker, State) ->
}}
end.
-
tree_replies(RevTree, []) ->
{RevTree, true, false};
-
tree_replies(RevTree0, [{ok, Doc} | Rest]) ->
{RevTree1, Done, Repair} = tree_replies(RevTree0, Rest),
Path = couch_doc:to_path(Doc),
@@ -160,7 +168,6 @@ tree_replies(RevTree0, [{ok, Doc} | Rest]) ->
{RevTree2, _} ->
{RevTree2, false, true}
end;
-
tree_replies(RevTree0, [{{not_found, missing}, {Pos, Rev}} | Rest]) ->
{RevTree1, Done, Repair} = tree_replies(RevTree0, Rest),
Node = {Rev, ?REV_MISSING, []},
@@ -172,34 +179,30 @@ tree_replies(RevTree0, [{{not_found, missing}, {Pos, Rev}} | Rest]) ->
{RevTree2, false, Repair}
end.
-
tree_sort(Replies) ->
SortFun = fun(A, B) -> sort_key(A) =< sort_key(B) end,
lists:sort(SortFun, Replies).
-
sort_key({ok, #doc{revs = {Pos, [Rev | _]}}}) ->
{Pos, Rev};
sort_key({{not_found, _}, {Pos, Rev}}) ->
{Pos, Rev}.
-
dict_replies(Dict, []) ->
case [Count || {_Key, {_Reply, Count}} <- Dict] of
[] -> {Dict, 0};
Counts -> {Dict, lists:min(Counts)}
end;
-
dict_replies(Dict, [Reply | Rest]) ->
NewDict = fabric_util:update_counter(Reply, 1, Dict),
dict_replies(NewDict, Rest).
-
maybe_read_repair(Db, IsTree, Replies, NodeRevs, ReplyCount, DoRepair) ->
- Docs = case IsTree of
- true -> tree_repair_docs(Replies, DoRepair);
- false -> dict_repair_docs(Replies, ReplyCount)
- end,
+ Docs =
+ case IsTree of
+ true -> tree_repair_docs(Replies, DoRepair);
+ false -> dict_repair_docs(Replies, ReplyCount)
+ end,
case Docs of
[] ->
ok;
@@ -207,22 +210,19 @@ maybe_read_repair(Db, IsTree, Replies, NodeRevs, ReplyCount, DoRepair) ->
erlang:spawn(fun() -> read_repair(Db, Docs, NodeRevs) end)
end.
-
tree_repair_docs(_Replies, false) ->
[];
-
tree_repair_docs(Replies, true) ->
Leafs = couch_key_tree:get_all_leafs(Replies),
[Doc || {Doc, {_Pos, _}} <- Leafs, is_record(Doc, doc)].
-
dict_repair_docs(Replies, ReplyCount) ->
NeedsRepair = lists:any(fun({_, {_, C}}) -> C < ReplyCount end, Replies),
- if not NeedsRepair -> []; true ->
- [Doc || {_, {{ok, Doc}, _}} <- Replies]
+ if
+ not NeedsRepair -> [];
+ true -> [Doc || {_, {{ok, Doc}, _}} <- Replies]
end.
-
read_repair(Db, Docs, NodeRevs) ->
Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}],
Res = fabric:update_docs(Db, Docs, Opts),
@@ -235,57 +235,63 @@ read_repair(Db, Docs, NodeRevs) ->
couch_log:notice("read_repair ~s ~s ~p", [Db, Id, Res])
end.
-
format_reply(_, _, RealReplyCount) when RealReplyCount =< 0 ->
all_workers_died;
-
format_reply(true, Replies, _) ->
tree_format_replies(Replies);
-
format_reply(false, Replies, _) ->
dict_format_replies(Replies).
-
tree_format_replies(RevTree) ->
Leafs = couch_key_tree:get_all_leafs(RevTree),
- lists:sort(lists:map(fun(Reply) ->
- case Reply of
- {?REV_MISSING, {Pos, [Rev]}} ->
- {{not_found, missing}, {Pos, Rev}};
- {Doc, _} when is_record(Doc, doc) ->
- {ok, Doc}
- end
- end, Leafs)).
-
+ lists:sort(
+ lists:map(
+ fun(Reply) ->
+ case Reply of
+ {?REV_MISSING, {Pos, [Rev]}} ->
+ {{not_found, missing}, {Pos, Rev}};
+ {Doc, _} when is_record(Doc, doc) ->
+ {ok, Doc}
+ end
+ end,
+ Leafs
+ )
+ ).
dict_format_replies(Dict) ->
Replies0 = [Reply || {_, {Reply, _}} <- Dict],
- AllFoundRevs = lists:foldl(fun(Reply, Acc) ->
- case Reply of
- {ok, #doc{revs = {Pos, [RevId | _]}}} ->
- [{Pos, RevId} | Acc];
- _ ->
- Acc
- end
- end, [], Replies0),
+ AllFoundRevs = lists:foldl(
+ fun(Reply, Acc) ->
+ case Reply of
+ {ok, #doc{revs = {Pos, [RevId | _]}}} ->
+ [{Pos, RevId} | Acc];
+ _ ->
+ Acc
+ end
+ end,
+ [],
+ Replies0
+ ),
%% Drop any not_found replies for which we
%% found the revision on a different node.
- Replies1 = lists:filter(fun(Reply) ->
- case Reply of
- {{not_found, missing}, Rev} ->
- not lists:member(Rev, AllFoundRevs);
- _ ->
- true
- end
- end, Replies0),
+ Replies1 = lists:filter(
+ fun(Reply) ->
+ case Reply of
+ {{not_found, missing}, Rev} ->
+ not lists:member(Rev, AllFoundRevs);
+ _ ->
+ true
+ end
+ end,
+ Replies0
+ ),
% Remove replies with shorter revision
% paths for a given revision.
collapse_duplicate_revs(Replies1).
-
collapse_duplicate_revs(Replies) ->
% The collapse logic requires that replies are
% sorted so that shorter rev paths are in
@@ -296,27 +302,23 @@ collapse_duplicate_revs(Replies) ->
% of C.
collapse_duplicate_revs_int(lists:sort(Replies)).
-
collapse_duplicate_revs_int([]) ->
[];
-
collapse_duplicate_revs_int([{ok, Doc1}, {ok, Doc2} | Rest]) ->
{D1, R1} = Doc1#doc.revs,
{D2, R2} = Doc2#doc.revs,
- Head = case D1 == D2 andalso lists:prefix(R1, R2) of
- true -> [];
- false -> [{ok, Doc1}]
- end,
+ Head =
+ case D1 == D2 andalso lists:prefix(R1, R2) of
+ true -> [];
+ false -> [{ok, Doc1}]
+ end,
Head ++ collapse_duplicate_revs([{ok, Doc2} | Rest]);
-
collapse_duplicate_revs_int([Reply | Rest]) ->
[Reply | collapse_duplicate_revs(Rest)].
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
setup_all() ->
config:start_link([]),
meck:new([fabric, couch_stats, couch_log]),
@@ -326,13 +328,10 @@ setup_all() ->
meck:expect(couch_log, notice, fun(_, _) -> ok end),
meck:expect(fabric_util, cleanup, fun(_) -> ok end).
-
-
teardown_all(_) ->
meck:unload(),
config:stop().
-
setup() ->
meck:reset([
couch_log,
@@ -341,37 +340,31 @@ setup() ->
fabric_util
]).
-
teardown(_) ->
ok.
-
state0(Revs, Latest) ->
#state{
worker_count = 3,
workers =
- [#shard{node='node1'}, #shard{node='node2'}, #shard{node='node3'}],
+ [#shard{node = 'node1'}, #shard{node = 'node2'}, #shard{node = 'node3'}],
r = 2,
revs = Revs,
latest = Latest
}.
-
-revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}].
-
+revs() -> [{1, <<"foo">>}, {1, <<"bar">>}, {1, <<"baz">>}].
foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}.
foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}.
foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}.
-fooNF() -> {{not_found, missing}, {1,<<"foo">>}}.
+fooNF() -> {{not_found, missing}, {1, <<"foo">>}}.
foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}.
bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}.
-barNF() -> {{not_found, missing}, {1,<<"bar">>}}.
-bazNF() -> {{not_found, missing}, {1,<<"baz">>}}.
+barNF() -> {{not_found, missing}, {1, <<"bar">>}}.
+bazNF() -> {{not_found, missing}, {1, <<"baz">>}}.
baz1() -> {ok, #doc{revs = {1, [<<"baz">>]}}}.
-
-
open_doc_revs_test_() ->
{
setup,
@@ -414,52 +407,47 @@ open_doc_revs_test_() ->
}
}.
-
% Tests for revs=all
-
check_empty_response_not_quorum() ->
% Simple smoke test that we don't think we're
% done with a first empty response
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
?_assertMatch(
{ok, #state{workers = [W2, W3]}},
handle_message({ok, []}, W1, state0(all, false))
).
-
check_basic_response() ->
% Check that we've handle a response
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
?_assertMatch(
{ok, #state{reply_count = 1, workers = [W2, W3]}},
handle_message({ok, [foo1(), bar1()]}, W1, state0(all, false))
).
-
check_finish_quorum() ->
% Two messages with the same revisions means we're done
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
S0 = state0(all, false),
{ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
Expect = {stop, [bar1(), foo1()]},
?assertEqual(Expect, handle_message({ok, [foo1(), bar1()]}, W2, S1))
end).
-
check_finish_quorum_newer() ->
% We count a descendant of a revision for quorum so
% foo1 should count for foo2 which means we're finished.
% We also validate that read_repair was triggered.
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
S0 = state0(all, false),
{ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
Expect = {stop, [bar1(), foo2()]},
@@ -472,14 +460,13 @@ check_finish_quorum_newer() ->
)
end).
-
check_no_quorum_on_second() ->
% Quorum not yet met for the foo revision so we
% would wait for w3
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
S0 = state0(all, false),
{ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
?assertMatch(
@@ -488,15 +475,14 @@ check_no_quorum_on_second() ->
)
end).
-
check_done_on_third() ->
% The third message of three means we're done no matter
% what. Every revision seen in this pattern should be
% included.
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
S0 = state0(all, false),
{ok, S1} = handle_message({ok, [foo1(), bar1()]}, W1, S0),
{ok, S2} = handle_message({ok, [bar1()]}, W2, S1),
@@ -504,15 +490,13 @@ check_done_on_third() ->
?assertEqual(Expect, handle_message({ok, [bar1()]}, W3, S2))
end).
-
% Tests for a specific list of revs
-
check_specific_revs_first_msg() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
S0 = state0(revs(), false),
?assertMatch(
{ok, #state{reply_count = 1, workers = [W2, W3]}},
@@ -520,11 +504,10 @@ check_specific_revs_first_msg() ->
)
end).
-
check_revs_done_on_agreement() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
S0 = state0(revs(), false),
Msg = {ok, [foo1(), bar1(), bazNF()]},
{ok, S1} = handle_message(Msg, W1, S0),
@@ -532,11 +515,10 @@ check_revs_done_on_agreement() ->
?assertEqual(Expect, handle_message(Msg, W2, S1))
end).
-
check_latest_true() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
S0 = state0(revs(), true),
Msg1 = {ok, [foo2(), bar1(), bazNF()]},
Msg2 = {ok, [foo2(), bar1(), bazNF()]},
@@ -545,11 +527,10 @@ check_latest_true() ->
?assertEqual(Expect, handle_message(Msg2, W2, S1))
end).
-
check_ancestor_counted_in_quorum() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
S0 = state0(revs(), true),
Msg1 = {ok, [foo1(), bar1(), bazNF()]},
Msg2 = {ok, [foo2(), bar1(), bazNF()]},
@@ -564,11 +545,10 @@ check_ancestor_counted_in_quorum() ->
?assertEqual(Expect, handle_message(Msg1, W1, S2))
end).
-
check_not_found_counts_for_descendant() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
S0 = state0(revs(), true),
Msg1 = {ok, [foo1(), bar1(), bazNF()]},
Msg2 = {ok, [foo1(), bar1(), baz1()]},
@@ -583,12 +563,11 @@ check_not_found_counts_for_descendant() ->
?assertEqual(Expect, handle_message(Msg1, W1, S2))
end).
-
check_worker_error_skipped() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
S0 = state0(revs(), true),
Msg1 = {ok, [foo1(), bar1(), baz1()]},
Msg2 = {rexi_EXIT, reason},
@@ -600,12 +579,11 @@ check_worker_error_skipped() ->
?assertEqual(Expect, handle_message(Msg3, W3, S2))
end).
-
check_quorum_only_counts_valid_responses() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
S0 = state0(revs(), true),
Msg1 = {rexi_EXIT, reason},
Msg2 = {rexi_EXIT, reason},
@@ -617,12 +595,11 @@ check_quorum_only_counts_valid_responses() ->
?assertEqual(Expect, handle_message(Msg3, W3, S2))
end).
-
check_empty_list_when_no_workers_reply() ->
?_test(begin
- W1 = #shard{node='node1'},
- W2 = #shard{node='node2'},
- W3 = #shard{node='node3'},
+ W1 = #shard{node = 'node1'},
+ W2 = #shard{node = 'node2'},
+ W3 = #shard{node = 'node3'},
S0 = state0(revs(), true),
Msg1 = {rexi_EXIT, reason},
Msg2 = {rexi_EXIT, reason},
@@ -634,7 +611,6 @@ check_empty_list_when_no_workers_reply() ->
?assertEqual(Expect, handle_message(Msg3, W3, S2))
end).
-
check_node_rev_stored() ->
?_test(begin
W1 = #shard{node = node1},
@@ -644,7 +620,6 @@ check_node_rev_stored() ->
?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
end).
-
check_node_rev_store_head_only() ->
?_test(begin
W1 = #shard{node = node1},
@@ -654,7 +629,6 @@ check_node_rev_store_head_only() ->
?assertEqual([{node1, [{2, <<"foo2">>}]}], S1#state.node_revs)
end).
-
check_node_rev_store_multiple() ->
?_test(begin
W1 = #shard{node = node1},
@@ -662,12 +636,11 @@ check_node_rev_store_multiple() ->
{ok, S1} = handle_message({ok, [foo1(), foo2()]}, W1, S0),
?assertEqual(
- [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
- S1#state.node_revs
- )
+ [{node1, [{2, <<"foo2">>}, {1, <<"foo">>}]}],
+ S1#state.node_revs
+ )
end).
-
check_node_rev_dont_store_errors() ->
?_test(begin
W1 = #shard{node = node1},
@@ -677,7 +650,6 @@ check_node_rev_dont_store_errors() ->
?assertEqual([], S1#state.node_revs)
end).
-
check_node_rev_store_non_errors() ->
?_test(begin
W1 = #shard{node = node1},
@@ -687,7 +659,6 @@ check_node_rev_store_non_errors() ->
?assertEqual([{node1, [{1, <<"foo">>}]}], S1#state.node_revs)
end).
-
check_node_rev_store_concatenate() ->
?_test(begin
W2 = #shard{node = node2},
@@ -696,12 +667,11 @@ check_node_rev_store_concatenate() ->
{ok, S2} = handle_message({ok, [foo2()]}, W2, S1),
?assertEqual(
- [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
- S2#state.node_revs
- )
+ [{node2, [{2, <<"foo2">>}]}, {node1, [{1, <<"foo">>}]}],
+ S2#state.node_revs
+ )
end).
-
check_node_rev_store_concantenate_multiple() ->
?_test(begin
W2 = #shard{node = node2},
@@ -710,15 +680,14 @@ check_node_rev_store_concantenate_multiple() ->
{ok, S2} = handle_message({ok, [foo2(), bar1()]}, W2, S1),
?assertEqual(
- [
- {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
- {node1, [{1, <<"foo">>}]}
- ],
- S2#state.node_revs
- )
+ [
+ {node2, [{1, <<"bar">>}, {2, <<"foo2">>}]},
+ {node1, [{1, <<"foo">>}]}
+ ],
+ S2#state.node_revs
+ )
end).
-
check_node_rev_unmodified_on_down_or_exit() ->
?_test(begin
W2 = #shard{node = node2},
@@ -728,19 +697,18 @@ check_node_rev_unmodified_on_down_or_exit() ->
Down = {rexi_DOWN, nodedown, {nil, node()}, nil},
{ok, S2} = handle_message(Down, W2, S1),
?assertEqual(
- [{node1, [{1, <<"foo">>}]}],
- S2#state.node_revs
- ),
+ [{node1, [{1, <<"foo">>}]}],
+ S2#state.node_revs
+ ),
Exit = {rexi_EXIT, reason},
{ok, S3} = handle_message(Exit, W2, S1),
?assertEqual(
- [{node1, [{1, <<"foo">>}]}],
- S3#state.node_revs
- )
+ [{node1, [{1, <<"foo">>}]}],
+ S3#state.node_revs
+ )
end).
-
check_not_found_replies_are_removed_when_doc_found() ->
?_test(begin
Replies = replies_to_dict([foo1(), bar1(), fooNF()]),
@@ -786,7 +754,6 @@ check_not_found_removed_and_longer_rev_list() ->
?assertEqual(Expect, dict_format_replies(Replies))
end).
-
replies_to_dict(Replies) ->
[reply_to_element(R) || R <- Replies].
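The hunks above repeatedly apply two of the mechanical changes in this commit: spaces around `=` in record fields (`#shard{node = 'node1'}` instead of `#shard{node='node1'}`) and removal of the blank line that used to separate adjacent function clauses. A minimal, self-contained sketch of the resulting layout; the module name is made up and the `#shard` record is redefined locally purely for illustration (the real definition lives in mem3/include/mem3.hrl):

```
-module(erlfmt_record_example).
-export([worker/1, worker/2]).

%% Standalone example module; not part of the CouchDB tree.
%% Local stand-in for the real record in mem3/include/mem3.hrl.
-record(shard, {node, ref}).

%% Record fields get spaces around `=`, and no blank line separates
%% clauses of the same function after the reformat.
worker(Node) ->
    worker(Node, make_ref()).
worker(Node, Ref) ->
    #shard{node = Node, ref = Ref}.
```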
diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl
index 3492f88c5..64221ab0e 100644
--- a/src/fabric/src/fabric_doc_purge.erl
+++ b/src/fabric/src/fabric_doc_purge.erl
@@ -12,16 +12,13 @@
-module(fabric_doc_purge).
-
-export([
go/3
]).
-
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-record(acc, {
worker_uuids,
resps,
@@ -29,7 +26,6 @@
w
}).
-
go(_, [], _) ->
{ok, []};
go(DbName, IdsRevs, Options) ->
@@ -37,20 +33,32 @@ go(DbName, IdsRevs, Options) ->
{UUIDs, Reqs} = create_reqs(IdsRevs, [], []),
% Fire off rexi workers for each shard.
- {Workers, WorkerUUIDs} = dict:fold(fun(Shard, ShardReqs, {Ws, WUUIDs}) ->
- #shard{name = ShardDbName, node = Node} = Shard,
- Args = [ShardDbName, ShardReqs, Options],
- Ref = rexi:cast(Node, {fabric_rpc, purge_docs, Args}),
- Worker = Shard#shard{ref=Ref},
- ShardUUIDs = [UUID || {UUID, _Id, _Revs} <- ShardReqs],
- {[Worker | Ws], [{Worker, ShardUUIDs} | WUUIDs]}
- end, {[], []}, group_reqs_by_shard(DbName, Reqs)),
-
- UUIDCounts = lists:foldl(fun({_Worker, WUUIDs}, CountAcc) ->
- lists:foldl(fun(UUID, InnerCountAcc) ->
- dict:update_counter(UUID, 1, InnerCountAcc)
- end, CountAcc, WUUIDs)
- end, dict:new(), WorkerUUIDs),
+ {Workers, WorkerUUIDs} = dict:fold(
+ fun(Shard, ShardReqs, {Ws, WUUIDs}) ->
+ #shard{name = ShardDbName, node = Node} = Shard,
+ Args = [ShardDbName, ShardReqs, Options],
+ Ref = rexi:cast(Node, {fabric_rpc, purge_docs, Args}),
+ Worker = Shard#shard{ref = Ref},
+ ShardUUIDs = [UUID || {UUID, _Id, _Revs} <- ShardReqs],
+ {[Worker | Ws], [{Worker, ShardUUIDs} | WUUIDs]}
+ end,
+ {[], []},
+ group_reqs_by_shard(DbName, Reqs)
+ ),
+
+ UUIDCounts = lists:foldl(
+ fun({_Worker, WUUIDs}, CountAcc) ->
+ lists:foldl(
+ fun(UUID, InnerCountAcc) ->
+ dict:update_counter(UUID, 1, InnerCountAcc)
+ end,
+ CountAcc,
+ WUUIDs
+ )
+ end,
+ dict:new(),
+ WorkerUUIDs
+ ),
RexiMon = fabric_util:create_monitors(Workers),
Timeout = fabric_util:request_timeout(),
@@ -60,29 +68,37 @@ go(DbName, IdsRevs, Options) ->
uuid_counts = UUIDCounts,
w = w(DbName, Options)
},
- Acc2 = try rexi_utils:recv(Workers, #shard.ref,
- fun handle_message/3, Acc0, infinity, Timeout) of
- {ok, Acc1} ->
- Acc1;
- {timeout, Acc1} ->
- #acc{
- worker_uuids = WorkerUUIDs,
- resps = Resps
- } = Acc1,
- DefunctWorkers = [Worker || {Worker, _} <- WorkerUUIDs],
- fabric_util:log_timeout(DefunctWorkers, "purge_docs"),
- NewResps = append_errors(timeout, WorkerUUIDs, Resps),
- Acc1#acc{worker_uuids = [], resps = NewResps};
- Else ->
- Else
- after
- rexi_monitor:stop(RexiMon)
- end,
+ Acc2 =
+ try
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ Acc0,
+ infinity,
+ Timeout
+ )
+ of
+ {ok, Acc1} ->
+ Acc1;
+ {timeout, Acc1} ->
+ #acc{
+ worker_uuids = WorkerUUIDs,
+ resps = Resps
+ } = Acc1,
+ DefunctWorkers = [Worker || {Worker, _} <- WorkerUUIDs],
+ fabric_util:log_timeout(DefunctWorkers, "purge_docs"),
+ NewResps = append_errors(timeout, WorkerUUIDs, Resps),
+ Acc1#acc{worker_uuids = [], resps = NewResps};
+ Else ->
+ Else
+ after
+ rexi_monitor:stop(RexiMon)
+ end,
FinalResps = format_resps(UUIDs, Acc2),
{resp_health(FinalResps), FinalResps}.
-
handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
#acc{
worker_uuids = WorkerUUIDs,
@@ -92,7 +108,6 @@ handle_message({rexi_DOWN, _, {_, Node}, _}, _Worker, Acc) ->
{Failed, Rest} = lists:partition(Pred, WorkerUUIDs),
NewResps = append_errors(internal_server_error, Failed, Resps),
maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-
handle_message({rexi_EXIT, _}, Worker, Acc) ->
#acc{
worker_uuids = WorkerUUIDs,
@@ -101,7 +116,6 @@ handle_message({rexi_EXIT, _}, Worker, Acc) ->
{value, WorkerPair, Rest} = lists:keytake(Worker, 1, WorkerUUIDs),
NewResps = append_errors(internal_server_error, [WorkerPair], Resps),
maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-
handle_message({ok, Replies}, Worker, Acc) ->
#acc{
worker_uuids = WorkerUUIDs,
@@ -110,43 +124,49 @@ handle_message({ok, Replies}, Worker, Acc) ->
{value, {_W, UUIDs}, Rest} = lists:keytake(Worker, 1, WorkerUUIDs),
NewResps = append_resps(UUIDs, Replies, Resps),
maybe_stop(Acc#acc{worker_uuids = Rest, resps = NewResps});
-
handle_message({bad_request, Msg}, _, _) ->
throw({bad_request, Msg}).
-
create_reqs([], UUIDs, Reqs) ->
{lists:reverse(UUIDs), lists:reverse(Reqs)};
-
create_reqs([{Id, Revs} | RestIdsRevs], UUIDs, Reqs) ->
UUID = couch_uuids:new(),
NewUUIDs = [UUID | UUIDs],
NewReqs = [{UUID, Id, Revs} | Reqs],
create_reqs(RestIdsRevs, NewUUIDs, NewReqs).
-
group_reqs_by_shard(DbName, Reqs) ->
- lists:foldl(fun({_UUID, Id, _Revs} = Req, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, Req, D1)
- end, D0, mem3:shards(DbName, Id))
- end, dict:new(), Reqs).
-
+ lists:foldl(
+ fun({_UUID, Id, _Revs} = Req, D0) ->
+ lists:foldl(
+ fun(Shard, D1) ->
+ dict:append(Shard, Req, D1)
+ end,
+ D0,
+ mem3:shards(DbName, Id)
+ )
+ end,
+ dict:new(),
+ Reqs
+ ).
w(DbName, Options) ->
try
list_to_integer(couch_util:get_value(w, Options))
- catch _:_ ->
- mem3:quorum(DbName)
+ catch
+ _:_ ->
+ mem3:quorum(DbName)
end.
-
append_errors(Type, WorkerUUIDs, Resps) ->
- lists:foldl(fun({_Worker, UUIDs}, RespAcc) ->
- Errors = [{error, Type} || _UUID <- UUIDs],
- append_resps(UUIDs, Errors, RespAcc)
- end, Resps, WorkerUUIDs).
-
+ lists:foldl(
+ fun({_Worker, UUIDs}, RespAcc) ->
+ Errors = [{error, Type} || _UUID <- UUIDs],
+ append_resps(UUIDs, Errors, RespAcc)
+ end,
+ Resps,
+ WorkerUUIDs
+ ).
append_resps([], [], Resps) ->
Resps;
@@ -154,24 +174,27 @@ append_resps([UUID | RestUUIDs], [Reply | RestReplies], Resps) ->
NewResps = dict:append(UUID, Reply, Resps),
append_resps(RestUUIDs, RestReplies, NewResps).
-
maybe_stop(#acc{worker_uuids = []} = Acc) ->
{stop, Acc};
maybe_stop(#acc{resps = Resps, uuid_counts = Counts, w = W} = Acc) ->
try
- dict:fold(fun(UUID, UUIDResps, _) ->
- UUIDCount = dict:fetch(UUID, Counts),
- case has_quorum(UUIDResps, UUIDCount, W) of
- true -> ok;
- false -> throw(keep_going)
- end
- end, nil, Resps),
+ dict:fold(
+ fun(UUID, UUIDResps, _) ->
+ UUIDCount = dict:fetch(UUID, Counts),
+ case has_quorum(UUIDResps, UUIDCount, W) of
+ true -> ok;
+ false -> throw(keep_going)
+ end
+ end,
+ nil,
+ Resps
+ ),
{stop, Acc}
- catch throw:keep_going ->
- {ok, Acc}
+ catch
+ throw:keep_going ->
+ {ok, Acc}
end.
-
format_resps(UUIDs, #acc{} = Acc) ->
#acc{
resps = Resps,
@@ -185,19 +208,22 @@ format_resps(UUIDs, #acc{} = Acc) ->
[{UUID, Error} | ReplyAcc];
_ ->
AllRevs = lists:usort(lists:flatten(OkReplies)),
- IsOk = length(OkReplies) >= W
- andalso length(lists:usort(OkReplies)) == 1,
- Health = if IsOk -> ok; true -> accepted end,
+ IsOk =
+ length(OkReplies) >= W andalso
+ length(lists:usort(OkReplies)) == 1,
+ Health =
+ if
+ IsOk -> ok;
+ true -> accepted
+ end,
[{UUID, {Health, AllRevs}} | ReplyAcc]
end
end,
FinalReplies = dict:fold(FoldFun, [], Resps),
couch_util:reorder_results(UUIDs, FinalReplies);
-
format_resps(_UUIDs, Else) ->
Else.
-
resp_health(Resps) ->
Healths = lists:usort([H || {H, _} <- Resps]),
HasError = lists:member(error, Healths),
@@ -210,12 +236,15 @@ resp_health(Resps) ->
true -> error
end.
-
has_quorum(Resps, Count, W) ->
OkResps = [R || {ok, _} = R <- Resps],
- OkCounts = lists:foldl(fun(R, Acc) ->
- orddict:update_counter(R, 1, Acc)
- end, orddict:new(), OkResps),
+ OkCounts = lists:foldl(
+ fun(R, Acc) ->
+ orddict:update_counter(R, 1, Acc)
+ end,
+ orddict:new(),
+ OkResps
+ ),
MaxOk = lists:max([0 | element(2, lists:unzip(OkCounts))]),
if
MaxOk >= W -> true;
@@ -223,7 +252,6 @@ has_quorum(Resps, Count, W) ->
true -> false
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -250,17 +278,14 @@ purge_test_() ->
]
}.
-
setup() ->
meck:new(couch_log),
meck:expect(couch_log, warning, fun(_, _) -> ok end),
meck:expect(couch_log, notice, fun(_, _) -> ok end).
-
teardown(_) ->
meck:unload().
-
t_w2_ok() ->
?_test(begin
Acc0 = create_init_acc(2),
@@ -280,7 +305,6 @@ t_w2_ok() ->
?assertEqual(ok, resp_health(Resps))
end).
-
t_w3_ok() ->
?_test(begin
Acc0 = create_init_acc(3),
@@ -303,7 +327,6 @@ t_w3_ok() ->
?assertEqual(ok, resp_health(Resps))
end).
-
t_w2_mixed_accepted() ->
?_test(begin
Acc0 = create_init_acc(2),
@@ -331,7 +354,6 @@ t_w2_mixed_accepted() ->
?assertEqual(accepted, resp_health(Resps))
end).
-
t_w3_mixed_accepted() ->
?_test(begin
Acc0 = create_init_acc(3),
@@ -359,7 +381,6 @@ t_w3_mixed_accepted() ->
?assertEqual(accepted, resp_health(Resps))
end).
-
t_w2_exit1_ok() ->
?_test(begin
Acc0 = create_init_acc(2),
@@ -384,7 +405,6 @@ t_w2_exit1_ok() ->
?assertEqual(ok, resp_health(Resps))
end).
-
t_w2_exit2_accepted() ->
?_test(begin
Acc0 = create_init_acc(2),
@@ -409,7 +429,6 @@ t_w2_exit2_accepted() ->
?assertEqual(accepted, resp_health(Resps))
end).
-
t_w2_exit3_error() ->
?_test(begin
Acc0 = create_init_acc(2),
@@ -436,7 +455,6 @@ t_w2_exit3_error() ->
?assertEqual(error, resp_health(Resps))
end).
-
t_w4_accepted() ->
% Make sure we return when all workers have responded
% rather than wait around for a timeout if a user asks
@@ -464,7 +482,6 @@ t_w4_accepted() ->
?assertEqual(accepted, resp_health(Resps))
end).
-
t_mixed_ok_accepted() ->
?_test(begin
WorkerUUIDs = [
@@ -500,7 +517,6 @@ t_mixed_ok_accepted() ->
?assertEqual(accepted, resp_health(Resps))
end).
-
t_mixed_errors() ->
?_test(begin
WorkerUUIDs = [
@@ -535,7 +551,6 @@ t_mixed_errors() ->
?assertEqual(error, resp_health(Resps))
end).
-
create_init_acc(W) ->
UUID1 = <<"uuid1">>,
UUID2 = <<"uuid2">>,
@@ -546,9 +561,12 @@ create_init_acc(W) ->
% Create our worker_uuids. We're relying on the fact that
% we're using a fake Q=1 db so we don't have to worry
% about any hashing here.
- WorkerUUIDs = lists:map(fun(Shard) ->
- {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
- end, Shards),
+ WorkerUUIDs = lists:map(
+ fun(Shard) ->
+ {Shard#shard{ref = erlang:make_ref()}, [UUID1, UUID2]}
+ end,
+ Shards
+ ),
#acc{
worker_uuids = WorkerUUIDs,
@@ -557,15 +575,17 @@ create_init_acc(W) ->
w = W
}.
-
worker(N, #acc{worker_uuids = WorkerUUIDs}) ->
{Worker, _} = lists:nth(N, WorkerUUIDs),
Worker.
-
check_quorum(Acc, Expect) ->
- dict:fold(fun(_Shard, Resps, _) ->
- ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
- end, nil, Acc#acc.resps).
+ dict:fold(
+ fun(_Shard, Resps, _) ->
+ ?assertEqual(Expect, has_quorum(Resps, 3, Acc#acc.w))
+ end,
+ nil,
+ Acc#acc.resps
+ ).
-endif.
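A recurring pattern in the fabric_doc_purge changes above is breaking a `lists:foldl/3` (or `dict:fold/3`) call so that the fun, the initial accumulator, and the collection each sit on their own line, with the closing `)` dedented to the call's indentation. A minimal runnable sketch of that shape (example module and names are illustrative only):

```
-module(erlfmt_fold_example).
-export([count/1]).

%% Standalone example module; not part of the CouchDB tree.
%% Count occurrences of each element, laid out the way the reformatted
%% fabric code writes multi-line higher-order calls: one argument per line.
count(Items) ->
    lists:foldl(
        fun(Item, Acc) ->
            orddict:update_counter(Item, 1, Acc)
        end,
        orddict:new(),
        Items
    ).
```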
diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl
index 69babc14b..62e180ae2 100644
--- a/src/fabric/src/fabric_doc_update.erl
+++ b/src/fabric/src/fabric_doc_update.erl
@@ -25,48 +25,53 @@ go(DbName, AllDocs0, Opts) ->
AllDocs = tag_docs(AllDocs1),
validate_atomic_update(DbName, AllDocs, lists:member(all_or_nothing, Opts)),
Options = lists:delete(all_or_nothing, Opts),
- GroupedDocs = lists:map(fun({#shard{name=Name, node=Node} = Shard, Docs}) ->
- Docs1 = untag_docs(Docs),
- Ref = rexi:cast(Node, {fabric_rpc, update_docs, [Name,Docs1,Options]}),
- {Shard#shard{ref=Ref}, Docs}
- end, group_docs_by_shard(DbName, AllDocs)),
+ GroupedDocs = lists:map(
+ fun({#shard{name = Name, node = Node} = Shard, Docs}) ->
+ Docs1 = untag_docs(Docs),
+ Ref = rexi:cast(Node, {fabric_rpc, update_docs, [Name, Docs1, Options]}),
+ {Shard#shard{ref = Ref}, Docs}
+ end,
+ group_docs_by_shard(DbName, AllDocs)
+ ),
{Workers, _} = lists:unzip(GroupedDocs),
RexiMon = fabric_util:create_monitors(Workers),
W = couch_util:get_value(w, Options, integer_to_list(mem3:quorum(DbName))),
- Acc0 = {length(Workers), length(AllDocs), list_to_integer(W), GroupedDocs,
- dict:new()},
+ Acc0 = {length(Workers), length(AllDocs), list_to_integer(W), GroupedDocs, dict:new()},
Timeout = fabric_util:request_timeout(),
try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, Acc0, infinity, Timeout) of
- {ok, {Health, Results}}
- when Health =:= ok; Health =:= accepted; Health =:= error ->
- {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]};
- {timeout, Acc} ->
- {_, _, W1, GroupedDocs1, DocReplDict} = Acc,
- {DefunctWorkers, _} = lists:unzip(GroupedDocs1),
- fabric_util:log_timeout(DefunctWorkers, "update_docs"),
- {Health, _, Resp} = dict:fold(fun force_reply/3, {ok, W1, []},
- DocReplDict),
- {Health, [R || R <- couch_util:reorder_results(AllDocs, Resp), R =/= noreply]};
- Else ->
- Else
+ {ok, {Health, Results}} when
+ Health =:= ok; Health =:= accepted; Health =:= error
+ ->
+ {Health, [R || R <- couch_util:reorder_results(AllDocs, Results), R =/= noreply]};
+ {timeout, Acc} ->
+ {_, _, W1, GroupedDocs1, DocReplDict} = Acc,
+ {DefunctWorkers, _} = lists:unzip(GroupedDocs1),
+ fabric_util:log_timeout(DefunctWorkers, "update_docs"),
+ {Health, _, Resp} = dict:fold(
+ fun force_reply/3,
+ {ok, W1, []},
+ DocReplDict
+ ),
+ {Health, [R || R <- couch_util:reorder_results(AllDocs, Resp), R =/= noreply]};
+ Else ->
+ Else
after
rexi_monitor:stop(RexiMon)
end.
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, Acc0) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, Acc0) ->
{_, LenDocs, W, GroupedDocs, DocReplyDict} = Acc0,
- NewGrpDocs = [X || {#shard{node=N}, _} = X <- GroupedDocs, N =/= NodeRef],
+ NewGrpDocs = [X || {#shard{node = N}, _} = X <- GroupedDocs, N =/= NodeRef],
skip_message({length(NewGrpDocs), LenDocs, W, NewGrpDocs, DocReplyDict});
-
handle_message({rexi_EXIT, _}, Worker, Acc0) ->
- {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
- NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
- skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
+ {WC, LenDocs, W, GrpDocs, DocReplyDict} = Acc0,
+ NewGrpDocs = lists:keydelete(Worker, 1, GrpDocs),
+ skip_message({WC - 1, LenDocs, W, NewGrpDocs, DocReplyDict});
handle_message(internal_server_error, Worker, Acc0) ->
% happens when we fail to load validation functions in an RPC worker
- {WC,LenDocs,W,GrpDocs,DocReplyDict} = Acc0,
- NewGrpDocs = lists:keydelete(Worker,1,GrpDocs),
- skip_message({WC-1,LenDocs,W,NewGrpDocs,DocReplyDict});
+ {WC, LenDocs, W, GrpDocs, DocReplyDict} = Acc0,
+ NewGrpDocs = lists:keydelete(Worker, 1, GrpDocs),
+ skip_message({WC - 1, LenDocs, W, NewGrpDocs, DocReplyDict});
handle_message(attachment_chunk_received, _Worker, Acc0) ->
{ok, Acc0};
handle_message({ok, Replies}, Worker, Acc0) ->
@@ -74,21 +79,24 @@ handle_message({ok, Replies}, Worker, Acc0) ->
{value, {_, Docs}, NewGrpDocs} = lists:keytake(Worker, 1, GroupedDocs),
DocReplyDict = append_update_replies(Docs, Replies, DocReplyDict0),
case {WaitingCount, dict:size(DocReplyDict)} of
- {1, _} ->
- % last message has arrived, we need to conclude things
- {Health, W, Reply} = dict:fold(fun force_reply/3, {ok, W, []},
- DocReplyDict),
- {stop, {Health, Reply}};
- {_, DocCount} ->
- % we've got at least one reply for each document, let's take a look
- case dict:fold(fun maybe_reply/3, {stop,W,[]}, DocReplyDict) of
- continue ->
- {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}};
- {stop, W, FinalReplies} ->
- {stop, {ok, FinalReplies}}
- end;
- _ ->
- {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}}
+ {1, _} ->
+ % last message has arrived, we need to conclude things
+ {Health, W, Reply} = dict:fold(
+ fun force_reply/3,
+ {ok, W, []},
+ DocReplyDict
+ ),
+ {stop, {Health, Reply}};
+ {_, DocCount} ->
+ % we've got at least one reply for each document, let's take a look
+ case dict:fold(fun maybe_reply/3, {stop, W, []}, DocReplyDict) of
+ continue ->
+ {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}};
+ {stop, W, FinalReplies} ->
+ {stop, {ok, FinalReplies}}
+ end;
+ _ ->
+ {ok, {WaitingCount - 1, DocCount, W, NewGrpDocs, DocReplyDict}}
end;
handle_message({missing_stub, Stub}, _, _) ->
throw({missing_stub, Stub});
@@ -106,53 +114,61 @@ before_doc_update(DbName, Docs, Opts) ->
{true, _} ->
%% cluster db is expensive to create so we only do it if we have to
Db = fabric_util:open_cluster_db(DbName, Opts),
- [couch_replicator_docs:before_doc_update(Doc, Db, replicated_changes)
- || Doc <- Docs];
+ [
+ couch_replicator_docs:before_doc_update(Doc, Db, replicated_changes)
+ || Doc <- Docs
+ ];
{_, true} ->
%% cluster db is expensive to create so we only do it if we have to
Db = fabric_util:open_cluster_db(DbName, Opts),
- [couch_users_db:before_doc_update(Doc, Db, interactive_edit)
- || Doc <- Docs];
+ [
+ couch_users_db:before_doc_update(Doc, Db, interactive_edit)
+ || Doc <- Docs
+ ];
_ ->
Docs
end.
tag_docs([]) ->
[];
-tag_docs([#doc{meta=Meta}=Doc | Rest]) ->
- [Doc#doc{meta=[{ref, make_ref()} | Meta]} | tag_docs(Rest)].
+tag_docs([#doc{meta = Meta} = Doc | Rest]) ->
+ [Doc#doc{meta = [{ref, make_ref()} | Meta]} | tag_docs(Rest)].
untag_docs([]) ->
[];
-untag_docs([#doc{meta=Meta}=Doc | Rest]) ->
- [Doc#doc{meta=lists:keydelete(ref, 1, Meta)} | untag_docs(Rest)].
+untag_docs([#doc{meta = Meta} = Doc | Rest]) ->
+ [Doc#doc{meta = lists:keydelete(ref, 1, Meta)} | untag_docs(Rest)].
force_reply(Doc, [], {_, W, Acc}) ->
{error, W, [{Doc, {error, internal_server_error}} | Acc]};
-force_reply(Doc, [FirstReply|_] = Replies, {Health, W, Acc}) ->
+force_reply(Doc, [FirstReply | _] = Replies, {Health, W, Acc}) ->
case update_quorum_met(W, Replies) of
- {true, Reply} ->
- {Health, W, [{Doc,Reply} | Acc]};
- false ->
- case [Reply || {ok, Reply} <- Replies] of
- [] ->
- % check if all errors are identical, if so inherit health
- case lists:all(fun(E) -> E =:= FirstReply end, Replies) of
- true ->
- CounterKey = [fabric, doc_update, errors],
- couch_stats:increment_counter(CounterKey),
- {Health, W, [{Doc, FirstReply} | Acc]};
- false ->
- CounterKey = [fabric, doc_update, mismatched_errors],
- couch_stats:increment_counter(CounterKey),
- {error, W, [{Doc, FirstReply} | Acc]}
- end;
- [AcceptedRev | _] ->
- CounterKey = [fabric, doc_update, write_quorum_errors],
- couch_stats:increment_counter(CounterKey),
- NewHealth = case Health of ok -> accepted; _ -> Health end,
- {NewHealth, W, [{Doc, {accepted,AcceptedRev}} | Acc]}
- end
+ {true, Reply} ->
+ {Health, W, [{Doc, Reply} | Acc]};
+ false ->
+ case [Reply || {ok, Reply} <- Replies] of
+ [] ->
+ % check if all errors are identical, if so inherit health
+ case lists:all(fun(E) -> E =:= FirstReply end, Replies) of
+ true ->
+ CounterKey = [fabric, doc_update, errors],
+ couch_stats:increment_counter(CounterKey),
+ {Health, W, [{Doc, FirstReply} | Acc]};
+ false ->
+ CounterKey = [fabric, doc_update, mismatched_errors],
+ couch_stats:increment_counter(CounterKey),
+ {error, W, [{Doc, FirstReply} | Acc]}
+ end;
+ [AcceptedRev | _] ->
+ CounterKey = [fabric, doc_update, write_quorum_errors],
+ couch_stats:increment_counter(CounterKey),
+ NewHealth =
+ case Health of
+ ok -> accepted;
+ _ -> Health
+ end,
+ {NewHealth, W, [{Doc, {accepted, AcceptedRev}} | Acc]}
+ end
end.
maybe_reply(_, _, continue) ->
@@ -160,21 +176,24 @@ maybe_reply(_, _, continue) ->
continue;
maybe_reply(Doc, Replies, {stop, W, Acc}) ->
case update_quorum_met(W, Replies) of
- {true, Reply} ->
- {stop, W, [{Doc, Reply} | Acc]};
- false ->
- continue
+ {true, Reply} ->
+ {stop, W, [{Doc, Reply} | Acc]};
+ false ->
+ continue
end.
update_quorum_met(W, Replies) ->
- Counters = lists:foldl(fun(R,D) -> orddict:update_counter(R,1,D) end,
- orddict:new(), Replies),
+ Counters = lists:foldl(
+ fun(R, D) -> orddict:update_counter(R, 1, D) end,
+ orddict:new(),
+ Replies
+ ),
GoodReplies = lists:filter(fun good_reply/1, Counters),
case lists:dropwhile(fun({_, Count}) -> Count < W end, GoodReplies) of
- [] ->
- false;
- [{FinalReply, _} | _] ->
- {true, FinalReply}
+ [] ->
+ false;
+ [{FinalReply, _} | _] ->
+ {true, FinalReply}
end.
good_reply({{ok, _}, _}) ->
@@ -186,18 +205,28 @@ good_reply(_) ->
-spec group_docs_by_shard(binary(), [#doc{}]) -> [{#shard{}, [#doc{}]}].
group_docs_by_shard(DbName, Docs) ->
- dict:to_list(lists:foldl(fun(#doc{id=Id} = Doc, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, Doc, D1)
- end, D0, mem3:shards(DbName,Id))
- end, dict:new(), Docs)).
+ dict:to_list(
+ lists:foldl(
+ fun(#doc{id = Id} = Doc, D0) ->
+ lists:foldl(
+ fun(Shard, D1) ->
+ dict:append(Shard, Doc, D1)
+ end,
+ D0,
+ mem3:shards(DbName, Id)
+ )
+ end,
+ dict:new(),
+ Docs
+ )
+ ).
append_update_replies([], [], DocReplyDict) ->
DocReplyDict;
-append_update_replies([Doc|Rest], [], Dict0) ->
+append_update_replies([Doc | Rest], [], Dict0) ->
% icky, if replicated_changes only errors show up in result
append_update_replies(Rest, [], dict:append(Doc, noreply, Dict0));
-append_update_replies([Doc|Rest1], [Reply|Rest2], Dict0) ->
+append_update_replies([Doc | Rest1], [Reply | Rest2], Dict0) ->
append_update_replies(Rest1, Rest2, dict:append(Doc, Reply, Dict0)).
skip_message({0, _, W, _, DocReplyDict}) ->
@@ -213,27 +242,29 @@ validate_atomic_update(_DbName, AllDocs, true) ->
% to basically extract the prep_and_validate_updates function from couch_db
% and only run that, without actually writing in case of a success.
Error = {not_implemented, <<"all_or_nothing is not supported">>},
- PreCommitFailures = lists:map(fun(#doc{id=Id, revs = {Pos,Revs}}) ->
- case Revs of [] -> RevId = <<>>; [RevId|_] -> ok end,
- {{Id, {Pos, RevId}}, Error}
- end, AllDocs),
+ PreCommitFailures = lists:map(
+ fun(#doc{id = Id, revs = {Pos, Revs}}) ->
+ case Revs of
+ [] -> RevId = <<>>;
+ [RevId | _] -> ok
+ end,
+ {{Id, {Pos, RevId}}, Error}
+ end,
+ AllDocs
+ ),
throw({aborted, PreCommitFailures}).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
setup_all() ->
meck:new([couch_log, couch_stats]),
- meck:expect(couch_log, warning, fun(_,_) -> ok end),
+ meck:expect(couch_log, warning, fun(_, _) -> ok end),
meck:expect(couch_stats, increment_counter, fun(_) -> ok end).
-
teardown_all(_) ->
meck:unload().
-
doc_update_test_() ->
{
setup,
@@ -246,132 +277,151 @@ doc_update_test_() ->
]
}.
-
% eunits
doc_update1() ->
- Doc1 = #doc{revs = {1,[<<"foo">>]}},
- Doc2 = #doc{revs = {1,[<<"bar">>]}},
+ Doc1 = #doc{revs = {1, [<<"foo">>]}},
+ Doc2 = #doc{revs = {1, [<<"bar">>]}},
Docs = [Doc1],
Docs2 = [Doc2, Doc1],
- Dict = dict:from_list([{Doc,[]} || Doc <- Docs]),
- Dict2 = dict:from_list([{Doc,[]} || Doc <- Docs2]),
+ Dict = dict:from_list([{Doc, []} || Doc <- Docs]),
+ Dict2 = dict:from_list([{Doc, []} || Doc <- Docs2]),
Shards =
- mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
-
+ mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
+ GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
% test for W = 2
- AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
- Dict},
+ AccW2 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs, Dict},
- {ok,{WaitingCountW2_1,_,_,_,_}=AccW2_1} =
- handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW2),
- ?assertEqual(WaitingCountW2_1,2),
- {stop, FinalReplyW2 } =
- handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW2_1),
- ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW2),
+ {ok, {WaitingCountW2_1, _, _, _, _} = AccW2_1} =
+ handle_message({ok, [{ok, Doc1}]}, hd(Shards), AccW2),
+ ?assertEqual(WaitingCountW2_1, 2),
+ {stop, FinalReplyW2} =
+ handle_message({ok, [{ok, Doc1}]}, lists:nth(2, Shards), AccW2_1),
+ ?assertEqual({ok, [{Doc1, {ok, Doc1}}]}, FinalReplyW2),
% test for W = 3
- AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs,
- Dict},
+ AccW3 = {length(Shards), length(Docs), list_to_integer("3"), GroupedDocs, Dict},
- {ok,{WaitingCountW3_1,_,_,_,_}=AccW3_1} =
- handle_message({ok, [{ok, Doc1}]},hd(Shards),AccW3),
- ?assertEqual(WaitingCountW3_1,2),
+ {ok, {WaitingCountW3_1, _, _, _, _} = AccW3_1} =
+ handle_message({ok, [{ok, Doc1}]}, hd(Shards), AccW3),
+ ?assertEqual(WaitingCountW3_1, 2),
- {ok,{WaitingCountW3_2,_,_,_,_}=AccW3_2} =
- handle_message({ok, [{ok, Doc1}]},lists:nth(2,Shards),AccW3_1),
- ?assertEqual(WaitingCountW3_2,1),
+ {ok, {WaitingCountW3_2, _, _, _, _} = AccW3_2} =
+ handle_message({ok, [{ok, Doc1}]}, lists:nth(2, Shards), AccW3_1),
+ ?assertEqual(WaitingCountW3_2, 1),
- {stop, FinalReplyW3 } =
- handle_message({ok, [{ok, Doc1}]},lists:nth(3,Shards),AccW3_2),
- ?assertEqual({ok, [{Doc1, {ok,Doc1}}]},FinalReplyW3),
+ {stop, FinalReplyW3} =
+ handle_message({ok, [{ok, Doc1}]}, lists:nth(3, Shards), AccW3_2),
+ ?assertEqual({ok, [{Doc1, {ok, Doc1}}]}, FinalReplyW3),
% test w quorum > # shards, which should fail immediately
- Shards2 = mem3_util:create_partition_map("foo",1,1,["node1"]),
- GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>,Shards2,Docs),
+ Shards2 = mem3_util:create_partition_map("foo", 1, 1, ["node1"]),
+ GroupedDocs2 = group_docs_by_shard_hack(<<"foo">>, Shards2, Docs),
AccW4 =
{length(Shards2), length(Docs), list_to_integer("2"), GroupedDocs2, Dict},
Bool =
- case handle_message({ok, [{ok, Doc1}]},hd(Shards2),AccW4) of
- {stop, _Reply} ->
- true;
- _ -> false
- end,
- ?assertEqual(Bool,true),
+ case handle_message({ok, [{ok, Doc1}]}, hd(Shards2), AccW4) of
+ {stop, _Reply} ->
+ true;
+ _ ->
+ false
+ end,
+ ?assertEqual(Bool, true),
% Docs with no replies should end up as {error, internal_server_error}
- SA1 = #shard{node=a, range=1},
- SB1 = #shard{node=b, range=1},
- SA2 = #shard{node=a, range=2},
- SB2 = #shard{node=b, range=2},
- GroupedDocs3 = [{SA1,[Doc1]}, {SB1,[Doc1]}, {SA2,[Doc2]}, {SB2,[Doc2]}],
+ SA1 = #shard{node = a, range = 1},
+ SB1 = #shard{node = b, range = 1},
+ SA2 = #shard{node = a, range = 2},
+ SB2 = #shard{node = b, range = 2},
+ GroupedDocs3 = [{SA1, [Doc1]}, {SB1, [Doc1]}, {SA2, [Doc2]}, {SB2, [Doc2]}],
StW5_0 = {length(GroupedDocs3), length(Docs2), 2, GroupedDocs3, Dict2},
{ok, StW5_1} = handle_message({ok, [{ok, "A"}]}, SA1, StW5_0),
{ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1),
{ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2),
{stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3),
?assertEqual(
- {error, [{Doc1,{accepted,"A"}},{Doc2,{error,internal_server_error}}]},
+ {error, [{Doc1, {accepted, "A"}}, {Doc2, {error, internal_server_error}}]},
ReplyW5
).
doc_update2() ->
- Doc1 = #doc{revs = {1,[<<"foo">>]}},
- Doc2 = #doc{revs = {1,[<<"bar">>]}},
+ Doc1 = #doc{revs = {1, [<<"foo">>]}},
+ Doc2 = #doc{revs = {1, [<<"bar">>]}},
Docs = [Doc2, Doc1],
Shards =
- mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
- Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
- dict:from_list([{Doc,[]} || Doc <- Docs])},
-
- {ok,{WaitingCount1,_,_,_,_}=Acc1} =
- handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
- ?assertEqual(WaitingCount1,2),
-
- {ok,{WaitingCount2,_,_,_,_}=Acc2} =
- handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
- ?assertEqual(WaitingCount2,1),
+ mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
+ GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
+ Acc0 = {
+ length(Shards),
+ length(Docs),
+ list_to_integer("2"),
+ GroupedDocs,
+ dict:from_list([{Doc, []} || Doc <- Docs])
+ },
+
+ {ok, {WaitingCount1, _, _, _, _} = Acc1} =
+ handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, hd(Shards), Acc0),
+ ?assertEqual(WaitingCount1, 2),
+
+ {ok, {WaitingCount2, _, _, _, _} = Acc2} =
+ handle_message({rexi_EXIT, 1}, lists:nth(2, Shards), Acc1),
+ ?assertEqual(WaitingCount2, 1),
{stop, Reply} =
- handle_message({rexi_EXIT, 1},lists:nth(3,Shards),Acc2),
+ handle_message({rexi_EXIT, 1}, lists:nth(3, Shards), Acc2),
- ?assertEqual({accepted, [{Doc1,{accepted,Doc2}}, {Doc2,{accepted,Doc1}}]},
- Reply).
+ ?assertEqual(
+ {accepted, [{Doc1, {accepted, Doc2}}, {Doc2, {accepted, Doc1}}]},
+ Reply
+ ).
doc_update3() ->
- Doc1 = #doc{revs = {1,[<<"foo">>]}},
- Doc2 = #doc{revs = {1,[<<"bar">>]}},
+ Doc1 = #doc{revs = {1, [<<"foo">>]}},
+ Doc2 = #doc{revs = {1, [<<"bar">>]}},
Docs = [Doc2, Doc1],
Shards =
- mem3_util:create_partition_map("foo",3,1,["node1","node2","node3"]),
- GroupedDocs = group_docs_by_shard_hack(<<"foo">>,Shards,Docs),
- Acc0 = {length(Shards), length(Docs), list_to_integer("2"), GroupedDocs,
- dict:from_list([{Doc,[]} || Doc <- Docs])},
-
- {ok,{WaitingCount1,_,_,_,_}=Acc1} =
- handle_message({ok, [{ok, Doc1},{ok, Doc2}]},hd(Shards),Acc0),
- ?assertEqual(WaitingCount1,2),
-
- {ok,{WaitingCount2,_,_,_,_}=Acc2} =
- handle_message({rexi_EXIT, 1},lists:nth(2,Shards),Acc1),
- ?assertEqual(WaitingCount2,1),
+ mem3_util:create_partition_map("foo", 3, 1, ["node1", "node2", "node3"]),
+ GroupedDocs = group_docs_by_shard_hack(<<"foo">>, Shards, Docs),
+ Acc0 = {
+ length(Shards),
+ length(Docs),
+ list_to_integer("2"),
+ GroupedDocs,
+ dict:from_list([{Doc, []} || Doc <- Docs])
+ },
+
+ {ok, {WaitingCount1, _, _, _, _} = Acc1} =
+ handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, hd(Shards), Acc0),
+ ?assertEqual(WaitingCount1, 2),
+
+ {ok, {WaitingCount2, _, _, _, _} = Acc2} =
+ handle_message({rexi_EXIT, 1}, lists:nth(2, Shards), Acc1),
+ ?assertEqual(WaitingCount2, 1),
{stop, Reply} =
- handle_message({ok, [{ok, Doc1},{ok, Doc2}]},lists:nth(3,Shards),Acc2),
+ handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, lists:nth(3, Shards), Acc2),
- ?assertEqual({ok, [{Doc1, {ok, Doc2}},{Doc2, {ok,Doc1}}]},Reply).
+ ?assertEqual({ok, [{Doc1, {ok, Doc2}}, {Doc2, {ok, Doc1}}]}, Reply).
% needed for testing to avoid having to start the mem3 application
group_docs_by_shard_hack(_DbName, Shards, Docs) ->
- dict:to_list(lists:foldl(fun(#doc{id=_Id} = Doc, D0) ->
- lists:foldl(fun(Shard, D1) ->
- dict:append(Shard, Doc, D1)
- end, D0, Shards)
- end, dict:new(), Docs)).
+ dict:to_list(
+ lists:foldl(
+ fun(#doc{id = _Id} = Doc, D0) ->
+ lists:foldl(
+ fun(Shard, D1) ->
+ dict:append(Shard, Doc, D1)
+ end,
+ D0,
+ Shards
+ )
+ end,
+ dict:new(),
+ Docs
+ )
+ ).
-endif.
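The fabric_doc_update hunks above also show how a `case` or `if` expression bound to a variable is laid out after the reformat: the keyword starts on a new line under the `=`, and the clauses gain one extra level of indentation. A small sketch mirroring the `NewHealth = case Health of ... end` shape in force_reply/3 (module and function here are illustrative only):

```
-module(erlfmt_case_example).
-export([health/1]).

%% Standalone example module; not part of the CouchDB tree.
%% The case expression starts on its own line under the `=`,
%% with its clauses indented one extra level.
health(Health) ->
    NewHealth =
        case Health of
            ok -> accepted;
            _ -> Health
        end,
    NewHealth.
```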
diff --git a/src/fabric/src/fabric_group_info.erl b/src/fabric/src/fabric_group_info.erl
index 91bdf01b0..c7d7293fd 100644
--- a/src/fabric/src/fabric_group_info.erl
+++ b/src/fabric/src/fabric_group_info.erl
@@ -21,8 +21,7 @@
go(DbName, GroupId) when is_binary(GroupId) ->
{ok, DDoc} = fabric:open_doc(DbName, GroupId, [?ADMIN_CTX]),
go(DbName, DDoc);
-
-go(DbName, #doc{id=DDocId}) ->
+go(DbName, #doc{id = DDocId}) ->
Shards = mem3:shards(DbName),
Ushards = mem3:ushards(DbName),
Workers = fabric_util:submit_jobs(Shards, group_info, [DDocId]),
@@ -30,63 +29,66 @@ go(DbName, #doc{id=DDocId}) ->
USet = sets:from_list([{Id, N} || #shard{name = Id, node = N} <- Ushards]),
Acc = {fabric_dict:init(Workers, nil), [], USet},
try fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc) of
- {timeout, {WorkersDict, _, _}} ->
- DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
- fabric_util:log_timeout(DefunctWorkers, "group_info"),
- {error, timeout};
- Else ->
- Else
+ {timeout, {WorkersDict, _, _}} ->
+ DefunctWorkers = fabric_util:remove_done_workers(WorkersDict, nil),
+ fabric_util:log_timeout(DefunctWorkers, "group_info"),
+ {error, timeout};
+ Else ->
+ Else
after
rexi_monitor:stop(RexiMon)
end.
-handle_message({rexi_DOWN, _, {_,NodeRef},_}, _, {Counters, Resps, USet}) ->
+handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, {Counters, Resps, USet}) ->
case fabric_ring:node_down(NodeRef, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps, USet}};
error -> {error, {nodedown, <<"progress not possible">>}}
end;
-
handle_message({rexi_EXIT, Reason}, Shard, {Counters, Resps, USet}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps, USet}};
error -> {error, Reason}
end;
-
handle_message({ok, Info}, Shard, {Counters, Resps, USet}) ->
- case fabric_ring:handle_response(Shard, Info, Counters, Resps) of
+ case fabric_ring:handle_response(Shard, Info, Counters, Resps) of
{ok, {Counters1, Resps1}} ->
{ok, {Counters1, Resps1, USet}};
{stop, Resps1} ->
{stop, build_final_response(USet, Resps1)}
end;
-
handle_message(Reason, Shard, {Counters, Resps, USet}) ->
case fabric_ring:handle_error(Shard, Counters, Resps) of
{ok, Counters1} -> {ok, {Counters1, Resps, USet}};
error -> {error, Reason}
end.
-
build_final_response(USet, Responses) ->
- AccF = fabric_dict:fold(fun(#shard{name = Id, node = Node}, Info, Acc) ->
- IsPreferred = sets:is_element({Id, Node}, USet),
- dict:append(Id, {Node, IsPreferred, Info}, Acc)
- end, dict:new(), Responses),
+ AccF = fabric_dict:fold(
+ fun(#shard{name = Id, node = Node}, Info, Acc) ->
+ IsPreferred = sets:is_element({Id, Node}, USet),
+ dict:append(Id, {Node, IsPreferred, Info}, Acc)
+ end,
+ dict:new(),
+ Responses
+ ),
Pending = aggregate_pending(AccF),
Infos = get_infos(AccF),
[{updates_pending, {Pending}} | merge_results(Infos)].
-
get_infos(Acc) ->
Values = [V || {_, V} <- dict:to_list(Acc)],
lists:flatten([Info || {_Node, _Pref, Info} <- lists:flatten(Values)]).
aggregate_pending(Dict) ->
{Preferred, Total, Minimum} =
- dict:fold(fun(_Name, Results, {P, T, M}) ->
- {Preferred, Total, Minimum} = calculate_pending(Results),
- {P + Preferred, T + Total, M + Minimum}
- end, {0, 0, 0}, Dict),
+ dict:fold(
+ fun(_Name, Results, {P, T, M}) ->
+ {Preferred, Total, Minimum} = calculate_pending(Results),
+ {P + Preferred, T + Total, M + Minimum}
+ end,
+ {0, 0, 0},
+ Dict
+ ),
[
{minimum, Minimum},
{preferred, Preferred},
@@ -94,46 +96,64 @@ aggregate_pending(Dict) ->
].
calculate_pending(Results) ->
- lists:foldl(fun
- ({_Node, true, Info}, {P, T, V}) ->
- Pending = couch_util:get_value(pending_updates, Info),
- {P + Pending, T + Pending, min(Pending, V)};
- ({_Node, false, Info}, {P, T, V}) ->
- Pending = couch_util:get_value(pending_updates, Info),
- {P, T + Pending, min(Pending, V)}
- end, {0, 0, infinity}, Results).
+ lists:foldl(
+ fun
+ ({_Node, true, Info}, {P, T, V}) ->
+ Pending = couch_util:get_value(pending_updates, Info),
+ {P + Pending, T + Pending, min(Pending, V)};
+ ({_Node, false, Info}, {P, T, V}) ->
+ Pending = couch_util:get_value(pending_updates, Info),
+ {P, T + Pending, min(Pending, V)}
+ end,
+ {0, 0, infinity},
+ Results
+ ).
merge_results(Info) ->
- Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end,
- orddict:new(), Info),
- orddict:fold(fun
- (signature, [X | _], Acc) ->
- [{signature, X} | Acc];
- (language, [X | _], Acc) ->
- [{language, X} | Acc];
- (sizes, X, Acc) ->
- [{sizes, {merge_object(X)}} | Acc];
- (compact_running, X, Acc) ->
- [{compact_running, lists:member(true, X)} | Acc];
- (updater_running, X, Acc) ->
- [{updater_running, lists:member(true, X)} | Acc];
- (waiting_commit, X, Acc) ->
- [{waiting_commit, lists:member(true, X)} | Acc];
- (waiting_clients, X, Acc) ->
- [{waiting_clients, lists:sum(X)} | Acc];
- (update_seq, X, Acc) ->
- [{update_seq, lists:sum(X)} | Acc];
- (purge_seq, X, Acc) ->
- [{purge_seq, lists:sum(X)} | Acc];
- (_, _, Acc) ->
- Acc
- end, [], Dict).
+ Dict = lists:foldl(
+ fun({K, V}, D0) -> orddict:append(K, V, D0) end,
+ orddict:new(),
+ Info
+ ),
+ orddict:fold(
+ fun
+ (signature, [X | _], Acc) ->
+ [{signature, X} | Acc];
+ (language, [X | _], Acc) ->
+ [{language, X} | Acc];
+ (sizes, X, Acc) ->
+ [{sizes, {merge_object(X)}} | Acc];
+ (compact_running, X, Acc) ->
+ [{compact_running, lists:member(true, X)} | Acc];
+ (updater_running, X, Acc) ->
+ [{updater_running, lists:member(true, X)} | Acc];
+ (waiting_commit, X, Acc) ->
+ [{waiting_commit, lists:member(true, X)} | Acc];
+ (waiting_clients, X, Acc) ->
+ [{waiting_clients, lists:sum(X)} | Acc];
+ (update_seq, X, Acc) ->
+ [{update_seq, lists:sum(X)} | Acc];
+ (purge_seq, X, Acc) ->
+ [{purge_seq, lists:sum(X)} | Acc];
+ (_, _, Acc) ->
+ Acc
+ end,
+ [],
+ Dict
+ ).
merge_object(Objects) ->
- Dict = lists:foldl(fun({Props}, D) ->
- lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, D, Props)
- end, orddict:new(), Objects),
- orddict:fold(fun
- (Key, X, Acc) ->
+ Dict = lists:foldl(
+ fun({Props}, D) ->
+ lists:foldl(fun({K, V}, D0) -> orddict:append(K, V, D0) end, D, Props)
+ end,
+ orddict:new(),
+ Objects
+ ),
+ orddict:fold(
+ fun(Key, X, Acc) ->
[{Key, lists:sum(X)} | Acc]
- end, [], Dict).
+ end,
+ [],
+ Dict
+ ).
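calculate_pending/1 and merge_results/1 above show how a multi-clause anonymous fun is written after the reformat: `fun` alone on its line, each clause indented one level beneath it. A minimal sketch with made-up tuple tags:

```
-module(erlfmt_fun_example).
-export([pending/1]).

%% Standalone example module; not part of the CouchDB tree.
%% Sum {preferred, N} into both counters and {other, N} into the total
%% only; the clause layout matches the reformatted calculate_pending/1.
pending(Results) ->
    lists:foldl(
        fun
            ({preferred, N}, {P, T}) ->
                {P + N, T + N};
            ({other, N}, {P, T}) ->
                {P, T + N}
        end,
        {0, 0},
        Results
    ).
```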
diff --git a/src/fabric/src/fabric_ring.erl b/src/fabric/src/fabric_ring.erl
index bad0f42d1..2bb7d717f 100644
--- a/src/fabric/src/fabric_ring.erl
+++ b/src/fabric/src/fabric_ring.erl
@@ -12,7 +12,6 @@
-module(fabric_ring).
-
-export([
is_progress_possible/1,
is_progress_possible/2,
@@ -25,21 +24,17 @@
handle_response/5
]).
-
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-type fabric_dict() :: [{#shard{}, any()}].
-type ring_opts() :: [atom() | tuple()].
-
%% @doc looks for a fully covered keyrange in the list of counters
-spec is_progress_possible(fabric_dict()) -> boolean().
is_progress_possible(Counters) ->
is_progress_possible(Counters, []).
-
%% @doc looks for a fully covered keyrange in the list of counters
%% This version take ring option to configure how progress will
%% be checked. By default, [], checks that the full ring is covered.
@@ -47,41 +42,39 @@ is_progress_possible(Counters) ->
is_progress_possible(Counters, RingOpts) ->
is_progress_possible(Counters, [], 0, ?RING_END, RingOpts).
-
-spec get_shard_replacements(binary(), [#shard{}]) -> [#shard{}].
get_shard_replacements(DbName, UsedShards0) ->
% We only want to generate a replacements list from shards
% that aren't already used.
AllLiveShards = mem3:live_shards(DbName, [node() | nodes()]),
- UsedShards = [S#shard{ref=undefined} || S <- UsedShards0],
+ UsedShards = [S#shard{ref = undefined} || S <- UsedShards0],
get_shard_replacements_int(AllLiveShards -- UsedShards, UsedShards).
-
-spec node_down(node(), fabric_dict(), fabric_dict()) ->
{ok, fabric_dict()} | error.
node_down(Node, Workers, Responses) ->
node_down(Node, Workers, Responses, []).
-
-spec node_down(node(), fabric_dict(), fabric_dict(), ring_opts()) ->
{ok, fabric_dict()} | error.
node_down(Node, Workers, Responses, RingOpts) ->
{B, E} = range_bounds(Workers, Responses),
- Workers1 = fabric_dict:filter(fun(#shard{node = N}, _) ->
- N =/= Node
- end, Workers),
+ Workers1 = fabric_dict:filter(
+ fun(#shard{node = N}, _) ->
+ N =/= Node
+ end,
+ Workers
+ ),
case is_progress_possible(Workers1, Responses, B, E, RingOpts) of
true -> {ok, Workers1};
false -> error
end.
-
-spec handle_error(#shard{}, fabric_dict(), fabric_dict()) ->
{ok, fabric_dict()} | error.
handle_error(Shard, Workers, Responses) ->
handle_error(Shard, Workers, Responses, []).
-
-spec handle_error(#shard{}, fabric_dict(), fabric_dict(), ring_opts()) ->
{ok, fabric_dict()} | error.
handle_error(Shard, Workers, Responses, RingOpts) ->
@@ -92,20 +85,28 @@ handle_error(Shard, Workers, Responses, RingOpts) ->
false -> error
end.
-
-spec handle_response(#shard{}, any(), fabric_dict(), fabric_dict()) ->
{ok, {fabric_dict(), fabric_dict()}} | {stop, fabric_dict()}.
handle_response(Shard, Response, Workers, Responses) ->
handle_response(Shard, Response, Workers, Responses, []).
-
--spec handle_response(#shard{}, any(), fabric_dict(), fabric_dict(),
- ring_opts()) ->
+-spec handle_response(
+ #shard{},
+ any(),
+ fabric_dict(),
+ fabric_dict(),
+ ring_opts()
+) ->
{ok, {fabric_dict(), fabric_dict()}} | {stop, fabric_dict()}.
handle_response(Shard, Response, Workers, Responses, RingOpts) ->
- handle_response(Shard, Response, Workers, Responses, RingOpts,
- fun stop_workers/1).
-
+ handle_response(
+ Shard,
+ Response,
+ Workers,
+ Responses,
+ RingOpts,
+ fun stop_workers/1
+ ).
% Worker response handler. Gets responses from shard and puts them in the list
% until they complete a full ring. Then kill unused responses and remaining
@@ -139,7 +140,6 @@ handle_response(Shard, Response, Workers, Responses, RingOpts, CleanupCb) ->
handle_response_all(Workers1, Responses1)
end.
-
handle_response_ring(Workers, Responses, CleanupCb) ->
{MinB, MaxE} = range_bounds(Workers, Responses),
Ranges = lists:map(fun({R, _, _}) -> R end, Responses),
@@ -159,7 +159,6 @@ handle_response_ring(Workers, Responses, CleanupCb) ->
{stop, fabric_dict:from_list(UsedResponses)}
end.
-
handle_response_any(Shard, Response, Workers, Any, CleanupCb) ->
case lists:member(Shard#shard{ref = undefined}, Any) of
true ->
@@ -169,7 +168,6 @@ handle_response_any(Shard, Response, Workers, Any, CleanupCb) ->
{ok, {Workers, []}}
end.
-
handle_response_all(Workers, Responses) ->
case fabric_dict:size(Workers) =:= 0 of
true ->
@@ -178,7 +176,6 @@ handle_response_all(Workers, Responses) ->
{ok, {Workers, Responses}}
end.
-
% Check if workers are still waiting and the already received responses could
% still form a continuous range. The range won't always be the full ring, and
% the bounds are computed based on the minimum and maximum interval beginning
@@ -191,19 +188,21 @@ handle_response_all(Workers, Responses) ->
% might look like: 00-ff, 00-ff, 07-ff. Even if both 00-ff workers exit,
% progress can still be made with the remaining 07-ff copy.
%
--spec is_progress_possible(fabric_dict(), [{any(), #shard{}, any()}],
- non_neg_integer(), non_neg_integer(), ring_opts()) -> boolean().
+-spec is_progress_possible(
+ fabric_dict(),
+ [{any(), #shard{}, any()}],
+ non_neg_integer(),
+ non_neg_integer(),
+ ring_opts()
+) -> boolean().
is_progress_possible([], [], _, _, _) ->
false;
-
is_progress_possible(Counters, Responses, MinB, MaxE, []) ->
ResponseRanges = lists:map(fun({{B, E}, _, _}) -> {B, E} end, Responses),
Ranges = fabric_util:worker_ranges(Counters) ++ ResponseRanges,
mem3_util:get_ring(Ranges, MinB, MaxE) =/= [];
-
is_progress_possible(Counters, _Responses, _, _, [all]) ->
fabric_dict:size(Counters) > 0;
-
is_progress_possible(Counters, Responses, _, _, [{any, AnyShards}]) ->
InAny = fun(S) -> lists:member(S#shard{ref = undefined}, AnyShards) end,
case fabric_dict:filter(fun(S, _) -> InAny(S) end, Counters) of
@@ -216,30 +215,37 @@ is_progress_possible(Counters, Responses, _, _, [{any, AnyShards}]) ->
true
end.
-
get_shard_replacements_int(UnusedShards, UsedShards) ->
% If we have more than one copy of a range then we don't
% want to try and add a replacement to any copy.
- RangeCounts = lists:foldl(fun(#shard{range=R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end, dict:new(), UsedShards),
+ RangeCounts = lists:foldl(
+ fun(#shard{range = R}, Acc) ->
+ dict:update_counter(R, 1, Acc)
+ end,
+ dict:new(),
+ UsedShards
+ ),
% For each seq shard range with a count of 1, find any
% possible replacements from the unused shards. The
% replacement list is keyed by range.
- lists:foldl(fun(#shard{range = [B, E] = Range}, Acc) ->
- case dict:find(Range, RangeCounts) of
- {ok, 1} ->
- Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
- % Only keep non-empty lists of replacements
- if Repls == [] -> Acc; true ->
- [{Range, Repls} | Acc]
- end;
- _ ->
- Acc
- end
- end, [], UsedShards).
-
+ lists:foldl(
+ fun(#shard{range = [B, E] = Range}, Acc) ->
+ case dict:find(Range, RangeCounts) of
+ {ok, 1} ->
+ Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
+ % Only keep non-empty lists of replacements
+ if
+ Repls == [] -> Acc;
+ true -> [{Range, Repls} | Acc]
+ end;
+ _ ->
+ Acc
+ end
+ end,
+ [],
+ UsedShards
+ ).
range_bounds(Workers, Responses) ->
RespRanges = lists:map(fun({R, _, _}) -> R end, Responses),
@@ -247,31 +253,24 @@ range_bounds(Workers, Responses) ->
{Bs, Es} = lists:unzip(Ranges),
{lists:min(Bs), lists:max(Es)}.
-
get_responses([], _) ->
[];
-
get_responses([Range | Ranges], [{Range, Shard, Value} | Resps]) ->
[{Shard, Value} | get_responses(Ranges, Resps)];
-
get_responses(Ranges, [_DupeRangeResp | Resps]) ->
get_responses(Ranges, Resps).
-
stop_unused_workers(_, _, _, undefined) ->
ok;
-
stop_unused_workers(Workers, AllResponses, UsedResponses, CleanupCb) ->
WorkerShards = [S || {S, _} <- Workers],
- Used = [S || {S, _} <- UsedResponses],
+ Used = [S || {S, _} <- UsedResponses],
Unused = [S || {_, S, _} <- AllResponses, not lists:member(S, Used)],
CleanupCb(WorkerShards ++ Unused).
-
stop_workers(Shards) when is_list(Shards) ->
rexi:kill_all([{Node, Ref} || #shard{node = Node, ref = Ref} <- Shards]).
-
% Unit tests
is_progress_possible_full_range_test() ->
@@ -297,7 +296,6 @@ is_progress_possible_full_range_test() ->
T7 = [[0, 10], [13, 20], [21, ?RING_END], [9, 12]],
?assertEqual(false, is_progress_possible(mk_cnts(T7))).
-
is_progress_possible_with_responses_test() ->
C1 = mk_cnts([[0, ?RING_END]]),
?assertEqual(true, is_progress_possible(C1, [], 0, ?RING_END, [])),
@@ -322,7 +320,6 @@ is_progress_possible_with_responses_test() ->
?assertEqual(false, is_progress_possible([], RS1, 5, 8, [])),
?assertEqual(true, is_progress_possible([], RS1, 7, 8, [])).
-
is_progress_possible_with_ring_opts_any_test() ->
Opts = [{any, [mk_shard("n1", [0, 5]), mk_shard("n2", [3, 10])]}],
C1 = [{mk_shard("n1", [0, ?RING_END]), nil}],
@@ -340,23 +337,31 @@ is_progress_possible_with_ring_opts_any_test() ->
C2 = [{mk_shard("n1", [0, 5]), nil}],
?assertEqual(true, is_progress_possible(C2, [], 0, ?RING_END, Opts)).
-
is_progress_possible_with_ring_opts_all_test() ->
C1 = [{mk_shard("n1", [0, ?RING_END]), nil}],
?assertEqual(true, is_progress_possible(C1, [], 0, ?RING_END, [all])),
?assertEqual(false, is_progress_possible([], [], 0, ?RING_END, [all])).
-
get_shard_replacements_test() ->
- Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n1", 11, 20}, {"n1", 21, ?RING_END},
- {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
- {"n3", 0, 21, ?RING_END}
- ]],
- Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n2", 21, ?RING_END},
- {"n3", 0, 10}, {"n3", 11, 20}
- ]],
+ Unused = [
+ mk_shard(N, [B, E])
+ || {N, B, E} <- [
+ {"n1", 11, 20},
+ {"n1", 21, ?RING_END},
+ {"n2", 0, 4},
+ {"n2", 5, 10},
+ {"n2", 11, 20},
+ {"n3", 0, 21, ?RING_END}
+ ]
+ ],
+ Used = [
+ mk_shard(N, [B, E])
+ || {N, B, E} <- [
+ {"n2", 21, ?RING_END},
+ {"n3", 0, 10},
+ {"n3", 11, 20}
+ ]
+ ],
Res = lists:sort(get_shard_replacements_int(Unused, Used)),
% Notice that the [0, 10] range can be replaced by spawning the
% [0, 4] and [5, 10] workers on n1
@@ -367,7 +372,6 @@ get_shard_replacements_test() ->
],
?assertEqual(Expect, Res).
-
handle_response_basic_test() ->
Shard1 = mk_shard("n1", [0, 1]),
Shard2 = mk_shard("n1", [2, ?RING_END]),
@@ -383,7 +387,6 @@ handle_response_basic_test() ->
Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
?assertEqual({stop, [{Shard1, 42}, {Shard2, 43}]}, Result2).
-
handle_response_incomplete_ring_test() ->
Shard1 = mk_shard("n1", [0, 1]),
Shard2 = mk_shard("n1", [2, 10]),
@@ -399,7 +402,6 @@ handle_response_incomplete_ring_test() ->
Result2 = handle_response(Shard2, 43, Workers2, Responses1, [], undefined),
?assertEqual({stop, [{Shard1, 42}, {Shard2, 43}]}, Result2).
-
handle_response_multiple_copies_test() ->
Shard1 = mk_shard("n1", [0, 1]),
Shard2 = mk_shard("n2", [0, 1]),
@@ -421,7 +423,6 @@ handle_response_multiple_copies_test() ->
% that responded first is included in the ring.
?assertEqual({stop, [{Shard1, 42}, {Shard3, 44}]}, Result3).
-
handle_response_backtracking_test() ->
Shard1 = mk_shard("n1", [0, 5]),
Shard2 = mk_shard("n1", [10, ?RING_END]),
@@ -445,7 +446,6 @@ handle_response_backtracking_test() ->
Result4 = handle_response(Shard4, 45, Workers4, Responses3, [], undefined),
?assertEqual({stop, [{Shard3, 44}, {Shard4, 45}]}, Result4).
-
handle_response_ring_opts_any_test() ->
Shard1 = mk_shard("n1", [0, 5]),
Shard2 = mk_shard("n2", [0, 1]),
@@ -469,7 +469,6 @@ handle_response_ring_opts_any_test() ->
Result3 = handle_response(Shard3, 44, Workers3, [], Opts, undefined),
?assertEqual({stop, [{Shard3, 44}]}, Result3).
-
handle_response_ring_opts_all_test() ->
Shard1 = mk_shard("n1", [0, 5]),
Shard2 = mk_shard("n2", [0, 1]),
@@ -493,7 +492,6 @@ handle_response_ring_opts_all_test() ->
Result3 = handle_response(W3, 44, Workers3, [], [all], undefined),
?assertMatch({stop, [_ | _]}, Result3).
-
handle_error_test() ->
Shard1 = mk_shard("n1", [0, 5]),
Shard2 = mk_shard("n1", [10, ?RING_END]),
@@ -516,7 +514,6 @@ handle_error_test() ->
{ok, {Workers4, Responses3}} = Result3,
?assertEqual(error, handle_error(Shard4, Workers4, Responses3)).
-
node_down_test() ->
Shard1 = mk_shard("n1", [0, 5]),
Shard2 = mk_shard("n1", [10, ?RING_END]),
@@ -547,20 +544,16 @@ node_down_test() ->
?assertEqual(error, node_down(n3, Workers5, Responses3)).
-
mk_cnts(Ranges) ->
Shards = lists:map(fun mk_shard/1, Ranges),
fabric_dict:init([S#shard{ref = make_ref()} || S <- Shards], nil).
-
mk_resps(RangeNameVals) ->
[{{B, E}, mk_shard(Name, [B, E]), V} || {Name, B, E, V} <- RangeNameVals].
-
mk_shard([B, E]) when is_integer(B), is_integer(E) ->
#shard{range = [B, E]}.
-
mk_shard(Name, Range) ->
Node = list_to_atom(Name),
BName = list_to_binary(Name),
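The fabric_rpc diff that follows rewrites the long single-line `-export` attributes into lists with one function per line, keeping related arities such as `create_db/1, create_db/2` together. A minimal sketch of that attribute layout (the module and functions are illustrative only):

```
-module(erlfmt_export_example).

%% Standalone example module; not part of the CouchDB tree.
-export([
    get_doc_count/1,
    get_doc_count/2
]).

get_doc_count(DbName) ->
    get_doc_count(DbName, []).

get_doc_count(_DbName, _Options) ->
    0.
```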
diff --git a/src/fabric/src/fabric_rpc.erl b/src/fabric/src/fabric_rpc.erl
index 4330f92be..a90c94ade 100644
--- a/src/fabric/src/fabric_rpc.erl
+++ b/src/fabric/src/fabric_rpc.erl
@@ -12,21 +12,47 @@
-module(fabric_rpc).
--export([get_db_info/1, get_doc_count/1, get_design_doc_count/1,
- get_update_seq/1]).
--export([open_doc/3, open_revs/4, get_doc_info/3, get_full_doc_info/3,
- get_missing_revs/2, get_missing_revs/3, update_docs/3]).
+-export([
+ get_db_info/1,
+ get_doc_count/1,
+ get_design_doc_count/1,
+ get_update_seq/1
+]).
+-export([
+ open_doc/3,
+ open_revs/4,
+ get_doc_info/3,
+ get_full_doc_info/3,
+ get_missing_revs/2, get_missing_revs/3,
+ update_docs/3
+]).
-export([all_docs/3, changes/3, map_view/4, reduce_view/4, group_info/2]).
--export([create_db/1, create_db/2, delete_db/1, reset_validation_funs/1,
- set_security/3, set_revs_limit/3, create_shard_db_doc/2,
- delete_shard_db_doc/2, get_partition_info/2]).
+-export([
+ create_db/1, create_db/2,
+ delete_db/1,
+ reset_validation_funs/1,
+ set_security/3,
+ set_revs_limit/3,
+ create_shard_db_doc/2,
+ delete_shard_db_doc/2,
+ get_partition_info/2
+]).
-export([get_all_security/2, open_shard/2]).
-export([compact/1, compact/2]).
-export([get_purge_seq/2, purge_docs/3, set_purge_infos_limit/3]).
--export([get_db_info/2, get_doc_count/2, get_design_doc_count/2,
- get_update_seq/2, changes/4, map_view/5, reduce_view/5,
- group_info/3, update_mrview/4, get_uuid/1]).
+-export([
+ get_db_info/2,
+ get_doc_count/2,
+ get_design_doc_count/2,
+ get_update_seq/2,
+ changes/4,
+ map_view/5,
+ reduce_view/5,
+ group_info/3,
+ update_mrview/4,
+ get_uuid/1
+]).
-include_lib("fabric/include/fabric.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -45,79 +71,86 @@ changes(DbName, #changes_args{} = Args, StartSeq, DbOptions) ->
changes(DbName, Options, StartVector, DbOptions) ->
set_io_priority(DbName, DbOptions),
Args0 = lists:keyfind(changes_args, 1, Options),
- #changes_args{dir=Dir, filter_fun=Filter} = Args0,
- Args = case Filter of
- {fetch, custom, Style, Req, {DDocId, Rev}, FName} ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- Args0#changes_args{
- filter_fun={custom, Style, Req, DDoc, FName}
- };
- {fetch, view, Style, {DDocId, Rev}, VName} ->
- {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
- Args0#changes_args{filter_fun={view, Style, DDoc, VName}};
- _ ->
- Args0
- end,
+ #changes_args{dir = Dir, filter_fun = Filter} = Args0,
+ Args =
+ case Filter of
+ {fetch, custom, Style, Req, {DDocId, Rev}, FName} ->
+ {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
+ Args0#changes_args{
+ filter_fun = {custom, Style, Req, DDoc, FName}
+ };
+ {fetch, view, Style, {DDocId, Rev}, VName} ->
+ {ok, DDoc} = ddoc_cache:open_doc(mem3:dbname(DbName), DDocId, Rev),
+ Args0#changes_args{filter_fun = {view, Style, DDoc, VName}};
+ _ ->
+ Args0
+ end,
DbOpenOptions = Args#changes_args.db_open_options ++ DbOptions,
case get_or_create_db(DbName, DbOpenOptions) of
- {ok, Db} ->
- StartSeq = calculate_start_seq(Db, node(), StartVector),
- Enum = fun changes_enumerator/2,
- Opts = [{dir,Dir}],
- Acc0 = #fabric_changes_acc{
- db = Db,
- seq = StartSeq,
- args = Args,
- options = Options,
- pending = couch_db:count_changes_since(Db, StartSeq),
- epochs = couch_db:get_epochs(Db)
- },
- try
- {ok, #fabric_changes_acc{seq=LastSeq, pending=Pending, epochs=Epochs}} =
- do_changes(Db, StartSeq, Enum, Acc0, Opts),
- rexi:stream_last({complete, [
- {seq, {LastSeq, uuid(Db), couch_db:owner_of(Epochs, LastSeq)}},
- {pending, Pending}
- ]})
- after
- couch_db:close(Db)
- end;
- Error ->
- rexi:stream_last(Error)
+ {ok, Db} ->
+ StartSeq = calculate_start_seq(Db, node(), StartVector),
+ Enum = fun changes_enumerator/2,
+ Opts = [{dir, Dir}],
+ Acc0 = #fabric_changes_acc{
+ db = Db,
+ seq = StartSeq,
+ args = Args,
+ options = Options,
+ pending = couch_db:count_changes_since(Db, StartSeq),
+ epochs = couch_db:get_epochs(Db)
+ },
+ try
+ {ok, #fabric_changes_acc{seq = LastSeq, pending = Pending, epochs = Epochs}} =
+ do_changes(Db, StartSeq, Enum, Acc0, Opts),
+ rexi:stream_last(
+ {complete, [
+ {seq, {LastSeq, uuid(Db), couch_db:owner_of(Epochs, LastSeq)}},
+ {pending, Pending}
+ ]}
+ )
+ after
+ couch_db:close(Db)
+ end;
+ Error ->
+ rexi:stream_last(Error)
end.
do_changes(Db, StartSeq, Enum, Acc0, Opts) ->
- #fabric_changes_acc {
+ #fabric_changes_acc{
args = Args
} = Acc0,
- #changes_args {
+ #changes_args{
filter = Filter
} = Args,
case Filter of
"_doc_ids" ->
% optimised code path, we’re looking up all doc_ids in the by-id instead of filtering
% the entire by-seq tree to find the doc_ids one by one
- #changes_args {
+ #changes_args{
filter_fun = {doc_ids, Style, DocIds},
dir = Dir
} = Args,
- couch_changes:send_changes_doc_ids(Db, StartSeq, Dir, Enum, Acc0, {doc_ids, Style, DocIds});
+ couch_changes:send_changes_doc_ids(
+ Db, StartSeq, Dir, Enum, Acc0, {doc_ids, Style, DocIds}
+ );
"_design_docs" ->
% optimised code path, we’re looking up all design_docs in the by-id instead of
% filtering the entire by-seq tree to find the design_docs one by one
- #changes_args {
+ #changes_args{
filter_fun = {design_docs, Style},
dir = Dir
} = Args,
- couch_changes:send_changes_design_docs(Db, StartSeq, Dir, Enum, Acc0, {design_docs, Style});
+ couch_changes:send_changes_design_docs(
+ Db, StartSeq, Dir, Enum, Acc0, {design_docs, Style}
+ );
_ ->
couch_db:fold_changes(Db, StartSeq, Enum, Acc0, Opts)
end.
all_docs(DbName, Options, Args0) ->
case fabric_util:upgrade_mrargs(Args0) of
- #mrargs{keys=undefined} = Args ->
+ #mrargs{keys = undefined} = Args ->
set_io_priority(DbName, Options),
{ok, Db} = get_or_create_db(DbName, Options),
CB = get_view_cb(Args),
@@ -129,7 +162,8 @@ update_mrview(DbName, {DDocId, Rev}, ViewName, Args0) ->
couch_util:with_db(DbName, fun(Db) ->
UpdateSeq = couch_db:get_update_seq(Db),
{ok, Pid, _} = couch_mrview:get_view_index_pid(
- Db, DDoc, ViewName, fabric_util:upgrade_mrargs(Args0)),
+ Db, DDoc, ViewName, fabric_util:upgrade_mrargs(Args0)
+ ),
couch_index:get_state(Pid, UpdateSeq)
end).
@@ -158,19 +192,21 @@ reduce_view(DbName, DDoc, ViewName, Args0, DbOptions) ->
set_io_priority(DbName, DbOptions),
Args = fabric_util:upgrade_mrargs(Args0),
{ok, Db} = get_or_create_db(DbName, DbOptions),
- VAcc0 = #vacc{db=Db},
+ VAcc0 = #vacc{db = Db},
couch_mrview:query_view(Db, DDoc, ViewName, Args, fun reduce_cb/2, VAcc0).
create_db(DbName) ->
create_db(DbName, []).
create_db(DbName, Options) ->
- rexi:reply(case couch_server:create(DbName, Options) of
- {ok, _} ->
- ok;
- Error ->
- Error
- end).
+ rexi:reply(
+ case couch_server:create(DbName, Options) of
+ {ok, _} ->
+ ok;
+ Error ->
+ Error
+ end
+ ).
create_shard_db_doc(_, Doc) ->
rexi:reply(mem3_util:write_db_doc(Doc)).
@@ -213,12 +249,13 @@ get_update_seq(DbName, DbOptions) ->
with_db(DbName, DbOptions, {couch_db, get_update_seq, []}).
set_security(DbName, SecObj, Options0) ->
- Options = case lists:keyfind(io_priority, 1, Options0) of
- false ->
- [{io_priority, {db_meta, security}}|Options0];
- _ ->
- Options0
- end,
+ Options =
+ case lists:keyfind(io_priority, 1, Options0) of
+ false ->
+ [{io_priority, {db_meta, security}} | Options0];
+ _ ->
+ Options0
+ end,
with_db(DbName, Options, {couch_db, set_security, [SecObj]}).
get_all_security(DbName, Options) ->
@@ -248,38 +285,46 @@ get_missing_revs(DbName, IdRevsList) ->
get_missing_revs(DbName, IdRevsList, Options) ->
% reimplement here so we get [] for Ids with no missing revs in response
set_io_priority(DbName, Options),
- rexi:reply(case get_or_create_db(DbName, Options) of
- {ok, Db} ->
- Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
- {ok, lists:zipwith(fun({Id, Revs}, FullDocInfoResult) ->
- case FullDocInfoResult of
- #full_doc_info{rev_tree=RevisionTree} = FullInfo ->
- MissingRevs = couch_key_tree:find_missing(RevisionTree, Revs),
- {Id, MissingRevs, possible_ancestors(FullInfo, MissingRevs)};
- not_found ->
- {Id, Revs, []}
- end
- end, IdRevsList, couch_db:get_full_doc_infos(Db, Ids))};
- Error ->
- Error
- end).
+ rexi:reply(
+ case get_or_create_db(DbName, Options) of
+ {ok, Db} ->
+ Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
+ {ok,
+ lists:zipwith(
+ fun({Id, Revs}, FullDocInfoResult) ->
+ case FullDocInfoResult of
+ #full_doc_info{rev_tree = RevisionTree} = FullInfo ->
+ MissingRevs = couch_key_tree:find_missing(RevisionTree, Revs),
+ {Id, MissingRevs, possible_ancestors(FullInfo, MissingRevs)};
+ not_found ->
+ {Id, Revs, []}
+ end
+ end,
+ IdRevsList,
+ couch_db:get_full_doc_infos(Db, Ids)
+ )};
+ Error ->
+ Error
+ end
+ ).
update_docs(DbName, Docs0, Options) ->
- {Docs1, Type} = case couch_util:get_value(read_repair, Options) of
- NodeRevs when is_list(NodeRevs) ->
- Filtered = read_repair_filter(DbName, Docs0, NodeRevs, Options),
- {Filtered, replicated_changes};
- undefined ->
- X = case proplists:get_value(replicated_changes, Options) of
- true -> replicated_changes;
- _ -> interactive_edit
- end,
- {Docs0, X}
- end,
+ {Docs1, Type} =
+ case couch_util:get_value(read_repair, Options) of
+ NodeRevs when is_list(NodeRevs) ->
+ Filtered = read_repair_filter(DbName, Docs0, NodeRevs, Options),
+ {Filtered, replicated_changes};
+ undefined ->
+ X =
+ case proplists:get_value(replicated_changes, Options) of
+ true -> replicated_changes;
+ _ -> interactive_edit
+ end,
+ {Docs0, X}
+ end,
Docs2 = make_att_readers(Docs1),
with_db(DbName, Options, {couch_db, update_docs, [Docs2, Options, Type]}).
-
get_purge_seq(DbName, Options) ->
with_db(DbName, Options, {couch_db, get_purge_seq, []}).
@@ -295,18 +340,19 @@ group_info(DbName, DDocId, DbOptions) ->
reset_validation_funs(DbName) ->
case get_or_create_db(DbName, []) of
- {ok, Db} ->
- couch_db:reload_validation_funs(Db);
- _ ->
- ok
+ {ok, Db} ->
+ couch_db:reload_validation_funs(Db);
+ _ ->
+ ok
end.
open_shard(Name, Opts) ->
set_io_priority(Name, Opts),
try
rexi:reply(mem3_util:get_or_create_db(Name, Opts))
- catch exit:{timeout, _} ->
- couch_stats:increment_counter([fabric, open_shard, timeouts])
+ catch
+ exit:{timeout, _} ->
+ couch_stats:increment_counter([fabric, open_shard, timeouts])
end.
compact(DbName) ->
@@ -314,7 +360,8 @@ compact(DbName) ->
compact(ShardName, DesignName) ->
{ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, ShardName, <<"_design/", DesignName/binary>>),
+ couch_mrview_index, ShardName, <<"_design/", DesignName/binary>>
+ ),
Ref = erlang:make_ref(),
Pid ! {'$gen_call', {self(), Ref}, compact}.
@@ -342,7 +389,6 @@ with_db(DbName, Options, {M,F,A}) ->
rexi:reply(Error)
end.
-
read_repair_filter(DbName, Docs, NodeRevs, Options) ->
set_io_priority(DbName, Options),
case get_or_create_db(DbName, Options) of
@@ -356,7 +402,6 @@ read_repair_filter(DbName, Docs, NodeRevs, Options) ->
rexi:reply(Error)
end.
-
% A read repair operation may have been triggered by a node
% that was out of sync with the local node. Thus, any time
% we receive a read repair request we need to check if we
@@ -393,16 +438,21 @@ read_repair_filter(Db, Docs, NodeRevs) ->
% For each node we scan the purge infos to filter out any
% revisions that have been locally purged since we last
% replicated to the remote node's shard copy.
- AllowableRevs = lists:foldl(fun({Node, Revs}, RevAcc) ->
- {Node, StartSeq} = lists:keyfind(Node, 1, NodeSeqs),
- FoldFun = fun({_PSeq, _UUID, PDocId, PRevs}, InnerAcc) ->
- if PDocId /= DocId -> {ok, InnerAcc}; true ->
- {ok, InnerAcc -- PRevs}
- end
+ AllowableRevs = lists:foldl(
+ fun({Node, Revs}, RevAcc) ->
+ {Node, StartSeq} = lists:keyfind(Node, 1, NodeSeqs),
+ FoldFun = fun({_PSeq, _UUID, PDocId, PRevs}, InnerAcc) ->
+ if
+ PDocId /= DocId -> {ok, InnerAcc};
+ true -> {ok, InnerAcc -- PRevs}
+ end
+ end,
+ {ok, FiltRevs} = couch_db:fold_purge_infos(Db, StartSeq, FoldFun, Revs),
+ lists:usort(FiltRevs ++ RevAcc)
end,
- {ok, FiltRevs} = couch_db:fold_purge_infos(Db, StartSeq, FoldFun, Revs),
- lists:usort(FiltRevs ++ RevAcc)
- end, [], RecentNodeRevs),
+ [],
+ RecentNodeRevs
+ ),
% Finally, filter the doc updates to only include revisions
% that have not been purged locally.
@@ -411,7 +461,6 @@ read_repair_filter(Db, Docs, NodeRevs) ->
end,
lists:filter(DocFiltFun, Docs).
-
get_node_seqs(Db, Nodes) ->
% Gather the list of {Node, PurgeSeq} pairs for all nodes
% that are present in our read repair group
@@ -439,12 +488,9 @@ get_node_seqs(Db, Nodes) ->
{ok, NodeBinSeqs} = couch_db:fold_local_docs(Db, FoldFun, InitAcc, Opts),
[{list_to_existing_atom(binary_to_list(N)), S} || {N, S} <- NodeBinSeqs].
-
-
get_or_create_db(DbName, Options) ->
mem3_util:get_or_create_db_int(DbName, Options).
-
get_view_cb(#mrargs{extra = Options}) ->
case couch_util:get_value(callback, Options) of
{Mod, Fun} when is_atom(Mod), is_atom(Fun) ->
@@ -455,7 +501,6 @@ get_view_cb(#mrargs{extra = Options}) ->
get_view_cb(_) ->
fun view_cb/2.
-
view_cb({meta, Meta}, Acc) ->
% Map function starting
ok = rexi:stream2({meta, Meta}),
@@ -477,7 +522,6 @@ view_cb(complete, Acc) ->
view_cb(ok, ddoc_updated) ->
rexi:reply({ok, ddoc_updated}).
-
reduce_cb({meta, Meta}, Acc) ->
% Map function starting
ok = rexi:stream2({meta, Meta}),
@@ -496,11 +540,10 @@ reduce_cb(complete, Acc) ->
reduce_cb(ok, ddoc_updated) ->
rexi:reply({ok, ddoc_updated}).
-
changes_enumerator(#full_doc_info{} = FDI, Acc) ->
changes_enumerator(couch_doc:to_doc_info(FDI), Acc);
-changes_enumerator(#doc_info{id= <<"_local/", _/binary>>, high_seq=Seq}, Acc) ->
- {ok, Acc#fabric_changes_acc{seq = Seq, pending = Acc#fabric_changes_acc.pending-1}};
+changes_enumerator(#doc_info{id = <<"_local/", _/binary>>, high_seq = Seq}, Acc) ->
+ {ok, Acc#fabric_changes_acc{seq = Seq, pending = Acc#fabric_changes_acc.pending - 1}};
changes_enumerator(DocInfo, Acc) ->
#fabric_changes_acc{
db = Db,
@@ -513,63 +556,82 @@ changes_enumerator(DocInfo, Acc) ->
pending = Pending,
epochs = Epochs
} = Acc,
- #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{deleted=Del}|_]} = DocInfo,
+ #doc_info{id = Id, high_seq = Seq, revs = [#rev_info{deleted = Del} | _]} = DocInfo,
case [X || X <- couch_changes:filter(Db, DocInfo, Filter), X /= null] of
- [] ->
- ChangesRow = {no_pass, [
- {pending, Pending-1},
- {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}}
- ]};
- Results ->
- Opts = if Conflicts -> [conflicts | DocOptions]; true -> DocOptions end,
- ChangesRow = {change, [
- {pending, Pending-1},
- {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}},
- {id, Id},
- {changes, Results},
- {deleted, Del} |
- if IncludeDocs -> [doc_member(Db, DocInfo, Opts, Filter)]; true -> [] end
- ]}
+ [] ->
+ ChangesRow =
+ {no_pass, [
+ {pending, Pending - 1},
+ {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}}
+ ]};
+ Results ->
+ Opts =
+ if
+ Conflicts -> [conflicts | DocOptions];
+ true -> DocOptions
+ end,
+ ChangesRow =
+ {change, [
+ {pending, Pending - 1},
+ {seq, {Seq, uuid(Db), couch_db:owner_of(Epochs, Seq)}},
+ {id, Id},
+ {changes, Results},
+ {deleted, Del}
+ | if
+ IncludeDocs -> [doc_member(Db, DocInfo, Opts, Filter)];
+ true -> []
+ end
+ ]}
end,
ok = rexi:stream2(ChangesRow),
- {ok, Acc#fabric_changes_acc{seq = Seq, pending = Pending-1}}.
+ {ok, Acc#fabric_changes_acc{seq = Seq, pending = Pending - 1}}.
doc_member(Shard, DocInfo, Opts, Filter) ->
case couch_db:open_doc(Shard, DocInfo, [deleted | Opts]) of
- {ok, Doc} ->
- {doc, maybe_filtered_json_doc(Doc, Opts, Filter)};
- Error ->
- Error
+ {ok, Doc} ->
+ {doc, maybe_filtered_json_doc(Doc, Opts, Filter)};
+ Error ->
+ Error
end.
-maybe_filtered_json_doc(Doc, Opts, {selector, _Style, {_Selector, Fields}})
- when Fields =/= nil ->
+maybe_filtered_json_doc(Doc, Opts, {selector, _Style, {_Selector, Fields}}) when
+ Fields =/= nil
+->
mango_fields:extract(couch_doc:to_json_obj(Doc, Opts), Fields);
maybe_filtered_json_doc(Doc, Opts, _Filter) ->
couch_doc:to_json_obj(Doc, Opts).
-
possible_ancestors(_FullInfo, []) ->
[];
possible_ancestors(FullInfo, MissingRevs) ->
- #doc_info{revs=RevsInfo} = couch_doc:to_doc_info(FullInfo),
- LeafRevs = [Rev || #rev_info{rev=Rev} <- RevsInfo],
+ #doc_info{revs = RevsInfo} = couch_doc:to_doc_info(FullInfo),
+ LeafRevs = [Rev || #rev_info{rev = Rev} <- RevsInfo],
% Find the revs that are possible parents of this rev
- lists:foldl(fun({LeafPos, LeafRevId}, Acc) ->
- % this leaf is a "possible ancenstor" of the missing
- % revs if this LeafPos lessthan any of the missing revs
- case lists:any(fun({MissingPos, _}) ->
- LeafPos < MissingPos end, MissingRevs) of
- true ->
- [{LeafPos, LeafRevId} | Acc];
- false ->
- Acc
- end
- end, [], LeafRevs).
+ lists:foldl(
+ fun({LeafPos, LeafRevId}, Acc) ->
+ % this leaf is a "possible ancestor" of the missing
+ % revs if this LeafPos is less than any of the missing revs
+ case
+ lists:any(
+ fun({MissingPos, _}) ->
+ LeafPos < MissingPos
+ end,
+ MissingRevs
+ )
+ of
+ true ->
+ [{LeafPos, LeafRevId} | Acc];
+ false ->
+ Acc
+ end
+ end,
+ [],
+ LeafRevs
+ ).
make_att_readers([]) ->
[];
-make_att_readers([#doc{atts=Atts0} = Doc | Rest]) ->
+make_att_readers([#doc{atts = Atts0} = Doc | Rest]) ->
% % go through the attachments looking for 'follows' in the data,
% % replace with function that reads the data from MIME stream.
Atts = [couch_att:transform(data, fun make_att_reader/1, Att) || Att <- Atts0],
@@ -577,14 +639,15 @@ make_att_readers([#doc{atts=Atts0} = Doc | Rest]) ->
make_att_reader({follows, Parser, Ref}) ->
fun() ->
- ParserRef = case get(mp_parser_ref) of
- undefined ->
- PRef = erlang:monitor(process, Parser),
- put(mp_parser_ref, PRef),
- PRef;
- Else ->
- Else
- end,
+ ParserRef =
+ case get(mp_parser_ref) of
+ undefined ->
+ PRef = erlang:monitor(process, Parser),
+ put(mp_parser_ref, PRef),
+ PRef;
+ Else ->
+ Else
+ end,
Parser ! {get_bytes, Ref, self()},
receive
{bytes, Ref, Bytes} ->
@@ -600,14 +663,20 @@ make_att_reader(Else) ->
Else.
clean_stack(S) ->
- lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end, S).
+ lists:map(
+ fun
+ ({M, F, A}) when is_list(A) -> {M, F, length(A)};
+ (X) -> X
+ end,
+ S
+ ).
set_io_priority(DbName, Options) ->
case lists:keyfind(io_priority, 1, Options) of
- {io_priority, Pri} ->
- erlang:put(io_priority, Pri);
- false ->
- erlang:put(io_priority, {interactive, DbName})
+ {io_priority, Pri} ->
+ erlang:put(io_priority, Pri);
+ false ->
+ erlang:put(io_priority, {interactive, DbName})
end,
case erlang:get(io_priority) of
{interactive, _} ->
@@ -623,7 +692,6 @@ set_io_priority(DbName, Options) ->
ok
end.
-
calculate_start_seq(Db, Node, Seq) ->
case couch_db:calculate_start_seq(Db, Node, Seq) of
N when is_integer(N) ->
@@ -637,13 +705,11 @@ calculate_start_seq(Db, Node, Seq) ->
mem3_rep:find_source_seq(Db, OriginalNode, Uuid, OriginalSeq)
end.
-
uuid(Db) ->
Uuid = couch_db:get_uuid(Db),
Prefix = fabric_util:get_uuid_prefix_len(),
binary:part(Uuid, {0, Prefix}).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
index 59c8b8a6b..2a3a2b004 100644
--- a/src/fabric/src/fabric_streams.erl
+++ b/src/fabric/src/fabric_streams.erl
@@ -23,22 +23,17 @@
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-define(WORKER_CLEANER, fabric_worker_cleaner).
-
start(Workers, Keypos) ->
start(Workers, Keypos, undefined, undefined).
-
start(Workers, Keypos, RingOpts) ->
start(Workers, Keypos, undefined, undefined, RingOpts).
-
start(Workers, Keypos, StartFun, Replacements) ->
start(Workers, Keypos, StartFun, Replacements, []).
-
start(Workers0, Keypos, StartFun, Replacements, RingOpts) ->
Fun = fun handle_stream_start/3,
Acc = #stream_acc{
@@ -52,16 +47,19 @@ start(Workers0, Keypos, StartFun, Replacements, RingOpts) ->
Timeout = fabric_util:request_timeout(),
case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of
{ok, #stream_acc{ready = Workers}} ->
- AckedWorkers = fabric_dict:fold(fun(Worker, From, WorkerAcc) ->
- rexi:stream_start(From),
- [Worker | WorkerAcc]
- end, [], Workers),
+ AckedWorkers = fabric_dict:fold(
+ fun(Worker, From, WorkerAcc) ->
+ rexi:stream_start(From),
+ [Worker | WorkerAcc]
+ end,
+ [],
+ Workers
+ ),
{ok, AckedWorkers};
Else ->
Else
end.
-
cleanup(Workers) ->
% Stop the auxiliary cleaner process as we got to the point where cleanup
% happens in the regular fashion so we don't want to send 2x the number of kill
@@ -75,7 +73,6 @@ cleanup(Workers) ->
end,
fabric_util:cleanup(Workers).
-
handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) ->
#stream_acc{workers = Workers, ready = Ready, ring_opts = RingOpts} = St,
case fabric_ring:node_down(NodeRef, Workers, Ready, RingOpts) of
@@ -84,7 +81,6 @@ handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) ->
error ->
{error, {nodedown, <<"progress not possible">>}}
end;
-
handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
#stream_acc{
workers = Workers,
@@ -100,11 +96,15 @@ handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
% and start the new workers if so.
case lists:keytake(Worker#shard.range, 1, Replacements) of
{value, {_Range, WorkerReplacements}, NewReplacements} ->
- FinalWorkers = lists:foldl(fun(Repl, NewWorkers) ->
- NewWorker = (St#stream_acc.start_fun)(Repl),
- add_worker_to_cleaner(self(), NewWorker),
- fabric_dict:store(NewWorker, waiting, NewWorkers)
- end, Workers, WorkerReplacements),
+ FinalWorkers = lists:foldl(
+ fun(Repl, NewWorkers) ->
+ NewWorker = (St#stream_acc.start_fun)(Repl),
+ add_worker_to_cleaner(self(), NewWorker),
+ fabric_dict:store(NewWorker, waiting, NewWorkers)
+ end,
+ Workers,
+ WorkerReplacements
+ ),
% Assert that our replaced worker provides us
% the opportunity to make progress. Need to make sure
% to include already processed responses, since we are
@@ -115,8 +115,8 @@ handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
true = fabric_ring:is_progress_possible(AllWorkers),
NewRefs = fabric_dict:fetch_keys(FinalWorkers),
{new_refs, NewRefs, St#stream_acc{
- workers=FinalWorkers,
- replacements=NewReplacements
+ workers = FinalWorkers,
+ replacements = NewReplacements
}};
false ->
% If progress isn't possible and we don't have any
@@ -126,36 +126,32 @@ handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
{error, _} ->
{error, fabric_util:error_info(Reason)}
end;
-
handle_stream_start(rexi_STREAM_INIT, {Worker, From}, St) ->
#stream_acc{workers = Workers, ready = Ready, ring_opts = RingOpts} = St,
case fabric_dict:lookup_element(Worker, Workers) of
- undefined ->
- % This worker lost the race with other partition copies, terminate
- rexi:stream_cancel(From),
- {ok, St};
- waiting ->
- case fabric_ring:handle_response(Worker, From, Workers, Ready, RingOpts) of
- {ok, {Workers1, Ready1}} ->
- % Don't have a full ring yet. Keep getting responses
- {ok, St#stream_acc{workers = Workers1, ready = Ready1}};
- {stop, Ready1} ->
- % Have a full ring of workers. But don't ack the worker
- % yet so they don't start sending us rows until we're ready
- {stop, St#stream_acc{workers = [], ready = Ready1}}
- end
+ undefined ->
+ % This worker lost the race with other partition copies, terminate
+ rexi:stream_cancel(From),
+ {ok, St};
+ waiting ->
+ case fabric_ring:handle_response(Worker, From, Workers, Ready, RingOpts) of
+ {ok, {Workers1, Ready1}} ->
+ % Don't have a full ring yet. Keep getting responses
+ {ok, St#stream_acc{workers = Workers1, ready = Ready1}};
+ {stop, Ready1} ->
+ % Have a full ring of workers. But don't ack the worker
+ % yet so they don't start sending us rows until we're ready
+ {stop, St#stream_acc{workers = [], ready = Ready1}}
+ end
end;
-
handle_stream_start({ok, ddoc_updated}, _, St) ->
WaitingWorkers = [W || {W, _} <- St#stream_acc.workers],
ReadyWorkers = [W || {W, _} <- St#stream_acc.ready],
cleanup(WaitingWorkers ++ ReadyWorkers),
{stop, ddoc_updated};
-
handle_stream_start(Else, _, _) ->
exit({invalid_stream_start, Else}).
-
% Spawn an auxiliary rexi worker cleaner. This will be used in cases
% when the coordinator (request) process is forcibly killed and doesn't
% get a chance to process its `after` fabric:clean/1 clause.
@@ -168,10 +164,9 @@ spawn_worker_cleaner(Coordinator, Workers) ->
end),
put(?WORKER_CLEANER, Pid),
Pid;
- ExistingCleaner ->
+ ExistingCleaner ->
ExistingCleaner
- end.
-
+ end.
cleaner_loop(Pid, Workers) ->
receive
@@ -181,7 +176,6 @@ cleaner_loop(Pid, Workers) ->
fabric_util:cleanup(Workers)
end.
-
add_worker_to_cleaner(CoordinatorPid, Worker) ->
case get(?WORKER_CLEANER) of
CleanerPid when is_pid(CleanerPid) ->
@@ -190,25 +184,27 @@ add_worker_to_cleaner(CoordinatorPid, Worker) ->
ok
end.
-
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
worker_cleaner_test_() ->
{
- "Fabric spawn_worker_cleaner test", {
- setup, fun setup/0, fun teardown/1,
- fun(_) -> [
- should_clean_workers(),
- does_not_fire_if_cleanup_called(),
- should_clean_additional_worker_too()
- ] end
+ "Fabric spawn_worker_cleaner test",
+ {
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ fun(_) ->
+ [
+ should_clean_workers(),
+ does_not_fire_if_cleanup_called(),
+ should_clean_additional_worker_too()
+ ]
+ end
}
}.
-
should_clean_workers() ->
?_test(begin
meck:reset(rexi),
@@ -217,15 +213,20 @@ should_clean_workers() ->
#shard{node = 'n1', ref = make_ref()},
#shard{node = 'n2', ref = make_ref()}
],
- {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+ {Coord, _} = spawn_monitor(fun() ->
+ receive
+ die -> ok
+ end
+ end),
Cleaner = spawn_worker_cleaner(Coord, Workers),
Ref = erlang:monitor(process, Cleaner),
Coord ! die,
- receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+ receive
+ {'DOWN', Ref, _, Cleaner, _} -> ok
+ end,
?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
end).
-
does_not_fire_if_cleanup_called() ->
?_test(begin
meck:reset(rexi),
@@ -234,18 +235,23 @@ does_not_fire_if_cleanup_called() ->
#shard{node = 'n1', ref = make_ref()},
#shard{node = 'n2', ref = make_ref()}
],
- {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+ {Coord, _} = spawn_monitor(fun() ->
+ receive
+ die -> ok
+ end
+ end),
Cleaner = spawn_worker_cleaner(Coord, Workers),
Ref = erlang:monitor(process, Cleaner),
cleanup(Workers),
Coord ! die,
- receive {'DOWN', Ref, _, _, _} -> ok end,
+ receive
+ {'DOWN', Ref, _, _, _} -> ok
+ end,
% 2 calls would be from cleanup/1 function. If cleanup process fired
% too it would have been 4 calls total.
?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
end).
-
should_clean_additional_worker_too() ->
?_test(begin
meck:reset(rexi),
@@ -253,20 +259,24 @@ should_clean_additional_worker_too() ->
Workers = [
#shard{node = 'n1', ref = make_ref()}
],
- {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end),
+ {Coord, _} = spawn_monitor(fun() ->
+ receive
+ die -> ok
+ end
+ end),
Cleaner = spawn_worker_cleaner(Coord, Workers),
add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}),
Ref = erlang:monitor(process, Cleaner),
Coord ! die,
- receive {'DOWN', Ref, _, Cleaner, _} -> ok end,
+ receive
+ {'DOWN', Ref, _, Cleaner, _} -> ok
+ end,
?assertEqual(1, meck:num_calls(rexi, kill_all, 1))
end).
-
setup() ->
ok = meck:expect(rexi, kill_all, fun(_) -> ok end).
-
teardown(_) ->
meck:unload().
diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl
index 4c4031627..30e82c29a 100644
--- a/src/fabric/src/fabric_util.erl
+++ b/src/fabric/src/fabric_util.erl
@@ -12,9 +12,19 @@
-module(fabric_util).
--export([submit_jobs/3, submit_jobs/4, cleanup/1, recv/4, get_db/1, get_db/2, error_info/1,
- update_counter/3, remove_ancestors/2, create_monitors/1, kv/2,
- remove_down_workers/2, remove_down_workers/3, doc_id_and_rev/1]).
+-export([
+ submit_jobs/3, submit_jobs/4,
+ cleanup/1,
+ recv/4,
+ get_db/1, get_db/2,
+ error_info/1,
+ update_counter/3,
+ remove_ancestors/2,
+ create_monitors/1,
+ kv/2,
+ remove_down_workers/2, remove_down_workers/3,
+ doc_id_and_rev/1
+]).
-export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0, view_timeout/1]).
-export([log_timeout/2, remove_done_workers/2]).
-export([is_users_db/1, is_replicator_db/1]).
@@ -26,8 +36,7 @@
-export([get_uuid_prefix_len/0]).
-export([isolate/1, isolate/2]).
-
--compile({inline, [{doc_id_and_rev,1}]}).
+-compile({inline, [{doc_id_and_rev, 1}]}).
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
@@ -42,20 +51,23 @@ remove_down_workers(Workers, BadNode, RingOpts) ->
Filter = fun(#shard{node = Node}, _) -> Node =/= BadNode end,
NewWorkers = fabric_dict:filter(Filter, Workers),
case fabric_ring:is_progress_possible(NewWorkers, RingOpts) of
- true ->
- {ok, NewWorkers};
- false ->
- error
+ true ->
+ {ok, NewWorkers};
+ false ->
+ error
end.
submit_jobs(Shards, EndPoint, ExtraArgs) ->
submit_jobs(Shards, fabric_rpc, EndPoint, ExtraArgs).
submit_jobs(Shards, Module, EndPoint, ExtraArgs) ->
- lists:map(fun(#shard{node=Node, name=ShardName} = Shard) ->
- Ref = rexi:cast(Node, {Module, EndPoint, [ShardName | ExtraArgs]}),
- Shard#shard{ref = Ref}
- end, Shards).
+ lists:map(
+ fun(#shard{node = Node, name = ShardName} = Shard) ->
+ Ref = rexi:cast(Node, {Module, EndPoint, [ShardName | ExtraArgs]}),
+ Shard#shard{ref = Ref}
+ end,
+ Shards
+ ).
cleanup(Workers) ->
rexi:kill_all([{Node, Ref} || #shard{node = Node, ref = Ref} <- Workers]).
@@ -88,10 +100,13 @@ timeout(Type, Default) ->
log_timeout(Workers, EndPoint) ->
CounterKey = [fabric, worker, timeouts],
couch_stats:increment_counter(CounterKey),
- lists:map(fun(#shard{node=Dest, name=Name}) ->
- Fmt = "fabric_worker_timeout ~s,~p,~p",
- couch_log:error(Fmt, [EndPoint, Dest, Name])
- end, Workers).
+ lists:map(
+ fun(#shard{node = Dest, name = Name}) ->
+ Fmt = "fabric_worker_timeout ~s,~p,~p",
+ couch_log:error(Fmt, [EndPoint, Dest, Name])
+ end,
+ Workers
+ ).
remove_done_workers(Workers, WaitingIndicator) ->
[W || {W, WI} <- fabric_dict:to_list(Workers), WI == WaitingIndicator].
@@ -103,9 +118,10 @@ get_db(DbName, Options) ->
{Local, SameZone, DifferentZone} = mem3:group_by_proximity(mem3:shards(DbName)),
% Prefer shards on the same node over other nodes, prefer shards in the same zone over
% other zones and sort each remote list by name so that we don't repeatedly try the same node.
- Shards = Local ++ lists:keysort(#shard.name, SameZone) ++ lists:keysort(#shard.name, DifferentZone),
+ Shards =
+ Local ++ lists:keysort(#shard.name, SameZone) ++ lists:keysort(#shard.name, DifferentZone),
% suppress shards from down nodes
- Nodes = [node()|erlang:nodes()],
+ Nodes = [node() | erlang:nodes()],
Live = [S || #shard{node = N} = S <- Shards, lists:member(N, Nodes)],
% Only accept factors > 1, otherwise our math breaks further down
Factor = max(2, config:get_integer("fabric", "shard_timeout_factor", 2)),
@@ -121,15 +137,16 @@ get_shard([#shard{node = Node, name = Name} | Rest], Opts, Timeout, Factor) ->
MFA = {fabric_rpc, open_shard, [Name, [{timeout, Timeout} | Opts]]},
Ref = rexi:cast(Node, self(), MFA, [sync]),
try
- receive {Ref, {ok, Db}} ->
- {ok, Db};
- {Ref, {'rexi_EXIT', {{unauthorized, _} = Error, _}}} ->
- throw(Error);
- {Ref, {'rexi_EXIT', {{forbidden, _} = Error, _}}} ->
- throw(Error);
- {Ref, Reason} ->
- couch_log:debug("Failed to open shard ~p because: ~p", [Name, Reason]),
- get_shard(Rest, Opts, Timeout, Factor)
+ receive
+ {Ref, {ok, Db}} ->
+ {ok, Db};
+ {Ref, {'rexi_EXIT', {{unauthorized, _} = Error, _}}} ->
+ throw(Error);
+ {Ref, {'rexi_EXIT', {{forbidden, _} = Error, _}}} ->
+ throw(Error);
+ {Ref, Reason} ->
+ couch_log:debug("Failed to open shard ~p because: ~p", [Name, Reason]),
+ get_shard(Rest, Opts, Timeout, Factor)
after Timeout ->
couch_log:debug("Failed to open shard ~p after: ~p", [Name, Timeout]),
get_shard(Rest, Opts, Factor * Timeout, Factor)
@@ -169,18 +186,18 @@ error_info({Error, Stack}) ->
{Error, nil, Stack}.
update_counter(Item, Incr, D) ->
- UpdateFun = fun ({Old, Count}) -> {Old, Count + Incr} end,
+ UpdateFun = fun({Old, Count}) -> {Old, Count + Incr} end,
orddict:update(make_key(Item), UpdateFun, {Item, Incr}, D).
make_key({ok, L}) when is_list(L) ->
make_key(L);
make_key([]) ->
[];
-make_key([{ok, #doc{revs= {Pos,[RevId | _]}}} | Rest]) ->
+make_key([{ok, #doc{revs = {Pos, [RevId | _]}}} | Rest]) ->
[{ok, {Pos, RevId}} | make_key(Rest)];
make_key([{{not_found, missing}, Rev} | Rest]) ->
[{not_found, Rev} | make_key(Rest)];
-make_key({ok, #doc{id=Id,revs=Revs}}) ->
+make_key({ok, #doc{id = Id, revs = Revs}}) ->
{Id, Revs};
make_key(Else) ->
Else.
@@ -190,69 +207,88 @@ remove_ancestors([], Acc) ->
lists:reverse(Acc);
remove_ancestors([{_, {{not_found, _}, Count}} = Head | Tail], Acc) ->
% any document is a descendant
- case lists:filter(fun({_,{{ok, #doc{}}, _}}) -> true; (_) -> false end, Tail) of
- [{_,{{ok, #doc{}} = Descendant, _}} | _] ->
- remove_ancestors(update_counter(Descendant, Count, Tail), Acc);
- [] ->
- remove_ancestors(Tail, [Head | Acc])
+ case
+ lists:filter(
+ fun
+ ({_, {{ok, #doc{}}, _}}) -> true;
+ (_) -> false
+ end,
+ Tail
+ )
+ of
+ [{_, {{ok, #doc{}} = Descendant, _}} | _] ->
+ remove_ancestors(update_counter(Descendant, Count, Tail), Acc);
+ [] ->
+ remove_ancestors(Tail, [Head | Acc])
end;
-remove_ancestors([{_,{{ok, #doc{revs = {Pos, Revs}}}, Count}} = Head | Tail], Acc) ->
- Descendants = lists:dropwhile(fun
- ({_,{{ok, #doc{revs = {Pos2, Revs2}}}, _}}) ->
- case lists:nthtail(erlang:min(Pos2 - Pos, length(Revs2)), Revs2) of
+remove_ancestors([{_, {{ok, #doc{revs = {Pos, Revs}}}, Count}} = Head | Tail], Acc) ->
+ Descendants = lists:dropwhile(
+ fun({_, {{ok, #doc{revs = {Pos2, Revs2}}}, _}}) ->
+ case lists:nthtail(erlang:min(Pos2 - Pos, length(Revs2)), Revs2) of
+ [] ->
+ % impossible to tell if Revs2 is a descendant - assume no
+ true;
+ History ->
+ % if Revs2 is a descendant, History is a prefix of Revs
+ not lists:prefix(History, Revs)
+ end
+ end,
+ Tail
+ ),
+ case Descendants of
[] ->
- % impossible to tell if Revs2 is a descendant - assume no
- true;
- History ->
- % if Revs2 is a descendant, History is a prefix of Revs
- not lists:prefix(History, Revs)
- end
- end, Tail),
- case Descendants of [] ->
- remove_ancestors(Tail, [Head | Acc]);
- [{Descendant, _} | _] ->
- remove_ancestors(update_counter(Descendant, Count, Tail), Acc)
+ remove_ancestors(Tail, [Head | Acc]);
+ [{Descendant, _} | _] ->
+ remove_ancestors(update_counter(Descendant, Count, Tail), Acc)
end;
remove_ancestors([Error | Tail], Acc) ->
remove_ancestors(Tail, [Error | Acc]).
create_monitors(Shards) ->
- MonRefs = lists:usort([
- rexi_utils:server_pid(N) || #shard{node=N} <- Shards
- ]),
+ MonRefs = lists:usort([rexi_utils:server_pid(N) || #shard{node = N} <- Shards]),
rexi_monitor:start(MonRefs).
%% verify only id and rev are used in key.
update_counter_test() ->
- Reply = {ok, #doc{id = <<"id">>, revs = <<"rev">>,
- body = <<"body">>, atts = <<"atts">>}},
- ?assertEqual([{{<<"id">>,<<"rev">>}, {Reply, 1}}],
- update_counter(Reply, 1, [])).
+ Reply =
+ {ok, #doc{
+ id = <<"id">>,
+ revs = <<"rev">>,
+ body = <<"body">>,
+ atts = <<"atts">>
+ }},
+ ?assertEqual(
+ [{{<<"id">>, <<"rev">>}, {Reply, 1}}],
+ update_counter(Reply, 1, [])
+ ).
remove_ancestors_test() ->
Foo1 = {ok, #doc{revs = {1, [<<"foo">>]}}},
Foo2 = {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}},
Bar1 = {ok, #doc{revs = {1, [<<"bar">>]}}},
- Bar2 = {not_found, {1,<<"bar">>}},
+ Bar2 = {not_found, {1, <<"bar">>}},
?assertEqual(
- [kv(Bar1,1), kv(Foo1,1)],
- remove_ancestors([kv(Bar1,1), kv(Foo1,1)], [])
+ [kv(Bar1, 1), kv(Foo1, 1)],
+ remove_ancestors([kv(Bar1, 1), kv(Foo1, 1)], [])
),
?assertEqual(
- [kv(Bar1,1), kv(Foo2,2)],
- remove_ancestors([kv(Bar1,1), kv(Foo1,1), kv(Foo2,1)], [])
+ [kv(Bar1, 1), kv(Foo2, 2)],
+ remove_ancestors([kv(Bar1, 1), kv(Foo1, 1), kv(Foo2, 1)], [])
),
?assertEqual(
- [kv(Bar1,2)],
- remove_ancestors([kv(Bar2,1), kv(Bar1,1)], [])
+ [kv(Bar1, 2)],
+ remove_ancestors([kv(Bar2, 1), kv(Bar1, 1)], [])
).
is_replicator_db(DbName) ->
path_ends_with(DbName, <<"_replicator">>).
is_users_db(DbName) ->
- ConfigName = list_to_binary(config:get(
- "chttpd_auth", "authentication_db", "_users")),
+ ConfigName = list_to_binary(
+ config:get(
+ "chttpd_auth", "authentication_db", "_users"
+ )
+ ),
DbName == ConfigName orelse path_ends_with(DbName, <<"_users">>).
path_ends_with(Path, Suffix) ->
@@ -269,79 +305,54 @@ open_cluster_db(#shard{dbname = DbName, opts = Options}) ->
end.
open_cluster_db(DbName, Opts) ->
- {SecProps} = fabric:get_security(DbName), % as admin
+ % as admin
+ {SecProps} = fabric:get_security(DbName),
UserCtx = couch_util:get_value(user_ctx, Opts, #user_ctx{}),
{ok, Db} = couch_db:clustered_db(DbName, UserCtx, SecProps),
Db.
%% test function
kv(Item, Count) ->
- {make_key(Item), {Item,Count}}.
+ {make_key(Item), {Item, Count}}.
-doc_id_and_rev(#doc{id=DocId, revs={RevNum, [RevHash|_]}}) ->
+doc_id_and_rev(#doc{id = DocId, revs = {RevNum, [RevHash | _]}}) ->
{DocId, {RevNum, RevHash}}.
-
is_partitioned(DbName0) when is_binary(DbName0) ->
Shards = mem3:shards(fabric:dbname(DbName0)),
is_partitioned(open_cluster_db(hd(Shards)));
-
is_partitioned(Db) ->
couch_db:is_partitioned(Db).
-
validate_all_docs_args(DbName, Args) when is_binary(DbName) ->
Shards = mem3:shards(fabric:dbname(DbName)),
Db = open_cluster_db(hd(Shards)),
validate_all_docs_args(Db, Args);
-
validate_all_docs_args(Db, Args) ->
true = couch_db:is_clustered(Db),
couch_mrview_util:validate_all_docs_args(Db, Args).
-
validate_args(DbName, DDoc, Args) when is_binary(DbName) ->
Shards = mem3:shards(fabric:dbname(DbName)),
Db = open_cluster_db(hd(Shards)),
validate_args(Db, DDoc, Args);
-
validate_args(Db, DDoc, Args) ->
true = couch_db:is_clustered(Db),
couch_mrview_util:validate_args(Db, DDoc, Args).
-
upgrade_mrargs(#mrargs{} = Args) ->
Args;
-
-upgrade_mrargs({mrargs,
- ViewType,
- Reduce,
- PreflightFun,
- StartKey,
- StartKeyDocId,
- EndKey,
- EndKeyDocId,
- Keys,
- Direction,
- Limit,
- Skip,
- GroupLevel,
- Group,
- Stale,
- MultiGet,
- InclusiveEnd,
- IncludeDocs,
- DocOptions,
- UpdateSeq,
- Conflicts,
- Callback,
- Sorted,
- Extra}) ->
- {Stable, Update} = case Stale of
- ok -> {true, false};
- update_after -> {true, lazy};
- _ -> {false, true}
- end,
+upgrade_mrargs(
+ {mrargs, ViewType, Reduce, PreflightFun, StartKey, StartKeyDocId, EndKey, EndKeyDocId, Keys,
+ Direction, Limit, Skip, GroupLevel, Group, Stale, MultiGet, InclusiveEnd, IncludeDocs,
+ DocOptions, UpdateSeq, Conflicts, Callback, Sorted, Extra}
+) ->
+ {Stable, Update} =
+ case Stale of
+ ok -> {true, false};
+ update_after -> {true, lazy};
+ _ -> {false, true}
+ end,
#mrargs{
view_type = ViewType,
reduce = Reduce,
@@ -369,18 +380,19 @@ upgrade_mrargs({mrargs,
extra = Extra
}.
-
worker_ranges(Workers) ->
- Ranges = fabric_dict:fold(fun(#shard{range=[X, Y]}, _, Acc) ->
- [{X, Y} | Acc]
- end, [], Workers),
+ Ranges = fabric_dict:fold(
+ fun(#shard{range = [X, Y]}, _, Acc) ->
+ [{X, Y} | Acc]
+ end,
+ [],
+ Workers
+ ),
lists:usort(Ranges).
-
get_uuid_prefix_len() ->
config:get_integer("fabric", "uuid_prefix_len", 7).
-
% If we issue multiple fabric calls from the same process we have to isolate
% them so in case of error they don't pollute the processes dictionary or the
% mailbox
@@ -388,7 +400,6 @@ get_uuid_prefix_len() ->
isolate(Fun) ->
isolate(Fun, infinity).
-
isolate(Fun, Timeout) ->
{Pid, Ref} = erlang:spawn_monitor(fun() -> exit(do_isolate(Fun)) end),
receive
@@ -402,22 +413,19 @@ isolate(Fun, Timeout) ->
erlang:error(timeout)
end.
-
% OTP_RELEASE is defined in OTP 21+ only
-ifdef(OTP_RELEASE).
-
do_isolate(Fun) ->
try
{'$isolres', Fun()}
- catch Tag:Reason:Stack ->
- {'$isolerr', Tag, Reason, Stack}
+ catch
+ Tag:Reason:Stack ->
+ {'$isolerr', Tag, Reason, Stack}
end.
-
-else.
-
do_isolate(Fun) ->
try
{'$isolres', Fun()}
@@ -425,10 +433,8 @@ do_isolate(Fun) ->
{'$isolerr', Tag, Reason, Stack}
end.
-
-endif.
-
get_db_timeout_test() ->
% Q=1, N=1
?assertEqual(20000, get_db_timeout(1, 2, 100, 60000)),
diff --git a/src/fabric/src/fabric_view.erl b/src/fabric/src/fabric_view.erl
index 3048e8987..c2ef13392 100644
--- a/src/fabric/src/fabric_view.erl
+++ b/src/fabric/src/fabric_view.erl
@@ -12,10 +12,18 @@
-module(fabric_view).
--export([remove_overlapping_shards/2, maybe_send_row/1,
- transform_row/1, keydict/1, extract_view/4, get_shards/2,
- check_down_shards/2, handle_worker_exit/3,
- get_shard_replacements/2, maybe_update_others/5]).
+-export([
+ remove_overlapping_shards/2,
+ maybe_send_row/1,
+ transform_row/1,
+ keydict/1,
+ extract_view/4,
+ get_shards/2,
+ check_down_shards/2,
+ handle_worker_exit/3,
+ get_shard_replacements/2,
+ maybe_update_others/5
+]).
-export([fix_skip_and_limit/1]).
-include_lib("fabric/include/fabric.hrl").
@@ -27,7 +35,7 @@
-spec check_down_shards(#collector{}, node()) ->
{ok, #collector{}} | {error, any()}.
check_down_shards(Collector, BadNode) ->
- #collector{callback=Callback, counters=Counters, user_acc=Acc} = Collector,
+ #collector{callback = Callback, counters = Counters, user_acc = Acc} = Collector,
Filter = fun(#shard{node = Node}, _) -> Node == BadNode end,
BadCounters = fabric_dict:filter(Filter, Counters),
case fabric_dict:size(BadCounters) > 0 of
@@ -42,24 +50,21 @@ check_down_shards(Collector, BadNode) ->
%% @doc Handle a worker that dies during a stream
-spec handle_worker_exit(#collector{}, #shard{}, any()) -> {error, any()}.
handle_worker_exit(Collector, _Worker, Reason) ->
- #collector{callback=Callback, user_acc=Acc} = Collector,
+ #collector{callback = Callback, user_acc = Acc} = Collector,
{ok, Resp} = Callback({error, fabric_util:error_info(Reason)}, Acc),
{error, Resp}.
-
-spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}]) ->
[{#shard{}, any()}].
remove_overlapping_shards(#shard{} = Shard, Counters) ->
remove_overlapping_shards(Shard, Counters, fun stop_worker/1).
-
-spec remove_overlapping_shards(#shard{}, [{#shard{}, any()}], fun()) ->
[{#shard{}, any()}].
remove_overlapping_shards(#shard{} = Shard, Counters, RemoveCb) ->
Counters1 = filter_exact_copies(Shard, Counters, RemoveCb),
filter_possible_overlaps(Shard, Counters1, RemoveCb).
-
filter_possible_overlaps(Shard, Counters, RemoveCb) ->
Ranges0 = fabric_util:worker_ranges(Counters),
#shard{range = [BShard, EShard]} = Shard,
@@ -83,54 +88,59 @@ filter_possible_overlaps(Shard, Counters, RemoveCb) ->
B1 =< B2
end,
Ring = mem3_util:get_ring(Ranges, SortFun, MinB, MaxE),
- fabric_dict:filter(fun
- (S, _) when S =:= Shard ->
- % Keep the original shard
- true;
- (#shard{range = [B, E]} = S, _) ->
- case lists:member({B, E}, Ring) of
- true ->
- true; % Keep it
- false ->
- % Duplicate range, delete after calling callback function
- case is_function(RemoveCb) of
- true -> RemoveCb(S);
- false -> ok
- end,
- false
- end
- end, Counters).
-
+ fabric_dict:filter(
+ fun
+ (S, _) when S =:= Shard ->
+ % Keep the original shard
+ true;
+ (#shard{range = [B, E]} = S, _) ->
+ case lists:member({B, E}, Ring) of
+ true ->
+ % Keep it
+ true;
+ false ->
+ % Duplicate range, delete after calling callback function
+ case is_function(RemoveCb) of
+ true -> RemoveCb(S);
+ false -> ok
+ end,
+ false
+ end
+ end,
+ Counters
+ ).
filter_exact_copies(#shard{range = Range0} = Shard0, Shards, Cb) ->
- fabric_dict:filter(fun
- (Shard, _) when Shard =:= Shard0 ->
- true; % Don't remove ourselves
- (#shard{range = Range} = Shard, _) when Range =:= Range0 ->
- case is_function(Cb) of
- true -> Cb(Shard);
- false -> ok
- end,
- false;
- (_, _) ->
- true
- end, Shards).
-
+ fabric_dict:filter(
+ fun
+ (Shard, _) when Shard =:= Shard0 ->
+ % Don't remove ourselves
+ true;
+ (#shard{range = Range} = Shard, _) when Range =:= Range0 ->
+ case is_function(Cb) of
+ true -> Cb(Shard);
+ false -> ok
+ end,
+ false;
+ (_, _) ->
+ true
+ end,
+ Shards
+ ).
stop_worker(#shard{ref = Ref, node = Node}) ->
rexi:kill(Node, Ref).
-
-maybe_send_row(#collector{limit=0} = State) ->
- #collector{counters=Counters, user_acc=AccIn, callback=Callback} = State,
+maybe_send_row(#collector{limit = 0} = State) ->
+ #collector{counters = Counters, user_acc = AccIn, callback = Callback} = State,
case fabric_dict:any(0, Counters) of
- true ->
- % we still need to send the total/offset header
- {ok, State};
- false ->
- erase(meta_sent),
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc=Acc}}
+ true ->
+ % we still need to send the total/offset header
+ {ok, State};
+ false ->
+ erase(meta_sent),
+ {_, Acc} = Callback(complete, AccIn),
+ {stop, State#collector{user_acc = Acc}}
end;
maybe_send_row(State) ->
#collector{
@@ -141,82 +151,95 @@ maybe_send_row(State) ->
user_acc = AccIn
} = State,
case fabric_dict:any(0, Counters) of
- true ->
- {ok, State};
- false ->
- try get_next_row(State) of
- {_, NewState} when Skip > 0 ->
- maybe_send_row(NewState#collector{skip=Skip-1});
- {Row0, NewState} ->
- Row1 = possibly_embed_doc(NewState, Row0),
- Row2 = detach_partition(Row1),
- Row3 = transform_row(Row2),
- case Callback(Row3, AccIn) of
- {stop, Acc} ->
- {stop, NewState#collector{user_acc=Acc, limit=Limit-1}};
- {ok, Acc} ->
- maybe_send_row(NewState#collector{user_acc=Acc, limit=Limit-1})
+ true ->
+ {ok, State};
+ false ->
+ try get_next_row(State) of
+ {_, NewState} when Skip > 0 ->
+ maybe_send_row(NewState#collector{skip = Skip - 1});
+ {Row0, NewState} ->
+ Row1 = possibly_embed_doc(NewState, Row0),
+ Row2 = detach_partition(Row1),
+ Row3 = transform_row(Row2),
+ case Callback(Row3, AccIn) of
+ {stop, Acc} ->
+ {stop, NewState#collector{user_acc = Acc, limit = Limit - 1}};
+ {ok, Acc} ->
+ maybe_send_row(NewState#collector{user_acc = Acc, limit = Limit - 1})
+ end
+ catch
+ complete ->
+ erase(meta_sent),
+ {_, Acc} = Callback(complete, AccIn),
+ {stop, State#collector{user_acc = Acc}}
end
- catch complete ->
- erase(meta_sent),
- {_, Acc} = Callback(complete, AccIn),
- {stop, State#collector{user_acc=Acc}}
- end
end.
%% if include_docs=true is used when keys and
%% the values contain "_id" then use the "_id"s
%% to retrieve documents and embed in result
-possibly_embed_doc(_State,
- #view_row{id=reduced}=Row) ->
+possibly_embed_doc(
+ _State,
+ #view_row{id = reduced} = Row
+) ->
Row;
-possibly_embed_doc(_State,
- #view_row{value=undefined}=Row) ->
+possibly_embed_doc(
+ _State,
+ #view_row{value = undefined} = Row
+) ->
Row;
-possibly_embed_doc(#collector{db_name=DbName, query_args=Args},
- #view_row{key=_Key, id=_Id, value=Value, doc=_Doc}=Row) ->
- #mrargs{include_docs=IncludeDocs} = Args,
+possibly_embed_doc(
+ #collector{db_name = DbName, query_args = Args},
+ #view_row{key = _Key, id = _Id, value = Value, doc = _Doc} = Row
+) ->
+ #mrargs{include_docs = IncludeDocs} = Args,
case IncludeDocs andalso is_tuple(Value) of
- true ->
- {Props} = Value,
- Rev0 = couch_util:get_value(<<"_rev">>, Props),
- case couch_util:get_value(<<"_id">>,Props) of
- null -> Row#view_row{doc=null};
- undefined -> Row;
- IncId ->
- % use separate process to call fabric:open_doc
- % to not interfere with current call
- {Pid, Ref} = spawn_monitor(fun() ->
- exit(
- case Rev0 of
+ true ->
+ {Props} = Value,
+ Rev0 = couch_util:get_value(<<"_rev">>, Props),
+ case couch_util:get_value(<<"_id">>, Props) of
+ null ->
+ Row#view_row{doc = null};
undefined ->
- case fabric:open_doc(DbName, IncId, []) of
- {ok, NewDoc} ->
- Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
- {not_found, _} ->
- Row#view_row{doc=null};
- Else ->
- Row#view_row{doc={error, Else}}
- end;
- Rev0 ->
- Rev = couch_doc:parse_rev(Rev0),
- case fabric:open_revs(DbName, IncId, [Rev], []) of
- {ok, [{ok, NewDoc}]} ->
- Row#view_row{doc=couch_doc:to_json_obj(NewDoc,[])};
- {ok, [{{not_found, _}, Rev}]} ->
- Row#view_row{doc=null};
- Else ->
- Row#view_row{doc={error, Else}}
+ Row;
+ IncId ->
+ % use separate process to call fabric:open_doc
+ % to not interfere with current call
+ {Pid, Ref} = spawn_monitor(fun() ->
+ exit(
+ case Rev0 of
+ undefined ->
+ case fabric:open_doc(DbName, IncId, []) of
+ {ok, NewDoc} ->
+ Row#view_row{doc = couch_doc:to_json_obj(NewDoc, [])};
+ {not_found, _} ->
+ Row#view_row{doc = null};
+ Else ->
+ Row#view_row{doc = {error, Else}}
+ end;
+ Rev0 ->
+ Rev = couch_doc:parse_rev(Rev0),
+ case fabric:open_revs(DbName, IncId, [Rev], []) of
+ {ok, [{ok, NewDoc}]} ->
+ Row#view_row{doc = couch_doc:to_json_obj(NewDoc, [])};
+ {ok, [{{not_found, _}, Rev}]} ->
+ Row#view_row{doc = null};
+ Else ->
+ Row#view_row{doc = {error, Else}}
+ end
+ end
+ )
+ end),
+ receive
+ {'DOWN', Ref, process, Pid, Resp} ->
+ Resp
end
- end) end),
- receive {'DOWN',Ref,process,Pid, Resp} ->
- Resp
- end
- end;
- _ -> Row
+ end;
+ _ ->
+ Row
end.
-detach_partition(#view_row{key={p, _Partition, Key}} = Row) ->
+detach_partition(#view_row{key = {p, _Partition, Key}} = Row) ->
Row#view_row{key = Key};
detach_partition(#view_row{} = Row) ->
Row.
@@ -224,8 +247,11 @@ detach_partition(#view_row{} = Row) ->
keydict(undefined) ->
undefined;
keydict(Keys) ->
- {Dict,_} = lists:foldl(fun(K, {D,I}) -> {dict:store(K,I,D), I+1} end,
- {dict:new(),0}, Keys),
+ {Dict, _} = lists:foldl(
+ fun(K, {D, I}) -> {dict:store(K, I, D), I + 1} end,
+ {dict:new(), 0},
+ Keys
+ ),
Dict.
%% internal %%
@@ -243,30 +269,34 @@ get_next_row(#collector{reducer = RedSrc} = St) when RedSrc =/= undefined ->
} = St,
{Key, RestKeys} = find_next_key(Keys, Dir, Collation, RowDict),
case reduce_row_dict_take(Key, RowDict, Collation) of
- {Records, NewRowDict} ->
- Counters = lists:foldl(fun(#view_row{worker={Worker,From}}, CntrsAcc) ->
- case From of
- {Pid, _} when is_pid(Pid) ->
- gen_server:reply(From, ok);
- Pid when is_pid(Pid) ->
- rexi:stream_ack(From)
- end,
- fabric_dict:update_counter(Worker, -1, CntrsAcc)
- end, Counters0, Records),
- Wrapped = [[V] || #view_row{value=V} <- Records],
- {ok, [Reduced]} = couch_query_servers:rereduce(Lang, [RedSrc], Wrapped),
- {ok, Finalized} = couch_query_servers:finalize(RedSrc, Reduced),
- NewSt = St#collector{keys=RestKeys, rows=NewRowDict, counters=Counters},
- {#view_row{key=Key, id=reduced, value=Finalized}, NewSt};
- error ->
- get_next_row(St#collector{keys=RestKeys})
+ {Records, NewRowDict} ->
+ Counters = lists:foldl(
+ fun(#view_row{worker = {Worker, From}}, CntrsAcc) ->
+ case From of
+ {Pid, _} when is_pid(Pid) ->
+ gen_server:reply(From, ok);
+ Pid when is_pid(Pid) ->
+ rexi:stream_ack(From)
+ end,
+ fabric_dict:update_counter(Worker, -1, CntrsAcc)
+ end,
+ Counters0,
+ Records
+ ),
+ Wrapped = [[V] || #view_row{value = V} <- Records],
+ {ok, [Reduced]} = couch_query_servers:rereduce(Lang, [RedSrc], Wrapped),
+ {ok, Finalized} = couch_query_servers:finalize(RedSrc, Reduced),
+ NewSt = St#collector{keys = RestKeys, rows = NewRowDict, counters = Counters},
+ {#view_row{key = Key, id = reduced, value = Finalized}, NewSt};
+ error ->
+ get_next_row(St#collector{keys = RestKeys})
end;
get_next_row(State) ->
- #collector{rows = [Row|Rest], counters = Counters0} = State,
+ #collector{rows = [Row | Rest], counters = Counters0} = State,
{Worker, From} = Row#view_row.worker,
rexi:stream_ack(From),
Counters1 = fabric_dict:update_counter(Worker, -1, Counters0),
- {Row, State#collector{rows = Rest, counters=Counters1}}.
+ {Row, State#collector{rows = Rest, counters = Counters1}}.
reduce_row_dict_take(Key, Dict, <<"raw">>) ->
dict:take(Key, Dict);
@@ -278,9 +308,13 @@ reduce_row_dict_take(Key, Dict, _Collation) ->
error;
[_ | _] ->
{Keys, Vals} = lists:unzip(KVs),
- NewDict = lists:foldl(fun(K, Acc) ->
- dict:erase(K, Acc)
- end, Dict, Keys),
+ NewDict = lists:foldl(
+ fun(K, Acc) ->
+ dict:erase(K, Acc)
+ end,
+ Dict,
+ Keys
+ ),
{lists:flatten(Vals), NewDict}
end.
@@ -290,28 +324,28 @@ find_next_key(nil, Dir, Collation, RowDict) ->
find_next_key(undefined, Dir, Collation, RowDict) ->
CmpFun = fun(A, B) -> compare(Dir, Collation, A, B) end,
case lists:sort(CmpFun, dict:fetch_keys(RowDict)) of
- [] ->
- throw(complete);
- [Key|_] ->
- {Key, nil}
+ [] ->
+ throw(complete);
+ [Key | _] ->
+ {Key, nil}
end;
find_next_key([], _, _, _) ->
throw(complete);
-find_next_key([Key|Rest], _, _, _) ->
+find_next_key([Key | Rest], _, _, _) ->
{Key, Rest}.
-transform_row(#view_row{value={[{reduce_overflow_error, Msg}]}}) ->
- {row, [{key,null}, {id,error}, {value,reduce_overflow_error}, {reason,Msg}]};
-transform_row(#view_row{key=Key, id=reduced, value=Value}) ->
- {row, [{key,Key}, {value,Value}]};
-transform_row(#view_row{key=Key, id=undefined}) ->
- {row, [{key,Key}, {id,error}, {value,not_found}]};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=undefined}) ->
- {row, [{id,Id}, {key,Key}, {value,Value}]};
-transform_row(#view_row{key=Key, id=_Id, value=_Value, doc={error,Reason}}) ->
- {row, [{id,error}, {key,Key}, {value,Reason}]};
-transform_row(#view_row{key=Key, id=Id, value=Value, doc=Doc}) ->
- {row, [{id,Id}, {key,Key}, {value,Value}, {doc,Doc}]}.
+transform_row(#view_row{value = {[{reduce_overflow_error, Msg}]}}) ->
+ {row, [{key, null}, {id, error}, {value, reduce_overflow_error}, {reason, Msg}]};
+transform_row(#view_row{key = Key, id = reduced, value = Value}) ->
+ {row, [{key, Key}, {value, Value}]};
+transform_row(#view_row{key = Key, id = undefined}) ->
+ {row, [{key, Key}, {id, error}, {value, not_found}]};
+transform_row(#view_row{key = Key, id = Id, value = Value, doc = undefined}) ->
+ {row, [{id, Id}, {key, Key}, {value, Value}]};
+transform_row(#view_row{key = Key, id = _Id, value = _Value, doc = {error, Reason}}) ->
+ {row, [{id, error}, {key, Key}, {value, Reason}]};
+transform_row(#view_row{key = Key, id = Id, value = Value, doc = Doc}) ->
+ {row, [{id, Id}, {key, Key}, {value, Value}, {doc, Doc}]}.
compare(fwd, <<"raw">>, A, B) -> A < B;
compare(rev, <<"raw">>, A, B) -> B < A;
@@ -322,16 +356,17 @@ extract_view(Pid, ViewName, [], _ViewType) ->
couch_log:error("missing_named_view ~p", [ViewName]),
exit(Pid, kill),
exit(missing_named_view);
-extract_view(Pid, ViewName, [View|Rest], ViewType) ->
+extract_view(Pid, ViewName, [View | Rest], ViewType) ->
case lists:member(ViewName, view_names(View, ViewType)) of
- true ->
- if ViewType == reduce ->
- {index_of(ViewName, view_names(View, reduce)), View};
true ->
- View
- end;
- false ->
- extract_view(Pid, ViewName, Rest, ViewType)
+ if
+ ViewType == reduce ->
+ {index_of(ViewName, view_names(View, reduce)), View};
+ true ->
+ View
+ end;
+ false ->
+ extract_view(Pid, ViewName, Rest, ViewType)
end.
view_names(View, Type) when Type == red_map; Type == reduce ->
@@ -344,16 +379,17 @@ index_of(X, List) ->
index_of(_X, [], _I) ->
not_found;
-index_of(X, [X|_Rest], I) ->
+index_of(X, [X | _Rest], I) ->
I;
-index_of(X, [_|Rest], I) ->
- index_of(X, Rest, I+1).
+index_of(X, [_ | Rest], I) ->
+ index_of(X, Rest, I + 1).
get_shards(Db, #mrargs{} = Args) ->
DbPartitioned = fabric_util:is_partitioned(Db),
Partition = couch_mrview_util:get_extra(Args, partition),
- if DbPartitioned orelse Partition == undefined -> ok; true ->
- throw({bad_request, <<"partition specified on non-partitioned db">>})
+ if
+ DbPartitioned orelse Partition == undefined -> ok;
+ true -> throw({bad_request, <<"partition specified on non-partitioned db">>})
end,
DbName = fabric:dbname(Db),
% Decide which version of mem3:shards/1,2 or
@@ -372,12 +408,20 @@ get_shards(Db, #mrargs{} = Args) ->
{Shards, [{any, Shards}]}
end.
-maybe_update_others(DbName, DDoc, ShardsInvolved, ViewName,
- #mrargs{update=lazy} = Args) ->
+maybe_update_others(
+ DbName,
+ DDoc,
+ ShardsInvolved,
+ ViewName,
+ #mrargs{update = lazy} = Args
+) ->
ShardsNeedUpdated = mem3:shards(DbName) -- ShardsInvolved,
- lists:foreach(fun(#shard{node=Node, name=ShardName}) ->
- rpc:cast(Node, fabric_rpc, update_mrview, [ShardName, DDoc, ViewName, Args])
- end, ShardsNeedUpdated);
+ lists:foreach(
+ fun(#shard{node = Node, name = ShardName}) ->
+ rpc:cast(Node, fabric_rpc, update_mrview, [ShardName, DDoc, ViewName, Args])
+ end,
+ ShardsNeedUpdated
+ );
maybe_update_others(_DbName, _DDoc, _ShardsInvolved, _ViewName, _Args) ->
ok.
@@ -385,48 +429,57 @@ get_shard_replacements(DbName, UsedShards0) ->
% We only want to generate a replacements list from shards
% that aren't already used.
AllLiveShards = mem3:live_shards(DbName, [node() | nodes()]),
- UsedShards = [S#shard{ref=undefined} || S <- UsedShards0],
+ UsedShards = [S#shard{ref = undefined} || S <- UsedShards0],
get_shard_replacements_int(AllLiveShards -- UsedShards, UsedShards).
get_shard_replacements_int(UnusedShards, UsedShards) ->
% If we have more than one copy of a range then we don't
% want to try and add a replacement to any copy.
- RangeCounts = lists:foldl(fun(#shard{range=R}, Acc) ->
- dict:update_counter(R, 1, Acc)
- end, dict:new(), UsedShards),
+ RangeCounts = lists:foldl(
+ fun(#shard{range = R}, Acc) ->
+ dict:update_counter(R, 1, Acc)
+ end,
+ dict:new(),
+ UsedShards
+ ),
% For each seq shard range with a count of 1, find any
% possible replacements from the unused shards. The
% replacement list is keyed by range.
- lists:foldl(fun(#shard{range = [B, E] = Range}, Acc) ->
- case dict:find(Range, RangeCounts) of
- {ok, 1} ->
- Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
- % Only keep non-empty lists of replacements
- if Repls == [] -> Acc; true ->
- [{Range, Repls} | Acc]
- end;
- _ ->
- Acc
- end
- end, [], UsedShards).
-
--spec fix_skip_and_limit(#mrargs{}) -> {CoordArgs::#mrargs{}, WorkerArgs::#mrargs{}}.
+ lists:foldl(
+ fun(#shard{range = [B, E] = Range}, Acc) ->
+ case dict:find(Range, RangeCounts) of
+ {ok, 1} ->
+ Repls = mem3_util:non_overlapping_shards(UnusedShards, B, E),
+ % Only keep non-empty lists of replacements
+ if
+ Repls == [] -> Acc;
+ true -> [{Range, Repls} | Acc]
+ end;
+ _ ->
+ Acc
+ end
+ end,
+ [],
+ UsedShards
+ ).
+
+-spec fix_skip_and_limit(#mrargs{}) -> {CoordArgs :: #mrargs{}, WorkerArgs :: #mrargs{}}.
fix_skip_and_limit(#mrargs{} = Args) ->
- {CoordArgs, WorkerArgs} = case couch_mrview_util:get_extra(Args, partition) of
- undefined ->
- #mrargs{skip=Skip, limit=Limit}=Args,
- {Args, Args#mrargs{skip=0, limit=Skip+Limit}};
- _Partition ->
- {Args#mrargs{skip=0}, Args}
- end,
+ {CoordArgs, WorkerArgs} =
+ case couch_mrview_util:get_extra(Args, partition) of
+ undefined ->
+ #mrargs{skip = Skip, limit = Limit} = Args,
+ {Args, Args#mrargs{skip = 0, limit = Skip + Limit}};
+ _Partition ->
+ {Args#mrargs{skip = 0}, Args}
+ end,
%% the coordinator needs to finalize each row, so make sure the shards don't
{CoordArgs, remove_finalizer(WorkerArgs)}.
remove_finalizer(Args) ->
couch_mrview_util:set_extra(Args, finalizer, null).
-
remove_overlapping_shards_test() ->
Cb = undefined,
@@ -436,29 +489,42 @@ remove_overlapping_shards_test() ->
Shard1 = mk_shard("node-3", [11, 20]),
Shards1 = fabric_dict:store(Shard1, nil, Shards),
R1 = remove_overlapping_shards(Shard1, Shards1, Cb),
- ?assertEqual([{0, 10}, {11, 20}, {21, ?RING_END}],
- fabric_util:worker_ranges(R1)),
+ ?assertEqual(
+ [{0, 10}, {11, 20}, {21, ?RING_END}],
+ fabric_util:worker_ranges(R1)
+ ),
?assert(fabric_dict:is_key(Shard1, R1)),
% Split overlap (shard overlap multiple workers)
Shard2 = mk_shard("node-3", [0, 20]),
Shards2 = fabric_dict:store(Shard2, nil, Shards),
R2 = remove_overlapping_shards(Shard2, Shards2, Cb),
- ?assertEqual([{0, 20}, {21, ?RING_END}],
- fabric_util:worker_ranges(R2)),
+ ?assertEqual(
+ [{0, 20}, {21, ?RING_END}],
+ fabric_util:worker_ranges(R2)
+ ),
?assert(fabric_dict:is_key(Shard2, R2)).
-
get_shard_replacements_test() ->
- Unused = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n1", 11, 20}, {"n1", 21, ?RING_END},
- {"n2", 0, 4}, {"n2", 5, 10}, {"n2", 11, 20},
- {"n3", 0, 21, ?RING_END}
- ]],
- Used = [mk_shard(N, [B, E]) || {N, B, E} <- [
- {"n2", 21, ?RING_END},
- {"n3", 0, 10}, {"n3", 11, 20}
- ]],
+ Unused = [
+ mk_shard(N, [B, E])
+ || {N, B, E} <- [
+ {"n1", 11, 20},
+ {"n1", 21, ?RING_END},
+ {"n2", 0, 4},
+ {"n2", 5, 10},
+ {"n2", 11, 20},
+ {"n3", 0, 21, ?RING_END}
+ ]
+ ],
+ Used = [
+ mk_shard(N, [B, E])
+ || {N, B, E} <- [
+ {"n2", 21, ?RING_END},
+ {"n3", 0, 10},
+ {"n3", 11, 20}
+ ]
+ ],
Res = lists:sort(get_shard_replacements_int(Unused, Used)),
% Notice that [0, 10] range can be replaced by spawning the [0, 4] and [5,
% 10] workers on n1
@@ -469,22 +535,25 @@ get_shard_replacements_test() ->
],
?assertEqual(Expect, Res).
-
mk_cnts(Ranges, NoNodes) ->
- orddict:from_list([{Shard,nil}
- || Shard <-
- lists:flatten(lists:map(
- fun(Range) ->
- mk_shards(NoNodes,Range,[])
- end, Ranges))]
- ).
-
-mk_shards(0,_Range,Shards) ->
- Shards;
-mk_shards(NoNodes,Range,Shards) ->
- Name ="node-" ++ integer_to_list(NoNodes),
- mk_shards(NoNodes-1,Range, [mk_shard(Name, Range) | Shards]).
+ orddict:from_list([
+ {Shard, nil}
+ || Shard <-
+ lists:flatten(
+ lists:map(
+ fun(Range) ->
+ mk_shards(NoNodes, Range, [])
+ end,
+ Ranges
+ )
+ )
+ ]).
+mk_shards(0, _Range, Shards) ->
+ Shards;
+mk_shards(NoNodes, Range, Shards) ->
+ Name = "node-" ++ integer_to_list(NoNodes),
+ mk_shards(NoNodes - 1, Range, [mk_shard(Name, Range) | Shards]).
mk_shard(Name, Range) ->
Node = list_to_atom(Name),
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl
index 1b1aa8a9c..4bd8e9522 100644
--- a/src/fabric/src/fabric_view_all_docs.erl
+++ b/src/fabric/src/fabric_view_all_docs.erl
@@ -13,19 +13,21 @@
-module(fabric_view_all_docs).
-export([go/5]).
--export([open_doc/4]). % exported for spawn
+% exported for spawn
+-export([open_doc/4]).
-include_lib("fabric/include/fabric.hrl").
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-go(Db, Options, #mrargs{keys=undefined} = QueryArgs, Callback, Acc) ->
+go(Db, Options, #mrargs{keys = undefined} = QueryArgs, Callback, Acc) ->
{CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(QueryArgs),
DbName = fabric:dbname(Db),
{Shards, RingOpts} = shards(Db, QueryArgs),
Workers0 = fabric_util:submit_jobs(
- Shards, fabric_rpc, all_docs, [Options, WorkerArgs]),
+ Shards, fabric_rpc, all_docs, [Options, WorkerArgs]
+ ),
RexiMon = fabric_util:create_monitors(Workers0),
try
case fabric_streams:start(Workers0, #shard.ref, RingOpts) of
@@ -50,8 +52,6 @@ go(Db, Options, #mrargs{keys=undefined} = QueryArgs, Callback, Acc) ->
after
rexi_monitor:stop(RexiMon)
end;
-
-
go(DbName, Options, QueryArgs, Callback, Acc0) ->
#mrargs{
direction = Dir,
@@ -64,45 +64,51 @@ go(DbName, Options, QueryArgs, Callback, Acc0) ->
extra = Extra,
update_seq = UpdateSeq
} = QueryArgs,
- DocOptions1 = case Conflicts of
- true -> [conflicts|DocOptions0];
- _ -> DocOptions0
- end,
+ DocOptions1 =
+ case Conflicts of
+ true -> [conflicts | DocOptions0];
+ _ -> DocOptions0
+ end,
SpawnFun = fun(Key) ->
spawn_monitor(?MODULE, open_doc, [DbName, Options ++ DocOptions1, Key, IncludeDocs])
end,
MaxJobs = all_docs_concurrency(),
- Keys1 = case Dir of
- fwd -> Keys0;
- _ -> lists:reverse(Keys0)
- end,
- Keys2 = case Skip < length(Keys1) of
- true -> lists:nthtail(Skip, Keys1);
- false -> []
- end,
- Keys3 = case Limit < length(Keys2) of
- true -> lists:sublist(Keys2, Limit);
- false -> Keys2
- end,
+ Keys1 =
+ case Dir of
+ fwd -> Keys0;
+ _ -> lists:reverse(Keys0)
+ end,
+ Keys2 =
+ case Skip < length(Keys1) of
+ true -> lists:nthtail(Skip, Keys1);
+ false -> []
+ end,
+ Keys3 =
+ case Limit < length(Keys2) of
+ true -> lists:sublist(Keys2, Limit);
+ false -> Keys2
+ end,
%% namespace can be _set_ to `undefined`, so we want to simulate an enum here
- Namespace = case couch_util:get_value(namespace, Extra) of
- <<"_all_docs">> -> <<"_all_docs">>;
- <<"_design">> -> <<"_design">>;
- <<"_local">> -> <<"_local">>;
- _ -> <<"_all_docs">>
- end,
+ Namespace =
+ case couch_util:get_value(namespace, Extra) of
+ <<"_all_docs">> -> <<"_all_docs">>;
+ <<"_design">> -> <<"_design">>;
+ <<"_local">> -> <<"_local">>;
+ _ -> <<"_all_docs">>
+ end,
Timeout = fabric_util:all_docs_timeout(),
{_, Ref} = spawn_monitor(fun() ->
exit(fabric:get_doc_count(DbName, Namespace))
end),
receive
{'DOWN', Ref, _, _, {ok, TotalRows}} ->
- Meta = case UpdateSeq of
- false ->
- [{total, TotalRows}, {offset, null}];
- true ->
- [{total, TotalRows}, {offset, null}, {update_seq, null}]
- end,
+ Meta =
+ case UpdateSeq of
+ false ->
+ [{total, TotalRows}, {offset, null}];
+ true ->
+ [{total, TotalRows}, {offset, null}, {update_seq, null}]
+ end,
{ok, Acc1} = Callback({meta, Meta}, Acc0),
Resp = doc_receive_loop(
Keys3, queue:new(), SpawnFun, MaxJobs, Callback, Acc1
@@ -129,50 +135,60 @@ go(DbName, _Options, Workers, QueryArgs, Callback, Acc0) ->
skip = Skip,
limit = Limit,
user_acc = Acc0,
- update_seq = case UpdateSeq of true -> []; false -> nil end
+ update_seq =
+ case UpdateSeq of
+ true -> [];
+ false -> nil
+ end
},
- case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, fabric_util:view_timeout(QueryArgs), 5000) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
+ case
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ fabric_util:view_timeout(QueryArgs),
+ 5000
+ )
+ of
+ {ok, NewState} ->
+ {ok, NewState#collector.user_acc};
+ {timeout, NewState} ->
+ Callback({error, timeout}, NewState#collector.user_acc);
+ {error, Resp} ->
+ {ok, Resp}
end.
shards(Db, Args) ->
DbPartitioned = fabric_util:is_partitioned(Db),
Partition = couch_mrview_util:get_extra(Args, partition),
- NewArgs = case {DbPartitioned, Partition} of
- {true, undefined} ->
- % If a user specifies the same partition on both
- % the start and end keys we can optimize the
- % query by limiting to the partition shard.
- Start = couch_partition:extract(Args#mrargs.start_key),
- End = couch_partition:extract(Args#mrargs.end_key),
- case {Start, End} of
- {{Partition, SK}, {Partition, EK}} ->
- A1 = Args#mrargs{
- start_key = SK,
- end_key = EK
- },
- couch_mrview_util:set_extra(A1, partition, Partition);
- _ ->
- Args
- end;
- _ ->
- Args
- end,
+ NewArgs =
+ case {DbPartitioned, Partition} of
+ {true, undefined} ->
+ % If a user specifies the same partition on both
+ % the start and end keys we can optimize the
+ % query by limiting to the partition shard.
+ Start = couch_partition:extract(Args#mrargs.start_key),
+ End = couch_partition:extract(Args#mrargs.end_key),
+ case {Start, End} of
+ {{Partition, SK}, {Partition, EK}} ->
+ A1 = Args#mrargs{
+ start_key = SK,
+ end_key = EK
+ },
+ couch_mrview_util:set_extra(A1, partition, Partition);
+ _ ->
+ Args
+ end;
+ _ ->
+ Args
+ end,
fabric_view:get_shards(Db, NewArgs).
-
handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
fabric_view:check_down_shards(State, NodeRef);
-
handle_message({rexi_EXIT, Reason}, Worker, State) ->
fabric_view:handle_worker_exit(State, Worker, Reason);
-
handle_message({meta, Meta0}, {Worker, From}, State) ->
Tot = couch_util:get_value(total, Meta0, 0),
Off = couch_util:get_value(offset, Meta0, 0),
@@ -190,62 +206,70 @@ handle_message({meta, Meta0}, {Worker, From}, State) ->
0 = fabric_dict:lookup_element(Worker, Counters0),
rexi:stream_ack(From),
Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- Total = if Tot == null -> null; true -> Total0 + Tot end,
- Offset = if Off == null -> null; true -> Offset0 + Off end,
- UpdateSeq = case {UpdateSeq0, Seq} of
- {nil, _} -> nil;
- {_, null} -> null;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- update_seq = UpdateSeq,
- offset = Offset
- }};
- false ->
- FinalOffset = case Offset of
- null -> null;
- _ -> erlang:min(Total, Offset+State#collector.skip)
+ Total =
+ if
+ Tot == null -> null;
+ true -> Total0 + Tot
end,
- Meta = [{total, Total}, {offset, FinalOffset}] ++
- case UpdateSeq of
- nil ->
- [];
- null ->
- [{update_seq, null}];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc,
- update_seq = UpdateSeq0
- }}
+ Offset =
+ if
+ Off == null -> null;
+ true -> Offset0 + Off
+ end,
+ UpdateSeq =
+ case {UpdateSeq0, Seq} of
+ {nil, _} -> nil;
+ {_, null} -> null;
+ _ -> [{Worker, Seq} | UpdateSeq0]
+ end,
+ case fabric_dict:any(0, Counters1) of
+ true ->
+ {ok, State#collector{
+ counters = Counters1,
+ total_rows = Total,
+ update_seq = UpdateSeq,
+ offset = Offset
+ }};
+ false ->
+ FinalOffset =
+ case Offset of
+ null -> null;
+ _ -> erlang:min(Total, Offset + State#collector.skip)
+ end,
+ Meta =
+ [{total, Total}, {offset, FinalOffset}] ++
+ case UpdateSeq of
+ nil ->
+ [];
+ null ->
+ [{update_seq, null}];
+ _ ->
+ [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
+ end,
+ {Go, Acc} = Callback({meta, Meta}, AccIn),
+ {Go, State#collector{
+ counters = fabric_dict:decrement_all(Counters1),
+ total_rows = Total,
+ offset = FinalOffset,
+ user_acc = Acc,
+ update_seq = UpdateSeq0
+ }}
end;
-
handle_message(#view_row{} = Row, {Worker, From}, State) ->
#collector{query_args = Args, counters = Counters0, rows = Rows0} = State,
Dir = Args#mrargs.direction,
- Rows = merge_row(Dir, Row#view_row{worker={Worker, From}}, Rows0),
+ Rows = merge_row(Dir, Row#view_row{worker = {Worker, From}}, Rows0),
Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=Counters1},
+ State1 = State#collector{rows = Rows, counters = Counters1},
fabric_view:maybe_send_row(State1);
-
handle_message(complete, Worker, State) ->
Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
fabric_view:maybe_send_row(State#collector{counters = Counters});
-
-handle_message({execution_stats, _} = Msg, {_,From}, St) ->
- #collector{callback=Callback, user_acc=AccIn} = St,
+handle_message({execution_stats, _} = Msg, {_, From}, St) ->
+ #collector{callback = Callback, user_acc = AccIn} = St,
{Go, Acc} = Callback(Msg, AccIn),
rexi:stream_ack(From),
- {Go, St#collector{user_acc=Acc}}.
+ {Go, St#collector{user_acc = Acc}}.
merge_row(fwd, Row, Rows) ->
lists:keymerge(#view_row.id, [Row], Rows);
@@ -256,42 +280,43 @@ all_docs_concurrency() ->
Value = config:get("fabric", "all_docs_concurrency", "10"),
try
list_to_integer(Value)
- catch _:_ ->
- 10
+ catch
+ _:_ ->
+ 10
end.
doc_receive_loop(Keys, Pids, SpawnFun, MaxJobs, Callback, AccIn) ->
case {Keys, queue:len(Pids)} of
- {[], 0} ->
- {ok, AccIn};
- {[K | RKeys], Len} when Len < MaxJobs ->
- Pids1 = queue:in(SpawnFun(K), Pids),
- doc_receive_loop(RKeys, Pids1, SpawnFun, MaxJobs, Callback, AccIn);
- _ ->
- {{value, {Pid, Ref}}, RestPids} = queue:out(Pids),
- Timeout = fabric_util:all_docs_timeout(),
- receive {'DOWN', Ref, process, Pid, Row} ->
- case Row of
- #view_row{} ->
- case Callback(fabric_view:transform_row(Row), AccIn) of
- {ok, Acc} ->
- doc_receive_loop(
- Keys, RestPids, SpawnFun, MaxJobs, Callback, Acc
- );
- {stop, Acc} ->
- cancel_read_pids(RestPids),
- {ok, Acc}
- end;
- Error ->
- cancel_read_pids(RestPids),
- Callback({error, Error}, AccIn)
+ {[], 0} ->
+ {ok, AccIn};
+ {[K | RKeys], Len} when Len < MaxJobs ->
+ Pids1 = queue:in(SpawnFun(K), Pids),
+ doc_receive_loop(RKeys, Pids1, SpawnFun, MaxJobs, Callback, AccIn);
+ _ ->
+ {{value, {Pid, Ref}}, RestPids} = queue:out(Pids),
+ Timeout = fabric_util:all_docs_timeout(),
+ receive
+ {'DOWN', Ref, process, Pid, Row} ->
+ case Row of
+ #view_row{} ->
+ case Callback(fabric_view:transform_row(Row), AccIn) of
+ {ok, Acc} ->
+ doc_receive_loop(
+ Keys, RestPids, SpawnFun, MaxJobs, Callback, Acc
+ );
+ {stop, Acc} ->
+ cancel_read_pids(RestPids),
+ {ok, Acc}
+ end;
+ Error ->
+ cancel_read_pids(RestPids),
+ Callback({error, Error}, AccIn)
+ end
+ after Timeout ->
+ timeout
end
- after Timeout ->
- timeout
- end
end.
-
open_doc(DbName, Options, Id, IncludeDocs) ->
try open_doc_int(DbName, Options, Id, IncludeDocs) of
#view_row{} = Row ->
@@ -303,22 +328,26 @@ open_doc(DbName, Options, Id, IncludeDocs) ->
end.
open_doc_int(DbName, Options, Id, IncludeDocs) ->
- Row = case fabric:open_doc(DbName, Id, [deleted | Options]) of
- {not_found, missing} ->
- Doc = undefined,
- #view_row{key=Id};
- {ok, #doc{deleted=true, revs=Revs}} ->
- Doc = null,
- {RevPos, [RevId|_]} = Revs,
- Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}, {deleted,true}]},
- #view_row{key=Id, id=Id, value=Value};
- {ok, #doc{revs=Revs} = Doc0} ->
- Doc = couch_doc:to_json_obj(Doc0, Options),
- {RevPos, [RevId|_]} = Revs,
- Value = {[{rev,couch_doc:rev_to_str({RevPos, RevId})}]},
- #view_row{key=Id, id=Id, value=Value}
- end,
- if IncludeDocs -> Row#view_row{doc=Doc}; true -> Row end.
+ Row =
+ case fabric:open_doc(DbName, Id, [deleted | Options]) of
+ {not_found, missing} ->
+ Doc = undefined,
+ #view_row{key = Id};
+ {ok, #doc{deleted = true, revs = Revs}} ->
+ Doc = null,
+ {RevPos, [RevId | _]} = Revs,
+ Value = {[{rev, couch_doc:rev_to_str({RevPos, RevId})}, {deleted, true}]},
+ #view_row{key = Id, id = Id, value = Value};
+ {ok, #doc{revs = Revs} = Doc0} ->
+ Doc = couch_doc:to_json_obj(Doc0, Options),
+ {RevPos, [RevId | _]} = Revs,
+ Value = {[{rev, couch_doc:rev_to_str({RevPos, RevId})}]},
+ #view_row{key = Id, id = Id, value = Value}
+ end,
+ if
+ IncludeDocs -> Row#view_row{doc = Doc};
+ true -> Row
+ end.
cancel_read_pids(Pids) ->
case queue:out(Pids) of
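
As an aside on the go/5 and doc_receive_loop clauses above: per-document lookups run in monitored processes and their results come back as exit reasons in the 'DOWN' message. A minimal sketch of that idiom (call_isolated is a hypothetical helper, not part of this commit):

```
%% Run Fun in a monitored process and read its result from the exit reason,
%% as go/5 does for fabric:get_doc_count/2 above. Hypothetical helper.
call_isolated(Fun) when is_function(Fun, 0) ->
    {_Pid, Ref} = spawn_monitor(fun() -> exit(Fun()) end),
    receive
        {'DOWN', Ref, process, _Pid, Result} -> Result
    end.
%% e.g. call_isolated(fun() -> fabric:get_doc_count(DbName, Namespace) end)
```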
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index 9fdbf06df..b561da151 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -28,60 +28,66 @@
-import(fabric_db_update_listener, [wait_db_updated/1]).
-go(DbName, Feed, Options, Callback, Acc0) when Feed == "continuous" orelse
- Feed == "longpoll" orelse Feed == "eventsource" ->
+go(DbName, Feed, Options, Callback, Acc0) when
+ Feed == "continuous" orelse
+ Feed == "longpoll" orelse Feed == "eventsource"
+->
Args = make_changes_args(Options),
Since = get_start_seq(DbName, Args),
case validate_start_seq(DbName, Since) of
- ok ->
- {ok, Acc} = Callback(start, Acc0),
- {Timeout, _} = couch_changes:get_changes_timeout(Args, Callback),
- Ref = make_ref(),
- Parent = self(),
- UpdateListener = {spawn_link(fabric_db_update_listener, go,
- [Parent, Ref, DbName, Timeout]),
- Ref},
- put(changes_epoch, get_changes_epoch()),
- try
- keep_sending_changes(
- DbName,
- Args,
- Callback,
- Since,
- Acc,
- Timeout,
- UpdateListener,
- os:timestamp()
- )
- after
- fabric_db_update_listener:stop(UpdateListener)
- end;
- Error ->
- Callback(Error, Acc0)
+ ok ->
+ {ok, Acc} = Callback(start, Acc0),
+ {Timeout, _} = couch_changes:get_changes_timeout(Args, Callback),
+ Ref = make_ref(),
+ Parent = self(),
+ UpdateListener = {
+ spawn_link(
+ fabric_db_update_listener,
+ go,
+ [Parent, Ref, DbName, Timeout]
+ ),
+ Ref
+ },
+ put(changes_epoch, get_changes_epoch()),
+ try
+ keep_sending_changes(
+ DbName,
+ Args,
+ Callback,
+ Since,
+ Acc,
+ Timeout,
+ UpdateListener,
+ os:timestamp()
+ )
+ after
+ fabric_db_update_listener:stop(UpdateListener)
+ end;
+ Error ->
+ Callback(Error, Acc0)
end;
-
go(DbName, "normal", Options, Callback, Acc0) ->
Args = make_changes_args(Options),
Since = get_start_seq(DbName, Args),
case validate_start_seq(DbName, Since) of
- ok ->
- {ok, Acc} = Callback(start, Acc0),
- {ok, Collector} = send_changes(
- DbName,
- Args,
- Callback,
- Since,
- Acc,
- 5000
- ),
- #collector{counters=Seqs, user_acc=AccOut, offset=Offset} = Collector,
- Callback({stop, pack_seqs(Seqs), pending_count(Offset)}, AccOut);
- Error ->
- Callback(Error, Acc0)
+ ok ->
+ {ok, Acc} = Callback(start, Acc0),
+ {ok, Collector} = send_changes(
+ DbName,
+ Args,
+ Callback,
+ Since,
+ Acc,
+ 5000
+ ),
+ #collector{counters = Seqs, user_acc = AccOut, offset = Offset} = Collector,
+ Callback({stop, pack_seqs(Seqs), pending_count(Offset)}, AccOut);
+ Error ->
+ Callback(Error, Acc0)
end.
keep_sending_changes(DbName, Args, Callback, Seqs, AccIn, Timeout, UpListen, T0) ->
- #changes_args{limit=Limit, feed=Feed, heartbeat=Heartbeat} = Args,
+ #changes_args{limit = Limit, feed = Feed, heartbeat = Heartbeat} = Args,
{ok, Collector} = send_changes(DbName, Args, Callback, Seqs, AccIn, Timeout),
#collector{
limit = Limit2,
@@ -92,39 +98,43 @@ keep_sending_changes(DbName, Args, Callback, Seqs, AccIn, Timeout, UpListen, T0)
LastSeq = pack_seqs(NewSeqs),
MaintenanceMode = config:get("couchdb", "maintenance_mode"),
NewEpoch = get_changes_epoch() > erlang:get(changes_epoch),
- if Limit > Limit2, Feed == "longpoll";
- MaintenanceMode == "true"; MaintenanceMode == "nolb"; NewEpoch ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut0);
- true ->
- {ok, AccOut} = Callback(waiting_for_updates, AccOut0),
- WaitForUpdate = wait_db_updated(UpListen),
- AccumulatedTime = timer:now_diff(os:timestamp(), T0) div 1000,
- Max = case config:get("fabric", "changes_duration") of
- undefined ->
- infinity;
- MaxStr ->
- list_to_integer(MaxStr)
- end,
- case {Heartbeat, AccumulatedTime > Max, WaitForUpdate} of
- {_, _, changes_feed_died} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- {undefined, _, timeout} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- {_, true, timeout} ->
- Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
- _ ->
- {ok, AccTimeout} = Callback(timeout, AccOut),
- ?MODULE:keep_sending_changes(
- DbName,
- Args#changes_args{limit=Limit2},
- Callback,
- LastSeq,
- AccTimeout,
- Timeout,
- UpListen,
- T0
- )
- end
+ if
+ Limit > Limit2, Feed == "longpoll";
+ MaintenanceMode == "true";
+ MaintenanceMode == "nolb";
+ NewEpoch ->
+ Callback({stop, LastSeq, pending_count(Offset)}, AccOut0);
+ true ->
+ {ok, AccOut} = Callback(waiting_for_updates, AccOut0),
+ WaitForUpdate = wait_db_updated(UpListen),
+ AccumulatedTime = timer:now_diff(os:timestamp(), T0) div 1000,
+ Max =
+ case config:get("fabric", "changes_duration") of
+ undefined ->
+ infinity;
+ MaxStr ->
+ list_to_integer(MaxStr)
+ end,
+ case {Heartbeat, AccumulatedTime > Max, WaitForUpdate} of
+ {_, _, changes_feed_died} ->
+ Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
+ {undefined, _, timeout} ->
+ Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
+ {_, true, timeout} ->
+ Callback({stop, LastSeq, pending_count(Offset)}, AccOut);
+ _ ->
+ {ok, AccTimeout} = Callback(timeout, AccOut),
+ ?MODULE:keep_sending_changes(
+ DbName,
+ Args#changes_args{limit = Limit2},
+ Callback,
+ LastSeq,
+ AccTimeout,
+ Timeout,
+ UpListen,
+ T0
+ )
+ end
end.
send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
@@ -133,35 +143,46 @@ send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
Seqs0 = unpack_seqs(PackedSeqs, DbName),
{WSeqs0, Dead, Reps} = find_replacements(Seqs0, AllLiveShards),
% Start workers which didn't need replacements
- WSeqs = lists:map(fun({#shard{name = Name, node = N} = S, Seq}) ->
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Seq]}),
- {S#shard{ref = Ref}, Seq}
- end, WSeqs0),
+ WSeqs = lists:map(
+ fun({#shard{name = Name, node = N} = S, Seq}) ->
+ Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Seq]}),
+ {S#shard{ref = Ref}, Seq}
+ end,
+ WSeqs0
+ ),
% For some dead workers see if they are a result of split shards. In that
% case make a replacement argument so that local rexi workers can calculate
% (hopefully) a > 0 update sequence.
{WSplitSeqs0, Reps1} = find_split_shard_replacements(Dead, Reps),
- WSplitSeqs = lists:map(fun({#shard{name = Name, node = N} = S, Seq}) ->
- Arg = make_replacement_arg(N, Seq),
- Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Arg]}),
- {S#shard{ref = Ref}, Seq}
- end, WSplitSeqs0),
+ WSplitSeqs = lists:map(
+ fun({#shard{name = Name, node = N} = S, Seq}) ->
+ Arg = make_replacement_arg(N, Seq),
+ Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, Arg]}),
+ {S#shard{ref = Ref}, Seq}
+ end,
+ WSplitSeqs0
+ ),
% For ranges that were not split, look for a replacement on a different node
- WReps = lists:map(fun(#shard{name = Name, node = NewNode, range = R} = S) ->
- Arg = find_replacement_sequence(Dead, R),
- case Arg =/= 0 of true -> ok; false ->
- couch_log:warning("~p reset seq for ~p", [?MODULE, S])
- end,
- Ref = rexi:cast(NewNode, {fabric_rpc, changes, [Name, ChangesArgs, Arg]}),
- {S#shard{ref = Ref}, 0}
- end, Reps1),
+ WReps = lists:map(
+ fun(#shard{name = Name, node = NewNode, range = R} = S) ->
+ Arg = find_replacement_sequence(Dead, R),
+ case Arg =/= 0 of
+ true -> ok;
+ false -> couch_log:warning("~p reset seq for ~p", [?MODULE, S])
+ end,
+ Ref = rexi:cast(NewNode, {fabric_rpc, changes, [Name, ChangesArgs, Arg]}),
+ {S#shard{ref = Ref}, 0}
+ end,
+ Reps1
+ ),
Seqs = WSeqs ++ WSplitSeqs ++ WReps,
{Workers0, _} = lists:unzip(Seqs),
Repls = fabric_ring:get_shard_replacements(DbName, Workers0),
- StartFun = fun(#shard{name=Name, node=N, range=R0}=Shard) ->
+ StartFun = fun(#shard{name = Name, node = N, range = R0} = Shard) ->
SeqArg = find_replacement_sequence(Seqs, R0),
- case SeqArg =/= 0 of true -> ok; false ->
- couch_log:warning("~p StartFun reset seq for ~p", [?MODULE, Shard])
+ case SeqArg =/= 0 of
+ true -> ok;
+ false -> couch_log:warning("~p StartFun reset seq for ~p", [?MODULE, Shard])
end,
Ref = rexi:cast(N, {fabric_rpc, changes, [Name, ChangesArgs, SeqArg]}),
Shard#shard{ref = Ref}
@@ -171,14 +192,24 @@ send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) ->
case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of
{ok, Workers} ->
try
- LiveSeqs = lists:map(fun(W) ->
- case lists:keyfind(W, 1, Seqs) of
- {W, Seq} -> {W, Seq};
- _ -> {W, 0}
- end
- end, Workers),
- send_changes(DbName, Workers, LiveSeqs, ChangesArgs,
- Callback, AccIn, Timeout)
+ LiveSeqs = lists:map(
+ fun(W) ->
+ case lists:keyfind(W, 1, Seqs) of
+ {W, Seq} -> {W, Seq};
+ _ -> {W, 0}
+ end
+ end,
+ Workers
+ ),
+ send_changes(
+ DbName,
+ Workers,
+ LiveSeqs,
+ ChangesArgs,
+ Callback,
+ AccIn,
+ Timeout
+ )
after
fabric_streams:cleanup(Workers)
end;
@@ -210,77 +241,84 @@ send_changes(DbName, Workers, Seqs, ChangesArgs, Callback, AccIn, Timeout) ->
user_acc = AccIn,
limit = ChangesArgs#changes_args.limit,
offset = fabric_dict:init(Workers, null),
- rows = Seqs % store sequence positions instead
+ % store sequence positions instead
+ rows = Seqs
},
%% TODO: errors need to be handled here
receive_results(Workers, State, Timeout, Callback).
receive_results(Workers, State, Timeout, Callback) ->
- case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State,
- Timeout, infinity) of
- {timeout, NewState0} ->
- {ok, AccOut} = Callback(timeout, NewState0#collector.user_acc),
- NewState = NewState0#collector{user_acc = AccOut},
- receive_results(Workers, NewState, Timeout, Callback);
- {_, NewState} ->
- {ok, NewState}
+ case
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ Timeout,
+ infinity
+ )
+ of
+ {timeout, NewState0} ->
+ {ok, AccOut} = Callback(timeout, NewState0#collector.user_acc),
+ NewState = NewState0#collector{user_acc = AccOut},
+ receive_results(Workers, NewState, Timeout, Callback);
+ {_, NewState} ->
+ {ok, NewState}
end.
handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
fabric_view:check_down_shards(State, NodeRef);
-
handle_message({rexi_EXIT, Reason}, Worker, State) ->
fabric_view:handle_worker_exit(State, Worker, Reason);
-
% Temporary upgrade clause - Case 24236
handle_message({complete, Key}, Worker, State) when is_tuple(Key) ->
handle_message({complete, [{seq, Key}, {pending, 0}]}, Worker, State);
-
-handle_message({change, Props}, {Worker, _}, #collector{limit=0} = State) ->
+handle_message({change, Props}, {Worker, _}, #collector{limit = 0} = State) ->
O0 = State#collector.offset,
- O1 = case fabric_dict:lookup_element(Worker, O0) of
- null ->
- % Use Pending+1 because we're ignoring this row in the response
- Pending = couch_util:get_value(pending, Props, 0),
- fabric_dict:store(Worker, Pending+1, O0);
- _ ->
- O0
- end,
+ O1 =
+ case fabric_dict:lookup_element(Worker, O0) of
+ null ->
+ % Use Pending+1 because we're ignoring this row in the response
+ Pending = couch_util:get_value(pending, Props, 0),
+ fabric_dict:store(Worker, Pending + 1, O0);
+ _ ->
+ O0
+ end,
maybe_stop(State#collector{offset = O1});
-
-handle_message({complete, Props}, Worker, #collector{limit=0} = State) ->
+handle_message({complete, Props}, Worker, #collector{limit = 0} = State) ->
O0 = State#collector.offset,
- O1 = case fabric_dict:lookup_element(Worker, O0) of
- null ->
- fabric_dict:store(Worker, couch_util:get_value(pending,Props), O0);
- _ ->
- O0
- end,
+ O1 =
+ case fabric_dict:lookup_element(Worker, O0) of
+ null ->
+ fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0);
+ _ ->
+ O0
+ end,
maybe_stop(State#collector{offset = O1});
-
-handle_message({no_pass, Props}, {Worker, From}, #collector{limit=0} = State)
- when is_list(Props) ->
+handle_message({no_pass, Props}, {Worker, From}, #collector{limit = 0} = State) when
+ is_list(Props)
+->
#collector{counters = S0, offset = O0} = State,
- O1 = case fabric_dict:lookup_element(Worker, O0) of
- null ->
- fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0);
- _ ->
- O0
- end,
+ O1 =
+ case fabric_dict:lookup_element(Worker, O0) of
+ null ->
+ fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0);
+ _ ->
+ O0
+ end,
S1 = fabric_dict:store(Worker, couch_util:get_value(seq, Props), S0),
rexi:stream_ack(From),
maybe_stop(State#collector{counters = S1, offset = O1});
-
handle_message(#change{} = Row, {Worker, From}, St) ->
- Change = {change, [
- {seq, Row#change.key},
- {id, Row#change.id},
- {changes, Row#change.value},
- {deleted, Row#change.deleted},
- {doc, Row#change.doc}
- ]},
+ Change =
+ {change, [
+ {seq, Row#change.key},
+ {id, Row#change.id},
+ {changes, Row#change.value},
+ {deleted, Row#change.deleted},
+ {doc, Row#change.doc}
+ ]},
handle_message(Change, {Worker, From}, St);
-
handle_message({change, Props}, {Worker, From}, St) ->
#collector{
callback = Callback,
@@ -294,51 +332,50 @@ handle_message({change, Props}, {Worker, From}, St) ->
O1 = fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0),
% Temporary hack for FB 23637
Interval = erlang:get(changes_seq_interval),
- if (Interval == undefined) orelse (Limit rem Interval == 0) ->
- Props2 = lists:keyreplace(seq, 1, Props, {seq, pack_seqs(S1)});
- true ->
- Props2 = lists:keyreplace(seq, 1, Props, {seq, null})
+ if
+ (Interval == undefined) orelse (Limit rem Interval == 0) ->
+ Props2 = lists:keyreplace(seq, 1, Props, {seq, pack_seqs(S1)});
+ true ->
+ Props2 = lists:keyreplace(seq, 1, Props, {seq, null})
end,
{Go, Acc} = Callback(changes_row(Props2), AccIn),
rexi:stream_ack(From),
- {Go, St#collector{counters=S1, offset=O1, limit=Limit-1, user_acc=Acc}};
-
+ {Go, St#collector{counters = S1, offset = O1, limit = Limit - 1, user_acc = Acc}};
%% upgrade clause
handle_message({no_pass, Seq}, From, St) when is_integer(Seq) ->
handle_message({no_pass, [{seq, Seq}]}, From, St);
-
handle_message({no_pass, Props}, {Worker, From}, St) ->
Seq = couch_util:get_value(seq, Props),
#collector{counters = S0} = St,
true = fabric_dict:is_key(Worker, S0),
S1 = fabric_dict:store(Worker, Seq, S0),
rexi:stream_ack(From),
- {ok, St#collector{counters=S1}};
-
+ {ok, St#collector{counters = S1}};
handle_message({complete, Props}, Worker, State) ->
Key = couch_util:get_value(seq, Props),
#collector{
counters = S0,
offset = O0,
- total_rows = Completed % override
+ % override
+ total_rows = Completed
} = State,
true = fabric_dict:is_key(Worker, S0),
S1 = fabric_dict:store(Worker, Key, S0),
O1 = fabric_dict:store(Worker, couch_util:get_value(pending, Props), O0),
- NewState = State#collector{counters=S1, offset=O1, total_rows=Completed+1},
+ NewState = State#collector{counters = S1, offset = O1, total_rows = Completed + 1},
    % We're relying on S1 having exactly the number of workers that
    % are participating in this response. With the new stream_start
% that's a bit more obvious but historically it wasn't quite
% so clear. The Completed variable is just a hacky override
% of the total_rows field in the #collector{} record.
NumWorkers = fabric_dict:size(S1),
- Go = case NumWorkers =:= (Completed+1) of
- true -> stop;
- false -> ok
- end,
+ Go =
+ case NumWorkers =:= (Completed + 1) of
+ true -> stop;
+ false -> ok
+ end,
{Go, NewState}.
-
make_replacement_arg(Node, {Seq, Uuid}) ->
{replace, Node, Uuid, Seq};
make_replacement_arg(_Node, {Seq, Uuid, EpochNode}) ->
@@ -358,36 +395,40 @@ maybe_stop(#collector{offset = Offset} = State) ->
{ok, State}
end.
-make_changes_args(#changes_args{style=Style, filter_fun=undefined}=Args) ->
+make_changes_args(#changes_args{style = Style, filter_fun = undefined} = Args) ->
Args#changes_args{filter_fun = {default, Style}};
make_changes_args(Args) ->
Args.
-get_start_seq(DbName, #changes_args{dir=Dir, since=Since})
- when Dir == rev; Since == "now" ->
+get_start_seq(DbName, #changes_args{dir = Dir, since = Since}) when
+ Dir == rev; Since == "now"
+->
{ok, Info} = fabric:get_db_info(DbName),
couch_util:get_value(update_seq, Info);
-get_start_seq(_DbName, #changes_args{dir=fwd, since=Since}) ->
+get_start_seq(_DbName, #changes_args{dir = fwd, since = Since}) ->
Since.
pending_count(Dict) ->
- fabric_dict:fold(fun
- (_Worker, Count, Acc) when is_integer(Count), is_integer(Acc) ->
- Count + Acc;
- (_Worker, _Count, _Acc) ->
- null
- end, 0, Dict).
+ fabric_dict:fold(
+ fun
+ (_Worker, Count, Acc) when is_integer(Count), is_integer(Acc) ->
+ Count + Acc;
+ (_Worker, _Count, _Acc) ->
+ null
+ end,
+ 0,
+ Dict
+ ).
pack_seqs(Workers) ->
- SeqList = [{N,R,S} || {#shard{node=N, range=R}, S} <- Workers],
- SeqSum = lists:sum([seq(S) || {_,_,S} <- SeqList]),
+ SeqList = [{N, R, S} || {#shard{node = N, range = R}, S} <- Workers],
+ SeqSum = lists:sum([seq(S) || {_, _, S} <- SeqList]),
Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
?l2b([integer_to_list(SeqSum), $-, Opaque]).
seq({Seq, _Uuid, _Node}) -> Seq;
seq({Seq, _Uuid}) -> Seq;
-seq(Seq) -> Seq.
-
+seq(Seq) -> Seq.
unpack_seq_regex_match(Packed) ->
NewPattern = "^\\[[0-9]+\s*,\s*\"(?<opaque>.*)\"\\]$",
@@ -401,11 +442,9 @@ unpack_seq_regex_match(Packed) ->
Match
end.
-
unpack_seq_decode_term(Opaque) ->
binary_to_term(couch_util:decodeBase64Url(Opaque)).
-
% This is used for testing and for remsh debugging
%
% Return the unpacked list of sequences from a raw update seq string. The input
@@ -417,20 +456,17 @@ decode_seq(Packed) ->
Opaque = unpack_seq_regex_match(Packed),
unpack_seq_decode_term(Opaque).
-
% Returns fabric_dict with {Shard, Seq} entries
%
-spec unpack_seqs(pos_integer() | list() | binary(), binary()) ->
- orddict:orddict().
+ orddict:orddict().
unpack_seqs(0, DbName) ->
fabric_dict:init(mem3:shards(DbName), 0);
-
unpack_seqs("0", DbName) ->
fabric_dict:init(mem3:shards(DbName), 0);
-
-unpack_seqs([_SeqNum, Opaque], DbName) -> % deprecated
+% deprecated
+unpack_seqs([_SeqNum, Opaque], DbName) ->
do_unpack_seqs(Opaque, DbName);
-
unpack_seqs(Packed, DbName) ->
Opaque = unpack_seq_regex_match(Packed),
do_unpack_seqs(Opaque, DbName).
@@ -440,23 +476,33 @@ do_unpack_seqs(Opaque, DbName) ->
% This just picks each unique shard and keeps the largest seq
% value recorded.
Decoded = unpack_seq_decode_term(Opaque),
- DedupDict = lists:foldl(fun({Node, [A, B], Seq}, Acc) ->
- dict:append({Node, [A, B]}, Seq, Acc)
- end, dict:new(), Decoded),
- Deduped = lists:map(fun({{Node, [A, B]}, SeqList}) ->
- {Node, [A, B], lists:max(SeqList)}
- end, dict:to_list(DedupDict)),
+ DedupDict = lists:foldl(
+ fun({Node, [A, B], Seq}, Acc) ->
+ dict:append({Node, [A, B]}, Seq, Acc)
+ end,
+ dict:new(),
+ Decoded
+ ),
+ Deduped = lists:map(
+ fun({{Node, [A, B]}, SeqList}) ->
+ {Node, [A, B], lists:max(SeqList)}
+ end,
+ dict:to_list(DedupDict)
+ ),
% Create a fabric_dict of {Shard, Seq} entries
% TODO relies on internal structure of fabric_dict as keylist
- Unpacked = lists:flatmap(fun({Node, [A,B], Seq}) ->
- case mem3:get_shard(DbName, Node, [A,B]) of
- {ok, Shard} ->
- [{Shard, Seq}];
- {error, not_found} ->
- []
- end
- end, Deduped),
+ Unpacked = lists:flatmap(
+ fun({Node, [A, B], Seq}) ->
+ case mem3:get_shard(DbName, Node, [A, B]) of
+ {ok, Shard} ->
+ [{Shard, Seq}];
+ {error, not_found} ->
+ []
+ end
+ end,
+ Deduped
+ ),
% This just handles the case if the ring in the unpacked sequence
% received is not complete and in that case tries to fill in the
@@ -466,32 +512,40 @@ do_unpack_seqs(Opaque, DbName) ->
Unpacked;
false ->
Uuids = get_db_uuid_shards(DbName),
- PotentialWorkers = lists:map(fun({Node, [A, B], Seq}) ->
- case mem3:get_shard(DbName, Node, [A, B]) of
- {ok, Shard} ->
- {Shard, Seq};
- {error, not_found} ->
- Shard = replace_moved_shard(Node, [A, B], Seq, Uuids),
- {Shard, Seq}
- end
- end, Deduped),
+ PotentialWorkers = lists:map(
+ fun({Node, [A, B], Seq}) ->
+ case mem3:get_shard(DbName, Node, [A, B]) of
+ {ok, Shard} ->
+ {Shard, Seq};
+ {error, not_found} ->
+ Shard = replace_moved_shard(Node, [A, B], Seq, Uuids),
+ {Shard, Seq}
+ end
+ end,
+ Deduped
+ ),
Shards = mem3:shards(DbName),
{Unpacked1, Dead, Reps} = find_replacements(PotentialWorkers, Shards),
{Splits, Reps1} = find_split_shard_replacements(Dead, Reps),
- RepSeqs = lists:map(fun(#shard{} = S) ->
- {S, get_old_seq(S, Deduped)}
- end, Reps1),
+ RepSeqs = lists:map(
+ fun(#shard{} = S) ->
+ {S, get_old_seq(S, Deduped)}
+ end,
+ Reps1
+ ),
Unpacked1 ++ Splits ++ RepSeqs
end.
-
-get_old_seq(#shard{range=R}=Shard, SinceSeqs) ->
+get_old_seq(#shard{range = R} = Shard, SinceSeqs) ->
case lists:keyfind(R, 2, SinceSeqs) of
{Node, R, Seq} when is_number(Seq) ->
% Unfortunately we don't have access to the db
            % uuid so we can't set a replacement here.
- couch_log:warning("~p get_old_seq missing uuid "
- "node: ~p, range: ~p, seq: ~p", [?MODULE, Node, R, Seq]),
+ couch_log:warning(
+ "~p get_old_seq missing uuid "
+ "node: ~p, range: ~p, seq: ~p",
+ [?MODULE, Node, R, Seq]
+ ),
0;
{Node, R, {Seq, Uuid}} ->
% This update seq is using the old format that
@@ -503,12 +557,13 @@ get_old_seq(#shard{range=R}=Shard, SinceSeqs) ->
% can use for replacement.
{Seq, Uuid, EpochNode};
Error ->
- couch_log:warning("~p get_old_seq error: ~p, shard: ~p, seqs: ~p",
- [?MODULE, Error, Shard, SinceSeqs]),
+ couch_log:warning(
+ "~p get_old_seq error: ~p, shard: ~p, seqs: ~p",
+ [?MODULE, Error, Shard, SinceSeqs]
+ ),
0
end.
-
get_db_uuid_shards(DbName) ->
% Need to use an isolated process as we are performing a fabric call from
    % another fabric call and there is a good chance we'd pollute the mailbox
@@ -522,10 +577,14 @@ get_db_uuid_shards(DbName) ->
% sequence from the same cluster and we didn't tweak that
% relatively obscure config option in the meantime.
PrefixLen = fabric_util:get_uuid_prefix_len(),
- maps:fold(fun(Uuid, Shard, Acc) ->
- TrimmedUuid = binary:part(Uuid, {0, PrefixLen}),
- Acc#{TrimmedUuid => Shard}
- end, #{}, Uuids);
+ maps:fold(
+ fun(Uuid, Shard, Acc) ->
+ TrimmedUuid = binary:part(Uuid, {0, PrefixLen}),
+ Acc#{TrimmedUuid => Shard}
+ end,
+ #{},
+ Uuids
+ );
{error, Error} ->
% Since we are doing a best-effort approach to match moved shards,
% tolerate and log errors. This should also handle cases when the
@@ -541,7 +600,6 @@ get_db_uuid_shards(DbName) ->
#{}
end.
-
%% Determine if the missing shard moved to a new node. Do that by matching the
%% uuids from the current shard map. If we cannot find a moved shard, we return
%% the original node and range as a shard and hope for the best.
@@ -561,29 +619,36 @@ replace_moved_shard(Node, Range, {_Seq, Uuid, _EpochNode}, #{} = UuidShards) ->
#shard{node = Node, range = Range}
end.
-
changes_row(Props0) ->
- Props1 = case couch_util:get_value(deleted, Props0) of
- true ->
- Props0;
- _ ->
- lists:keydelete(deleted, 1, Props0)
- end,
+ Props1 =
+ case couch_util:get_value(deleted, Props0) of
+ true ->
+ Props0;
+ _ ->
+ lists:keydelete(deleted, 1, Props0)
+ end,
Allowed = [seq, id, changes, deleted, doc, error],
- Props2 = lists:filter(fun({K,_V}) -> lists:member(K, Allowed) end, Props1),
+ Props2 = lists:filter(fun({K, _V}) -> lists:member(K, Allowed) end, Props1),
{change, {Props2}}.
-
find_replacements(Workers, AllShards) ->
% Build map [B, E] => [Worker1, Worker2, ...] for all workers
- WrkMap = lists:foldl(fun({#shard{range = [B, E]}, _} = W, Acc) ->
- maps:update_with({B, E}, fun(Ws) -> [W | Ws] end, [W], Acc)
- end, #{}, fabric_dict:to_list(Workers)),
+ WrkMap = lists:foldl(
+ fun({#shard{range = [B, E]}, _} = W, Acc) ->
+ maps:update_with({B, E}, fun(Ws) -> [W | Ws] end, [W], Acc)
+ end,
+ #{},
+ fabric_dict:to_list(Workers)
+ ),
% Build map [B, E] => [Shard1, Shard2, ...] for all shards
- AllMap = lists:foldl(fun(#shard{range = [B, E]} = S, Acc) ->
- maps:update_with({B, E}, fun(Ss) -> [S | Ss] end, [S], Acc)
- end, #{}, AllShards),
+ AllMap = lists:foldl(
+ fun(#shard{range = [B, E]} = S, Acc) ->
+ maps:update_with({B, E}, fun(Ss) -> [S | Ss] end, [S], Acc)
+ end,
+ #{},
+ AllShards
+ ),
% Custom sort function will prioritize workers over other shards.
% The idea is to not unnecessarily kill workers if we don't have to
@@ -610,59 +675,69 @@ find_replacements(Workers, AllShards) ->
% Keep only workers in the ring and from one of the available nodes
Keep = fun(#shard{range = [B, E], node = N}) ->
- lists:member({B, E}, Ring) andalso lists:keyfind(N, #shard.node,
- maps:get({B, E}, AllMap)) =/= false
+ lists:member({B, E}, Ring) andalso
+ lists:keyfind(
+ N,
+ #shard.node,
+ maps:get({B, E}, AllMap)
+ ) =/= false
end,
Workers1 = fabric_dict:filter(fun(S, _) -> Keep(S) end, Workers),
Removed = fabric_dict:filter(fun(S, _) -> not Keep(S) end, Workers),
- {Rep, _} = lists:foldl(fun(R, {RepAcc, AllMapAcc}) ->
- case maps:is_key(R, WrkMap)of
- true ->
- % It's a worker and in the map of available shards. Make sure
- % to keep it only if there is a range available on that node
- % only (reuse Keep/1 predicate from above)
- WorkersInRange = maps:get(R, WrkMap),
- case lists:any(fun({S, _}) -> Keep(S) end, WorkersInRange) of
- true ->
- {RepAcc, AllMapAcc};
- false ->
- [Shard | Rest] = maps:get(R, AllMapAcc),
- {[Shard | RepAcc], AllMapAcc#{R := Rest}}
- end;
- false ->
- % No worker for this range. Replace from available shards
- [Shard | Rest] = maps:get(R, AllMapAcc),
- {[Shard | RepAcc], AllMapAcc#{R := Rest}}
- end
- end, {[], AllMap}, Ring),
+ {Rep, _} = lists:foldl(
+ fun(R, {RepAcc, AllMapAcc}) ->
+ case maps:is_key(R, WrkMap) of
+ true ->
+ % It's a worker and in the map of available shards. Make sure
+ % to keep it only if there is a range available on that node
+ % only (reuse Keep/1 predicate from above)
+ WorkersInRange = maps:get(R, WrkMap),
+ case lists:any(fun({S, _}) -> Keep(S) end, WorkersInRange) of
+ true ->
+ {RepAcc, AllMapAcc};
+ false ->
+ [Shard | Rest] = maps:get(R, AllMapAcc),
+ {[Shard | RepAcc], AllMapAcc#{R := Rest}}
+ end;
+ false ->
+ % No worker for this range. Replace from available shards
+ [Shard | Rest] = maps:get(R, AllMapAcc),
+ {[Shard | RepAcc], AllMapAcc#{R := Rest}}
+ end
+ end,
+ {[], AllMap},
+ Ring
+ ),
% Return the list of workers that are part of ring, list of removed workers
% and a list of replacement shards that could be used to make sure the ring
% completes.
{Workers1, Removed, Rep}.
-
% From the list of dead workers determine if any are a result of a split shard.
% In that case perhaps there is a way to not rewind the changes feed back to 0.
% Returns {NewWorkers, Available} where NewWorkers is the list of
% viable workers and Available is the list of still unused input Shards
find_split_shard_replacements(DeadWorkers, Shards) ->
Acc0 = {[], Shards},
- AccF = fabric_dict:fold(fun(#shard{node = WN, range = R}, Seq, Acc) ->
- [B, E] = R,
- {SplitWorkers, Available} = Acc,
- ShardsOnSameNode = [S || #shard{node = N} = S <- Available, N =:= WN],
- SplitShards = mem3_util:non_overlapping_shards(ShardsOnSameNode, B, E),
- RepCount = length(SplitShards),
- NewWorkers = [{S, make_split_seq(Seq, RepCount)} || S <- SplitShards],
- NewAvailable = [S || S <- Available, not lists:member(S, SplitShards)],
- {NewWorkers ++ SplitWorkers, NewAvailable}
- end, Acc0, DeadWorkers),
+ AccF = fabric_dict:fold(
+ fun(#shard{node = WN, range = R}, Seq, Acc) ->
+ [B, E] = R,
+ {SplitWorkers, Available} = Acc,
+ ShardsOnSameNode = [S || #shard{node = N} = S <- Available, N =:= WN],
+ SplitShards = mem3_util:non_overlapping_shards(ShardsOnSameNode, B, E),
+ RepCount = length(SplitShards),
+ NewWorkers = [{S, make_split_seq(Seq, RepCount)} || S <- SplitShards],
+ NewAvailable = [S || S <- Available, not lists:member(S, SplitShards)],
+ {NewWorkers ++ SplitWorkers, NewAvailable}
+ end,
+ Acc0,
+ DeadWorkers
+ ),
{Workers, Available} = AccF,
{fabric_dict:from_list(Workers), Available}.
-
find_replacement_sequence(OriginalSeqs, R0) ->
%% Find the original shard copy in the Seqs array
case lists:dropwhile(fun({S, _}) -> S#shard.range =/= R0 end, OriginalSeqs) of
@@ -678,13 +753,11 @@ find_replacement_sequence(OriginalSeqs, R0) ->
0
end.
-
make_split_seq({Num, Uuid, Node}, RepCount) when RepCount > 1 ->
{Num, {split, Uuid}, Node};
make_split_seq(Seq, _) ->
Seq.
-
validate_start_seq(_DbName, "now") ->
ok;
validate_start_seq(_DbName, 0) ->
@@ -719,7 +792,6 @@ get_changes_epoch() ->
increment_changes_epoch() ->
application:set_env(fabric, changes_epoch, os:timestamp()).
-
unpack_seq_setup() ->
meck:new(mem3),
meck:new(fabric_view),
@@ -727,62 +799,80 @@ unpack_seq_setup() ->
meck:expect(fabric_ring, is_progress_possible, fun(_) -> true end),
ok.
-
unpack_seqs_test_() ->
{
setup,
fun unpack_seq_setup/0,
- fun (_) -> meck:unload() end,
+ fun(_) -> meck:unload() end,
[
t_unpack_seqs()
]
}.
-
t_unpack_seqs() ->
?_test(begin
% BigCouch 0.3 style.
- assert_shards("23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"),
+ assert_shards(
+ "23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"
+ ),
% BigCouch 0.4 style.
- assert_shards([23423,<<"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA">>]),
+ assert_shards([
+ 23423,
+ <<
+ "g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA"
+ >>
+ ]),
% BigCouch 0.4 style (as string).
- assert_shards("[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
- assert_shards("[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
- assert_shards("[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
- assert_shards("[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"),
+ assert_shards(
+ "[23423,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
+ ),
+ assert_shards(
+ "[23423 ,\"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
+ ),
+ assert_shards(
+ "[23423, \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
+ ),
+ assert_shards(
+ "[23423 , \"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"]"
+ ),
        % with internal hyphen
- assert_shards("651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
- "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
- "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"),
- assert_shards([651,"g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
- "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
- "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"]),
+ assert_shards(
+ "651-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+ "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+ "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"
+ ),
+ assert_shards([
+ 651,
+ "g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwNDLXMwBCwxygOFMiQ"
+ "5L8____sxJTcalIUgCSSfZgReE4FTmAFMWDFYXgVJQAUlQPVuSKS1EeC5BkaABSQHXz8"
+ "VgJUbgAonB_VqIPfoUHIArvE7T6AUQh0I1-WQAzp1XB"
+ ]),
% CouchDB 1.2 style
- assert_shards("\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
- "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
- "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\"")
+ assert_shards(
+ "\"23423-g1AAAAE7eJzLYWBg4MhgTmHgS0ktM3QwND"
+ "LXMwBCwxygOFMiQ5L8____sxIZcKlIUgCSSfZgRUw4FTmAFMWDFTHiVJQAUlSPX1Ee"
+ "C5BkaABSQHXzsxKZ8StcAFG4H4_bIAoPQBTeJ2j1A4hCUJBkAQC7U1NA\""
+ )
end).
-
assert_shards(Packed) ->
- ?assertMatch([{#shard{},_}|_], unpack_seqs(Packed, <<"foo">>)).
-
+ ?assertMatch([{#shard{}, _} | _], unpack_seqs(Packed, <<"foo">>)).
find_replacements_test() ->
% None of the workers are in the live list of shard but there is a
@@ -811,11 +901,14 @@ find_replacements_test() ->
{WorkersRes2, Dead2, Reps2} = find_replacements(Workers2, AllShards2),
?assertEqual([], WorkersRes2),
?assertEqual(Workers2, Dead2),
- ?assertEqual([
- mk_shard("n1", 11, ?RING_END),
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ], lists:sort(Reps2)),
+ ?assertEqual(
+ [
+ mk_shard("n1", 11, ?RING_END),
+ mk_shard("n2", 0, 4),
+ mk_shard("n2", 5, 10)
+ ],
+ lists:sort(Reps2)
+ ),
% One worker is available and one needs to be replaced. Replacement will be
% from two split shards
@@ -829,10 +922,13 @@ find_replacements_test() ->
{WorkersRes3, Dead3, Reps3} = find_replacements(Workers3, AllShards3),
?assertEqual(mk_workers([{"n2", 11, ?RING_END}]), WorkersRes3),
?assertEqual(mk_workers([{"n1", 0, 10}]), Dead3),
- ?assertEqual([
- mk_shard("n2", 0, 4),
- mk_shard("n2", 5, 10)
- ], lists:sort(Reps3)),
+ ?assertEqual(
+ [
+ mk_shard("n2", 0, 4),
+ mk_shard("n2", 5, 10)
+ ],
+ lists:sort(Reps3)
+ ),
% All workers are available. Make sure they are not killed even if there is
% a longer (single) shard to replace them.
@@ -849,20 +945,17 @@ find_replacements_test() ->
?assertEqual([], Dead4),
?assertEqual([], Reps4).
-
mk_workers(NodesRanges) ->
mk_workers(NodesRanges, nil).
mk_workers(NodesRanges, Val) ->
orddict:from_list([{mk_shard(N, B, E), Val} || {N, B, E} <- NodesRanges]).
-
mk_shard(Name, B, E) ->
Node = list_to_atom(Name),
BName = list_to_binary(Name),
#shard{name = BName, node = Node, range = [B, E]}.
-
find_split_shard_replacements_test() ->
    % One worker can be replaced and one can't
Dead1 = mk_workers([{"n1", 0, 10}, {"n2", 11, ?RING_END}], 42),
@@ -884,12 +977,18 @@ find_split_shard_replacements_test() ->
mk_shard("n2", 15, ?RING_END)
],
{Workers2, ShardsLeft2} = find_split_shard_replacements(Dead2, Shards2),
- ?assertEqual(mk_workers([
- {"n1", 0, 10},
- {"n2", 11, 12},
- {"n2", 13, 14},
- {"n2", 15, ?RING_END}
- ], 42), Workers2),
+ ?assertEqual(
+ mk_workers(
+ [
+ {"n1", 0, 10},
+ {"n2", 11, 12},
+ {"n2", 13, 14},
+ {"n2", 15, ?RING_END}
+ ],
+ 42
+ ),
+ Workers2
+ ),
?assertEqual([], ShardsLeft2),
% No workers can be replaced. Ranges match but they are on different nodes
@@ -902,7 +1001,6 @@ find_split_shard_replacements_test() ->
?assertEqual([], Workers3),
?assertEqual(Shards3, ShardsLeft3).
-
find_replacement_sequence_test() ->
Shards = [{"n2", 0, 10}, {"n3", 0, 5}],
Uuid = <<"abc1234">>,
@@ -915,20 +1013,28 @@ find_replacement_sequence_test() ->
% {Seq, Uuid} should work
Dead2 = mk_workers(Shards, {43, Uuid}),
- ?assertEqual({replace, 'n2', Uuid, 43},
- find_replacement_sequence(Dead2, [0, 10])),
- ?assertEqual({replace, 'n3', Uuid, 43},
- find_replacement_sequence(Dead2, [0, 5])),
+ ?assertEqual(
+ {replace, 'n2', Uuid, 43},
+ find_replacement_sequence(Dead2, [0, 10])
+ ),
+ ?assertEqual(
+ {replace, 'n3', Uuid, 43},
+ find_replacement_sequence(Dead2, [0, 5])
+ ),
% Can't find the range at all
?assertEqual(0, find_replacement_sequence(Dead2, [0, 4])),
% {Seq, Uuids, EpochNode} should work
Dead3 = mk_workers(Shards, {44, Uuid, Epoch}),
- ?assertEqual({replace, 'n1', Uuid, 44},
- find_replacement_sequence(Dead3, [0, 10])),
- ?assertEqual({replace, 'n1', Uuid, 44},
- find_replacement_sequence(Dead3, [0, 5])),
+ ?assertEqual(
+ {replace, 'n1', Uuid, 44},
+ find_replacement_sequence(Dead3, [0, 10])
+ ),
+ ?assertEqual(
+ {replace, 'n1', Uuid, 44},
+ find_replacement_sequence(Dead3, [0, 5])
+ ),
% Cannot replace a replacement
Dead4 = mk_workers(Shards, {replace, 'n1', Uuid, 45}),
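
For reference while reading the pack_seqs/unpack_seqs hunks above: a packed changes sequence is a numeric sum prefix plus a base64url-encoded, compressed term holding {Node, Range, Seq} tuples. A rough sketch of the round trip, with illustrative node names and ranges only:

```
%% Illustrative only; mirrors pack_seqs/1 and unpack_seq_decode_term/1 above.
pack_unpack_example() ->
    SeqList = [{'node1@127.0.0.1', [0, 10], 42}, {'node2@127.0.0.1', [11, 20], 58}],
    Opaque = couch_util:encodeBase64Url(term_to_binary(SeqList, [compressed])),
    Packed = iolist_to_binary([integer_to_list(42 + 58), $-, Opaque]),
    %% Decoding ignores the numeric prefix and recovers the shard sequences.
    SeqList = binary_to_term(couch_util:decodeBase64Url(Opaque)),
    Packed.
```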
diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl
index adfe7d648..104086d67 100644
--- a/src/fabric/src/fabric_view_map.erl
+++ b/src/fabric/src/fabric_view_map.erl
@@ -19,19 +19,20 @@
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-go(DbName, Options, GroupId, View, Args, Callback, Acc, VInfo)
- when is_binary(GroupId) ->
+go(DbName, Options, GroupId, View, Args, Callback, Acc, VInfo) when
+ is_binary(GroupId)
+->
{ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
go(DbName, Options, DDoc, View, Args, Callback, Acc, VInfo);
-
go(Db, Options, DDoc, View, Args0, Callback, Acc, VInfo) ->
DbName = fabric:dbname(Db),
- Args = case Args0 of
- #mrargs{keys = Keys, direction = rev} when is_list(Keys) ->
- Args0#mrargs{keys = lists:reverse(Keys)};
- #mrargs{} ->
- Args0
- end,
+ Args =
+ case Args0 of
+ #mrargs{keys = Keys, direction = rev} when is_list(Keys) ->
+ Args0#mrargs{keys = lists:reverse(Keys)};
+ #mrargs{} ->
+ Args0
+ end,
{Shards, RingOpts} = fabric_view:get_shards(Db, Args),
{CoordArgs, WorkerArgs} = fabric_view:fix_skip_and_limit(Args),
DocIdAndRev = fabric_util:doc_id_and_rev(DDoc),
@@ -44,8 +45,15 @@ go(Db, Options, DDoc, View, Args0, Callback, Acc, VInfo) ->
Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, map_view, RPCArgs),
RexiMon = fabric_util:create_monitors(Workers0),
try
- case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls,
- RingOpts) of
+ case
+ fabric_streams:start(
+ Workers0,
+ #shard.ref,
+ StartFun,
+ Repls,
+ RingOpts
+ )
+ of
{ok, ddoc_updated} ->
Callback({error, ddoc_updated}, Acc);
{ok, Workers} ->
@@ -72,10 +80,10 @@ go(Db, Options, DDoc, View, Args0, Callback, Acc, VInfo) ->
end.
go(DbName, Workers, {map, View, _}, Args, Callback, Acc0) ->
- #mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq=UpdateSeq} = Args,
+ #mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq = UpdateSeq} = Args,
Collation = couch_util:get_value(<<"collation">>, View#mrview.options),
State = #collector{
- db_name=DbName,
+ db_name = DbName,
query_args = Args,
callback = Callback,
counters = fabric_dict:init(Workers, 0),
@@ -85,24 +93,34 @@ go(DbName, Workers, {map, View, _}, Args, Callback, Acc0) ->
sorted = Args#mrargs.sorted,
collation = Collation,
user_acc = Acc0,
- update_seq = case UpdateSeq of true -> []; false -> nil end
+ update_seq =
+ case UpdateSeq of
+ true -> [];
+ false -> nil
+ end
},
- case rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, fabric_util:view_timeout(Args), 1000 * 60 * 60) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
+ case
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ fabric_util:view_timeout(Args),
+ 1000 * 60 * 60
+ )
+ of
+ {ok, NewState} ->
+ {ok, NewState#collector.user_acc};
+ {timeout, NewState} ->
+ Callback({error, timeout}, NewState#collector.user_acc);
+ {error, Resp} ->
+ {ok, Resp}
end.
handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
fabric_view:check_down_shards(State, NodeRef);
-
handle_message({rexi_EXIT, Reason}, Worker, State) ->
fabric_view:handle_worker_exit(State, Worker, Reason);
-
handle_message({meta, Meta0}, {Worker, From}, State) ->
Tot = couch_util:get_value(total, Meta0, 0),
Off = couch_util:get_value(offset, Meta0, 0),
@@ -122,47 +140,46 @@ handle_message({meta, Meta0}, {Worker, From}, State) ->
Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
Total = Total0 + Tot,
Offset = Offset0 + Off,
- UpdateSeq = case UpdateSeq0 of
- nil -> nil;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
+ UpdateSeq =
+ case UpdateSeq0 of
+ nil -> nil;
+ _ -> [{Worker, Seq} | UpdateSeq0]
+ end,
case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- total_rows = Total,
- update_seq = UpdateSeq,
- offset = Offset
- }};
- false ->
- FinalOffset = erlang:min(Total, Offset+State#collector.skip),
- Meta = [{total, Total}, {offset, FinalOffset}] ++
- case UpdateSeq of
- nil ->
- [];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
- end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- total_rows = Total,
- offset = FinalOffset,
- user_acc = Acc
- }}
+ true ->
+ {ok, State#collector{
+ counters = Counters1,
+ total_rows = Total,
+ update_seq = UpdateSeq,
+ offset = Offset
+ }};
+ false ->
+ FinalOffset = erlang:min(Total, Offset + State#collector.skip),
+ Meta =
+ [{total, Total}, {offset, FinalOffset}] ++
+ case UpdateSeq of
+ nil ->
+ [];
+ _ ->
+ [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
+ end,
+ {Go, Acc} = Callback({meta, Meta}, AccIn),
+ {Go, State#collector{
+ counters = fabric_dict:decrement_all(Counters1),
+ total_rows = Total,
+ offset = FinalOffset,
+ user_acc = Acc
+ }}
end;
-
-handle_message(#view_row{}, {_, _}, #collector{sorted=false, limit=0} = State) ->
- #collector{callback=Callback} = State,
+handle_message(#view_row{}, {_, _}, #collector{sorted = false, limit = 0} = State) ->
+ #collector{callback = Callback} = State,
{_, Acc} = Callback(complete, State#collector.user_acc),
- {stop, State#collector{user_acc=Acc}};
-
-handle_message(#view_row{} = Row, {_,From}, #collector{sorted=false} = St) ->
- #collector{callback=Callback, user_acc=AccIn, limit=Limit} = St,
+ {stop, State#collector{user_acc = Acc}};
+handle_message(#view_row{} = Row, {_, From}, #collector{sorted = false} = St) ->
+ #collector{callback = Callback, user_acc = AccIn, limit = Limit} = St,
{Go, Acc} = Callback(fabric_view:transform_row(Row), AccIn),
rexi:stream_ack(From),
- {Go, St#collector{user_acc=Acc, limit=Limit-1}};
-
+ {Go, St#collector{user_acc = Acc, limit = Limit - 1}};
handle_message(#view_row{} = Row, {Worker, From}, State) ->
#collector{
query_args = #mrargs{direction = Dir},
@@ -175,29 +192,26 @@ handle_message(#view_row{} = Row, {Worker, From}, State) ->
Dir,
Collation,
KeyDict0,
- Row#view_row{worker={Worker, From}},
+ Row#view_row{worker = {Worker, From}},
Rows0
),
Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=Counters1, keys=KeyDict},
+ State1 = State#collector{rows = Rows, counters = Counters1, keys = KeyDict},
fabric_view:maybe_send_row(State1);
-
handle_message(complete, Worker, State) ->
Counters = fabric_dict:update_counter(Worker, 1, State#collector.counters),
fabric_view:maybe_send_row(State#collector{counters = Counters});
-
-handle_message({execution_stats, _} = Msg, {_,From}, St) ->
- #collector{callback=Callback, user_acc=AccIn} = St,
+handle_message({execution_stats, _} = Msg, {_, From}, St) ->
+ #collector{callback = Callback, user_acc = AccIn} = St,
{Go, Acc} = Callback(Msg, AccIn),
rexi:stream_ack(From),
- {Go, St#collector{user_acc=Acc}};
-
+ {Go, St#collector{user_acc = Acc}};
handle_message(ddoc_updated, _Worker, State) ->
{stop, State}.
merge_row(Dir, Collation, undefined, Row, Rows0) ->
Rows1 = lists:merge(
- fun(#view_row{key=KeyA, id=IdA}, #view_row{key=KeyB, id=IdB}) ->
+ fun(#view_row{key = KeyA, id = IdA}, #view_row{key = KeyB, id = IdB}) ->
compare(Dir, Collation, {KeyA, IdA}, {KeyB, IdB})
end,
[Row],
@@ -205,23 +219,27 @@ merge_row(Dir, Collation, undefined, Row, Rows0) ->
),
{Rows1, undefined};
merge_row(Dir, Collation, KeyDict0, Row, Rows0) ->
- CmpFun = case Collation of
- <<"raw">> ->
- fun (A, A) -> 0;
- (A, B) -> case A < B of
- true -> -1;
- false -> 1
- end
- end;
- _ ->
- fun couch_ejson_compare:less/2
- end,
+ CmpFun =
+ case Collation of
+ <<"raw">> ->
+ fun
+ (A, A) ->
+ 0;
+ (A, B) ->
+ case A < B of
+ true -> -1;
+ false -> 1
+ end
+ end;
+ _ ->
+ fun couch_ejson_compare:less/2
+ end,
case maybe_update_keydict(Row#view_row.key, KeyDict0, CmpFun) of
undefined ->
{Rows0, KeyDict0};
KeyDict1 ->
Rows1 = lists:merge(
- fun(#view_row{key=A, id=IdA}, #view_row{key=B, id=IdB}) ->
+ fun(#view_row{key = A, id = IdA}, #view_row{key = B, id = IdB}) ->
case {Dir, CmpFun(A, B)} of
{fwd, 0} ->
IdA < IdB;
@@ -267,7 +285,7 @@ maybe_update_keydict(Key, KeyDict, CmpFun) ->
key_index(_, [], _) ->
undefined;
-key_index(KeyA, [{KeyB, Value}|KVs], CmpFun) ->
+key_index(KeyA, [{KeyB, Value} | KVs], CmpFun) ->
case CmpFun(KeyA, KeyB) of
0 -> Value;
_ -> key_index(KeyA, KVs, CmpFun)
diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl
index a432b2cd5..600c8d01a 100644
--- a/src/fabric/src/fabric_view_reduce.erl
+++ b/src/fabric/src/fabric_view_reduce.erl
@@ -22,7 +22,6 @@
go(DbName, GroupId, View, Args, Callback, Acc0, VInfo) when is_binary(GroupId) ->
{ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
go(DbName, DDoc, View, Args, Callback, Acc0, VInfo);
-
go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
DbName = fabric:dbname(Db),
{Shards, RingOpts} = fabric_view:get_shards(Db, Args),
@@ -34,11 +33,18 @@ go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
StartFun = fun(Shard) ->
hd(fabric_util:submit_jobs([Shard], fabric_rpc, reduce_view, RPCArgs))
end,
- Workers0 = fabric_util:submit_jobs(Shards,fabric_rpc,reduce_view,RPCArgs),
+ Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, reduce_view, RPCArgs),
RexiMon = fabric_util:create_monitors(Workers0),
try
- case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls,
- RingOpts) of
+ case
+ fabric_streams:start(
+ Workers0,
+ #shard.ref,
+ StartFun,
+ Repls,
+ RingOpts
+ )
+ of
{ok, ddoc_updated} ->
Callback({error, ddoc_updated}, Acc);
{ok, Workers} ->
@@ -64,13 +70,14 @@ go(Db, DDoc, VName, Args, Callback, Acc, VInfo) ->
rexi_monitor:stop(RexiMon)
end.
-go2(DbName, Workers, {red, {_, Lang, View}, _}=VInfo, Args, Callback, Acc0) ->
+go2(DbName, Workers, {red, {_, Lang, View}, _} = VInfo, Args, Callback, Acc0) ->
#mrargs{limit = Limit, skip = Skip, keys = Keys, update_seq = UpdateSeq} = Args,
RedSrc = couch_mrview_util:extract_view_reduce(VInfo),
- OsProc = case os_proc_needed(RedSrc) of
- true -> couch_query_servers:get_os_process(Lang);
- _ -> nil
- end,
+ OsProc =
+ case os_proc_needed(RedSrc) of
+ true -> couch_query_servers:get_os_process(Lang);
+ _ -> nil
+ end,
State = #collector{
db_name = DbName,
query_args = Args,
@@ -85,28 +92,39 @@ go2(DbName, Workers, {red, {_, Lang, View}, _}=VInfo, Args, Callback, Acc0) ->
collation = couch_util:get_value(<<"collation">>, View#mrview.options),
rows = dict:new(),
user_acc = Acc0,
- update_seq = case UpdateSeq of true -> []; false -> nil end
+ update_seq =
+ case UpdateSeq of
+ true -> [];
+ false -> nil
+ end
},
- try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3,
- State, fabric_util:view_timeout(Args), 1000 * 60 * 60) of
- {ok, NewState} ->
- {ok, NewState#collector.user_acc};
- {timeout, NewState} ->
- Callback({error, timeout}, NewState#collector.user_acc);
- {error, Resp} ->
- {ok, Resp}
+ try
+ rexi_utils:recv(
+ Workers,
+ #shard.ref,
+ fun handle_message/3,
+ State,
+ fabric_util:view_timeout(Args),
+ 1000 * 60 * 60
+ )
+ of
+ {ok, NewState} ->
+ {ok, NewState#collector.user_acc};
+ {timeout, NewState} ->
+ Callback({error, timeout}, NewState#collector.user_acc);
+ {error, Resp} ->
+ {ok, Resp}
after
- if OsProc == nil -> ok; true ->
- catch couch_query_servers:ret_os_process(OsProc)
+ if
+ OsProc == nil -> ok;
+ true -> catch couch_query_servers:ret_os_process(OsProc)
end
end.
handle_message({rexi_DOWN, _, {_, NodeRef}, _}, _, State) ->
fabric_view:check_down_shards(State, NodeRef);
-
handle_message({rexi_EXIT, Reason}, Worker, State) ->
fabric_view:handle_worker_exit(State, Worker, Reason);
-
handle_message({meta, Meta0}, {Worker, From}, State) ->
Seq = couch_util:get_value(update_seq, Meta0, 0),
#collector{
@@ -120,46 +138,44 @@ handle_message({meta, Meta0}, {Worker, From}, State) ->
0 = fabric_dict:lookup_element(Worker, Counters0),
rexi:stream_ack(From),
Counters1 = fabric_dict:update_counter(Worker, 1, Counters0),
- UpdateSeq = case UpdateSeq0 of
- nil -> nil;
- _ -> [{Worker, Seq} | UpdateSeq0]
- end,
- case fabric_dict:any(0, Counters1) of
- true ->
- {ok, State#collector{
- counters = Counters1,
- update_seq = UpdateSeq
- }};
- false ->
- Meta = case UpdateSeq of
- nil ->
- [];
- _ ->
- [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
+ UpdateSeq =
+ case UpdateSeq0 of
+ nil -> nil;
+ _ -> [{Worker, Seq} | UpdateSeq0]
end,
- {Go, Acc} = Callback({meta, Meta}, AccIn),
- {Go, State#collector{
- counters = fabric_dict:decrement_all(Counters1),
- user_acc = Acc
- }}
+ case fabric_dict:any(0, Counters1) of
+ true ->
+ {ok, State#collector{
+ counters = Counters1,
+ update_seq = UpdateSeq
+ }};
+ false ->
+ Meta =
+ case UpdateSeq of
+ nil ->
+ [];
+ _ ->
+ [{update_seq, fabric_view_changes:pack_seqs(UpdateSeq)}]
+ end,
+ {Go, Acc} = Callback({meta, Meta}, AccIn),
+ {Go, State#collector{
+ counters = fabric_dict:decrement_all(Counters1),
+ user_acc = Acc
+ }}
end;
-
-handle_message(#view_row{key=Key} = Row, {Worker, From}, State) ->
+handle_message(#view_row{key = Key} = Row, {Worker, From}, State) ->
#collector{counters = Counters0, rows = Rows0} = State,
true = fabric_dict:is_key(Worker, Counters0),
- Rows = dict:append(Key, Row#view_row{worker={Worker, From}}, Rows0),
+ Rows = dict:append(Key, Row#view_row{worker = {Worker, From}}, Rows0),
C1 = fabric_dict:update_counter(Worker, 1, Counters0),
- State1 = State#collector{rows=Rows, counters=C1},
+ State1 = State#collector{rows = Rows, counters = C1},
fabric_view:maybe_send_row(State1);
-
handle_message(complete, Worker, #collector{counters = Counters0} = State) ->
true = fabric_dict:is_key(Worker, Counters0),
C1 = fabric_dict:update_counter(Worker, 1, Counters0),
fabric_view:maybe_send_row(State#collector{counters = C1});
-
handle_message(ddoc_updated, _Worker, State) ->
{stop, State}.
os_proc_needed(<<"_", _/binary>>) -> false;
os_proc_needed(_) -> true.
-
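The fabric_view_reduce hunks above capture two of the most common erlfmt rewrites in this commit: a `case` expression used as a value moves onto its own indented block below the `=`, and a call whose arguments no longer fit on one line gets one argument per line with a dedented closing parenthesis. A minimal sketch of that style, with made-up module and function names rather than CouchDB code:

```
%% Hypothetical example, not from the CouchDB tree; it only illustrates the
%% erlfmt layout seen in the hunks above.
-module(fmt_case_example).
-export([labels/1]).

labels(N) when is_integer(N), N >= 0 ->
    %% A case expression assigned to a variable starts on the line after `=`.
    Parity =
        case N rem 2 of
            0 -> even;
            _ -> odd
        end,
    %% Long argument lists break with one argument per line.
    lists:foldl(
        fun(I, Acc) -> [{I, Parity} | Acc] end,
        [],
        lists:seq(1, N)
    ).
```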
diff --git a/src/fabric/test/eunit/fabric_db_create_tests.erl b/src/fabric/test/eunit/fabric_db_create_tests.erl
index 8e5b1085e..a7c98a96f 100644
--- a/src/fabric/test/eunit/fabric_db_create_tests.erl
+++ b/src/fabric/test/eunit/fabric_db_create_tests.erl
@@ -12,15 +12,12 @@
-module(fabric_db_create_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-define(TDEF(A), {atom_to_list(A), fun A/0}).
-
main_test_() ->
{
setup,
@@ -31,16 +28,13 @@ main_test_() ->
]
}.
-
setup() ->
test_util:start_couch([fabric]).
-
teardown(Ctx) ->
meck:unload(),
test_util:stop_couch(Ctx).
-
t_handle_shard_doc_conflict() ->
DbName = ?tempdb(),
meck:new(mem3, [passthrough]),
diff --git a/src/fabric/test/eunit/fabric_db_info_tests.erl b/src/fabric/test/eunit/fabric_db_info_tests.erl
index ccdafe3ae..d4fb1e73f 100644
--- a/src/fabric/test/eunit/fabric_db_info_tests.erl
+++ b/src/fabric/test/eunit/fabric_db_info_tests.erl
@@ -12,15 +12,12 @@
-module(fabric_db_info_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-define(TDEF(A), {atom_to_list(A), fun A/0}).
-
main_test_() ->
{
setup,
@@ -31,16 +28,13 @@ main_test_() ->
]
}.
-
setup() ->
test_util:start_couch([fabric]).
-
teardown(Ctx) ->
meck:unload(),
test_util:stop_couch(Ctx).
-
t_update_seq_has_uuids() ->
DbName = ?tempdb(),
ok = fabric:create_db(DbName, [{q, 1}, {n, 1}]),
diff --git a/src/fabric/test/eunit/fabric_db_uuids_tests.erl b/src/fabric/test/eunit/fabric_db_uuids_tests.erl
index 986945b52..1d99019a8 100644
--- a/src/fabric/test/eunit/fabric_db_uuids_tests.erl
+++ b/src/fabric/test/eunit/fabric_db_uuids_tests.erl
@@ -41,11 +41,15 @@ t_can_get_shard_uuids() ->
Shards = mem3:shards(DbName),
{ok, Uuids} = fabric:db_uuids(DbName),
?assertEqual(length(Shards), map_size(Uuids)),
- UuidsFromShards = lists:foldl(fun(#shard{} = Shard, Acc) ->
- Uuid = couch_util:with_db(Shard#shard.name, fun(Db) ->
- couch_db:get_uuid(Db)
- end),
- Acc#{Uuid => Shard}
- end, #{}, Shards),
+ UuidsFromShards = lists:foldl(
+ fun(#shard{} = Shard, Acc) ->
+ Uuid = couch_util:with_db(Shard#shard.name, fun(Db) ->
+ couch_db:get_uuid(Db)
+ end),
+ Acc#{Uuid => Shard}
+ end,
+ #{},
+ Shards
+ ),
?assertEqual(UuidsFromShards, Uuids),
ok = fabric:delete_db(DbName, []).
diff --git a/src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl b/src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl
index a78d17ab7..5463a5170 100644
--- a/src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl
+++ b/src/fabric/test/eunit/fabric_moved_shards_seq_tests.erl
@@ -12,15 +12,12 @@
-module(fabric_moved_shards_seq_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-define(TDEF(A), {atom_to_list(A), fun A/0}).
-
main_test_() ->
{
setup,
@@ -31,24 +28,24 @@ main_test_() ->
]
}.
-
setup() ->
test_util:start_couch([fabric]).
-
teardown(Ctx) ->
meck:unload(),
test_util:stop_couch(Ctx).
-
t_shard_moves_avoid_sequence_rewinds() ->
DocCnt = 30,
DbName = ?tempdb(),
- ok = fabric:create_db(DbName, [{q,1}, {n,1}]),
- lists:foreach(fun(I) ->
- update_doc(DbName, #doc{id = erlang:integer_to_binary(I)})
- end, lists:seq(1, DocCnt)),
+ ok = fabric:create_db(DbName, [{q, 1}, {n, 1}]),
+ lists:foreach(
+ fun(I) ->
+ update_doc(DbName, #doc{id = erlang:integer_to_binary(I)})
+ end,
+ lists:seq(1, DocCnt)
+ ),
{ok, _, Seq1, 0} = changes(DbName, #changes_args{limit = 1, since = "now"}),
[{_, Range, {Seq, Uuid, _}}] = seq_decode(Seq1),
@@ -76,24 +73,19 @@ t_shard_moves_avoid_sequence_rewinds() ->
ok = fabric:delete_db(DbName, []).
-
changes_callback(start, Acc) ->
{ok, Acc};
-
changes_callback({change, {Change}}, Acc) ->
CM = maps:from_list(Change),
{ok, [CM | Acc]};
-
changes_callback({stop, EndSeq, Pending}, Acc) ->
{ok, Acc, EndSeq, Pending}.
-
changes(DbName, #changes_args{} = Args) ->
fabric_util:isolate(fun() ->
fabric:changes(DbName, fun changes_callback/2, [], Args)
end).
-
update_doc(DbName, #doc{} = Doc) ->
fabric_util:isolate(fun() ->
case fabric:update_doc(DbName, Doc, [?ADMIN_CTX]) of
@@ -101,7 +93,6 @@ update_doc(DbName, #doc{} = Doc) ->
end
end).
-
seq_decode(Seq) ->
% This is copied from fabric_view_changes
Pattern = "^\"?([0-9]+-)?(?<opaque>.*?)\"?$",
@@ -109,13 +100,11 @@ seq_decode(Seq) ->
{match, Seq1} = re:run(Seq, Pattern, Options),
binary_to_term(couch_util:decodeBase64Url(Seq1)).
-
seq_encode(Unpacked) ->
% Copied from fabric_view_changes
Opaque = couch_util:encodeBase64Url(term_to_binary(Unpacked, [compressed])),
?l2b(["30", $-, Opaque]).
-
mock_epochs(Epochs) ->
% Since we made up a node name we'll have to mock epoch checking
meck:new(couch_db_engine, [passthrough]),
diff --git a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
index 6db6a70aa..d3872410e 100644
--- a/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
+++ b/src/fabric/test/eunit/fabric_rpc_purge_tests.erl
@@ -12,11 +12,9 @@
-module(fabric_rpc_purge_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(TDEF(A), {A, fun A/1}).
% TODO: Add tests:
@@ -68,25 +66,20 @@ main_test_() ->
]
}.
-
setup_all() ->
test_util:start_couch().
-
teardown_all(Ctx) ->
test_util:stop_couch(Ctx).
-
setup_no_purge() ->
{ok, Db} = create_db(),
populate_db(Db),
couch_db:name(Db).
-
teardown_no_purge(DbName) ->
ok = couch_server:delete(DbName, []).
-
setup_single_purge() ->
DbName = setup_no_purge(),
DocId = <<"0003">>,
@@ -94,26 +87,25 @@ setup_single_purge() ->
purge_doc(DbName, DocId),
{DbName, DocId, OldDoc, 1}.
-
teardown_single_purge({DbName, _, _, _}) ->
teardown_no_purge(DbName).
-
setup_multi_purge() ->
DbName = setup_no_purge(),
DocId = <<"0003">>,
{ok, OldDoc} = open_doc(DbName, DocId),
- lists:foreach(fun(I) ->
- PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])),
- purge_doc(DbName, PDocId)
- end, lists:seq(1, 5)),
+ lists:foreach(
+ fun(I) ->
+ PDocId = iolist_to_binary(io_lib:format("~4..0b", [I])),
+ purge_doc(DbName, PDocId)
+ end,
+ lists:seq(1, 5)
+ ),
{DbName, DocId, OldDoc, 3}.
-
teardown_multi_purge(Ctx) ->
teardown_single_purge(Ctx).
-
t_no_purge_no_filter(DbName) ->
DocId = <<"0003">>,
@@ -126,7 +118,6 @@ t_no_purge_no_filter(DbName) ->
?assert(CurrDoc /= OldDoc),
?assert(CurrDoc == NewDoc).
-
t_filter({DbName, DocId, OldDoc, _PSeq}) ->
?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
create_purge_checkpoint(DbName, 0),
@@ -135,7 +126,6 @@ t_filter({DbName, DocId, OldDoc, _PSeq}) ->
?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) ->
% Unknown nodes are assumed to start at PurgeSeq = 0
?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
@@ -147,7 +137,6 @@ t_filter_unknown_node({DbName, DocId, OldDoc, _PSeq}) ->
?assertEqual({not_found, missing}, open_doc(DbName, DocId)).
-
t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) ->
?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
create_purge_checkpoint(DbName, PSeq),
@@ -160,7 +149,6 @@ t_no_filter_old_node({DbName, DocId, OldDoc, PSeq}) ->
?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) ->
?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
create_purge_checkpoint(DbName, PSeq),
@@ -173,7 +161,6 @@ t_no_filter_different_node({DbName, DocId, OldDoc, PSeq}) ->
?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
t_filter_local_node({DbName, DocId, OldDoc, PSeq}) ->
?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
create_purge_checkpoint(DbName, PSeq),
@@ -185,15 +172,16 @@ t_filter_local_node({DbName, DocId, OldDoc, PSeq}) ->
% Add a local node rev to the list of node revs. It should
% be filtered out
{Pos, [Rev | _]} = OldDoc#doc.revs,
- RROpts = [{read_repair, [
- {tgt_node(), [{Pos, Rev}]},
- {node(), [{1, <<"123">>}]}
- ]}],
+ RROpts = [
+ {read_repair, [
+ {tgt_node(), [{Pos, Rev}]},
+ {node(), [{1, <<"123">>}]}
+ ]}
+ ],
rpc_update_doc(DbName, OldDoc, RROpts),
?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) ->
?assertEqual({not_found, missing}, open_doc(DbName, DocId)),
create_purge_checkpoint(DbName, PSeq),
@@ -202,38 +190,37 @@ t_no_filter_after_repl({DbName, DocId, OldDoc, PSeq}) ->
?assertEqual({ok, OldDoc}, open_doc(DbName, DocId)).
-
wrap({Name, Fun}) ->
fun(Arg) ->
- {timeout, 60, {atom_to_list(Name), fun() ->
- process_flag(trap_exit, true),
- Fun(Arg)
- end}}
+ {timeout, 60,
+ {atom_to_list(Name), fun() ->
+ process_flag(trap_exit, true),
+ Fun(Arg)
+ end}}
end.
-
create_db() ->
DbName = ?tempdb(),
couch_db:create(DbName, [?ADMIN_CTX]).
-
populate_db(Db) ->
- Docs = lists:map(fun(Idx) ->
- DocId = lists:flatten(io_lib:format("~4..0b", [Idx])),
- #doc{
- id = list_to_binary(DocId),
- body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]}
- }
- end, lists:seq(1, 100)),
+ Docs = lists:map(
+ fun(Idx) ->
+ DocId = lists:flatten(io_lib:format("~4..0b", [Idx])),
+ #doc{
+ id = list_to_binary(DocId),
+ body = {[{<<"int">>, Idx}, {<<"vsn">>, 2}]}
+ }
+ end,
+ lists:seq(1, 100)
+ ),
{ok, _} = couch_db:update_docs(Db, Docs).
-
open_doc(DbName, DocId) ->
couch_util:with_db(DbName, fun(Db) ->
couch_db:open_doc(Db, DocId, [])
end).
-
create_update(Doc, NewVsn) ->
#doc{
id = DocId,
@@ -247,7 +234,6 @@ create_update(Doc, NewVsn) ->
body = {NewProps}
}.
-
purge_doc(DbName, DocId) ->
{ok, Doc} = open_doc(DbName, DocId),
{Pos, [Rev | _]} = Doc#doc.revs,
@@ -257,33 +243,30 @@ purge_doc(DbName, DocId) ->
end),
?assertEqual({ok, [{ok, [{Pos, Rev}]}]}, Resp).
-
create_purge_checkpoint(DbName, PurgeSeq) ->
create_purge_checkpoint(DbName, PurgeSeq, tgt_node_bin()).
-
create_purge_checkpoint(DbName, PurgeSeq, TgtNode) when is_binary(TgtNode) ->
Resp = couch_util:with_db(DbName, fun(Db) ->
SrcUUID = couch_db:get_uuid(Db),
TgtUUID = couch_uuids:random(),
CPDoc = #doc{
id = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
- body = {[
- {<<"target_node">>, TgtNode},
- {<<"purge_seq">>, PurgeSeq}
- ]}
+ body =
+ {[
+ {<<"target_node">>, TgtNode},
+ {<<"purge_seq">>, PurgeSeq}
+ ]}
},
couch_db:update_docs(Db, [CPDoc], [])
end),
?assertMatch({ok, [_]}, Resp).
-
rpc_update_doc(DbName, Doc) ->
{Pos, [Rev | _]} = Doc#doc.revs,
RROpt = {read_repair, [{tgt_node(), [{Pos, Rev}]}]},
rpc_update_doc(DbName, Doc, [RROpt]).
-
rpc_update_doc(DbName, Doc, Opts) ->
Ref = erlang:make_ref(),
put(rexi_from, {self(), Ref}),
@@ -298,10 +281,8 @@ rpc_update_doc(DbName, Doc, Opts) ->
end),
?assertEqual({ok, []}, Reply).
-
tgt_node() ->
'foo@127.0.0.1'.
-
tgt_node_bin() ->
iolist_to_binary(atom_to_list(tgt_node())).
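The `wrap/1` helpers in the purge tests above show how erlfmt breaks a nested tuple that no longer fits on one line: the inner tuple starts on the next line, one indent level deeper, and the trailing `end}}` closes both. A stripped-down sketch of the same shape (the test-specific `process_flag` call is omitted, so this is illustrative only):

```
%% Hypothetical sketch of the nested-tuple layout shown above.
-module(fmt_tuple_example).
-export([wrap/1]).

%% Wraps a named test fun in a 60 second eunit timeout tuple.
wrap({Name, Fun}) ->
    fun(Arg) ->
        {timeout, 60,
            {atom_to_list(Name), fun() ->
                Fun(Arg)
            end}}
    end.
```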
diff --git a/src/fabric/test/eunit/fabric_rpc_tests.erl b/src/fabric/test/eunit/fabric_rpc_tests.erl
index b94caf659..f47d153ff 100644
--- a/src/fabric/test/eunit/fabric_rpc_tests.erl
+++ b/src/fabric/test/eunit/fabric_rpc_tests.erl
@@ -12,14 +12,11 @@
-module(fabric_rpc_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(TDEF(A), {A, fun A/1}).
-
main_test_() ->
{
setup,
@@ -53,69 +50,63 @@ main_test_() ->
?TDEF(t_db_create_with_config)
])
}
-
]
}.
-
setup_all() ->
test_util:start_couch([rexi, mem3, fabric]).
-
teardown_all(Ctx) ->
test_util:stop_couch(Ctx).
-
setup_no_db_or_config() ->
?tempdb().
-
setup_shard() ->
?tempshard().
-
teardown_noop(_DbName) ->
ok.
teardown_db(DbName) ->
ok = couch_server:delete(DbName, []).
-
wrap({Name, Fun}) ->
fun(Arg) ->
- {timeout, 60, {atom_to_list(Name), fun() ->
- process_flag(trap_exit, true),
- Fun(Arg)
- end}}
+ {timeout, 60,
+ {atom_to_list(Name), fun() ->
+ process_flag(trap_exit, true),
+ Fun(Arg)
+ end}}
end.
-
t_no_db(DbName) ->
?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])).
-
t_no_config_non_shard_db_create_succeeds(DbName) ->
?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
?assertEqual(DbName, mem3:dbname(DbName)),
?assertMatch({ok, _}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])).
-
t_no_config_db_create_fails_for_shard(DbName) ->
?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertException(throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])).
-
+ ?assertException(
+ throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])
+ ).
t_no_config_db_create_fails_for_shard_rpc(DbName) ->
?assertEqual({not_found, no_db_file}, couch_db:open_int(DbName, [?ADMIN_CTX])),
- ?assertException(throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])),
+ ?assertException(
+ throw, {error, missing_target}, mem3_util:get_or_create_db(DbName, [?ADMIN_CTX])
+ ),
MFA = {fabric_rpc, get_db_info, [DbName]},
Ref = rexi:cast(node(), self(), MFA),
- Resp = receive
- Resp0 -> Resp0
- end,
+ Resp =
+ receive
+ Resp0 -> Resp0
+ end,
?assertMatch({Ref, {'rexi_EXIT', {{error, missing_target}, _}}}, Resp).
-
t_db_create_with_config(DbName) ->
MDbName = mem3:dbname(DbName),
DbDoc = #doc{id = MDbName, body = test_db_doc()},
@@ -138,21 +129,22 @@ t_db_create_with_config(DbName) ->
{Body} = test_db_doc(),
DbProps = mem3_util:get_shard_opts(Body),
- {Props} = case couch_db_engine:get_props(LDb) of
- undefined -> {[]};
- Else -> {Else}
- end,
+ {Props} =
+ case couch_db_engine:get_props(LDb) of
+ undefined -> {[]};
+ Else -> {Else}
+ end,
%% We don't normally store the default engine name
- EngineProps = case couch_db_engine:get_engine(LDb) of
- couch_bt_engine ->
- [];
- EngineName ->
- [{engine, EngineName}]
- end,
+ EngineProps =
+ case couch_db_engine:get_engine(LDb) of
+ couch_bt_engine ->
+ [];
+ EngineName ->
+ [{engine, EngineName}]
+ end,
?assertEqual([{props, Props} | EngineProps], DbProps)
end).
-
test_db_doc() ->
{[
{<<"shard_suffix">>, ".1584997648"},
@@ -164,18 +156,24 @@ test_db_doc() ->
[<<"add">>, <<"80000000-ffffffff">>, <<"node2@127.0.0.1">>],
[<<"add">>, <<"80000000-ffffffff">>, <<"node3@127.0.0.1">>]
]},
- {<<"by_node">>, {[
- {<<"node1@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
- {<<"node2@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
- {<<"node3@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]}
- ]}},
- {<<"by_range">>, {[
- {<<"00000000-7fffffff">>, [<<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>]},
- {<<"80000000-ffffffff">>, [<<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>]}
- ]}},
- {<<"props">>, {[
- {partitioned, true},
- {hash, [couch_partition, hash, []]}
- ]}}
+ {<<"by_node">>,
+ {[
+ {<<"node1@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
+ {<<"node2@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]},
+ {<<"node3@127.0.0.1">>, [<<"00000000-7fffffff">>, <<"80000000-ffffffff">>]}
+ ]}},
+ {<<"by_range">>,
+ {[
+ {<<"00000000-7fffffff">>, [
+ <<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>
+ ]},
+ {<<"80000000-ffffffff">>, [
+ <<"node1@127.0.0.1">>, <<"node2@127.0.0.1">>, <<"node3@127.0.0.1">>
+ ]}
+ ]}},
+ {<<"props">>,
+ {[
+ {partitioned, true},
+ {hash, [couch_partition, hash, []]}
+ ]}}
]}.
-
diff --git a/src/fabric/test/eunit/fabric_tests.erl b/src/fabric/test/eunit/fabric_tests.erl
index 53995933b..c0e2b626b 100644
--- a/src/fabric/test/eunit/fabric_tests.erl
+++ b/src/fabric/test/eunit/fabric_tests.erl
@@ -12,23 +12,22 @@
-module(fabric_tests).
-
-include_lib("couch/include/couch_eunit.hrl").
-
cleanup_index_files_test_() ->
{
setup,
fun setup/0,
fun teardown/1,
- fun(Ctx) -> [
- t_cleanup_index_files(),
- t_cleanup_index_files_with_existing_db(Ctx),
- t_cleanup_index_files_with_deleted_db(Ctx)
- ] end
+ fun(Ctx) ->
+ [
+ t_cleanup_index_files(),
+ t_cleanup_index_files_with_existing_db(Ctx),
+ t_cleanup_index_files_with_deleted_db(Ctx)
+ ]
+ end
}.
-
setup() ->
Ctx = test_util:start_couch([fabric]),
% TempDb is deleted in the test "t_cleanup_index_files_with_deleted_db".
@@ -36,25 +35,25 @@ setup() ->
fabric:create_db(TempDb),
{Ctx, TempDb}.
-
teardown({Ctx, _TempDb}) ->
test_util:stop_couch(Ctx).
-
t_cleanup_index_files() ->
?_assert(
- lists:all(fun(Res) -> Res =:= ok end, fabric:cleanup_index_files())).
-
+ lists:all(fun(Res) -> Res =:= ok end, fabric:cleanup_index_files())
+ ).
t_cleanup_index_files_with_existing_db({_Ctx, TempDb}) ->
?_assertEqual(ok, fabric:cleanup_index_files(TempDb)).
-
t_cleanup_index_files_with_deleted_db({_Ctx, TempDb}) ->
?_test(
begin
fabric:delete_db(TempDb, []),
- ?assertError(database_does_not_exist,
- fabric:inactive_index_files(TempDb)),
+ ?assertError(
+ database_does_not_exist,
+ fabric:inactive_index_files(TempDb)
+ ),
?assertEqual(ok, fabric:cleanup_index_files(TempDb))
- end).
+ end
+ ).
diff --git a/src/global_changes/src/global_changes_app.erl b/src/global_changes/src/global_changes_app.erl
index 03322a27e..aa0e5d3fd 100644
--- a/src/global_changes/src/global_changes_app.erl
+++ b/src/global_changes/src/global_changes_app.erl
@@ -13,16 +13,13 @@
-module(global_changes_app).
-behavior(application).
-
-export([
start/2,
stop/1
]).
-
start(_StartType, _StartArgs) ->
global_changes_sup:start_link().
-
stop(_State) ->
ok.
diff --git a/src/global_changes/src/global_changes_epi.erl b/src/global_changes/src/global_changes_epi.erl
index 5d8cbf928..25e204001 100644
--- a/src/global_changes/src/global_changes_epi.erl
+++ b/src/global_changes/src/global_changes_epi.erl
@@ -32,7 +32,6 @@ providers() ->
{chttpd_handlers, global_changes_httpd_handlers}
].
-
services() ->
[
{global_changes, global_changes_plugin}
diff --git a/src/global_changes/src/global_changes_httpd.erl b/src/global_changes/src/global_changes_httpd.erl
index e579b09ea..cb4016b63 100644
--- a/src/global_changes/src/global_changes_httpd.erl
+++ b/src/global_changes/src/global_changes_httpd.erl
@@ -28,15 +28,16 @@
limit
}).
-handle_global_changes_req(#httpd{method='GET'}=Req) ->
+handle_global_changes_req(#httpd{method = 'GET'} = Req) ->
Db = global_changes_util:get_dbname(),
Feed = chttpd:qs_value(Req, "feed", "normal"),
Options = parse_global_changes_query(Req),
- Heartbeat = case lists:keyfind(heartbeat, 1, Options) of
- {heartbeat, true} -> 60000;
- {heartbeat, Other} -> Other;
- false -> false
- end,
+ Heartbeat =
+ case lists:keyfind(heartbeat, 1, Options) of
+ {heartbeat, true} -> 60000;
+ {heartbeat, Other} -> Other;
+ false -> false
+ end,
% Limit is handled in the changes callback, since the limit count needs to
% only account for changes which happen after the filter.
Limit = couch_util:get_value(limit, Options),
@@ -44,11 +45,11 @@ handle_global_changes_req(#httpd{method='GET'}=Req) ->
Options1 = Options,
Owner = allowed_owner(Req),
Acc = #acc{
- username=Owner,
- feed=Feed,
- resp=Req,
- heartbeat_interval=Heartbeat,
- limit=Limit
+ username = Owner,
+ feed = Feed,
+ resp = Req,
+ heartbeat_interval = Heartbeat,
+ limit = Limit
},
case Feed of
"normal" ->
@@ -56,7 +57,7 @@ handle_global_changes_req(#httpd{method='GET'}=Req) ->
Suffix = mem3:shard_suffix(Db),
Etag = chttpd:make_etag({Info, Suffix}),
chttpd:etag_respond(Req, Etag, fun() ->
- fabric:changes(Db, fun changes_callback/2, Acc#acc{etag=Etag}, Options1)
+ fabric:changes(Db, fun changes_callback/2, Acc#acc{etag = Etag}, Options1)
end);
Feed when Feed =:= "continuous"; Feed =:= "longpoll"; Feed =:= "eventsource" ->
fabric:changes(Db, fun changes_callback/2, Acc, Options1);
@@ -68,18 +69,22 @@ handle_global_changes_req(Req) ->
chttpd:send_method_not_allowed(Req, "GET").
transform_change(Username, Change) ->
- global_changes_plugin:transform_change(Username, Change,
- fun default_transform_change/2).
+ global_changes_plugin:transform_change(
+ Username,
+ Change,
+ fun default_transform_change/2
+ ).
default_transform_change(Username, {Props}) ->
{id, Id} = lists:keyfind(id, 1, Props),
{seq, Seq} = lists:keyfind(seq, 1, Props),
- Info = case binary:split(Id, <<":">>) of
- [Event0, DbName0] ->
- {Event0, DbName0};
- _ ->
- skip
- end,
+ Info =
+ case binary:split(Id, <<":">>) of
+ [Event0, DbName0] ->
+ {Event0, DbName0};
+ _ ->
+ skip
+ end,
case Info of
% Client is an admin, show them everything.
{Event, DbName} when Username == admin ->
@@ -94,19 +99,17 @@ default_transform_change(Username, {Props}) ->
changes_callback(waiting_for_updates, Acc) ->
{ok, Acc};
-
% This clause is only hit when _db_updates is queried with limit=0. For
% limit>0, the request is stopped by maybe_finish/1.
-changes_callback({change, _}, #acc{limit=0}=Acc) ->
+changes_callback({change, _}, #acc{limit = 0} = Acc) ->
{stop, Acc};
-
% callbacks for continuous feed (newline-delimited JSON Objects)
-changes_callback(start, #acc{feed="continuous"}=Acc) ->
- #acc{resp=Req} = Acc,
+changes_callback(start, #acc{feed = "continuous"} = Acc) ->
+ #acc{resp = Req} = Acc,
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200),
- {ok, Acc#acc{resp=Resp, last_data_sent_time=os:timestamp()}};
-changes_callback({change, Change0}, #acc{feed="continuous"}=Acc) ->
- #acc{resp=Resp, username=Username} = Acc,
+ {ok, Acc#acc{resp = Resp, last_data_sent_time = os:timestamp()}};
+changes_callback({change, Change0}, #acc{feed = "continuous"} = Acc) ->
+ #acc{resp = Resp, username = Username} = Acc,
case transform_change(Username, Change0) of
skip ->
{ok, maybe_send_heartbeat(Acc)};
@@ -114,20 +117,21 @@ changes_callback({change, Change0}, #acc{feed="continuous"}=Acc) ->
Line = [?JSON_ENCODE(Change) | "\n"],
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Line),
Acc1 = Acc#acc{
- resp=Resp1,
- last_data_sent_time=os:timestamp()
+ resp = Resp1,
+ last_data_sent_time = os:timestamp()
},
maybe_finish(Acc1)
end;
-changes_callback({stop, EndSeq}, #acc{feed="continuous"}=Acc) ->
+changes_callback({stop, EndSeq}, #acc{feed = "continuous"} = Acc) ->
% Temporary upgrade clause - Case 24236
changes_callback({stop, EndSeq, null}, Acc);
-changes_callback({stop, EndSeq, _Pending}, #acc{feed="continuous"}=Acc) ->
- #acc{resp=Resp} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp,
- [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]),
+changes_callback({stop, EndSeq, _Pending}, #acc{feed = "continuous"} = Acc) ->
+ #acc{resp = Resp} = Acc,
+ {ok, Resp1} = chttpd:send_delayed_chunk(
+ Resp,
+ [?JSON_ENCODE({[{<<"last_seq">>, EndSeq}]}) | "\n"]
+ ),
chttpd:end_delayed_json_response(Resp1);
-
% callbacks for eventsource feed (newline-delimited eventsource Objects)
changes_callback(start, #acc{feed = "eventsource"} = Acc) ->
#acc{resp = Req} = Acc,
@@ -136,12 +140,15 @@ changes_callback(start, #acc{feed = "eventsource"} = Acc) ->
{"Cache-Control", "no-cache"}
],
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200, Headers),
- {ok, Acc#acc{resp = Resp, last_data_sent_time=os:timestamp()}};
-changes_callback({change, {ChangeProp}=Change}, #acc{resp = Resp, feed = "eventsource"} = Acc) ->
+ {ok, Acc#acc{resp = Resp, last_data_sent_time = os:timestamp()}};
+changes_callback({change, {ChangeProp} = Change}, #acc{resp = Resp, feed = "eventsource"} = Acc) ->
Seq = proplists:get_value(seq, ChangeProp),
Chunk = [
- "data: ", ?JSON_ENCODE(Change),
- "\n", "id: ", ?JSON_ENCODE(Seq),
+ "data: ",
+ ?JSON_ENCODE(Change),
+ "\n",
+ "id: ",
+ ?JSON_ENCODE(Seq),
"\n\n"
],
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Chunk),
@@ -155,37 +162,41 @@ changes_callback({stop, _EndSeq}, #acc{feed = "eventsource"} = Acc) ->
#acc{resp = Resp} = Acc,
% {ok, Resp1} = chttpd:send_delayed_chunk(Resp, Buf),
chttpd:end_delayed_json_response(Resp);
-
% callbacks for longpoll and normal (single JSON Object)
-changes_callback(start, #acc{feed="normal", etag=Etag}=Acc)
- when Etag =/= undefined ->
- #acc{resp=Req} = Acc,
+changes_callback(start, #acc{feed = "normal", etag = Etag} = Acc) when
+ Etag =/= undefined
+->
+ #acc{resp = Req} = Acc,
FirstChunk = "{\"results\":[\n",
- {ok, Resp} = chttpd:start_delayed_json_response(Req, 200,
- [{"Etag",Etag}], FirstChunk),
- {ok, Acc#acc{resp=Resp, prepend="", last_data_sent_time=os:timestamp()}};
+ {ok, Resp} = chttpd:start_delayed_json_response(
+ Req,
+ 200,
+ [{"Etag", Etag}],
+ FirstChunk
+ ),
+ {ok, Acc#acc{resp = Resp, prepend = "", last_data_sent_time = os:timestamp()}};
changes_callback(start, Acc) ->
- #acc{resp=Req} = Acc,
+ #acc{resp = Req} = Acc,
FirstChunk = "{\"results\":[\n",
{ok, Resp} = chttpd:start_delayed_json_response(Req, 200, [], FirstChunk),
{ok, Acc#acc{
- resp=Resp,
- prepend="",
- last_data_sent_time=os:timestamp()
+ resp = Resp,
+ prepend = "",
+ last_data_sent_time = os:timestamp()
}};
changes_callback({change, Change0}, Acc) ->
- #acc{resp=Resp, prepend=Prepend, username=Username} = Acc,
+ #acc{resp = Resp, prepend = Prepend, username = Username} = Acc,
case transform_change(Username, Change0) of
skip ->
{ok, maybe_send_heartbeat(Acc)};
Change ->
- #acc{resp=Resp, prepend=Prepend} = Acc,
+ #acc{resp = Resp, prepend = Prepend} = Acc,
Line = [Prepend, ?JSON_ENCODE(Change)],
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, Line),
Acc1 = Acc#acc{
- prepend=",\r\n",
- resp=Resp1,
- last_data_sent_time=os:timestamp()
+ prepend = ",\r\n",
+ resp = Resp1,
+ last_data_sent_time = os:timestamp()
},
maybe_finish(Acc1)
end;
@@ -193,18 +204,18 @@ changes_callback({stop, EndSeq}, Acc) ->
% Temporary upgrade clause - Case 24236
changes_callback({stop, EndSeq, null}, Acc);
changes_callback({stop, EndSeq, _Pending}, Acc) ->
- #acc{resp=Resp} = Acc,
- {ok, Resp1} = chttpd:send_delayed_chunk(Resp,
- ["\n],\n\"last_seq\":", ?JSON_ENCODE(EndSeq), "}\n"]),
+ #acc{resp = Resp} = Acc,
+ {ok, Resp1} = chttpd:send_delayed_chunk(
+ Resp,
+ ["\n],\n\"last_seq\":", ?JSON_ENCODE(EndSeq), "}\n"]
+ ),
chttpd:end_delayed_json_response(Resp1);
-
changes_callback(timeout, Acc) ->
{ok, maybe_send_heartbeat(Acc)};
-
-changes_callback({error, Reason}, #acc{resp=Req=#httpd{}}) ->
+changes_callback({error, Reason}, #acc{resp = Req = #httpd{}}) ->
chttpd:send_error(Req, Reason);
changes_callback({error, Reason}, Acc) ->
- #acc{etag=Etag, feed=Feed, resp=Resp} = Acc,
+ #acc{etag = Etag, feed = Feed, resp = Resp} = Acc,
case {Feed, Etag} of
{"normal", Etag} when Etag =/= undefined ->
chttpd:send_error(Resp, Reason);
@@ -212,7 +223,6 @@ changes_callback({error, Reason}, Acc) ->
chttpd:send_delayed_error(Resp, Reason)
end.
-
maybe_finish(Acc) ->
case Acc#acc.limit of
1 ->
@@ -220,48 +230,50 @@ maybe_finish(Acc) ->
undefined ->
{ok, Acc};
Limit ->
- {ok, Acc#acc{limit=Limit-1}}
+ {ok, Acc#acc{limit = Limit - 1}}
end.
-
-maybe_send_heartbeat(#acc{heartbeat_interval=false}=Acc) ->
+maybe_send_heartbeat(#acc{heartbeat_interval = false} = Acc) ->
Acc;
maybe_send_heartbeat(Acc) ->
- #acc{last_data_sent_time=LastSentTime, heartbeat_interval=Interval, resp=Resp} = Acc,
+ #acc{last_data_sent_time = LastSentTime, heartbeat_interval = Interval, resp = Resp} = Acc,
Now = os:timestamp(),
case timer:now_diff(Now, LastSentTime) div 1000 > Interval of
true ->
{ok, Resp1} = chttpd:send_delayed_chunk(Resp, "\n"),
- Acc#acc{last_data_sent_time=Now, resp=Resp1};
+ Acc#acc{last_data_sent_time = Now, resp = Resp1};
false ->
Acc
end.
-
parse_global_changes_query(Req) ->
- lists:foldl(fun({Key, Value}, Args) ->
- case {Key, Value} of
- {"feed", _} ->
- [{feed, Value} | Args];
- {"descending", "true"} ->
- [{dir, rev} | Args];
- {"since", _} ->
- [{since, Value} | Args];
- {"limit", _} ->
- [{limit, to_non_neg_int(Value)} | Args];
- {"heartbeat", "true"} ->
- [{heartbeat, true} | Args];
- {"heartbeat", "false"} ->
- Args;
- {"heartbeat", _} ->
- [{heartbeat, to_non_neg_int(Value)} | Args];
- {"timeout", _} ->
- [{timeout, to_non_neg_int(Value)} | Args];
- _Else -> % unknown key value pair, ignore.
- Args
- end
- end, [], chttpd:qs(Req)).
-
+ lists:foldl(
+ fun({Key, Value}, Args) ->
+ case {Key, Value} of
+ {"feed", _} ->
+ [{feed, Value} | Args];
+ {"descending", "true"} ->
+ [{dir, rev} | Args];
+ {"since", _} ->
+ [{since, Value} | Args];
+ {"limit", _} ->
+ [{limit, to_non_neg_int(Value)} | Args];
+ {"heartbeat", "true"} ->
+ [{heartbeat, true} | Args];
+ {"heartbeat", "false"} ->
+ Args;
+ {"heartbeat", _} ->
+ [{heartbeat, to_non_neg_int(Value)} | Args];
+ {"timeout", _} ->
+ [{timeout, to_non_neg_int(Value)} | Args];
+ % unknown key value pair, ignore.
+ _Else ->
+ Args
+ end
+ end,
+ [],
+ chttpd:qs(Req)
+ ).
to_non_neg_int(Value) ->
try list_to_integer(Value) of
@@ -269,17 +281,18 @@ to_non_neg_int(Value) ->
V;
_ ->
throw({bad_request, invalid_integer})
- catch error:badarg ->
- throw({bad_request, invalid_integer})
+ catch
+ error:badarg ->
+ throw({bad_request, invalid_integer})
end.
allowed_owner(Req) ->
case config:get("global_changes", "allowed_owner", undefined) of
- undefined ->
- chttpd:verify_is_server_admin(Req),
- admin;
- SpecStr ->
- {ok, {M, F, A}} = couch_util:parse_term(SpecStr),
- couch_util:validate_callback_exists(M, F, 2),
- M:F(Req, A)
+ undefined ->
+ chttpd:verify_is_server_admin(Req),
+ admin;
+ SpecStr ->
+ {ok, {M, F, A}} = couch_util:parse_term(SpecStr),
+ couch_util:validate_callback_exists(M, F, 2),
+ M:F(Req, A)
end.
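The `to_non_neg_int/1` hunk above shows erlfmt's `try ... of ... catch` layout: the `catch` keyword stands alone and each `Class:Reason ->` clause gets its own indented lines. A small sketch under the same rules (hypothetical module, not the CouchDB function):

```
%% Hypothetical example of the try/of/catch layout used above.
-module(fmt_try_example).
-export([to_count/1]).

to_count(Value) when is_list(Value) ->
    try list_to_integer(Value) of
        N when N >= 0 ->
            {ok, N};
        _ ->
            {error, negative}
    catch
        error:badarg ->
            {error, not_an_integer}
    end.
```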
diff --git a/src/global_changes/src/global_changes_listener.erl b/src/global_changes/src/global_changes_listener.erl
index 9adf0e13d..71d14e274 100644
--- a/src/global_changes/src/global_changes_listener.erl
+++ b/src/global_changes/src/global_changes_listener.erl
@@ -13,7 +13,6 @@
-module(global_changes_listener).
-behavior(couch_event_listener).
-
-export([
start/0
]).
@@ -35,131 +34,142 @@
dbname
}).
-
-include_lib("mem3/include/mem3.hrl").
-
start() ->
couch_event_listener:start(?MODULE, nil, [all_dbs]).
-
init(_) ->
% get configs as strings
UpdateDb0 = config:get("global_changes", "update_db", "true"),
MaxEventDelay0 = config:get("global_changes", "max_event_delay", "25"),
% make config strings into other data types
- UpdateDb = case UpdateDb0 of "false" -> false; _ -> true end,
+ UpdateDb =
+ case UpdateDb0 of
+ "false" -> false;
+ _ -> true
+ end,
MaxEventDelay = list_to_integer(MaxEventDelay0),
State = #state{
- update_db=UpdateDb,
- pending_update_count=0,
- pending_updates=sets:new(),
- max_event_delay=MaxEventDelay,
- dbname=global_changes_util:get_dbname()
+ update_db = UpdateDb,
+ pending_update_count = 0,
+ pending_updates = sets:new(),
+ max_event_delay = MaxEventDelay,
+ dbname = global_changes_util:get_dbname()
},
{ok, State}.
-
terminate(_Reason, _State) ->
ok.
-
-handle_event(_ShardName, _Event, #state{update_db=false}=State) ->
+handle_event(_ShardName, _Event, #state{update_db = false} = State) ->
{ok, State};
-handle_event(ShardName, Event, State0)
- when Event =:= updated orelse Event =:= deleted
- orelse Event =:= created ->
- #state{dbname=ChangesDbName} = State0,
- State = case mem3:dbname(ShardName) of
- ChangesDbName ->
- State0;
- DbName ->
- #state{pending_update_count=Count} = State0,
- EventBin = erlang:atom_to_binary(Event, latin1),
- Key = <<EventBin/binary, <<":">>/binary, DbName/binary>>,
- Pending = sets:add_element(Key, State0#state.pending_updates),
- couch_stats:update_gauge(
- [global_changes, listener_pending_updates],
- Count + 1
- ),
- State0#state{pending_updates=Pending, pending_update_count=Count+1}
- end,
+handle_event(ShardName, Event, State0) when
+ Event =:= updated orelse Event =:= deleted orelse
+ Event =:= created
+->
+ #state{dbname = ChangesDbName} = State0,
+ State =
+ case mem3:dbname(ShardName) of
+ ChangesDbName ->
+ State0;
+ DbName ->
+ #state{pending_update_count = Count} = State0,
+ EventBin = erlang:atom_to_binary(Event, latin1),
+ Key = <<EventBin/binary, <<":">>/binary, DbName/binary>>,
+ Pending = sets:add_element(Key, State0#state.pending_updates),
+ couch_stats:update_gauge(
+ [global_changes, listener_pending_updates],
+ Count + 1
+ ),
+ State0#state{pending_updates = Pending, pending_update_count = Count + 1}
+ end,
maybe_send_updates(State);
handle_event(_DbName, _Event, State) ->
maybe_send_updates(State).
-
handle_cast({set_max_event_delay, MaxEventDelay}, State) ->
- maybe_send_updates(State#state{max_event_delay=MaxEventDelay});
+ maybe_send_updates(State#state{max_event_delay = MaxEventDelay});
handle_cast({set_update_db, Boolean}, State0) ->
% If turning update_db off, clear out server state
- State = case {Boolean, State0#state.update_db} of
- {false, true} ->
- State0#state{
- update_db=Boolean,
- pending_updates=sets:new(),
- pending_update_count=0,
- last_update_time=undefined
- };
- _ ->
- State0#state{update_db=Boolean}
- end,
+ State =
+ case {Boolean, State0#state.update_db} of
+ {false, true} ->
+ State0#state{
+ update_db = Boolean,
+ pending_updates = sets:new(),
+ pending_update_count = 0,
+ last_update_time = undefined
+ };
+ _ ->
+ State0#state{update_db = Boolean}
+ end,
maybe_send_updates(State);
handle_cast(_Msg, State) ->
maybe_send_updates(State).
-
-maybe_send_updates(#state{pending_update_count=0}=State) ->
+maybe_send_updates(#state{pending_update_count = 0} = State) ->
{ok, State};
-maybe_send_updates(#state{update_db=true}=State) ->
- #state{max_event_delay=MaxEventDelay, last_update_time=LastUpdateTime} = State,
+maybe_send_updates(#state{update_db = true} = State) ->
+ #state{max_event_delay = MaxEventDelay, last_update_time = LastUpdateTime} = State,
Now = os:timestamp(),
case LastUpdateTime of
- undefined ->
- {ok, State#state{last_update_time=Now}, MaxEventDelay};
- _ ->
- Delta = timer:now_diff(Now, LastUpdateTime) div 1000,
- if Delta >= MaxEventDelay ->
- Updates = sets:to_list(State#state.pending_updates),
- try group_updates_by_node(State#state.dbname, Updates) of
- Grouped ->
- dict:map(fun(Node, Docs) ->
- couch_stats:increment_counter([global_changes, rpcs]),
- global_changes_server:update_docs(Node, Docs)
- end, Grouped)
- catch error:database_does_not_exist ->
- ok
- end,
- couch_stats:update_gauge(
- [global_changes, listener_pending_updates],
- 0
- ),
- State1 = State#state{
- pending_updates=sets:new(),
- pending_update_count=0,
- last_update_time=undefined
- },
- {ok, State1};
- true ->
- {ok, State, MaxEventDelay-Delta}
- end
+ undefined ->
+ {ok, State#state{last_update_time = Now}, MaxEventDelay};
+ _ ->
+ Delta = timer:now_diff(Now, LastUpdateTime) div 1000,
+ if
+ Delta >= MaxEventDelay ->
+ Updates = sets:to_list(State#state.pending_updates),
+ try group_updates_by_node(State#state.dbname, Updates) of
+ Grouped ->
+ dict:map(
+ fun(Node, Docs) ->
+ couch_stats:increment_counter([global_changes, rpcs]),
+ global_changes_server:update_docs(Node, Docs)
+ end,
+ Grouped
+ )
+ catch
+ error:database_does_not_exist ->
+ ok
+ end,
+ couch_stats:update_gauge(
+ [global_changes, listener_pending_updates],
+ 0
+ ),
+ State1 = State#state{
+ pending_updates = sets:new(),
+ pending_update_count = 0,
+ last_update_time = undefined
+ },
+ {ok, State1};
+ true ->
+ {ok, State, MaxEventDelay - Delta}
+ end
end;
maybe_send_updates(State) ->
{ok, State}.
-
handle_info(_Msg, State) ->
maybe_send_updates(State).
-
%% restore spec when R14 support is dropped
%% -spec group_updates_by_node(binary(), [binary()]) -> dict:dict().
group_updates_by_node(DbName, Updates) ->
- lists:foldl(fun(Key, OuterAcc) ->
- Shards = mem3:shards(DbName, Key),
- lists:foldl(fun(#shard{node=Node}, InnerAcc) ->
- dict:append(Node, Key, InnerAcc)
- end, OuterAcc, Shards)
- end, dict:new(), Updates).
+ lists:foldl(
+ fun(Key, OuterAcc) ->
+ Shards = mem3:shards(DbName, Key),
+ lists:foldl(
+ fun(#shard{node = Node}, InnerAcc) ->
+ dict:append(Node, Key, InnerAcc)
+ end,
+ OuterAcc,
+ Shards
+ )
+ end,
+ dict:new(),
+ Updates
+ ).
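In the listener hunks above, erlfmt rewrites `if` expressions so that `if` stands alone and each guard clause sits on its own indented line. A toy sketch of that layout, with names invented for illustration:

```
%% Hypothetical example of the `if` layout shown above.
-module(fmt_if_example).
-export([delay_left/2]).

%% Returns how many milliseconds remain before the next flush is due.
delay_left(ElapsedMs, MaxDelayMs) ->
    if
        ElapsedMs >= MaxDelayMs -> 0;
        true -> MaxDelayMs - ElapsedMs
    end.
```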
diff --git a/src/global_changes/src/global_changes_plugin.erl b/src/global_changes/src/global_changes_plugin.erl
index 96bb91eaa..8e42d56bd 100644
--- a/src/global_changes/src/global_changes_plugin.erl
+++ b/src/global_changes/src/global_changes_plugin.erl
@@ -18,7 +18,6 @@
-define(SERVICE_ID, global_changes).
-
%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------
diff --git a/src/global_changes/src/global_changes_server.erl b/src/global_changes/src/global_changes_server.erl
index 7e3062586..e4902e207 100644
--- a/src/global_changes/src/global_changes_server.erl
+++ b/src/global_changes/src/global_changes_server.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([
start_link/0
]).
@@ -32,7 +31,6 @@
update_docs/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/include/mem3.hrl").
@@ -45,11 +43,9 @@
handler_ref
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init([]) ->
{ok, Handler} = global_changes_listener:start(),
% get configs as strings
@@ -57,32 +53,33 @@ init([]) ->
MaxWriteDelay0 = config:get("global_changes", "max_write_delay", "500"),
% make config strings into other data types
- UpdateDb = case UpdateDb0 of "false" -> false; _ -> true end,
+ UpdateDb =
+ case UpdateDb0 of
+ "false" -> false;
+ _ -> true
+ end,
MaxWriteDelay = list_to_integer(MaxWriteDelay0),
% Start our write triggers
erlang:send_after(MaxWriteDelay, self(), flush_updates),
State = #state{
- update_db=UpdateDb,
- pending_update_count=0,
- pending_updates=sets:new(),
- max_write_delay=MaxWriteDelay,
- dbname=global_changes_util:get_dbname(),
- handler_ref=erlang:monitor(process, Handler)
+ update_db = UpdateDb,
+ pending_update_count = 0,
+ pending_updates = sets:new(),
+ max_write_delay = MaxWriteDelay,
+ dbname = global_changes_util:get_dbname(),
+ handler_ref = erlang:monitor(process, Handler)
},
{ok, State}.
-
terminate(_Reason, _Srv) ->
ok.
-
handle_call(_Msg, _From, State) ->
{reply, ok, State}.
-
-handle_cast(_Msg, #state{update_db=false}=State) ->
+handle_cast(_Msg, #state{update_db = false} = State) ->
{noreply, State};
handle_cast({update_docs, DocIds}, State) ->
Pending = sets:union(sets:from_list(DocIds), State#state.pending_updates),
@@ -92,35 +89,34 @@ handle_cast({update_docs, DocIds}, State) ->
PendingCount
),
NewState = State#state{
- pending_updates=Pending,
- pending_update_count=PendingCount
+ pending_updates = Pending,
+ pending_update_count = PendingCount
},
{noreply, NewState};
-
handle_cast({set_max_write_delay, MaxWriteDelay}, State) ->
- NewState = State#state{max_write_delay=MaxWriteDelay},
+ NewState = State#state{max_write_delay = MaxWriteDelay},
{noreply, NewState};
handle_cast({set_update_db, Boolean}, State0) ->
% If turning update_db off, clear out server state
- State = case {Boolean, State0#state.update_db} of
- {false, true} ->
- State0#state{
- update_db=Boolean,
- pending_updates=sets:new(),
- pending_update_count=0
- };
- _ ->
- State0#state{update_db=Boolean}
- end,
+ State =
+ case {Boolean, State0#state.update_db} of
+ {false, true} ->
+ State0#state{
+ update_db = Boolean,
+ pending_updates = sets:new(),
+ pending_update_count = 0
+ };
+ _ ->
+ State0#state{update_db = Boolean}
+ end,
{noreply, State};
handle_cast(_Msg, State) ->
{noreply, State}.
-
-handle_info(flush_updates, #state{pending_update_count=0}=State) ->
+handle_info(flush_updates, #state{pending_update_count = 0} = State) ->
erlang:send_after(State#state.max_write_delay, self(), flush_updates),
{noreply, State};
-handle_info(flush_updates, #state{update_db=false}=State) ->
+handle_info(flush_updates, #state{update_db = false} = State) ->
erlang:send_after(State#state.max_write_delay, self(), flush_updates),
{noreply, State};
handle_info(flush_updates, State) ->
@@ -129,88 +125,97 @@ handle_info(flush_updates, State) ->
handle_info(start_listener, State) ->
{ok, Handler} = global_changes_listener:start(),
NewState = State#state{
- handler_ref=erlang:monitor(process, Handler)
+ handler_ref = erlang:monitor(process, Handler)
},
{noreply, NewState};
-handle_info({'DOWN', Ref, _, _, Reason}, #state{handler_ref=Ref}=State) ->
+handle_info({'DOWN', Ref, _, _, Reason}, #state{handler_ref = Ref} = State) ->
couch_log:error("global_changes_listener terminated: ~w", [Reason]),
erlang:send_after(5000, self(), start_listener),
{noreply, State};
handle_info(_, State) ->
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
-
flush_updates(State) ->
DocIds = sets:to_list(State#state.pending_updates),
try group_ids_by_shard(State#state.dbname, DocIds) of
- GroupedIds ->
- Docs = dict:fold(fun(ShardName, Ids, DocInfoAcc) ->
- {ok, Shard} = couch_db:open(ShardName, [?ADMIN_CTX]),
- try
- GroupedDocs = get_docs_locally(Shard, Ids),
- GroupedDocs ++ DocInfoAcc
- after
- couch_db:close(Shard)
- end
- end, [], GroupedIds),
-
- spawn(fun() ->
- fabric:update_docs(State#state.dbname, Docs, [])
- end),
-
- Count = State#state.pending_update_count,
- couch_stats:increment_counter(
- [global_changes, db_writes],
- Count
- )
- catch error:database_does_not_exist ->
- {noreply, State}
+ GroupedIds ->
+ Docs = dict:fold(
+ fun(ShardName, Ids, DocInfoAcc) ->
+ {ok, Shard} = couch_db:open(ShardName, [?ADMIN_CTX]),
+ try
+ GroupedDocs = get_docs_locally(Shard, Ids),
+ GroupedDocs ++ DocInfoAcc
+ after
+ couch_db:close(Shard)
+ end
+ end,
+ [],
+ GroupedIds
+ ),
+
+ spawn(fun() ->
+ fabric:update_docs(State#state.dbname, Docs, [])
+ end),
+
+ Count = State#state.pending_update_count,
+ couch_stats:increment_counter(
+ [global_changes, db_writes],
+ Count
+ )
+ catch
+ error:database_does_not_exist ->
+ {noreply, State}
end,
couch_stats:update_gauge(
[global_changes, server_pending_updates],
0
),
{noreply, State#state{
- pending_updates=sets:new(),
- pending_update_count=0
+ pending_updates = sets:new(),
+ pending_update_count = 0
}}.
-
update_docs(Node, Updates) ->
gen_server:cast({?MODULE, Node}, {update_docs, Updates}).
-
group_ids_by_shard(DbName, DocIds) ->
LocalNode = node(),
- lists:foldl(fun(DocId, Acc) ->
- Shards = mem3:shards(DbName, DocId),
- lists:foldl(fun
- (#shard{node=Node, name=Name}, Acc1) when Node == LocalNode ->
- dict:append(Name, DocId, Acc1);
- (_, Acc1) ->
- Acc1
- end, Acc, Shards)
- end, dict:new(), DocIds).
-
+ lists:foldl(
+ fun(DocId, Acc) ->
+ Shards = mem3:shards(DbName, DocId),
+ lists:foldl(
+ fun
+ (#shard{node = Node, name = Name}, Acc1) when Node == LocalNode ->
+ dict:append(Name, DocId, Acc1);
+ (_, Acc1) ->
+ Acc1
+ end,
+ Acc,
+ Shards
+ )
+ end,
+ dict:new(),
+ DocIds
+ ).
get_docs_locally(Shard, Ids) ->
- lists:map(fun(Id) ->
- DocInfo = couch_db:get_doc_info(Shard, Id),
- #doc{id=Id, revs=get_rev(DocInfo)}
- end, Ids).
-
+ lists:map(
+ fun(Id) ->
+ DocInfo = couch_db:get_doc_info(Shard, Id),
+ #doc{id = Id, revs = get_rev(DocInfo)}
+ end,
+ Ids
+ ).
get_rev(not_found) ->
{0, []};
-get_rev({ok, #doc_info{revs=[RevInfo]}}) ->
+get_rev({ok, #doc_info{revs = [RevInfo]}}) ->
{Pos, Rev} = RevInfo#rev_info.rev,
{Pos, [Rev]};
-get_rev({ok, #doc_info{revs=[RevInfo|_]}}) ->
+get_rev({ok, #doc_info{revs = [RevInfo | _]}}) ->
% couch_doc:to_doc_info/1 sorts things so that the first
% #rev_info in the list is the "winning" revision which is
% the one we'd want to base our edit off of. In theory
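The `group_ids_by_shard/2` hunk above shows how erlfmt formats an anonymous fun with more than one clause: `fun` stands alone on its own line, each clause head starts at the next indent level, and the enclosing `lists:foldl` call gets one argument per line. A reduced sketch with hypothetical names:

```
%% Hypothetical example of the multi-clause fun layout shown above.
-module(fmt_fun_example).
-export([docs_for_node/2]).

%% Keeps only the documents that live on the given node.
docs_for_node(Node, Pairs) ->
    lists:foldl(
        fun
            ({N, Doc}, Acc) when N =:= Node ->
                [Doc | Acc];
            (_, Acc) ->
                Acc
        end,
        [],
        Pairs
    ).
```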
diff --git a/src/global_changes/src/global_changes_sup.erl b/src/global_changes/src/global_changes_sup.erl
index 59a40f26a..3229ac0d3 100644
--- a/src/global_changes/src/global_changes_sup.erl
+++ b/src/global_changes/src/global_changes_sup.erl
@@ -13,7 +13,6 @@
-module(global_changes_sup).
-behavior(supervisor).
-
-export([start_link/0]).
-export([init/1]).
@@ -27,10 +26,10 @@
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init([]) ->
{ok, {
- {one_for_one, 5, 10}, couch_epi:register_service(global_changes_epi, [
+ {one_for_one, 5, 10},
+ couch_epi:register_service(global_changes_epi, [
{
config_listener_mon,
{config_listener_mon, start_link, [?MODULE, nil]},
@@ -47,36 +46,35 @@ init([]) ->
worker,
[global_changes_server]
}
- ])}}.
+ ])
+ }}.
handle_config_change("global_changes", "max_event_delay", MaxDelayStr, _, _) ->
try list_to_integer(MaxDelayStr) of
MaxDelay ->
gen_server:cast(?LISTENER, {set_max_event_delay, MaxDelay})
- catch error:badarg ->
- ok
+ catch
+ error:badarg ->
+ ok
end,
{ok, nil};
-
handle_config_change("global_changes", "max_write_delay", MaxDelayStr, _, _) ->
try list_to_integer(MaxDelayStr) of
MaxDelay ->
gen_server:cast(?SERVER, {set_max_write_delay, MaxDelay})
- catch error:badarg ->
- ok
+ catch
+ error:badarg ->
+ ok
end,
{ok, nil};
-
handle_config_change("global_changes", "update_db", "false", _, _) ->
gen_server:cast(?LISTENER, {set_update_db, false}),
gen_server:cast(?SERVER, {set_update_db, false}),
{ok, nil};
-
handle_config_change("global_changes", "update_db", _, _, _) ->
gen_server:cast(?LISTENER, {set_update_db, true}),
gen_server:cast(?SERVER, {set_update_db, true}),
{ok, nil};
-
handle_config_change(_, _, _, _, _) ->
{ok, nil}.
diff --git a/src/global_changes/src/global_changes_util.erl b/src/global_changes/src/global_changes_util.erl
index 0ca57a35f..910e0137f 100644
--- a/src/global_changes/src/global_changes_util.erl
+++ b/src/global_changes/src/global_changes_util.erl
@@ -12,10 +12,8 @@
-module(global_changes_util).
-
-export([get_dbname/0]).
-
get_dbname() ->
case application:get_env(global_changes, dbname) of
{ok, DbName} when is_binary(DbName) ->
diff --git a/src/global_changes/test/eunit/global_changes_hooks_tests.erl b/src/global_changes/test/eunit/global_changes_hooks_tests.erl
index 3c2fcc6e6..5d6bbd13d 100644
--- a/src/global_changes/test/eunit/global_changes_hooks_tests.erl
+++ b/src/global_changes/test/eunit/global_changes_hooks_tests.erl
@@ -38,8 +38,12 @@ setup(default) ->
get_host();
setup(A) ->
Host = setup(default),
- ok = config:set("global_changes", "allowed_owner",
- ?t2l({?MODULE, allowed_owner, A}), false),
+ ok = config:set(
+ "global_changes",
+ "allowed_owner",
+ ?t2l({?MODULE, allowed_owner, A}),
+ false
+ ),
Host.
teardown(_) ->
@@ -57,7 +61,8 @@ allowed_owner_hook_test_() ->
"Check allowed_owner hook",
{
setup,
- fun start/0, fun stop/1,
+ fun start/0,
+ fun stop/1,
[
disabled_allowed_owner_integration_point(),
enabled_allowed_owner_integration_point()
@@ -70,11 +75,12 @@ disabled_allowed_owner_integration_point() ->
"disabled allowed_owner integration point",
{
foreach,
- fun() -> setup(default) end, fun teardown/1,
+ fun() -> setup(default) end,
+ fun teardown/1,
[
fun should_not_fail_for_admin/1,
fun should_fail_for_non_admin/1
- ]
+ ]
}
}.
@@ -84,12 +90,14 @@ enabled_allowed_owner_integration_point() ->
[
{
foreach,
- fun() -> setup("throw") end, fun teardown/1,
+ fun() -> setup("throw") end,
+ fun teardown/1,
[fun should_throw/1]
},
{
foreach,
- fun() -> setup("pass") end, fun teardown/1,
+ fun() -> setup("pass") end,
+ fun teardown/1,
[fun should_pass/1]
}
]
diff --git a/src/ioq/src/ioq.erl b/src/ioq/src/ioq.erl
index c3f9365bf..51934d544 100644
--- a/src/ioq/src/ioq.erl
+++ b/src/ioq/src/ioq.erl
@@ -26,9 +26,9 @@
-record(state, {
concurrency,
ratio,
- interactive=queue:new(),
- background=queue:new(),
- running=[]
+ interactive = queue:new(),
+ background = queue:new(),
+ running = []
}).
-record(request, {
@@ -85,11 +85,11 @@ io_class(_, _) ->
other.
queued_call(Fd, Msg, Priority) ->
- Request = #request{fd=Fd, msg=Msg, priority=Priority, from=self()},
+ Request = #request{fd = Fd, msg = Msg, priority = Priority, from = self()},
try
gen_server:call(?MODULE, Request, infinity)
catch
- exit:{noproc,_} ->
+ exit:{noproc, _} ->
gen_server:call(Fd, Msg, infinity)
end.
@@ -101,7 +101,7 @@ init(_) ->
read_config(State) ->
Ratio = config:get_float("ioq", "ratio", 0.01),
Concurrency = config:get_integer("ioq", "concurrency", 10),
- State#state{concurrency=Concurrency, ratio=Ratio}.
+ State#state{concurrency = Concurrency, ratio = Ratio}.
handle_call(get_queue_lengths, _From, State) ->
Response = #{
@@ -109,8 +109,8 @@ handle_call(get_queue_lengths, _From, State) ->
background => queue:len(State#state.background)
},
{reply, Response, State, 0};
-handle_call(#request{}=Request, From, State) ->
- {noreply, enqueue_request(Request#request{from=From}, State), 0}.
+handle_call(#request{} = Request, From, State) ->
+ {noreply, enqueue_request(Request#request{from = From}, State), 0}.
handle_cast(change, State) ->
{noreply, read_config(State)};
@@ -122,7 +122,7 @@ handle_info({Ref, Reply}, State) ->
{value, Request, Remaining} ->
erlang:demonitor(Ref, [flush]),
gen_server:reply(Request#request.from, Reply),
- {noreply, State#state{running=Remaining}, 0};
+ {noreply, State#state{running = Remaining}, 0};
false ->
{noreply, State, 0}
end;
@@ -130,7 +130,7 @@ handle_info({'DOWN', Ref, _, _, Reason}, State) ->
case lists:keytake(Ref, #request.ref, State#state.running) of
{value, Request, Remaining} ->
gen_server:reply(Request#request.from, {'EXIT', Reason}),
- {noreply, State#state{running=Remaining}, 0};
+ {noreply, State#state{running = Remaining}, 0};
false ->
{noreply, State, 0}
end;
@@ -156,15 +156,16 @@ code_change(_Vsn, State, _Extra) ->
terminate(_Reason, _State) ->
ok.
-enqueue_request(#request{priority=compaction}=Request, #state{}=State) ->
- State#state{background=queue:in(Request, State#state.background)};
-enqueue_request(#request{priority=shard_sync}=Request, #state{}=State) ->
- State#state{background=queue:in(Request, State#state.background)};
-enqueue_request(#request{}=Request, #state{}=State) ->
- State#state{interactive=queue:in(Request, State#state.interactive)}.
+enqueue_request(#request{priority = compaction} = Request, #state{} = State) ->
+ State#state{background = queue:in(Request, State#state.background)};
+enqueue_request(#request{priority = shard_sync} = Request, #state{} = State) ->
+ State#state{background = queue:in(Request, State#state.background)};
+enqueue_request(#request{} = Request, #state{} = State) ->
+ State#state{interactive = queue:in(Request, State#state.interactive)}.
-maybe_submit_request(#state{concurrency=Concurrency, running=Running}=State)
- when length(Running) < Concurrency ->
+maybe_submit_request(#state{concurrency = Concurrency, running = Running} = State) when
+ length(Running) < Concurrency
+->
case make_next_request(State) of
State ->
State;
@@ -176,7 +177,7 @@ maybe_submit_request(#state{concurrency=Concurrency, running=Running}=State)
maybe_submit_request(State) ->
State.
-make_next_request(#state{}=State) ->
+make_next_request(#state{} = State) ->
case {queue:is_empty(State#state.background), queue:is_empty(State#state.interactive)} of
{true, true} ->
State;
@@ -201,7 +202,7 @@ choose_next_request(Index, State) ->
submit_request(Request, setelement(Index, State, Q))
end.
-submit_request(#request{}=Request, #state{}=State) ->
+submit_request(#request{} = Request, #state{} = State) ->
Ref = erlang:monitor(process, Request#request.fd),
Request#request.fd ! {'$gen_call', {self(), Ref}, Request#request.msg},
- State#state{running = [Request#request{ref=Ref} | State#state.running]}.
+ State#state{running = [Request#request{ref = Ref} | State#state.running]}.
diff --git a/src/ioq/src/ioq_sup.erl b/src/ioq/src/ioq_sup.erl
index c4d04a9e4..937e5a952 100644
--- a/src/ioq/src/ioq_sup.erl
+++ b/src/ioq/src/ioq_sup.erl
@@ -21,4 +21,4 @@ start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(ioq, worker)]}}.
+ {ok, {{one_for_one, 5, 10}, [?CHILD(ioq, worker)]}}.
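The ioq hunks above illustrate the record conventions erlfmt enforces: field defaults and updates are written `field = Value` with spaces around `=`, both in `-record` declarations and in record construction or update expressions. A minimal sketch of the same spacing (the record and functions are invented, not the ioq state):

```
%% Hypothetical example of record spacing after erlfmt.
-module(fmt_record_example).
-export([enqueue/2]).

-record(queues, {
    interactive = queue:new(),
    background = queue:new()
}).

enqueue({background, Job}, #queues{} = Q) ->
    Q#queues{background = queue:in(Job, Q#queues.background)};
enqueue({interactive, Job}, #queues{} = Q) ->
    Q#queues{interactive = queue:in(Job, Q#queues.interactive)}.
```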
diff --git a/src/jwtf/src/jwtf.erl b/src/jwtf/src/jwtf.erl
index 4c4f80c70..d62789b0a 100644
--- a/src/jwtf/src/jwtf.erl
+++ b/src/jwtf/src/jwtf.erl
@@ -25,7 +25,8 @@
]).
-define(ALGS, [
- {<<"RS256">>, {public_key, sha256}}, % RSA PKCS#1 signature with SHA-256
+ % RSA PKCS#1 signature with SHA-256
+ {<<"RS256">>, {public_key, sha256}},
{<<"RS384">>, {public_key, sha384}},
{<<"RS512">>, {public_key, sha512}},
{<<"ES256">>, {public_key, sha256}},
@@ -33,7 +34,8 @@
{<<"ES512">>, {public_key, sha512}},
{<<"HS256">>, {hmac, sha256}},
{<<"HS384">>, {hmac, sha384}},
- {<<"HS512">>, {hmac, sha512}}]).
+ {<<"HS512">>, {hmac, sha512}}
+]).
-define(CHECKS, [
alg,
@@ -43,8 +45,8 @@
kid,
nbf,
sig,
- typ]).
-
+ typ
+]).
% @doc encode
% Encode the JSON Header and Claims using Key and Alg obtained from Header
@@ -52,21 +54,23 @@
{ok, binary()} | no_return().
encode(Header = {HeaderProps}, Claims, Key) ->
try
- Alg = case prop(<<"alg">>, HeaderProps) of
- undefined ->
- throw({bad_request, <<"Missing alg header parameter">>});
- Val ->
- Val
- end,
+ Alg =
+ case prop(<<"alg">>, HeaderProps) of
+ undefined ->
+ throw({bad_request, <<"Missing alg header parameter">>});
+ Val ->
+ Val
+ end,
EncodedHeader = b64url:encode(jiffy:encode(Header)),
EncodedClaims = b64url:encode(jiffy:encode(Claims)),
Message = <<EncodedHeader/binary, $., EncodedClaims/binary>>,
- SignatureOrMac = case verification_algorithm(Alg) of
- {public_key, Algorithm} ->
- public_key:sign(Message, Algorithm, Key);
- {hmac, Algorithm} ->
- hmac(Algorithm, Key, Message)
- end,
+ SignatureOrMac =
+ case verification_algorithm(Alg) of
+ {public_key, Algorithm} ->
+ public_key:sign(Message, Algorithm, Key);
+ {hmac, Algorithm} ->
+ hmac(Algorithm, Key, Message)
+ end,
EncodedSignatureOrMac = b64url:encode(SignatureOrMac),
{ok, <<Message/binary, $., EncodedSignatureOrMac/binary>>}
catch
@@ -74,7 +78,6 @@ encode(Header = {HeaderProps}, Claims, Key) ->
{error, Error}
end.
-
% @doc decode
% Decodes the supplied encoded token, checking
% for the attributes defined in Checks and calling
@@ -90,14 +93,12 @@ decode(EncodedToken, Checks, KS) ->
{error, Error}
end.
-
% @doc valid_algorithms
% Return a list of supported algorithms
-spec valid_algorithms() -> [binary()].
valid_algorithms() ->
proplists:get_keys(?ALGS).
-
% @doc verification_algorithm
% Return {VerificationMethod, Algorithm} tuple for the specified Alg
-spec verification_algorithm(binary()) ->
@@ -110,7 +111,6 @@ verification_algorithm(Alg) ->
throw({bad_request, <<"Invalid alg header parameter">>})
end.
-
validate(Header0, Payload0, Signature, Checks, KS) ->
validate_checks(Checks),
Header1 = props(decode_b64url_json(Header0)),
@@ -123,7 +123,6 @@ validate(Header0, Payload0, Signature, Checks, KS) ->
Key = key(Header1, Checks, KS),
verify(Alg, Header0, Payload0, Signature, Key).
-
validate_checks(Checks) when is_list(Checks) ->
case {lists:usort(Checks), lists:sort(Checks)} of
{L, L} ->
@@ -139,22 +138,17 @@ validate_checks(Checks) when is_list(Checks) ->
error({unknown_checks, UnknownChecks})
end.
-
valid_check(Check) when is_atom(Check) ->
lists:member(Check, ?CHECKS);
-
valid_check({Check, _}) when is_atom(Check) ->
lists:member(Check, ?CHECKS);
-
valid_check(_) ->
false.
-
validate_header(Props, Checks) ->
validate_typ(Props, Checks),
validate_alg(Props, Checks).
-
validate_typ(Props, Checks) ->
Required = prop(typ, Checks),
TYP = prop(<<"typ">>, Props),
@@ -169,7 +163,6 @@ validate_typ(Props, Checks) ->
throw({bad_request, <<"Invalid typ header parameter">>})
end.
-
validate_alg(Props, Checks) ->
Required = prop(alg, Checks),
Alg = prop(<<"alg">>, Props),
@@ -187,7 +180,6 @@ validate_alg(Props, Checks) ->
end
end.
-
%% Only validate required checks.
validate_payload(Props, Checks) ->
validate_iss(Props, Checks),
@@ -195,13 +187,13 @@ validate_payload(Props, Checks) ->
validate_nbf(Props, Checks),
validate_exp(Props, Checks).
-
validate_iss(Props, Checks) ->
ExpectedISS = prop(iss, Checks),
ActualISS = prop(<<"iss">>, Props),
case {ExpectedISS, ActualISS} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{ISS, undefined} when ISS /= undefined ->
throw({bad_request, <<"Missing iss claim">>});
@@ -211,13 +203,13 @@ validate_iss(Props, Checks) ->
throw({bad_request, <<"Invalid iss claim">>})
end.
-
validate_iat(Props, Checks) ->
Required = prop(iat, Checks),
IAT = prop(<<"iat">>, Props),
case {Required, IAT} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{true, undefined} ->
throw({bad_request, <<"Missing iat claim">>});
@@ -227,13 +219,13 @@ validate_iat(Props, Checks) ->
throw({bad_request, <<"Invalid iat claim">>})
end.
-
validate_nbf(Props, Checks) ->
Required = prop(nbf, Checks),
NBF = prop(<<"nbf">>, Props),
case {Required, NBF} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{true, undefined} ->
throw({bad_request, <<"Missing nbf claim">>});
@@ -241,13 +233,13 @@ validate_nbf(Props, Checks) ->
assert_past(<<"nbf">>, NBF)
end.
-
validate_exp(Props, Checks) ->
Required = prop(exp, Checks),
EXP = prop(<<"exp">>, Props),
case {Required, EXP} of
- {undefined, _} -> % ignore unrequired check
+ % ignore unrequired check
+ {undefined, _} ->
ok;
{true, undefined} ->
throw({bad_request, <<"Missing exp claim">>});
@@ -255,7 +247,6 @@ validate_exp(Props, Checks) ->
assert_future(<<"exp">>, EXP)
end.
-
key(Props, Checks, KS) ->
Alg = prop(<<"alg">>, Props),
Required = prop(kid, Checks),
@@ -267,7 +258,6 @@ key(Props, Checks, KS) ->
KS(Alg, KID)
end.
-
verify(Alg, Header, Payload, SignatureOrMac0, Key) ->
Message = <<Header/binary, $., Payload/binary>>,
SignatureOrMac1 = b64url:decode(SignatureOrMac0),
@@ -279,7 +269,6 @@ verify(Alg, Header, Payload, SignatureOrMac0, Key) ->
hmac_verify(Algorithm, Message, SignatureOrMac1, Key)
end.
-
public_key_verify(Algorithm, Message, Signature, PublicKey) ->
case public_key:verify(Message, Algorithm, Signature, PublicKey) of
true ->
@@ -288,7 +277,6 @@ public_key_verify(Algorithm, Message, Signature, PublicKey) ->
throw({bad_request, <<"Bad signature">>})
end.
-
hmac_verify(Algorithm, Message, HMAC, SecretKey) ->
case hmac(Algorithm, SecretKey, Message) of
HMAC ->
@@ -297,14 +285,12 @@ hmac_verify(Algorithm, Message, HMAC, SecretKey) ->
throw({bad_request, <<"Bad HMAC">>})
end.
-
split(EncodedToken) ->
case binary:split(EncodedToken, <<$.>>, [global]) of
[_, _, _] = Split -> Split;
_ -> throw({bad_request, <<"Malformed token">>})
end.
-
decode_b64url_json(B64UrlEncoded) ->
try
case b64url:decode(B64UrlEncoded) of
@@ -318,14 +304,11 @@ decode_b64url_json(B64UrlEncoded) ->
throw({bad_request, Error})
end.
-
props({Props}) ->
Props;
-
props(_) ->
throw({bad_request, <<"Not an object">>}).
-
assert_past(Name, Time) ->
case Time < now_seconds() of
true ->
@@ -342,16 +325,13 @@ assert_future(Name, Time) ->
throw({unauthorized, <<Name/binary, " not in future">>})
end.
-
now_seconds() ->
{MegaSecs, Secs, _MicroSecs} = os:timestamp(),
MegaSecs * 1000000 + Secs.
-
prop(Prop, Props) ->
proplists:get_value(Prop, Props).
-
-ifdef(OTP_RELEASE).
-if(?OTP_RELEASE >= 22).
@@ -366,7 +346,8 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -if(?OTP_RELEASE >= 22)
+% -if(?OTP_RELEASE >= 22)
+-endif.
-else.
@@ -374,8 +355,8 @@ hmac(Alg, Key, Message) ->
hmac(Alg, Key, Message) ->
crypto:hmac(Alg, Key, Message).
--endif. % -ifdef(OTP_RELEASE)
-
+% -ifdef(OTP_RELEASE)
+-endif.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/jwtf/src/jwtf_keystore.erl b/src/jwtf/src/jwtf_keystore.erl
index be261e67c..4c2933264 100644
--- a/src/jwtf/src/jwtf_keystore.erl
+++ b/src/jwtf/src/jwtf_keystore.erl
@@ -23,8 +23,14 @@
]).
% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ terminate/2
+]).
% config_listener api
-export([handle_config_change/5, handle_config_terminate/3]).
@@ -33,7 +39,6 @@
get(Alg, undefined) when is_binary(Alg) ->
get(Alg, <<"_default">>);
-
get(Alg, KID0) when is_binary(Alg), is_binary(KID0) ->
Kty = kty(Alg),
KID = binary_to_list(KID0),
@@ -43,10 +48,9 @@ get(Alg, KID0) when is_binary(Alg), is_binary(KID0) ->
ok = gen_server:call(?MODULE, {set, Kty, KID, Key}),
Key;
[{{Kty, KID}, Key}] ->
- Key
+ Key
end.
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
@@ -57,36 +61,28 @@ init(_) ->
ets:new(?MODULE, [public, named_table]),
{ok, nil}.
-
handle_call({set, Kty, KID, Key}, _From, State) ->
true = ets:insert(?MODULE, {{Kty, KID}, Key}),
{reply, ok, State}.
-
handle_cast({delete, Kty, KID}, State) ->
true = ets:delete(?MODULE, {Kty, KID}),
{noreply, State};
-
handle_cast(_Msg, State) ->
{noreply, State}.
-
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State};
-
handle_info(_Msg, State) ->
{noreply, State}.
-
terminate(_Reason, _State) ->
ok.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
% config listener callback
handle_config_change("jwt_keys", ConfigKey, _ConfigValue, _, _) ->
@@ -97,13 +93,11 @@ handle_config_change("jwt_keys", ConfigKey, _ConfigValue, _, _) ->
ignored
end,
{ok, nil};
-
handle_config_change(_, _, _, _, _) ->
{ok, nil}.
handle_config_terminate(_Server, stop, _State) ->
ok;
-
handle_config_terminate(_Server, _Reason, _State) ->
erlang:send_after(100, whereis(?MODULE), restart_config_listener).
@@ -150,12 +144,9 @@ pem_decode(PEM) ->
kty(<<"HS", _/binary>>) ->
"hmac";
-
kty(<<"RS", _/binary>>) ->
"rsa";
-
kty(<<"ES", _/binary>>) ->
"ec";
-
kty(_) ->
throw({bad_request, <<"Unknown kty">>}).
diff --git a/src/jwtf/src/jwtf_sup.erl b/src/jwtf/src/jwtf_sup.erl
index 6f44808de..98d354c96 100644
--- a/src/jwtf/src/jwtf_sup.erl
+++ b/src/jwtf/src/jwtf_sup.erl
@@ -35,4 +35,4 @@ start_link() ->
%% ===================================================================
init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(jwtf_keystore, worker)]} }.
+ {ok, {{one_for_one, 5, 10}, [?CHILD(jwtf_keystore, worker)]}}.
diff --git a/src/jwtf/test/jwtf_keystore_tests.erl b/src/jwtf/test/jwtf_keystore_tests.erl
index c0c55fb0b..c05d7f1b4 100644
--- a/src/jwtf/test/jwtf_keystore_tests.erl
+++ b/src/jwtf/test/jwtf_keystore_tests.erl
@@ -16,8 +16,12 @@
-include_lib("public_key/include/public_key.hrl").
-define(HMAC_SECRET, "aGVsbG8=").
--define(RSA_SECRET, "-----BEGIN PUBLIC KEY-----\\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAztanwQtIx0sms+x7m1SF\\nh7EHJHkM2biTJ41jR89FsDE2gd3MChpaqxemS5GpNvfFKRvuHa4PUZ3JtRCBG1KM\\n/7EWIVTy1JQDr2mb8couGlQNqz4uXN2vkNQ0XszgjU4Wn6ZpvYxmqPFbmkRe8QSn\\nAy2Wf8jQgjsbez8eaaX0G9S1hgFZUN3KFu7SVmUDQNvWpQdaJPP+ms5Z0CqF7JLa\\nvJmSdsU49nlYw9VH/XmwlUBMye6HgR4ZGCLQS85frqF0xLWvi7CsMdchcIjHudXH\\nQK1AumD/VVZVdi8Q5Qew7F6VXeXqnhbw9n6Px25cCuNuh6u5+E6GUzXRrMpqo9vO\\nqQIDAQAB\\n-----END PUBLIC KEY-----\\n").
--define(EC_SECRET, "-----BEGIN PUBLIC KEY-----\\nMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEDsr0lz/Dg3luarb+Kua0Wcj9WrfR23os\\nwHzakglb8GhWRDn+oZT0Bt/26sX8uB4/ij9PEOLHPo+IHBtX4ELFFVr5GTzlqcJe\\nyctaTDd1OOAPXYuc67EWtGZ3pDAzztRs\\n-----END PUBLIC KEY-----\\n").
+-define(RSA_SECRET,
+ "-----BEGIN PUBLIC KEY-----\\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAztanwQtIx0sms+x7m1SF\\nh7EHJHkM2biTJ41jR89FsDE2gd3MChpaqxemS5GpNvfFKRvuHa4PUZ3JtRCBG1KM\\n/7EWIVTy1JQDr2mb8couGlQNqz4uXN2vkNQ0XszgjU4Wn6ZpvYxmqPFbmkRe8QSn\\nAy2Wf8jQgjsbez8eaaX0G9S1hgFZUN3KFu7SVmUDQNvWpQdaJPP+ms5Z0CqF7JLa\\nvJmSdsU49nlYw9VH/XmwlUBMye6HgR4ZGCLQS85frqF0xLWvi7CsMdchcIjHudXH\\nQK1AumD/VVZVdi8Q5Qew7F6VXeXqnhbw9n6Px25cCuNuh6u5+E6GUzXRrMpqo9vO\\nqQIDAQAB\\n-----END PUBLIC KEY-----\\n"
+).
+-define(EC_SECRET,
+ "-----BEGIN PUBLIC KEY-----\\nMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEDsr0lz/Dg3luarb+Kua0Wcj9WrfR23os\\nwHzakglb8GhWRDn+oZT0Bt/26sX8uB4/ij9PEOLHPo+IHBtX4ELFFVr5GTzlqcJe\\nyctaTDd1OOAPXYuc67EWtGZ3pDAzztRs\\n-----END PUBLIC KEY-----\\n"
+).
setup() ->
test_util:start_applications([couch_log, config, jwtf]),
@@ -38,20 +42,20 @@ teardown(_) ->
jwtf_keystore_test_() ->
{
- setup,
- fun setup/0,
- fun teardown/1,
- [
- ?_assertEqual(<<"hello">>, jwtf_keystore:get(<<"HS256">>, <<"hmac">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"RS256">>, <<"hmac">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"ES256">>, <<"hmac">>)),
-
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"HS256">>, <<"rsa">>)),
- ?_assertMatch(#'RSAPublicKey'{}, jwtf_keystore:get(<<"RS256">>, <<"rsa">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"ES256">>, <<"rsa">>)),
-
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"HS256">>, <<"ec">>)),
- ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"RS256">>, <<"ec">>)),
- ?_assertMatch({#'ECPoint'{}, _}, jwtf_keystore:get(<<"ES256">>, <<"ec">>))
- ]
+ setup,
+ fun setup/0,
+ fun teardown/1,
+ [
+ ?_assertEqual(<<"hello">>, jwtf_keystore:get(<<"HS256">>, <<"hmac">>)),
+ ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"RS256">>, <<"hmac">>)),
+ ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"ES256">>, <<"hmac">>)),
+
+ ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"HS256">>, <<"rsa">>)),
+ ?_assertMatch(#'RSAPublicKey'{}, jwtf_keystore:get(<<"RS256">>, <<"rsa">>)),
+ ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"ES256">>, <<"rsa">>)),
+
+ ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"HS256">>, <<"ec">>)),
+ ?_assertThrow({bad_request, _}, jwtf_keystore:get(<<"RS256">>, <<"ec">>)),
+ ?_assertMatch({#'ECPoint'{}, _}, jwtf_keystore:get(<<"ES256">>, <<"ec">>))
+ ]
}.
diff --git a/src/jwtf/test/jwtf_tests.erl b/src/jwtf/test/jwtf_tests.erl
index ba944f7c7..f4685a54e 100644
--- a/src/jwtf/test/jwtf_tests.erl
+++ b/src/jwtf/test/jwtf_tests.erl
@@ -25,247 +25,286 @@ valid_header() ->
{[{<<"typ">>, <<"JWT">>}, {<<"alg">>, <<"RS256">>}]}.
jwt_io_pubkey() ->
- PublicKeyPEM = <<"-----BEGIN PUBLIC KEY-----\n"
- "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGH"
- "FHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6"
- "dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkl"
- "e+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB\n"
- "-----END PUBLIC KEY-----\n">>,
+ PublicKeyPEM = <<
+ "-----BEGIN PUBLIC KEY-----\n"
+ "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGH"
+ "FHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6"
+ "dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkl"
+ "e+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB\n"
+ "-----END PUBLIC KEY-----\n"
+ >>,
[PEMEntry] = public_key:pem_decode(PublicKeyPEM),
public_key:pem_entry_decode(PEMEntry).
-
b64_badarg_test() ->
Encoded = <<"0.0.0">>,
- ?assertEqual({error, {bad_request,badarg}},
- jwtf:decode(Encoded, [], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, badarg}},
+ jwtf:decode(Encoded, [], nil)
+ ).
b64_bad_block_test() ->
Encoded = <<" aGVsbG8. aGVsbG8. aGVsbG8">>,
- ?assertEqual({error, {bad_request,{bad_block,0}}},
- jwtf:decode(Encoded, [], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, {bad_block, 0}}},
+ jwtf:decode(Encoded, [], nil)
+ ).
invalid_json_test() ->
Encoded = <<"fQ.fQ.fQ">>,
- ?assertEqual({error, {bad_request,{1,invalid_json}}},
- jwtf:decode(Encoded, [], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, {1, invalid_json}}},
+ jwtf:decode(Encoded, [], nil)
+ ).
truncated_json_test() ->
Encoded = <<"ew.ew.ew">>,
- ?assertEqual({error, {bad_request,{2,truncated_json}}},
- jwtf:decode(Encoded, [], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, {2, truncated_json}}},
+ jwtf:decode(Encoded, [], nil)
+ ).
missing_typ_test() ->
Encoded = encode({[]}, []),
- ?assertEqual({error, {bad_request,<<"Missing typ header parameter">>}},
- jwtf:decode(Encoded, [typ], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing typ header parameter">>}},
+ jwtf:decode(Encoded, [typ], nil)
+ ).
invalid_typ_test() ->
Encoded = encode({[{<<"typ">>, <<"NOPE">>}]}, []),
- ?assertEqual({error, {bad_request,<<"Invalid typ header parameter">>}},
- jwtf:decode(Encoded, [typ], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Invalid typ header parameter">>}},
+ jwtf:decode(Encoded, [typ], nil)
+ ).
missing_alg_test() ->
Encoded = encode({[]}, []),
- ?assertEqual({error, {bad_request,<<"Missing alg header parameter">>}},
- jwtf:decode(Encoded, [alg], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing alg header parameter">>}},
+ jwtf:decode(Encoded, [alg], nil)
+ ).
invalid_alg_test() ->
Encoded = encode({[{<<"alg">>, <<"NOPE">>}]}, []),
- ?assertEqual({error, {bad_request,<<"Invalid alg header parameter">>}},
- jwtf:decode(Encoded, [alg], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Invalid alg header parameter">>}},
+ jwtf:decode(Encoded, [alg], nil)
+ ).
missing_iss_test() ->
Encoded = encode(valid_header(), {[]}),
- ?assertEqual({error, {bad_request,<<"Missing iss claim">>}},
- jwtf:decode(Encoded, [{iss, right}], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing iss claim">>}},
+ jwtf:decode(Encoded, [{iss, right}], nil)
+ ).
invalid_iss_test() ->
Encoded = encode(valid_header(), {[{<<"iss">>, <<"wrong">>}]}),
- ?assertEqual({error, {bad_request,<<"Invalid iss claim">>}},
- jwtf:decode(Encoded, [{iss, right}], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Invalid iss claim">>}},
+ jwtf:decode(Encoded, [{iss, right}], nil)
+ ).
missing_iat_test() ->
Encoded = encode(valid_header(), {[]}),
- ?assertEqual({error, {bad_request,<<"Missing iat claim">>}},
- jwtf:decode(Encoded, [iat], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing iat claim">>}},
+ jwtf:decode(Encoded, [iat], nil)
+ ).
invalid_iat_test() ->
Encoded = encode(valid_header(), {[{<<"iat">>, <<"hello">>}]}),
- ?assertEqual({error, {bad_request,<<"Invalid iat claim">>}},
- jwtf:decode(Encoded, [iat], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Invalid iat claim">>}},
+ jwtf:decode(Encoded, [iat], nil)
+ ).
missing_nbf_test() ->
Encoded = encode(valid_header(), {[]}),
- ?assertEqual({error, {bad_request,<<"Missing nbf claim">>}},
- jwtf:decode(Encoded, [nbf], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing nbf claim">>}},
+ jwtf:decode(Encoded, [nbf], nil)
+ ).
invalid_nbf_test() ->
Encoded = encode(valid_header(), {[{<<"nbf">>, 2 * now_seconds()}]}),
- ?assertEqual({error, {unauthorized, <<"nbf not in past">>}},
- jwtf:decode(Encoded, [nbf], nil)).
-
+ ?assertEqual(
+ {error, {unauthorized, <<"nbf not in past">>}},
+ jwtf:decode(Encoded, [nbf], nil)
+ ).
missing_exp_test() ->
Encoded = encode(valid_header(), {[]}),
- ?assertEqual({error, {bad_request, <<"Missing exp claim">>}},
- jwtf:decode(Encoded, [exp], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing exp claim">>}},
+ jwtf:decode(Encoded, [exp], nil)
+ ).
invalid_exp_test() ->
Encoded = encode(valid_header(), {[{<<"exp">>, 0}]}),
- ?assertEqual({error, {unauthorized, <<"exp not in future">>}},
- jwtf:decode(Encoded, [exp], nil)).
-
+ ?assertEqual(
+ {error, {unauthorized, <<"exp not in future">>}},
+ jwtf:decode(Encoded, [exp], nil)
+ ).
missing_kid_test() ->
Encoded = encode({[]}, {[]}),
- ?assertEqual({error, {bad_request, <<"Missing kid claim">>}},
- jwtf:decode(Encoded, [kid], nil)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing kid claim">>}},
+ jwtf:decode(Encoded, [kid], nil)
+ ).
public_key_not_found_test() ->
Encoded = encode(
{[{<<"alg">>, <<"RS256">>}, {<<"kid">>, <<"1">>}]},
- {[]}),
+ {[]}
+ ),
KS = fun(_, _) -> throw(not_found) end,
Expected = {error, not_found},
?assertEqual(Expected, jwtf:decode(Encoded, [], KS)).
-
bad_rs256_sig_test() ->
Encoded = encode(
{[{<<"typ">>, <<"JWT">>}, {<<"alg">>, <<"RS256">>}]},
- {[]}),
+ {[]}
+ ),
KS = fun(<<"RS256">>, undefined) -> jwt_io_pubkey() end,
- ?assertEqual({error, {bad_request, <<"Bad signature">>}},
- jwtf:decode(Encoded, [], KS)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Bad signature">>}},
+ jwtf:decode(Encoded, [], KS)
+ ).
bad_hs256_sig_test() ->
Encoded = encode(
{[{<<"typ">>, <<"JWT">>}, {<<"alg">>, <<"HS256">>}]},
- {[]}),
+ {[]}
+ ),
KS = fun(<<"HS256">>, undefined) -> <<"bad">> end,
- ?assertEqual({error, {bad_request, <<"Bad HMAC">>}},
- jwtf:decode(Encoded, [], KS)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Bad HMAC">>}},
+ jwtf:decode(Encoded, [], KS)
+ ).
malformed_token_test() ->
- ?assertEqual({error, {bad_request, <<"Malformed token">>}},
- jwtf:decode(<<"a.b.c.d">>, [], nil)).
+ ?assertEqual(
+ {error, {bad_request, <<"Malformed token">>}},
+ jwtf:decode(<<"a.b.c.d">>, [], nil)
+ ).
unknown_atom_check_test() ->
- ?assertError({unknown_checks, [foo, bar]},
- jwtf:decode(<<"a.b.c">>, [exp, foo, iss, bar], nil)).
+ ?assertError(
+ {unknown_checks, [foo, bar]},
+ jwtf:decode(<<"a.b.c">>, [exp, foo, iss, bar], nil)
+ ).
unknown_binary_check_test() ->
- ?assertError({unknown_checks, [<<"bar">>]},
- jwtf:decode(<<"a.b.c">>, [exp, iss, <<"bar">>], nil)).
+ ?assertError(
+ {unknown_checks, [<<"bar">>]},
+ jwtf:decode(<<"a.b.c">>, [exp, iss, <<"bar">>], nil)
+ ).
duplicate_check_test() ->
- ?assertError({duplicate_checks, [exp]},
- jwtf:decode(<<"a.b.c">>, [exp, exp], nil)).
-
+ ?assertError(
+ {duplicate_checks, [exp]},
+ jwtf:decode(<<"a.b.c">>, [exp, exp], nil)
+ ).
%% jwt.io generated
hs256_test() ->
- EncodedToken = <<"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQ1Ni"
- "J9.eyJpc3MiOiJodHRwczovL2Zvby5jb20iLCJpYXQiOjAsImV4cCI"
- "6MTAwMDAwMDAwMDAwMDAsImtpZCI6ImJhciJ9.iS8AH11QHHlczkBn"
- "Hl9X119BYLOZyZPllOVhSBZ4RZs">>,
+ EncodedToken = <<
+ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQ1Ni"
+ "J9.eyJpc3MiOiJodHRwczovL2Zvby5jb20iLCJpYXQiOjAsImV4cCI"
+ "6MTAwMDAwMDAwMDAwMDAsImtpZCI6ImJhciJ9.iS8AH11QHHlczkBn"
+ "Hl9X119BYLOZyZPllOVhSBZ4RZs"
+ >>,
KS = fun(<<"HS256">>, <<"123456">>) -> <<"secret">> end,
Checks = [{iss, <<"https://foo.com">>}, iat, exp, typ, alg, kid],
?assertMatch({ok, _}, catch jwtf:decode(EncodedToken, Checks, KS)).
-
%% pip install PyJWT
%% > import jwt
%% > jwt.encode({'foo':'bar'}, 'secret', algorithm='HS384')
hs384_test() ->
- EncodedToken = <<"eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIif"
- "Q.2quwghs6I56GM3j7ZQbn-ASZ53xdBqzPzTDHm_CtVec32LUy-Ezy"
- "L3JjIe7WjL93">>,
+ EncodedToken = <<
+ "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIif"
+ "Q.2quwghs6I56GM3j7ZQbn-ASZ53xdBqzPzTDHm_CtVec32LUy-Ezy"
+ "L3JjIe7WjL93"
+ >>,
KS = fun(<<"HS384">>, _) -> <<"secret">> end,
- ?assertMatch({ok, {[{<<"foo">>,<<"bar">>}]}},
- catch jwtf:decode(EncodedToken, [], KS)).
-
+ ?assertMatch(
+ {ok, {[{<<"foo">>, <<"bar">>}]}},
+ catch jwtf:decode(EncodedToken, [], KS)
+ ).
%% pip install PyJWT
%% > import jwt
%% > jwt.encode({'foo':'bar'}, 'secret', algorithm='HS512')
hs512_test() ->
- EncodedToken = <<"eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYX"
- "IifQ.WePl7achkd0oGNB8XRF_LJwxlyiPZqpdNgdKpDboAjSTsW"
- "q-aOGNynTp8TOv8KjonFym8vwFwppXOLoLXbkIaQ">>,
+ EncodedToken = <<
+ "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYX"
+ "IifQ.WePl7achkd0oGNB8XRF_LJwxlyiPZqpdNgdKpDboAjSTsW"
+ "q-aOGNynTp8TOv8KjonFym8vwFwppXOLoLXbkIaQ"
+ >>,
KS = fun(<<"HS512">>, _) -> <<"secret">> end,
- ?assertMatch({ok, {[{<<"foo">>,<<"bar">>}]}},
- catch jwtf:decode(EncodedToken, [], KS)).
-
+ ?assertMatch(
+ {ok, {[{<<"foo">>, <<"bar">>}]}},
+ catch jwtf:decode(EncodedToken, [], KS)
+ ).
%% jwt.io generated
rs256_test() ->
- EncodedToken = <<"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0N"
- "TY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.Ek"
- "N-DOsnsuRjRO6BxXemmJDm3HbxrbRzXglbN2S4sOkopdU4IsDxTI8j"
- "O19W_A4K8ZPJijNLis4EZsHeY559a4DFOd50_OqgHGuERTqYZyuhtF"
- "39yxJPAjUESwxk2J5k_4zM3O-vtd1Ghyo4IbqKKSy6J9mTniYJPenn"
- "5-HIirE">>,
+ EncodedToken = <<
+ "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0N"
+ "TY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.Ek"
+ "N-DOsnsuRjRO6BxXemmJDm3HbxrbRzXglbN2S4sOkopdU4IsDxTI8j"
+ "O19W_A4K8ZPJijNLis4EZsHeY559a4DFOd50_OqgHGuERTqYZyuhtF"
+ "39yxJPAjUESwxk2J5k_4zM3O-vtd1Ghyo4IbqKKSy6J9mTniYJPenn"
+ "5-HIirE"
+ >>,
Checks = [sig, alg],
KS = fun(<<"RS256">>, undefined) -> jwt_io_pubkey() end,
- ExpectedPayload = {[
- {<<"sub">>, <<"1234567890">>},
- {<<"name">>, <<"John Doe">>},
- {<<"admin">>, true}
- ]},
+ ExpectedPayload =
+ {[
+ {<<"sub">>, <<"1234567890">>},
+ {<<"name">>, <<"John Doe">>},
+ {<<"admin">>, true}
+ ]},
?assertMatch({ok, ExpectedPayload}, jwtf:decode(EncodedToken, Checks, KS)).
-
encode_missing_alg_test() ->
- ?assertEqual({error, {bad_request, <<"Missing alg header parameter">>}},
- jwtf:encode({[]}, {[]}, <<"foo">>)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Missing alg header parameter">>}},
+ jwtf:encode({[]}, {[]}, <<"foo">>)
+ ).
encode_invalid_alg_test() ->
- ?assertEqual({error, {bad_request, <<"Invalid alg header parameter">>}},
- jwtf:encode({[{<<"alg">>, <<"BOGUS">>}]}, {[]}, <<"foo">>)).
-
+ ?assertEqual(
+ {error, {bad_request, <<"Invalid alg header parameter">>}},
+ jwtf:encode({[{<<"alg">>, <<"BOGUS">>}]}, {[]}, <<"foo">>)
+ ).
encode_decode_test_() ->
[{Alg, encode_decode(Alg)} || Alg <- jwtf:valid_algorithms()].
-
encode_decode(Alg) ->
- {EncodeKey, DecodeKey} = case jwtf:verification_algorithm(Alg) of
- {public_key, _Algorithm} ->
- create_keypair();
- {hmac, _Algorithm} ->
- Key = <<"a-super-secret-key">>,
- {Key, Key}
- end,
+ {EncodeKey, DecodeKey} =
+ case jwtf:verification_algorithm(Alg) of
+ {public_key, _Algorithm} ->
+ create_keypair();
+ {hmac, _Algorithm} ->
+ Key = <<"a-super-secret-key">>,
+ {Key, Key}
+ end,
Claims = claims(),
{ok, Encoded} = jwtf:encode(header(Alg), Claims, EncodeKey),
KS = fun(_, _) -> DecodeKey end,
{ok, Decoded} = jwtf:decode(Encoded, [], KS),
?_assertMatch(Claims, Decoded).
-
header(Alg) ->
{[
{<<"typ">>, <<"JWT">>},
@@ -273,7 +312,6 @@ header(Alg) ->
{<<"kid">>, <<"20170520-00:00:00">>}
]}.
-
claims() ->
EpochSeconds = os:system_time(second),
{[
@@ -283,19 +321,23 @@ claims() ->
create_keypair() ->
%% https://tools.ietf.org/html/rfc7517#appendix-C
- N = decode(<<"t6Q8PWSi1dkJj9hTP8hNYFlvadM7DflW9mWepOJhJ66w7nyoK1gPNqFMSQRy"
+ N = decode(<<
+ "t6Q8PWSi1dkJj9hTP8hNYFlvadM7DflW9mWepOJhJ66w7nyoK1gPNqFMSQRy"
"O125Gp-TEkodhWr0iujjHVx7BcV0llS4w5ACGgPrcAd6ZcSR0-Iqom-QFcNP"
"8Sjg086MwoqQU_LYywlAGZ21WSdS_PERyGFiNnj3QQlO8Yns5jCtLCRwLHL0"
"Pb1fEv45AuRIuUfVcPySBWYnDyGxvjYGDSM-AqWS9zIQ2ZilgT-GqUmipg0X"
"OC0Cc20rgLe2ymLHjpHciCKVAbY5-L32-lSeZO-Os6U15_aXrk9Gw8cPUaX1"
- "_I8sLGuSiVdt3C_Fn2PZ3Z8i744FPFGGcG1qs2Wz-Q">>),
+ "_I8sLGuSiVdt3C_Fn2PZ3Z8i744FPFGGcG1qs2Wz-Q"
+ >>),
E = decode(<<"AQAB">>),
- D = decode(<<"GRtbIQmhOZtyszfgKdg4u_N-R_mZGU_9k7JQ_jn1DnfTuMdSNprTeaSTyWfS"
+ D = decode(<<
+ "GRtbIQmhOZtyszfgKdg4u_N-R_mZGU_9k7JQ_jn1DnfTuMdSNprTeaSTyWfS"
"NkuaAwnOEbIQVy1IQbWVV25NY3ybc_IhUJtfri7bAXYEReWaCl3hdlPKXy9U"
"vqPYGR0kIXTQRqns-dVJ7jahlI7LyckrpTmrM8dWBo4_PMaenNnPiQgO0xnu"
"ToxutRZJfJvG4Ox4ka3GORQd9CsCZ2vsUDmsXOfUENOyMqADC6p1M3h33tsu"
"rY15k9qMSpG9OX_IJAXmxzAh_tWiZOwk2K4yxH9tS3Lq1yX8C1EWmeRDkK2a"
- "hecG85-oLKQt5VEpWHKmjOi_gJSdSgqcN96X52esAQ">>),
+ "hecG85-oLKQt5VEpWHKmjOi_gJSdSgqcN96X52esAQ"
+ >>),
RSAPrivateKey = #'RSAPrivateKey'{
modulus = N,
publicExponent = E,
@@ -307,11 +349,9 @@ create_keypair() ->
},
{RSAPrivateKey, RSAPublicKey}.
-
decode(Goop) ->
crypto:bytes_to_integer(b64url:decode(Goop)).
-
now_seconds() ->
{MegaSecs, Secs, _MicroSecs} = os:timestamp(),
MegaSecs * 1000000 + Secs.
diff --git a/src/ken/src/ken_event_handler.erl b/src/ken/src/ken_event_handler.erl
index 8f158f425..f45fec087 100644
--- a/src/ken/src/ken_event_handler.erl
+++ b/src/ken/src/ken_event_handler.erl
@@ -25,7 +25,6 @@
handle_info/2
]).
-
start_link() ->
couch_event_listener:start_link(?MODULE, nil, [all_dbs]).
diff --git a/src/ken/src/ken_server.erl b/src/ken/src/ken_server.erl
index b33d01f35..9f869b379 100644
--- a/src/ken/src/ken_server.erl
+++ b/src/ken/src/ken_server.erl
@@ -32,8 +32,10 @@
-export([update_db_indexes/2]).
-record(job, {
- name, % {DbName, GroupId} for view. {DbName, DDocId, IndexId} for search.
- server, % Pid of either view group or search index
+ % {DbName, GroupId} for view. {DbName, DDocId, IndexId} for search.
+ name,
+ % Pid of either view group or search index
+ server,
worker_pid = nil,
seq = 0,
lru = erlang:monotonic_time()
@@ -78,11 +80,15 @@ remove(DbName) ->
add_all_shards(DbName) ->
try
Shards = mem3:shards(mem3:dbname(DbName)),
- lists:map(fun(Shard) ->
- rexi:cast(Shard#shard.node, {ken_server, add, [Shard#shard.name]})
- end, Shards)
- catch error:database_does_not_exist ->
- ok
+ lists:map(
+ fun(Shard) ->
+ rexi:cast(Shard#shard.node, {ken_server, add, [Shard#shard.name]})
+ end,
+ Shards
+ )
+ catch
+ error:database_does_not_exist ->
+ ok
end.
%% @doc Changes the configured value for a batch size.
@@ -124,66 +130,64 @@ terminate(_Reason, _State) ->
handle_call({set_batch_size, BS}, _From, #state{batch_size = Old} = State) ->
{reply, Old, State#state{batch_size = BS}, 0};
-
handle_call({set_delay, Delay}, _From, #state{delay = Old} = State) ->
{reply, Old, State#state{delay = Delay}, 0};
-
handle_call({set_limit, Limit}, _From, #state{limit = Old} = State) ->
{reply, Old, State#state{limit = Limit}, 0};
-
-handle_call({set_prune_interval, Interval}, _From , State) ->
+handle_call({set_prune_interval, Interval}, _From, State) ->
Old = State#state.prune_interval,
{reply, Old, State#state{prune_interval = Interval}, 0};
-
handle_call(Msg, From, State) ->
{stop, {unknown_call, Msg, From}, State}.
% Queues a DB to (maybe) have indexing jobs spawned.
handle_cast({add, DbName}, State) ->
case ets:insert_new(ken_pending, {DbName}) of
- true ->
- {noreply, State#state{q = queue:in(DbName, State#state.q)}, 0};
- false ->
- {noreply, State, 0}
+ true ->
+ {noreply, State#state{q = queue:in(DbName, State#state.q)}, 0};
+ false ->
+ {noreply, State, 0}
end;
-
handle_cast({remove, DbName}, State) ->
Q2 = queue:filter(fun(X) -> X =/= DbName end, State#state.q),
ets:delete(ken_pending, DbName),
% Delete search index workers
- ets:match_delete(ken_workers, #job{name={DbName,'_','_'}, _='_'}),
+ ets:match_delete(ken_workers, #job{name = {DbName, '_', '_'}, _ = '_'}),
% Delete view index workers
- ets:match_delete(ken_workers, #job{name={DbName,'_'}, _='_'}),
+ ets:match_delete(ken_workers, #job{name = {DbName, '_'}, _ = '_'}),
% TODO kill off active jobs for this DB as well
{noreply, State#state{q = Q2}, 0};
-
handle_cast({resubmit, DbName}, State) ->
ets:delete(ken_resubmit, DbName),
handle_cast({add, DbName}, State);
-
% st index job names have 3 elements, 3rd being 'hastings'. See job record definition.
-handle_cast({trigger_update, #job{name={_, _, hastings}, server=GPid, seq=Seq} = Job}, State) ->
+handle_cast({trigger_update, #job{name = {_, _, hastings}, server = GPid, seq = Seq} = Job}, State) ->
% hastings_index:await will trigger a hastings index update
- {Pid, _} = erlang:spawn_monitor(hastings_index, await,
- [GPid, Seq]),
+ {Pid, _} = erlang:spawn_monitor(
+ hastings_index,
+ await,
+ [GPid, Seq]
+ ),
Now = erlang:monotonic_time(),
ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
{noreply, State, 0};
% search index job names have 3 elements. See job record definition.
-handle_cast({trigger_update, #job{name={_,_,_}, server=GPid, seq=Seq} = Job}, State) ->
+handle_cast({trigger_update, #job{name = {_, _, _}, server = GPid, seq = Seq} = Job}, State) ->
% dreyfus_index:await will trigger a search index update.
- {Pid, _} = erlang:spawn_monitor(dreyfus_index, await,
- [GPid, Seq]),
+ {Pid, _} = erlang:spawn_monitor(
+ dreyfus_index,
+ await,
+ [GPid, Seq]
+ ),
Now = erlang:monotonic_time(),
ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
{noreply, State, 0};
-handle_cast({trigger_update, #job{name={_,_}, server=SrvPid, seq=Seq} = Job}, State) ->
+handle_cast({trigger_update, #job{name = {_, _}, server = SrvPid, seq = Seq} = Job}, State) ->
% couch_index:get_state/2 will trigger a view group index update.
{Pid, _} = erlang:spawn_monitor(couch_index, get_state, [SrvPid, Seq]),
Now = erlang:monotonic_time(),
ets:insert(ken_workers, Job#job{worker_pid = Pid, lru = Now}),
{noreply, State, 0};
-
handle_cast(Msg, State) ->
{stop, {unknown_cast, Msg}, State}.
@@ -191,37 +195,33 @@ handle_info({gen_event_EXIT, ken_event_handler, Reason}, State) ->
couch_log:error("ken_event_handler terminated: ~w", [Reason]),
erlang:send_after(5000, self(), start_event_handler),
{ok, State, 0};
-
handle_info(start_event_handler, State) ->
case ken_event_handler:start_link() of
- {ok, _Pid} ->
- ok;
- Error ->
- couch_log:error("ken_event_handler init: ~w", [Error]),
- erlang:send_after(5000, self(), start_event_handler)
+ {ok, _Pid} ->
+ ok;
+ Error ->
+ couch_log:error("ken_event_handler init: ~w", [Error]),
+ erlang:send_after(5000, self(), start_event_handler)
end,
{noreply, State, 0};
-
handle_info(timeout, #state{prune_interval = I, pruned_last = Last} = State) ->
Now = erlang:monotonic_time(),
Interval = erlang:convert_time_unit(
- State#state.delay, millisecond, native),
+ State#state.delay, millisecond, native
+ ),
case Now - Last > Interval of
- true ->
- NewState = prune_worker_table(State);
- _ ->
- NewState = State
+ true ->
+ NewState = prune_worker_table(State);
+ _ ->
+ NewState = State
end,
{noreply, maybe_start_next_queued_job(NewState), I};
-
-handle_info({'DOWN', _, _, Pid, Reason}, #state{dbworker = {Name,Pid}} = St) ->
+handle_info({'DOWN', _, _, Pid, Reason}, #state{dbworker = {Name, Pid}} = St) ->
maybe_resubmit(Name, Reason),
- {noreply, St#state{dbworker=nil}, 0};
-
+ {noreply, St#state{dbworker = nil}, 0};
handle_info({'DOWN', _, _, Pid, Reason}, State) ->
debrief_worker(Pid, Reason, State),
{noreply, State, 0};
-
handle_info(Msg, State) ->
{stop, {unknown_info, Msg}, State}.
@@ -230,31 +230,32 @@ code_change(_OldVsn, State, _Extra) ->
%% private functions
-maybe_start_next_queued_job(#state{dbworker = {_,_}} = State) ->
+maybe_start_next_queued_job(#state{dbworker = {_, _}} = State) ->
State;
-maybe_start_next_queued_job(#state{q=Q} = State) ->
+maybe_start_next_queued_job(#state{q = Q} = State) ->
IncrementalChannels = list_to_integer(config("incremental_channels", "80")),
BatchChannels = list_to_integer(config("batch_channels", "20")),
TotalChannels = IncrementalChannels + BatchChannels,
case queue:out(Q) of
- {{value, DbName}, Q2} ->
- case skip_job(DbName) of
- true ->
- % job is either being resubmitted or ignored, skip it
- ets:delete(ken_pending, DbName),
- maybe_start_next_queued_job(State#state{q = Q2});
- false ->
- case get_active_count() of A when A < TotalChannels ->
- Args = [DbName, State],
- {Pid, _} = spawn_monitor(?MODULE, update_db_indexes, Args),
- ets:delete(ken_pending, DbName),
- State#state{dbworker = {DbName,Pid}, q = Q2};
- _ ->
- State#state{q = queue:in_r(DbName, Q2)}
- end
- end;
- {empty, Q} ->
- State
+ {{value, DbName}, Q2} ->
+ case skip_job(DbName) of
+ true ->
+ % job is either being resubmitted or ignored, skip it
+ ets:delete(ken_pending, DbName),
+ maybe_start_next_queued_job(State#state{q = Q2});
+ false ->
+ case get_active_count() of
+ A when A < TotalChannels ->
+ Args = [DbName, State],
+ {Pid, _} = spawn_monitor(?MODULE, update_db_indexes, Args),
+ ets:delete(ken_pending, DbName),
+ State#state{dbworker = {DbName, Pid}, q = Q2};
+ _ ->
+ State#state{q = queue:in_r(DbName, Q2)}
+ end
+ end;
+ {empty, Q} ->
+ State
end.
skip_job(DbName) ->
@@ -262,28 +263,35 @@ skip_job(DbName) ->
ignore_db(DbName) ->
case config:get("ken.ignore", ?b2l(DbName), false) of
- "true" ->
- true;
- _ ->
- false
+ "true" ->
+ true;
+ _ ->
+ false
end.
get_active_count() ->
- MatchSpec = [{#job{worker_pid='$1', _='_'}, [{is_pid, '$1'}], [true]}],
+ MatchSpec = [{#job{worker_pid = '$1', _ = '_'}, [{is_pid, '$1'}], [true]}],
ets:select_count(ken_workers, MatchSpec).
% If any indexing job fails, resubmit requests for all indexes.
update_db_indexes(Name, State) ->
{ok, DDocs} = design_docs(Name),
RandomSorted = lists:sort([{rand:uniform(), D} || D <- DDocs]),
- Resubmit = lists:foldl(fun({_, DDoc}, Acc) ->
- JsonDDoc = couch_doc:from_json_obj(DDoc),
- case update_ddoc_indexes(Name, JsonDDoc, State) of
- ok -> Acc;
- _ -> true
- end
- end, false, RandomSorted),
- if Resubmit -> exit(resubmit); true -> ok end.
+ Resubmit = lists:foldl(
+ fun({_, DDoc}, Acc) ->
+ JsonDDoc = couch_doc:from_json_obj(DDoc),
+ case update_ddoc_indexes(Name, JsonDDoc, State) of
+ ok -> Acc;
+ _ -> true
+ end
+ end,
+ false,
+ RandomSorted
+ ),
+ if
+ Resubmit -> exit(resubmit);
+ true -> ok
+ end.
design_docs(Name) ->
try
@@ -293,27 +301,32 @@ design_docs(Name) ->
Else ->
Else
end
- catch error:database_does_not_exist ->
- {ok, []}
+ catch
+ error:database_does_not_exist ->
+ {ok, []}
end.
% Returns an error if any job creation fails.
-update_ddoc_indexes(Name, #doc{}=Doc, State) ->
- {ok, Db} = case couch_db:open_int(Name, []) of
- {ok, _} = Resp -> Resp;
- Else -> exit(Else)
- end,
+update_ddoc_indexes(Name, #doc{} = Doc, State) ->
+ {ok, Db} =
+ case couch_db:open_int(Name, []) of
+ {ok, _} = Resp -> Resp;
+ Else -> exit(Else)
+ end,
Seq = couch_db:get_update_seq(Db),
couch_db:close(Db),
- ViewUpdated = case should_update(Doc, <<"views">>) of true ->
- try couch_mrview_util:ddoc_to_mrst(Name, Doc) of
- {ok, MRSt} -> update_ddoc_views(Name, MRSt, Seq, State)
- catch _:_ ->
- ok
- end;
- false ->
- ok
- end,
+ ViewUpdated =
+ case should_update(Doc, <<"views">>) of
+ true ->
+ try couch_mrview_util:ddoc_to_mrst(Name, Doc) of
+ {ok, MRSt} -> update_ddoc_views(Name, MRSt, Seq, State)
+ catch
+ _:_ ->
+ ok
+ end;
+ false ->
+ ok
+ end,
SearchUpdated = search_updated(Name, Doc, Seq, State),
STUpdated = st_updated(Name, Doc, Seq, State),
case {ViewUpdated, SearchUpdated, STUpdated} of
@@ -323,14 +336,16 @@ update_ddoc_indexes(Name, #doc{}=Doc, State) ->
-ifdef(HAVE_DREYFUS).
search_updated(Name, Doc, Seq, State) ->
- case should_update(Doc, <<"indexes">>) of true ->
- try dreyfus_index:design_doc_to_indexes(Doc) of
- SIndexes -> update_ddoc_search_indexes(Name, SIndexes, Seq, State)
- catch _:_ ->
+ case should_update(Doc, <<"indexes">>) of
+ true ->
+ try dreyfus_index:design_doc_to_indexes(Doc) of
+ SIndexes -> update_ddoc_search_indexes(Name, SIndexes, Seq, State)
+ catch
+ _:_ ->
+ ok
+ end;
+ false ->
ok
- end;
- false ->
- ok
end.
-else.
search_updated(_Name, _Doc, _Seq, _State) ->
@@ -339,22 +354,23 @@ search_updated(_Name, _Doc, _Seq, _State) ->
-ifdef(HAVE_HASTINGS).
st_updated(Name, Doc, Seq, State) ->
- case should_update(Doc, <<"st_indexes">>) of true ->
- try
- hastings_index:design_doc_to_indexes(Doc) of
- STIndexes -> update_ddoc_st_indexes(Name, STIndexes, Seq, State)
- catch _:_ ->
+ case should_update(Doc, <<"st_indexes">>) of
+ true ->
+ try hastings_index:design_doc_to_indexes(Doc) of
+ STIndexes -> update_ddoc_st_indexes(Name, STIndexes, Seq, State)
+ catch
+ _:_ ->
+ ok
+ end;
+ false ->
ok
- end;
- false ->
- ok
end.
-else.
st_updated(_Name, _Doc, _Seq, _State) ->
ok.
-endif.
-should_update(#doc{body={Props}}, IndexType) ->
+should_update(#doc{body = {Props}}, IndexType) ->
case couch_util:get_value(<<"autoupdate">>, Props) of
false ->
false;
@@ -373,47 +389,66 @@ update_ddoc_views(Name, MRSt, Seq, State) ->
Language = couch_mrview_index:get(language, MRSt),
Allowed = lists:member(Language, allowed_languages()),
Views = couch_mrview_index:get(views, MRSt),
- if Allowed andalso Views =/= [] ->
- {ok, Pid} = couch_index_server:get_index(couch_mrview_index, MRSt),
- GroupName = couch_mrview_index:get(idx_name, MRSt),
- maybe_start_job({Name, GroupName}, Pid, Seq, State);
- true -> ok end.
+ if
+ Allowed andalso Views =/= [] ->
+ {ok, Pid} = couch_index_server:get_index(couch_mrview_index, MRSt),
+ GroupName = couch_mrview_index:get(idx_name, MRSt),
+ maybe_start_job({Name, GroupName}, Pid, Seq, State);
+ true ->
+ ok
+ end.
-ifdef(HAVE_DREYFUS).
update_ddoc_search_indexes(DbName, Indexes, Seq, State) ->
- if Indexes =/= [] ->
- % Spawn a job for each search index in the ddoc
- lists:foldl(fun(#index{name=IName, ddoc_id=DDocName}=Index, Acc) ->
- case dreyfus_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case maybe_start_job({DbName, DDocName, IName}, Pid, Seq, State) of
- resubmit -> resubmit;
- _ -> Acc
- end;
- _ ->
- % If any job fails, retry the db.
- resubmit
- end end, ok, Indexes);
- true -> ok end.
+ if
+ Indexes =/= [] ->
+ % Spawn a job for each search index in the ddoc
+ lists:foldl(
+ fun(#index{name = IName, ddoc_id = DDocName} = Index, Acc) ->
+ case dreyfus_index_manager:get_index(DbName, Index) of
+ {ok, Pid} ->
+ case maybe_start_job({DbName, DDocName, IName}, Pid, Seq, State) of
+ resubmit -> resubmit;
+ _ -> Acc
+ end;
+ _ ->
+ % If any job fails, retry the db.
+ resubmit
+ end
+ end,
+ ok,
+ Indexes
+ );
+ true ->
+ ok
+ end.
-endif.
-ifdef(HAVE_HASTINGS).
update_ddoc_st_indexes(DbName, Indexes, Seq, State) ->
- if Indexes =/= [] ->
- % The record name in hastings is #h_idx rather than #index as it is for dreyfus
- % Spawn a job for each spatial index in the ddoc
- lists:foldl(fun(#h_idx{ddoc_id=DDocName}=Index, Acc) ->
- case hastings_index_manager:get_index(DbName, Index) of
- {ok, Pid} ->
- case maybe_start_job({DbName, DDocName, hastings}, Pid, Seq, State) of
- resubmit -> resubmit;
- _ -> Acc
- end;
- _ ->
- % If any job fails, retry the db.
- resubmit
- end end, ok, Indexes);
- true -> ok end.
+ if
+ Indexes =/= [] ->
+ % The record name in hastings is #h_idx rather than #index as it is for dreyfus
+ % Spawn a job for each spatial index in the ddoc
+ lists:foldl(
+ fun(#h_idx{ddoc_id = DDocName} = Index, Acc) ->
+ case hastings_index_manager:get_index(DbName, Index) of
+ {ok, Pid} ->
+ case maybe_start_job({DbName, DDocName, hastings}, Pid, Seq, State) of
+ resubmit -> resubmit;
+ _ -> Acc
+ end;
+ _ ->
+ % If any job fails, retry the db.
+ resubmit
+ end
+ end,
+ ok,
+ Indexes
+ );
+ true ->
+ ok
+ end.
-endif.
should_start_job(#job{name = Name, seq = Seq, server = Pid}, State) ->
@@ -424,48 +459,49 @@ should_start_job(#job{name = Name, seq = Seq, server = Pid}, State) ->
A = get_active_count(),
#state{delay = Delay, batch_size = BS} = State,
case ets:lookup(ken_workers, Name) of
- [] ->
- if
- A < BatchChannels ->
- true;
- A < TotalChannels ->
- case Name of
- % st_index name has three elements
- {_, _, hastings} ->
- {ok, CurrentSeq} = hastings_index:await(Pid, 0),
- (Seq - CurrentSeq) < Threshold;
- % View name has two elements.
- {_,_} ->
- % Since seq is 0, couch_index:get_state/2 won't
- % spawn an index update.
- {ok, MRSt} = couch_index:get_state(Pid, 0),
- CurrentSeq = couch_mrview_index:get(update_seq, MRSt),
- (Seq - CurrentSeq) < Threshold;
- % Search name has three elements.
- {_,_,_} ->
- {ok, _IndexPid, CurrentSeq} = dreyfus_index:await(Pid, 0),
- (Seq - CurrentSeq) < Threshold;
- _ -> % Should never happen, but if it does, ignore.
- false
+ [] ->
+ if
+ A < BatchChannels ->
+ true;
+ A < TotalChannels ->
+ case Name of
+ % st_index name has three elements
+ {_, _, hastings} ->
+ {ok, CurrentSeq} = hastings_index:await(Pid, 0),
+ (Seq - CurrentSeq) < Threshold;
+ % View name has two elements.
+ {_, _} ->
+ % Since seq is 0, couch_index:get_state/2 won't
+ % spawn an index update.
+ {ok, MRSt} = couch_index:get_state(Pid, 0),
+ CurrentSeq = couch_mrview_index:get(update_seq, MRSt),
+ (Seq - CurrentSeq) < Threshold;
+ % Search name has three elements.
+ {_, _, _} ->
+ {ok, _IndexPid, CurrentSeq} = dreyfus_index:await(Pid, 0),
+ (Seq - CurrentSeq) < Threshold;
+ % Should never happen, but if it does, ignore.
+ _ ->
+ false
end;
- true ->
- false
- end;
- [#job{worker_pid = nil, lru = LRU, seq = OldSeq}] ->
- Now = erlang:monotonic_time(),
- DeltaT = erlang:convert_time_unit(Now - LRU, native, millisecond),
- if
- A < BatchChannels, (Seq - OldSeq) >= BS ->
- true;
- A < BatchChannels, DeltaT > Delay ->
- true;
- A < TotalChannels, (Seq - OldSeq) < Threshold, DeltaT > Delay ->
- true;
- true ->
- false
- end;
- _ ->
- false
+ true ->
+ false
+ end;
+ [#job{worker_pid = nil, lru = LRU, seq = OldSeq}] ->
+ Now = erlang:monotonic_time(),
+ DeltaT = erlang:convert_time_unit(Now - LRU, native, millisecond),
+ if
+ A < BatchChannels, (Seq - OldSeq) >= BS ->
+ true;
+ A < BatchChannels, DeltaT > Delay ->
+ true;
+ A < TotalChannels, (Seq - OldSeq) < Threshold, DeltaT > Delay ->
+ true;
+ true ->
+ false
+ end;
+ _ ->
+ false
end.
maybe_start_job(JobName, IndexPid, Seq, State) ->
@@ -475,24 +511,25 @@ maybe_start_job(JobName, IndexPid, Seq, State) ->
seq = Seq
},
case should_start_job(Job, State) of
- true ->
- gen_server:cast(?MODULE, {trigger_update, Job});
- false ->
- resubmit
+ true ->
+ gen_server:cast(?MODULE, {trigger_update, Job});
+ false ->
+ resubmit
end.
debrief_worker(Pid, Reason, _State) ->
- case ets:match_object(ken_workers, #job{worker_pid=Pid, _='_'}) of
- [#job{name = Name} = Job] ->
- case Name of
- {DbName,_} ->
- maybe_resubmit(DbName, Reason);
- {DbName,_,_} ->
- maybe_resubmit(DbName, Reason)
- end,
- ets:insert(ken_workers, Job#job{worker_pid = nil});
- [] -> % should never happen, but if it does, ignore
- ok
+ case ets:match_object(ken_workers, #job{worker_pid = Pid, _ = '_'}) of
+ [#job{name = Name} = Job] ->
+ case Name of
+ {DbName, _} ->
+ maybe_resubmit(DbName, Reason);
+ {DbName, _, _} ->
+ maybe_resubmit(DbName, Reason)
+ end,
+ ets:insert(ken_workers, Job#job{worker_pid = nil});
+ % should never happen, but if it does, ignore
+ [] ->
+ ok
end.
maybe_resubmit(_DbName, normal) ->
@@ -519,14 +556,15 @@ prune_worker_table(State) ->
Delay = erlang:convert_time_unit(State#state.delay, millisecond, native),
C = erlang:monotonic_time() - Delay,
%% fun(#job{worker_pid=nil, lru=A) when A < C -> true end
- MatchHead = #job{worker_pid=nil, lru='$1', _='_'},
+ MatchHead = #job{worker_pid = nil, lru = '$1', _ = '_'},
Guard = {'<', '$1', C},
ets:select_delete(ken_workers, [{MatchHead, [Guard], [true]}]),
State#state{pruned_last = erlang:monotonic_time()}.
allowed_languages() ->
- Config = couch_proc_manager:get_servers_from_env("COUCHDB_QUERY_SERVER_") ++
- couch_proc_manager:get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_"),
+ Config =
+ couch_proc_manager:get_servers_from_env("COUCHDB_QUERY_SERVER_") ++
+ couch_proc_manager:get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_"),
Allowed = [list_to_binary(string:to_lower(Lang)) || {Lang, _Cmd} <- Config],
[<<"query">> | Allowed].
@@ -536,8 +574,6 @@ config(Key, Default) ->
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
-
prune_old_entries_test() ->
{
setup,
@@ -548,15 +584,19 @@ prune_old_entries_test() ->
catch ets:delete(ken_workers)
end,
?_test(begin
- lists:foreach(fun(Idx) ->
- ets:insert(ken_workers, #job{name=Idx}),
- timer:sleep(100)
- end, lists:seq(1, 3)),
- prune_worker_table(#state{delay=250}),
+ lists:foreach(
+ fun(Idx) ->
+ ets:insert(ken_workers, #job{name = Idx}),
+ timer:sleep(100)
+ end,
+ lists:seq(1, 3)
+ ),
+ prune_worker_table(#state{delay = 250}),
?assertEqual(
[2, 3],
lists:usort(
- [N || #job{name = N} <- ets:tab2list(ken_workers)])
+ [N || #job{name = N} <- ets:tab2list(ken_workers)]
+ )
),
ok
end)
diff --git a/src/ken/src/ken_sup.erl b/src/ken/src/ken_sup.erl
index fd08cfd11..8c06592e7 100644
--- a/src/ken/src/ken_sup.erl
+++ b/src/ken/src/ken_sup.erl
@@ -29,5 +29,4 @@ start_link() ->
%% supervisor callbacks
init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(ken_server, worker)]} }.
-
+ {ok, {{one_for_one, 5, 10}, [?CHILD(ken_server, worker)]}}.
diff --git a/src/ken/test/ken_server_test.erl b/src/ken/test/ken_server_test.erl
index eed348422..090c5570a 100644
--- a/src/ken/test/ken_server_test.erl
+++ b/src/ken/test/ken_server_test.erl
@@ -16,10 +16,8 @@
%% hardcoded defaults: limit: 20; batch: 1; delay: 5000; prune: 60000
default_test_() ->
- {inorder, {setup,
- fun setup_default/0,
- fun teardown/1,
- [
+ {inorder,
+ {setup, fun setup_default/0, fun teardown/1, [
set_builder("returns default", set_limit, 12, 20),
set_builder("keeps set", set_limit, 6, 12),
set_builder("returns default", set_batch_size, 3, 1),
@@ -28,14 +26,11 @@ default_test_() ->
set_builder("keeps set", set_delay, 10000, 7000),
set_builder("returns default", set_prune_interval, 70000, 60000),
set_builder("keeps set", set_prune_interval, 80000, 70000)
- ]
- }}.
+ ]}}.
exception_test_() ->
- {inorder, {foreach,
- fun setup_default/0,
- fun teardown/1,
- [
+ {inorder,
+ {foreach, fun setup_default/0, fun teardown/1, [
exception_builder("exception on zero", set_limit, 0),
exception_builder("exception on negative", set_limit, -12),
exception_builder("exception on zero", set_batch_size, 0),
@@ -44,18 +39,14 @@ exception_test_() ->
exception_builder("exception on negative", set_delay, -12),
exception_builder("exception on zero", set_prune_interval, 0),
exception_builder("exception on negative", set_prune_interval, -12)
- ]
- }}.
+ ]}}.
config_test_() ->
- {inorder, {setup,
- fun setup_config/0,
- fun teardown/1,
- [
+ {inorder,
+ {setup, fun setup_config/0, fun teardown/1, [
set_builder("reads config", set_limit, 24, 42),
set_builder("keeps set", set_limit, 6, 24)
- ]
- }}.
+ ]}}.
setup_default() ->
{ok, EventPid} = start_server(couch_event_server),
@@ -94,4 +85,6 @@ stop_server(Key, Cfg) ->
{Key, Pid} = lists:keyfind(Key, 1, Cfg),
MRef = erlang:monitor(process, Pid),
true = exit(Pid, kill),
- receive {'DOWN', MRef, _, _, _} -> ok end.
+ receive
+ {'DOWN', MRef, _, _, _} -> ok
+ end.
diff --git a/src/mango/src/mango_crud.erl b/src/mango/src/mango_crud.erl
index 41a4d143d..c13dbdcb9 100644
--- a/src/mango/src/mango_crud.erl
+++ b/src/mango/src/mango_crud.erl
@@ -24,14 +24,12 @@
collect_cb/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
-insert(Db, #doc{}=Doc, Opts) ->
+insert(Db, #doc{} = Doc, Opts) ->
insert(Db, [Doc], Opts);
-insert(Db, {_}=Doc, Opts) ->
+insert(Db, {_} = Doc, Opts) ->
insert(Db, [Doc], Opts);
insert(Db, Docs, Opts0) when is_list(Docs) ->
Opts1 = maybe_add_user_ctx(Db, Opts0),
@@ -45,14 +43,12 @@ insert(Db, Docs, Opts0) when is_list(Docs) ->
{error, lists:map(fun result_to_json/1, Errors)}
end.
-
find(Db, Selector, Callback, UserAcc, Opts0) ->
Opts1 = maybe_add_user_ctx(Db, Opts0),
Opts2 = maybe_int_to_str(r, Opts1),
{ok, Cursor} = mango_cursor:create(Db, Selector, Opts2),
mango_cursor:execute(Cursor, Callback, UserAcc).
-
update(Db, Selector, Update, Options) ->
Upsert = proplists:get_value(upsert, Options),
case collect_docs(Db, Selector, Options) of
@@ -65,47 +61,51 @@ update(Db, Selector, Update, Options) ->
% Probably need to catch and rethrow errors from
% this function.
Doc = couch_doc:from_json_obj(InitDoc),
- NewDoc = case Doc#doc.id of
- <<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
- _ ->
- Doc
- end,
+ NewDoc =
+ case Doc#doc.id of
+ <<"">> ->
+ Doc#doc{id = couch_uuids:new(), revs = {0, []}};
+ _ ->
+ Doc
+ end,
insert(Db, NewDoc, Options)
end;
{ok, Docs} ->
- NewDocs = lists:map(fun(Doc) ->
- mango_doc:apply_update(Doc, Update)
- end, Docs),
+ NewDocs = lists:map(
+ fun(Doc) ->
+ mango_doc:apply_update(Doc, Update)
+ end,
+ Docs
+ ),
insert(Db, NewDocs, Options);
Else ->
Else
end.
-
delete(Db, Selector, Options) ->
case collect_docs(Db, Selector, Options) of
{ok, Docs} ->
- NewDocs = lists:map(fun({Props}) ->
- {[
- {<<"_id">>, proplists:get_value(<<"_id">>, Props)},
- {<<"_rev">>, proplists:get_value(<<"_rev">>, Props)},
- {<<"_deleted">>, true}
- ]}
- end, Docs),
+ NewDocs = lists:map(
+ fun({Props}) ->
+ {[
+ {<<"_id">>, proplists:get_value(<<"_id">>, Props)},
+ {<<"_rev">>, proplists:get_value(<<"_rev">>, Props)},
+ {<<"_deleted">>, true}
+ ]}
+ end,
+ Docs
+ ),
insert(Db, NewDocs, Options);
Else ->
Else
end.
-
explain(Db, Selector, Opts0) ->
Opts1 = maybe_add_user_ctx(Db, Opts0),
Opts2 = maybe_int_to_str(r, Opts1),
{ok, Cursor} = mango_cursor:create(Db, Selector, Opts2),
mango_cursor:explain(Cursor).
-
maybe_add_user_ctx(Db, Opts) ->
case lists:keyfind(user_ctx, 1, Opts) of
{user_ctx, _} ->
@@ -114,7 +114,6 @@ maybe_add_user_ctx(Db, Opts) ->
[{user_ctx, couch_db:get_user_ctx(Db)} | Opts]
end.
-
maybe_int_to_str(_Key, []) ->
[];
maybe_int_to_str(Key, [{Key, Val} | Rest]) when is_integer(Val) ->
@@ -122,8 +121,7 @@ maybe_int_to_str(Key, [{Key, Val} | Rest]) when is_integer(Val) ->
maybe_int_to_str(Key, [KV | Rest]) ->
[KV | maybe_int_to_str(Key, Rest)].
-
-result_to_json(#doc{id=Id}, Result) ->
+result_to_json(#doc{id = Id}, Result) ->
result_to_json(Id, Result);
result_to_json({Props}, Result) ->
Id = couch_util:get_value(<<"_id">>, Props),
@@ -149,7 +147,6 @@ result_to_json(DocId, Error) ->
{reason, Reason}
]}.
-
% This is for errors because for some reason we
% need a different return value for errors? Blargh.
result_to_json({{Id, Rev}, Error}) ->
@@ -161,7 +158,6 @@ result_to_json({{Id, Rev}, Error}) ->
{reason, Reason}
]}.
-
collect_docs(Db, Selector, Options) ->
Cb = fun ?MODULE:collect_cb/2,
case find(Db, Selector, Cb, [], Options) of
@@ -171,7 +167,5 @@ collect_docs(Db, Selector, Options) ->
Else
end.
-
collect_cb({row, Doc}, Acc) ->
{ok, [Doc | Acc]}.
-
diff --git a/src/mango/src/mango_cursor.erl b/src/mango/src/mango_cursor.erl
index b1cb4148e..e9db4c3cf 100644
--- a/src/mango/src/mango_cursor.erl
+++ b/src/mango/src/mango_cursor.erl
@@ -12,7 +12,6 @@
-module(mango_cursor).
-
-export([
create/3,
explain/1,
@@ -23,13 +22,11 @@
maybe_noop_range/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_cursor.hrl").
-include("mango_idx.hrl").
-
-ifdef(HAVE_DREYFUS).
-define(CURSOR_MODULES, [
mango_cursor_view,
@@ -45,7 +42,6 @@
-define(SUPERVISOR, mango_cursor_sup).
-
create(Db, Selector0, Opts) ->
Selector = mango_selector:normalize(Selector0),
UsableIndexes = mango_idx:get_usable_indexes(Db, Selector, Opts),
@@ -57,8 +53,7 @@ create(Db, Selector0, Opts) ->
create_cursor(Db, UserSpecifiedIndex, Selector, Opts)
end.
-
-explain(#cursor{}=Cursor) ->
+explain(#cursor{} = Cursor) ->
#cursor{
index = Idx,
selector = Selector,
@@ -69,23 +64,23 @@ explain(#cursor{}=Cursor) ->
} = Cursor,
Mod = mango_idx:cursor_mod(Idx),
Opts = lists:keydelete(user_ctx, 1, Opts0),
- {[
- {dbname, mango_idx:dbname(Idx)},
- {index, mango_idx:to_json(Idx)},
- {partitioned, mango_idx:partitioned(Idx)},
- {selector, Selector},
- {opts, {Opts}},
- {limit, Limit},
- {skip, Skip},
- {fields, Fields}
- ] ++ Mod:explain(Cursor)}.
-
-
-execute(#cursor{index=Idx}=Cursor, UserFun, UserAcc) ->
+ {
+ [
+ {dbname, mango_idx:dbname(Idx)},
+ {index, mango_idx:to_json(Idx)},
+ {partitioned, mango_idx:partitioned(Idx)},
+ {selector, Selector},
+ {opts, {Opts}},
+ {limit, Limit},
+ {skip, Skip},
+ {fields, Fields}
+ ] ++ Mod:explain(Cursor)
+ }.
+
+execute(#cursor{index = Idx} = Cursor, UserFun, UserAcc) ->
Mod = mango_idx:cursor_mod(Idx),
Mod:execute(Cursor, UserFun, UserAcc).
-
maybe_filter_indexes_by_ddoc(Indexes, Opts) ->
case lists:keyfind(use_index, 1, Opts) of
{use_index, []} ->
@@ -96,24 +91,22 @@ maybe_filter_indexes_by_ddoc(Indexes, Opts) ->
filter_indexes(Indexes, DesignId, ViewName)
end.
-
filter_indexes(Indexes, DesignId0) ->
- DesignId = case DesignId0 of
- <<"_design/", _/binary>> ->
- DesignId0;
- Else ->
- <<"_design/", Else/binary>>
- end,
+ DesignId =
+ case DesignId0 of
+ <<"_design/", _/binary>> ->
+ DesignId0;
+ Else ->
+ <<"_design/", Else/binary>>
+ end,
FiltFun = fun(I) -> mango_idx:ddoc(I) == DesignId end,
lists:filter(FiltFun, Indexes).
-
filter_indexes(Indexes0, DesignId, ViewName) ->
Indexes = filter_indexes(Indexes0, DesignId),
FiltFun = fun(I) -> mango_idx:name(I) == ViewName end,
lists:filter(FiltFun, Indexes).
-
remove_indexes_with_partial_filter_selector(Indexes) ->
FiltFun = fun(Idx) ->
case mango_idx:get_partial_filter_selector(Idx) of
@@ -123,7 +116,6 @@ remove_indexes_with_partial_filter_selector(Indexes) ->
end,
lists:filter(FiltFun, Indexes).
-
maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, Stats, UserAcc) ->
W0 = invalid_index_warning(Index, Opts),
W1 = no_index_warning(Index),
@@ -139,39 +131,41 @@ maybe_add_warning(UserFun, #cursor{index = Index, opts = Opts}, Stats, UserAcc)
UserAcc1
end.
-
create_cursor(Db, Indexes, Selector, Opts) ->
[{CursorMod, CursorModIndexes} | _] = group_indexes_by_type(Indexes),
CursorMod:create(Db, CursorModIndexes, Selector, Opts).
-
group_indexes_by_type(Indexes) ->
- IdxDict = lists:foldl(fun(I, D) ->
- dict:append(mango_idx:cursor_mod(I), I, D)
- end, dict:new(), Indexes),
+ IdxDict = lists:foldl(
+ fun(I, D) ->
+ dict:append(mango_idx:cursor_mod(I), I, D)
+ end,
+ dict:new(),
+ Indexes
+ ),
% The first cursor module that has indexes will be
% used to service this query. This is so that we
% don't suddenly switch indexes for existing client
% queries.
- lists:flatmap(fun(CMod) ->
- case dict:find(CMod, IdxDict) of
- {ok, CModIndexes} ->
- [{CMod, CModIndexes}];
- error ->
- []
- end
- end, ?CURSOR_MODULES).
-
+ lists:flatmap(
+ fun(CMod) ->
+ case dict:find(CMod, IdxDict) of
+ {ok, CModIndexes} ->
+ [{CMod, CModIndexes}];
+ error ->
+ []
+ end
+ end,
+ ?CURSOR_MODULES
+ ).
% warn if the _all_docs index was used to fulfil a query
no_index_warning(#idx{type = Type}) when Type =:= <<"special">> ->
couch_stats:increment_counter([mango, unindexed_queries]),
[<<"No matching index found, create an index to optimize query time.">>];
-
no_index_warning(_) ->
[].
-
% warn if user specified an index which doesn't exist or isn't valid
% for the selector.
% In this scenario, Mango will ignore the index hint and auto-select an index.
@@ -179,36 +173,42 @@ invalid_index_warning(Index, Opts) ->
UseIndex = lists:keyfind(use_index, 1, Opts),
invalid_index_warning_int(Index, UseIndex).
-
invalid_index_warning_int(Index, {use_index, [DesignId]}) ->
Filtered = filter_indexes([Index], DesignId),
- if Filtered /= [] -> []; true ->
- couch_stats:increment_counter([mango, query_invalid_index]),
- Reason = fmt("_design/~s was not used because it does not contain a valid index for this query.",
- [ddoc_name(DesignId)]),
- [Reason]
+ if
+ Filtered /= [] ->
+ [];
+ true ->
+ couch_stats:increment_counter([mango, query_invalid_index]),
+ Reason = fmt(
+ "_design/~s was not used because it does not contain a valid index for this query.",
+ [ddoc_name(DesignId)]
+ ),
+ [Reason]
end;
-
invalid_index_warning_int(Index, {use_index, [DesignId, ViewName]}) ->
Filtered = filter_indexes([Index], DesignId, ViewName),
- if Filtered /= [] -> []; true ->
- couch_stats:increment_counter([mango, query_invalid_index]),
- Reason = fmt("_design/~s, ~s was not used because it is not a valid index for this query.",
- [ddoc_name(DesignId), ViewName]),
- [Reason]
+ if
+ Filtered /= [] ->
+ [];
+ true ->
+ couch_stats:increment_counter([mango, query_invalid_index]),
+ Reason = fmt(
+ "_design/~s, ~s was not used because it is not a valid index for this query.",
+ [ddoc_name(DesignId), ViewName]
+ ),
+ [Reason]
end;
-
invalid_index_warning_int(_, _) ->
[].
-
% warn if a large number of documents needed to be scanned per result
% returned, implying a lot of in-memory filtering
-index_scan_warning(#execution_stats {
- totalDocsExamined = Docs,
- totalQuorumDocsExamined = DocsQuorum,
- resultsReturned = ResultCount
- }) ->
+index_scan_warning(#execution_stats{
+ totalDocsExamined = Docs,
+ totalQuorumDocsExamined = DocsQuorum,
+ resultsReturned = ResultCount
+}) ->
% Docs and DocsQuorum are mutually exclusive so it's safe to sum them
DocsScanned = Docs + DocsQuorum,
Ratio = calculate_index_scan_ratio(DocsScanned, ResultCount),
@@ -216,9 +216,11 @@ index_scan_warning(#execution_stats {
case Threshold > 0 andalso Ratio > Threshold of
true ->
couch_stats:increment_counter([mango, too_many_docs_scanned]),
- Reason = <<"The number of documents examined is high in proportion to the number of results returned. Consider adding a more specific index to improve this.">>,
+ Reason =
+ <<"The number of documents examined is high in proportion to the number of results returned. Consider adding a more specific index to improve this.">>,
[Reason];
- false -> []
+ false ->
+ []
end.
% When there is an empty array for certain operators, we don't actually
@@ -237,20 +239,15 @@ maybe_noop_range({[{Op, []}]}, IndexRanges) ->
maybe_noop_range(_, IndexRanges) ->
IndexRanges.
-
calculate_index_scan_ratio(DocsScanned, 0) ->
DocsScanned;
-
calculate_index_scan_ratio(DocsScanned, ResultCount) ->
DocsScanned / ResultCount.
-
fmt(Format, Args) ->
iolist_to_binary(io_lib:format(Format, Args)).
-
ddoc_name(<<"_design/", Name/binary>>) ->
Name;
-
ddoc_name(Name) ->
Name.
diff --git a/src/mango/src/mango_cursor_special.erl b/src/mango/src/mango_cursor_special.erl
index df1f6d655..f20edebd1 100644
--- a/src/mango/src/mango_cursor_special.erl
+++ b/src/mango/src/mango_cursor_special.erl
@@ -22,12 +22,10 @@
handle_message/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-include("mango_cursor.hrl").
-
create(Db, Indexes, Selector, Opts) ->
InitialRange = mango_idx_view:field_ranges(Selector),
CatchAll = [{<<"_id">>, {'$gt', null, '$lt', mango_json_max}}],
@@ -37,7 +35,7 @@ create(Db, Indexes, Selector, Opts) ->
FieldRanges = InitialRange ++ CatchAll,
Composited = mango_cursor_view:composite_indexes(Indexes, FieldRanges),
{Index, IndexRanges} = mango_cursor_view:choose_best_index(Db, Composited),
-
+
Limit = couch_util:get_value(limit, Opts, mango_opts:default_limit()),
Skip = couch_util:get_value(skip, Opts, 0),
Fields = couch_util:get_value(fields, Opts, all_fields),
diff --git a/src/mango/src/mango_cursor_text.erl b/src/mango/src/mango_cursor_text.erl
index 43ef84e4c..53bf63edb 100644
--- a/src/mango/src/mango_cursor_text.erl
+++ b/src/mango/src/mango_cursor_text.erl
@@ -20,13 +20,11 @@
execute/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("dreyfus/include/dreyfus.hrl").
-include("mango_cursor.hrl").
-include("mango.hrl").
-
-record(cacc, {
selector,
dbname,
@@ -42,14 +40,14 @@
execution_stats
}).
-
create(Db, Indexes, Selector, Opts0) ->
- Index = case Indexes of
- [Index0] ->
- Index0;
- _ ->
- ?MANGO_ERROR(multiple_text_indexes)
- end,
+ Index =
+ case Indexes of
+ [Index0] ->
+ Index0;
+ _ ->
+ ?MANGO_ERROR(multiple_text_indexes)
+ end,
Opts = unpack_bookmark(couch_db:name(Db), Opts0),
@@ -69,7 +67,6 @@ create(Db, Indexes, Selector, Opts0) ->
fields = Fields
}}.
-
explain(Cursor) ->
#cursor{
selector = Selector,
@@ -81,7 +78,6 @@ explain(Cursor) ->
{sort, sort_query(Opts, Selector)}
].
-
execute(Cursor, UserFun, UserAcc) ->
#cursor{
db = Db,
@@ -131,12 +127,13 @@ execute(Cursor, UserFun, UserAcc) ->
JsonBM = dreyfus_bookmark:pack(FinalBM),
Arg = {add_key, bookmark, JsonBM},
{_Go, FinalUserAcc} = UserFun(Arg, LastUserAcc),
- FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc),
+ FinalUserAcc0 = mango_execution_stats:maybe_add_stats(
+ Opts, UserFun, Stats0, FinalUserAcc
+ ),
FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, Stats0, FinalUserAcc0),
{ok, FinalUserAcc1}
end.
-
execute(CAcc) ->
case search_docs(CAcc) of
{ok, Bookmark, []} ->
@@ -152,7 +149,6 @@ execute(CAcc) ->
execute(FinalCAcc)
end.
-
search_docs(CAcc) ->
#cacc{
dbname = DbName,
@@ -167,19 +163,15 @@ search_docs(CAcc) ->
?MANGO_ERROR({text_search_error, {error, Reason}})
end.
-
handle_hits(CAcc, []) ->
{ok, CAcc};
-
handle_hits(CAcc0, [{Sort, Doc} | Rest]) ->
CAcc1 = handle_hit(CAcc0, Sort, Doc),
handle_hits(CAcc1, Rest).
-
handle_hit(CAcc0, Sort, not_found) ->
CAcc1 = update_bookmark(CAcc0, Sort),
CAcc1;
-
handle_hit(CAcc0, Sort, Doc) ->
#cacc{
limit = Limit,
@@ -208,7 +200,6 @@ handle_hit(CAcc0, Sort, Doc) ->
CAcc2
end.
-
apply_user_fun(CAcc, Doc) ->
FinalDoc = mango_fields:extract(Doc, CAcc#cacc.fields),
#cacc{
@@ -224,39 +215,40 @@ apply_user_fun(CAcc, Doc) ->
throw({stop, CAcc#cacc{user_acc = NewUserAcc, execution_stats = Stats0}})
end.
-
%% Convert Query to Dreyfus sort specifications
%% Convert <<"Field">>, <<"desc">> to <<"-Field">>
%% and append to the dreyfus query
sort_query(Opts, Selector) ->
{sort, {Sort}} = lists:keyfind(sort, 1, Opts),
- SortList = lists:map(fun(SortField) ->
- {Dir, RawSortField} = case SortField of
- {Field, <<"asc">>} -> {asc, Field};
- {Field, <<"desc">>} -> {desc, Field};
- Field when is_binary(Field) -> {asc, Field}
+ SortList = lists:map(
+ fun(SortField) ->
+ {Dir, RawSortField} =
+ case SortField of
+ {Field, <<"asc">>} -> {asc, Field};
+ {Field, <<"desc">>} -> {desc, Field};
+ Field when is_binary(Field) -> {asc, Field}
+ end,
+ SField = mango_selector_text:append_sort_type(RawSortField, Selector),
+ case Dir of
+ asc ->
+ SField;
+ desc ->
+ <<"-", SField/binary>>
+ end
end,
- SField = mango_selector_text:append_sort_type(RawSortField, Selector),
- case Dir of
- asc ->
- SField;
- desc ->
- <<"-", SField/binary>>
- end
- end, Sort),
+ Sort
+ ),
case SortList of
[] -> relevance;
_ -> SortList
end.
-
get_partition(Opts, Default) ->
case couch_util:get_value(partition, Opts) of
<<>> -> Default;
Else -> Else
end.
-
get_bookmark(Opts) ->
case lists:keyfind(bookmark, 1, Opts) of
{_, BM} when is_list(BM), BM /= [] ->
@@ -265,7 +257,6 @@ get_bookmark(Opts) ->
nil
end.
-
update_bookmark(CAcc, Sortable) ->
BM = CAcc#cacc.bookmark,
QueryArgs = CAcc#cacc.query_args,
@@ -273,28 +264,27 @@ update_bookmark(CAcc, Sortable) ->
NewBM = dreyfus_bookmark:update(Sort, BM, [Sortable]),
CAcc#cacc{bookmark = NewBM}.
-
pack_bookmark(Bookmark) ->
case dreyfus_bookmark:pack(Bookmark) of
null -> nil;
Enc -> Enc
end.
-
unpack_bookmark(DbName, Opts) ->
- NewBM = case lists:keyfind(bookmark, 1, Opts) of
- {_, nil} ->
- [];
- {_, Bin} ->
- try
- dreyfus_bookmark:unpack(DbName, Bin)
- catch _:_ ->
- ?MANGO_ERROR({invalid_bookmark, Bin})
- end
- end,
+ NewBM =
+ case lists:keyfind(bookmark, 1, Opts) of
+ {_, nil} ->
+ [];
+ {_, Bin} ->
+ try
+ dreyfus_bookmark:unpack(DbName, Bin)
+ catch
+ _:_ ->
+ ?MANGO_ERROR({invalid_bookmark, Bin})
+ end
+ end,
lists:keystore(bookmark, 1, Opts, {bookmark, NewBM}).
-
ddocid(Idx) ->
case mango_idx:ddoc(Idx) of
<<"_design/", Rest/binary>> ->
@@ -303,7 +293,6 @@ ddocid(Idx) ->
Else
end.
-
update_query_args(CAcc) ->
#cacc{
bookmark = Bookmark,
@@ -314,29 +303,32 @@ update_query_args(CAcc) ->
limit = get_limit(CAcc)
}.
-
get_limit(CAcc) ->
erlang:min(get_dreyfus_limit(), CAcc#cacc.limit + CAcc#cacc.skip).
-
get_dreyfus_limit() ->
config:get_integer("dreyfus", "max_limit", 200).
-
get_json_docs(DbName, Hits) ->
- Ids = lists:map(fun(#sortable{item = Item}) ->
- couch_util:get_value(<<"_id">>, Item#hit.fields)
- end, Hits),
+ Ids = lists:map(
+ fun(#sortable{item = Item}) ->
+ couch_util:get_value(<<"_id">>, Item#hit.fields)
+ end,
+ Hits
+ ),
% TODO: respect R query parameter (same as json indexes)
{ok, IdDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
- lists:map(fun(#sortable{item = Item} = Sort) ->
- Id = couch_util:get_value(<<"_id">>, Item#hit.fields),
- case lists:keyfind(Id, 1, IdDocs) of
- {Id, {doc, Doc}} ->
- {Sort, Doc};
- false ->
- {Sort, not_found}
- end
- end, Hits).
+ lists:map(
+ fun(#sortable{item = Item} = Sort) ->
+ Id = couch_util:get_value(<<"_id">>, Item#hit.fields),
+ case lists:keyfind(Id, 1, IdDocs) of
+ {Id, {doc, Doc}} ->
+ {Sort, Doc};
+ false ->
+ {Sort, not_found}
+ end
+ end,
+ Hits
+ ).
-endif.
diff --git a/src/mango/src/mango_cursor_view.erl b/src/mango/src/mango_cursor_view.erl
index 68d7c3b62..5656ffc0b 100644
--- a/src/mango/src/mango_cursor_view.erl
+++ b/src/mango/src/mango_cursor_view.erl
@@ -26,7 +26,6 @@
choose_best_index/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("fabric/include/fabric.hrl").
@@ -60,7 +59,6 @@ create(Db, Indexes, Selector, Opts) ->
bookmark = Bookmark
}}.
-
explain(Cursor) ->
#cursor{
opts = Opts
@@ -69,47 +67,50 @@ explain(Cursor) ->
BaseArgs = base_args(Cursor),
Args = apply_opts(Opts, BaseArgs),
- [{mrargs, {[
- {include_docs, Args#mrargs.include_docs},
- {view_type, Args#mrargs.view_type},
- {reduce, Args#mrargs.reduce},
- {partition, couch_mrview_util:get_extra(Args, partition, null)},
- {start_key, maybe_replace_max_json(Args#mrargs.start_key)},
- {end_key, maybe_replace_max_json(Args#mrargs.end_key)},
- {direction, Args#mrargs.direction},
- {stable, Args#mrargs.stable},
- {update, Args#mrargs.update},
- {conflicts, Args#mrargs.conflicts}
- ]}}].
-
+ [
+ {mrargs,
+ {[
+ {include_docs, Args#mrargs.include_docs},
+ {view_type, Args#mrargs.view_type},
+ {reduce, Args#mrargs.reduce},
+ {partition, couch_mrview_util:get_extra(Args, partition, null)},
+ {start_key, maybe_replace_max_json(Args#mrargs.start_key)},
+ {end_key, maybe_replace_max_json(Args#mrargs.end_key)},
+ {direction, Args#mrargs.direction},
+ {stable, Args#mrargs.stable},
+ {update, Args#mrargs.update},
+ {conflicts, Args#mrargs.conflicts}
+ ]}}
+ ].
% replace internal values that cannot
% be represented as a valid UTF-8 string
% with a token for JSON serialization
maybe_replace_max_json([]) ->
[];
-
maybe_replace_max_json(?MAX_STR) ->
<<"<MAX>">>;
-
maybe_replace_max_json([H | T] = EndKey) when is_list(EndKey) ->
- H1 = if H == ?MAX_JSON_OBJ -> <<"<MAX>">>;
+ H1 =
+ if
+ H == ?MAX_JSON_OBJ -> <<"<MAX>">>;
true -> H
- end,
+ end,
[H1 | maybe_replace_max_json(T)];
-
maybe_replace_max_json(EndKey) ->
EndKey.
-
base_args(#cursor{index = Idx, selector = Selector} = Cursor) ->
- {StartKey, EndKey} = case Cursor#cursor.ranges of
- [empty] ->
- {null, null};
- _ ->
- {mango_idx:start_key(Idx, Cursor#cursor.ranges),
- mango_idx:end_key(Idx, Cursor#cursor.ranges)}
- end,
+ {StartKey, EndKey} =
+ case Cursor#cursor.ranges of
+ [empty] ->
+ {null, null};
+ _ ->
+ {
+ mango_idx:start_key(Idx, Cursor#cursor.ranges),
+ mango_idx:end_key(Idx, Cursor#cursor.ranges)
+ }
+ end,
#mrargs{
view_type = map,
reduce = false,
@@ -123,7 +124,6 @@ base_args(#cursor{index = Idx, selector = Selector} = Cursor) ->
]
}.
-
execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFun, UserAcc) ->
Cursor = Cursor0#cursor{
user_fun = UserFun,
@@ -141,48 +141,55 @@ execute(#cursor{db = Db, index = Idx, execution_stats = Stats} = Cursor0, UserFu
Args = mango_json_bookmark:update_args(Bookmark, Args0),
UserCtx = couch_util:get_value(user_ctx, Opts, #user_ctx{}),
DbOpts = [{user_ctx, UserCtx}],
- Result = case mango_idx:def(Idx) of
- all_docs ->
- CB = fun ?MODULE:handle_all_docs_message/2,
- fabric:all_docs(Db, DbOpts, CB, Cursor, Args);
- _ ->
- CB = fun ?MODULE:handle_message/2,
- % Normal view
- DDoc = ddocid(Idx),
- Name = mango_idx:name(Idx),
- fabric:query_view(Db, DbOpts, DDoc, Name, CB, Cursor, Args)
- end,
+ Result =
+ case mango_idx:def(Idx) of
+ all_docs ->
+ CB = fun ?MODULE:handle_all_docs_message/2,
+ fabric:all_docs(Db, DbOpts, CB, Cursor, Args);
+ _ ->
+ CB = fun ?MODULE:handle_message/2,
+ % Normal view
+ DDoc = ddocid(Idx),
+ Name = mango_idx:name(Idx),
+ fabric:query_view(Db, DbOpts, DDoc, Name, CB, Cursor, Args)
+ end,
case Result of
{ok, LastCursor} ->
NewBookmark = mango_json_bookmark:create(LastCursor),
Arg = {add_key, bookmark, NewBookmark},
{_Go, FinalUserAcc} = UserFun(Arg, LastCursor#cursor.user_acc),
Stats0 = LastCursor#cursor.execution_stats,
- FinalUserAcc0 = mango_execution_stats:maybe_add_stats(Opts, UserFun, Stats0, FinalUserAcc),
- FinalUserAcc1 = mango_cursor:maybe_add_warning(UserFun, Cursor, Stats0, FinalUserAcc0),
+ FinalUserAcc0 = mango_execution_stats:maybe_add_stats(
+ Opts, UserFun, Stats0, FinalUserAcc
+ ),
+ FinalUserAcc1 = mango_cursor:maybe_add_warning(
+ UserFun, Cursor, Stats0, FinalUserAcc0
+ ),
{ok, FinalUserAcc1};
{error, Reason} ->
{error, Reason}
end
end.
-
% Any of these indexes may be a composite index. For each
% index find the most specific set of fields for each
% index. Ie, if an index has columns a, b, c, d, then
% check FieldRanges for a, b, c, and d and return
% the longest prefix of columns found.
composite_indexes(Indexes, FieldRanges) ->
- lists:foldl(fun(Idx, Acc) ->
- Cols = mango_idx:columns(Idx),
- Prefix = composite_prefix(Cols, FieldRanges),
- % Calcuate the difference between the FieldRanges/Selector
- % and the Prefix. We want to select the index with a prefix
- % that is as close to the FieldRanges as possible
- PrefixDifference = length(FieldRanges) - length(Prefix),
- [{Idx, Prefix, PrefixDifference} | Acc]
- end, [], Indexes).
-
+ lists:foldl(
+ fun(Idx, Acc) ->
+ Cols = mango_idx:columns(Idx),
+ Prefix = composite_prefix(Cols, FieldRanges),
+            % Calculate the difference between the FieldRanges/Selector
+ % and the Prefix. We want to select the index with a prefix
+ % that is as close to the FieldRanges as possible
+ PrefixDifference = length(FieldRanges) - length(Prefix),
+ [{Idx, Prefix, PrefixDifference} | Acc]
+ end,
+ [],
+ Indexes
+ ).
composite_prefix([], _) ->
[];
@@ -194,7 +201,6 @@ composite_prefix([Col | Rest], Ranges) ->
[]
end.
-
% The query planner
% First choose the index with the lowest difference between its
% Prefix and the FieldRanges. If that is equal, then
@@ -230,7 +236,6 @@ choose_best_index(_DbName, IndexRanges) ->
{SelectedIndex, SelectedIndexRanges, _} = hd(lists:sort(Cmp, IndexRanges)),
{SelectedIndex, SelectedIndexRanges}.
-
view_cb({meta, Meta}, Acc) ->
% Map function starting
put(mango_docs_examined, 0),
@@ -238,7 +243,7 @@ view_cb({meta, Meta}, Acc) ->
ok = rexi:stream2({meta, Meta}),
{ok, Acc};
view_cb({row, Row}, #mrargs{extra = Options} = Acc) ->
- ViewRow = #view_row{
+ ViewRow = #view_row{
id = couch_util:get_value(id, Row),
key = couch_util:get_value(key, Row),
doc = couch_util:get_value(doc, Row)
@@ -261,7 +266,7 @@ view_cb({row, Row}, #mrargs{extra = Options} = Acc) ->
false ->
maybe_send_mango_ping()
end
- end,
+ end,
{ok, Acc};
view_cb(complete, Acc) ->
% Send shard-level execution stats
@@ -272,7 +277,6 @@ view_cb(complete, Acc) ->
view_cb(ok, ddoc_updated) ->
rexi:reply({ok, ddoc_updated}).
-
maybe_send_mango_ping() ->
Current = os:timestamp(),
LastPing = get(mango_last_msg_timestamp),
@@ -286,24 +290,22 @@ maybe_send_mango_ping() ->
set_mango_msg_timestamp()
end.
-
set_mango_msg_timestamp() ->
put(mango_last_msg_timestamp, os:timestamp()).
-
handle_message({meta, _}, Cursor) ->
{ok, Cursor};
handle_message({row, Props}, Cursor) ->
case doc_member(Cursor, Props) of
{ok, Doc, {execution_stats, Stats}} ->
- Cursor1 = Cursor#cursor {
+ Cursor1 = Cursor#cursor{
execution_stats = Stats
},
Cursor2 = update_bookmark_keys(Cursor1, Props),
FinalDoc = mango_fields:extract(Doc, Cursor2#cursor.fields),
handle_doc(Cursor2, FinalDoc);
{no_match, _, {execution_stats, Stats}} ->
- Cursor1 = Cursor#cursor {
+ Cursor1 = Cursor#cursor{
execution_stats = Stats
},
{ok, Cursor1};
@@ -322,7 +324,6 @@ handle_message(complete, Cursor) ->
handle_message({error, Reason}, _Cursor) ->
{error, Reason}.
-
handle_all_docs_message({row, Props}, Cursor) ->
case is_design_doc(Props) of
true -> {ok, Cursor};
@@ -331,7 +332,6 @@ handle_all_docs_message({row, Props}, Cursor) ->
handle_all_docs_message(Message, Cursor) ->
handle_message(Message, Cursor).
-
handle_doc(#cursor{skip = S} = C, _) when S > 0 ->
{ok, C#cursor{skip = S - 1}};
handle_doc(#cursor{limit = L, execution_stats = Stats} = C, Doc) when L > 0 ->
@@ -346,7 +346,6 @@ handle_doc(#cursor{limit = L, execution_stats = Stats} = C, Doc) when L > 0 ->
handle_doc(C, _Doc) ->
{stop, C}.
-
ddocid(Idx) ->
case mango_idx:ddoc(Idx) of
<<"_design/", Rest/binary>> ->
@@ -355,19 +354,19 @@ ddocid(Idx) ->
Else
end.
-
apply_opts([], Args) ->
Args;
apply_opts([{r, RStr} | Rest], Args) ->
- IncludeDocs = case list_to_integer(RStr) of
- 1 ->
- true;
- R when R > 1 ->
- % We don't load the doc in the view query because
- % we have to do a quorum read in the coordinator
- % so there's no point.
- false
- end,
+ IncludeDocs =
+ case list_to_integer(RStr) of
+ 1 ->
+ true;
+ R when R > 1 ->
+ % We don't load the doc in the view query because
+ % we have to do a quorum read in the coordinator
+ % so there's no point.
+ false
+ end,
NewArgs = Args#mrargs{include_docs = IncludeDocs},
apply_opts(Rest, NewArgs);
apply_opts([{conflicts, true} | Rest], Args) ->
@@ -423,7 +422,6 @@ apply_opts([{_, _} | Rest], Args) ->
% Ignore unknown options
apply_opts(Rest, Args).
-
doc_member(Cursor, RowProps) ->
Db = Cursor#cursor.db,
Opts = Cursor#cursor.opts,
@@ -441,7 +439,7 @@ doc_member(Cursor, RowProps) ->
couch_stats:increment_counter([mango, quorum_docs_examined]),
Id = couch_util:get_value(id, RowProps),
case mango_util:defer(fabric, open_doc, [Db, Id, Opts]) of
- {ok, #doc{}=DocProps} ->
+ {ok, #doc{} = DocProps} ->
Doc = couch_doc:to_json_obj(DocProps, []),
match_doc(Selector, Doc, ExecutionStats1);
Else ->
@@ -452,7 +450,6 @@ doc_member(Cursor, RowProps) ->
{no_match, null, {execution_stats, ExecutionStats}}
end.
-
match_doc(Selector, Doc, ExecutionStats) ->
case mango_selector:match(Selector, Doc) of
true ->
@@ -461,51 +458,47 @@ match_doc(Selector, Doc, ExecutionStats) ->
{no_match, Doc, {execution_stats, ExecutionStats}}
end.
-
is_design_doc(RowProps) ->
case couch_util:get_value(id, RowProps) of
<<"_design/", _/binary>> -> true;
_ -> false
end.
-
update_bookmark_keys(#cursor{limit = Limit} = Cursor, Props) when Limit > 0 ->
Id = couch_util:get_value(id, Props),
Key = couch_util:get_value(key, Props),
- Cursor#cursor {
+ Cursor#cursor{
bookmark_docid = Id,
bookmark_key = Key
};
update_bookmark_keys(Cursor, _Props) ->
Cursor.
-
%%%%%%%% module tests below %%%%%%%%
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
does_not_refetch_doc_with_value_test() ->
- Cursor = #cursor {
+ Cursor = #cursor{
db = <<"db">>,
opts = [],
execution_stats = #execution_stats{},
selector = mango_selector:normalize({[{<<"user_id">>, <<"1234">>}]})
},
RowProps = [
- {id,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {key,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {doc,{
- [
- {<<"_id">>,<<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
- {<<"_rev">>,<<"1-a954fe2308f14307756067b0e18c2968">>},
- {<<"user_id">>,11}
- ]
- }}
+ {id, <<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
+ {key, <<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
+ {doc,
+ {
+ [
+ {<<"_id">>, <<"b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4">>},
+ {<<"_rev">>, <<"1-a954fe2308f14307756067b0e18c2968">>},
+ {<<"user_id">>, 11}
+ ]
+ }}
],
{Match, _, _} = doc_member(Cursor, RowProps),
?assertEqual(Match, ok).
-
-endif.
diff --git a/src/mango/src/mango_doc.erl b/src/mango/src/mango_doc.erl
index c22b15544..f8cb4c63b 100644
--- a/src/mango/src/mango_doc.erl
+++ b/src/mango/src/mango_doc.erl
@@ -12,7 +12,6 @@
-module(mango_doc).
-
-export([
from_bson/1,
@@ -26,36 +25,35 @@
set_field/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
from_bson({Props}) ->
- DocProps = case lists:keytake(<<"_id">>, 1, Props) of
- {value, {<<"_id">>, DocId0}, RestProps} ->
- DocId = case DocId0 of
- {[{<<"$id">>, Id}]} ->
- Id;
- Else ->
- Else
- end,
- [{<<"_id">>, DocId} | RestProps];
- false ->
- Props
- end,
+ DocProps =
+ case lists:keytake(<<"_id">>, 1, Props) of
+ {value, {<<"_id">>, DocId0}, RestProps} ->
+ DocId =
+ case DocId0 of
+ {[{<<"$id">>, Id}]} ->
+ Id;
+ Else ->
+ Else
+ end,
+ [{<<"_id">>, DocId} | RestProps];
+ false ->
+ Props
+ end,
Doc = couch_doc:from_json_obj({DocProps}),
case Doc#doc.id of
<<"">> ->
- Doc#doc{id=couch_uuids:new(), revs={0, []}};
+ Doc#doc{id = couch_uuids:new(), revs = {0, []}};
_ ->
Doc
end.
-
-apply_update(#doc{body={Props}}=Doc, Update) ->
+apply_update(#doc{body = {Props}} = Doc, Update) ->
NewProps = apply_update(Props, Update),
- Doc#doc{body={NewProps}};
+ Doc#doc{body = {NewProps}};
apply_update({Props}, {Update}) ->
Result = do_update({Props}, Update),
case has_operators(Result) of
@@ -66,13 +64,11 @@ apply_update({Props}, {Update}) ->
end,
Result.
-
update_as_insert({Update}) ->
NewProps = do_update_to_insert(Update, {[]}),
apply_update(NewProps, {Update}).
-
-has_operators(#doc{body=Body}) ->
+has_operators(#doc{body = Body}) ->
has_operators(Body);
has_operators({Props}) when is_list(Props) ->
has_operators_obj(Props);
@@ -85,7 +81,6 @@ has_operators(Val) when is_number(Val) ->
has_operators(Val) when is_binary(Val) ->
false.
-
has_operators_obj([]) ->
false;
has_operators_obj([{K, V} | Rest]) ->
@@ -101,7 +96,6 @@ has_operators_obj([{K, V} | Rest]) ->
end
end.
-
has_operators_arr([]) ->
false;
has_operators_arr([V | Rest]) ->
@@ -112,25 +106,24 @@ has_operators_arr([V | Rest]) ->
has_operators_arr(Rest)
end.
-
do_update(Props, []) ->
Props;
do_update(Props, [{Op, Value} | Rest]) ->
UpdateFun = update_operator_fun(Op),
- NewProps = case UpdateFun of
- undefined ->
- lists:keystore(Op, 1, Props, {Op, Value});
- Fun when is_function(Fun, 2) ->
- case Value of
- {ValueProps} ->
- Fun(Props, ValueProps);
- _ ->
- ?MANGO_ERROR({invalid_operand, Op, Value})
- end
- end,
+ NewProps =
+ case UpdateFun of
+ undefined ->
+ lists:keystore(Op, 1, Props, {Op, Value});
+ Fun when is_function(Fun, 2) ->
+ case Value of
+ {ValueProps} ->
+ Fun(Props, ValueProps);
+ _ ->
+ ?MANGO_ERROR({invalid_operand, Op, Value})
+ end
+ end,
do_update(NewProps, Rest).
-
update_operator_fun(<<"$", _/binary>> = Op) ->
OperatorFuns = [
% Object operators
@@ -160,217 +153,230 @@ update_operator_fun(<<"$", _/binary>> = Op) ->
update_operator_fun(_) ->
undefined.
-
do_update_inc(Props, []) ->
Props;
do_update_inc(Props, [{Field, Incr} | Rest]) ->
- if is_number(Incr) -> ok; true ->
- ?MANGO_ERROR({invalid_increment, Incr})
- end,
- NewProps = case get_field(Props, Field, fun is_number/1) of
- Value when is_number(Value) ->
- set_field(Props, Field, Value + Incr);
- not_found ->
- set_field(Props, Field, Incr);
- _ ->
- Props
+ if
+ is_number(Incr) -> ok;
+ true -> ?MANGO_ERROR({invalid_increment, Incr})
end,
+ NewProps =
+ case get_field(Props, Field, fun is_number/1) of
+ Value when is_number(Value) ->
+ set_field(Props, Field, Value + Incr);
+ not_found ->
+ set_field(Props, Field, Incr);
+ _ ->
+ Props
+ end,
do_update_inc(NewProps, Rest).
-
do_update_rename(Props, []) ->
Props;
do_update_rename(Props, [{OldField, NewField} | Rest]) ->
- NewProps = case rem_field(Props, OldField) of
- {RemProps, OldValue} ->
- set_field(RemProps, NewField, OldValue);
- _ ->
- Props
- end,
+ NewProps =
+ case rem_field(Props, OldField) of
+ {RemProps, OldValue} ->
+ set_field(RemProps, NewField, OldValue);
+ _ ->
+ Props
+ end,
do_update_rename(NewProps, Rest).
-
do_update_set_on_insert(Props, _) ->
% This is only called during calls to apply_update/2
% which means this isn't an insert, so drop it on
% the floor.
Props.
-
do_update_set(Props, []) ->
Props;
do_update_set(Props, [{Field, Value} | Rest]) ->
NewProps = set_field(Props, Field, Value),
do_update_set(NewProps, Rest).
-
do_update_unset(Props, []) ->
Props;
do_update_unset(Props, [{Field, _} | Rest]) ->
- NewProps = case rem_field(Props, Field) of
- {RemProps, _} ->
- RemProps;
- _ ->
- Props
- end,
+ NewProps =
+ case rem_field(Props, Field) of
+ {RemProps, _} ->
+ RemProps;
+ _ ->
+ Props
+ end,
do_update_unset(NewProps, Rest).
-
do_update_add_to_set(Props, []) ->
Props;
do_update_add_to_set(Props, [{Field, NewValue} | Rest]) ->
- ToAdd = case NewValue of
- {[{<<"$each">>, NewValues}]} when is_list(NewValues) ->
- NewValues;
- {[{<<"$each">>, NewValue}]} ->
- [NewValue];
- Else ->
- [Else]
- end,
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- FinalValues = lists:foldl(fun(V, Acc) ->
- lists:append(Acc, [V])
- end, OldValues, ToAdd),
- set_field(Props, Field, FinalValues);
- _ ->
- Props
- end,
+ ToAdd =
+ case NewValue of
+ {[{<<"$each">>, NewValues}]} when is_list(NewValues) ->
+ NewValues;
+ {[{<<"$each">>, NewValue}]} ->
+ [NewValue];
+ Else ->
+ [Else]
+ end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ FinalValues = lists:foldl(
+ fun(V, Acc) ->
+ lists:append(Acc, [V])
+ end,
+ OldValues,
+ ToAdd
+ ),
+ set_field(Props, Field, FinalValues);
+ _ ->
+ Props
+ end,
do_update_add_to_set(NewProps, Rest).
-
do_update_pop(Props, []) ->
Props;
do_update_pop(Props, [{Field, Pos} | Rest]) ->
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = case Pos > 0 of
- true ->
- lists:sublist(OldValues, 1, length(OldValues) - 1);
- false ->
- lists:sublist(OldValues, 2, length(OldValues) - 1)
- end,
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ NewValues =
+ case Pos > 0 of
+ true ->
+ lists:sublist(OldValues, 1, length(OldValues) - 1);
+ false ->
+ lists:sublist(OldValues, 2, length(OldValues) - 1)
+ end,
+ set_field(Props, Field, NewValues);
+ _ ->
+ Props
+ end,
do_update_pop(NewProps, Rest).
-
do_update_pull_all(Props, []) ->
Props;
do_update_pull_all(Props, [{Field, Values} | Rest]) ->
- ToRem = case is_list(Values) of
- true -> Values;
- false -> [Values]
- end,
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = lists:foldl(fun(ValToRem, Acc) ->
- % The logic in these filter functions is a bit
- % subtle. The way to think of this is that we
- % return true for all elements we want to keep.
- FilterFun = case has_operators(ValToRem) of
- true ->
- fun(A) ->
- Sel = mango_selector:normalize(ValToRem),
- not mango_selector:match(A, Sel)
- end;
- false ->
- fun(A) -> A /= ValToRem end
- end,
- lists:filter(FilterFun, Acc)
- end, OldValues, ToRem),
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
+ ToRem =
+ case is_list(Values) of
+ true -> Values;
+ false -> [Values]
+ end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ NewValues = lists:foldl(
+ fun(ValToRem, Acc) ->
+ % The logic in these filter functions is a bit
+ % subtle. The way to think of this is that we
+ % return true for all elements we want to keep.
+ FilterFun =
+ case has_operators(ValToRem) of
+ true ->
+ fun(A) ->
+ Sel = mango_selector:normalize(ValToRem),
+ not mango_selector:match(A, Sel)
+ end;
+ false ->
+ fun(A) -> A /= ValToRem end
+ end,
+ lists:filter(FilterFun, Acc)
+ end,
+ OldValues,
+ ToRem
+ ),
+ set_field(Props, Field, NewValues);
+ _ ->
+ Props
+ end,
do_update_add_to_set(NewProps, Rest).
-
do_update_pull(Props, []) ->
Props;
do_update_pull(Props, [{Field, Value} | Rest]) ->
- ToRem = case Value of
- {[{<<"$each">>, Values}]} when is_list(Values) ->
- Values;
- {[{<<"$each">>, Value}]} ->
- [Value];
- Else ->
- [Else]
- end,
+ ToRem =
+ case Value of
+ {[{<<"$each">>, Values}]} when is_list(Values) ->
+ Values;
+ {[{<<"$each">>, Value}]} ->
+ [Value];
+ Else ->
+ [Else]
+ end,
NewProps = do_update_pull_all(Props, [{Field, ToRem}]),
do_update_pull(NewProps, Rest).
-
do_update_push_all(_, []) ->
[];
do_update_push_all(Props, [{Field, Values} | Rest]) ->
- ToAdd = case is_list(Values) of
- true -> Values;
- false -> [Values]
- end,
- NewProps = case get_field(Props, Field) of
- OldValues when is_list(OldValues) ->
- NewValues = OldValues ++ ToAdd,
- set_field(Props, Field, NewValues);
- _ ->
- Props
- end,
+ ToAdd =
+ case is_list(Values) of
+ true -> Values;
+ false -> [Values]
+ end,
+ NewProps =
+ case get_field(Props, Field) of
+ OldValues when is_list(OldValues) ->
+ NewValues = OldValues ++ ToAdd,
+ set_field(Props, Field, NewValues);
+ _ ->
+ Props
+ end,
do_update_push_all(NewProps, Rest).
-
do_update_push(Props, []) ->
Props;
do_update_push(Props, [{Field, Value} | Rest]) ->
- ToAdd = case Value of
- {[{<<"$each">>, Values}]} when is_list(Values) ->
- Values;
- {[{<<"$each">>, Value}]} ->
- [Value];
- Else ->
- [Else]
- end,
+ ToAdd =
+ case Value of
+ {[{<<"$each">>, Values}]} when is_list(Values) ->
+ Values;
+ {[{<<"$each">>, Value}]} ->
+ [Value];
+ Else ->
+ [Else]
+ end,
NewProps = do_update_push_all(Props, [{Field, ToAdd}]),
do_update_push(NewProps, Rest).
-
-
do_update_bitwise(Props, []) ->
Props;
do_update_bitwise(Props, [{Field, Value} | Rest]) ->
- DoOp = case Value of
- {[{<<"and">>, Val}]} when is_integer(Val) ->
- fun(V) -> V band Val end;
- {[{<<"or">>, Val}]} when is_integer(Val) ->
- fun(V) -> V bor Val end;
- _ ->
- fun(V) -> V end
- end,
- NewProps = case get_field(Props, Field, fun is_number/1) of
- Value when is_number(Value) ->
- NewValue = DoOp(Value),
- set_field(Props, Field, NewValue);
- _ ->
- Props
- end,
+ DoOp =
+ case Value of
+ {[{<<"and">>, Val}]} when is_integer(Val) ->
+ fun(V) -> V band Val end;
+ {[{<<"or">>, Val}]} when is_integer(Val) ->
+ fun(V) -> V bor Val end;
+ _ ->
+ fun(V) -> V end
+ end,
+ NewProps =
+ case get_field(Props, Field, fun is_number/1) of
+ Value when is_number(Value) ->
+ NewValue = DoOp(Value),
+ set_field(Props, Field, NewValue);
+ _ ->
+ Props
+ end,
do_update_bitwise(NewProps, Rest).
-
do_update_to_insert([], Doc) ->
Doc;
do_update_to_insert([{<<"$setOnInsert">>, {FieldProps}}], Doc) ->
- lists:foldl(fun({Field, Value}, DocAcc) ->
- set_field(DocAcc, Field, Value)
- end, Doc, FieldProps);
+ lists:foldl(
+ fun({Field, Value}, DocAcc) ->
+ set_field(DocAcc, Field, Value)
+ end,
+ Doc,
+ FieldProps
+ );
do_update_to_insert([{_, _} | Rest], Doc) ->
do_update_to_insert(Rest, Doc).
-
get_field(Props, Field) ->
get_field(Props, Field, no_validation).
-
get_field(Props, Field, Validator) when is_binary(Field) ->
{ok, Path} = mango_util:parse_field(Field),
get_field(Props, Path, Validator);
@@ -402,13 +408,13 @@ get_field(Values, [Name | Rest], Validator) when is_list(Values) ->
false ->
bad_path
end
- catch error:badarg ->
- bad_path
+ catch
+ error:badarg ->
+ bad_path
end;
-get_field(_, [_|_], _) ->
+get_field(_, [_ | _], _) ->
bad_path.
-
rem_field(Props, Field) when is_binary(Field) ->
{ok, Path} = mango_util:parse_field(Field),
rem_field(Props, Path);
@@ -443,8 +449,9 @@ rem_field(Values, [Name]) when is_list(Values) ->
false ->
bad_path
end
- catch error:badarg ->
- bad_path
+ catch
+ error:badarg ->
+ bad_path
end;
rem_field(Values, [Name | Rest]) when is_list(Values) ->
% Name might be an integer index into an array
@@ -463,13 +470,13 @@ rem_field(Values, [Name | Rest]) when is_list(Values) ->
false ->
bad_path
end
- catch error:badarg ->
- bad_path
+ catch
+ error:badarg ->
+ bad_path
end;
-rem_field(_, [_|_]) ->
+rem_field(_, [_ | _]) ->
bad_path.
-
set_field(Props, Field, Value) when is_binary(Field) ->
{ok, Path} = mango_util:parse_field(Field),
set_field(Props, Path, Value);
@@ -495,8 +502,9 @@ set_field(Values, [Name], Value) when is_list(Values) ->
false ->
Values
end
- catch error:badarg ->
- Values
+ catch
+ error:badarg ->
+ Values
end;
set_field(Values, [Name | Rest], Value) when is_list(Values) ->
% Name might be an integer index into an array
@@ -511,27 +519,25 @@ set_field(Values, [Name | Rest], Value) when is_list(Values) ->
false ->
Values
end
- catch error:badarg ->
- Values
+ catch
+ error:badarg ->
+ Values
end;
-set_field(Value, [_|_], _) ->
+set_field(Value, [_ | _], _) ->
Value.
-
make_nested([], Value) ->
Value;
make_nested([Name | Rest], Value) ->
{[{Name, make_nested(Rest, Value)}]}.
-
rem_elem(1, [Value | Rest]) ->
{Rest, Value};
rem_elem(I, [Item | Rest]) when I > 1 ->
- {Tail, Value} = rem_elem(I+1, Rest),
+ {Tail, Value} = rem_elem(I + 1, Rest),
{[Item | Tail], Value}.
-
set_elem(1, [_ | Rest], Value) ->
[Value | Rest];
set_elem(I, [Item | Rest], Value) when I > 1 ->
- [Item | set_elem(I-1, Rest, Value)].
+ [Item | set_elem(I - 1, Rest, Value)].
diff --git a/src/mango/src/mango_epi.erl b/src/mango/src/mango_epi.erl
index 1fcd05b7f..b7ee68857 100644
--- a/src/mango/src/mango_epi.erl
+++ b/src/mango/src/mango_epi.erl
@@ -29,7 +29,7 @@ app() ->
providers() ->
[
- {chttpd_handlers, mango_httpd_handlers}
+ {chttpd_handlers, mango_httpd_handlers}
].
services() ->
diff --git a/src/mango/src/mango_error.erl b/src/mango/src/mango_error.erl
index bb545ad67..0301d079f 100644
--- a/src/mango/src/mango_error.erl
+++ b/src/mango/src/mango_error.erl
@@ -12,28 +12,29 @@
-module(mango_error).
-
-include_lib("couch/include/couch_db.hrl").
-
-export([
info/2
]).
-
info(mango_idx, {no_usable_index, missing_sort_index}) ->
{
400,
<<"no_usable_index">>,
- <<"No index exists for this sort, "
- "try indexing by the sort fields.">>
+ <<
+ "No index exists for this sort, "
+ "try indexing by the sort fields."
+ >>
};
info(mango_idx, {no_usable_index, missing_sort_index_partitioned}) ->
{
400,
<<"no_usable_index">>,
- <<"No partitioned index exists for this sort, "
- "try indexing by the sort fields.">>
+ <<
+ "No partitioned index exists for this sort, "
+ "try indexing by the sort fields."
+ >>
};
info(mango_idx, {no_usable_index, missing_sort_index_global}) ->
{
@@ -47,7 +48,6 @@ info(mango_json_bookmark, {invalid_bookmark, BadBookmark}) ->
<<"invalid_bookmark">>,
fmt("Invalid bookmark value: ~s", [?JSON_ENCODE(BadBookmark)])
};
-
info(mango_cursor_text, {invalid_bookmark, BadBookmark}) ->
{
400,
@@ -60,8 +60,9 @@ info(mango_cursor_text, multiple_text_indexes) ->
<<"multiple_text_indexes">>,
<<"You must specify an index with the `use_index` parameter.">>
};
-info(mango_cursor_text, {text_search_error, {error, {bad_request, Msg}}})
- when is_binary(Msg) ->
+info(mango_cursor_text, {text_search_error, {error, {bad_request, Msg}}}) when
+ is_binary(Msg)
+->
{
400,
<<"text_search_error">>,
@@ -73,7 +74,6 @@ info(mango_cursor_text, {text_search_error, {error, Error}}) ->
<<"text_search_error">>,
fmt("~p", [Error])
};
-
info(mango_fields, {invalid_fields_json, BadFields}) ->
{
400,
@@ -86,7 +86,6 @@ info(mango_fields, {invalid_field_json, BadField}) ->
<<"invalid_field">>,
fmt("Invalid JSON for field spec: ~w", [BadField])
};
-
info(mango_httpd, error_saving_ddoc) ->
{
500,
@@ -109,9 +108,8 @@ info(mango_httpd, invalid_list_index_params) ->
{
500,
<<"invalid_list_index_params">>,
- <<"Index parameter ranges: limit > 1, skip > 0" >>
+ <<"Index parameter ranges: limit > 1, skip > 0">>
};
-
info(mango_idx, {invalid_index_type, BadType}) ->
{
400,
@@ -122,8 +120,11 @@ info(mango_idx, {partitioned_option_mismatch, BadDDoc}) ->
{
400,
<<"invalid_partitioned_option">>,
- fmt("Requested partitioned option does not match existing value on"
- " design document ~s", [BadDDoc])
+ fmt(
+ "Requested partitioned option does not match existing value on"
+ " design document ~s",
+ [BadDDoc]
+ )
};
info(mango_idx, invalid_query_ddoc_language) ->
{
@@ -149,7 +150,6 @@ info(mango_idx, {index_service_unavailable, IndexName}) ->
<<"required index service unavailable">>,
fmt("~s", [IndexName])
};
-
info(mango_idx_view, {invalid_index_json, BadIdx}) ->
{
400,
@@ -160,9 +160,12 @@ info(mango_idx_text, {invalid_index_fields_definition, Def}) ->
{
400,
<<"invalid_index_fields_definition">>,
- fmt("Text Index field definitions must be of the form
- {\"name\": \"non-empty fieldname\", \"type\":
- \"boolean,number, or string\"}. Def: ~p", [Def])
+ fmt(
+ "Text Index field definitions must be of the form\n"
+ " {\"name\": \"non-empty fieldname\", \"type\":\n"
+ " \"boolean,number, or string\"}. Def: ~p",
+ [Def]
+ )
};
info(mango_idx_view, {index_not_found, BadIdx}) ->
{
@@ -170,7 +173,6 @@ info(mango_idx_view, {index_not_found, BadIdx}) ->
<<"invalid_index">>,
fmt("JSON index ~s not found in this design doc.", [BadIdx])
};
-
info(mango_idx_text, {invalid_index_text, BadIdx}) ->
{
400,
@@ -189,13 +191,14 @@ info(mango_idx_text, index_all_disabled) ->
<<"index_all_disabled">>,
<<"New text indexes are forbidden to index all fields.">>
};
-
info(mango_opts, {invalid_bulk_docs, Val}) ->
{
400,
<<"invalid_bulk_docs">>,
- fmt("Bulk Delete requires an array of non-null docids. Docids: ~w",
- [Val])
+ fmt(
+ "Bulk Delete requires an array of non-null docids. Docids: ~w",
+ [Val]
+ )
};
info(mango_opts, {invalid_ejson, Val}) ->
{
@@ -269,15 +272,15 @@ info(mango_opts, {invalid_index_name, BadName}) ->
<<"invalid_index_name">>,
fmt("Invalid index name: ~w", [BadName])
};
-
info(mango_opts, {multiple_text_operator, {invalid_selector, BadSel}}) ->
{
400,
<<"multiple_text_selector">>,
- fmt("Selector cannot contain more than one $text operator: ~w",
- [BadSel])
+ fmt(
+ "Selector cannot contain more than one $text operator: ~w",
+ [BadSel]
+ )
};
-
info(mango_selector, {invalid_selector, missing_field_name}) ->
{
400,
@@ -308,7 +311,6 @@ info(mango_selector, {bad_field, BadSel}) ->
<<"bad_field">>,
fmt("Invalid field normalization on selector: ~w", [BadSel])
};
-
info(mango_selector_text, {invalid_operator, Op}) ->
{
400,
@@ -317,14 +319,14 @@ info(mango_selector_text, {invalid_operator, Op}) ->
};
info(mango_selector_text, {text_sort_error, Field}) ->
S = binary_to_list(Field),
- Msg = "Unspecified or ambiguous sort type. Try appending :number or"
+ Msg =
+ "Unspecified or ambiguous sort type. Try appending :number or"
" :string to the sort field. ~s",
{
400,
<<"text_sort_error">>,
fmt(Msg, [S])
};
-
info(mango_sort, {invalid_sort_json, BadSort}) ->
{
400,
@@ -349,7 +351,6 @@ info(mango_sort, {unsupported, mixed_sort}) ->
<<"unsupported_mixed_sort">>,
<<"Sorts currently only support a single direction for all fields.">>
};
-
info(mango_util, {error_loading_doc, DocId}) ->
{
500,
@@ -368,7 +369,6 @@ info(mango_util, {invalid_ddoc_lang, Lang}) ->
<<"invalid_ddoc_lang">>,
fmt("Existing design doc has an invalid language: ~w", [Lang])
};
-
info(Module, Reason) ->
{
500,
@@ -376,6 +376,5 @@ info(Module, Reason) ->
fmt("Unknown Error: ~s :: ~w", [Module, Reason])
}.
-
fmt(Format, Args) ->
iolist_to_binary(io_lib:format(Format, Args)).
diff --git a/src/mango/src/mango_execution_stats.erl b/src/mango/src/mango_execution_stats.erl
index 5878a3190..0db3edf5f 100644
--- a/src/mango/src/mango_execution_stats.erl
+++ b/src/mango/src/mango_execution_stats.erl
@@ -12,7 +12,6 @@
-module(mango_execution_stats).
-
-export([
to_json/1,
incr_keys_examined/1,
@@ -25,10 +24,8 @@
maybe_add_stats/4
]).
-
-include("mango_cursor.hrl").
-
to_json(Stats) ->
{[
{total_keys_examined, Stats#execution_stats.totalKeysExamined},
@@ -38,50 +35,42 @@ to_json(Stats) ->
{execution_time_ms, Stats#execution_stats.executionTimeMs}
]}.
-
incr_keys_examined(Stats) ->
- Stats#execution_stats {
+ Stats#execution_stats{
totalKeysExamined = Stats#execution_stats.totalKeysExamined + 1
}.
-
incr_docs_examined(Stats) ->
incr_docs_examined(Stats, 1).
-
incr_docs_examined(Stats, N) ->
- Stats#execution_stats {
+ Stats#execution_stats{
totalDocsExamined = Stats#execution_stats.totalDocsExamined + N
}.
-
incr_quorum_docs_examined(Stats) ->
- Stats#execution_stats {
+ Stats#execution_stats{
totalQuorumDocsExamined = Stats#execution_stats.totalQuorumDocsExamined + 1
}.
-
incr_results_returned(Stats) ->
couch_stats:increment_counter([mango, results_returned]),
- Stats#execution_stats {
+ Stats#execution_stats{
resultsReturned = Stats#execution_stats.resultsReturned + 1
}.
-
log_start(Stats) ->
- Stats#execution_stats {
+ Stats#execution_stats{
executionStartTime = os:timestamp()
}.
-
log_end(Stats) ->
End = os:timestamp(),
Diff = timer:now_diff(End, Stats#execution_stats.executionStartTime) / 1000,
- Stats#execution_stats {
+ Stats#execution_stats{
executionTimeMs = Diff
}.
-
maybe_add_stats(Opts, UserFun, Stats0, UserAcc) ->
Stats1 = log_end(Stats0),
couch_stats:update_histogram([mango, query_time], Stats1#execution_stats.executionTimeMs),
diff --git a/src/mango/src/mango_fields.erl b/src/mango/src/mango_fields.erl
index 273256025..1745cf9dd 100644
--- a/src/mango/src/mango_fields.erl
+++ b/src/mango/src/mango_fields.erl
@@ -17,10 +17,8 @@
extract/2
]).
-
-include("mango.hrl").
-
new([]) ->
{ok, all_fields};
new(Fields) when is_list(Fields) ->
@@ -28,24 +26,26 @@ new(Fields) when is_list(Fields) ->
new(Else) ->
?MANGO_ERROR({invalid_fields_json, Else}).
-
extract(Doc, undefined) ->
Doc;
extract(Doc, all_fields) ->
Doc;
extract(Doc, Fields) ->
- lists:foldl(fun(F, NewDoc) ->
- {ok, Path} = mango_util:parse_field(F),
- case mango_doc:get_field(Doc, Path) of
- not_found ->
- NewDoc;
- bad_path ->
- NewDoc;
- Value ->
- mango_doc:set_field(NewDoc, Path, Value)
- end
- end, {[]}, Fields).
-
+ lists:foldl(
+ fun(F, NewDoc) ->
+ {ok, Path} = mango_util:parse_field(F),
+ case mango_doc:get_field(Doc, Path) of
+ not_found ->
+ NewDoc;
+ bad_path ->
+ NewDoc;
+ Value ->
+ mango_doc:set_field(NewDoc, Path, Value)
+ end
+ end,
+ {[]},
+ Fields
+ ).
field(Val) when is_binary(Val) ->
Val;
diff --git a/src/mango/src/mango_httpd.erl b/src/mango/src/mango_httpd.erl
index 624691bb9..002c45b2f 100644
--- a/src/mango/src/mango_httpd.erl
+++ b/src/mango/src/mango_httpd.erl
@@ -12,12 +12,10 @@
-module(mango_httpd).
-
-export([
handle_req/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
@@ -46,41 +44,42 @@ handle_req(#httpd{} = Req, Db0) ->
end
end.
-
-handle_req_int(#httpd{path_parts=[_, <<"_index">> | _]} = Req, Db) ->
+handle_req_int(#httpd{path_parts = [_, <<"_index">> | _]} = Req, Db) ->
handle_index_req(Req, Db);
-handle_req_int(#httpd{path_parts=[_, <<"_explain">> | _]} = Req, Db) ->
+handle_req_int(#httpd{path_parts = [_, <<"_explain">> | _]} = Req, Db) ->
handle_explain_req(Req, Db);
-handle_req_int(#httpd{path_parts=[_, <<"_find">> | _]} = Req, Db) ->
+handle_req_int(#httpd{path_parts = [_, <<"_find">> | _]} = Req, Db) ->
handle_find_req(Req, Db);
handle_req_int(_, _) ->
throw({not_found, missing}).
-
-handle_index_req(#httpd{method='GET', path_parts=[_, _]}=Req, Db) ->
- Params = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end,
- chttpd:qs(Req)),
+handle_index_req(#httpd{method = 'GET', path_parts = [_, _]} = Req, Db) ->
+ Params = lists:flatmap(
+ fun({K, V}) -> parse_index_param(K, V) end,
+ chttpd:qs(Req)
+ ),
Idxs = lists:sort(mango_idx:list(Db)),
JsonIdxs0 = lists:map(fun mango_idx:to_json/1, Idxs),
TotalRows = length(JsonIdxs0),
- Limit = case couch_util:get_value(limit, Params, TotalRows) of
- Limit0 when Limit0 < 1 ->
- ?MANGO_ERROR(invalid_list_index_params);
- Limit0 ->
- Limit0
- end,
- Skip = case couch_util:get_value(skip, Params, 0) of
- Skip0 when Skip0 < 0 ->
- ?MANGO_ERROR(invalid_list_index_params);
- Skip0 when Skip0 > TotalRows ->
- TotalRows;
- Skip0 ->
- Skip0
- end,
- JsonIdxs = lists:sublist(JsonIdxs0, Skip+1, Limit),
- chttpd:send_json(Req, {[{total_rows, TotalRows}, {indexes, JsonIdxs}]});
-
-handle_index_req(#httpd{method='POST', path_parts=[_, _]}=Req, Db) ->
+ Limit =
+ case couch_util:get_value(limit, Params, TotalRows) of
+ Limit0 when Limit0 < 1 ->
+ ?MANGO_ERROR(invalid_list_index_params);
+ Limit0 ->
+ Limit0
+ end,
+ Skip =
+ case couch_util:get_value(skip, Params, 0) of
+ Skip0 when Skip0 < 0 ->
+ ?MANGO_ERROR(invalid_list_index_params);
+ Skip0 when Skip0 > TotalRows ->
+ TotalRows;
+ Skip0 ->
+ Skip0
+ end,
+ JsonIdxs = lists:sublist(JsonIdxs0, Skip + 1, Limit),
+ chttpd:send_json(Req, {[{total_rows, TotalRows}, {indexes, JsonIdxs}]});
+handle_index_req(#httpd{method = 'POST', path_parts = [_, _]} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
{ok, Opts} = mango_opts:validate_idx_create(chttpd:json_body_obj(Req)),
{ok, Idx0} = mango_idx:new(Db, Opts),
@@ -89,62 +88,89 @@ handle_index_req(#httpd{method='POST', path_parts=[_, _]}=Req, Db) ->
{ok, DDoc} = mango_util:load_ddoc(Db, mango_idx:ddoc(Idx), DbOpts),
Id = Idx#idx.ddoc,
Name = Idx#idx.name,
- Status = case mango_idx:add(DDoc, Idx) of
- {ok, DDoc} ->
- <<"exists">>;
- {ok, NewDDoc} ->
- CreateOpts = get_idx_w_opts(Opts),
- case mango_crud:insert(Db, NewDDoc, CreateOpts) of
- {ok, [{RespProps}]} ->
- case lists:keyfind(error, 1, RespProps) of
- {error, Reason} ->
- ?MANGO_ERROR({error_saving_ddoc, Reason});
- _ ->
- <<"created">>
- end;
- _ ->
- ?MANGO_ERROR(error_saving_ddoc)
- end
- end,
- chttpd:send_json(Req, {[{result, Status}, {id, Id}, {name, Name}]});
-
-handle_index_req(#httpd{path_parts=[_, _]}=Req, _Db) ->
+ Status =
+ case mango_idx:add(DDoc, Idx) of
+ {ok, DDoc} ->
+ <<"exists">>;
+ {ok, NewDDoc} ->
+ CreateOpts = get_idx_w_opts(Opts),
+ case mango_crud:insert(Db, NewDDoc, CreateOpts) of
+ {ok, [{RespProps}]} ->
+ case lists:keyfind(error, 1, RespProps) of
+ {error, Reason} ->
+ ?MANGO_ERROR({error_saving_ddoc, Reason});
+ _ ->
+ <<"created">>
+ end;
+ _ ->
+ ?MANGO_ERROR(error_saving_ddoc)
+ end
+ end,
+ chttpd:send_json(Req, {[{result, Status}, {id, Id}, {name, Name}]});
+handle_index_req(#httpd{path_parts = [_, _]} = Req, _Db) ->
chttpd:send_method_not_allowed(Req, "GET,POST");
-
%% Essentially we just iterate through the list of ddoc ids passed in and
%% delete one by one. If an error occurs, all previous documents will be
%% deleted, but an error will be thrown for the current ddoc id.
-handle_index_req(#httpd{method='POST', path_parts=[_, <<"_index">>,
- <<"_bulk_delete">>]}=Req, Db) ->
+handle_index_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [
+ _,
+ <<"_index">>,
+ <<"_bulk_delete">>
+ ]
+ } = Req,
+ Db
+) ->
chttpd:validate_ctype(Req, "application/json"),
{ok, Opts} = mango_opts:validate_bulk_delete(chttpd:json_body_obj(Req)),
Idxs = mango_idx:list(Db),
DDocs = get_bulk_delete_ddocs(Opts),
DelOpts = get_idx_w_opts(Opts),
- {Success, Fail} = lists:foldl(fun(DDocId0, {Success0, Fail0}) ->
- DDocId = convert_to_design_id(DDocId0),
- Filt = fun(Idx) -> mango_idx:ddoc(Idx) == DDocId end,
- Id = {<<"id">>, DDocId},
- case mango_idx:delete(Filt, Db, Idxs, DelOpts) of
- {ok, true} ->
- {[{[Id, {<<"ok">>, true}]} | Success0], Fail0};
- {error, Error} ->
- {Success0, [{[Id, {<<"error">>, Error}]} | Fail0]}
- end
- end, {[], []}, DDocs),
+ {Success, Fail} = lists:foldl(
+ fun(DDocId0, {Success0, Fail0}) ->
+ DDocId = convert_to_design_id(DDocId0),
+ Filt = fun(Idx) -> mango_idx:ddoc(Idx) == DDocId end,
+ Id = {<<"id">>, DDocId},
+ case mango_idx:delete(Filt, Db, Idxs, DelOpts) of
+ {ok, true} ->
+ {[{[Id, {<<"ok">>, true}]} | Success0], Fail0};
+ {error, Error} ->
+ {Success0, [{[Id, {<<"error">>, Error}]} | Fail0]}
+ end
+ end,
+ {[], []},
+ DDocs
+ ),
chttpd:send_json(Req, {[{<<"success">>, Success}, {<<"fail">>, Fail}]});
-
-handle_index_req(#httpd{path_parts=[_, <<"_index">>,
- <<"_bulk_delete">>]}=Req, _Db) ->
+handle_index_req(
+ #httpd{
+ path_parts = [
+ _,
+ <<"_index">>,
+ <<"_bulk_delete">>
+ ]
+ } = Req,
+ _Db
+) ->
chttpd:send_method_not_allowed(Req, "POST");
-
-handle_index_req(#httpd{method='DELETE',
- path_parts=[A, B, <<"_design">>, DDocId0, Type, Name]}=Req, Db) ->
+handle_index_req(
+ #httpd{
+ method = 'DELETE',
+ path_parts = [A, B, <<"_design">>, DDocId0, Type, Name]
+ } = Req,
+ Db
+) ->
PathParts = [A, B, <<"_design/", DDocId0/binary>>, Type, Name],
- handle_index_req(Req#httpd{path_parts=PathParts}, Db);
-
-handle_index_req(#httpd{method='DELETE',
- path_parts=[_, _, DDocId0, Type, Name]}=Req, Db) ->
+ handle_index_req(Req#httpd{path_parts = PathParts}, Db);
+handle_index_req(
+ #httpd{
+ method = 'DELETE',
+ path_parts = [_, _, DDocId0, Type, Name]
+ } = Req,
+ Db
+) ->
Idxs = mango_idx:list(Db),
DDocId = convert_to_design_id(DDocId0),
DelOpts = get_idx_del_opts(Req),
@@ -162,24 +188,20 @@ handle_index_req(#httpd{method='DELETE',
{error, Error} ->
?MANGO_ERROR({error_saving_ddoc, Error})
end;
-
-handle_index_req(#httpd{path_parts=[_, _, _DDocId0, _Type, _Name]}=Req, _Db) ->
+handle_index_req(#httpd{path_parts = [_, _, _DDocId0, _Type, _Name]} = Req, _Db) ->
chttpd:send_method_not_allowed(Req, "DELETE").
-
-handle_explain_req(#httpd{method='POST'}=Req, Db) ->
+handle_explain_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
Body = maybe_set_partition(Req),
{ok, Opts0} = mango_opts:validate_find(Body),
{value, {selector, Sel}, Opts} = lists:keytake(selector, 1, Opts0),
Resp = mango_crud:explain(Db, Sel, Opts),
chttpd:send_json(Req, Resp);
-
handle_explain_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-
-handle_find_req(#httpd{method='POST'}=Req, Db) ->
+handle_find_req(#httpd{method = 'POST'} = Req, Db) ->
chttpd:validate_ctype(Req, "application/json"),
Body = maybe_set_partition(Req),
{ok, Opts0} = mango_opts:validate_find(Body),
@@ -191,17 +213,13 @@ handle_find_req(#httpd{method='POST'}=Req, Db) ->
{error, Error} ->
chttpd:send_error(Req, Error)
end;
-
-
handle_find_req(Req, _Db) ->
chttpd:send_method_not_allowed(Req, "POST").
-
-set_user_ctx(#httpd{user_ctx=Ctx}, Db) ->
+set_user_ctx(#httpd{user_ctx = Ctx}, Db) ->
{ok, NewDb} = couch_db:set_user_ctx(Db, Ctx),
NewDb.
-
get_idx_w_opts(Opts) ->
case lists:keyfind(w, 1, Opts) of
{w, N} when is_integer(N), N > 0 ->
@@ -210,7 +228,6 @@ get_idx_w_opts(Opts) ->
[{w, "2"}]
end.
-
get_bulk_delete_ddocs(Opts) ->
case lists:keyfind(docids, 1, Opts) of
{docids, DDocs} when is_list(DDocs) ->
@@ -219,17 +236,16 @@ get_bulk_delete_ddocs(Opts) ->
[]
end.
-
get_idx_del_opts(Req) ->
try
WStr = chttpd:qs_value(Req, "w", "2"),
_ = list_to_integer(WStr),
[{w, WStr}]
- catch _:_ ->
- [{w, "2"}]
+ catch
+ _:_ ->
+ [{w, "2"}]
end.
-
maybe_set_partition(Req) ->
{Props} = chttpd:json_body_obj(Req),
case chttpd:qs_value(Req, "partition", undefined) of
@@ -246,31 +262,31 @@ maybe_set_partition(Req) ->
end
end.
-
convert_to_design_id(DDocId) ->
case DDocId of
<<"_design/", _/binary>> -> DDocId;
_ -> <<"_design/", DDocId/binary>>
end.
-
start_find_resp(Req) ->
chttpd:start_delayed_json_response(Req, 200, [], "{\"docs\":[").
-
end_find_resp(Acc0) ->
- #vacc{resp=Resp00, buffer=Buf, kvs=KVs, threshold=Max} = Acc0,
+ #vacc{resp = Resp00, buffer = Buf, kvs = KVs, threshold = Max} = Acc0,
{ok, Resp0} = chttpd:close_delayed_json_object(Resp00, Buf, "\r\n]", Max),
- FinalAcc = lists:foldl(fun({K, V}, Acc) ->
- JK = ?JSON_ENCODE(K),
- JV = ?JSON_ENCODE(V),
- [JV, ": ", JK, ",\r\n" | Acc]
- end, [], KVs),
+ FinalAcc = lists:foldl(
+ fun({K, V}, Acc) ->
+ JK = ?JSON_ENCODE(K),
+ JV = ?JSON_ENCODE(V),
+ [JV, ": ", JK, ",\r\n" | Acc]
+ end,
+ [],
+ KVs
+ ),
Chunk = lists:reverse(FinalAcc, ["}\r\n"]),
{ok, Resp1} = chttpd:send_delayed_chunk(Resp0, Chunk),
chttpd:end_delayed_json_response(Resp1).
-
run_find(Resp, Db, Sel, Opts) ->
Acc0 = #vacc{
resp = Resp,
@@ -280,18 +296,18 @@ run_find(Resp, Db, Sel, Opts) ->
},
mango_crud:find(Db, Sel, fun handle_doc/2, Acc0, Opts).
-
handle_doc({add_key, Key, Value}, Acc0) ->
- #vacc{kvs=KVs} = Acc0,
+ #vacc{kvs = KVs} = Acc0,
NewKVs = lists:keystore(Key, 1, KVs, {Key, Value}),
{ok, Acc0#vacc{kvs = NewKVs}};
handle_doc({row, Doc}, Acc0) ->
- #vacc{prepend=Prepend} = Acc0,
+ #vacc{prepend = Prepend} = Acc0,
Chunk = [Prepend, ?JSON_ENCODE(Doc)],
maybe_flush_response(Acc0, Chunk, iolist_size(Chunk)).
-maybe_flush_response(#vacc{bufsize=Size, threshold=Max} = Acc, Data, Len)
- when Size > 0 andalso (Size + Len) > Max ->
+maybe_flush_response(#vacc{bufsize = Size, threshold = Max} = Acc, Data, Len) when
+ Size > 0 andalso (Size + Len) > Max
+->
#vacc{buffer = Buffer, resp = Resp} = Acc,
{ok, R1} = chttpd:send_delayed_chunk(Resp, Buffer),
{ok, Acc#vacc{prepend = ",\r\n", buffer = Data, bufsize = Len, resp = R1}};
@@ -304,18 +320,17 @@ maybe_flush_response(Acc0, Data, Len) ->
},
{ok, Acc}.
-
parse_index_param("limit", Value) ->
[{limit, parse_val(Value)}];
parse_index_param("skip", Value) ->
[{skip, parse_val(Value)}];
parse_index_param(_Key, _Value) ->
- [].
+ [].
parse_val(Value) ->
case (catch list_to_integer(Value)) of
- IntVal when is_integer(IntVal) ->
- IntVal;
- _ ->
- ?MANGO_ERROR(invalid_list_index_params)
+ IntVal when is_integer(IntVal) ->
+ IntVal;
+ _ ->
+ ?MANGO_ERROR(invalid_list_index_params)
end.
diff --git a/src/mango/src/mango_httpd_handlers.erl b/src/mango/src/mango_httpd_handlers.erl
index 80e5e277e..feb693e94 100644
--- a/src/mango/src/mango_httpd_handlers.erl
+++ b/src/mango/src/mango_httpd_handlers.erl
@@ -16,9 +16,9 @@
url_handler(_) -> no_match.
-db_handler(<<"_index">>) -> fun mango_httpd:handle_req/2;
-db_handler(<<"_explain">>) -> fun mango_httpd:handle_req/2;
-db_handler(<<"_find">>) -> fun mango_httpd:handle_req/2;
+db_handler(<<"_index">>) -> fun mango_httpd:handle_req/2;
+db_handler(<<"_explain">>) -> fun mango_httpd:handle_req/2;
+db_handler(<<"_find">>) -> fun mango_httpd:handle_req/2;
db_handler(_) -> no_match.
design_handler(_) -> no_match.
diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl
index 5d06a8fe3..a2daa8b9e 100644
--- a/src/mango/src/mango_idx.erl
+++ b/src/mango/src/mango_idx.erl
@@ -16,7 +16,6 @@
-module(mango_idx).
-
-export([
list/1,
recover/1,
@@ -47,22 +46,19 @@
get_partial_filter_selector/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
-
list(Db) ->
{ok, Indexes} = ddoc_cache:open(db_to_name(Db), ?MODULE),
Indexes.
-
get_usable_indexes(Db, Selector, Opts) ->
ExistingIndexes = mango_idx:list(Db),
GlobalIndexes = mango_cursor:remove_indexes_with_partial_filter_selector(
- ExistingIndexes
- ),
+ ExistingIndexes
+ ),
UserSpecifiedIndex = mango_cursor:maybe_filter_indexes_by_ddoc(ExistingIndexes, Opts),
UsableIndexes0 = lists:usort(GlobalIndexes ++ UserSpecifiedIndex),
UsableIndexes1 = filter_partition_indexes(UsableIndexes0, Opts),
@@ -77,7 +73,6 @@ get_usable_indexes(Db, Selector, Opts) ->
UsableIndexes
end.
-
mango_sort_error(Db, Opts) ->
case {fabric_util:is_partitioned(Db), is_opts_partitioned(Opts)} of
{false, _} ->
@@ -88,7 +83,6 @@ mango_sort_error(Db, Opts) ->
?MANGO_ERROR({no_usable_index, missing_sort_index_global})
end.
-
recover(Db) ->
{ok, DDocs0} = mango_util:open_ddocs(Db),
Pred = fun({Props}) ->
@@ -99,10 +93,14 @@ recover(Db) ->
end,
DDocs = lists:filter(Pred, DDocs0),
Special = special(Db),
- {ok, Special ++ lists:flatmap(fun(Doc) ->
- from_ddoc(Db, Doc)
- end, DDocs)}.
-
+ {ok,
+ Special ++
+ lists:flatmap(
+ fun(Doc) ->
+ from_ddoc(Db, Doc)
+ end,
+ DDocs
+ )}.
get_sort_fields(Opts) ->
case lists:keyfind(sort, 1, Opts) of
@@ -112,7 +110,6 @@ get_sort_fields(Opts) ->
[]
end.
-
new(Db, Opts) ->
Def = get_idx_def(Opts),
Type = get_idx_type(Opts),
@@ -128,12 +125,10 @@ new(Db, Opts) ->
opts = filter_opts(Opts)
}}.
-
validate_new(Idx, Db) ->
Mod = idx_mod(Idx),
Mod:validate_new(Idx, Db).
-
add(DDoc, Idx) ->
Mod = idx_mod(Idx),
{ok, NewDDoc1} = Mod:add(DDoc, Idx),
@@ -142,7 +137,6 @@ add(DDoc, Idx) ->
Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc2#doc.body)),
{ok, NewDDoc2#doc{body = Body}}.
-
remove(DDoc, Idx) ->
Mod = idx_mod(Idx),
{ok, NewDDoc} = Mod:remove(DDoc, Idx),
@@ -150,18 +144,18 @@ remove(DDoc, Idx) ->
Body = ?JSON_DECODE(?JSON_ENCODE(NewDDoc#doc.body)),
{ok, NewDDoc#doc{body = Body}}.
-
delete(Filt, Db, Indexes, DelOpts) ->
case lists:filter(Filt, Indexes) of
[Idx] ->
{ok, DDoc} = mango_util:load_ddoc(Db, mango_idx:ddoc(Idx)),
{ok, NewDDoc} = mango_idx:remove(DDoc, Idx),
- FinalDDoc = case NewDDoc#doc.body of
- {[{<<"language">>, <<"query">>}]} ->
- NewDDoc#doc{deleted = true, body = {[]}};
- _ ->
- NewDDoc
- end,
+ FinalDDoc =
+ case NewDDoc#doc.body of
+ {[{<<"language">>, <<"query">>}]} ->
+ NewDDoc#doc{deleted = true, body = {[]}};
+ _ ->
+ NewDDoc
+ end,
case mango_crud:insert(Db, FinalDDoc, DelOpts) of
{ok, _} ->
{ok, true};
@@ -172,31 +166,32 @@ delete(Filt, Db, Indexes, DelOpts) ->
{error, not_found}
end.
-
from_ddoc(Db, {Props}) ->
DbName = db_to_name(Db),
DDoc = proplists:get_value(<<"_id">>, Props),
case proplists:get_value(<<"language">>, Props) of
<<"query">> -> ok;
- _ ->
- ?MANGO_ERROR(invalid_query_ddoc_language)
- end,
- IdxMods = case clouseau_rpc:connected() of
- true ->
- [mango_idx_view, mango_idx_text];
- false ->
- [mango_idx_view]
+ _ -> ?MANGO_ERROR(invalid_query_ddoc_language)
end,
+ IdxMods =
+ case clouseau_rpc:connected() of
+ true ->
+ [mango_idx_view, mango_idx_text];
+ false ->
+ [mango_idx_view]
+ end,
Idxs = lists:flatmap(fun(Mod) -> Mod:from_ddoc({Props}) end, IdxMods),
- lists:map(fun(Idx) ->
- Idx#idx{
- dbname = DbName,
- ddoc = DDoc,
- partitioned = get_idx_partitioned(Db, Props)
- }
- end, Idxs).
-
+ lists:map(
+ fun(Idx) ->
+ Idx#idx{
+ dbname = DbName,
+ ddoc = DDoc,
+ partitioned = get_idx_partitioned(Db, Props)
+ }
+ end,
+ Idxs
+ ).
special(Db) ->
AllDocs = #idx{
@@ -209,63 +204,50 @@ special(Db) ->
% Add one for _update_seq
[AllDocs].
-
-dbname(#idx{dbname=DbName}) ->
+dbname(#idx{dbname = DbName}) ->
DbName.
-
-ddoc(#idx{ddoc=DDoc}) ->
+ddoc(#idx{ddoc = DDoc}) ->
DDoc.
-
-name(#idx{name=Name}) ->
+name(#idx{name = Name}) ->
Name.
-
-type(#idx{type=Type}) ->
+type(#idx{type = Type}) ->
Type.
-
-def(#idx{def=Def}) ->
+def(#idx{def = Def}) ->
Def.
-
-partitioned(#idx{partitioned=Partitioned}) ->
+partitioned(#idx{partitioned = Partitioned}) ->
Partitioned.
-
-opts(#idx{opts=Opts}) ->
+opts(#idx{opts = Opts}) ->
Opts.
-
-to_json(#idx{}=Idx) ->
+to_json(#idx{} = Idx) ->
Mod = idx_mod(Idx),
Mod:to_json(Idx).
-
-columns(#idx{}=Idx) ->
+columns(#idx{} = Idx) ->
Mod = idx_mod(Idx),
Mod:columns(Idx).
-
-is_usable(#idx{}=Idx, Selector, SortFields) ->
+is_usable(#idx{} = Idx, Selector, SortFields) ->
Mod = idx_mod(Idx),
Mod:is_usable(Idx, Selector, SortFields).
-
-start_key(#idx{}=Idx, Ranges) ->
+start_key(#idx{} = Idx, Ranges) ->
Mod = idx_mod(Idx),
Mod:start_key(Ranges).
-
-end_key(#idx{}=Idx, Ranges) ->
+end_key(#idx{} = Idx, Ranges) ->
Mod = idx_mod(Idx),
Mod:end_key(Ranges).
-
cursor_mod(#idx{type = <<"json">>}) ->
mango_cursor_view;
-cursor_mod(#idx{def = all_docs, type= <<"special">>}) ->
+cursor_mod(#idx{def = all_docs, type = <<"special">>}) ->
mango_cursor_special;
cursor_mod(#idx{type = <<"text">>}) ->
case clouseau_rpc:connected() of
@@ -275,7 +257,6 @@ cursor_mod(#idx{type = <<"text">>}) ->
?MANGO_ERROR({index_service_unavailable, <<"text">>})
end.
-
idx_mod(#idx{type = <<"json">>}) ->
mango_idx_view;
idx_mod(#idx{type = <<"special">>}) ->
@@ -288,7 +269,6 @@ idx_mod(#idx{type = <<"text">>}) ->
?MANGO_ERROR({index_service_unavailable, <<"text">>})
end.
-
db_to_name(Name) when is_binary(Name) ->
Name;
db_to_name(Name) when is_list(Name) ->
@@ -296,7 +276,6 @@ db_to_name(Name) when is_list(Name) ->
db_to_name(Db) ->
couch_db:name(Db).
-
get_idx_def(Opts) ->
case proplists:get_value(def, Opts) of
undefined ->
@@ -305,23 +284,24 @@ get_idx_def(Opts) ->
Def
end.
-
get_idx_type(Opts) ->
case proplists:get_value(type, Opts) of
- <<"json">> -> <<"json">>;
- <<"text">> -> case clouseau_rpc:connected() of
- true ->
- <<"text">>;
- false ->
- ?MANGO_ERROR({index_service_unavailable, <<"text">>})
+ <<"json">> ->
+ <<"json">>;
+ <<"text">> ->
+ case clouseau_rpc:connected() of
+ true ->
+ <<"text">>;
+ false ->
+ ?MANGO_ERROR({index_service_unavailable, <<"text">>})
end;
%<<"geo">> -> <<"geo">>;
- undefined -> <<"json">>;
+ undefined ->
+ <<"json">>;
BadType ->
?MANGO_ERROR({invalid_index_type, BadType})
end.
-
get_idx_ddoc(Idx, Opts) ->
case proplists:get_value(ddoc, Opts) of
<<"_design/", _Rest/binary>> = Name ->
@@ -333,7 +313,6 @@ get_idx_ddoc(Idx, Opts) ->
<<"_design/", Bin/binary>>
end.
-
get_idx_name(Idx, Opts) ->
case proplists:get_value(name, Opts) of
Name when is_binary(Name) ->
@@ -342,14 +321,12 @@ get_idx_name(Idx, Opts) ->
gen_name(Idx, Opts)
end.
-
gen_name(Idx, Opts0) ->
Opts = lists:usort(Opts0),
TermBin = term_to_binary({Idx, Opts}),
Sha = crypto:hash(sha, TermBin),
mango_util:enc_hex(Sha).
-
get_idx_partitioned(Opts) ->
case proplists:get_value(partitioned, Opts) of
B when is_boolean(B) ->
@@ -360,7 +337,6 @@ get_idx_partitioned(Opts) ->
undefined
end.
-
set_ddoc_partitioned(DDoc, Idx) ->
% We have to verify that the new index being added
% to this design document either matches the current
@@ -371,42 +347,45 @@ set_ddoc_partitioned(DDoc, Idx) ->
body = {BodyProps}
} = DDoc,
OldDOpts = couch_util:get_value(<<"options">>, BodyProps),
- OldOpt = case OldDOpts of
- {OldDOptProps} when is_list(OldDOptProps) ->
- couch_util:get_value(<<"partitioned">>, OldDOptProps);
- _ ->
- undefined
- end,
+ OldOpt =
+ case OldDOpts of
+ {OldDOptProps} when is_list(OldDOptProps) ->
+ couch_util:get_value(<<"partitioned">>, OldDOptProps);
+ _ ->
+ undefined
+ end,
% If new matches old we're done
- if Idx#idx.partitioned == OldOpt -> DDoc; true ->
- % If we're creating a ddoc then we can set the options
- case Revs == {0, []} of
- true when Idx#idx.partitioned /= undefined ->
- set_ddoc_partitioned_option(DDoc, Idx#idx.partitioned);
- true when Idx#idx.partitioned == undefined ->
- DDoc;
- false ->
- ?MANGO_ERROR({partitioned_option_mismatch, DDocId})
- end
+ if
+ Idx#idx.partitioned == OldOpt ->
+ DDoc;
+ true ->
+ % If we're creating a ddoc then we can set the options
+ case Revs == {0, []} of
+ true when Idx#idx.partitioned /= undefined ->
+ set_ddoc_partitioned_option(DDoc, Idx#idx.partitioned);
+ true when Idx#idx.partitioned == undefined ->
+ DDoc;
+ false ->
+ ?MANGO_ERROR({partitioned_option_mismatch, DDocId})
+ end
end.
-
set_ddoc_partitioned_option(DDoc, Partitioned) ->
#doc{
body = {BodyProps}
} = DDoc,
- NewProps = case couch_util:get_value(<<"options">>, BodyProps) of
- {Existing} when is_list(Existing) ->
- Opt = {<<"partitioned">>, Partitioned},
- New = lists:keystore(<<"partitioned">>, 1, Existing, Opt),
- lists:keystore(<<"options">>, 1, BodyProps, {<<"options">>, New});
- undefined ->
- New = {<<"options">>, {[{<<"partitioned">>, Partitioned}]}},
- lists:keystore(<<"options">>, 1, BodyProps, New)
- end,
+ NewProps =
+ case couch_util:get_value(<<"options">>, BodyProps) of
+ {Existing} when is_list(Existing) ->
+ Opt = {<<"partitioned">>, Partitioned},
+ New = lists:keystore(<<"partitioned">>, 1, Existing, Opt),
+ lists:keystore(<<"options">>, 1, BodyProps, {<<"options">>, New});
+ undefined ->
+ New = {<<"options">>, {[{<<"partitioned">>, Partitioned}]}},
+ lists:keystore(<<"options">>, 1, BodyProps, New)
+ end,
DDoc#doc{body = {NewProps}}.
-
get_idx_partitioned(Db, DDocProps) ->
Default = fabric_util:is_partitioned(Db),
case couch_util:get_value(<<"options">>, DDocProps) of
@@ -429,18 +408,17 @@ is_opts_partitioned(Opts) ->
true
end.
-
filter_partition_indexes(Indexes, Opts) ->
- PFilt = case is_opts_partitioned(Opts) of
- false ->
- fun(#idx{partitioned = P}) -> not P end;
- true ->
- fun(#idx{partitioned = P}) -> P end
- end,
+ PFilt =
+ case is_opts_partitioned(Opts) of
+ false ->
+ fun(#idx{partitioned = P}) -> not P end;
+ true ->
+ fun(#idx{partitioned = P}) -> P end
+ end,
Filt = fun(Idx) -> type(Idx) == <<"special">> orelse PFilt(Idx) end,
lists:filter(Filt, Indexes).
-
filter_opts([]) ->
[];
filter_opts([{user_ctx, _} | Rest]) ->
@@ -458,7 +436,6 @@ filter_opts([{partitioned, _} | Rest]) ->
filter_opts([Opt | Rest]) ->
[Opt | filter_opts(Rest)].
-
get_partial_filter_selector(#idx{def = Def}) when Def =:= all_docs; Def =:= undefined ->
undefined;
get_partial_filter_selector(#idx{def = {Def}}) ->
@@ -468,7 +445,6 @@ get_partial_filter_selector(#idx{def = {Def}}) ->
Selector -> Selector
end.
-
% Partial filter selectors are supported in text indexes via the selector field
% This adds backwards compatibility for existing indexes that might have a selector in them
get_legacy_selector(Def) ->
@@ -483,13 +459,17 @@ get_legacy_selector(Def) ->
index(SelectorName, Selector) ->
{
- idx,<<"mango_test_46418cd02081470d93290dc12306ebcb">>,
- <<"_design/57e860dee471f40a2c74ea5b72997b81dda36a24">>,
- <<"Selected">>,<<"json">>,
- {[{<<"fields">>,{[{<<"location">>,<<"asc">>}]}},
- {SelectorName,{Selector}}]},
- false,
- [{<<"def">>,{[{<<"fields">>,[<<"location">>]}]}}]
+ idx,
+ <<"mango_test_46418cd02081470d93290dc12306ebcb">>,
+ <<"_design/57e860dee471f40a2c74ea5b72997b81dda36a24">>,
+ <<"Selected">>,
+ <<"json">>,
+ {[
+ {<<"fields">>, {[{<<"location">>, <<"asc">>}]}},
+ {SelectorName, {Selector}}
+ ]},
+ false,
+ [{<<"def">>, {[{<<"fields">>, [<<"location">>]}]}}]
}.
get_partial_filter_all_docs_test() ->
@@ -509,12 +489,12 @@ get_partial_filter_selector_missing_test() ->
?assertEqual(undefined, get_partial_filter_selector(Idx)).
get_partial_filter_selector_with_selector_test() ->
- Selector = [{<<"location">>,{[{<<"$gt">>,<<"FRA">>}]}}],
+ Selector = [{<<"location">>, {[{<<"$gt">>, <<"FRA">>}]}}],
Idx = index(<<"partial_filter_selector">>, Selector),
?assertEqual({Selector}, get_partial_filter_selector(Idx)).
get_partial_filter_selector_with_legacy_selector_test() ->
- Selector = [{<<"location">>,{[{<<"$gt">>,<<"FRA">>}]}}],
+ Selector = [{<<"location">>, {[{<<"$gt">>, <<"FRA">>}]}}],
Idx = index(<<"selector">>, Selector),
?assertEqual({Selector}, get_partial_filter_selector(Idx)).
@@ -522,7 +502,6 @@ get_partial_filter_selector_with_legacy_default_selector_test() ->
Idx = index(<<"selector">>, []),
?assertEqual(undefined, get_partial_filter_selector(Idx)).
-
get_idx_ddoc_name_only_test() ->
Opts = [{ddoc, <<"foo">>}],
?assertEqual(<<"_design/foo">>, get_idx_ddoc({}, Opts)).
diff --git a/src/mango/src/mango_idx_special.erl b/src/mango/src/mango_idx_special.erl
index ac6efc707..4c4001c80 100644
--- a/src/mango/src/mango_idx_special.erl
+++ b/src/mango/src/mango_idx_special.erl
@@ -12,7 +12,6 @@
-module(mango_idx_special).
-
-export([
validate/1,
add/2,
@@ -25,51 +24,45 @@
end_key/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango_idx.hrl").
-
validate(_) ->
erlang:exit(invalid_call).
-
add(_, _) ->
erlang:exit(invalid_call).
-
remove(_, _) ->
erlang:exit(invalid_call).
-
from_ddoc(_) ->
erlang:exit(invalid_call).
-
-to_json(#idx{def=all_docs}) ->
+to_json(#idx{def = all_docs}) ->
{[
{ddoc, null},
{name, <<"_all_docs">>},
{type, <<"special">>},
- {def, {[
- {<<"fields">>, [{[
- {<<"_id">>, <<"asc">>}
- ]}]}
- ]}}
+ {def,
+ {[
+ {<<"fields">>, [
+ {[
+ {<<"_id">>, <<"asc">>}
+ ]}
+ ]}
+ ]}}
]}.
-
-columns(#idx{def=all_docs}) ->
+columns(#idx{def = all_docs}) ->
[<<"_id">>].
-
-is_usable(#idx{def=all_docs}, _Selector, []) ->
+is_usable(#idx{def = all_docs}, _Selector, []) ->
true;
-is_usable(#idx{def=all_docs} = Idx, Selector, SortFields) ->
+is_usable(#idx{def = all_docs} = Idx, Selector, SortFields) ->
Fields = mango_idx_view:indexable_fields(Selector),
lists:member(<<"_id">>, Fields) and can_use_sort(Idx, SortFields, Selector).
-
start_key([{'$gt', Key, _, _}]) ->
case mango_json:special(Key) of
true ->
@@ -84,7 +77,6 @@ start_key([{'$eq', Key, '$eq', Key}]) ->
false = mango_json:special(Key),
Key.
-
end_key([{_, _, '$lt', Key}]) ->
case mango_json:special(Key) of
true ->
@@ -99,7 +91,6 @@ end_key([{'$eq', Key, '$eq', Key}]) ->
false = mango_json:special(Key),
Key.
-
can_use_sort(_Idx, [], _Selector) ->
true;
can_use_sort(Idx, SortFields, _Selector) ->
diff --git a/src/mango/src/mango_idx_text.erl b/src/mango/src/mango_idx_text.erl
index 1d4becfb3..b4a46d688 100644
--- a/src/mango/src/mango_idx_text.erl
+++ b/src/mango/src/mango_idx_text.erl
@@ -12,7 +12,6 @@
-module(mango_idx_text).
-
-export([
validate_new/2,
validate_fields/1,
@@ -26,75 +25,74 @@
get_default_field_options/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
-
-validate_new(#idx{}=Idx, Db) ->
+validate_new(#idx{} = Idx, Db) ->
{ok, Def} = do_validate(Idx#idx.def),
maybe_reject_index_all_req(Def, Db),
- {ok, Idx#idx{def=Def}}.
-
+ {ok, Idx#idx{def = Def}}.
validate_index_def(IndexInfo) ->
do_validate(IndexInfo).
-
-add(#doc{body={Props0}}=DDoc, Idx) ->
- Texts1 = case proplists:get_value(<<"indexes">>, Props0) of
- {Texts0} -> Texts0;
- _ -> []
- end,
+add(#doc{body = {Props0}} = DDoc, Idx) ->
+ Texts1 =
+ case proplists:get_value(<<"indexes">>, Props0) of
+ {Texts0} -> Texts0;
+ _ -> []
+ end,
NewText = make_text(Idx),
Texts2 = lists:keystore(element(1, NewText), 1, Texts1, NewText),
- Props1 = lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>,
- {Texts2}}),
- {ok, DDoc#doc{body={Props1}}}.
-
-
-remove(#doc{body={Props0}}=DDoc, Idx) ->
- Texts1 = case proplists:get_value(<<"indexes">>, Props0) of
- {Texts0} ->
- Texts0;
- _ ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
+ Props1 = lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>, {Texts2}}),
+ {ok, DDoc#doc{body = {Props1}}}.
+
+remove(#doc{body = {Props0}} = DDoc, Idx) ->
+ Texts1 =
+ case proplists:get_value(<<"indexes">>, Props0) of
+ {Texts0} ->
+ Texts0;
+ _ ->
+ ?MANGO_ERROR({index_not_found, Idx#idx.name})
+ end,
Texts2 = lists:keydelete(Idx#idx.name, 1, Texts1),
- if Texts2 /= Texts1 -> ok; true ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
+ if
+ Texts2 /= Texts1 -> ok;
+ true -> ?MANGO_ERROR({index_not_found, Idx#idx.name})
end,
- Props1 = case Texts2 of
- [] ->
- lists:keydelete(<<"indexes">>, 1, Props0);
- _ ->
- lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>, {Texts2}})
- end,
- {ok, DDoc#doc{body={Props1}}}.
-
+ Props1 =
+ case Texts2 of
+ [] ->
+ lists:keydelete(<<"indexes">>, 1, Props0);
+ _ ->
+ lists:keystore(<<"indexes">>, 1, Props0, {<<"indexes">>, {Texts2}})
+ end,
+ {ok, DDoc#doc{body = {Props1}}}.
from_ddoc({Props}) ->
case lists:keyfind(<<"indexes">>, 1, Props) of
{<<"indexes">>, {Texts}} when is_list(Texts) ->
- lists:flatmap(fun({Name, {VProps}}) ->
- case validate_ddoc(VProps) of
- invalid_ddoc ->
- [];
- Def ->
- I = #idx{
- type = <<"text">>,
- name = Name,
- def = Def
- },
- [I]
- end
- end, Texts);
+ lists:flatmap(
+ fun({Name, {VProps}}) ->
+ case validate_ddoc(VProps) of
+ invalid_ddoc ->
+ [];
+ Def ->
+ I = #idx{
+ type = <<"text">>,
+ name = Name,
+ def = Def
+ },
+ [I]
+ end
+ end,
+ Texts
+ );
_ ->
[]
end.
-
to_json(Idx) ->
{[
{ddoc, Idx#idx.ddoc},
@@ -104,7 +102,6 @@ to_json(Idx) ->
{def, {def_to_json(Idx#idx.def)}}
]}.
-
columns(Idx) ->
{Props} = Idx#idx.def,
{<<"fields">>, Fields} = lists:keyfind(<<"fields">>, 1, Props),
@@ -114,18 +111,22 @@ columns(Idx) ->
_ ->
{DFProps} = couch_util:get_value(<<"default_field">>, Props, {[]}),
Enabled = couch_util:get_value(<<"enabled">>, DFProps, true),
- Default = case Enabled of
- true -> [<<"$default">>];
- false -> []
- end,
- Default ++ lists:map(fun({FProps}) ->
- {_, Name} = lists:keyfind(<<"name">>, 1, FProps),
- {_, Type} = lists:keyfind(<<"type">>, 1, FProps),
- iolist_to_binary([Name, ":", Type])
- end, Fields)
+ Default =
+ case Enabled of
+ true -> [<<"$default">>];
+ false -> []
+ end,
+ Default ++
+ lists:map(
+ fun({FProps}) ->
+ {_, Name} = lists:keyfind(<<"name">>, 1, FProps),
+ {_, Type} = lists:keyfind(<<"type">>, 1, FProps),
+ iolist_to_binary([Name, ":", Type])
+ end,
+ Fields
+ )
end.
-
is_usable(_, Selector, _) when Selector =:= {[]} ->
false;
is_usable(Idx, Selector, _) ->
@@ -137,14 +138,12 @@ is_usable(Idx, Selector, _) ->
sets:is_subset(sets:from_list(Fields), sets:from_list(Cols))
end.
-
do_validate({Props}) ->
{ok, Opts} = mango_opts:validate(Props, opts()),
{ok, {Opts}};
do_validate(Else) ->
?MANGO_ERROR({invalid_index_text, Else}).
-
def_to_json({Props}) ->
def_to_json(Props);
def_to_json([]) ->
@@ -162,7 +161,6 @@ def_to_json([{<<"partial_filter_selector">>, {[]}} | Rest]) ->
def_to_json([{Key, Value} | Rest]) ->
[{Key, Value} | def_to_json(Rest)].
-
fields_to_json([]) ->
[];
fields_to_json([{[{<<"name">>, Name}, {<<"type">>, Type0}]} | Rest]) ->
@@ -174,17 +172,15 @@ fields_to_json([{[{<<"type">>, Type0}, {<<"name">>, Name}]} | Rest]) ->
Type = validate_field_type(Type0),
[{[{Name, Type}]} | fields_to_json(Rest)].
-
%% In the future, we can possibly add more restrictive validation.
%% For now, let's make sure the field name is not blank.
validate_field_name(<<"">>) ->
throw(invalid_field_name);
-validate_field_name(Else) when is_binary(Else)->
+validate_field_name(Else) when is_binary(Else) ->
ok;
validate_field_name(_) ->
throw(invalid_field_name).
-
validate_field_type(<<"string">>) ->
<<"string">>;
validate_field_type(<<"number">>) ->
@@ -192,32 +188,33 @@ validate_field_type(<<"number">>) ->
validate_field_type(<<"boolean">>) ->
<<"boolean">>.
-
validate_fields(<<"all_fields">>) ->
{ok, all_fields};
validate_fields(Fields) ->
try fields_to_json(Fields) of
_ ->
mango_fields:new(Fields)
- catch error:function_clause ->
- ?MANGO_ERROR({invalid_index_fields_definition, Fields});
- throw:invalid_field_name ->
- ?MANGO_ERROR({invalid_index_fields_definition, Fields})
+ catch
+ error:function_clause ->
+ ?MANGO_ERROR({invalid_index_fields_definition, Fields});
+ throw:invalid_field_name ->
+ ?MANGO_ERROR({invalid_index_fields_definition, Fields})
end.
-
validate_ddoc(VProps) ->
try
Def = proplists:get_value(<<"index">>, VProps),
validate_index_def(Def),
Def
- catch Error:Reason ->
- couch_log:error("Invalid Index Def ~p: Error. ~p, Reason: ~p",
- [VProps, Error, Reason]),
- invalid_ddoc
+ catch
+ Error:Reason ->
+ couch_log:error(
+ "Invalid Index Def ~p: Error. ~p, Reason: ~p",
+ [VProps, Error, Reason]
+ ),
+ invalid_ddoc
end.
-
opts() ->
[
{<<"default_analyzer">>, [
@@ -256,15 +253,14 @@ opts() ->
]}
].
-
make_text(Idx) ->
- Text= {[
- {<<"index">>, Idx#idx.def},
- {<<"analyzer">>, construct_analyzer(Idx#idx.def)}
- ]},
+ Text =
+ {[
+ {<<"index">>, Idx#idx.def},
+ {<<"analyzer">>, construct_analyzer(Idx#idx.def)}
+ ]},
{Idx#idx.name, Text}.
-
get_default_field_options(Props) ->
Default = couch_util:get_value(default_field, Props, {[]}),
case Default of
@@ -272,24 +268,30 @@ get_default_field_options(Props) ->
{Bool, <<"standard">>};
{[]} ->
{true, <<"standard">>};
- {Opts}->
+ {Opts} ->
Enabled = couch_util:get_value(<<"enabled">>, Opts, true),
- Analyzer = couch_util:get_value(<<"analyzer">>, Opts,
- <<"standard">>),
+ Analyzer = couch_util:get_value(
+ <<"analyzer">>,
+ Opts,
+ <<"standard">>
+ ),
{Enabled, Analyzer}
end.
-
construct_analyzer({Props}) ->
- DefaultAnalyzer = couch_util:get_value(default_analyzer, Props,
- <<"keyword">>),
+ DefaultAnalyzer = couch_util:get_value(
+ default_analyzer,
+ Props,
+ <<"keyword">>
+ ),
{DefaultField, DefaultFieldAnalyzer} = get_default_field_options(Props),
- DefaultAnalyzerDef = case DefaultField of
- true ->
- [{<<"$default">>, DefaultFieldAnalyzer}];
- _ ->
- []
- end,
+ DefaultAnalyzerDef =
+ case DefaultField of
+ true ->
+ [{<<"$default">>, DefaultFieldAnalyzer}];
+ _ ->
+ []
+ end,
case DefaultAnalyzerDef of
[] ->
<<"keyword">>;
@@ -301,23 +303,28 @@ construct_analyzer({Props}) ->
]}
end.
-
indexable_fields(Selector) ->
TupleTree = mango_selector_text:convert([], Selector),
indexable_fields([], TupleTree).
-
indexable_fields(Fields, {op_and, Args}) when is_list(Args) ->
- lists:foldl(fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
- Fields, Args);
-
+ lists:foldl(
+ fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
+ Fields,
+ Args
+ );
%% For queries that use array element access or $in operations, two
%% fields get generated by mango_selector_text:convert. At index
%% definition time, only one field gets defined. In this situation, we
%% remove the extra generated field so that the index can be used. For
%% all other situations, we include the fields as normal.
-indexable_fields(Fields, {op_or, [{op_field, Field0},
- {op_field, {[Name | _], _}} = Field1]}) ->
+indexable_fields(
+ Fields,
+ {op_or, [
+ {op_field, Field0},
+ {op_field, {[Name | _], _}} = Field1
+ ]}
+) ->
case lists:member(<<"[]">>, Name) of
true ->
indexable_fields(Fields, {op_field, Field0});
@@ -326,40 +333,36 @@ indexable_fields(Fields, {op_or, [{op_field, Field0},
indexable_fields(Fields1, Field1)
end;
indexable_fields(Fields, {op_or, Args}) when is_list(Args) ->
- lists:foldl(fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
- Fields, Args);
-
+ lists:foldl(
+ fun(Arg, Fields0) -> indexable_fields(Fields0, Arg) end,
+ Fields,
+ Args
+ );
indexable_fields(Fields, {op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
Fields0 = indexable_fields(Fields, ExistsQuery),
indexable_fields(Fields0, Arg);
% forces "$exists" : false to use _all_docs
indexable_fields(_, {op_not, {_, false}}) ->
[];
-
indexable_fields(Fields, {op_insert, Arg}) when is_binary(Arg) ->
Fields;
-
%% fieldname.[]:length is not a user defined field.
indexable_fields(Fields, {op_field, {[_, <<":length">>], _}}) ->
Fields;
indexable_fields(Fields, {op_field, {Name, _}}) ->
[iolist_to_binary(Name) | Fields];
-
%% In this particular case, the lucene index is doing a field_exists query
%% so it is looking at all sorts of combinations of field:* and field.*
%% We don't add the field because we cannot pre-determine what field will exist.
%% Hence we just return Fields and make it less restrictive.
indexable_fields(Fields, {op_fieldname, {_, _}}) ->
Fields;
-
%% Similar idea to op_fieldname but with fieldname:null
indexable_fields(Fields, {op_null, {_, _}}) ->
Fields;
-
indexable_fields(Fields, {op_default, _}) ->
[<<"$default">> | Fields].
-
maybe_reject_index_all_req({Def}, Db) ->
DbName = couch_db:name(Db),
#user_ctx{name = User} = couch_db:get_user_ctx(Db),
@@ -368,48 +371,47 @@ maybe_reject_index_all_req({Def}, Db) ->
{all_fields, "true"} ->
?MANGO_ERROR(index_all_disabled);
{all_fields, "warn"} ->
- couch_log:warning("User ~p is indexing all fields in db ~p",
- [User, DbName]);
+ couch_log:warning(
+ "User ~p is indexing all fields in db ~p",
+ [User, DbName]
+ );
_ ->
ok
end.
-
forbid_index_all() ->
config:get("mango", "index_all_disabled", "false").
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
setup_all() ->
Ctx = test_util:start_couch(),
- meck:expect(couch_log, warning, 2,
- fun(_,_) ->
+ meck:expect(
+ couch_log,
+ warning,
+ 2,
+ fun(_, _) ->
throw({test_error, logged_warning})
- end),
+ end
+ ),
Ctx.
-
teardown_all(Ctx) ->
meck:unload(),
test_util:stop_couch(Ctx).
-
setup() ->
%default index all def that generates {fields, all_fields}
- Index = #idx{def={[]}},
+ Index = #idx{def = {[]}},
DbName = <<"testdb">>,
UserCtx = #user_ctx{name = <<"u1">>},
{ok, Db} = couch_db:clustered_db(DbName, UserCtx),
{Index, Db}.
-
teardown(_) ->
ok.
-
index_all_test_() ->
{
setup,
@@ -427,34 +429,31 @@ index_all_test_() ->
}
}.
-
forbid_index_all({Idx, Db}) ->
?_test(begin
ok = config:set("mango", "index_all_disabled", "true", false),
- ?assertThrow({mango_error, ?MODULE, index_all_disabled},
+ ?assertThrow(
+ {mango_error, ?MODULE, index_all_disabled},
validate_new(Idx, Db)
)
end).
-
default_and_false_index_all({Idx, Db}) ->
?_test(begin
config:delete("mango", "index_all_disabled", false),
- {ok, #idx{def={Def}}} = validate_new(Idx, Db),
+ {ok, #idx{def = {Def}}} = validate_new(Idx, Db),
Fields = couch_util:get_value(fields, Def),
?assertEqual(all_fields, Fields),
ok = config:set("mango", "index_all_disabled", "false", false),
- {ok, #idx{def={Def2}}} = validate_new(Idx, Db),
+ {ok, #idx{def = {Def2}}} = validate_new(Idx, Db),
Fields2 = couch_util:get_value(fields, Def2),
?assertEqual(all_fields, Fields2)
end).
-
warn_index_all({Idx, Db}) ->
?_test(begin
ok = config:set("mango", "index_all_disabled", "warn", false),
?assertThrow({test_error, logged_warning}, validate_new(Idx, Db))
end).
-
-endif.
diff --git a/src/mango/src/mango_idx_view.erl b/src/mango/src/mango_idx_view.erl
index 37911498c..ff8f6c6bb 100644
--- a/src/mango/src/mango_idx_view.erl
+++ b/src/mango/src/mango_idx_view.erl
@@ -12,7 +12,6 @@
-module(mango_idx_view).
-
-export([
validate_new/2,
validate_index_def/1,
@@ -30,75 +29,75 @@
field_ranges/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-include("mango_idx.hrl").
-include("mango_idx_view.hrl").
-
-validate_new(#idx{}=Idx, _Db) ->
+validate_new(#idx{} = Idx, _Db) ->
{ok, Def} = do_validate(Idx#idx.def),
- {ok, Idx#idx{def=Def}}.
-
+ {ok, Idx#idx{def = Def}}.
validate_index_def(Def) ->
def_to_json(Def).
-
-add(#doc{body={Props0}}=DDoc, Idx) ->
- Views1 = case proplists:get_value(<<"views">>, Props0) of
- {Views0} -> Views0;
- _ -> []
- end,
+add(#doc{body = {Props0}} = DDoc, Idx) ->
+ Views1 =
+ case proplists:get_value(<<"views">>, Props0) of
+ {Views0} -> Views0;
+ _ -> []
+ end,
NewView = make_view(Idx),
Views2 = lists:keystore(element(1, NewView), 1, Views1, NewView),
Props1 = lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}}),
- {ok, DDoc#doc{body={Props1}}}.
-
-
-remove(#doc{body={Props0}}=DDoc, Idx) ->
- Views1 = case proplists:get_value(<<"views">>, Props0) of
- {Views0} ->
- Views0;
- _ ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
+ {ok, DDoc#doc{body = {Props1}}}.
+
+remove(#doc{body = {Props0}} = DDoc, Idx) ->
+ Views1 =
+ case proplists:get_value(<<"views">>, Props0) of
+ {Views0} ->
+ Views0;
+ _ ->
+ ?MANGO_ERROR({index_not_found, Idx#idx.name})
+ end,
Views2 = lists:keydelete(Idx#idx.name, 1, Views1),
- if Views2 /= Views1 -> ok; true ->
- ?MANGO_ERROR({index_not_found, Idx#idx.name})
- end,
- Props1 = case Views2 of
- [] ->
- lists:keydelete(<<"views">>, 1, Props0);
- _ ->
- lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}})
+ if
+ Views2 /= Views1 -> ok;
+ true -> ?MANGO_ERROR({index_not_found, Idx#idx.name})
end,
- {ok, DDoc#doc{body={Props1}}}.
-
+ Props1 =
+ case Views2 of
+ [] ->
+ lists:keydelete(<<"views">>, 1, Props0);
+ _ ->
+ lists:keystore(<<"views">>, 1, Props0, {<<"views">>, {Views2}})
+ end,
+ {ok, DDoc#doc{body = {Props1}}}.
from_ddoc({Props}) ->
case lists:keyfind(<<"views">>, 1, Props) of
{<<"views">>, {Views}} when is_list(Views) ->
- lists:flatmap(fun({Name, {VProps}}) ->
- case validate_ddoc(VProps) of
- invalid_view ->
- [];
- {Def, Opts} ->
- I = #idx{
- type = <<"json">>,
- name = Name,
- def = Def,
- opts = Opts
- },
- [I]
- end
- end, Views);
+ lists:flatmap(
+ fun({Name, {VProps}}) ->
+ case validate_ddoc(VProps) of
+ invalid_view ->
+ [];
+ {Def, Opts} ->
+ I = #idx{
+ type = <<"json">>,
+ name = Name,
+ def = Def,
+ opts = Opts
+ },
+ [I]
+ end
+ end,
+ Views
+ );
_ ->
[]
end.
-
to_json(Idx) ->
{[
{ddoc, Idx#idx.ddoc},
@@ -108,20 +107,18 @@ to_json(Idx) ->
{def, {def_to_json(Idx#idx.def)}}
]}.
-
columns(Idx) ->
{Props} = Idx#idx.def,
{<<"fields">>, {Fields}} = lists:keyfind(<<"fields">>, 1, Props),
[Key || {Key, _} <- Fields].
-
is_usable(Idx, Selector, SortFields) ->
% This index is usable if all of the columns are
% restricted by the selector such that they are required to exist
% and the selector is not a text search (so requires a text index)
RequiredFields = columns(Idx),
- % sort fields are required to exist in the results so
+ % sort fields are required to exist in the results so
% we don't need to check the selector for these
RequiredFields1 = ordsets:subtract(lists:usort(RequiredFields), lists:usort(SortFields)),
@@ -129,31 +126,35 @@ is_usable(Idx, Selector, SortFields) ->
% we don't need to check the selector for these either
RequiredFields2 = ordsets:subtract(
RequiredFields1,
- [<<"_id">>, <<"_rev">>]),
-
- mango_selector:has_required_fields(Selector, RequiredFields2)
- andalso not is_text_search(Selector)
- andalso can_use_sort(RequiredFields, SortFields, Selector).
+ [<<"_id">>, <<"_rev">>]
+ ),
+ mango_selector:has_required_fields(Selector, RequiredFields2) andalso
+ not is_text_search(Selector) andalso
+ can_use_sort(RequiredFields, SortFields, Selector).
is_text_search({[]}) ->
false;
is_text_search({[{<<"$default">>, _}]}) ->
true;
is_text_search({[{_Field, Cond}]}) when is_list(Cond) ->
- lists:foldl(fun(C, Exists) ->
- Exists orelse is_text_search(C)
- end, false, Cond);
+ lists:foldl(
+ fun(C, Exists) ->
+ Exists orelse is_text_search(C)
+ end,
+ false,
+ Cond
+ );
is_text_search({[{_Field, Cond}]}) when is_tuple(Cond) ->
is_text_search(Cond);
is_text_search({[{_Field, _Cond}]}) ->
false;
%% we reached values, which should always be false
-is_text_search(Val)
- when is_number(Val); is_boolean(Val); is_binary(Val)->
+is_text_search(Val) when
+ is_number(Val); is_boolean(Val); is_binary(Val)
+->
false.
-
start_key([]) ->
[];
start_key([{'$gt', Key, _, _} | Rest]) ->
@@ -170,7 +171,6 @@ start_key([{'$eq', Key, '$eq', Key} | Rest]) ->
false = mango_json:special(Key),
[Key | start_key(Rest)].
-
end_key([]) ->
[?MAX_JSON_OBJ];
end_key([{_, _, '$lt', Key} | Rest]) ->
@@ -187,14 +187,12 @@ end_key([{'$eq', Key, '$eq', Key} | Rest]) ->
false = mango_json:special(Key),
[Key | end_key(Rest)].
-
do_validate({Props}) ->
{ok, Opts} = mango_opts:validate(Props, opts()),
{ok, {Opts}};
do_validate(Else) ->
?MANGO_ERROR({invalid_index_json, Else}).
-
def_to_json({Props}) ->
def_to_json(Props);
def_to_json([]) ->
@@ -210,7 +208,6 @@ def_to_json([{<<"partial_filter_selector">>, {[]}} | Rest]) ->
def_to_json([{Key, Value} | Rest]) ->
[{Key, Value} | def_to_json(Rest)].
-
opts() ->
[
{<<"fields">>, [
@@ -225,16 +222,15 @@ opts() ->
]}
].
-
make_view(Idx) ->
- View = {[
- {<<"map">>, Idx#idx.def},
- {<<"reduce">>, <<"_count">>},
- {<<"options">>, {Idx#idx.opts}}
- ]},
+ View =
+ {[
+ {<<"map">>, Idx#idx.def},
+ {<<"reduce">>, <<"_count">>},
+ {<<"options">>, {Idx#idx.opts}}
+ ]},
{Idx#idx.name, View}.
-
validate_ddoc(VProps) ->
try
Def = proplists:get_value(<<"map">>, VProps),
@@ -242,13 +238,15 @@ validate_ddoc(VProps) ->
{Opts0} = proplists:get_value(<<"options">>, VProps),
Opts = lists:keydelete(<<"sort">>, 1, Opts0),
{Def, Opts}
- catch Error:Reason ->
- couch_log:error("Invalid Index Def ~p. Error: ~p, Reason: ~p",
- [VProps, Error, Reason]),
- invalid_view
+ catch
+ Error:Reason ->
+ couch_log:error(
+ "Invalid Index Def ~p. Error: ~p, Reason: ~p",
+ [VProps, Error, Reason]
+ ),
+ invalid_view
end.
-
% This function returns a list of indexes that
% can be used to restrict this query. This works by
% searching the selector looking for field names that
@@ -268,11 +266,9 @@ validate_ddoc(VProps) ->
% We can see through '$and' trivially
indexable_fields({[{<<"$and">>, Args}]}) ->
lists:usort(lists:flatten([indexable_fields(A) || A <- Args]));
-
% So far we can't see through any other operator
indexable_fields({[{<<"$", _/binary>>, _}]}) ->
[];
-
% If we have a field with a terminator that is locatable
% using an index then the field is a possible index
indexable_fields({[{Field, Cond}]}) ->
@@ -282,12 +278,10 @@ indexable_fields({[{Field, Cond}]}) ->
false ->
[]
end;
-
% An empty selector
indexable_fields({[]}) ->
[].
-
% Check if a condition is indexable. The logical
% comparisons are mostly straightforward. We
% currently don't understand '$in' which is
@@ -304,24 +298,20 @@ indexable({[{<<"$gt">>, _}]}) ->
true;
indexable({[{<<"$gte">>, _}]}) ->
true;
-
% All other operators are currently not indexable.
% This is also a subtle assertion that we don't
% call indexable/1 on a field name.
indexable({[{<<"$", _/binary>>, _}]}) ->
false.
-
% For each field, return {Field, Range}
field_ranges(Selector) ->
Fields = indexable_fields(Selector),
field_ranges(Selector, Fields).
-
field_ranges(Selector, Fields) ->
field_ranges(Selector, Fields, []).
-
field_ranges(_Selector, [], Acc) ->
lists:reverse(Acc);
field_ranges(Selector, [Field | Rest], Acc) ->
@@ -332,7 +322,6 @@ field_ranges(Selector, [Field | Rest], Acc) ->
field_ranges(Selector, Rest, [{Field, Range} | Acc])
end.
-
% Find the complete range for a given index in this
% selector. This works by AND'ing logical comparisons
% together so that we can define the start and end
@@ -343,32 +332,31 @@ field_ranges(Selector, [Field | Rest], Acc) ->
range(Selector, Index) ->
range(Selector, Index, '$gt', mango_json:min(), '$lt', mango_json:max()).
-
% Adjust Low and High based on values found for the
% given Index in Selector.
range({[{<<"$and">>, Args}]}, Index, LCmp, Low, HCmp, High) ->
- lists:foldl(fun
- (Arg, {LC, L, HC, H}) ->
- range(Arg, Index, LC, L, HC, H);
- (_Arg, empty) ->
- empty
- end, {LCmp, Low, HCmp, High}, Args);
-
+ lists:foldl(
+ fun
+ (Arg, {LC, L, HC, H}) ->
+ range(Arg, Index, LC, L, HC, H);
+ (_Arg, empty) ->
+ empty
+ end,
+ {LCmp, Low, HCmp, High},
+ Args
+ );
% We can currently only traverse '$and' operators
range({[{<<"$", _/binary>>}]}, _Index, LCmp, Low, HCmp, High) ->
{LCmp, Low, HCmp, High};
-
% If the field name matches the index see if we can narrow
% the acceptable range.
range({[{Index, Cond}]}, Index, LCmp, Low, HCmp, High) ->
range(Cond, LCmp, Low, HCmp, High);
-
% Else we have a field unrelated to this index so just
% return the current values.
range(_, _, LCmp, Low, HCmp, High) ->
{LCmp, Low, HCmp, High}.
-
% The comments below are a bit cryptic at first but they show
% where the Arg can land in the current range.
%
@@ -425,7 +413,6 @@ range({[{<<"$lt">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
{LCmp, Low, HCmp, High}
end;
-
range({[{<<"$lte">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -441,7 +428,6 @@ range({[{<<"$lte">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
{LCmp, Low, HCmp, High}
end;
-
range({[{<<"$eq">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -459,7 +445,6 @@ range({[{<<"$eq">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
empty
end;
-
range({[{<<"$gte">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -475,7 +460,6 @@ range({[{<<"$gte">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
empty
end;
-
range({[{<<"$gt">>, Arg}]}, LCmp, Low, HCmp, High) ->
case range_pos(Low, Arg, High) of
min ->
@@ -489,14 +473,12 @@ range({[{<<"$gt">>, Arg}]}, LCmp, Low, HCmp, High) ->
max ->
empty
end;
-
% There's some other un-indexable restriction on the index
% that will be applied as a post-filter. Ignore it and
% carry on our merry way.
range({[{<<"$", _/binary>>, _}]}, LCmp, Low, HCmp, High) ->
{LCmp, Low, HCmp, High}.
-
% Returns the value min | low | mid | high | max depending
% on how Arg compares to Low and High.
range_pos(Low, Arg, High) ->
@@ -514,7 +496,6 @@ range_pos(Low, Arg, High) ->
end
end.
-
% Can_use_sort works as follows:
%
% * no sort fields then we can use this
diff --git a/src/mango/src/mango_json.erl b/src/mango/src/mango_json.erl
index 9584c2d7e..ca18d8898 100644
--- a/src/mango/src/mango_json.erl
+++ b/src/mango/src/mango_json.erl
@@ -12,7 +12,6 @@
-module(mango_json).
-
-export([
min/0,
max/0,
@@ -23,19 +22,15 @@
to_binary/1
]).
-
-define(MIN_VAL, mango_json_min).
-define(MAX_VAL, mango_json_max).
-
min() ->
?MIN_VAL.
-
max() ->
?MAX_VAL.
-
cmp(?MIN_VAL, ?MIN_VAL) ->
0;
cmp(?MIN_VAL, _) ->
@@ -51,7 +46,6 @@ cmp(_, ?MAX_VAL) ->
cmp(A, B) ->
couch_ejson_compare:less(A, B).
-
cmp_raw(?MIN_VAL, ?MIN_VAL) ->
0;
cmp_raw(?MIN_VAL, _) ->
@@ -77,7 +71,6 @@ cmp_raw(A, B) ->
end
end.
-
type(null) ->
<<"null">>;
type(Bool) when is_boolean(Bool) ->
@@ -91,7 +84,6 @@ type({Props}) when is_list(Props) ->
type(Vals) when is_list(Vals) ->
<<"array">>.
-
special(?MIN_VAL) ->
true;
special(?MAX_VAL) ->
@@ -99,7 +91,6 @@ special(?MAX_VAL) ->
special(_) ->
false.
-
to_binary({Props}) ->
Pred = fun({Key, Value}) ->
{to_binary(Key), to_binary(Value)}
@@ -118,4 +109,4 @@ to_binary(Data) when is_atom(Data) ->
to_binary(Data) when is_number(Data) ->
Data;
to_binary(Data) when is_binary(Data) ->
- Data. \ No newline at end of file
+ Data.
diff --git a/src/mango/src/mango_json_bookmark.erl b/src/mango/src/mango_json_bookmark.erl
index 83fd00f29..8446e0c8a 100644
--- a/src/mango/src/mango_json_bookmark.erl
+++ b/src/mango/src/mango_json_bookmark.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(mango_json_bookmark).
-export([
@@ -18,15 +17,14 @@
create/1
]).
-
-include_lib("couch_mrview/include/couch_mrview.hrl").
-include("mango_cursor.hrl").
-include("mango.hrl").
-update_args(EncodedBookmark, #mrargs{skip = Skip} = Args) ->
+update_args(EncodedBookmark, #mrargs{skip = Skip} = Args) ->
Bookmark = unpack(EncodedBookmark),
case is_list(Bookmark) of
- true ->
+ true ->
{startkey, Startkey} = lists:keyfind(startkey, 1, Bookmark),
{startkey_docid, StartkeyDocId} = lists:keyfind(startkey_docid, 1, Bookmark),
Args#mrargs{
@@ -37,35 +35,36 @@ update_args(EncodedBookmark, #mrargs{skip = Skip} = Args) ->
false ->
Args
end.
-
-create(#cursor{bookmark_docid = BookmarkDocId, bookmark_key = BookmarkKey}) when BookmarkKey =/= undefined ->
+create(#cursor{bookmark_docid = BookmarkDocId, bookmark_key = BookmarkKey}) when
+ BookmarkKey =/= undefined
+->
QueryArgs = [
{startkey_docid, BookmarkDocId},
{startkey, BookmarkKey}
],
- Bin = term_to_binary(QueryArgs, [compressed, {minor_version,1}]),
+ Bin = term_to_binary(QueryArgs, [compressed, {minor_version, 1}]),
couch_util:encodeBase64Url(Bin);
create(#cursor{bookmark = Bookmark}) ->
Bookmark.
-
unpack(nil) ->
nil;
unpack(Packed) ->
try
Bookmark = binary_to_term(couch_util:decodeBase64Url(Packed), [safe]),
verify(Bookmark)
- catch _:_ ->
- ?MANGO_ERROR({invalid_bookmark, Packed})
+ catch
+ _:_ ->
+ ?MANGO_ERROR({invalid_bookmark, Packed})
end.
verify(Bookmark) when is_list(Bookmark) ->
- case lists:keymember(startkey, 1, Bookmark) andalso lists:keymember(startkey_docid, 1, Bookmark) of
+ case
+ lists:keymember(startkey, 1, Bookmark) andalso lists:keymember(startkey_docid, 1, Bookmark)
+ of
true -> Bookmark;
_ -> throw(invalid_bookmark)
end;
verify(_Bookmark) ->
throw(invalid_bookmark).
-
- \ No newline at end of file
diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl
index 274ae11de..48c78c4c4 100644
--- a/src/mango/src/mango_native_proc.erl
+++ b/src/mango/src/mango_native_proc.erl
@@ -13,10 +13,8 @@
-module(mango_native_proc).
-behavior(gen_server).
-
-include("mango_idx.hrl").
-
-export([
start_link/0,
set_timeout/2,
@@ -32,117 +30,94 @@
code_change/3
]).
-
-record(st, {
indexes = [],
timeout = 5000
}).
-
-record(tacc, {
index_array_lengths = true,
fields = all_fields,
path = []
}).
-
start_link() ->
gen_server:start_link(?MODULE, [], []).
-
set_timeout(Pid, TimeOut) when is_integer(TimeOut), TimeOut > 0 ->
gen_server:call(Pid, {set_timeout, TimeOut}).
-
prompt(Pid, Data) ->
gen_server:call(Pid, {prompt, Data}).
-
init(_) ->
{ok, #st{}}.
-
terminate(_Reason, _St) ->
ok.
-
handle_call({set_timeout, TimeOut}, _From, St) ->
- {reply, ok, St#st{timeout=TimeOut}};
-
+ {reply, ok, St#st{timeout = TimeOut}};
handle_call({prompt, [<<"reset">>]}, _From, St) ->
- {reply, true, St#st{indexes=[]}};
-
+ {reply, true, St#st{indexes = []}};
handle_call({prompt, [<<"reset">>, _QueryConfig]}, _From, St) ->
- {reply, true, St#st{indexes=[]}};
-
+ {reply, true, St#st{indexes = []}};
handle_call({prompt, [<<"add_fun">>, IndexInfo]}, _From, St) ->
- Indexes = case validate_index_info(IndexInfo) of
- true ->
- St#st.indexes ++ [IndexInfo];
- false ->
- couch_log:error("No Valid Indexes For: ~p", [IndexInfo]),
- St#st.indexes
- end,
+ Indexes =
+ case validate_index_info(IndexInfo) of
+ true ->
+ St#st.indexes ++ [IndexInfo];
+ false ->
+ couch_log:error("No Valid Indexes For: ~p", [IndexInfo]),
+ St#st.indexes
+ end,
NewSt = St#st{indexes = Indexes},
{reply, true, NewSt};
-
handle_call({prompt, [<<"map_doc">>, Doc]}, _From, St) ->
{reply, map_doc(St, mango_json:to_binary(Doc)), St};
-
handle_call({prompt, [<<"reduce">>, RedSrcs, _]}, _From, St) ->
{reply, [true, [null || _ <- RedSrcs]], St};
-
handle_call({prompt, [<<"rereduce">>, RedSrcs, _]}, _From, St) ->
{reply, [true, [null || _ <- RedSrcs]], St};
-
handle_call({prompt, [<<"index_doc">>, Doc]}, _From, St) ->
- Vals = case index_doc(St, mango_json:to_binary(Doc)) of
- [] ->
- [[]];
- Else ->
- Else
- end,
+ Vals =
+ case index_doc(St, mango_json:to_binary(Doc)) of
+ [] ->
+ [[]];
+ Else ->
+ Else
+ end,
{reply, Vals, St};
-
-
handle_call(Msg, _From, St) ->
{stop, {invalid_call, Msg}, {invalid_call, Msg}, St}.
-
handle_cast(garbage_collect, St) ->
erlang:garbage_collect(),
{noreply, St};
-
handle_cast(stop, St) ->
{stop, normal, St};
-
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
-
handle_info(Msg, St) ->
{stop, {invalid_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
-map_doc(#st{indexes=Indexes}, Doc) ->
+map_doc(#st{indexes = Indexes}, Doc) ->
lists:map(fun(Idx) -> get_index_entries(Idx, Doc) end, Indexes).
-
-index_doc(#st{indexes=Indexes}, Doc) ->
+index_doc(#st{indexes = Indexes}, Doc) ->
lists:map(fun(Idx) -> get_text_entries(Idx, Doc) end, Indexes).
-
get_index_entries({IdxProps}, Doc) ->
{Fields} = couch_util:get_value(<<"fields">>, IdxProps),
Selector = get_index_partial_filter_selector(IdxProps),
case should_index(Selector, Doc) of
- false ->
+ false ->
[];
- true ->
+ true ->
Values = get_index_values(Fields, Doc),
case lists:member(not_found, Values) of
true -> [];
@@ -150,16 +125,17 @@ get_index_entries({IdxProps}, Doc) ->
end
end.
-
get_index_values(Fields, Doc) ->
- lists:map(fun({Field, _Dir}) ->
- case mango_doc:get_field(Doc, Field) of
- not_found -> not_found;
- bad_path -> not_found;
- Value -> Value
- end
- end, Fields).
-
+ lists:map(
+ fun({Field, _Dir}) ->
+ case mango_doc:get_field(Doc, Field) of
+ not_found -> not_found;
+ bad_path -> not_found;
+ Value -> Value
+ end
+ end,
+ Fields
+ ).
get_text_entries({IdxProps}, Doc) ->
Selector = get_index_partial_filter_selector(IdxProps),
@@ -170,7 +146,6 @@ get_text_entries({IdxProps}, Doc) ->
[]
end.
-
get_index_partial_filter_selector(IdxProps) ->
case couch_util:get_value(<<"partial_filter_selector">>, IdxProps, {[]}) of
{[]} ->
@@ -181,7 +156,6 @@ get_index_partial_filter_selector(IdxProps) ->
Else
end.
-
get_text_entries0(IdxProps, Doc) ->
DefaultEnabled = get_default_enabled(IdxProps),
IndexArrayLengths = get_index_array_lengths(IdxProps),
@@ -191,22 +165,22 @@ get_text_entries0(IdxProps, Doc) ->
fields = FieldsList
},
Fields0 = get_text_field_values(Doc, TAcc),
- Fields = if not DefaultEnabled -> Fields0; true ->
- add_default_text_field(Fields0)
- end,
+ Fields =
+ if
+ not DefaultEnabled -> Fields0;
+ true -> add_default_text_field(Fields0)
+ end,
FieldNames = get_field_names(Fields),
Converted = convert_text_fields(Fields),
FieldNames ++ Converted.
-
get_text_field_values({Props}, TAcc) when is_list(Props) ->
get_text_field_values_obj(Props, TAcc, []);
-
get_text_field_values(Values, TAcc) when is_list(Values) ->
IndexArrayLengths = TAcc#tacc.index_array_lengths,
NewPath = ["[]" | TAcc#tacc.path],
NewTAcc = TAcc#tacc{path = NewPath},
- case IndexArrayLengths of
+ case IndexArrayLengths of
true ->
% We bypass make_text_field and directly call make_text_field_name
% because the length field name is not part of the path.
@@ -216,20 +190,15 @@ get_text_field_values(Values, TAcc) when is_list(Values) ->
_ ->
get_text_field_values_arr(Values, NewTAcc, [])
end;
-
get_text_field_values(Bin, TAcc) when is_binary(Bin) ->
make_text_field(TAcc, <<"string">>, Bin);
-
get_text_field_values(Num, TAcc) when is_number(Num) ->
make_text_field(TAcc, <<"number">>, Num);
-
get_text_field_values(Bool, TAcc) when is_boolean(Bool) ->
make_text_field(TAcc, <<"boolean">>, Bool);
-
get_text_field_values(null, TAcc) ->
make_text_field(TAcc, <<"null">>, true).
-
get_text_field_values_obj([], _, FAcc) ->
FAcc;
get_text_field_values_obj([{Key, Val} | Rest], TAcc, FAcc) ->
@@ -238,34 +207,29 @@ get_text_field_values_obj([{Key, Val} | Rest], TAcc, FAcc) ->
Fields = get_text_field_values(Val, NewTAcc),
get_text_field_values_obj(Rest, TAcc, Fields ++ FAcc).
-
get_text_field_values_arr([], _, FAcc) ->
FAcc;
get_text_field_values_arr([Value | Rest], TAcc, FAcc) ->
Fields = get_text_field_values(Value, TAcc),
get_text_field_values_arr(Rest, TAcc, Fields ++ FAcc).
-
get_default_enabled(Props) ->
case couch_util:get_value(<<"default_field">>, Props, {[]}) of
Bool when is_boolean(Bool) ->
Bool;
{[]} ->
true;
- {Opts}->
+ {Opts} ->
couch_util:get_value(<<"enabled">>, Opts, true)
end.
-
get_index_array_lengths(Props) ->
couch_util:get_value(<<"index_array_lengths">>, Props, true).
-
add_default_text_field(Fields) ->
DefaultFields = add_default_text_field(Fields, []),
DefaultFields ++ Fields.
-
add_default_text_field([], Acc) ->
Acc;
add_default_text_field([{_Name, <<"string">>, Value} | Rest], Acc) ->
@@ -274,32 +238,33 @@ add_default_text_field([{_Name, <<"string">>, Value} | Rest], Acc) ->
add_default_text_field([_ | Rest], Acc) ->
add_default_text_field(Rest, Acc).
-
%% index of all field names
get_field_names(Fields) ->
- FieldNameSet = lists:foldl(fun({Name, _, _}, Set) ->
- gb_sets:add([<<"$fieldnames">>, Name, []], Set)
- end, gb_sets:new(), Fields),
+ FieldNameSet = lists:foldl(
+ fun({Name, _, _}, Set) ->
+ gb_sets:add([<<"$fieldnames">>, Name, []], Set)
+ end,
+ gb_sets:new(),
+ Fields
+ ),
gb_sets:to_list(FieldNameSet).
-
convert_text_fields([]) ->
[];
convert_text_fields([{Name, _Type, Value} | Rest]) ->
[[Name, Value, []] | convert_text_fields(Rest)].
-
should_index(Selector, Doc) ->
% We should do this
NormSelector = mango_selector:normalize(Selector),
Matches = mango_selector:match(NormSelector, Doc),
- IsDesign = case mango_doc:get_field(Doc, <<"_id">>) of
- <<"_design/", _/binary>> -> true;
- _ -> false
- end,
+ IsDesign =
+ case mango_doc:get_field(Doc, <<"_id">>) of
+ <<"_design/", _/binary>> -> true;
+ _ -> false
+ end,
Matches and not IsDesign.
-
get_text_field_list(IdxProps) ->
case couch_util:get_value(<<"fields">>, IdxProps) of
Fields when is_list(Fields) ->
@@ -309,16 +274,17 @@ get_text_field_list(IdxProps) ->
all_fields
end.
-
get_text_field_info({Props}) ->
Name = couch_util:get_value(<<"name">>, Props),
Type0 = couch_util:get_value(<<"type">>, Props),
- if not is_binary(Name) -> []; true ->
- Type = get_text_field_type(Type0),
- [iolist_to_binary([Name, ":", Type])]
+ if
+ not is_binary(Name) ->
+ [];
+ true ->
+ Type = get_text_field_type(Type0),
+ [iolist_to_binary([Name, ":", Type])]
end.
-
get_text_field_type(<<"number">>) ->
<<"number">>;
get_text_field_type(<<"boolean">>) ->
@@ -326,7 +292,6 @@ get_text_field_type(<<"boolean">>) ->
get_text_field_type(_) ->
<<"string">>.
-
make_text_field(TAcc, Type, Value) ->
FieldName = make_text_field_name(TAcc#tacc.path, Type),
Fields = TAcc#tacc.fields,
@@ -337,31 +302,34 @@ make_text_field(TAcc, Type, Value) ->
[]
end.
-
make_text_field_name([P | Rest], Type) ->
Parts = lists:reverse(Rest, [iolist_to_binary([P, ":", Type])]),
Escaped = [mango_util:lucene_escape_field(N) || N <- Parts],
iolist_to_binary(mango_util:join(".", Escaped)).
-
validate_index_info(IndexInfo) ->
- IdxTypes = case clouseau_rpc:connected() of
- true ->
- [mango_idx_view, mango_idx_text];
- false ->
- [mango_idx_view]
- end,
- Results = lists:foldl(fun(IdxType, Results0) ->
- try
- IdxType:validate_index_def(IndexInfo),
- [valid_index | Results0]
- catch _:_ ->
- [invalid_index | Results0]
- end
- end, [], IdxTypes),
+ IdxTypes =
+ case clouseau_rpc:connected() of
+ true ->
+ [mango_idx_view, mango_idx_text];
+ false ->
+ [mango_idx_view]
+ end,
+ Results = lists:foldl(
+ fun(IdxType, Results0) ->
+ try
+ IdxType:validate_index_def(IndexInfo),
+ [valid_index | Results0]
+ catch
+ _:_ ->
+ [invalid_index | Results0]
+ end
+ end,
+ [],
+ IdxTypes
+ ),
lists:member(valid_index, Results).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/mango/src/mango_opts.erl b/src/mango/src/mango_opts.erl
index 92c07f743..04fe5bbf8 100644
--- a/src/mango/src/mango_opts.erl
+++ b/src/mango/src/mango_opts.erl
@@ -39,10 +39,8 @@
default_limit/0
]).
-
-include("mango.hrl").
-
validate_idx_create({Props}) ->
Opts = [
{<<"index">>, [
@@ -81,7 +79,6 @@ validate_idx_create({Props}) ->
],
validate(Props, Opts).
-
validate_find({Props}) ->
Opts = [
{<<"selector">>, [
@@ -169,7 +166,6 @@ validate_find({Props}) ->
],
validate(Props, Opts).
-
validate_bulk_delete({Props}) ->
Opts = [
{<<"docids">>, [
@@ -185,7 +181,6 @@ validate_bulk_delete({Props}) ->
],
validate(Props, Opts).
-
validate(Props, Opts) ->
case mango_util:assert_ejson({Props}) of
true ->
@@ -202,13 +197,11 @@ validate(Props, Opts) ->
end,
{ok, Acc}.
-
is_string(Val) when is_binary(Val) ->
{ok, Val};
is_string(Else) ->
?MANGO_ERROR({invalid_string, Else}).
-
is_boolean(true) ->
{ok, true};
is_boolean(false) ->
@@ -216,19 +209,16 @@ is_boolean(false) ->
is_boolean(Else) ->
?MANGO_ERROR({invalid_boolean, Else}).
-
is_pos_integer(V) when is_integer(V), V > 0 ->
{ok, V};
is_pos_integer(Else) ->
?MANGO_ERROR({invalid_pos_integer, Else}).
-
is_non_neg_integer(V) when is_integer(V), V >= 0 ->
{ok, V};
is_non_neg_integer(Else) ->
?MANGO_ERROR({invalid_non_neg_integer, Else}).
-
is_object({Props}) ->
true = mango_util:assert_ejson({Props}),
{ok, {Props}};
@@ -236,27 +226,26 @@ is_object(Else) ->
?MANGO_ERROR({invalid_object, Else}).
is_ok_or_false(<<"ok">>) ->
- {ok, ok};
-is_ok_or_false(<<"false">>) -> % convenience
- {ok, false};
+ {ok, ok};
+% convenience
+is_ok_or_false(<<"false">>) ->
+ {ok, false};
is_ok_or_false(false) ->
- {ok, false};
+ {ok, false};
is_ok_or_false(Else) ->
- ?MANGO_ERROR({invalid_ok_or_false_value, Else}).
+ ?MANGO_ERROR({invalid_ok_or_false_value, Else}).
validate_idx_name(auto_name) ->
{ok, auto_name};
validate_idx_name(Else) ->
is_string(Else).
-
validate_selector({Props}) ->
Norm = mango_selector:normalize({Props}),
{ok, Norm};
validate_selector(Else) ->
?MANGO_ERROR({invalid_selector_json, Else}).
-
%% We re-use validate_use_index to make sure the index names are valid
validate_bulk_docs(Docs) when is_list(Docs) ->
lists:foreach(fun validate_use_index/1, Docs),
@@ -264,7 +253,6 @@ validate_bulk_docs(Docs) when is_list(Docs) ->
validate_bulk_docs(Else) ->
?MANGO_ERROR({invalid_bulk_docs, Else}).
-
validate_use_index(IndexName) when is_binary(IndexName) ->
case binary:split(IndexName, <<"/">>) of
[DesignId] ->
@@ -284,13 +272,13 @@ validate_use_index([]) ->
{ok, []};
validate_use_index([DesignId]) when is_binary(DesignId) ->
{ok, [DesignId]};
-validate_use_index([DesignId, ViewName])
- when is_binary(DesignId), is_binary(ViewName) ->
+validate_use_index([DesignId, ViewName]) when
+ is_binary(DesignId), is_binary(ViewName)
+->
{ok, [DesignId, ViewName]};
validate_use_index(Else) ->
?MANGO_ERROR({invalid_index_name, Else}).
-
validate_bookmark(null) ->
{ok, nil};
validate_bookmark(<<>>) ->
@@ -300,15 +288,12 @@ validate_bookmark(Bin) when is_binary(Bin) ->
validate_bookmark(Else) ->
?MANGO_ERROR({invalid_bookmark, Else}).
-
validate_sort(Value) ->
mango_sort:new(Value).
-
validate_fields(Value) ->
mango_fields:new(Value).
-
validate_partitioned(true) ->
{ok, true};
validate_partitioned(false) ->
@@ -318,14 +303,12 @@ validate_partitioned(db_default) ->
validate_partitioned(Else) ->
?MANGO_ERROR({invalid_partitioned_value, Else}).
-
validate_partition(<<>>) ->
{ok, <<>>};
validate_partition(Partition) ->
couch_partition:validate_partition(Partition),
{ok, Partition}.
-
validate_opts([], Props, Acc) ->
{Props, lists:reverse(Acc)};
validate_opts([{Name, Desc} | Rest], Props, Acc) ->
@@ -339,7 +322,6 @@ validate_opts([{Name, Desc} | Rest], Props, Acc) ->
validate_opts(Rest, Props, NewAcc)
end.
-
validate_opt(_Name, [], Value) ->
Value;
validate_opt(Name, Desc0, undefined) ->
@@ -372,6 +354,5 @@ validate_opt(Name, [{validator, Fun} | Rest], Value) ->
?MANGO_ERROR({invalid_value, Name, Value})
end.
-
default_limit() ->
config:get_integer("mango", "default_limit", 25).
diff --git a/src/mango/src/mango_selector.erl b/src/mango/src/mango_selector.erl
index fc6a6d1a7..be2616ff5 100644
--- a/src/mango/src/mango_selector.erl
+++ b/src/mango/src/mango_selector.erl
@@ -12,7 +12,6 @@
-module(mango_selector).
-
-export([
normalize/1,
match/2,
@@ -20,11 +19,9 @@
is_constant_field/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
% Validate and normalize each operator. This translates
% every selector operator into a consistent version that
% we can then rely on for all other selector functions.
@@ -48,7 +45,6 @@ normalize(Selector) ->
end,
{NProps}.
-
% Match a selector against a #doc{} or EJSON value.
% This assumes that the Selector has been normalized.
% Returns true or false.
@@ -56,14 +52,11 @@ match(Selector, D) ->
couch_stats:increment_counter([mango, evaluate_selector]),
match_int(Selector, D).
-
% An empty selector matches any value.
match_int({[]}, _) ->
true;
-
-match_int(Selector, #doc{body=Body}) ->
+match_int(Selector, #doc{body = Body}) ->
match(Selector, Body, fun mango_json:cmp/2);
-
match_int(Selector, {Props}) ->
match(Selector, {Props}, fun mango_json:cmp/2).
@@ -74,47 +67,38 @@ norm_ops({[{<<"$and">>, Args}]}) when is_list(Args) ->
{[{<<"$and">>, [norm_ops(A) || A <- Args]}]};
norm_ops({[{<<"$and">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$and', Arg});
-
norm_ops({[{<<"$or">>, Args}]}) when is_list(Args) ->
{[{<<"$or">>, [norm_ops(A) || A <- Args]}]};
norm_ops({[{<<"$or">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$or', Arg});
-
-norm_ops({[{<<"$not">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$not">>, {_} = Arg}]}) ->
{[{<<"$not">>, norm_ops(Arg)}]};
norm_ops({[{<<"$not">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$not', Arg});
-
norm_ops({[{<<"$nor">>, Args}]}) when is_list(Args) ->
{[{<<"$nor">>, [norm_ops(A) || A <- Args]}]};
norm_ops({[{<<"$nor">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$nor', Arg});
-
norm_ops({[{<<"$in">>, Args}]} = Cond) when is_list(Args) ->
Cond;
norm_ops({[{<<"$in">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$in', Arg});
-
norm_ops({[{<<"$nin">>, Args}]} = Cond) when is_list(Args) ->
Cond;
norm_ops({[{<<"$nin">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$nin', Arg});
-
norm_ops({[{<<"$exists">>, Arg}]} = Cond) when is_boolean(Arg) ->
Cond;
norm_ops({[{<<"$exists">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$exists', Arg});
-
norm_ops({[{<<"$type">>, Arg}]} = Cond) when is_binary(Arg) ->
Cond;
norm_ops({[{<<"$type">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$type', Arg});
-
norm_ops({[{<<"$mod">>, [D, R]}]} = Cond) when is_integer(D), is_integer(R) ->
Cond;
norm_ops({[{<<"$mod">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$mod', Arg});
-
norm_ops({[{<<"$regex">>, Regex}]} = Cond) when is_binary(Regex) ->
case re:compile(Regex) of
{ok, _} ->
@@ -122,45 +106,40 @@ norm_ops({[{<<"$regex">>, Regex}]} = Cond) when is_binary(Regex) ->
_ ->
?MANGO_ERROR({bad_arg, '$regex', Regex})
end;
-
norm_ops({[{<<"$all">>, Args}]}) when is_list(Args) ->
{[{<<"$all">>, Args}]};
norm_ops({[{<<"$all">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$all', Arg});
-
-norm_ops({[{<<"$elemMatch">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$elemMatch">>, {_} = Arg}]}) ->
{[{<<"$elemMatch">>, norm_ops(Arg)}]};
norm_ops({[{<<"$elemMatch">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$elemMatch', Arg});
-
-norm_ops({[{<<"$allMatch">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$allMatch">>, {_} = Arg}]}) ->
{[{<<"$allMatch">>, norm_ops(Arg)}]};
norm_ops({[{<<"$allMatch">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$allMatch', Arg});
-
-norm_ops({[{<<"$keyMapMatch">>, {_}=Arg}]}) ->
+norm_ops({[{<<"$keyMapMatch">>, {_} = Arg}]}) ->
{[{<<"$keyMapMatch">>, norm_ops(Arg)}]};
norm_ops({[{<<"$keyMapMatch">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$keyMapMatch', Arg});
-
norm_ops({[{<<"$size">>, Arg}]}) when is_integer(Arg), Arg >= 0 ->
{[{<<"$size">>, Arg}]};
norm_ops({[{<<"$size">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$size', Arg});
-
-norm_ops({[{<<"$text">>, Arg}]}) when is_binary(Arg); is_number(Arg);
- is_boolean(Arg) ->
+norm_ops({[{<<"$text">>, Arg}]}) when
+ is_binary(Arg);
+ is_number(Arg);
+ is_boolean(Arg)
+->
{[{<<"$default">>, {[{<<"$text">>, Arg}]}}]};
norm_ops({[{<<"$text">>, Arg}]}) ->
?MANGO_ERROR({bad_arg, '$text', Arg});
-
% Not technically an operator but we pass it through here
% so that this function accepts its own output. This exists
% so that $text can have a field name value which simplifies
% logic elsewhere.
norm_ops({[{<<"$default">>, _}]} = Selector) ->
Selector;
-
% Terminals where we can't perform any validation
% on the value because any value is acceptable.
norm_ops({[{<<"$lt">>, _}]} = Cond) ->
@@ -175,7 +154,6 @@ norm_ops({[{<<"$gte">>, _}]} = Cond) ->
Cond;
norm_ops({[{<<"$gt">>, _}]} = Cond) ->
Cond;
-
% Known but unsupported operators
norm_ops({[{<<"$where">>, _}]}) ->
?MANGO_ERROR({not_supported, '$where'});
@@ -187,24 +165,19 @@ norm_ops({[{<<"$near">>, _}]}) ->
?MANGO_ERROR({not_supported, '$near'});
norm_ops({[{<<"$nearSphere">>, _}]}) ->
?MANGO_ERROR({not_supported, '$nearSphere'});
-
% Unknown operator
-norm_ops({[{<<"$", _/binary>>=Op, _}]}) ->
+norm_ops({[{<<"$", _/binary>> = Op, _}]}) ->
?MANGO_ERROR({invalid_operator, Op});
-
% A {Field: Cond} pair
norm_ops({[{Field, Cond}]}) ->
{[{Field, norm_ops(Cond)}]};
-
% An implicit $and
norm_ops({[_, _ | _] = Props}) ->
{[{<<"$and">>, [norm_ops({[P]}) || P <- Props]}]};
-
% A bare value condition means equality
norm_ops(Value) ->
{[{<<"$eq">>, Value}]}.
-
% This takes a selector and normalizes all of the
% field names as far as possible. For instance:
%
@@ -233,52 +206,40 @@ norm_fields({[]}) ->
norm_fields(Selector) ->
norm_fields(Selector, <<>>).
-
% Operators where we can push the field names further
% down the operator tree
norm_fields({[{<<"$and">>, Args}]}, Path) ->
{[{<<"$and">>, [norm_fields(A, Path) || A <- Args]}]};
-
norm_fields({[{<<"$or">>, Args}]}, Path) ->
{[{<<"$or">>, [norm_fields(A, Path) || A <- Args]}]};
-
norm_fields({[{<<"$not">>, Arg}]}, Path) ->
{[{<<"$not">>, norm_fields(Arg, Path)}]};
-
norm_fields({[{<<"$nor">>, Args}]}, Path) ->
{[{<<"$nor">>, [norm_fields(A, Path) || A <- Args]}]};
-
% Fields where we can normalize fields in the
% operator arguments independently.
norm_fields({[{<<"$elemMatch">>, Arg}]}, Path) ->
Cond = {[{<<"$elemMatch">>, norm_fields(Arg)}]},
{[{Path, Cond}]};
-
norm_fields({[{<<"$allMatch">>, Arg}]}, Path) ->
Cond = {[{<<"$allMatch">>, norm_fields(Arg)}]},
{[{Path, Cond}]};
-
norm_fields({[{<<"$keyMapMatch">>, Arg}]}, Path) ->
Cond = {[{<<"$keyMapMatch">>, norm_fields(Arg)}]},
{[{Path, Cond}]};
-
-
% The text operator operates against the internal
% $default field. This also asserts that the $default
% field is at the root as well as that it only has
% a $text operator applied.
-norm_fields({[{<<"$default">>, {[{<<"$text">>, _Arg}]}}]}=Sel, <<>>) ->
+norm_fields({[{<<"$default">>, {[{<<"$text">>, _Arg}]}}]} = Sel, <<>>) ->
Sel;
norm_fields({[{<<"$default">>, _}]} = Selector, _) ->
?MANGO_ERROR({bad_field, Selector});
-
-
% Any other operator is a terminal below which no
% field names should exist. Set the path to this
% terminal and return it.
norm_fields({[{<<"$", _/binary>>, _}]} = Cond, Path) ->
{[{Path, Cond}]};
-
% We've found a field name. Append it to the path
% and skip this node as we unroll the stack as
% the full path will be further down the branch.
@@ -288,16 +249,13 @@ norm_fields({[{Field, Cond}]}, <<>>) ->
norm_fields(Cond, Field);
norm_fields({[{Field, Cond}]}, Path) ->
norm_fields(Cond, <<Path/binary, ".", Field/binary>>);
-
% An empty selector
norm_fields({[]}, Path) ->
{Path, {[]}};
-
% Else we have an invalid selector
norm_fields(BadSelector, _) ->
?MANGO_ERROR({bad_field, BadSelector}).
-
% Take all the negation operators and move the logic
% as far down the branch as possible. This does things
% like:
@@ -325,33 +283,25 @@ norm_fields(BadSelector, _) ->
% Operators that cause a negation
norm_negations({[{<<"$not">>, Arg}]}) ->
negate(Arg);
-
norm_negations({[{<<"$nor">>, Args}]}) ->
{[{<<"$and">>, [negate(A) || A <- Args]}]};
-
% Operators that we merely seek through as we look for
% negations.
norm_negations({[{<<"$and">>, Args}]}) ->
{[{<<"$and">>, [norm_negations(A) || A <- Args]}]};
-
norm_negations({[{<<"$or">>, Args}]}) ->
{[{<<"$or">>, [norm_negations(A) || A <- Args]}]};
-
norm_negations({[{<<"$elemMatch">>, Arg}]}) ->
{[{<<"$elemMatch">>, norm_negations(Arg)}]};
-
norm_negations({[{<<"$allMatch">>, Arg}]}) ->
{[{<<"$allMatch">>, norm_negations(Arg)}]};
-
norm_negations({[{<<"$keyMapMatch">>, Arg}]}) ->
{[{<<"$keyMapMatch">>, norm_negations(Arg)}]};
-
% All other conditions can't introduce negations anywhere
% further down the operator tree.
norm_negations(Cond) ->
Cond.
-
% Actually negate an expression. Make sure to read up
% on DeMorgan's laws if you're trying to read this, but
% in a nutshell:
@@ -368,20 +318,15 @@ norm_negations(Cond) ->
% norm_negations/1
negate({[{<<"$not">>, Arg}]}) ->
norm_negations(Arg);
-
negate({[{<<"$nor">>, Args}]}) ->
{[{<<"$or">>, [norm_negations(A) || A <- Args]}]};
-
% DeMorgan Negations
negate({[{<<"$and">>, Args}]}) ->
{[{<<"$or">>, [negate(A) || A <- Args]}]};
-
negate({[{<<"$or">>, Args}]}) ->
{[{<<"$and">>, [negate(A) || A <- Args]}]};
-
negate({[{<<"$default">>, _}]} = Arg) ->
?MANGO_ERROR({bad_arg, '$not', Arg});
-
% Negating comparison operators is straight forward
negate({[{<<"$lt">>, Arg}]}) ->
{[{<<"$gte">>, Arg}]};
@@ -399,22 +344,18 @@ negate({[{<<"$in">>, Args}]}) ->
{[{<<"$nin">>, Args}]};
negate({[{<<"$nin">>, Args}]}) ->
{[{<<"$in">>, Args}]};
-
% We can also trivially negate the exists operator
negate({[{<<"$exists">>, Arg}]}) ->
{[{<<"$exists">>, not Arg}]};
-
% Anything else we have to just terminate the
% negation by reinserting the negation operator
negate({[{<<"$", _/binary>>, _}]} = Cond) ->
{[{<<"$not">>, Cond}]};
-
% Finally, negating a field just means we negate its
% condition.
negate({[{Field, Cond}]}) ->
{[{Field, negate(Cond)}]}.
-
% We need to treat an empty array as always true. This will be applied
% for $or, $in, $all, $nin as well.
match({[{<<"$and">>, []}]}, _, _) ->
@@ -422,16 +363,13 @@ match({[{<<"$and">>, []}]}, _, _) ->
match({[{<<"$and">>, Args}]}, Value, Cmp) ->
Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end,
lists:all(Pred, Args);
-
match({[{<<"$or">>, []}]}, _, _) ->
true;
match({[{<<"$or">>, Args}]}, Value, Cmp) ->
Pred = fun(SubSel) -> match(SubSel, Value, Cmp) end,
lists:any(Pred, Args);
-
match({[{<<"$not">>, Arg}]}, Value, Cmp) ->
not match(Arg, Value, Cmp);
-
match({[{<<"$all">>, []}]}, _, _) ->
false;
% All of the values in Args must exist in Values or
@@ -440,16 +378,16 @@ match({[{<<"$all">>, []}]}, _, _) ->
match({[{<<"$all">>, Args}]}, Values, _Cmp) when is_list(Values) ->
Pred = fun(A) -> lists:member(A, Values) end,
HasArgs = lists:all(Pred, Args),
- IsArgs = case Args of
- [A] when is_list(A) ->
- A == Values;
- _ ->
- false
- end,
+ IsArgs =
+ case Args of
+ [A] when is_list(A) ->
+ A == Values;
+ _ ->
+ false
+ end,
HasArgs orelse IsArgs;
match({[{<<"$all">>, _Args}]}, _Values, _Cmp) ->
false;
-
%% This is for $elemMatch, $allMatch, and possibly $in because of our normalizer.
%% A selector such as {"field_name": {"$elemMatch": {"$gte": 80, "$lt": 85}}}
%% gets normalized to:
@@ -464,17 +402,19 @@ match({[{<<"$all">>, _Args}]}, _Values, _Cmp) ->
%% So we filter out the <<>>.
match({[{<<>>, Arg}]}, Values, Cmp) ->
match(Arg, Values, Cmp);
-
% Matches when any element in values matches the
% sub-selector Arg.
match({[{<<"$elemMatch">>, Arg}]}, Values, Cmp) when is_list(Values) ->
try
- lists:foreach(fun(V) ->
- case match(Arg, V, Cmp) of
- true -> throw(matched);
- _ -> ok
- end
- end, Values),
+ lists:foreach(
+ fun(V) ->
+ case match(Arg, V, Cmp) of
+ true -> throw(matched);
+ _ -> ok
+ end
+ end,
+ Values
+ ),
false
catch
throw:matched ->
@@ -484,17 +424,19 @@ match({[{<<"$elemMatch">>, Arg}]}, Values, Cmp) when is_list(Values) ->
end;
match({[{<<"$elemMatch">>, _Arg}]}, _Value, _Cmp) ->
false;
-
% Matches when all elements in values match the
% sub-selector Arg.
match({[{<<"$allMatch">>, Arg}]}, [_ | _] = Values, Cmp) ->
try
- lists:foreach(fun(V) ->
- case match(Arg, V, Cmp) of
- false -> throw(unmatched);
- _ -> ok
- end
- end, Values),
+ lists:foreach(
+ fun(V) ->
+ case match(Arg, V, Cmp) of
+ false -> throw(unmatched);
+ _ -> ok
+ end
+ end,
+ Values
+ ),
true
catch
_:_ ->
@@ -502,17 +444,19 @@ match({[{<<"$allMatch">>, Arg}]}, [_ | _] = Values, Cmp) ->
end;
match({[{<<"$allMatch">>, _Arg}]}, _Value, _Cmp) ->
false;
-
% Matches when any key in the map value matches the
% sub-selector Arg.
match({[{<<"$keyMapMatch">>, Arg}]}, Value, Cmp) when is_tuple(Value) ->
try
- lists:foreach(fun(V) ->
- case match(Arg, V, Cmp) of
- true -> throw(matched);
- _ -> ok
- end
- end, [Key || {Key, _} <- element(1, Value)]),
+ lists:foreach(
+ fun(V) ->
+ case match(Arg, V, Cmp) of
+ true -> throw(matched);
+ _ -> ok
+ end
+ end,
+ [Key || {Key, _} <- element(1, Value)]
+ ),
false
catch
throw:matched ->
@@ -522,7 +466,6 @@ match({[{<<"$keyMapMatch">>, Arg}]}, Value, Cmp) when is_tuple(Value) ->
end;
match({[{<<"$keyMapMatch">>, _Arg}]}, _Value, _Cmp) ->
false;
-
% Our comparison operators are fairly straight forward
match({[{<<"$lt">>, Arg}]}, Value, Cmp) ->
Cmp(Value, Arg) < 0;
@@ -536,67 +479,62 @@ match({[{<<"$gte">>, Arg}]}, Value, Cmp) ->
Cmp(Value, Arg) >= 0;
match({[{<<"$gt">>, Arg}]}, Value, Cmp) ->
Cmp(Value, Arg) > 0;
-
match({[{<<"$in">>, []}]}, _, _) ->
false;
-match({[{<<"$in">>, Args}]}, Values, Cmp) when is_list(Values)->
+match({[{<<"$in">>, Args}]}, Values, Cmp) when is_list(Values) ->
Pred = fun(Arg) ->
- lists:foldl(fun(Value,Match) ->
- (Cmp(Value, Arg) == 0) or Match
- end, false, Values)
+ lists:foldl(
+ fun(Value, Match) ->
+ (Cmp(Value, Arg) == 0) or Match
+ end,
+ false,
+ Values
+ )
end,
lists:any(Pred, Args);
match({[{<<"$in">>, Args}]}, Value, Cmp) ->
Pred = fun(Arg) -> Cmp(Value, Arg) == 0 end,
lists:any(Pred, Args);
-
match({[{<<"$nin">>, []}]}, _, _) ->
true;
-match({[{<<"$nin">>, Args}]}, Values, Cmp) when is_list(Values)->
+match({[{<<"$nin">>, Args}]}, Values, Cmp) when is_list(Values) ->
not match({[{<<"$in">>, Args}]}, Values, Cmp);
match({[{<<"$nin">>, Args}]}, Value, Cmp) ->
Pred = fun(Arg) -> Cmp(Value, Arg) /= 0 end,
lists:all(Pred, Args);
-
% This logic is a bit subtle. Basically, if value is
% not undefined, then it exists.
match({[{<<"$exists">>, ShouldExist}]}, Value, _Cmp) ->
Exists = Value /= undefined,
ShouldExist andalso Exists;
-
match({[{<<"$type">>, Arg}]}, Value, _Cmp) when is_binary(Arg) ->
Arg == mango_json:type(Value);
-
match({[{<<"$mod">>, [D, R]}]}, Value, _Cmp) when is_integer(Value) ->
Value rem D == R;
match({[{<<"$mod">>, _}]}, _Value, _Cmp) ->
false;
-
match({[{<<"$regex">>, Regex}]}, Value, _Cmp) when is_binary(Value) ->
try
match == re:run(Value, Regex, [{capture, none}])
- catch _:_ ->
- false
+ catch
+ _:_ ->
+ false
end;
match({[{<<"$regex">>, _}]}, _Value, _Cmp) ->
false;
-
match({[{<<"$size">>, Arg}]}, Values, _Cmp) when is_list(Values) ->
length(Values) == Arg;
match({[{<<"$size">>, _}]}, _Value, _Cmp) ->
false;
-
% We don't have any choice but to believe that the text
% index returned valid matches
match({[{<<"$default">>, _}]}, _Value, _Cmp) ->
true;
-
% All other operators are internal assertion errors for
% matching because we either should've removed them during
% normalization or something else broke.
-match({[{<<"$", _/binary>>=Op, _}]}, _, _) ->
+match({[{<<"$", _/binary>> = Op, _}]}, _, _) ->
?MANGO_ERROR({invalid_operator, Op});
-
% We need to traverse value to find field. The call to
% mango_doc:get_field/2 may return either not_found or
% bad_path in which case matching fails.
@@ -613,11 +551,9 @@ match({[{Field, Cond}]}, Value, Cmp) ->
SubValue ->
match(Cond, SubValue, Cmp)
end;
-
match({[_, _ | _] = _Props} = Sel, _Value, _Cmp) ->
erlang:error({unnormalized_selector, Sel}).
-
% Returns true if Selector requires all
% fields in RequiredFields to exist in any matching documents.
@@ -634,48 +570,48 @@ has_required_fields(Selector, RequiredFields) ->
% Empty selector
has_required_fields_int({[]}, Remainder) ->
Remainder;
-
% No more required fields
has_required_fields_int(_, []) ->
[];
-
% No more selector
has_required_fields_int([], Remainder) ->
Remainder;
-
has_required_fields_int(Selector, RequiredFields) when not is_list(Selector) ->
has_required_fields_int([Selector], RequiredFields);
-
% We can "see" through $and operator. Iterate
% through the list of child operators.
-has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields)
- when is_list(Args) ->
+has_required_fields_int([{[{<<"$and">>, Args}]}], RequiredFields) when
+ is_list(Args)
+->
has_required_fields_int(Args, RequiredFields);
-
% We can "see" through $or operator. Required fields
% must be covered by all children.
-has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields)
- when is_list(Args) ->
- Remainder0 = lists:foldl(fun(Arg, Acc) ->
- % for each child test coverage against the full
- % set of required fields
- Remainder = has_required_fields_int(Arg, RequiredFields),
-
- % collect the remaining fields across all children
- Acc ++ Remainder
- end, [], Args),
+has_required_fields_int([{[{<<"$or">>, Args}]} | Rest], RequiredFields) when
+ is_list(Args)
+->
+ Remainder0 = lists:foldl(
+ fun(Arg, Acc) ->
+ % for each child test coverage against the full
+ % set of required fields
+ Remainder = has_required_fields_int(Arg, RequiredFields),
+
+ % collect the remaining fields across all children
+ Acc ++ Remainder
+ end,
+ [],
+ Args
+ ),
% remove duplicate fields
Remainder1 = lists:usort(Remainder0),
has_required_fields_int(Rest, Remainder1);
-
% Handle $and operator where it has peers. Required fields
% can be covered by any child.
-has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields)
- when is_list(Args) ->
+has_required_fields_int([{[{<<"$and">>, Args}]} | Rest], RequiredFields) when
+ is_list(Args)
+->
Remainder = has_required_fields_int(Args, RequiredFields),
has_required_fields_int(Rest, Remainder);
-
has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) ->
case Cond of
% $exists:false is a special case - this is the only operator
@@ -686,30 +622,22 @@ has_required_fields_int([{[{Field, Cond}]} | Rest], RequiredFields) ->
has_required_fields_int(Rest, lists:delete(Field, RequiredFields))
end.
-
% Returns true if a field in the selector is a constant value e.g. {a: {$eq: 1}}
is_constant_field({[]}, _Field) ->
false;
-
is_constant_field(Selector, Field) when not is_list(Selector) ->
is_constant_field([Selector], Field);
-
is_constant_field([], _Field) ->
false;
-
is_constant_field([{[{<<"$and">>, Args}]}], Field) when is_list(Args) ->
lists:any(fun(Arg) -> is_constant_field(Arg, Field) end, Args);
-
is_constant_field([{[{<<"$and">>, Args}]}], Field) ->
is_constant_field(Args, Field);
-
is_constant_field([{[{Field, {[{Cond, _Val}]}}]} | _Rest], Field) ->
Cond =:= <<"$eq">>;
-
is_constant_field([{[{_UnMatched, _}]} | Rest], Field) ->
is_constant_field(Rest, Field).
-
%%%%%%%% module tests below %%%%%%%%
-ifdef(TEST).
@@ -721,42 +649,49 @@ is_constant_field_basic_test() ->
?assertEqual(true, is_constant_field(Selector, Field)).
is_constant_field_basic_two_test() ->
- Selector = normalize({[{<<"$and">>,
- [
- {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
- {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
- ]
- }]}),
+ Selector = normalize(
+ {[
+ {<<"$and">>, [
+ {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
+ {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]}
+ ),
Field = <<"cars">>,
?assertEqual(true, is_constant_field(Selector, Field)).
is_constant_field_not_eq_test() ->
- Selector = normalize({[{<<"$and">>,
- [
- {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
- {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
- ]
- }]}),
+ Selector = normalize(
+ {[
+ {<<"$and">>, [
+ {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
+ {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]}
+ ),
Field = <<"age">>,
?assertEqual(false, is_constant_field(Selector, Field)).
is_constant_field_missing_field_test() ->
- Selector = normalize({[{<<"$and">>,
- [
- {[{<<"cars">>,{[{<<"$eq">>,<<"2">>}]}}]},
- {[{<<"age">>,{[{<<"$gt">>,10}]}}]}
- ]
- }]}),
+ Selector = normalize(
+ {[
+ {<<"$and">>, [
+ {[{<<"cars">>, {[{<<"$eq">>, <<"2">>}]}}]},
+ {[{<<"age">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]}
+ ),
Field = <<"wrong">>,
?assertEqual(false, is_constant_field(Selector, Field)).
is_constant_field_or_field_test() ->
- Selector = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
Field = <<"A">>,
?assertEqual(false, is_constant_field(Normalized, Field)).
@@ -767,37 +702,40 @@ is_constant_field_empty_selector_test() ->
?assertEqual(false, is_constant_field(Selector, Field)).
is_constant_nested_and_test() ->
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$and">>,
- [
- {[{<<"B">>, {[{<<"$gt">>,10}]}}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$and">>, [
+ {[{<<"B">>, {[{<<"$gt">>, 10}]}}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, is_constant_field(Normalized, <<"A">>)),
?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
is_constant_combined_or_and_equals_test() ->
- Selector = {[{<<"A">>, "foo"},
- {<<"$or">>,
- [
- {[{<<"B">>, <<"bar">>}]},
- {[{<<"B">>, <<"baz">>}]}
- ]
- },
- {<<"C">>, "qux"}
- ]},
+ Selector =
+ {[
+ {<<"A">>, "foo"},
+ {<<"$or">>, [
+ {[{<<"B">>, <<"bar">>}]},
+ {[{<<"B">>, <<"baz">>}]}
+ ]},
+ {<<"C">>, "qux"}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, is_constant_field(Normalized, <<"C">>)),
?assertEqual(false, is_constant_field(Normalized, <<"B">>)).
@@ -822,202 +760,225 @@ has_required_fields_empty_selector_test() ->
has_required_fields_exists_false_test() ->
RequiredFields = [<<"A">>],
- Selector = {[{<<"A">>,{[{<<"$exists">>, false}]}}]},
+ Selector = {[{<<"A">>, {[{<<"$exists">>, false}]}}]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
has_required_fields_and_true_test() ->
RequiredFields = [<<"A">>],
- Selector = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_nested_and_true_test() ->
RequiredFields = [<<"A">>, <<"B">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$and">>,
- [
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$and">>, [
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_and_false_test() ->
RequiredFields = [<<"A">>, <<"C">>],
- Selector = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_false_test() ->
RequiredFields = [<<"A">>],
- Selector = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_true_test() ->
RequiredFields = [<<"A">>, <<"B">>, <<"C">>],
- Selector = {[{<<"A">>, "foo"},
- {<<"$or">>,
- [
- {[{<<"B">>, <<"bar">>}]},
- {[{<<"B">>, <<"baz">>}]}
- ]
- },
- {<<"C">>, "qux"}
- ]},
+ Selector =
+ {[
+ {<<"A">>, "foo"},
+ {<<"$or">>, [
+ {[{<<"B">>, <<"bar">>}]},
+ {[{<<"B">>, <<"baz">>}]}
+ ]},
+ {<<"C">>, "qux"}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_and_nested_or_true_test() ->
RequiredFields = [<<"A">>, <<"B">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"B">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"B">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)),
- SelectorReverse = {[{<<"$and">>,
- [
- Selector2,
- Selector1
- ]
- }]},
+ SelectorReverse =
+ {[
+ {<<"$and">>, [
+ Selector2,
+ Selector1
+ ]}
+ ]},
NormalizedReverse = normalize(SelectorReverse),
?assertEqual(true, has_required_fields(NormalizedReverse, RequiredFields)).
has_required_fields_and_nested_or_false_test() ->
RequiredFields = [<<"A">>, <<"B">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]},
- {[{<<"B">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$and">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]},
+ {[{<<"B">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$and">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)),
- SelectorReverse = {[{<<"$and">>,
- [
- Selector2,
- Selector1
- ]
- }]},
+ SelectorReverse =
+ {[
+ {<<"$and">>, [
+ Selector2,
+ Selector1
+ ]}
+ ]},
NormalizedReverse = normalize(SelectorReverse),
?assertEqual(false, has_required_fields(NormalizedReverse, RequiredFields)).
has_required_fields_or_nested_and_true_test() ->
RequiredFields = [<<"A">>],
- Selector1 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$and">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector = {[{<<"$or">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$and">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_nested_or_true_test() ->
RequiredFields = [<<"A">>],
- Selector1 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"bar">>}]}
- ]
- }]},
- Selector = {[{<<"$or">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"bar">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(true, has_required_fields(Normalized, RequiredFields)).
has_required_fields_or_nested_or_false_test() ->
RequiredFields = [<<"A">>],
- Selector1 = {[{<<"$or">>,
- [
- {[{<<"A">>, <<"foo">>}]}
- ]
- }]},
- Selector2 = {[{<<"$or">>,
- [
- {[{<<"B">>, <<"bar">>}]}
- ]
- }]},
- Selector = {[{<<"$or">>,
- [
- Selector1,
- Selector2
- ]
- }]},
+ Selector1 =
+ {[
+ {<<"$or">>, [
+ {[{<<"A">>, <<"foo">>}]}
+ ]}
+ ]},
+ Selector2 =
+ {[
+ {<<"$or">>, [
+ {[{<<"B">>, <<"bar">>}]}
+ ]}
+ ]},
+ Selector =
+ {[
+ {<<"$or">>, [
+ Selector1,
+ Selector2
+ ]}
+ ]},
Normalized = normalize(Selector),
?assertEqual(false, has_required_fields(Normalized, RequiredFields)).
diff --git a/src/mango/src/mango_selector_text.erl b/src/mango/src/mango_selector_text.erl
index b3b61ff26..aaa1e3329 100644
--- a/src/mango/src/mango_selector_text.erl
+++ b/src/mango/src/mango_selector_text.erl
@@ -12,7 +12,6 @@
-module(mango_selector_text).
-
-export([
convert/1,
convert/2,
@@ -20,20 +19,16 @@
append_sort_type/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
%% Regex for <<"\\.">>
-define(PERIOD, "\\.").
-
convert(Object) ->
TupleTree = convert([], Object),
iolist_to_binary(to_query(TupleTree)).
-
convert(Path, {[{<<"$and">>, Args}]}) ->
Parts = [convert(Path, Arg) || Arg <- Args],
{op_and, Parts};
@@ -45,12 +40,10 @@ convert(Path, {[{<<"$not">>, Arg}]}) ->
convert(Path, {[{<<"$default">>, Arg}]}) ->
{op_field, {_, Query}} = convert(Path, Arg),
{op_default, Query};
-
% The $text operator specifies a Lucene syntax query
% so we just pull it in directly.
convert(Path, {[{<<"$text">>, Query}]}) when is_binary(Query) ->
{op_field, {make_field(Path, Query), value_str(Query)}};
-
% The MongoDB docs for $all are super confusing and read more
% like they screwed up the implementation of this operator
% and then just documented it as a feature.
@@ -68,15 +61,14 @@ convert(Path, {[{<<"$all">>, Args}]}) ->
% that means we just need to search for each value in
% Path.[] and Path.[].[] and rely on our filtering to limit
% the results properly.
- Fields1 = convert(Path, {[{<<"$eq">> , Values}]}),
- Fields2 = convert([<<"[]">>| Path], {[{<<"$eq">> , Values}]}),
+ Fields1 = convert(Path, {[{<<"$eq">>, Values}]}),
+ Fields2 = convert([<<"[]">> | Path], {[{<<"$eq">>, Values}]}),
{op_or, [Fields1, Fields2]};
_ ->
% Otherwise the $all operator is equivalent to an $and
% operator so we treat it as such.
convert([<<"[]">> | Path], {[{<<"$and">>, Args}]})
end;
-
% The $elemMatch Lucene query is not an exact translation
% as we can't enforce that the matches are all for the same
% item in an array. We just rely on the final selector match
@@ -85,18 +77,22 @@ convert(Path, {[{<<"$all">>, Args}]}) ->
% say this has to match against an array.
convert(Path, {[{<<"$elemMatch">>, Arg}]}) ->
convert([<<"[]">> | Path], Arg);
-
convert(Path, {[{<<"$allMatch">>, Arg}]}) ->
convert([<<"[]">> | Path], Arg);
-
% Our comparison operators are fairly straight forward
-convert(Path, {[{<<"$lt">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null ->
+convert(Path, {[{<<"$lt">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$lt">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(lt, Arg)}};
-convert(Path, {[{<<"$lte">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null->
+convert(Path, {[{<<"$lte">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$lte">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(lte, Arg)}};
@@ -115,66 +111,64 @@ convert(Path, {[{<<"$eq">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), value_str(Arg)}};
convert(Path, {[{<<"$ne">>, Arg}]}) ->
{op_not, {field_exists_query(Path), convert(Path, {[{<<"$eq">>, Arg}]})}};
-convert(Path, {[{<<"$gte">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null ->
+convert(Path, {[{<<"$gte">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$gte">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(gte, Arg)}};
-convert(Path, {[{<<"$gt">>, Arg}]}) when is_list(Arg); is_tuple(Arg);
- Arg =:= null->
+convert(Path, {[{<<"$gt">>, Arg}]}) when
+ is_list(Arg);
+ is_tuple(Arg);
+ Arg =:= null
+->
field_exists_query(Path);
convert(Path, {[{<<"$gt">>, Arg}]}) ->
{op_field, {make_field(Path, Arg), range(gt, Arg)}};
-
convert(Path, {[{<<"$in">>, Args}]}) ->
{op_or, convert_in(Path, Args)};
-
convert(Path, {[{<<"$nin">>, Args}]}) ->
{op_not, {field_exists_query(Path), convert(Path, {[{<<"$in">>, Args}]})}};
-
convert(Path, {[{<<"$exists">>, ShouldExist}]}) ->
FieldExists = field_exists_query(Path),
case ShouldExist of
true -> FieldExists;
false -> {op_not, {FieldExists, false}}
end;
-
% We're not checking the actual type here, just looking for
% anything that has a possibility of matching by checking
% for the field name. We use the same logic for $exists on
% the actual query.
convert(Path, {[{<<"$type">>, _}]}) ->
field_exists_query(Path);
-
convert(Path, {[{<<"$mod">>, _}]}) ->
field_exists_query(Path, "number");
-
% The lucene regular expression engine does not use java's regex engine but
% instead a custom implementation. The syntax is therefore different, so we do
% would get different behavior than our view indexes. To be consistent, we will
% simply return docs for fields that exist and then run our match filter.
convert(Path, {[{<<"$regex">>, _}]}) ->
field_exists_query(Path, "string");
-
convert(Path, {[{<<"$size">>, Arg}]}) ->
{op_field, {make_field([<<"[]">> | Path], length), value_str(Arg)}};
-
% All other operators are internal assertion errors for
% matching because we either should've removed them during
% normalization or something else broke.
-convert(_Path, {[{<<"$", _/binary>>=Op, _}]}) ->
+convert(_Path, {[{<<"$", _/binary>> = Op, _}]}) ->
?MANGO_ERROR({invalid_operator, Op});
-
% We've hit a field name specifier. Check if the field name is accessing
% arrays. Convert occurrences of element position references to .[]. Then we
% need to break the name into path parts and continue our conversion.
convert(Path, {[{Field0, Cond}]}) ->
- {ok, PP0} = case Field0 of
- <<>> ->
- {ok, []};
- _ ->
- mango_util:parse_field(Field0)
- end,
+ {ok, PP0} =
+ case Field0 of
+ <<>> ->
+ {ok, []};
+ _ ->
+ mango_util:parse_field(Field0)
+ end,
% Later on, we perform a lucene_escape_user call on the
% final Path, which calls parse_field again. Calling the function
% twice converts <<"a\\.b">> to [<<"a">>,<<"b">>]. This leads to
@@ -182,8 +176,15 @@ convert(Path, {[{Field0, Cond}]}) ->
% our escaping mechanism, we simply revert this first parse_field
% effect and replace instances of "." to "\\.".
MP = mango_util:cached_re(mango_period, ?PERIOD),
- PP1 = [re:replace(P, MP, <<"\\\\.">>,
- [global,{return,binary}]) || P <- PP0],
+ PP1 = [
+ re:replace(
+ P,
+ MP,
+ <<"\\\\.">>,
+ [global, {return, binary}]
+ )
+ || P <- PP0
+ ],
{PP2, HasInteger} = replace_array_indexes(PP1, [], false),
NewPath = PP2 ++ Path,
case HasInteger of
@@ -195,101 +196,88 @@ convert(Path, {[{Field0, Cond}]}) ->
false ->
convert(NewPath, Cond)
end;
-
%% For $in
convert(Path, Val) when is_binary(Val); is_number(Val); is_boolean(Val) ->
{op_field, {make_field(Path, Val), value_str(Val)}};
-
% Anything else is a bad selector.
convert(_Path, {Props} = Sel) when length(Props) > 1 ->
erlang:error({unnormalized_selector, Sel}).
-
to_query_nested(Args) ->
QueryArgs = lists:map(fun to_query/1, Args),
% removes empty queries that result from selectors with empty arrays
FilterFun = fun(A) -> A =/= [] andalso A =/= "()" end,
lists:filter(FilterFun, QueryArgs).
-
to_query({op_and, []}) ->
[];
-
to_query({op_and, Args}) when is_list(Args) ->
case to_query_nested(Args) of
[] -> [];
- QueryArgs -> ["(", mango_util:join(<<" AND ">>, QueryArgs), ")"]
+ QueryArgs -> ["(", mango_util:join(<<" AND ">>, QueryArgs), ")"]
end;
-
to_query({op_or, []}) ->
[];
-
to_query({op_or, Args}) when is_list(Args) ->
case to_query_nested(Args) of
[] -> [];
QueryArgs -> ["(", mango_util:join(" OR ", QueryArgs), ")"]
end;
-
to_query({op_not, {ExistsQuery, Arg}}) when is_tuple(Arg) ->
case to_query(Arg) of
[] -> ["(", to_query(ExistsQuery), ")"];
Query -> ["(", to_query(ExistsQuery), " AND NOT (", Query, "))"]
end;
-
%% For $exists:false
to_query({op_not, {ExistsQuery, false}}) ->
["($fieldnames:/.*/ ", " AND NOT (", to_query(ExistsQuery), "))"];
-
to_query({op_insert, Arg}) when is_binary(Arg) ->
["(", Arg, ")"];
-
%% We escape : and / for now for values and all lucene chars for fieldnames
%% This needs to be resolved.
to_query({op_field, {Name, Value}}) ->
NameBin = iolist_to_binary(Name),
["(", mango_util:lucene_escape_user(NameBin), ":", Value, ")"];
-
%% This is for indexable_fields
to_query({op_null, {Name, Value}}) ->
NameBin = iolist_to_binary(Name),
["(", mango_util:lucene_escape_user(NameBin), ":", Value, ")"];
-
to_query({op_fieldname, {Name, Wildcard}}) ->
NameBin = iolist_to_binary(Name),
["($fieldnames:", mango_util:lucene_escape_user(NameBin), Wildcard, ")"];
-
to_query({op_default, Value}) ->
["($default:", Value, ")"].
-
%% We match on fieldname and fieldname.[]
convert_in(Path, Args) ->
Path0 = [<<"[]">> | Path],
- lists:map(fun(Arg) ->
- case Arg of
- {Object} ->
- Parts = lists:map(fun (SubObject) ->
- Fields1 = convert(Path, {[SubObject]}),
- Fields2 = convert(Path0, {[SubObject]}),
+ lists:map(
+ fun(Arg) ->
+ case Arg of
+ {Object} ->
+ Parts = lists:map(
+ fun(SubObject) ->
+ Fields1 = convert(Path, {[SubObject]}),
+ Fields2 = convert(Path0, {[SubObject]}),
+ {op_or, [Fields1, Fields2]}
+ end,
+ Object
+ ),
+ {op_or, Parts};
+ SingleVal ->
+ Fields1 = {op_field, {make_field(Path, SingleVal), value_str(SingleVal)}},
+ Fields2 = {op_field, {make_field(Path0, SingleVal), value_str(SingleVal)}},
{op_or, [Fields1, Fields2]}
- end, Object),
- {op_or, Parts};
- SingleVal ->
- Fields1 = {op_field, {make_field(Path, SingleVal),
- value_str(SingleVal)}},
- Fields2 = {op_field, {make_field(Path0, SingleVal),
- value_str(SingleVal)}},
- {op_or, [Fields1, Fields2]}
- end
- end, Args).
-
+ end
+ end,
+ Args
+ ).
make_field(Path, length) ->
[path_str(Path), <<":length">>];
make_field(Path, Arg) ->
[path_str(Path), <<":">>, type_str(Arg)].
-
range(lt, Arg) ->
Min = get_range(min, Arg),
[<<"[", Min/binary, " TO ">>, value_str(Arg), <<"}">>];
@@ -312,7 +300,6 @@ get_range(max, Arg) when is_number(Arg) ->
get_range(max, _Arg) ->
<<"\u0x10FFFF">>.
-
field_exists_query(Path) ->
% We specify two here for :* and .* so that we don't incorrectly
% match a path foo.name against foo.name_first (if were to just
@@ -326,15 +313,12 @@ field_exists_query(Path) ->
],
{op_or, Parts}.
-
field_exists_query(Path, Type) ->
{op_fieldname, {[path_str(Path), ":"], Type}}.
-
path_str(Path) ->
path_str(Path, []).
-
path_str([], Acc) ->
Acc;
path_str([Part], Acc) ->
@@ -350,7 +334,6 @@ path_str([Part | Rest], Acc) ->
path_str(Rest, [<<".">>, Part | Acc])
end.
-
type_str(Value) when is_number(Value) ->
<<"number">>;
type_str(Value) when is_boolean(Value) ->
@@ -360,7 +343,6 @@ type_str(Value) when is_binary(Value) ->
type_str(null) ->
<<"null">>.
-
value_str(Value) when is_binary(Value) ->
case mango_util:is_number_string(Value) of
true ->
@@ -380,7 +362,6 @@ value_str(false) ->
value_str(null) ->
<<"true">>.
-
append_sort_type(RawSortField, Selector) ->
EncodeField = mango_util:lucene_escape_user(RawSortField),
String = mango_util:has_suffix(EncodeField, <<"_3astring">>),
@@ -395,7 +376,6 @@ append_sort_type(RawSortField, Selector) ->
<<EncodeField/binary, Type/binary>>
end.
-
get_sort_type(Field, Selector) ->
Types = get_sort_types(Field, Selector, []),
case lists:usort(Types) of
@@ -404,35 +384,40 @@ get_sort_type(Field, Selector) ->
_ -> ?MANGO_ERROR({text_sort_error, Field})
end.
-
-get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc)
- when is_binary(Cond) ->
+get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc) when
+ is_binary(Cond)
+->
[str | Acc];
-
-get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc)
- when is_number(Cond) ->
+get_sort_types(Field, {[{Field, {[{<<"$", _/binary>>, Cond}]}}]}, Acc) when
+ is_number(Cond)
+->
[num | Acc];
-
get_sort_types(Field, {[{_, Cond}]}, Acc) when is_list(Cond) ->
- lists:foldl(fun(Arg, InnerAcc) ->
- get_sort_types(Field, Arg, InnerAcc)
- end, Acc, Cond);
-
-get_sort_types(Field, {[{_, Cond}]}, Acc) when is_tuple(Cond)->
+ lists:foldl(
+ fun(Arg, InnerAcc) ->
+ get_sort_types(Field, Arg, InnerAcc)
+ end,
+ Acc,
+ Cond
+ );
+get_sort_types(Field, {[{_, Cond}]}, Acc) when is_tuple(Cond) ->
get_sort_types(Field, Cond, Acc);
-
-get_sort_types(_Field, _, Acc) ->
+get_sort_types(_Field, _, Acc) ->
Acc.
-
replace_array_indexes([], NewPartsAcc, HasIntAcc) ->
{NewPartsAcc, HasIntAcc};
replace_array_indexes([Part | Rest], NewPartsAcc, HasIntAcc) ->
- {NewPart, HasInt} = try
- _ = list_to_integer(binary_to_list(Part)),
- {<<"[]">>, true}
- catch _:_ ->
- {Part, false}
- end,
- replace_array_indexes(Rest, [NewPart | NewPartsAcc],
- HasInt or HasIntAcc).
+ {NewPart, HasInt} =
+ try
+ _ = list_to_integer(binary_to_list(Part)),
+ {<<"[]">>, true}
+ catch
+ _:_ ->
+ {Part, false}
+ end,
+ replace_array_indexes(
+ Rest,
+ [NewPart | NewPartsAcc],
+ HasInt or HasIntAcc
+ ).
diff --git a/src/mango/src/mango_sort.erl b/src/mango/src/mango_sort.erl
index 17249c297..808b6e7f2 100644
--- a/src/mango/src/mango_sort.erl
+++ b/src/mango/src/mango_sort.erl
@@ -19,10 +19,8 @@
directions/1
]).
-
-include("mango.hrl").
-
new(Fields) when is_list(Fields) ->
Sort = {[sort_field(Field) || Field <- Fields]},
validate(Sort),
@@ -30,7 +28,6 @@ new(Fields) when is_list(Fields) ->
new(Else) ->
?MANGO_ERROR({invalid_sort_json, Else}).
-
to_json({Fields}) ->
to_json(Fields);
to_json([]) ->
@@ -38,15 +35,12 @@ to_json([]) ->
to_json([{Name, Dir} | Rest]) ->
[{[{Name, Dir}]} | to_json(Rest)].
-
fields({Props}) ->
[Name || {Name, _Dir} <- Props].
-
directions({Props}) ->
[Dir || {_Name, Dir} <- Props].
-
sort_field(<<"">>) ->
?MANGO_ERROR({invalid_sort_field, <<"">>});
sort_field(Field) when is_binary(Field) ->
@@ -60,7 +54,6 @@ sort_field({Name, BadDir}) when is_binary(Name) ->
sort_field(Else) ->
?MANGO_ERROR({invalid_sort_field, Else}).
-
validate({Props}) ->
% Assert each field is in the same direction
% until we support mixed direction sorts.
diff --git a/src/mango/src/mango_sup.erl b/src/mango/src/mango_sup.erl
index b0dedf125..c0b04d9c9 100644
--- a/src/mango/src/mango_sup.erl
+++ b/src/mango/src/mango_sup.erl
@@ -16,9 +16,8 @@
-export([start_link/1]).
-
start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
+ supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
init([]) ->
{ok, {{one_for_one, 3, 10}, couch_epi:register_service(mango_epi, [])}}.
diff --git a/src/mango/src/mango_util.erl b/src/mango/src/mango_util.erl
index 8257e841f..609a9dbc0 100644
--- a/src/mango/src/mango_util.erl
+++ b/src/mango/src/mango_util.erl
@@ -12,7 +12,6 @@
-module(mango_util).
-
-export([
open_doc/2,
open_ddocs/1,
@@ -46,44 +45,41 @@
cached_re/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mango.hrl").
-
-define(DIGITS, "(\\p{N}+)").
-define(HEXDIGITS, "([0-9a-fA-F]+)").
-define(EXP, "[eE][+-]?" ++ ?DIGITS).
-define(NUMSTRING,
-"[\\x00-\\x20]*" ++ "[+-]?(" ++ "NaN|"
- ++ "Infinity|" ++ "((("
- ++ ?DIGITS
- ++ "(\\.)?("
- ++ ?DIGITS
- ++ "?)("
- ++ ?EXP
- ++ ")?)|"
- ++ "(\\.("
- ++ ?DIGITS
- ++ ")("
- ++ ?EXP
- ++ ")?)|"
- ++ "(("
- ++ "(0[xX]"
- ++ ?HEXDIGITS
- ++ "(\\.)?)|"
- ++ "(0[xX]"
- ++ ?HEXDIGITS
- ++ "?(\\.)"
- ++ ?HEXDIGITS
- ++ ")"
- ++ ")[pP][+-]?" ++ ?DIGITS ++ "))" ++ "[fFdD]?))" ++ "[\\x00-\\x20]*").
-
+ "[\\x00-\\x20]*" ++ "[+-]?(" ++ "NaN|" ++
+ "Infinity|" ++ "(((" ++
+ ?DIGITS ++
+ "(\\.)?(" ++
+ ?DIGITS ++
+ "?)(" ++
+ ?EXP ++
+ ")?)|" ++
+ "(\\.(" ++
+ ?DIGITS ++
+ ")(" ++
+ ?EXP ++
+ ")?)|" ++
+ "((" ++
+ "(0[xX]" ++
+ ?HEXDIGITS ++
+ "(\\.)?)|" ++
+ "(0[xX]" ++
+ ?HEXDIGITS ++
+ "?(\\.)" ++
+ ?HEXDIGITS ++
+ ")" ++
+ ")[pP][+-]?" ++ ?DIGITS ++ "))" ++ "[fFdD]?))" ++ "[\\x00-\\x20]*"
+).
open_doc(Db, DocId) ->
open_doc(Db, DocId, [deleted, ejson_body]).
-
open_doc(Db, DocId, Options) ->
case mango_util:defer(fabric, open_doc, [Db, DocId, Options]) of
{ok, Doc} ->
@@ -94,7 +90,6 @@ open_doc(Db, DocId, Options) ->
?MANGO_ERROR({error_loading_doc, DocId})
end.
-
open_ddocs(Db) ->
case mango_util:defer(fabric, design_docs, [Db]) of
{ok, Docs} ->
@@ -103,7 +98,6 @@ open_ddocs(Db) ->
?MANGO_ERROR(error_loading_ddocs)
end.
-
load_ddoc(Db, DDocId) ->
load_ddoc(Db, DDocId, [deleted, ejson_body]).
@@ -112,13 +106,13 @@ load_ddoc(Db, DDocId, DbOpts) ->
{ok, Doc} ->
{ok, check_lang(Doc)};
not_found ->
- Body = {[
- {<<"language">>, <<"query">>}
- ]},
+ Body =
+ {[
+ {<<"language">>, <<"query">>}
+ ]},
{ok, #doc{id = DDocId, body = Body}}
end.
-
defer(Mod, Fun, Args) ->
{Pid, Ref} = erlang:spawn_monitor(?MODULE, do_defer, [Mod, Fun, Args]),
receive
@@ -132,7 +126,6 @@ defer(Mod, Fun, Args) ->
erlang:exit(Value)
end.
-
do_defer(Mod, Fun, Args) ->
try erlang:apply(Mod, Fun, Args) of
Resp ->
@@ -149,7 +142,6 @@ do_defer(Mod, Fun, Args) ->
erlang:exit({mango_defer_exit, Error})
end.
-
assert_ejson({Props}) ->
assert_ejson_obj(Props);
assert_ejson(Vals) when is_list(Vals) ->
@@ -167,7 +159,6 @@ assert_ejson(Number) when is_number(Number) ->
assert_ejson(_Else) ->
false.
-
assert_ejson_obj([]) ->
true;
assert_ejson_obj([{Key, Val} | Rest]) when is_binary(Key) ->
@@ -180,7 +171,6 @@ assert_ejson_obj([{Key, Val} | Rest]) when is_binary(Key) ->
assert_ejson_obj(_Else) ->
false.
-
assert_ejson_arr([]) ->
true;
assert_ejson_arr([Val | Rest]) ->
@@ -191,11 +181,11 @@ assert_ejson_arr([Val | Rest]) ->
false
end.
-
check_lang(#doc{id = Id, deleted = true}) ->
- Body = {[
- {<<"language">>, <<"query">>}
- ]},
+ Body =
+ {[
+ {<<"language">>, <<"query">>}
+ ]},
#doc{id = Id, body = Body};
check_lang(#doc{body = {Props}} = Doc) ->
case lists:keyfind(<<"language">>, 1, Props) of
@@ -205,13 +195,11 @@ check_lang(#doc{body = {Props}} = Doc) ->
?MANGO_ERROR({invalid_ddoc_lang, Else})
end.
-
to_lower(Key) when is_binary(Key) ->
KStr = binary_to_list(Key),
KLower = string:to_lower(KStr),
list_to_binary(KLower).
-
enc_dbname(<<>>) ->
<<>>;
enc_dbname(<<A:8/integer, Rest/binary>>) ->
@@ -219,7 +207,6 @@ enc_dbname(<<A:8/integer, Rest/binary>>) ->
Tail = enc_dbname(Rest),
<<Bytes/binary, Tail/binary>>.
-
enc_db_byte(N) when N >= $a, N =< $z -> <<N>>;
enc_db_byte(N) when N >= $0, N =< $9 -> <<N>>;
enc_db_byte(N) when N == $/; N == $_; N == $- -> <<N>>;
@@ -228,7 +215,6 @@ enc_db_byte(N) ->
L = enc_hex_byte(N rem 16),
<<$$, H:8/integer, L:8/integer>>.
-
dec_dbname(<<>>) ->
<<>>;
dec_dbname(<<$$, _:8/integer>>) ->
@@ -241,7 +227,6 @@ dec_dbname(<<N:8/integer, Rest/binary>>) ->
Tail = dec_dbname(Rest),
<<N:8/integer, Tail/binary>>.
-
enc_hex(<<>>) ->
<<>>;
enc_hex(<<V:8/integer, Rest/binary>>) ->
@@ -250,12 +235,10 @@ enc_hex(<<V:8/integer, Rest/binary>>) ->
Tail = enc_hex(Rest),
<<H:8/integer, L:8/integer, Tail/binary>>.
-
enc_hex_byte(N) when N >= 0, N < 10 -> $0 + N;
enc_hex_byte(N) when N >= 10, N < 16 -> $a + (N - 10);
enc_hex_byte(N) -> throw({invalid_hex_value, N}).
-
dec_hex(<<>>) ->
<<>>;
dec_hex(<<_:8/integer>>) ->
@@ -265,14 +248,11 @@ dec_hex(<<H:8/integer, L:8/integer, Rest/binary>>) ->
Tail = dec_hex(Rest),
<<Byte:8/integer, Tail/binary>>.
-
dec_hex_byte(N) when N >= $0, N =< $9 -> (N - $0);
dec_hex_byte(N) when N >= $a, N =< $f -> (N - $a) + 10;
dec_hex_byte(N) when N >= $A, N =< $F -> (N - $A) + 10;
dec_hex_byte(N) -> throw({invalid_hex_character, N}).
-
-
lucene_escape_field(Bin) when is_binary(Bin) ->
Str = binary_to_list(Bin),
Enc = lucene_escape_field(Str),
@@ -289,58 +269,58 @@ lucene_escape_field([H | T]) when is_number(H), H >= 0, H =< 255 ->
Hi = enc_hex_byte(H div 16),
Lo = enc_hex_byte(H rem 16),
[$_, Hi, Lo | lucene_escape_field(T)]
- end;
+ end;
lucene_escape_field([]) ->
[].
-
lucene_escape_query_value(IoList) when is_list(IoList) ->
lucene_escape_query_value(iolist_to_binary(IoList));
lucene_escape_query_value(Bin) when is_binary(Bin) ->
IoList = lucene_escape_qv(Bin),
iolist_to_binary(IoList).
-
% This escapes the special Lucene query characters
% listed below as well as any whitespace.
%
% + - && || ! ( ) { } [ ] ^ ~ * ? : \ " /
%
-lucene_escape_qv(<<>>) -> [];
+lucene_escape_qv(<<>>) ->
+ [];
lucene_escape_qv(<<"&&", Rest/binary>>) ->
["\\&&" | lucene_escape_qv(Rest)];
lucene_escape_qv(<<"||", Rest/binary>>) ->
["\\||" | lucene_escape_qv(Rest)];
lucene_escape_qv(<<C, Rest/binary>>) ->
NeedsEscape = "+-(){}[]!^~*?:/\\\" \t\r\n",
- Out = case lists:member(C, NeedsEscape) of
- true -> ["\\", C];
- false -> [C]
- end,
+ Out =
+ case lists:member(C, NeedsEscape) of
+ true -> ["\\", C];
+ false -> [C]
+ end,
Out ++ lucene_escape_qv(Rest).
-
lucene_escape_user(Field) ->
{ok, Path} = parse_field(Field),
Escaped = [mango_util:lucene_escape_field(P) || P <- Path],
iolist_to_binary(join(".", Escaped)).
-
has_suffix(Bin, Suffix) when is_binary(Bin), is_binary(Suffix) ->
SBin = size(Bin),
SSuffix = size(Suffix),
- if SBin < SSuffix -> false; true ->
- PSize = SBin - SSuffix,
- case Bin of
- <<_:PSize/binary, Suffix/binary>> ->
- true;
- _ ->
- false
- end
+ if
+ SBin < SSuffix ->
+ false;
+ true ->
+ PSize = SBin - SSuffix,
+ case Bin of
+ <<_:PSize/binary, Suffix/binary>> ->
+ true;
+ _ ->
+ false
+ end
end.
-
join(_Sep, []) ->
[];
join(_Sep, [Item]) ->
@@ -348,10 +328,9 @@ join(_Sep, [Item]) ->
join(Sep, [Item | Rest]) ->
[Item, Sep | join(Sep, Rest)].
-
is_number_string(Value) when is_binary(Value) ->
is_number_string(binary_to_list(Value));
-is_number_string(Value) when is_list(Value)->
+is_number_string(Value) when is_list(Value) ->
MP = cached_re(mango_numstring_re, ?NUMSTRING),
case re:run(Value, MP) of
nomatch ->
@@ -360,7 +339,6 @@ is_number_string(Value) when is_list(Value)->
true
end.
-
cached_re(Name, RE) ->
case mochiglobal:get(Name) of
undefined ->
@@ -371,7 +349,6 @@ cached_re(Name, RE) ->
MP
end.
-
parse_field(Field) ->
case binary:match(Field, <<"\\">>, []) of
nomatch ->
@@ -382,12 +359,15 @@ parse_field(Field) ->
end.
parse_field_slow(Field) ->
- Path = lists:map(fun
- (P) when P =:= <<>> ->
- ?MANGO_ERROR({invalid_field_name, Field});
- (P) ->
- re:replace(P, <<"\\\\">>, <<>>, [global, {return, binary}])
- end, re:split(Field, <<"(?<!\\\\)\\.">>)),
+ Path = lists:map(
+ fun
+ (P) when P =:= <<>> ->
+ ?MANGO_ERROR({invalid_field_name, Field});
+ (P) ->
+ re:replace(P, <<"\\\\">>, <<>>, [global, {return, binary}])
+ end,
+ re:split(Field, <<"(?<!\\\\)\\.">>)
+ ),
{ok, Path}.
check_non_empty(Field, Parts) ->
@@ -398,7 +378,6 @@ check_non_empty(Field, Parts) ->
Parts
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/mem3/src/mem3.erl b/src/mem3/src/mem3.erl
index 86eb3bb45..7151a3ec1 100644
--- a/src/mem3/src/mem3.erl
+++ b/src/mem3/src/mem3.erl
@@ -12,8 +12,18 @@
-module(mem3).
--export([start/0, stop/0, restart/0, nodes/0, node_info/2, shards/1, shards/2,
- choose_shards/2, n/1, n/2, dbname/1, ushards/1, ushards/2]).
+-export([
+ start/0,
+ stop/0,
+ restart/0,
+ nodes/0,
+ node_info/2,
+ shards/1, shards/2,
+ choose_shards/2,
+ n/1, n/2,
+ dbname/1,
+ ushards/1, ushards/2
+]).
-export([get_shard/3, local_shards/1, shard_suffix/1, fold_shards/2]).
-export([sync_security/0, sync_security/1]).
-export([compare_nodelists/0, compare_shards/1]).
@@ -47,30 +57,45 @@ restart() ->
%% key and the nodes holding that state as the value. Also reports member
%% nodes which fail to respond and nodes which are connected but are not
%% cluster members. Useful for debugging.
--spec compare_nodelists() -> [{{cluster_nodes, [node()]} | bad_nodes
- | non_member_nodes, [node()]}].
+-spec compare_nodelists() ->
+ [
+ {
+ {cluster_nodes, [node()]}
+ | bad_nodes
+ | non_member_nodes,
+ [node()]
+ }
+ ].
compare_nodelists() ->
Nodes = mem3:nodes(),
AllNodes = erlang:nodes([this, visible]),
{Replies, BadNodes} = gen_server:multi_call(Nodes, mem3_nodes, get_nodelist),
- Dict = lists:foldl(fun({Node, Nodelist}, D) ->
- orddict:append({cluster_nodes, Nodelist}, Node, D)
- end, orddict:new(), Replies),
+ Dict = lists:foldl(
+ fun({Node, Nodelist}, D) ->
+ orddict:append({cluster_nodes, Nodelist}, Node, D)
+ end,
+ orddict:new(),
+ Replies
+ ),
[{non_member_nodes, AllNodes -- Nodes}, {bad_nodes, BadNodes} | Dict].
--spec compare_shards(DbName::iodata()) -> [{bad_nodes | [#shard{}], [node()]}].
+-spec compare_shards(DbName :: iodata()) -> [{bad_nodes | [#shard{}], [node()]}].
compare_shards(DbName) when is_list(DbName) ->
compare_shards(list_to_binary(DbName));
compare_shards(DbName) ->
Nodes = mem3:nodes(),
{Replies, BadNodes} = rpc:multicall(mem3, shards, [DbName]),
GoodNodes = [N || N <- Nodes, not lists:member(N, BadNodes)],
- Dict = lists:foldl(fun({Shards, Node}, D) ->
- orddict:append(Shards, Node, D)
- end, orddict:new(), lists:zip(Replies, GoodNodes)),
+ Dict = lists:foldl(
+ fun({Shards, Node}, D) ->
+ orddict:append(Shards, Node, D)
+ end,
+ orddict:new(),
+ lists:zip(Replies, GoodNodes)
+ ),
[{bad_nodes, BadNodes} | Dict].
--spec n(DbName::iodata()) -> integer().
+-spec n(DbName :: iodata()) -> integer().
n(DbName) ->
% Use _design to avoid issues with
% partition validation
@@ -86,7 +111,7 @@ nodes() ->
node_info(Node, Key) ->
mem3_nodes:get_node_info(Node, Key).
--spec shards(DbName::iodata()) -> [#shard{}].
+-spec shards(DbName :: iodata()) -> [#shard{}].
shards(DbName) ->
shards_int(DbName, []).
@@ -97,30 +122,36 @@ shards_int(DbName, Options) ->
ShardDbName =
list_to_binary(config:get("mem3", "shards_db", "_dbs")),
case DbName of
- ShardDbName when Ordered ->
- %% shard_db is treated as a single sharded db to support calls to db_info
- %% and view_all_docs
- [#ordered_shard{
- node = node(),
- name = ShardDbName,
- dbname = ShardDbName,
- range = [0, (2 bsl 31)-1],
- order = undefined,
- opts = []}];
- ShardDbName ->
- %% shard_db is treated as a single sharded db to support calls to db_info
- %% and view_all_docs
- [#shard{
- node = node(),
- name = ShardDbName,
- dbname = ShardDbName,
- range = [0, (2 bsl 31)-1],
- opts = []}];
- _ ->
- mem3_shards:for_db(DbName, Options)
+ ShardDbName when Ordered ->
+ %% shard_db is treated as a single sharded db to support calls to db_info
+ %% and view_all_docs
+ [
+ #ordered_shard{
+ node = node(),
+ name = ShardDbName,
+ dbname = ShardDbName,
+ range = [0, (2 bsl 31) - 1],
+ order = undefined,
+ opts = []
+ }
+ ];
+ ShardDbName ->
+ %% shard_db is treated as a single sharded db to support calls to db_info
+ %% and view_all_docs
+ [
+ #shard{
+ node = node(),
+ name = ShardDbName,
+ dbname = ShardDbName,
+ range = [0, (2 bsl 31) - 1],
+ opts = []
+ }
+ ];
+ _ ->
+ mem3_shards:for_db(DbName, Options)
end.
--spec shards(DbName::iodata(), DocId::binary()) -> [#shard{}].
+-spec shards(DbName :: iodata(), DocId :: binary()) -> [#shard{}].
shards(DbName, DocId) ->
shards_int(DbName, DocId, []).
@@ -131,22 +162,21 @@ shards_int(DbName, DocId, Options) when is_list(DocId) ->
shards_int(DbName, DocId, Options) ->
mem3_shards:for_docid(DbName, DocId, Options).
-
--spec ushards(DbName::iodata()) -> [#shard{}].
+-spec ushards(DbName :: iodata()) -> [#shard{}].
ushards(DbName) ->
- Nodes = [node()|erlang:nodes()],
+ Nodes = [node() | erlang:nodes()],
ZoneMap = zone_map(Nodes),
Shards = ushards(DbName, live_shards(DbName, Nodes, [ordered]), ZoneMap),
mem3_util:downcast(Shards).
--spec ushards(DbName::iodata(), DocId::binary()) -> [#shard{}].
+-spec ushards(DbName :: iodata(), DocId :: binary()) -> [#shard{}].
ushards(DbName, DocId) ->
Shards = shards_int(DbName, DocId, [ordered]),
Shard = hd(Shards),
mem3_util:downcast([Shard]).
ushards(DbName, Shards0, ZoneMap) ->
- {L,S,D} = group_by_proximity(Shards0, ZoneMap),
+ {L, S, D} = group_by_proximity(Shards0, ZoneMap),
% Prefer shards in the local zone over shards in a different zone,
% but sort each zone separately to ensure a consistent choice between
% nodes in the same zone.
@@ -177,34 +207,45 @@ sync_security() ->
sync_security(Db) ->
mem3_sync_security:go(dbname(Db)).
--spec choose_shards(DbName::iodata(), Options::list()) -> [#shard{}].
+-spec choose_shards(DbName :: iodata(), Options :: list()) -> [#shard{}].
choose_shards(DbName, Options) when is_list(DbName) ->
choose_shards(list_to_binary(DbName), Options);
choose_shards(DbName, Options) ->
- try shards(DbName)
- catch error:E when E==database_does_not_exist; E==badarg ->
- Nodes = allowed_nodes(),
- case get_placement(Options) of
- undefined ->
- choose_shards(DbName, Nodes, Options);
- Placement ->
- lists:flatmap(fun({Zone, N}) ->
- NodesInZone = nodes_in_zone(Nodes, Zone),
- Options1 = lists:keymerge(1, [{n,N}], Options),
- choose_shards(DbName, NodesInZone, Options1)
- end, Placement)
- end
+ try
+ shards(DbName)
+ catch
+ error:E when E == database_does_not_exist; E == badarg ->
+ Nodes = allowed_nodes(),
+ case get_placement(Options) of
+ undefined ->
+ choose_shards(DbName, Nodes, Options);
+ Placement ->
+ lists:flatmap(
+ fun({Zone, N}) ->
+ NodesInZone = nodes_in_zone(Nodes, Zone),
+ Options1 = lists:keymerge(1, [{n, N}], Options),
+ choose_shards(DbName, NodesInZone, Options1)
+ end,
+ Placement
+ )
+ end
end.
choose_shards(DbName, Nodes, Options) ->
NodeCount = length(Nodes),
Suffix = couch_util:get_value(shard_suffix, Options, ""),
N = mem3_util:n_val(couch_util:get_value(n, Options), NodeCount),
- if N =:= 0 -> erlang:error(no_nodes_in_zone);
- true -> ok
+ if
+ N =:= 0 -> erlang:error(no_nodes_in_zone);
+ true -> ok
end,
- Q = mem3_util:q_val(couch_util:get_value(q, Options,
- config:get_integer("cluster", "q", 2))),
+ Q = mem3_util:q_val(
+ couch_util:get_value(
+ q,
+ Options,
+ config:get_integer("cluster", "q", 2)
+ )
+ ),
%% rotate to a random entry in the nodelist for even distribution
RotatedNodes = rotate_rand(Nodes),
mem3_util:create_partition_map(DbName, N, Q, RotatedNodes, Suffix).
@@ -227,10 +268,13 @@ get_placement(Options) ->
end.
decode_placement_string(PlacementStr) ->
- [begin
- [Zone, N] = string:tokens(Rule, ":"),
- {list_to_binary(Zone), list_to_integer(N)}
- end || Rule <- string:tokens(PlacementStr, ",")].
+ [
+ begin
+ [Zone, N] = string:tokens(Rule, ":"),
+ {list_to_binary(Zone), list_to_integer(N)}
+ end
+ || Rule <- string:tokens(PlacementStr, ",")
+ ].
-spec dbname(#shard{} | iodata()) -> binary().
dbname(#shard{dbname = DbName}) ->
@@ -245,7 +289,7 @@ dbname(_) ->
erlang:error(badarg).
%% @doc Determine if DocId belongs in shard (identified by record or filename)
-belongs(#shard{}=Shard, DocId) when is_binary(DocId) ->
+belongs(#shard{} = Shard, DocId) when is_binary(DocId) ->
[Begin, End] = range(Shard),
belongs(Begin, End, Shard, DocId);
belongs(<<"shards/", _/binary>> = ShardName, DocId) when is_binary(DocId) ->
@@ -263,14 +307,19 @@ range(#shard{range = Range}) ->
range(#ordered_shard{range = Range}) ->
Range;
range(<<"shards/", Start:8/binary, "-", End:8/binary, "/", _/binary>>) ->
- [httpd_util:hexlist_to_integer(binary_to_list(Start)),
- httpd_util:hexlist_to_integer(binary_to_list(End))].
+ [
+ httpd_util:hexlist_to_integer(binary_to_list(Start)),
+ httpd_util:hexlist_to_integer(binary_to_list(End))
+ ].
allowed_nodes() ->
- lists:filter(fun(Node) ->
- Decom = mem3:node_info(Node, <<"decom">>),
- (Decom =/= true) andalso (Decom =/= <<"true">>)
- end, mem3:nodes()).
+ lists:filter(
+ fun(Node) ->
+ Decom = mem3:node_info(Node, <<"decom">>),
+ (Decom =/= true) andalso (Decom =/= <<"true">>)
+ end,
+ mem3:nodes()
+ ).
nodes_in_zone(Nodes, Zone) ->
[Node || Node <- Nodes, Zone == mem3:node_info(Node, <<"zone">>)].
@@ -289,8 +338,10 @@ group_by_proximity(Shards) ->
group_by_proximity(Shards, zone_map(Nodes)).
group_by_proximity(Shards, ZoneMap) ->
- {Local, Remote} = lists:partition(fun(S) -> mem3:node(S) =:= node() end,
- Shards),
+ {Local, Remote} = lists:partition(
+ fun(S) -> mem3:node(S) =:= node() end,
+ Shards
+ ),
LocalZone = proplists:get_value(node(), ZoneMap),
Fun = fun(S) -> proplists:get_value(mem3:node(S), ZoneMap) =:= LocalZone end,
{SameZone, DifferentZone} = lists:partition(Fun, Remote),
@@ -298,18 +349,25 @@ group_by_proximity(Shards, ZoneMap) ->
choose_ushards(DbName, Shards) ->
Groups0 = group_by_range(Shards),
- Groups1 = [mem3_util:rotate_list({DbName, R}, order_shards(G))
- || {R, G} <- Groups0],
+ Groups1 = [
+ mem3_util:rotate_list({DbName, R}, order_shards(G))
+ || {R, G} <- Groups0
+ ],
[hd(G) || G <- Groups1].
-order_shards([#ordered_shard{}|_]=OrderedShards) ->
+order_shards([#ordered_shard{} | _] = OrderedShards) ->
lists:keysort(#ordered_shard.order, OrderedShards);
order_shards(UnorderedShards) ->
UnorderedShards.
group_by_range(Shards) ->
- lists:foldl(fun(Shard, Dict) ->
- orddict:append(mem3:range(Shard), Shard, Dict) end, orddict:new(), Shards).
+ lists:foldl(
+ fun(Shard, Dict) ->
+ orddict:append(mem3:range(Shard), Shard, Dict)
+ end,
+ orddict:new(),
+ Shards
+ ).
% quorum functions
@@ -318,15 +376,14 @@ quorum(DbName) when is_binary(DbName) ->
quorum(Db) ->
quorum(couch_db:name(Db)).
-
-node(#shard{node=Node}) ->
+node(#shard{node = Node}) ->
Node;
-node(#ordered_shard{node=Node}) ->
+node(#ordered_shard{node = Node}) ->
Node.
-name(#shard{name=Name}) ->
+name(#shard{name = Name}) ->
Name;
-name(#ordered_shard{name=Name}) ->
+name(#ordered_shard{name = Name}) ->
Name.
% Direct calculation of node membership. This is the algorithm part. It
@@ -335,9 +392,9 @@ name(#ordered_shard{name=Name}) ->
owner(DbName, DocId, Nodes) ->
hd(mem3_util:rotate_list({DbName, DocId}, lists:usort(Nodes))).
-engine(#shard{opts=Opts}) ->
+engine(#shard{opts = Opts}) ->
engine(Opts);
-engine(#ordered_shard{opts=Opts}) ->
+engine(#ordered_shard{opts = Opts}) ->
engine(Opts);
engine(Opts) when is_list(Opts) ->
case couch_util:get_value(engine, Opts) of
@@ -360,18 +417,23 @@ ping(Node) ->
ping(Node, Timeout) when is_atom(Node) ->
%% The implementation of the function is copied from
%% lib/kernel/src/net_adm.erl with addition of a Timeout
- case catch gen:call({net_kernel, Node},
- '$gen_call', {is_auth, node()}, Timeout) of
- {ok, yes} -> pong;
+ case
+ catch gen:call(
+ {net_kernel, Node},
+ '$gen_call',
+ {is_auth, node()},
+ Timeout
+ )
+ of
+ {ok, yes} ->
+ pong;
_ ->
erlang:disconnect_node(Node),
pang
end.
-
db_is_current(#shard{name = Name}) ->
db_is_current(Name);
-
db_is_current(<<"shards/", _/binary>> = Name) ->
try
Shards = mem3:shards(mem3:dbname(Name)),
@@ -380,13 +442,11 @@ db_is_current(<<"shards/", _/binary>> = Name) ->
error:database_does_not_exist ->
false
end;
-
db_is_current(Name) when is_binary(Name) ->
% This accounts for local (non-sharded) dbs, and is mostly
% for unit tests that either test or use mem3_rep logic
couch_server:exists(Name).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
@@ -394,25 +454,34 @@ db_is_current(Name) when is_binary(Name) ->
-define(ALLOWED_NODE, 'node1@127.0.0.1').
allowed_nodes_test_() ->
- {"allowed_nodes test", [{
- setup,
- fun () ->
- Props = [
- {?ALLOWED_NODE, []},
- {'node2@127.0.0.1', [{<<"decom">>,<<"true">>}]},
- {'node3@127.0.0.1', [{<<"decom">>,true}]}],
- ok = meck:expect(mem3_nodes, get_nodelist,
- fun() -> proplists:get_keys(Props) end),
- ok = meck:expect(mem3_nodes, get_node_info,
- fun(Node, Key) ->
- couch_util:get_value(Key, proplists:get_value(Node, Props))
- end)
- end,
- fun (_) -> meck:unload() end,
- [
- ?_assertMatch([?ALLOWED_NODE], allowed_nodes())
- ]
- }]}.
+ {"allowed_nodes test", [
+ {
+ setup,
+ fun() ->
+ Props = [
+ {?ALLOWED_NODE, []},
+ {'node2@127.0.0.1', [{<<"decom">>, <<"true">>}]},
+ {'node3@127.0.0.1', [{<<"decom">>, true}]}
+ ],
+ ok = meck:expect(
+ mem3_nodes,
+ get_nodelist,
+ fun() -> proplists:get_keys(Props) end
+ ),
+ ok = meck:expect(
+ mem3_nodes,
+ get_node_info,
+ fun(Node, Key) ->
+ couch_util:get_value(Key, proplists:get_value(Node, Props))
+ end
+ )
+ end,
+ fun(_) -> meck:unload() end,
+ [
+ ?_assertMatch([?ALLOWED_NODE], allowed_nodes())
+ ]
+ }
+ ]}.
rotate_rand_degenerate_test() ->
?assertEqual([1], rotate_rand([1])).
diff --git a/src/mem3/src/mem3_bdu.erl b/src/mem3/src/mem3_bdu.erl
index bf84d4470..84eda2397 100644
--- a/src/mem3/src/mem3_bdu.erl
+++ b/src/mem3/src/mem3_bdu.erl
@@ -12,35 +12,28 @@
-module(mem3_bdu).
-
-export([
before_doc_update/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-
--spec before_doc_update(#doc{}, Db::any(), couch_db:update_type()) -> #doc{}.
+-spec before_doc_update(#doc{}, Db :: any(), couch_db:update_type()) -> #doc{}.
before_doc_update(#doc{id = <<?DESIGN_DOC_PREFIX, _/binary>>} = Doc, _Db, _UpdateType) ->
% Skip design docs
Doc;
-
before_doc_update(#doc{deleted = true} = Doc, _Db, _UpdateType) ->
% Skip deleted
Doc;
-
before_doc_update(#doc{} = Doc, _Db, replicated_changes) ->
% Skip internal replicator updates
Doc;
-
before_doc_update(#doc{} = Doc, _Db, _UpdateType) ->
Body1 = couch_util:json_encode(Doc#doc.body),
Body2 = couch_util:json_decode(Body1, [return_maps]),
validate(Body2),
Doc.
-
validate(#{} = Body) ->
validate_key(<<"by_node">>, Body, ["by_node is mandatory"]),
validate_key(<<"by_range">>, Body, ["by_range is mandatory"]),
@@ -57,48 +50,55 @@ validate(#{} = Body) ->
% "by_node": {
% "node1@xxx.xxx.xxx.xxx": ["00000000-1fffffff",...]
% ]}
- maps:map(fun(Node, Ranges) ->
- validate_by_node(Node, Ranges, ByRange)
- end, ByNode),
+ maps:map(
+ fun(Node, Ranges) ->
+ validate_by_node(Node, Ranges, ByRange)
+ end,
+ ByNode
+ ),
% "by_range": {
% "00000000-1fffffff": ["node1@xxx.xxx.xxx.xxx", ...]
% ]}
- maps:map(fun(Range, Nodes) ->
- validate_by_range(Range, Nodes, ByNode)
- end, ByRange).
-
+ maps:map(
+ fun(Range, Nodes) ->
+ validate_by_range(Range, Nodes, ByNode)
+ end,
+ ByRange
+ ).
validate_by_node(Node, Ranges, ByRange) ->
validate_array(Ranges, ["by_node", Ranges, "value not an array"]),
- lists:foreach(fun(Range) ->
- validate_key(Range, ByRange, ["by_range for", Range, "missing"]),
- Nodes = maps:get(Range, ByRange),
- validate_member(Node, Nodes, ["by_range for", Range, "missing", Node])
- end, Ranges).
-
+ lists:foreach(
+ fun(Range) ->
+ validate_key(Range, ByRange, ["by_range for", Range, "missing"]),
+ Nodes = maps:get(Range, ByRange),
+ validate_member(Node, Nodes, ["by_range for", Range, "missing", Node])
+ end,
+ Ranges
+ ).
validate_by_range(Range, Nodes, ByNode) ->
validate_array(Nodes, ["by_range", Nodes, "value not an array"]),
- lists:foreach(fun(Node) ->
- validate_key(Node, ByNode, ["by_node for", Node, "missing"]),
- Ranges = maps:get(Node, ByNode),
- validate_member(Range, Ranges, ["by_node for", Node, "missing", Range])
- end, Nodes).
-
+ lists:foreach(
+ fun(Node) ->
+ validate_key(Node, ByNode, ["by_node for", Node, "missing"]),
+ Ranges = maps:get(Node, ByNode),
+ validate_member(Range, Ranges, ["by_node for", Node, "missing", Range])
+ end,
+ Nodes
+ ).
validate_array(Val, _ErrMsg) when is_list(Val) ->
ok;
validate_array(_Val, ErrMsg) ->
throw({forbidden, errmsg(ErrMsg)}).
-
validate_key(Key, #{} = Map, ErrMsg) ->
case maps:is_key(Key, Map) of
true -> ok;
false -> throw({forbidden, errmsg(ErrMsg)})
end.
-
validate_member(Val, Array, ErrMsg) when is_list(Array) ->
case lists:member(Val, Array) of
true -> ok;
@@ -107,6 +107,5 @@ validate_member(Val, Array, ErrMsg) when is_list(Array) ->
validate_member(_Val, _Array, ErrMsg) ->
throw({forbidden, errmsg(ErrMsg)}).
-
errmsg(ErrMsg) when is_list(ErrMsg) ->
list_to_binary(lists:join(" ", ErrMsg)).
diff --git a/src/mem3/src/mem3_cluster.erl b/src/mem3/src/mem3_cluster.erl
index 7e3d477cb..974b2cbef 100644
--- a/src/mem3/src/mem3_cluster.erl
+++ b/src/mem3/src/mem3_cluster.erl
@@ -25,7 +25,6 @@
% That can be configured via the StartPeriod argument. If the time since start
% is less than a full period, then the StartPeriod is used as the period.
-
-module(mem3_cluster).
-behaviour(gen_server).
@@ -44,11 +43,9 @@
code_change/3
]).
-
-callback cluster_stable(Context :: term()) -> NewContext :: term().
-callback cluster_unstable(Context :: term()) -> NewContext :: term().
-
-record(state, {
mod :: atom(),
ctx :: term(),
@@ -59,19 +56,17 @@
timer :: reference()
}).
-
-spec start_link(module(), term(), integer(), integer()) ->
{ok, pid()} | ignore | {error, term()}.
-start_link(Module, Context, StartPeriod, Period)
- when is_atom(Module), is_integer(StartPeriod), is_integer(Period) ->
+start_link(Module, Context, StartPeriod, Period) when
+ is_atom(Module), is_integer(StartPeriod), is_integer(Period)
+->
gen_server:start_link(?MODULE, [Module, Context, StartPeriod, Period], []).
-
-spec set_period(pid(), integer()) -> ok.
set_period(Server, Period) when is_pid(Server), is_integer(Period) ->
gen_server:cast(Server, {set_period, Period}).
-
% gen_server callbacks
init([Module, Context, StartPeriod, Period]) ->
@@ -84,8 +79,7 @@ init([Module, Context, StartPeriod, Period]) ->
period = Period,
start_period = StartPeriod,
timer = new_timer(StartPeriod)
- }}.
-
+ }}.
terminate(_Reason, _State) ->
ok.
@@ -93,32 +87,26 @@ terminate(_Reason, _State) ->
handle_call(_Msg, _From, State) ->
{reply, ignored, State}.
-
handle_cast({set_period, Period}, State) ->
{noreply, State#state{period = Period}}.
-
handle_info({nodeup, _Node}, State) ->
{noreply, cluster_changed(State)};
-
handle_info({nodedown, _Node}, State) ->
{noreply, cluster_changed(State)};
-
handle_info(stability_check, #state{mod = Mod, ctx = Ctx} = State) ->
- erlang:cancel_timer(State#state.timer),
- case now_diff_sec(State#state.last_change) > interval(State) of
- true ->
- {noreply, State#state{ctx = Mod:cluster_stable(Ctx)}};
- false ->
- Timer = new_timer(interval(State)),
- {noreply, State#state{timer = Timer}}
- end.
-
+ erlang:cancel_timer(State#state.timer),
+ case now_diff_sec(State#state.last_change) > interval(State) of
+ true ->
+ {noreply, State#state{ctx = Mod:cluster_stable(Ctx)}};
+ false ->
+ Timer = new_timer(interval(State)),
+ {noreply, State#state{timer = Timer}}
+ end.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
%% Internal functions
-spec cluster_changed(#state{}) -> #state{}.
@@ -129,18 +117,19 @@ cluster_changed(#state{mod = Mod, ctx = Ctx} = State) ->
ctx = Mod:cluster_unstable(Ctx)
}.
-
-spec new_timer(non_neg_integer()) -> reference().
new_timer(IntervalSec) ->
erlang:send_after(IntervalSec * 1000, self(), stability_check).
-
% For the first Period seconds after node boot we check cluster stability every
% StartPeriod seconds. Once the initial Period seconds have passed we continue
% to monitor once every Period seconds
-spec interval(#state{}) -> non_neg_integer().
-interval(#state{period = Period, start_period = StartPeriod,
- start_time = T0}) ->
+interval(#state{
+ period = Period,
+ start_period = StartPeriod,
+ start_time = T0
+}) ->
case now_diff_sec(T0) > Period of
true ->
% Normal operation
@@ -150,12 +139,11 @@ interval(#state{period = Period, start_period = StartPeriod,
StartPeriod
end.
-
-spec now_diff_sec(erlang:timestamp()) -> non_neg_integer().
now_diff_sec(Time) ->
case timer:now_diff(os:timestamp(), Time) of
USec when USec < 0 ->
0;
USec when USec >= 0 ->
- USec / 1000000
+ USec / 1000000
end.
diff --git a/src/mem3/src/mem3_epi.erl b/src/mem3/src/mem3_epi.erl
index 4bf2bf5d2..7bfc74dcf 100644
--- a/src/mem3/src/mem3_epi.erl
+++ b/src/mem3/src/mem3_epi.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(mem3_epi).
-behaviour(couch_epi_plugin).
@@ -34,7 +33,6 @@ providers() ->
{chttpd_handlers, mem3_httpd_handlers}
].
-
services() ->
[].
diff --git a/src/mem3/src/mem3_hash.erl b/src/mem3/src/mem3_hash.erl
index 665c61cb1..ccaab7223 100644
--- a/src/mem3/src/mem3_hash.erl
+++ b/src/mem3/src/mem3_hash.erl
@@ -20,50 +20,41 @@
crc32/1
]).
-
-include_lib("mem3/include/mem3.hrl").
-
calculate(#shard{opts = Opts}, DocId) ->
Props = couch_util:get_value(props, Opts, []),
MFA = get_hash_fun_int(Props),
calculate(MFA, DocId);
-
calculate(#ordered_shard{opts = Opts}, DocId) ->
Props = couch_util:get_value(props, Opts, []),
MFA = get_hash_fun_int(Props),
calculate(MFA, DocId);
-
calculate(DbName, DocId) when is_binary(DbName) ->
MFA = get_hash_fun(DbName),
calculate(MFA, DocId);
-
calculate({Mod, Fun, Args}, DocId) ->
erlang:apply(Mod, Fun, [DocId | Args]).
-
get_hash_fun(#shard{opts = Opts}) ->
get_hash_fun_int(Opts);
-
get_hash_fun(#ordered_shard{opts = Opts}) ->
get_hash_fun_int(Opts);
-
get_hash_fun(DbName0) when is_binary(DbName0) ->
DbName = mem3:dbname(DbName0),
try
- [#shard{opts=Opts} | _] = mem3_shards:for_db(DbName),
+ [#shard{opts = Opts} | _] = mem3_shards:for_db(DbName),
get_hash_fun_int(couch_util:get_value(props, Opts, []))
- catch error:database_does_not_exist ->
- {?MODULE, crc32, []}
+ catch
+ error:database_does_not_exist ->
+ {?MODULE, crc32, []}
end.
-
crc32(Item) when is_binary(Item) ->
erlang:crc32(Item);
crc32(Item) ->
erlang:crc32(term_to_binary(Item)).
-
get_hash_fun_int(Opts) when is_list(Opts) ->
case lists:keyfind(hash, 1, Opts) of
{hash, [Mod, Fun, Args]} ->
diff --git a/src/mem3/src/mem3_httpd.erl b/src/mem3/src/mem3_httpd.erl
index 3df7e1876..745fe815c 100644
--- a/src/mem3/src/mem3_httpd.erl
+++ b/src/mem3/src/mem3_httpd.erl
@@ -12,57 +12,90 @@
-module(mem3_httpd).
--export([handle_membership_req/1, handle_shards_req/2,
- handle_sync_req/2]).
+-export([
+ handle_membership_req/1,
+ handle_shards_req/2,
+ handle_sync_req/2
+]).
%% includes
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-handle_membership_req(#httpd{method='GET',
- path_parts=[<<"_membership">>]} = Req) ->
- ClusterNodes = try mem3:nodes()
- catch _:_ -> {ok,[]} end,
- couch_httpd:send_json(Req, {[
- {all_nodes, lists:sort([node()|nodes()])},
- {cluster_nodes, lists:sort(ClusterNodes)}
- ]});
-handle_membership_req(#httpd{path_parts=[<<"_membership">>]}=Req) ->
+handle_membership_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [<<"_membership">>]
+ } = Req
+) ->
+ ClusterNodes =
+ try
+ mem3:nodes()
+ catch
+ _:_ -> {ok, []}
+ end,
+ couch_httpd:send_json(
+ Req,
+ {[
+ {all_nodes, lists:sort([node() | nodes()])},
+ {cluster_nodes, lists:sort(ClusterNodes)}
+ ]}
+ );
+handle_membership_req(#httpd{path_parts = [<<"_membership">>]} = Req) ->
chttpd:send_method_not_allowed(Req, "GET").
-handle_shards_req(#httpd{method='GET',
- path_parts=[_DbName, <<"_shards">>]} = Req, Db) ->
+handle_shards_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_DbName, <<"_shards">>]
+ } = Req,
+ Db
+) ->
DbName = mem3:dbname(couch_db:name(Db)),
Shards = mem3:shards(DbName),
JsonShards = json_shards(Shards, dict:new()),
- couch_httpd:send_json(Req, {[
- {shards, JsonShards}
- ]});
-handle_shards_req(#httpd{method='GET',
- path_parts=[_DbName, <<"_shards">>, DocId]} = Req, Db) ->
+ couch_httpd:send_json(
+ Req,
+ {[
+ {shards, JsonShards}
+ ]}
+ );
+handle_shards_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_DbName, <<"_shards">>, DocId]
+ } = Req,
+ Db
+) ->
DbName = mem3:dbname(couch_db:name(Db)),
Shards = mem3:shards(DbName, DocId),
{[{Shard, Dbs}]} = json_shards(Shards, dict:new()),
- couch_httpd:send_json(Req, {[
- {range, Shard},
- {nodes, Dbs}
- ]});
-handle_shards_req(#httpd{path_parts=[_DbName, <<"_shards">>]}=Req, _Db) ->
+ couch_httpd:send_json(
+ Req,
+ {[
+ {range, Shard},
+ {nodes, Dbs}
+ ]}
+ );
+handle_shards_req(#httpd{path_parts = [_DbName, <<"_shards">>]} = Req, _Db) ->
chttpd:send_method_not_allowed(Req, "GET");
-handle_shards_req(#httpd{path_parts=[_DbName, <<"_shards">>, _DocId]}=Req, _Db) ->
+handle_shards_req(#httpd{path_parts = [_DbName, <<"_shards">>, _DocId]} = Req, _Db) ->
chttpd:send_method_not_allowed(Req, "GET").
-handle_sync_req(#httpd{method='POST',
- path_parts=[_DbName, <<"_sync_shards">>]} = Req, Db) ->
+handle_sync_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_DbName, <<"_sync_shards">>]
+ } = Req,
+ Db
+) ->
DbName = mem3:dbname(couch_db:name(Db)),
ShardList = [S#shard.name || S <- mem3:ushards(DbName)],
- [ sync_shard(S) || S <- ShardList ],
+ [sync_shard(S) || S <- ShardList],
chttpd:send_json(Req, 202, {[{ok, true}]});
handle_sync_req(Req, _) ->
chttpd:send_method_not_allowed(Req, "POST").
-
%%
%% internal
%%
@@ -70,7 +103,7 @@ handle_sync_req(Req, _) ->
json_shards([], AccIn) ->
List = dict:to_list(AccIn),
{lists:sort(List)};
-json_shards([#shard{node=Node, range=[B,E]} | Rest], AccIn) ->
+json_shards([#shard{node = Node, range = [B, E]} | Rest], AccIn) ->
HexBeg = couch_util:to_hex(<<B:32/integer>>),
HexEnd = couch_util:to_hex(<<E:32/integer>>),
Range = list_to_binary(HexBeg ++ "-" ++ HexEnd),
@@ -78,7 +111,8 @@ json_shards([#shard{node=Node, range=[B,E]} | Rest], AccIn) ->
sync_shard(ShardName) ->
Shards = mem3_shards:for_shard_range(ShardName),
- [rpc:call(S1#shard.node, mem3_sync, push, [S1, S2#shard.node]) ||
- S1 <- Shards, S2 <- Shards, S1 =/= S2],
+ [
+ rpc:call(S1#shard.node, mem3_sync, push, [S1, S2#shard.node])
+ || S1 <- Shards, S2 <- Shards, S1 =/= S2
+ ],
ok.
-
diff --git a/src/mem3/src/mem3_httpd_handlers.erl b/src/mem3/src/mem3_httpd_handlers.erl
index 7dd6ab052..ca6893e98 100644
--- a/src/mem3/src/mem3_httpd_handlers.erl
+++ b/src/mem3/src/mem3_httpd_handlers.erl
@@ -19,7 +19,7 @@ url_handler(<<"_reshard">>) -> fun mem3_reshard_httpd:handle_reshard_req/1;
url_handler(_) -> no_match.
db_handler(<<"_shards">>) -> fun mem3_httpd:handle_shards_req/2;
-db_handler(<<"_sync_shards">>) -> fun mem3_httpd:handle_sync_req/2;
+db_handler(<<"_sync_shards">>) -> fun mem3_httpd:handle_sync_req/2;
db_handler(_) -> no_match.
design_handler(_) -> no_match.
diff --git a/src/mem3/src/mem3_nodes.erl b/src/mem3/src/mem3_nodes.erl
index dd5be1a72..b46b3bb64 100644
--- a/src/mem3/src/mem3_nodes.erl
+++ b/src/mem3/src/mem3_nodes.erl
@@ -13,8 +13,14 @@
-module(mem3_nodes).
-behaviour(gen_server).
-vsn(1).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-export([start_link/0, get_nodelist/0, get_node_info/2]).
@@ -28,16 +34,18 @@ start_link() ->
get_nodelist() ->
try
- lists:sort([N || {N,_} <- ets:tab2list(?MODULE)])
- catch error:badarg ->
- gen_server:call(?MODULE, get_nodelist)
+ lists:sort([N || {N, _} <- ets:tab2list(?MODULE)])
+ catch
+ error:badarg ->
+ gen_server:call(?MODULE, get_nodelist)
end.
get_node_info(Node, Key) ->
try
couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
- catch error:badarg ->
- gen_server:call(?MODULE, {get_node_info, Node, Key})
+ catch
+ error:badarg ->
+ gen_server:call(?MODULE, {get_node_info, Node, Key})
end.
init([]) ->
@@ -47,13 +55,15 @@ init([]) ->
{ok, #state{changes_pid = Pid, update_seq = UpdateSeq}}.
handle_call(get_nodelist, _From, State) ->
- {reply, lists:sort([N || {N,_} <- ets:tab2list(?MODULE)]), State};
+ {reply, lists:sort([N || {N, _} <- ets:tab2list(?MODULE)]), State};
handle_call({get_node_info, Node, Key}, _From, State) ->
- Resp = try
- couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
- catch error:badarg ->
- error
- end,
+ Resp =
+ try
+ couch_util:get_value(Key, ets:lookup_element(?MODULE, Node, 2))
+ catch
+ error:badarg ->
+ error
+ end,
{reply, Resp, State};
handle_call({add_node, Node, NodeInfo}, _From, State) ->
gen_event:notify(mem3_events, {add_node, Node}),
@@ -69,22 +79,26 @@ handle_call(_Call, _From, State) ->
handle_cast(_Msg, State) ->
{noreply, State}.
-handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid=Pid} = State) ->
+handle_info({'DOWN', _, _, Pid, Reason}, #state{changes_pid = Pid} = State) ->
couch_log:notice("~p changes listener died ~p", [?MODULE, Reason]),
StartSeq = State#state.update_seq,
- Seq = case Reason of {seq, EndSeq} -> EndSeq; _ -> StartSeq end,
+ Seq =
+ case Reason of
+ {seq, EndSeq} -> EndSeq;
+ _ -> StartSeq
+ end,
erlang:send_after(5000, self(), start_listener),
{noreply, State#state{update_seq = Seq}};
handle_info(start_listener, #state{update_seq = Seq} = State) ->
{NewPid, _} = spawn_monitor(fun() -> listen_for_changes(Seq) end),
- {noreply, State#state{changes_pid=NewPid}};
+ {noreply, State#state{changes_pid = NewPid}};
handle_info(_Info, State) ->
{noreply, State}.
terminate(_Reason, _State) ->
ok.
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
%% internal functions
@@ -100,10 +114,10 @@ initialize_nodelist() ->
first_fold(#full_doc_info{id = <<"_design/", _/binary>>}, Acc) ->
{ok, Acc};
-first_fold(#full_doc_info{deleted=true}, Acc) ->
+first_fold(#full_doc_info{deleted = true}, Acc) ->
{ok, Acc};
-first_fold(#full_doc_info{id=Id}=DocInfo, Db) ->
- {ok, #doc{body={Props}}} = couch_db:open_doc(Db, DocInfo, [ejson_body]),
+first_fold(#full_doc_info{id = Id} = DocInfo, Db) ->
+ {ok, #doc{body = {Props}}} = couch_db:open_doc(Db, DocInfo, [ejson_body]),
ets:insert(?MODULE, {mem3_util:to_atom(Id), Props}),
{ok, Db}.
@@ -125,31 +139,39 @@ changes_callback({stop, EndSeq}, _) ->
exit({seq, EndSeq});
changes_callback({change, {Change}, _}, _) ->
Node = couch_util:get_value(<<"id">>, Change),
- case Node of <<"_design/", _/binary>> -> ok; _ ->
- case mem3_util:is_deleted(Change) of
- false ->
- {Props} = couch_util:get_value(doc, Change),
- gen_server:call(?MODULE, {add_node, mem3_util:to_atom(Node), Props});
- true ->
- gen_server:call(?MODULE, {remove_node, mem3_util:to_atom(Node)})
- end
+ case Node of
+ <<"_design/", _/binary>> ->
+ ok;
+ _ ->
+ case mem3_util:is_deleted(Change) of
+ false ->
+ {Props} = couch_util:get_value(doc, Change),
+ gen_server:call(?MODULE, {add_node, mem3_util:to_atom(Node), Props});
+ true ->
+ gen_server:call(?MODULE, {remove_node, mem3_util:to_atom(Node)})
+ end
end,
{ok, couch_util:get_value(<<"seq">>, Change)};
changes_callback(timeout, _) ->
{ok, nil}.
insert_if_missing(Db, Nodes) ->
- Docs = lists:foldl(fun(Node, Acc) ->
- case ets:lookup(?MODULE, Node) of
- [_] ->
- Acc;
- [] ->
- ets:insert(?MODULE, {Node, []}),
- [#doc{id = couch_util:to_binary(Node)} | Acc]
- end
- end, [], Nodes),
- if Docs =/= [] ->
- {ok, _} = couch_db:update_docs(Db, Docs, []);
- true ->
- {ok, []}
+ Docs = lists:foldl(
+ fun(Node, Acc) ->
+ case ets:lookup(?MODULE, Node) of
+ [_] ->
+ Acc;
+ [] ->
+ ets:insert(?MODULE, {Node, []}),
+ [#doc{id = couch_util:to_binary(Node)} | Acc]
+ end
+ end,
+ [],
+ Nodes
+ ),
+ if
+ Docs =/= [] ->
+ {ok, _} = couch_db:update_docs(Db, Docs, []);
+ true ->
+ {ok, []}
end.
diff --git a/src/mem3/src/mem3_plugin_couch_db.erl b/src/mem3/src/mem3_plugin_couch_db.erl
index 8cb5d7898..ca6a2e570 100644
--- a/src/mem3/src/mem3_plugin_couch_db.erl
+++ b/src/mem3/src/mem3_plugin_couch_db.erl
@@ -16,6 +16,5 @@
is_valid_purge_client/2
]).
-
is_valid_purge_client(DbName, Props) ->
mem3_rep:verify_purge_checkpoint(DbName, Props).
diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl
index 2487e6a98..1ffc70fc1 100644
--- a/src/mem3/src/mem3_rep.erl
+++ b/src/mem3/src/mem3_rep.erl
@@ -12,7 +12,6 @@
-module(mem3_rep).
-
-export([
go/2,
go/3,
@@ -29,7 +28,6 @@
changes_enumerator/2
]).
-
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
@@ -59,10 +57,8 @@
go(Source, Target) ->
go(Source, Target, []).
-
go(DbName, Node, Opts) when is_binary(DbName), is_atom(Node) ->
- go(#shard{name=DbName, node=node()}, #shard{name=DbName, node=Node}, Opts);
-
+ go(#shard{name = DbName, node = node()}, #shard{name = DbName, node = Node}, Opts);
go(#shard{} = Source, #shard{} = Target, Opts) ->
case mem3:db_is_current(Source) of
true ->
@@ -71,23 +67,27 @@ go(#shard{} = Source, #shard{} = Target, Opts) ->
% Database could have been recreated
{error, missing_source}
end;
-
go(#shard{} = Source, #{} = Targets0, Opts) when map_size(Targets0) > 0 ->
Targets = maps:map(fun(_, T) -> #tgt{shard = T} end, Targets0),
case couch_server:exists(Source#shard.name) of
true ->
sync_security(Source, Targets),
- BatchSize = case proplists:get_value(batch_size, Opts) of
- BS when is_integer(BS), BS > 0 -> BS;
- _ -> 100
- end,
- BatchCount = case proplists:get_value(batch_count, Opts) of
- all -> all;
- BC when is_integer(BC), BC > 0 -> BC;
- _ -> 1
- end,
- IncompleteRanges = config:get_boolean("mem3", "incomplete_ranges",
- false),
+ BatchSize =
+ case proplists:get_value(batch_size, Opts) of
+ BS when is_integer(BS), BS > 0 -> BS;
+ _ -> 100
+ end,
+ BatchCount =
+ case proplists:get_value(batch_count, Opts) of
+ all -> all;
+ BC when is_integer(BC), BC > 0 -> BC;
+ _ -> 1
+ end,
+ IncompleteRanges = config:get_boolean(
+ "mem3",
+ "incomplete_ranges",
+ false
+ ),
Filter = proplists:get_value(filter, Opts),
Acc = #acc{
batch_size = BatchSize,
@@ -102,99 +102,97 @@ go(#shard{} = Source, #{} = Targets0, Opts) when map_size(Targets0) > 0 ->
{error, missing_source}
end.
-
-go(#acc{source=Source, batch_count=BC}=Acc) ->
+go(#acc{source = Source, batch_count = BC} = Acc) ->
case couch_db:open(Source#shard.name, [?ADMIN_CTX]) of
- {ok, Db} ->
- Resp = try
- HashFun = mem3_hash:get_hash_fun(couch_db:name(Db)),
- repl(Acc#acc{db = Db, hashfun = HashFun})
- catch
- error:{error, missing_source} ->
- {error, missing_source};
- error:{not_found, no_db_file} ->
- {error, missing_target}
- after
- couch_db:close(Db)
- end,
- case Resp of
- {ok, P} when P > 0, BC == all ->
- go(Acc);
- {ok, P} when P > 0, BC > 1 ->
- go(Acc#acc{batch_count=BC-1});
- Else ->
- Else
- end;
- {not_found, no_db_file} ->
- {error, missing_source}
+ {ok, Db} ->
+ Resp =
+ try
+ HashFun = mem3_hash:get_hash_fun(couch_db:name(Db)),
+ repl(Acc#acc{db = Db, hashfun = HashFun})
+ catch
+ error:{error, missing_source} ->
+ {error, missing_source};
+ error:{not_found, no_db_file} ->
+ {error, missing_target}
+ after
+ couch_db:close(Db)
+ end,
+ case Resp of
+ {ok, P} when P > 0, BC == all ->
+ go(Acc);
+ {ok, P} when P > 0, BC > 1 ->
+ go(Acc#acc{batch_count = BC - 1});
+ Else ->
+ Else
+ end;
+ {not_found, no_db_file} ->
+ {error, missing_source}
end.
-
make_local_id(Source, Target) ->
make_local_id(Source, Target, undefined).
-
-make_local_id(#shard{node=SourceNode}, #shard{node=TargetNode}, Filter) ->
+make_local_id(#shard{node = SourceNode}, #shard{node = TargetNode}, Filter) ->
make_local_id(SourceNode, TargetNode, Filter);
-
make_local_id(SourceThing, TargetThing, F) when is_binary(F) ->
S = local_id_hash(SourceThing),
T = local_id_hash(TargetThing),
<<"_local/shard-sync-", S/binary, "-", T/binary, F/binary>>;
-
make_local_id(SourceThing, TargetThing, Filter) ->
S = local_id_hash(SourceThing),
T = local_id_hash(TargetThing),
F = filter_hash(Filter),
<<"_local/shard-sync-", S/binary, "-", T/binary, F/binary>>.
-
filter_hash(Filter) when is_function(Filter) ->
{new_uniq, Hash} = erlang:fun_info(Filter, new_uniq),
B = couch_util:encodeBase64Url(Hash),
<<"-", B/binary>>;
-
filter_hash(_) ->
<<>>.
-
local_id_hash(Thing) ->
couch_util:encodeBase64Url(couch_hash:md5_hash(term_to_binary(Thing))).
-
make_purge_id(SourceUUID, TargetUUID) ->
<<"_local/purge-mem3-", SourceUUID/binary, "-", TargetUUID/binary>>.
-
verify_purge_checkpoint(DbName, Props) ->
try
Type = couch_util:get_value(<<"type">>, Props),
- if Type =/= <<"internal_replication">> -> false; true ->
- SourceBin = couch_util:get_value(<<"source">>, Props),
- TargetBin = couch_util:get_value(<<"target">>, Props),
- Range = couch_util:get_value(<<"range">>, Props),
-
- Source = binary_to_existing_atom(SourceBin, latin1),
- Target = binary_to_existing_atom(TargetBin, latin1),
-
- try
- Nodes = lists:foldl(fun(Shard, Acc) ->
- case Shard#shard.range == Range of
- true -> [Shard#shard.node | Acc];
- false -> Acc
- end
- end, [], mem3:shards(DbName)),
- lists:member(Source, Nodes) andalso lists:member(Target, Nodes)
- catch
- error:database_does_not_exist ->
- false
- end
+ if
+ Type =/= <<"internal_replication">> ->
+ false;
+ true ->
+ SourceBin = couch_util:get_value(<<"source">>, Props),
+ TargetBin = couch_util:get_value(<<"target">>, Props),
+ Range = couch_util:get_value(<<"range">>, Props),
+
+ Source = binary_to_existing_atom(SourceBin, latin1),
+ Target = binary_to_existing_atom(TargetBin, latin1),
+
+ try
+ Nodes = lists:foldl(
+ fun(Shard, Acc) ->
+ case Shard#shard.range == Range of
+ true -> [Shard#shard.node | Acc];
+ false -> Acc
+ end
+ end,
+ [],
+ mem3:shards(DbName)
+ ),
+ lists:member(Source, Nodes) andalso lists:member(Target, Nodes)
+ catch
+ error:database_does_not_exist ->
+ false
+ end
end
- catch _:_ ->
- false
+ catch
+ _:_ ->
+ false
end.
-
%% @doc Find and return the largest update_seq in SourceDb
%% that the client has seen from TargetNode.
%%
@@ -209,35 +207,41 @@ verify_purge_checkpoint(DbName, Props) ->
%% largest source_seq that has a target_seq =< TgtSeq.
find_source_seq(SrcDb, TgtNode, TgtUUIDPrefix, TgtSeq) ->
case find_repl_doc(SrcDb, TgtUUIDPrefix) of
- {ok, TgtUUID, Doc} ->
- SrcNode = atom_to_binary(node(), utf8),
- find_source_seq_int(Doc, SrcNode, TgtNode, TgtUUID, TgtSeq);
- {not_found, _} ->
- couch_log:warning("~p find_source_seq repl doc not_found "
- "src_db: ~p, tgt_node: ~p, tgt_uuid_prefix: ~p, tgt_seq: ~p",
- [?MODULE, SrcDb, TgtNode, TgtUUIDPrefix, TgtSeq]),
- 0
+ {ok, TgtUUID, Doc} ->
+ SrcNode = atom_to_binary(node(), utf8),
+ find_source_seq_int(Doc, SrcNode, TgtNode, TgtUUID, TgtSeq);
+ {not_found, _} ->
+ couch_log:warning(
+ "~p find_source_seq repl doc not_found "
+ "src_db: ~p, tgt_node: ~p, tgt_uuid_prefix: ~p, tgt_seq: ~p",
+ [?MODULE, SrcDb, TgtNode, TgtUUIDPrefix, TgtSeq]
+ ),
+ 0
end.
-
-find_source_seq_int(#doc{body={Props}}, SrcNode0, TgtNode0, TgtUUID, TgtSeq) ->
- SrcNode = case is_atom(SrcNode0) of
- true -> atom_to_binary(SrcNode0, utf8);
- false -> SrcNode0
- end,
- TgtNode = case is_atom(TgtNode0) of
- true -> atom_to_binary(TgtNode0, utf8);
- false -> TgtNode0
- end,
+find_source_seq_int(#doc{body = {Props}}, SrcNode0, TgtNode0, TgtUUID, TgtSeq) ->
+ SrcNode =
+ case is_atom(SrcNode0) of
+ true -> atom_to_binary(SrcNode0, utf8);
+ false -> SrcNode0
+ end,
+ TgtNode =
+ case is_atom(TgtNode0) of
+ true -> atom_to_binary(TgtNode0, utf8);
+ false -> TgtNode0
+ end,
% This is split off purely for the ability to run unit tests
% against this bit of code without requiring all sorts of mocks.
{History} = couch_util:get_value(<<"history">>, Props, {[]}),
SrcHistory = couch_util:get_value(SrcNode, History, []),
- UseableHistory = lists:filter(fun({Entry}) ->
- couch_util:get_value(<<"target_node">>, Entry) =:= TgtNode andalso
- couch_util:get_value(<<"target_uuid">>, Entry) =:= TgtUUID andalso
- couch_util:get_value(<<"target_seq">>, Entry) =< TgtSeq
- end, SrcHistory),
+ UseableHistory = lists:filter(
+ fun({Entry}) ->
+ couch_util:get_value(<<"target_node">>, Entry) =:= TgtNode andalso
+ couch_util:get_value(<<"target_uuid">>, Entry) =:= TgtUUID andalso
+ couch_util:get_value(<<"target_seq">>, Entry) =< TgtSeq
+ end,
+ SrcHistory
+ ),
% This relies on SrcHistory being ordered descending by source
% sequence.
@@ -245,19 +249,21 @@ find_source_seq_int(#doc{body={Props}}, SrcNode0, TgtNode0, TgtUUID, TgtSeq) ->
[{Entry} | _] ->
couch_util:get_value(<<"source_seq">>, Entry);
[] ->
- couch_log:warning("~p find_source_seq_int nil useable history "
+ couch_log:warning(
+ "~p find_source_seq_int nil useable history "
"src_node: ~p, tgt_node: ~p, tgt_uuid: ~p, tgt_seq: ~p, "
"src_history: ~p",
- [?MODULE, SrcNode, TgtNode, TgtUUID, TgtSeq, SrcHistory]),
+ [?MODULE, SrcNode, TgtNode, TgtUUID, TgtSeq, SrcHistory]
+ ),
0
end.
-
find_split_target_seq(TgtDb, SrcNode0, SrcUUIDPrefix, SrcSeq) ->
- SrcNode = case is_atom(SrcNode0) of
- true -> atom_to_binary(SrcNode0, utf8);
- false -> SrcNode0
- end,
+ SrcNode =
+ case is_atom(SrcNode0) of
+ true -> atom_to_binary(SrcNode0, utf8);
+ false -> SrcNode0
+ end,
case find_split_target_seq_int(TgtDb, SrcNode, SrcUUIDPrefix) of
{ok, [{BulkCopySeq, BulkCopySeq} | _]} when SrcSeq =< BulkCopySeq ->
% Check if source sequence is at or below the initial bulk copy
@@ -266,65 +272,77 @@ find_split_target_seq(TgtDb, SrcNode0, SrcUUIDPrefix, SrcSeq) ->
% extra safety we assert that the initial source and target
% sequences are the same value
SrcSeq;
- {ok, Seqs= [{_, _} | _]} ->
+ {ok, Seqs = [{_, _} | _]} ->
% Pick the target sequence for the greatest source sequence that is
% less than `SrcSeq`.
case lists:takewhile(fun({Seq, _}) -> Seq < SrcSeq end, Seqs) of
[] ->
- couch_log:warning("~p find_split_target_seq target seq not found "
- "tgt_db: ~p, src_uuid_prefix: ~p, src_seq: ~p",
- [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]),
+ couch_log:warning(
+ "~p find_split_target_seq target seq not found "
+ "tgt_db: ~p, src_uuid_prefix: ~p, src_seq: ~p",
+ [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]
+ ),
0;
[{_, _} | _] = Seqs1 ->
{_, TSeq} = lists:last(Seqs1),
TSeq
end;
{not_found, _} ->
- couch_log:warning("~p find_split_target_seq target seq not found "
+ couch_log:warning(
+ "~p find_split_target_seq target seq not found "
"tgt_db: ~p, src_uuid_prefix: ~p, src_seq: ~p",
- [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]),
+ [?MODULE, couch_db:name(TgtDb), SrcUUIDPrefix, SrcSeq]
+ ),
0
end.
-
repl(#acc{db = Db0} = Acc0) ->
erlang:put(io_priority, {internal_repl, couch_db:name(Db0)}),
Acc1 = calculate_start_seq_multi(Acc0),
try
- Acc3 = case config:get_boolean("mem3", "replicate_purges", false) of
- true ->
- Acc2 = pull_purges_multi(Acc1),
- push_purges_multi(Acc2);
- false ->
- Acc1
- end,
+ Acc3 =
+ case config:get_boolean("mem3", "replicate_purges", false) of
+ true ->
+ Acc2 = pull_purges_multi(Acc1),
+ push_purges_multi(Acc2);
+ false ->
+ Acc1
+ end,
push_changes(Acc3)
catch
throw:{finished, Count} ->
{ok, Count}
end.
-
pull_purges_multi(#acc{source = Source} = Acc0) ->
#acc{batch_size = Count, seq = UpdateSeq, targets = Targets0} = Acc0,
with_src_db(Acc0, fun(Db) ->
- Targets = maps:map(fun(_, #tgt{} = T) ->
- pull_purges(Db, Count, Source, T)
- end, reset_remaining(Targets0)),
- Remaining = maps:fold(fun(_, #tgt{remaining = R}, Sum) ->
- Sum + R
- end, 0, Targets),
- if Remaining == 0 -> Acc0#acc{targets = Targets}; true ->
- PurgeSeq = couch_db:get_purge_seq(Db),
- OldestPurgeSeq = couch_db:get_oldest_purge_seq(Db),
- PurgesToPush = PurgeSeq - OldestPurgeSeq,
- Changes = couch_db:count_changes_since(Db, UpdateSeq),
- Pending = Remaining + PurgesToPush + Changes,
- throw({finished, Pending})
+ Targets = maps:map(
+ fun(_, #tgt{} = T) ->
+ pull_purges(Db, Count, Source, T)
+ end,
+ reset_remaining(Targets0)
+ ),
+ Remaining = maps:fold(
+ fun(_, #tgt{remaining = R}, Sum) ->
+ Sum + R
+ end,
+ 0,
+ Targets
+ ),
+ if
+ Remaining == 0 ->
+ Acc0#acc{targets = Targets};
+ true ->
+ PurgeSeq = couch_db:get_purge_seq(Db),
+ OldestPurgeSeq = couch_db:get_oldest_purge_seq(Db),
+ PurgesToPush = PurgeSeq - OldestPurgeSeq,
+ Changes = couch_db:count_changes_since(Db, UpdateSeq),
+ Pending = Remaining + PurgesToPush + Changes,
+ throw({finished, Pending})
end
end).
-
pull_purges(Db, Count, SrcShard, #tgt{} = Tgt0) ->
#tgt{shard = TgtShard} = Tgt0,
SrcUUID = couch_db:get_uuid(Db),
@@ -332,76 +350,101 @@ pull_purges(Db, Count, SrcShard, #tgt{} = Tgt0) ->
{LocalPurgeId, Infos, ThroughSeq, Remaining} =
mem3_rpc:load_purge_infos(TgtNode, TgtDbName, SrcUUID, Count),
Tgt = Tgt0#tgt{purgeid = LocalPurgeId},
- if Infos == [] -> ok; true ->
- {ok, _} = couch_db:purge_docs(Db, Infos, [replicated_edits]),
- Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
- mem3_rpc:save_purge_checkpoint(TgtNode, TgtDbName, LocalPurgeId, Body)
+ if
+ Infos == [] ->
+ ok;
+ true ->
+ {ok, _} = couch_db:purge_docs(Db, Infos, [replicated_edits]),
+ Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
+ mem3_rpc:save_purge_checkpoint(TgtNode, TgtDbName, LocalPurgeId, Body)
end,
Tgt#tgt{remaining = max(0, Remaining)}.
-
push_purges_multi(#acc{source = SrcShard} = Acc) ->
#acc{batch_size = BatchSize, seq = UpdateSeq, targets = Targets0} = Acc,
with_src_db(Acc, fun(Db) ->
- Targets = maps:map(fun(_, #tgt{} = T) ->
- push_purges(Db, BatchSize, SrcShard, T)
- end, reset_remaining(Targets0)),
- Remaining = maps:fold(fun(_, #tgt{remaining = R}, Sum) ->
- Sum + R
- end, 0, Targets),
- if Remaining == 0 -> Acc#acc{targets = Targets}; true ->
- Changes = couch_db:count_changes_since(Db, UpdateSeq),
- throw({finished, Remaining + Changes})
+ Targets = maps:map(
+ fun(_, #tgt{} = T) ->
+ push_purges(Db, BatchSize, SrcShard, T)
+ end,
+ reset_remaining(Targets0)
+ ),
+ Remaining = maps:fold(
+ fun(_, #tgt{remaining = R}, Sum) ->
+ Sum + R
+ end,
+ 0,
+ Targets
+ ),
+ if
+ Remaining == 0 ->
+ Acc#acc{targets = Targets};
+ true ->
+ Changes = couch_db:count_changes_since(Db, UpdateSeq),
+ throw({finished, Remaining + Changes})
end
end).
-
push_purges(Db, BatchSize, SrcShard, Tgt) ->
#tgt{shard = TgtShard, purgeid = LocalPurgeId} = Tgt,
#shard{node = TgtNode, name = TgtDbName} = TgtShard,
- StartSeq = case couch_db:open_doc(Db, LocalPurgeId, []) of
- {ok, #doc{body = {Props}}} ->
- couch_util:get_value(<<"purge_seq">>, Props);
- {not_found, _} ->
- Oldest = couch_db:get_oldest_purge_seq(Db),
- erlang:max(0, Oldest - 1)
- end,
+ StartSeq =
+ case couch_db:open_doc(Db, LocalPurgeId, []) of
+ {ok, #doc{body = {Props}}} ->
+ couch_util:get_value(<<"purge_seq">>, Props);
+ {not_found, _} ->
+ Oldest = couch_db:get_oldest_purge_seq(Db),
+ erlang:max(0, Oldest - 1)
+ end,
FoldFun = fun({PSeq, UUID, Id, Revs}, {Count, Infos, _}) ->
NewCount = Count + length(Revs),
NewInfos = [{UUID, Id, Revs} | Infos],
- Status = if NewCount < BatchSize -> ok; true -> stop end,
+ Status =
+ if
+ NewCount < BatchSize -> ok;
+ true -> stop
+ end,
{Status, {NewCount, NewInfos, PSeq}}
end,
InitAcc = {0, [], StartSeq},
{ok, {_, Infos, ThroughSeq}} =
couch_db:fold_purge_infos(Db, StartSeq, FoldFun, InitAcc),
- if Infos == [] -> ok; true ->
- ok = purge_on_target(TgtNode, TgtDbName, Infos),
- Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
- Doc = #doc{id = LocalPurgeId, body = Body},
- {ok, _} = couch_db:update_doc(Db, Doc, [])
+ if
+ Infos == [] ->
+ ok;
+ true ->
+ ok = purge_on_target(TgtNode, TgtDbName, Infos),
+ Body = purge_cp_body(SrcShard, TgtShard, ThroughSeq),
+ Doc = #doc{id = LocalPurgeId, body = Body},
+ {ok, _} = couch_db:update_doc(Db, Doc, [])
end,
Tgt#tgt{remaining = max(0, couch_db:get_purge_seq(Db) - ThroughSeq)}.
-
calculate_start_seq_multi(#acc{} = Acc) ->
#acc{db = Db, targets = Targets0, filter = Filter} = Acc,
FilterHash = filter_hash(Filter),
- Targets = maps:map(fun(_, #tgt{} = T) ->
- calculate_start_seq(Db, FilterHash, T)
- end, Targets0),
+ Targets = maps:map(
+ fun(_, #tgt{} = T) ->
+ calculate_start_seq(Db, FilterHash, T)
+ end,
+ Targets0
+ ),
% There will always be at least one target
#tgt{seq = Seq0} = hd(maps:values(Targets)),
- Seq = maps:fold(fun(_, #tgt{seq = S}, M) -> min(S, M) end, Seq0, Targets),
+ Seq = maps:fold(fun(_, #tgt{seq = S}, M) -> min(S, M) end, Seq0, Targets),
Acc#acc{seq = Seq, targets = Targets}.
-
calculate_start_seq(Db, FilterHash, #tgt{shard = TgtShard} = Tgt) ->
UUID = couch_db:get_uuid(Db),
#shard{node = Node, name = Name} = TgtShard,
- {NewDocId, Doc} = mem3_rpc:load_checkpoint(Node, Name, node(), UUID,
- FilterHash),
- #doc{id=FoundId, body={TProps}} = Doc,
+ {NewDocId, Doc} = mem3_rpc:load_checkpoint(
+ Node,
+ Name,
+ node(),
+ UUID,
+ FilterHash
+ ),
+ #doc{id = FoundId, body = {TProps}} = Doc,
Tgt1 = Tgt#tgt{localid = NewDocId},
% NewDocId and FoundId may be different the first time
% this code runs to save our newly named internal replication
@@ -429,7 +472,6 @@ calculate_start_seq(Db, FilterHash, #tgt{shard = TgtShard} = Tgt) ->
compare_epochs(Db, Tgt1)
end.
-
push_changes(#acc{} = Acc0) ->
#acc{
db = Db0,
@@ -439,8 +481,9 @@ push_changes(#acc{} = Acc0) ->
% Avoid needlessly rewriting the internal replication
% checkpoint document if nothing is replicated.
UpdateSeq = couch_db:get_update_seq(Db0),
- if Seq < UpdateSeq -> ok; true ->
- throw({finished, 0})
+ if
+ Seq < UpdateSeq -> ok;
+ true -> throw({finished, 0})
end,
with_src_db(Acc0, fun(Db) ->
@@ -451,7 +494,6 @@ push_changes(#acc{} = Acc0) ->
{ok, couch_db:count_changes_since(Db, LastSeq)}
end).
-
compare_epochs(Db, #tgt{shard = TgtShard} = Tgt) ->
#shard{node = Node, name = Name} = TgtShard,
UUID = couch_db:get_uuid(Db),
@@ -459,33 +501,44 @@ compare_epochs(Db, #tgt{shard = TgtShard} = Tgt) ->
Seq = mem3_rpc:find_common_seq(Node, Name, UUID, Epochs),
Tgt#tgt{seq = Seq, history = {[]}}.
-
-changes_enumerator(#doc_info{id=DocId}, #acc{db=Db}=Acc) ->
+changes_enumerator(#doc_info{id = DocId}, #acc{db = Db} = Acc) ->
{ok, FDI} = couch_db:get_full_doc_info(Db, DocId),
changes_enumerator(FDI, Acc);
-changes_enumerator(#full_doc_info{}=FDI, #acc{}=Acc0) ->
+changes_enumerator(#full_doc_info{} = FDI, #acc{} = Acc0) ->
#acc{
revcount = C,
targets = Targets0,
hashfun = HashFun,
incomplete_ranges = IncompleteRanges
} = Acc0,
- #doc_info{high_seq=Seq, revs=Revs} = couch_doc:to_doc_info(FDI),
- {Count, Targets} = case filter_doc(Acc0#acc.filter, FDI) of
- keep ->
- NewTargets = changes_append_fdi(FDI, Targets0, HashFun,
- IncompleteRanges),
- {C + length(Revs), NewTargets};
- discard ->
- {C, Targets0}
- end,
+ #doc_info{high_seq = Seq, revs = Revs} = couch_doc:to_doc_info(FDI),
+ {Count, Targets} =
+ case filter_doc(Acc0#acc.filter, FDI) of
+ keep ->
+ NewTargets = changes_append_fdi(
+ FDI,
+ Targets0,
+ HashFun,
+ IncompleteRanges
+ ),
+ {C + length(Revs), NewTargets};
+ discard ->
+ {C, Targets0}
+ end,
Acc1 = Acc0#acc{seq = Seq, revcount = Count, targets = Targets},
- Go = if Count < Acc1#acc.batch_size -> ok; true -> stop end,
+ Go =
+ if
+ Count < Acc1#acc.batch_size -> ok;
+ true -> stop
+ end,
{Go, Acc1}.
-
-changes_append_fdi(#full_doc_info{id = Id} = FDI, Targets, HashFun,
- IncompleteRanges) ->
+changes_append_fdi(
+ #full_doc_info{id = Id} = FDI,
+ Targets,
+ HashFun,
+ IncompleteRanges
+) ->
case mem3_reshard_job:pickfun(Id, maps:keys(Targets), HashFun) of
not_in_range when IncompleteRanges ->
Targets;
@@ -496,49 +549,61 @@ changes_append_fdi(#full_doc_info{id = Id} = FDI, Targets, HashFun,
couch_log:error(ErrMsg, [?MODULE, Id, TNames]),
error({error, {Id, not_in_target_ranges}});
Key ->
- maps:update_with(Key, fun(#tgt{infos = Infos} = T) ->
- T#tgt{infos = [FDI | Infos]}
- end, Targets)
+ maps:update_with(
+ Key,
+ fun(#tgt{infos = Infos} = T) ->
+ T#tgt{infos = [FDI | Infos]}
+ end,
+ Targets
+ )
end.
-
replicate_batch_multi(#acc{targets = Targets0, seq = Seq, db = Db} = Acc) ->
- Targets = maps:map(fun(_, #tgt{} = T) ->
- replicate_batch(T, Db, Seq)
- end, Targets0),
+ Targets = maps:map(
+ fun(_, #tgt{} = T) ->
+ replicate_batch(T, Db, Seq)
+ end,
+ Targets0
+ ),
{ok, Acc#acc{targets = Targets, revcount = 0}}.
-
replicate_batch(#tgt{shard = TgtShard, infos = Infos} = Target, Db, Seq) ->
#shard{node = Node, name = Name} = TgtShard,
case find_missing_revs(Target) of
[] ->
ok;
Missing ->
- lists:map(fun(Chunk) ->
- Docs = open_docs(Db, Infos, Chunk),
- ok = save_on_target(Node, Name, Docs)
- end, chunk_revs(Missing))
+ lists:map(
+ fun(Chunk) ->
+ Docs = open_docs(Db, Infos, Chunk),
+ ok = save_on_target(Node, Name, Docs)
+ end,
+ chunk_revs(Missing)
+ )
end,
update_locals(Target, Db, Seq),
Target#tgt{infos = []}.
-
find_missing_revs(#tgt{shard = TgtShard, infos = Infos}) ->
#shard{node = Node, name = Name} = TgtShard,
- IdsRevs = lists:map(fun(FDI) ->
- #doc_info{id=Id, revs=RevInfos} = couch_doc:to_doc_info(FDI),
- {Id, [R || #rev_info{rev=R} <- RevInfos]}
- end, Infos),
+ IdsRevs = lists:map(
+ fun(FDI) ->
+ #doc_info{id = Id, revs = RevInfos} = couch_doc:to_doc_info(FDI),
+ {Id, [R || #rev_info{rev = R} <- RevInfos]}
+ end,
+ Infos
+ ),
Missing = mem3_rpc:get_missing_revs(Node, Name, IdsRevs, [
{io_priority, {internal_repl, Name}},
?ADMIN_CTX
]),
- lists:filter(fun
- ({_Id, [], _Ancestors}) -> false;
- ({_Id, _Revs, _Ancestors}) -> true
- end, Missing).
-
+ lists:filter(
+ fun
+ ({_Id, [], _Ancestors}) -> false;
+ ({_Id, _Revs, _Ancestors}) -> true
+ end,
+ Missing
+ ).
chunk_revs(Revs) ->
Limit = list_to_integer(config:get("mem3", "rev_chunk_size", "5000")),
@@ -548,34 +613,38 @@ chunk_revs(Revs, Limit) ->
chunk_revs(Revs, {0, []}, [], Limit).
chunk_revs([], {_Count, Chunk}, Chunks, _Limit) ->
- [Chunk|Chunks];
-chunk_revs([{Id, R, A}|Revs], {Count, Chunk}, Chunks, Limit) when length(R) =< Limit - Count ->
+ [Chunk | Chunks];
+chunk_revs([{Id, R, A} | Revs], {Count, Chunk}, Chunks, Limit) when length(R) =< Limit - Count ->
chunk_revs(
Revs,
- {Count + length(R), [{Id, R, A}|Chunk]},
+ {Count + length(R), [{Id, R, A} | Chunk]},
Chunks,
Limit
);
-chunk_revs([{Id, R, A}|Revs], {Count, Chunk}, Chunks, Limit) ->
+chunk_revs([{Id, R, A} | Revs], {Count, Chunk}, Chunks, Limit) ->
{This, Next} = lists:split(Limit - Count, R),
chunk_revs(
- [{Id, Next, A}|Revs],
+ [{Id, Next, A} | Revs],
{0, []},
- [[{Id, This, A}|Chunk]|Chunks],
+ [[{Id, This, A} | Chunk] | Chunks],
Limit
).
-
open_docs(Db, Infos, Missing) ->
- lists:flatmap(fun({Id, Revs, _}) ->
- FDI = lists:keyfind(Id, #full_doc_info.id, Infos),
- #full_doc_info{rev_tree=RevTree} = FDI,
- {FoundRevs, _} = couch_key_tree:get_key_leafs(RevTree, Revs),
- lists:map(fun({#leaf{deleted=IsDel, ptr=SummaryPtr}, FoundRevPath}) ->
- couch_db:make_doc(Db, Id, IsDel, SummaryPtr, FoundRevPath)
- end, FoundRevs)
- end, Missing).
-
+ lists:flatmap(
+ fun({Id, Revs, _}) ->
+ FDI = lists:keyfind(Id, #full_doc_info.id, Infos),
+ #full_doc_info{rev_tree = RevTree} = FDI,
+ {FoundRevs, _} = couch_key_tree:get_key_leafs(RevTree, Revs),
+ lists:map(
+ fun({#leaf{deleted = IsDel, ptr = SummaryPtr}, FoundRevPath}) ->
+ couch_db:make_doc(Db, Id, IsDel, SummaryPtr, FoundRevPath)
+ end,
+ FoundRevs
+ )
+ end,
+ Missing
+ ).
save_on_target(Node, Name, Docs) ->
mem3_rpc:update_docs(Node, Name, Docs, [
@@ -586,7 +655,6 @@ save_on_target(Node, Name, Docs) ->
]),
ok.
-
purge_on_target(Node, Name, PurgeInfos) ->
mem3_rpc:purge_docs(Node, Name, PurgeInfos, [
replicated_changes,
@@ -596,7 +664,6 @@ purge_on_target(Node, Name, PurgeInfos) ->
]),
ok.
-
update_locals(Target, Db, Seq) ->
#tgt{shard = TgtShard, localid = Id, history = History} = Target,
#shard{node = Node, name = Name} = TgtShard,
@@ -609,7 +676,6 @@ update_locals(Target, Db, Seq) ->
NewBody = mem3_rpc:save_checkpoint(Node, Name, Id, Seq, NewEntry, History),
{ok, _} = couch_db:update_doc(Db, #doc{id = Id, body = NewBody}, []).
-
purge_cp_body(#shard{} = Source, #shard{} = Target, PurgeSeq) ->
{Mega, Secs, _} = os:timestamp(),
NowSecs = Mega * 1000000 + Secs,
@@ -622,7 +688,6 @@ purge_cp_body(#shard{} = Source, #shard{} = Target, PurgeSeq) ->
{<<"range">>, Source#shard.range}
]}.
-
find_repl_doc(SrcDb, TgtUUIDPrefix) ->
SrcUUID = couch_db:get_uuid(SrcDb),
S = local_id_hash(SrcUUID),
@@ -652,7 +717,6 @@ find_repl_doc(SrcDb, TgtUUIDPrefix) ->
{not_found, missing}
end.
-
find_split_target_seq_int(TgtDb, Node, SrcUUIDPrefix) ->
TgtUUID = couch_db:get_uuid(TgtDb),
FoldFun = fun(#doc{body = {Props}}, _) ->
@@ -685,29 +749,27 @@ find_split_target_seq_int(TgtDb, Node, SrcUUIDPrefix) ->
{not_found, missing}
end.
-
% Get target sequences for each checkpoint when source replicated to the target
% The "target" is the current db where the history entry was read from and "source"
% is another, now possibly deleted, database.
get_target_seqs([], _TgtUUID, _Node, _SrcUUIDPrefix, Acc) ->
lists:reverse(Acc);
-
get_target_seqs([{Entry} | HProps], TgtUUID, Node, SrcUUIDPrefix, Acc) ->
SameTgt = couch_util:get_value(<<"target_uuid">>, Entry) =:= TgtUUID,
SameNode = couch_util:get_value(<<"target_node">>, Entry) =:= Node,
SrcUUID = couch_util:get_value(<<"source_uuid">>, Entry),
IsPrefix = is_prefix(SrcUUIDPrefix, SrcUUID),
- Acc1 = case SameTgt andalso SameNode andalso IsPrefix of
- true ->
- EntrySourceSeq = couch_util:get_value(<<"source_seq">>, Entry),
- EntryTargetSeq = couch_util:get_value(<<"target_seq">>, Entry),
- [{EntrySourceSeq, EntryTargetSeq} | Acc];
- false ->
- Acc
- end,
+ Acc1 =
+ case SameTgt andalso SameNode andalso IsPrefix of
+ true ->
+ EntrySourceSeq = couch_util:get_value(<<"source_seq">>, Entry),
+ EntryTargetSeq = couch_util:get_value(<<"target_seq">>, Entry),
+ [{EntrySourceSeq, EntryTargetSeq} | Acc];
+ false ->
+ Acc
+ end,
get_target_seqs(HProps, TgtUUID, Node, SrcUUIDPrefix, Acc1).
-
with_src_db(#acc{source = Source}, Fun) ->
case couch_db:open(Source#shard.name, [?ADMIN_CTX]) of
{ok, Db} ->
@@ -716,34 +778,36 @@ with_src_db(#acc{source = Source}, Fun) ->
after
couch_db:close(Db)
end;
- {not_found, _} ->
+ {not_found, _} ->
error({error, missing_source})
end.
-
is_prefix(Prefix, Subject) ->
binary:longest_common_prefix([Prefix, Subject]) == size(Prefix).
-
filter_doc(Filter, FullDocInfo) when is_function(Filter) ->
try Filter(FullDocInfo) of
discard -> discard;
_ -> keep
- catch _:_ ->
- keep
+ catch
+ _:_ ->
+ keep
end;
filter_doc(_, _) ->
keep.
-
sync_security(#shard{} = Source, #{} = Targets) ->
- maps:map(fun(_, #tgt{shard = Target}) ->
- mem3_sync_security:maybe_sync(Source, Target)
- end, Targets).
-
+ maps:map(
+ fun(_, #tgt{shard = Target}) ->
+ mem3_sync_security:maybe_sync(Source, Target)
+ end,
+ Targets
+ ).
-targets_map(#shard{name = <<"shards/", _/binary>> = SrcName} = Src,
- #shard{name = <<"shards/", _/binary>>, node = TgtNode} = Tgt) ->
+targets_map(
+ #shard{name = <<"shards/", _/binary>> = SrcName} = Src,
+ #shard{name = <<"shards/", _/binary>>, node = TgtNode} = Tgt
+) ->
% Parse range from name in case the passed shard is built with a name only
SrcRange = mem3:range(SrcName),
Shards0 = mem3:shards(mem3:dbname(SrcName)),
@@ -756,41 +820,35 @@ targets_map(#shard{name = <<"shards/", _/binary>> = SrcName} = Src,
% moving / copying shards using mem3:go/2,3 before the
% shards are present in the shard map
#{mem3:range(SrcName) => Tgt};
- [_ | _] = TMapList->
+ [_ | _] = TMapList ->
maps:from_list(TMapList)
end;
-
-
targets_map(_Src, Tgt) ->
#{[0, ?RING_END] => Tgt}.
-
shard_eq(#shard{name = Name, node = Node}, #shard{name = Name, node = Node}) ->
true;
-
shard_eq(_, _) ->
false.
-
check_overlap(SrcRange, Node, #shard{node = Node, range = TgtRange}) ->
mem3_util:range_overlap(SrcRange, TgtRange);
-
check_overlap([_, _], _, #shard{}) ->
false.
-
reset_remaining(#{} = Targets) ->
- maps:map(fun(_, #tgt{} = T) ->
- T#tgt{remaining = 0}
- end, Targets).
-
+ maps:map(
+ fun(_, #tgt{} = T) ->
+ T#tgt{remaining = 0}
+ end,
+ Targets
+ ).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-define(TDEF(A), {atom_to_list(A), fun A/0}).
-
find_source_seq_int_test_() ->
{
setup,
@@ -805,42 +863,36 @@ find_source_seq_int_test_() ->
]
}.
-
t_unknown_node() ->
?assertEqual(
find_source_seq_int(doc_(), <<"foo">>, <<"bing">>, <<"bar_uuid">>, 10),
0
).
-
t_unknown_uuid() ->
?assertEqual(
find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"teapot">>, 10),
0
).
-
t_ok() ->
?assertEqual(
find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"bar_uuid">>, 100),
100
).
-
t_old_ok() ->
?assertEqual(
find_source_seq_int(doc_(), <<"foo">>, <<"bar">>, <<"bar_uuid">>, 84),
50
).
-
t_different_node() ->
?assertEqual(
find_source_seq_int(doc_(), <<"foo2">>, <<"bar">>, <<"bar_uuid">>, 92),
31
).
-
-define(SNODE, <<"source_node">>).
-define(SUUID, <<"source_uuid">>).
-define(SSEQ, <<"source_seq">>).
@@ -851,49 +903,81 @@ t_different_node() ->
doc_() ->
Foo_Bar = [
{[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 100},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 100}
+ {?SNODE, <<"foo">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 100},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 100}
]},
{[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 90},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 85}
+ {?SNODE, <<"foo">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 90},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 85}
]},
{[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 50},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 51}
+ {?SNODE, <<"foo">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 50},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 51}
]},
{[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 40},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 45}
+ {?SNODE, <<"foo">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 40},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 45}
]},
{[
- {?SNODE, <<"foo">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 2},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 2}
+ {?SNODE, <<"foo">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 2},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 2}
]}
],
Foo2_Bar = [
{[
- {?SNODE, <<"foo2">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 100},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 100}
+ {?SNODE, <<"foo2">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 100},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 100}
]},
{[
- {?SNODE, <<"foo2">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 92},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 93}
+ {?SNODE, <<"foo2">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 92},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 93}
]},
{[
- {?SNODE, <<"foo2">>}, {?SUUID, <<"foo_uuid">>}, {?SSEQ, 31},
- {?TNODE, <<"bar">>}, {?TUUID, <<"bar_uuid">>}, {?TSEQ, 30}
+ {?SNODE, <<"foo2">>},
+ {?SUUID, <<"foo_uuid">>},
+ {?SSEQ, 31},
+ {?TNODE, <<"bar">>},
+ {?TUUID, <<"bar_uuid">>},
+ {?TSEQ, 30}
]}
],
- History = {[
- {<<"foo">>, Foo_Bar},
- {<<"foo2">>, Foo2_Bar}
- ]},
+ History =
+ {[
+ {<<"foo">>, Foo_Bar},
+ {<<"foo2">>, Foo2_Bar}
+ ]},
#doc{
- body={[{<<"history">>, History}]}
+ body = {[{<<"history">>, History}]}
}.
-
targets_map_test_() ->
{
setup,
@@ -908,11 +992,9 @@ targets_map_test_() ->
]
}.
-
target_not_a_shard() ->
?_assertEqual(#{[0, ?RING_END] => <<"t">>}, targets_map(<<"s">>, <<"t">>)).
-
source_contained_in_target() ->
?_test(begin
R07 = [16#00000000, 16#7fffffff],
@@ -937,13 +1019,12 @@ source_contained_in_target() ->
?assertEqual(1, map_size(Map1)),
?assertMatch(#{R07 := #shard{node = 'n2'}}, Map1),
- Tgt2 = #shard{name = TgtName1, node = 'n3'},
- Map2 = targets_map(Src1, Tgt2),
+ Tgt2 = #shard{name = TgtName1, node = 'n3'},
+ Map2 = targets_map(Src1, Tgt2),
?assertEqual(1, map_size(Map2)),
?assertMatch(#{R0f := #shard{node = 'n3'}}, Map2)
end).
-
multiple_targets() ->
?_test(begin
R07 = [16#00000000, 16#7fffffff],
@@ -968,7 +1049,6 @@ multiple_targets() ->
?assertMatch(#{R8f := #shard{node = 'n1'}}, Map)
end).
-
uneven_overlap() ->
?_test(begin
R04 = [16#00000000, 16#4fffffff],
@@ -995,7 +1075,6 @@ uneven_overlap() ->
?assertMatch(#{R58 := #shard{node = 'n1'}}, Map)
end).
-
target_not_in_shard_map() ->
?_test(begin
R0f = [16#00000000, 16#ffffffff],
diff --git a/src/mem3/src/mem3_reshard.erl b/src/mem3/src/mem3_reshard.erl
index 620b1bc73..ec08c72cd 100644
--- a/src/mem3/src/mem3_reshard.erl
+++ b/src/mem3/src/mem3_reshard.erl
@@ -12,10 +12,8 @@
-module(mem3_reshard).
-
-behaviour(gen_server).
-
-export([
start_link/0,
@@ -50,10 +48,8 @@
code_change/3
]).
-
-include("mem3_reshard.hrl").
-
-define(JOB_ID_VERSION, 1).
-define(JOB_STATE_VERSION, 1).
-define(DEFAULT_MAX_JOBS, 48).
@@ -61,14 +57,12 @@
-define(JOB_PREFIX, <<"reshard-job-">>).
-define(STATE_PREFIX, <<"reshard-state-">>).
-
%% Public API
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-spec start() -> ok | {error, any()}.
start() ->
case is_disabled() of
@@ -76,7 +70,6 @@ start() ->
false -> gen_server:call(?MODULE, start, infinity)
end.
-
-spec stop(binary()) -> ok | {error, any()}.
stop(Reason) ->
case is_disabled() of
@@ -84,15 +77,12 @@ stop(Reason) ->
false -> gen_server:call(?MODULE, {stop, Reason}, infinity)
end.
-
-spec start_split_job(#shard{} | binary()) -> {ok, binary()} | {error, term()}.
start_split_job(#shard{} = Shard) ->
start_split_job(Shard, 2);
-
start_split_job(ShardName) when is_binary(ShardName) ->
start_split_job(shard_from_name(ShardName), 2).
-
-spec start_split_job(#shard{}, split()) -> {ok, binary()} | {error, any()}.
start_split_job(#shard{} = Source, Split) ->
case is_disabled() of
@@ -100,7 +90,6 @@ start_split_job(#shard{} = Source, Split) ->
false -> validate_and_start_job(Source, Split)
end.
-
-spec stop_job(binary(), binary()) -> ok | {error, any()}.
stop_job(JobId, Reason) when is_binary(JobId), is_binary(Reason) ->
case is_disabled() of
@@ -108,7 +97,6 @@ stop_job(JobId, Reason) when is_binary(JobId), is_binary(Reason) ->
false -> gen_server:call(?MODULE, {stop_job, JobId, Reason}, infinity)
end.
-
-spec resume_job(binary()) -> ok | {error, any()}.
resume_job(JobId) when is_binary(JobId) ->
case is_disabled() of
@@ -116,7 +104,6 @@ resume_job(JobId) when is_binary(JobId) ->
false -> gen_server:call(?MODULE, {resume_job, JobId}, infinity)
end.
-
-spec remove_job(binary()) -> ok | {error, any()}.
remove_job(JobId) when is_binary(JobId) ->
case is_disabled() of
@@ -124,20 +111,21 @@ remove_job(JobId) when is_binary(JobId) ->
false -> gen_server:call(?MODULE, {remove_job, JobId}, infinity)
end.
-
-spec get_state() -> {[_ | _]}.
get_state() ->
gen_server:call(?MODULE, get_state, infinity).
-
-spec jobs() -> [[tuple()]].
jobs() ->
- ets:foldl(fun(Job, Acc) ->
- Opts = [iso8601],
- Props = mem3_reshard_store:job_to_ejson_props(Job, Opts),
- [{Props} | Acc]
- end, [], ?MODULE).
-
+ ets:foldl(
+ fun(Job, Acc) ->
+ Opts = [iso8601],
+ Props = mem3_reshard_store:job_to_ejson_props(Job, Opts),
+ [{Props} | Acc]
+ end,
+ [],
+ ?MODULE
+ ).
-spec job(job_id()) -> {ok, {[_ | _]}} | {error, not_found}.
job(JobId) ->
@@ -150,7 +138,6 @@ job(JobId) ->
{error, not_found}
end.
-
% Return true if resharding is disabled in the application level settings
-spec is_disabled() -> boolean().
is_disabled() ->
@@ -160,19 +147,16 @@ is_disabled() ->
_ -> false
end.
-
% State reporting callbacks. Used by mem3_reshard_job module.
-spec report(pid(), #job{}) -> ok.
report(Server, #job{} = Job) when is_pid(Server) ->
gen_server:cast(Server, {report, Job}).
-
-spec checkpoint(pid(), #job{}) -> ok.
checkpoint(Server, #job{} = Job) ->
couch_log:notice("~p checkpointing ~p ~p", [?MODULE, Server, jobfmt(Job)]),
gen_server:cast(Server, {checkpoint, Job}).
-
% Utility functions used from other mem3_reshard modules
-spec now_sec() -> non_neg_integer().
@@ -180,12 +164,10 @@ now_sec() ->
{Mega, Sec, _Micro} = os:timestamp(),
Mega * 1000000 + Sec.
-
-spec update_history(atom(), binary() | null, time_sec(), list()) -> list().
update_history(State, State, Ts, History) ->
% State is the same as detail. Make the detail null to avoid duplication
update_history(State, null, Ts, History);
-
update_history(State, Detail, Ts, History) ->
% Reverse, so we can process the last event as the head using
% head matches, then after append and trimming, reverse again
@@ -194,22 +176,18 @@ update_history(State, Detail, Ts, History) ->
TrimmedRev = lists:sublist(UpdatedRev, max_history()),
lists:reverse(TrimmedRev).
-
-spec shard_from_name(binary()) -> #shard{}.
-shard_from_name(<<"shards/", _:8/binary, "-", _:8/binary, "/",
- Rest/binary>> = Shard) ->
+shard_from_name(<<"shards/", _:8/binary, "-", _:8/binary, "/", Rest/binary>> = Shard) ->
Range = mem3:range(Shard),
[DbName, Suffix] = binary:split(Rest, <<".">>),
build_shard(Range, DbName, Suffix).
-
% For debugging only
-spec reset_state() -> ok.
reset_state() ->
gen_server:call(?MODULE, reset_state, infinity).
-
% Gen server functions
init(_) ->
@@ -235,14 +213,12 @@ init(_) ->
gen_server:cast(self(), reload_jobs),
{ok, State3}.
-
terminate(Reason, State) ->
couch_log:notice("~p terminate ~p ~p", [?MODULE, Reason, statefmt(State)]),
catch unlink(State#state.db_monitor),
catch exit(State#state.db_monitor, kill),
lists:foreach(fun(Job) -> kill_job_int(Job) end, running_jobs()).
-
handle_call(start, _From, #state{state = stopped} = State) ->
State1 = State#state{
state = running,
@@ -253,10 +229,8 @@ handle_call(start, _From, #state{state = stopped} = State) ->
State2 = maybe_disable(State1),
State3 = reload_jobs(State2),
{reply, ok, State3};
-
handle_call(start, _From, State) ->
{reply, ok, State};
-
handle_call({stop, Reason}, _From, #state{state = running} = State) ->
State1 = State#state{
state = stopped,
@@ -266,15 +240,13 @@ handle_call({stop, Reason}, _From, #state{state = running} = State) ->
ok = mem3_reshard_store:store_state(State1),
lists:foreach(fun(Job) -> temporarily_stop_job(Job) end, running_jobs()),
{reply, ok, State1};
-
handle_call({stop, _}, _From, State) ->
{reply, ok, State};
-
handle_call({start_job, #job{id = Id, source = Source} = Job}, _From, State) ->
couch_log:notice("~p start_job call ~p", [?MODULE, jobfmt(Job)]),
Total = ets:info(?MODULE, size),
SourceOk = mem3_reshard_validate:source(Source),
- case {job_by_id(Id), Total + 1 =< get_max_jobs(), SourceOk} of
+ case {job_by_id(Id), Total + 1 =< get_max_jobs(), SourceOk} of
{not_found, true, ok} ->
handle_start_job(Job, State);
{#job{}, _, _} ->
@@ -284,7 +256,6 @@ handle_call({start_job, #job{id = Id, source = Source} = Job}, _From, State) ->
{_, _, {error, _} = SourceError} ->
{reply, SourceError, State}
end;
-
handle_call({resume_job, _}, _From, #state{state = stopped} = State) ->
case couch_util:get_value(reason, State#state.state_info) of
undefined ->
@@ -292,7 +263,6 @@ handle_call({resume_job, _}, _From, #state{state = stopped} = State) ->
Reason ->
{reply, {error, {stopped, Reason}}, State}
end;
-
handle_call({resume_job, Id}, _From, State) ->
couch_log:notice("~p resume_job call ~p", [?MODULE, Id]),
case job_by_id(Id) of
@@ -308,12 +278,13 @@ handle_call({resume_job, Id}, _From, State) ->
not_found ->
{reply, {error, not_found}, State}
end;
-
handle_call({stop_job, Id, Reason}, _From, State) ->
couch_log:notice("~p stop_job Id:~p Reason:~p", [?MODULE, Id, Reason]),
case job_by_id(Id) of
- #job{job_state = JSt} = Job when JSt =:= running orelse JSt =:= new
- orelse JSt =:= stopped ->
+ #job{job_state = JSt} = Job when
+ JSt =:= running orelse JSt =:= new orelse
+ JSt =:= stopped
+ ->
ok = stop_job_int(Job, stopped, Reason, State),
{reply, ok, State};
#job{} ->
@@ -321,62 +292,57 @@ handle_call({stop_job, Id, Reason}, _From, State) ->
not_found ->
{reply, {error, not_found}, State}
end;
-
handle_call({remove_job, Id}, _From, State) ->
{reply, remove_job_int(Id, State), State};
-
handle_call(get_state, _From, #state{state = GlobalState} = State) ->
StateProps = mem3_reshard_store:state_to_ejson_props(State),
- Stats0 = #{running => 0, completed => 0, failed => 0, stopped => 0},
- StateStats = ets:foldl(fun(#job{job_state = JS}, Acc) ->
- % When jobs are disabled globally their state is not checkpointed as
- % "stopped", but it stays as "running". But when returning the state we
- % don't want to mislead and indicate that there are "N running jobs"
- % when the global state is "stopped".
- JS1 = case GlobalState =:= stopped andalso JS =:= running of
- true -> stopped;
- false -> JS
+ Stats0 = #{running => 0, completed => 0, failed => 0, stopped => 0},
+ StateStats = ets:foldl(
+ fun(#job{job_state = JS}, Acc) ->
+ % When jobs are disabled globally their state is not checkpointed as
+ % "stopped", but it stays as "running". But when returning the state we
+ % don't want to mislead and indicate that there are "N running jobs"
+ % when the global state is "stopped".
+ JS1 =
+ case GlobalState =:= stopped andalso JS =:= running of
+ true -> stopped;
+ false -> JS
+ end,
+ Acc#{JS1 => maps:get(JS1, Acc, 0) + 1}
end,
- Acc#{JS1 => maps:get(JS1, Acc, 0) + 1}
- end, Stats0, ?MODULE),
+ Stats0,
+ ?MODULE
+ ),
Total = ets:info(?MODULE, size),
StateStats1 = maps:to_list(StateStats) ++ [{total, Total}],
Result = {lists:sort(StateProps ++ StateStats1)},
{reply, Result, State};
-
handle_call(reset_state, _From, State) ->
{reply, ok, reset_state(State)};
-
handle_call(Call, From, State) ->
couch_log:error("~p unknown call ~p from: ~p", [?MODULE, Call, From]),
{noreply, State}.
-
handle_cast({db_deleted, DbName}, State) ->
% Remove only completed jobs. Other running states would `fail` but
% job result would stick around so users can inspect them.
JobIds = jobs_by_db_and_state(DbName, completed),
[remove_job_int(JobId, State) || JobId <- JobIds],
{noreply, State};
-
handle_cast({report, Job}, State) ->
report_int(Job),
{noreply, State};
-
handle_cast({checkpoint, Job}, State) ->
{noreply, checkpoint_int(Job, State)};
-
handle_cast(reload_jobs, State) ->
couch_log:notice("~p starting reloading jobs", [?MODULE]),
State1 = reload_jobs(State),
couch_log:notice("~p finished reloading jobs", [?MODULE]),
{noreply, State1};
-
handle_cast(Cast, State) ->
couch_log:error("~p unexpected cast ~p", [?MODULE, Cast]),
{noreply, State}.
-
handle_info({'DOWN', _Ref, process, Pid, Info}, State) ->
case job_by_pid(Pid) of
{ok, Job} ->
@@ -386,16 +352,13 @@ handle_info({'DOWN', _Ref, process, Pid, Info}, State) ->
couch_log:error("~p job not found: ~p ~p", [?MODULE, Pid, Info])
end,
{noreply, State};
-
handle_info(Info, State) ->
couch_log:error("~p unexpected info ~p", [?MODULE, Info]),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
%% Private API
validate_and_start_job(#shard{} = Source, Split) ->
@@ -425,7 +388,6 @@ validate_and_start_job(#shard{} = Source, Split) ->
{error, Error}
end.
-
handle_start_job(#job{} = Job, #state{state = running} = State) ->
case start_job_int(Job, State) of
ok ->
@@ -433,7 +395,6 @@ handle_start_job(#job{} = Job, #state{state = running} = State) ->
{error, Error} ->
{reply, {error, Error}, State}
end;
-
handle_start_job(#job{} = Job, #state{state = stopped} = State) ->
ok = mem3_reshard_store:store_job(State, Job),
% Since resharding is stopped on this node, the job is temporarily marked
@@ -442,7 +403,6 @@ handle_start_job(#job{} = Job, #state{state = stopped} = State) ->
temporarily_stop_job(Job),
{reply, {ok, Job#job.id}, State}.
-
% Insert job in the ets table as a temporarily stopped job. This would happen
% when a job is reloaded or added when node-wide resharding is stopped.
-spec temporarily_stop_job(#job{}) -> #job{}.
@@ -462,25 +422,24 @@ temporarily_stop_job(Job) ->
true = ets:insert(?MODULE, Job3),
Job3.
-
-spec reload_jobs(#state{}) -> #state{}.
reload_jobs(State) ->
Jobs = mem3_reshard_store:get_jobs(State),
lists:foldl(fun reload_job/2, State, Jobs).
-
% This is a case when the main application is stopped but a job is reloaded that
% was checkpointed in running state. Set that state to stopped to avoid the API
% results looking odd.
-spec reload_job(#job{}, #state{}) -> #state{}.
-reload_job(#job{job_state = JS} = Job, #state{state = stopped} = State)
- when JS =:= running orelse JS =:= new ->
+reload_job(#job{job_state = JS} = Job, #state{state = stopped} = State) when
+ JS =:= running orelse JS =:= new
+->
temporarily_stop_job(Job),
State;
-
% This is a case when a job process should be spawned
-reload_job(#job{job_state = JS} = Job, #state{state = running} = State)
- when JS =:= running orelse JS =:= new ->
+reload_job(#job{job_state = JS} = Job, #state{state = running} = State) when
+ JS =:= running orelse JS =:= new
+->
case start_job_int(Job, State) of
ok ->
State;
@@ -489,20 +448,18 @@ reload_job(#job{job_state = JS} = Job, #state{state = running} = State)
couch_log:error(Msg, [?MODULE, jobfmt(Job), Error]),
State
end;
-
% If job is disabled individually (stopped by the user), is completed or failed
% then simply load it into the ets table
-reload_job(#job{job_state = JS} = Job, #state{} = State)
- when JS =:= failed orelse JS =:= completed orelse JS =:= stopped ->
+reload_job(#job{job_state = JS} = Job, #state{} = State) when
+ JS =:= failed orelse JS =:= completed orelse JS =:= stopped
+->
true = ets:insert(?MODULE, Job),
State.
-
-spec get_max_jobs() -> integer().
get_max_jobs() ->
config:get_integer("reshard", "max_jobs", ?DEFAULT_MAX_JOBS).
-
-spec start_job_int(#job{}, #state{}) -> ok | {error, term()}.
start_job_int(Job, State) ->
case spawn_job(Job) of
@@ -515,7 +472,6 @@ start_job_int(Job, State) ->
{error, Error}
end.
-
-spec spawn_job(#job{}) -> {ok, pid()} | {error, term()}.
spawn_job(#job{} = Job0) ->
Job = Job0#job{
@@ -535,11 +491,14 @@ spawn_job(#job{} = Job0) ->
{error, Reason}
end.
-
-spec stop_job_int(#job{}, job_state(), term(), #state{}) -> ok.
stop_job_int(#job{} = Job, JobState, Reason, State) ->
- couch_log:info("~p stop_job_int ~p newstate: ~p reason:~p", [?MODULE,
- jobfmt(Job), JobState, Reason]),
+ couch_log:info("~p stop_job_int ~p newstate: ~p reason:~p", [
+ ?MODULE,
+ jobfmt(Job),
+ JobState,
+ Reason
+ ]),
Job1 = kill_job_int(Job),
Job2 = Job1#job{
job_state = JobState,
@@ -551,11 +510,9 @@ stop_job_int(#job{} = Job, JobState, Reason, State) ->
couch_log:info("~p stop_job_int stopped ~p", [?MODULE, jobfmt(Job2)]),
ok.
-
-spec kill_job_int(#job{}) -> #job{}.
kill_job_int(#job{pid = undefined} = Job) ->
Job;
-
kill_job_int(#job{pid = Pid, ref = Ref} = Job) ->
couch_log:info("~p kill_job_int ~p", [?MODULE, jobfmt(Job)]),
demonitor(Ref, [flush]),
@@ -569,7 +526,6 @@ kill_job_int(#job{pid = Pid, ref = Ref} = Job) ->
true = ets:insert(?MODULE, Job1),
Job1.
-
-spec handle_job_exit(#job{}, term(), #state{}) -> ok.
handle_job_exit(#job{split_state = completed} = Job, normal, State) ->
couch_log:notice("~p completed job ~s exited", [?MODULE, Job#job.id]),
@@ -584,7 +540,6 @@ handle_job_exit(#job{split_state = completed} = Job, normal, State) ->
ok = mem3_reshard_store:store_job(State, Job2),
true = ets:insert(?MODULE, Job2),
ok;
-
handle_job_exit(#job{job_state = running} = Job, normal, _State) ->
couch_log:notice("~p running job ~s stopped", [?MODULE, Job#job.id]),
OldInfo = Job#job.state_info,
@@ -597,7 +552,6 @@ handle_job_exit(#job{job_state = running} = Job, normal, _State) ->
},
true = ets:insert(?MODULE, update_job_history(Job1)),
ok;
-
handle_job_exit(#job{job_state = running} = Job, shutdown, _State) ->
couch_log:notice("~p job ~s shutdown", [?MODULE, Job#job.id]),
OldInfo = Job#job.state_info,
@@ -610,7 +564,6 @@ handle_job_exit(#job{job_state = running} = Job, shutdown, _State) ->
},
true = ets:insert(?MODULE, update_job_history(Job1)),
ok;
-
handle_job_exit(#job{job_state = running} = Job, {shutdown, Msg}, _State) ->
couch_log:notice("~p job ~s shutdown ~p", [?MODULE, Job#job.id, Msg]),
OldInfo = Job#job.state_info,
@@ -623,7 +576,6 @@ handle_job_exit(#job{job_state = running} = Job, {shutdown, Msg}, _State) ->
},
true = ets:insert(?MODULE, update_job_history(Job1)),
ok;
-
handle_job_exit(#job{} = Job, Error, State) ->
couch_log:notice("~p job ~s failed ~p", [?MODULE, Job#job.id, Error]),
OldInfo = Job#job.state_info,
@@ -639,7 +591,6 @@ handle_job_exit(#job{} = Job, Error, State) ->
true = ets:insert(?MODULE, Job2),
ok.
-
-spec job_by_id(job_id()) -> #job{} | not_found.
job_by_id(Id) ->
case ets:lookup(?MODULE, Id) of
@@ -649,23 +600,20 @@ job_by_id(Id) ->
Job
end.
-
-spec job_by_pid(pid()) -> {ok, #job{}} | {error, not_found}.
job_by_pid(Pid) when is_pid(Pid) ->
- case ets:match_object(?MODULE, #job{pid=Pid, _='_'}) of
+ case ets:match_object(?MODULE, #job{pid = Pid, _ = '_'}) of
[] ->
{error, not_found};
[#job{} = Job] ->
{ok, Job}
end.
-
-spec state_id() -> binary().
state_id() ->
Ver = iolist_to_binary(io_lib:format("~3..0B", [?JOB_STATE_VERSION])),
<<?STATE_PREFIX/binary, Ver/binary>>.
-
-spec job_id(#job{}) -> binary().
job_id(#job{source = #shard{name = SourceName}}) ->
HashInput = [SourceName, atom_to_binary(node(), utf8)],
@@ -674,20 +622,22 @@ job_id(#job{source = #shard{name = SourceName}}) ->
Prefix = iolist_to_binary(io_lib:format("~3..0B", [?JOB_ID_VERSION])),
<<Prefix/binary, "-", IdHash/binary>>.
-
-spec target_shards(#shard{}, split()) -> [#shard{}].
target_shards(#shard{name = Name, range = [B, E], dbname = DbName}, Split) when
- is_integer(Split), Split >= 2, (E - B + 1) >= Split ->
+ is_integer(Split), Split >= 2, (E - B + 1) >= Split
+->
Ranges = target_ranges([B, E], Split),
<<"shards/", _:8/binary, "-", _:8/binary, "/", DbAndSuffix/binary>> = Name,
[DbName, Suffix] = binary:split(DbAndSuffix, <<".">>),
[build_shard(R, DbName, Suffix) || R <- Ranges].
-
-spec target_ranges([range_pos()], split()) -> [[range_pos()]].
-target_ranges([Begin, End], Split) when (End - Begin + 1) >= Split,
- Split >=2 ->
- Len = End - Begin + 1, % + 1 since intervals are inclusive
+target_ranges([Begin, End], Split) when
+ (End - Begin + 1) >= Split,
+ Split >= 2
+->
+ % + 1 since intervals are inclusive
+ Len = End - Begin + 1,
NewLen = Len div Split,
Rem = Len rem Split,
Ranges = [[I, I + NewLen - 1] || I <- lists:seq(Begin, End - Rem, NewLen)],
@@ -698,29 +648,24 @@ target_ranges([Begin, End], Split) when (End - Begin + 1) >= Split,
{BeforeLast, [[BeginLast, _]]} = lists:split(Split - 1, Ranges),
BeforeLast ++ [[BeginLast, End]].
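% Illustrative arithmetic for the clause above (example values only): splitting
% the full 32-bit range [0, 16#FFFFFFFF] with Split = 2 gives Len = 16#100000000,
% NewLen = 16#80000000 and Rem = 0, so the result is
%   [[16#00000000, 16#7FFFFFFF], [16#80000000, 16#FFFFFFFF]].
% With Split = 3 the remainder (1) is folded into the last range:
%   [[16#0, 16#55555554], [16#55555555, 16#AAAAAAA9], [16#AAAAAAAA, 16#FFFFFFFF]].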
-
-spec build_shard([non_neg_integer()], binary(), binary()) -> #shard{}.
build_shard(Range, DbName, Suffix) ->
Shard = #shard{dbname = DbName, range = Range, node = node()},
mem3_util:name_shard(Shard, <<".", Suffix/binary>>).
-
-spec running_jobs() -> [#job{}].
running_jobs() ->
Pat = #job{job_state = running, _ = '_'},
ets:match_object(?MODULE, Pat).
-
-spec info_update(atom(), any(), [tuple()]) -> [tuple()].
info_update(Key, Val, StateInfo) ->
lists:keystore(Key, 1, StateInfo, {Key, Val}).
-
-spec info_delete(atom(), [tuple()]) -> [tuple()].
info_delete(Key, StateInfo) ->
lists:keydelete(Key, 1, StateInfo).
-
-spec checkpoint_int(#job{}, #state{}) -> #state{}.
checkpoint_int(#job{} = Job, State) ->
couch_log:debug("~p checkpoint ~s", [?MODULE, jobfmt(Job)]),
@@ -734,7 +679,6 @@ checkpoint_int(#job{} = Job, State) ->
State
end.
-
-spec report_int(#job{}) -> ok | not_found.
report_int(Job) ->
case ets:lookup(?MODULE, Job#job.id) of
@@ -756,7 +700,6 @@ report_int(Job) ->
not_found
end.
-
-spec remove_job_int(#job{}, #state{}) -> ok | {error, not_found}.
remove_job_int(Id, State) ->
couch_log:notice("~p call remove_job Id:~p", [?MODULE, Id]),
@@ -770,7 +713,6 @@ remove_job_int(Id, State) ->
{error, not_found}
end.
-
% This function is for testing and debugging only
-spec reset_state(#state{}) -> #state{}.
reset_state(#state{} = State) ->
@@ -781,11 +723,15 @@ reset_state(#state{} = State) ->
ets:delete_all_objects(?MODULE),
couch_log:warning("~p resetting all job states", [?MODULE]),
Jobs = mem3_reshard_store:get_jobs(State),
- lists:foldl(fun(#job{id = Id}, StateAcc) ->
- couch_log:warning("~p resetting job state ~p", [?MODULE, Id]),
- ok = mem3_reshard_store:delete_job(StateAcc, Id),
- StateAcc
- end, State, Jobs),
+ lists:foldl(
+ fun(#job{id = Id}, StateAcc) ->
+ couch_log:warning("~p resetting job state ~p", [?MODULE, Id]),
+ ok = mem3_reshard_store:delete_job(StateAcc, Id),
+ StateAcc
+ end,
+ State,
+ Jobs
+ ),
couch_log:warning("~p resetting state done", [?MODULE]),
State#state{
state = running,
@@ -793,38 +739,32 @@ reset_state(#state{} = State) ->
update_time = now_sec()
}.
-
-spec update_job_history(#job{}) -> #job{}.
update_job_history(#job{job_state = St, update_time = Ts} = Job) ->
Hist = Job#job.history,
- Reason = case couch_util:get_value(reason, Job#job.state_info) of
- undefined -> null;
- Val -> couch_util:to_binary(Val)
- end,
+ Reason =
+ case couch_util:get_value(reason, Job#job.state_info) of
+ undefined -> null;
+ Val -> couch_util:to_binary(Val)
+ end,
Job#job{history = update_history(St, Reason, Ts, Hist)}.
-
update_history_rev(State, null, Ts, [{_, State, Detail} | Rest]) ->
% Just updated the detail, state stays the same, no new entry added
[{Ts, State, Detail} | Rest];
-
update_history_rev(State, Detail, Ts, [{_, State, Detail} | Rest]) ->
% State and detail were same as last event, just update the timestamp
[{Ts, State, Detail} | Rest];
-
update_history_rev(State, Detail, Ts, History) ->
[{Ts, State, Detail} | History].
-
-spec max_history() -> non_neg_integer().
max_history() ->
config:get_integer("reshard", "max_history", ?DEFAULT_MAX_HISTORY).
-
-spec maybe_disable(#state{}) -> #state{}.
maybe_disable(#state{} = State) ->
case is_disabled() of
@@ -839,19 +779,17 @@ maybe_disable(#state{} = State) ->
State
end.
-
-spec jobs_by_db_and_state(binary(), split_state() | '_') -> [job_id()].
jobs_by_db_and_state(Db, State) ->
DbName = mem3:dbname(Db),
Pat = #job{
id = '$1',
- source =#shard{dbname = DbName, _ = '_'},
+ source = #shard{dbname = DbName, _ = '_'},
job_state = State,
_ = '_'
},
[JobId || [JobId] <- ets:match(?MODULE, Pat)].
-
-spec db_exists(binary()) -> boolean().
db_exists(Name) ->
try
@@ -862,7 +800,6 @@ db_exists(Name) ->
false
end.
-
-spec db_monitor(pid()) -> no_return().
db_monitor(Server) ->
couch_log:notice("~p db monitor ~p starting", [?MODULE, self()]),
@@ -870,7 +807,6 @@ db_monitor(Server) ->
couch_event:register_all(self()),
db_monitor_loop(Server, EvtRef).
-
-spec db_monitor_loop(pid(), reference()) -> no_return().
db_monitor_loop(Server, EvtRef) ->
receive
@@ -899,7 +835,6 @@ db_monitor_loop(Server, EvtRef) ->
db_monitor_loop(Server, EvtRef)
end.
-
-spec statefmt(#state{} | term()) -> string().
statefmt(#state{state = StateName}) ->
Total = ets:info(?MODULE, size),
@@ -907,12 +842,10 @@ statefmt(#state{state = StateName}) ->
Msg = "#state{~s total:~B active:~B}",
Fmt = io_lib:format(Msg, [StateName, Total, Active]),
lists:flatten(Fmt);
-
statefmt(State) ->
Fmt = io_lib:format("<Unknown split state:~p>", [State]),
lists:flatten(Fmt).
-
-spec jobfmt(#job{}) -> string().
jobfmt(#job{} = Job) ->
mem3_reshard_job:jobfmt(Job).
diff --git a/src/mem3/src/mem3_reshard_api.erl b/src/mem3/src/mem3_reshard_api.erl
index 0d3377db7..a4d395461 100644
--- a/src/mem3/src/mem3_reshard_api.erl
+++ b/src/mem3/src/mem3_reshard_api.erl
@@ -24,28 +24,28 @@
get_shard_splitting_state/0
]).
-
create_jobs(Node, Shard, Db, Range, split) ->
- lists:map(fun(S) ->
- N = mem3:node(S),
- Name = mem3:name(S),
- case rpc:call(N, mem3_reshard, start_split_job, [Name]) of
- {badrpc, Error} ->
- {error, Error, N, Name};
- {ok, JobId} ->
- {ok, JobId, N, Name};
- {error, Error} ->
- {error, Error, N, Name}
- end
- end, pick_shards(Node, Shard, Db, Range)).
-
+ lists:map(
+ fun(S) ->
+ N = mem3:node(S),
+ Name = mem3:name(S),
+ case rpc:call(N, mem3_reshard, start_split_job, [Name]) of
+ {badrpc, Error} ->
+ {error, Error, N, Name};
+ {ok, JobId} ->
+ {ok, JobId, N, Name};
+ {error, Error} ->
+ {error, Error, N, Name}
+ end
+ end,
+ pick_shards(Node, Shard, Db, Range)
+ ).
get_jobs() ->
Nodes = mem3_util:live_nodes(),
{Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, jobs, []),
lists:flatten(Replies).
-
get_job(JobId) ->
Nodes = mem3_util:live_nodes(),
{Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, job, [JobId]),
@@ -56,26 +56,40 @@ get_job(JobId) ->
{error, not_found}
end.
-
get_summary() ->
Nodes = mem3_util:live_nodes(),
{Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, get_state, []),
- Stats0 = #{running => 0, total => 0, completed => 0, failed => 0,
- stopped => 0},
- StatsF = lists:foldl(fun({Res}, Stats) ->
- maps:map(fun(Stat, OldVal) ->
- OldVal + couch_util:get_value(Stat, Res, 0)
- end, Stats)
- end, Stats0, Replies),
+ Stats0 = #{
+ running => 0,
+ total => 0,
+ completed => 0,
+ failed => 0,
+ stopped => 0
+ },
+ StatsF = lists:foldl(
+ fun({Res}, Stats) ->
+ maps:map(
+ fun(Stat, OldVal) ->
+ OldVal + couch_util:get_value(Stat, Res, 0)
+ end,
+ Stats
+ )
+ end,
+ Stats0,
+ Replies
+ ),
{State, Reason} = state_and_reason(Replies),
StateReasonProps = [{state, State}, {state_reason, Reason}],
{StateReasonProps ++ lists:sort(maps:to_list(StatsF))}.
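% A rough sketch of the summary EJSON built above, for a hypothetical cluster
% with a single running job (counts and reason are illustrative only):
%   {[{state, running}, {state_reason, null},
%     {completed, 0}, {failed, 0}, {running, 1}, {stopped, 0}, {total, 1}]}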
-
resume_job(JobId) ->
Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, resume_job,
- [JobId]),
+ {Replies, _Bad} = rpc:multicall(
+ Nodes,
+ mem3_reshard,
+ resume_job,
+ [JobId]
+ ),
WithoutNotFound = [R || R <- Replies, R =/= {error, not_found}],
case lists:usort(WithoutNotFound) of
[ok] ->
@@ -86,11 +100,14 @@ resume_job(JobId) ->
{error, not_found}
end.
-
stop_job(JobId, Reason) ->
Nodes = mem3_util:live_nodes(),
- {Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, stop_job,
- [JobId, Reason]),
+ {Replies, _Bad} = rpc:multicall(
+ Nodes,
+ mem3_reshard,
+ stop_job,
+ [JobId, Reason]
+ ),
WithoutNotFound = [R || R <- Replies, R =/= {error, not_found}],
case lists:usort(WithoutNotFound) of
[ok] ->
@@ -101,7 +118,6 @@ stop_job(JobId, Reason) ->
{error, not_found}
end.
-
start_shard_splitting() ->
{Replies, _Bad} = rpc:multicall(mem3_reshard, start, []),
case lists:usort(lists:flatten(Replies)) of
@@ -111,7 +127,6 @@ start_shard_splitting() ->
{error, {[{error, couch_util:to_binary(Error)}]}}
end.
-
stop_shard_splitting(Reason) ->
{Replies, _Bad} = rpc:multicall(mem3_reshard, stop, [Reason]),
case lists:usort(lists:flatten(Replies)) of
@@ -121,22 +136,24 @@ stop_shard_splitting(Reason) ->
{error, {[{error, couch_util:to_binary(Error)}]}}
end.
-
get_shard_splitting_state() ->
Nodes = mem3_util:live_nodes(),
{Replies, _Bad} = rpc:multicall(Nodes, mem3_reshard, get_state, []),
state_and_reason(Replies).
-
state_and_reason(StateReplies) ->
- AccF = lists:foldl(fun({ResProps}, Acc) ->
- Reason = get_reason(ResProps),
- case couch_util:get_value(state, ResProps) of
- <<"running">> -> orddict:append(running, Reason, Acc);
- <<"stopped">> -> orddict:append(stopped, Reason, Acc);
- undefined -> Acc
- end
- end, orddict:from_list([{running, []}, {stopped, []}]), StateReplies),
+ AccF = lists:foldl(
+ fun({ResProps}, Acc) ->
+ Reason = get_reason(ResProps),
+ case couch_util:get_value(state, ResProps) of
+ <<"running">> -> orddict:append(running, Reason, Acc);
+ <<"stopped">> -> orddict:append(stopped, Reason, Acc);
+ undefined -> Acc
+ end
+ end,
+ orddict:from_list([{running, []}, {stopped, []}]),
+ StateReplies
+ ),
Running = orddict:fetch(running, AccF),
case length(Running) > 0 of
true ->
@@ -147,7 +164,6 @@ state_and_reason(StateReplies) ->
{stopped, Reason}
end.
-
pick_reason(Reasons) ->
Reasons1 = lists:usort(Reasons),
Reasons2 = [R || R <- Reasons1, R =/= undefined],
@@ -156,7 +172,6 @@ pick_reason(Reasons) ->
[R1 | _] -> R1
end.
-
get_reason(StateProps) when is_list(StateProps) ->
case couch_util:get_value(state_info, StateProps) of
[] -> undefined;
@@ -164,42 +179,39 @@ get_reason(StateProps) when is_list(StateProps) ->
{SInfoProps} -> couch_util:get_value(reason, SInfoProps)
end.
-
pick_shards(undefined, undefined, Db, undefined) when is_binary(Db) ->
check_node_required(),
check_range_required(),
mem3:shards(Db);
-
-pick_shards(Node, undefined, Db, undefined) when is_atom(Node),
- is_binary(Db) ->
+pick_shards(Node, undefined, Db, undefined) when
+ is_atom(Node),
+ is_binary(Db)
+->
check_range_required(),
[S || S <- mem3:shards(Db), mem3:node(S) == Node];
-
-pick_shards(undefined, undefined, Db, [_B, _E] = Range) when is_binary(Db) ->
+pick_shards(undefined, undefined, Db, [_B, _E] = Range) when is_binary(Db) ->
check_node_required(),
[S || S <- mem3:shards(Db), mem3:range(S) == Range];
-
-pick_shards(Node, undefined, Db, [_B, _E] = Range) when is_atom(Node),
- is_binary(Db) ->
+pick_shards(Node, undefined, Db, [_B, _E] = Range) when
+ is_atom(Node),
+ is_binary(Db)
+->
[S || S <- mem3:shards(Db), mem3:node(S) == Node, mem3:range(S) == Range];
-
pick_shards(undefined, Shard, undefined, undefined) when is_binary(Shard) ->
check_node_required(),
Db = mem3:dbname(Shard),
[S || S <- mem3:shards(Db), mem3:name(S) == Shard];
-
-pick_shards(Node, Shard, undefined, undefined) when is_atom(Node),
- is_binary(Shard) ->
+pick_shards(Node, Shard, undefined, undefined) when
+ is_atom(Node),
+ is_binary(Shard)
+->
Db = mem3:dbname(Shard),
[S || S <- mem3:shards(Db), mem3:name(S) == Shard, mem3:node(S) == Node];
-
pick_shards(_, undefined, undefined, _) ->
throw({bad_request, <<"Must specify at least `db` or `shard`">>});
-
pick_shards(_, Db, Shard, _) when is_binary(Db), is_binary(Shard) ->
throw({bad_request, <<"`db` and `shard` are mutually exclusive">>}).
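% Hypothetical calls illustrating the clause dispatch above (names invented):
%   pick_shards(undefined, undefined, <<"mydb">>, undefined)
%       -> every shard copy of mydb (subject to the check_* guards below)
%   pick_shards('node1@127.0.0.1', undefined, <<"mydb">>, [0, 16#7FFFFFFF])
%       -> only mydb copies on node1 whose range is exactly [0, 16#7FFFFFFF]
%   pick_shards(undefined, undefined, undefined, undefined)
%       -> throws {bad_request, <<"Must specify at least `db` or `shard`">>}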
-
check_node_required() ->
case config:get_boolean("reshard", "require_node_param", false) of
true ->
diff --git a/src/mem3/src/mem3_reshard_dbdoc.erl b/src/mem3/src/mem3_reshard_dbdoc.erl
index 4a0a35c1f..7fb69598e 100644
--- a/src/mem3/src/mem3_reshard_dbdoc.erl
+++ b/src/mem3/src/mem3_reshard_dbdoc.erl
@@ -27,11 +27,9 @@
code_change/3
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mem3_reshard.hrl").
-
-spec update_shard_map(#job{}) -> no_return | ok.
update_shard_map(#job{source = Source, target = Target} = Job) ->
Node = hd(mem3_util:live_nodes()),
@@ -58,93 +56,82 @@ update_shard_map(#job{source = Source, target = Target} = Job) ->
false -> exit(shard_update_did_not_propagate)
end.
-
-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
init(_) ->
couch_log:notice("~p start init()", [?MODULE]),
{ok, nil}.
-
terminate(_Reason, _State) ->
ok.
-
handle_call({update_shard_map, Source, Target}, _From, State) ->
- Res = try
- update_shard_map(Source, Target)
- catch
- throw:{error, Error} ->
- {error, Error}
- end,
+ Res =
+ try
+ update_shard_map(Source, Target)
+ catch
+ throw:{error, Error} ->
+ {error, Error}
+ end,
{reply, Res, State};
-
handle_call(Call, From, State) ->
couch_log:error("~p unknown call ~p from: ~p", [?MODULE, Call, From]),
{noreply, State}.
-
handle_cast(Cast, State) ->
couch_log:error("~p unexpected cast ~p", [?MODULE, Cast]),
{noreply, State}.
-
handle_info(Info, State) ->
couch_log:error("~p unexpected info ~p", [?MODULE, Info]),
{noreply, State}.
-
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-
% Private
update_shard_map(Source, Target) ->
ok = validate_coordinator(),
ok = replicate_from_all_nodes(shard_update_timeout_msec()),
DocId = mem3:dbname(Source#shard.name),
- OldDoc = case mem3_util:open_db_doc(DocId) of
- {ok, #doc{deleted = true}} ->
- throw({error, missing_source});
- {ok, #doc{} = Doc} ->
- Doc;
- {not_found, deleted} ->
- throw({error, missing_source});
- OpenErr ->
- throw({error, {shard_doc_open_error, OpenErr}})
- end,
+ OldDoc =
+ case mem3_util:open_db_doc(DocId) of
+ {ok, #doc{deleted = true}} ->
+ throw({error, missing_source});
+ {ok, #doc{} = Doc} ->
+ Doc;
+ {not_found, deleted} ->
+ throw({error, missing_source});
+ OpenErr ->
+ throw({error, {shard_doc_open_error, OpenErr}})
+ end,
#doc{body = OldBody} = OldDoc,
NewBody = update_shard_props(OldBody, Source, Target),
- {ok, _} = write_shard_doc(OldDoc, NewBody),
+ {ok, _} = write_shard_doc(OldDoc, NewBody),
ok = replicate_to_all_nodes(shard_update_timeout_msec()),
{ok, NewBody}.
-
validate_coordinator() ->
case hd(mem3_util:live_nodes()) =:= node() of
true -> ok;
false -> throw({error, coordinator_changed})
end.
-
replicate_from_all_nodes(TimeoutMSec) ->
case mem3_util:replicate_dbs_from_all_nodes(TimeoutMSec) of
ok -> ok;
Error -> throw({error, Error})
end.
-
replicate_to_all_nodes(TimeoutMSec) ->
case mem3_util:replicate_dbs_to_all_nodes(TimeoutMSec) of
ok -> ok;
Error -> throw({error, Error})
end.
-
write_shard_doc(#doc{id = Id} = Doc, Body) ->
UpdatedDoc = Doc#doc{body = Body},
couch_util:with_db(mem3_sync:shards_db(), fun(Db) ->
@@ -156,9 +143,8 @@ write_shard_doc(#doc{id = Id} = Doc, Body) ->
end
end).
-
update_shard_props({Props0}, #shard{} = Source, [#shard{} | _] = Targets) ->
- {ByNode0} = couch_util:get_value(<<"by_node">>, Props0, {[]}),
+ {ByNode0} = couch_util:get_value(<<"by_node">>, Props0, {[]}),
ByNodeKV = {<<"by_node">>, {update_by_node(ByNode0, Source, Targets)}},
Props1 = lists:keyreplace(<<"by_node">>, 1, Props0, ByNodeKV),
@@ -175,7 +161,6 @@ update_shard_props({Props0}, #shard{} = Source, [#shard{} | _] = Targets) ->
{Props3}.
-
update_by_node(ByNode, #shard{} = Source, [#shard{} | _] = Targets) ->
{NodeKey, SKey} = {node_key(Source), range_key(Source)},
{_, Ranges} = lists:keyfind(NodeKey, 1, ByNode),
@@ -183,12 +168,10 @@ update_by_node(ByNode, #shard{} = Source, [#shard{} | _] = Targets) ->
Ranges2 = Ranges1 ++ [range_key(T) || T <- Targets],
lists:keyreplace(NodeKey, 1, ByNode, {NodeKey, lists:sort(Ranges2)}).
-
update_by_range(ByRange, Source, Targets) ->
ByRange1 = remove_node_from_source(ByRange, Source),
lists:foldl(fun add_node_to_target_foldl/2, ByRange1, Targets).
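% Illustrative fragment of the shard map document these two helpers rewrite,
% before and after splitting 00000000-ffffffff on a hypothetical node1:
%   before: "by_node": {"node1@host": ["00000000-ffffffff"]}
%           "by_range": {"00000000-ffffffff": ["node1@host"]}
%   after:  "by_node": {"node1@host": ["00000000-7fffffff", "80000000-ffffffff"]}
%           "by_range": {"00000000-7fffffff": ["node1@host"],
%                        "80000000-ffffffff": ["node1@host"]}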
-
remove_node_from_source(ByRange, Source) ->
{NodeKey, SKey} = {node_key(Source), range_key(Source)},
{_, SourceNodes} = lists:keyfind(SKey, 1, ByRange),
@@ -208,7 +191,6 @@ remove_node_from_source(ByRange, Source) ->
lists:keyreplace(SKey, 1, ByRange, {SKey, SourceNodes1})
end.
-
add_node_to_target_foldl(#shard{} = Target, ByRange) ->
{NodeKey, TKey} = {node_key(Target), range_key(Target)},
case lists:keyfind(TKey, 1, ByRange) of
@@ -227,21 +209,17 @@ add_node_to_target_foldl(#shard{} = Target, ByRange) ->
lists:sort([{TKey, [NodeKey]} | ByRange])
end.
-
node_key(#shard{node = Node}) ->
couch_util:to_binary(Node).
-
range_key(#shard{range = [B, E]}) ->
BHex = couch_util:to_hex(<<B:32/integer>>),
EHex = couch_util:to_hex(<<E:32/integer>>),
list_to_binary([BHex, "-", EHex]).
-
shard_update_timeout_msec() ->
config:get_integer("reshard", "shard_upate_timeout_msec", 300000).
-
wait_source_removed(#shard{name = Name} = Source, SleepSec, UntilSec) ->
case check_source_removed(Source) of
true ->
@@ -258,7 +236,6 @@ wait_source_removed(#shard{name = Name} = Source, SleepSec, UntilSec) ->
end
end.
-
check_source_removed(#shard{name = Name}) ->
DbName = mem3:dbname(Name),
Live = mem3_util:live_nodes(),
@@ -266,9 +243,13 @@ check_source_removed(#shard{name = Name}) ->
Nodes = lists:usort([N || N <- ShardNodes, lists:member(N, Live)]),
{Responses, _} = rpc:multicall(Nodes, mem3, shards, [DbName]),
Shards = lists:usort(lists:flatten(Responses)),
- SourcePresent = [S || S = #shard{name = S, node = N} <- Shards, S =:= Name,
- N =:= node()],
+ SourcePresent = [
+ S
+ || S = #shard{name = S, node = N} <- Shards,
+ S =:= Name,
+ N =:= node()
+ ],
case SourcePresent of
- [] -> true;
+ [] -> true;
[_ | _] -> false
end.
diff --git a/src/mem3/src/mem3_reshard_httpd.erl b/src/mem3/src/mem3_reshard_httpd.erl
index 3d0f77f39..5abe8025c 100644
--- a/src/mem3/src/mem3_reshard_httpd.erl
+++ b/src/mem3/src/mem3_reshard_httpd.erl
@@ -22,35 +22,37 @@
send_method_not_allowed/2
]).
-
-include_lib("couch/include/couch_db.hrl").
-
-define(JOBS, <<"jobs">>).
-define(STATE, <<"state">>).
-define(S_RUNNING, <<"running">>).
-define(S_STOPPED, <<"stopped">>).
-
% GET /_reshard
-handle_reshard_req(#httpd{method='GET', path_parts=[_]} = Req) ->
+handle_reshard_req(#httpd{method = 'GET', path_parts = [_]} = Req) ->
reject_if_disabled(),
State = mem3_reshard_api:get_summary(),
send_json(Req, State);
-
-handle_reshard_req(#httpd{path_parts=[_]} = Req) ->
+handle_reshard_req(#httpd{path_parts = [_]} = Req) ->
send_method_not_allowed(Req, "GET,HEAD");
-
% GET /_reshard/state
-handle_reshard_req(#httpd{method='GET',
- path_parts=[_, ?STATE]} = Req) ->
+handle_reshard_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_, ?STATE]
+ } = Req
+) ->
reject_if_disabled(),
{State, Reason} = mem3_reshard_api:get_shard_splitting_state(),
send_json(Req, {[{state, State}, {reason, Reason}]});
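% Illustrative response body for GET /_reshard/state (values depend on the
% cluster): {"state": "running", "reason": null}, or when stopped by an
% operator: {"state": "stopped", "reason": "Rebalancing completed"}.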
-
% PUT /_reshard/state
-handle_reshard_req(#httpd{method='PUT',
- path_parts=[_, ?STATE]} = Req) ->
+handle_reshard_req(
+ #httpd{
+ method = 'PUT',
+ path_parts = [_, ?STATE]
+ } = Req
+) ->
reject_if_disabled(),
couch_httpd:validate_ctype(Req, "application/json"),
{Props} = couch_httpd:json_body_obj(Req),
@@ -67,10 +69,11 @@ handle_reshard_req(#httpd{method='PUT',
send_json(Req, 500, JsonError)
end;
{?S_STOPPED, Reason} ->
- Reason1 = case Reason =:= undefined of
- false -> Reason;
- true -> <<"Cluster-wide resharding stopped by the user">>
- end,
+ Reason1 =
+ case Reason =:= undefined of
+ false -> Reason;
+ true -> <<"Cluster-wide resharding stopped by the user">>
+ end,
case mem3_reshard_api:stop_shard_splitting(Reason1) of
{ok, JsonResult} ->
send_json(Req, 200, JsonResult);
@@ -80,23 +83,23 @@ handle_reshard_req(#httpd{method='PUT',
{_, _} ->
throw({bad_request, <<"State field not `running` or `stopped`">>})
end;
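% Illustrative request bodies accepted by PUT /_reshard/state (reason optional):
%   {"state": "stopped", "reason": "Pausing for maintenance"}
%   {"state": "running"}
% Any other "state" value hits the bad_request thrown above.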
-
-handle_reshard_req(#httpd{path_parts=[_, ?STATE]} = Req) ->
+handle_reshard_req(#httpd{path_parts = [_, ?STATE]} = Req) ->
send_method_not_allowed(Req, "GET,HEAD,PUT");
-
-handle_reshard_req(#httpd{path_parts=[_, ?STATE | _]} = _Req) ->
+handle_reshard_req(#httpd{path_parts = [_, ?STATE | _]} = _Req) ->
throw(not_found);
-
% GET /_reshard/jobs
-handle_reshard_req(#httpd{method='GET', path_parts=[_, ?JOBS]}=Req) ->
+handle_reshard_req(#httpd{method = 'GET', path_parts = [_, ?JOBS]} = Req) ->
reject_if_disabled(),
Jobs = mem3_reshard_api:get_jobs(),
Total = length(Jobs),
send_json(Req, {[{total_rows, Total}, {offset, 0}, {jobs, Jobs}]});
-
% POST /_reshard/jobs {"node": "...", "shard": "..."}
-handle_reshard_req(#httpd{method = 'POST',
- path_parts=[_, ?JOBS]} = Req) ->
+handle_reshard_req(
+ #httpd{
+ method = 'POST',
+ path_parts = [_, ?JOBS]
+ } = Req
+) ->
reject_if_disabled(),
couch_httpd:validate_ctype(Req, "application/json"),
{Props} = couch_httpd:json_body_obj(Req),
@@ -111,28 +114,33 @@ handle_reshard_req(#httpd{method = 'POST',
_ -> ok
end,
Oks = length([R || {ok, _, _, _} = R <- Res]),
- Code = case {Oks, length(Res)} of
- {Oks, Oks} -> 201;
- {Oks, _} when Oks > 0 -> 202;
- {0, _} -> 500
- end,
- EJson = lists:map(fun
- ({ok, Id, N, S}) ->
- {[{ok, true}, {id, Id}, {node, N}, {shard, S}]};
- ({error, E, N, S}) ->
- {[{error, couch_util:to_binary(E)}, {node, N}, {shard, S}]}
- end, Res),
+ Code =
+ case {Oks, length(Res)} of
+ {Oks, Oks} -> 201;
+ {Oks, _} when Oks > 0 -> 202;
+ {0, _} -> 500
+ end,
+ EJson = lists:map(
+ fun
+ ({ok, Id, N, S}) ->
+ {[{ok, true}, {id, Id}, {node, N}, {shard, S}]};
+ ({error, E, N, S}) ->
+ {[{error, couch_util:to_binary(E)}, {node, N}, {shard, S}]}
+ end,
+ Res
+ ),
send_json(Req, Code, EJson);
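% Sketch of the status code selection above with hypothetical results: 201 when
% every shard copy accepted the job, 202 when only some did (the EJson list then
% mixes ok and error entries), and 500 when no job could be created at all.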
-
-handle_reshard_req(#httpd{path_parts=[_, ?JOBS]} = Req) ->
+handle_reshard_req(#httpd{path_parts = [_, ?JOBS]} = Req) ->
send_method_not_allowed(Req, "GET,HEAD,POST");
-
-handle_reshard_req(#httpd{path_parts=[_, _]}) ->
+handle_reshard_req(#httpd{path_parts = [_, _]}) ->
throw(not_found);
-
% GET /_reshard/jobs/$jobid
-handle_reshard_req(#httpd{method='GET',
- path_parts=[_, ?JOBS, JobId]}=Req) ->
+handle_reshard_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_, ?JOBS, JobId]
+ } = Req
+) ->
reject_if_disabled(),
case mem3_reshard_api:get_job(JobId) of
{ok, JobInfo} ->
@@ -140,10 +148,13 @@ handle_reshard_req(#httpd{method='GET',
{error, not_found} ->
throw(not_found)
end;
-
% DELETE /_reshard/jobs/$jobid
-handle_reshard_req(#httpd{method='DELETE',
- path_parts=[_, ?JOBS, JobId]}=Req) ->
+handle_reshard_req(
+ #httpd{
+ method = 'DELETE',
+ path_parts = [_, ?JOBS, JobId]
+ } = Req
+) ->
reject_if_disabled(),
case mem3_reshard_api:get_job(JobId) of
{ok, {Props}} ->
@@ -158,30 +169,36 @@ handle_reshard_req(#httpd{method='DELETE',
{error, not_found} ->
throw(not_found)
end;
-
-handle_reshard_req(#httpd{path_parts=[_, ?JOBS, _]} = Req) ->
+handle_reshard_req(#httpd{path_parts = [_, ?JOBS, _]} = Req) ->
send_method_not_allowed(Req, "GET,HEAD,DELETE");
-
% GET /_reshard/jobs/$jobid/state
-handle_reshard_req(#httpd{method='GET',
- path_parts=[_, ?JOBS, JobId, ?STATE]} = Req) ->
+handle_reshard_req(
+ #httpd{
+ method = 'GET',
+ path_parts = [_, ?JOBS, JobId, ?STATE]
+ } = Req
+) ->
reject_if_disabled(),
case mem3_reshard_api:get_job(JobId) of
{ok, {Props}} ->
JobState = couch_util:get_value(job_state, Props),
{SIProps} = couch_util:get_value(state_info, Props),
- Reason = case couch_util:get_value(reason, SIProps) of
- undefined -> null;
- Val -> couch_util:to_binary(Val)
- end,
+ Reason =
+ case couch_util:get_value(reason, SIProps) of
+ undefined -> null;
+ Val -> couch_util:to_binary(Val)
+ end,
send_json(Req, 200, {[{state, JobState}, {reason, Reason}]});
{error, not_found} ->
throw(not_found)
end;
-
% PUT /_reshard/jobs/$jobid/state
-handle_reshard_req(#httpd{method='PUT',
- path_parts=[_, ?JOBS, JobId, ?STATE]} = Req) ->
+handle_reshard_req(
+ #httpd{
+ method = 'PUT',
+ path_parts = [_, ?JOBS, JobId, ?STATE]
+ } = Req
+) ->
reject_if_disabled(),
couch_httpd:validate_ctype(Req, "application/json"),
{Props} = couch_httpd:json_body_obj(Req),
@@ -200,10 +217,11 @@ handle_reshard_req(#httpd{method='PUT',
send_json(Req, 500, JsonError)
end;
{?S_STOPPED, Reason} ->
- Reason1 = case Reason =:= undefined of
- false -> Reason;
- true -> <<"Stopped by user">>
- end,
+ Reason1 =
+ case Reason =:= undefined of
+ false -> Reason;
+ true -> <<"Stopped by user">>
+ end,
case mem3_reshard_api:stop_job(JobId, Reason1) of
ok ->
send_json(Req, 200, {[{ok, true}]});
@@ -215,28 +233,22 @@ handle_reshard_req(#httpd{method='PUT',
{_, _} ->
throw({bad_request, <<"State field not `running` or `stopped`">>})
end;
-
-handle_reshard_req(#httpd{path_parts=[_, ?JOBS, _, ?STATE]} = Req) ->
+handle_reshard_req(#httpd{path_parts = [_, ?JOBS, _, ?STATE]} = Req) ->
send_method_not_allowed(Req, "GET,HEAD,PUT").
-
reject_if_disabled() ->
case mem3_reshard:is_disabled() of
true -> throw(not_implemented);
false -> ok
end.
-
validate_type(<<"split">>) ->
split;
-
validate_type(_Type) ->
throw({bad_request, <<"`job type must be `split`">>}).
-
validate_node(undefined) ->
undefined;
-
validate_node(Node0) when is_binary(Node0) ->
Nodes = mem3_util:live_nodes(),
try binary_to_existing_atom(Node0, utf8) of
@@ -249,14 +261,11 @@ validate_node(Node0) when is_binary(Node0) ->
error:badarg ->
throw({bad_request, <<"`node` is not a valid node name">>})
end;
-
validate_node(_Node) ->
throw({bad_request, <<"Invalid `node`">>}).
-
validate_shard(undefined) ->
undefined;
-
validate_shard(Shard) when is_binary(Shard) ->
case Shard of
<<"shards/", _:8/binary, "-", _:8/binary, "/", _/binary>> ->
@@ -264,40 +273,35 @@ validate_shard(Shard) when is_binary(Shard) ->
_ ->
throw({bad_request, <<"`shard` is invalid">>})
end;
-
validate_shard(_Shard) ->
throw({bad_request, <<"Invalid `shard`">>}).
-
validate_db(undefined) ->
undefined;
-
validate_db(DbName) when is_binary(DbName) ->
try mem3:shards(DbName) of
[_ | _] -> DbName;
- _ -> throw({bad_request, <<"`No shards in `db`">>})
+ _ -> throw({bad_request, <<"`No shards in `db`">>})
catch
_:_ ->
throw({bad_request, <<"Invalid `db`">>})
end;
-
validate_db(_bName) ->
throw({bad_request, <<"Invalid `db`">>}).
-
validate_range(undefined) ->
undefined;
-
validate_range(<<BBin:8/binary, "-", EBin:8/binary>>) ->
- {B, E} = try
- {
- httpd_util:hexlist_to_integer(binary_to_list(BBin)),
- httpd_util:hexlist_to_integer(binary_to_list(EBin))
- }
- catch
- _:_ ->
- invalid_range()
- end,
+ {B, E} =
+ try
+ {
+ httpd_util:hexlist_to_integer(binary_to_list(BBin)),
+ httpd_util:hexlist_to_integer(binary_to_list(EBin))
+ }
+ catch
+ _:_ ->
+ invalid_range()
+ end,
if
B < 0 -> invalid_range();
E < 0 -> invalid_range();
@@ -308,10 +312,8 @@ validate_range(<<BBin:8/binary, "-", EBin:8/binary>>) ->
end,
% Use a list format here to make it look the same as #shard's range
[B, E];
-
validate_range(_Range) ->
invalid_range().
-
invalid_range() ->
throw({bad_request, <<"Invalid `range`">>}).
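% Illustrative parses (hypothetical inputs):
%   validate_range(<<"00000000-7fffffff">>) -> [0, 16#7FFFFFFF]
% Anything not shaped as 8 hex digits, "-", 8 hex digits falls through to the
% last clause and throws {bad_request, <<"Invalid `range`">>}.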
diff --git a/src/mem3/src/mem3_reshard_index.erl b/src/mem3/src/mem3_reshard_index.erl
index d4cb7caa1..fef25d52c 100644
--- a/src/mem3/src/mem3_reshard_index.erl
+++ b/src/mem3/src/mem3_reshard_index.erl
@@ -12,18 +12,15 @@
-module(mem3_reshard_index).
-
-export([
design_docs/1,
target_indices/2,
spawn_builders/1
]).
-
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
-
%% Public API
design_docs(DbName) ->
@@ -37,16 +34,15 @@ design_docs(DbName) ->
Else ->
Else
end
- catch error:database_does_not_exist ->
- {ok, []}
+ catch
+ error:database_does_not_exist ->
+ {ok, []}
end.
-
target_indices(Docs, Targets) ->
Indices = [[indices(N, D) || D <- Docs] || #shard{name = N} <- Targets],
lists:flatten(Indices).
-
spawn_builders(Indices) ->
Results = [build_index(Index) || Index <- Indices],
Oks = [{ok, Pid} || {ok, Pid} <- Results, is_pid(Pid)],
@@ -58,14 +54,16 @@ spawn_builders(Indices) ->
% spawned, kill the spawned ones and return the error.
ErrMsg = "~p failed to spawn index builders: ~p ~p",
couch_log:error(ErrMsg, [?MODULE, Error, Indices]),
- lists:foreach(fun({ok, Pid}) ->
- catch unlink(Pid),
- catch exit(Pid, kill)
- end, Oks),
+ lists:foreach(
+ fun({ok, Pid}) ->
+ catch unlink(Pid),
+ catch exit(Pid, kill)
+ end,
+ Oks
+ ),
{error, Error}
end.
-
%% Private API
fabric_design_docs(DbName) ->
@@ -74,12 +72,10 @@ fabric_design_docs(DbName) ->
{error, Error} -> Error
end.
-
indices(DbName, Doc) ->
- mrview_indices(DbName, Doc)
- ++ [dreyfus_indices(DbName, Doc) || has_app(dreyfus)]
- ++ [hastings_indices(DbName, Doc) || has_app(hastings)].
-
+ mrview_indices(DbName, Doc) ++
+ [dreyfus_indices(DbName, Doc) || has_app(dreyfus)] ++
+ [hastings_indices(DbName, Doc) || has_app(hastings)].
mrview_indices(DbName, Doc) ->
try
@@ -98,7 +94,6 @@ mrview_indices(DbName, Doc) ->
[]
end.
-
dreyfus_indices(DbName, Doc) ->
try
Indices = dreyfus_index:design_doc_to_indexes(Doc),
@@ -110,7 +105,6 @@ dreyfus_indices(DbName, Doc) ->
[]
end.
-
hastings_indices(DbName, Doc) ->
try
Indices = hastings_index:design_doc_to_indexes(Doc),
@@ -122,7 +116,6 @@ hastings_indices(DbName, Doc) ->
[]
end.
-
build_index({mrview, DbName, MRSt}) ->
case couch_index_server:get_index(couch_mrview_index, MRSt) of
{ok, Pid} ->
@@ -132,8 +125,7 @@ build_index({mrview, DbName, MRSt}) ->
Error ->
Error
end;
-
-build_index({dreyfus, DbName, Index})->
+build_index({dreyfus, DbName, Index}) ->
case dreyfus_index_manager:get_index(DbName, Index) of
{ok, Pid} ->
Args = [Pid, get_update_seq(DbName)],
@@ -142,7 +134,6 @@ build_index({dreyfus, DbName, Index})->
Error ->
Error
end;
-
build_index({hastings, DbName, Index}) ->
case hastings_index_manager:get_index(DbName, Index) of
{ok, Pid} ->
@@ -153,11 +144,9 @@ build_index({hastings, DbName, Index}) ->
Error
end.
-
has_app(App) ->
code:lib_dir(App) /= {error, bad_name}.
-
get_update_seq(DbName) ->
couch_util:with_db(DbName, fun(Db) ->
couch_db:get_update_seq(Db)
diff --git a/src/mem3/src/mem3_reshard_job.erl b/src/mem3/src/mem3_reshard_job.erl
index aedca21bb..aff5c2648 100644
--- a/src/mem3/src/mem3_reshard_job.erl
+++ b/src/mem3/src/mem3_reshard_job.erl
@@ -12,7 +12,6 @@
-module(mem3_reshard_job).
-
-export([
start_link/1,
@@ -46,11 +45,9 @@
completed/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mem3_reshard.hrl").
-
% Batch size for internal replication topoffs
-define(INTERNAL_REP_BATCH_SIZE, 2000).
@@ -73,7 +70,6 @@
completed
]).
-
% When a job starts it may be resuming from a partially
% completed state. These state pairs list the state
% we have to restart from for each possible state.
@@ -91,7 +87,6 @@
completed => completed
}).
-
% If we have a worker failing during any of these
% states we need to clean up the targets
-define(CLEAN_TARGET_STATES, [
@@ -102,11 +97,9 @@
copy_local_docs
]).
-
start_link(#job{} = Job) ->
proc_lib:start_link(?MODULE, init, [Job]).
-
% This is called by the main process after it has checkpointed the progress
% of the job. After the new state is checkpointed, we signal the job to start
% executing that state.
@@ -115,7 +108,6 @@ checkpoint_done(#job{pid = Pid} = Job) ->
Pid ! checkpoint_done,
ok.
-
% Formatting function, used for logging mostly
jobfmt(#job{} = Job) ->
#job{
@@ -131,13 +123,13 @@ jobfmt(#job{} = Job) ->
Fmt = io_lib:format(Msg, [Id, Source, TargetCount, JobState, State, Pid]),
lists:flatten(Fmt).
-
% This is the function which picks between various targets. It is used here as
% well as in mem3_rep internal replicator and couch_db_split bulk copy logic.
% Given a document id and list of ranges, and a hash function, it will pick one
% of the range or return not_in_range atom.
pickfun(DocId, [[B, E] | _] = Ranges, {_M, _F, _A} = HashFun) when
- is_integer(B), is_integer(E), B =< E ->
+ is_integer(B), is_integer(E), B =< E
+->
HashKey = mem3_hash:calculate(HashFun, DocId),
Pred = fun([Begin, End]) ->
Begin =< HashKey andalso HashKey =< End
@@ -147,7 +139,6 @@ pickfun(DocId, [[B, E] | _] = Ranges, {_M, _F, _A} = HashFun) when
[Key] -> Key
end.
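% Illustrative use (ranges and hash value invented): with ranges
% [[0, 16#7FFFFFFF], [16#80000000, 16#FFFFFFFF]] a doc id whose hash lands at
% 16#1234 picks [0, 16#7FFFFFFF]; an id hashing outside every listed range
% returns the atom not_in_range.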
-
init(#job{} = Job0) ->
process_flag(trap_exit, true),
Job1 = set_start_state(Job0#job{
@@ -162,52 +153,47 @@ init(#job{} = Job0) ->
ok = checkpoint(Job2),
run(Job2).
-
run(#job{split_state = CurrState} = Job) ->
- StateFun = case CurrState of
- topoff1 -> topoff;
- topoff2 -> topoff;
- topoff3 -> topoff;
- _ -> CurrState
- end,
- NewJob = try
- Job1 = ?MODULE:StateFun(Job),
- Job2 = wait_for_workers(Job1),
- Job3 = switch_to_next_state(Job2),
- ok = checkpoint(Job3),
- Job3
- catch
- throw:{retry, RetryJob} ->
- RetryJob
- end,
+ StateFun =
+ case CurrState of
+ topoff1 -> topoff;
+ topoff2 -> topoff;
+ topoff3 -> topoff;
+ _ -> CurrState
+ end,
+ NewJob =
+ try
+ Job1 = ?MODULE:StateFun(Job),
+ Job2 = wait_for_workers(Job1),
+ Job3 = switch_to_next_state(Job2),
+ ok = checkpoint(Job3),
+ Job3
+ catch
+ throw:{retry, RetryJob} ->
+ RetryJob
+ end,
run(NewJob).
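% For orientation: the full progression lives in ?SPLIT_STATES (only partially
% visible in this hunk); a split job normally moves through new, initial_copy,
% topoff1, build_indices, topoff2, copy_local_docs, update_shardmap,
% wait_source_close, topoff3, source_delete and completed, with the three
% topoff states all dispatching to topoff/1 as the case above shows.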
-
set_start_state(#job{split_state = State} = Job) ->
case maps:get(State, ?STATE_RESTART, undefined) of
undefined ->
Fmt1 = "~p recover : unknown state ~s",
couch_log:error(Fmt1, [?MODULE, jobfmt(Job)]),
erlang:error({invalid_split_job_recover_state, Job});
- StartState->
+ StartState ->
Job#job{split_state = StartState}
end.
-
get_next_state(#job{split_state = State}) ->
get_next_state(State, ?SPLIT_STATES).
-
get_next_state(completed, _) ->
completed;
-
get_next_state(CurrState, [CurrState, NextState | _]) ->
NextState;
-
get_next_state(CurrState, [_ | Rest]) ->
get_next_state(CurrState, Rest).
-
switch_to_next_state(#job{} = Job0) ->
Info0 = Job0#job.state_info,
Info1 = info_delete(error, Info0),
@@ -222,7 +208,6 @@ switch_to_next_state(#job{} = Job0) ->
Job2 = update_split_history(Job1),
check_state(Job2).
-
checkpoint(Job) ->
% Ask main process to checkpoint. When it has finished it will notify us
% by calling checkpoint_done/1. The reason not to call the main process
@@ -241,10 +226,8 @@ checkpoint(Job) ->
handle_unknown_msg(Job, "checkpoint", Other)
end.
-
wait_for_workers(#job{workers = []} = Job) ->
Job;
-
wait_for_workers(#job{workers = Workers} = Job) ->
Parent = parent(),
receive
@@ -262,10 +245,8 @@ wait_for_workers(#job{workers = Workers} = Job) ->
handle_unknown_msg(Job, "wait_for_workers", Other)
end.
-
handle_worker_exit(#job{workers = Workers} = Job, Pid, normal) ->
Job#job{workers = Workers -- [Pid]};
-
handle_worker_exit(#job{} = Job, _Pid, {error, missing_source}) ->
Msg1 = "~p stopping worker due to source missing ~p",
couch_log:error(Msg1, [?MODULE, jobfmt(Job)]),
@@ -279,13 +260,11 @@ handle_worker_exit(#job{} = Job, _Pid, {error, missing_source}) ->
false ->
exit({error, missing_source})
end;
-
handle_worker_exit(#job{} = Job, _Pid, {error, missing_target}) ->
Msg = "~p stopping worker due to target db missing ~p",
couch_log:error(Msg, [?MODULE, jobfmt(Job)]),
kill_workers(Job),
exit({error, missing_target});
-
handle_worker_exit(#job{} = Job0, _Pid, Reason) ->
couch_log:error("~p worker error ~p ~p", [?MODULE, jobfmt(Job0), Reason]),
kill_workers(Job0),
@@ -297,11 +276,12 @@ handle_worker_exit(#job{} = Job0, _Pid, Reason) ->
exit(Reason)
end.
-
% Cleanup and exit when we receive an 'EXIT' message from our parent. In case
% the shard map is being updated, try to wait some time for it to finish.
-handle_exit(#job{split_state = update_shardmap, workers = [WPid]} = Job,
- Reason) ->
+handle_exit(
+ #job{split_state = update_shardmap, workers = [WPid]} = Job,
+ Reason
+) ->
Timeout = update_shard_map_timeout_sec(),
Msg1 = "~p job exit ~s ~p while shard map is updating, waiting ~p sec",
couch_log:warning(Msg1, [?MODULE, jobfmt(Job), Reason, Timeout]),
@@ -314,18 +294,16 @@ handle_exit(#job{split_state = update_shardmap, workers = [WPid]} = Job,
Msg3 = "~p ~s shard map update failed with error ~p",
couch_log:error(Msg3, [?MODULE, jobfmt(Job), Error]),
exit(Reason)
- after Timeout * 1000->
+ after Timeout * 1000 ->
Msg4 = "~p ~s shard map update timeout exceeded ~p sec",
couch_log:error(Msg4, [?MODULE, jobfmt(Job), Timeout]),
kill_workers(Job),
exit(Reason)
end;
-
handle_exit(#job{} = Job, Reason) ->
kill_workers(Job),
exit(Reason).
-
retry_state(#job{retries = Retries, state_info = Info} = Job0, Error) ->
Job1 = Job0#job{
retries = Retries + 1,
@@ -345,21 +323,21 @@ retry_state(#job{retries = Retries, state_info = Info} = Job0, Error) ->
end,
throw({retry, Job2}).
-
report(#job{manager = ManagerPid} = Job) ->
Job1 = Job#job{update_time = mem3_reshard:now_sec()},
ok = mem3_reshard:report(ManagerPid, Job1),
Job1.
-
kill_workers(#job{workers = Workers}) ->
- lists:foreach(fun(Worker) ->
- unlink(Worker),
- exit(Worker, kill)
- end, Workers),
+ lists:foreach(
+ fun(Worker) ->
+ unlink(Worker),
+ exit(Worker, kill)
+ end,
+ Workers
+ ),
flush_worker_messages().
-
flush_worker_messages() ->
Parent = parent(),
receive
@@ -369,7 +347,6 @@ flush_worker_messages() ->
ok
end.
-
parent() ->
case get('$ancestors') of
[Pid | _] when is_pid(Pid) -> Pid;
@@ -377,18 +354,15 @@ parent() ->
_ -> undefined
end.
-
handle_unknown_msg(Job, When, RMsg) ->
LogMsg = "~p ~s received an unknown message ~p when in ~s",
couch_log:error(LogMsg, [?MODULE, jobfmt(Job), RMsg, When]),
erlang:error({invalid_split_job_message, Job#job.id, When, RMsg}).
-
initial_copy(#job{} = Job) ->
Pid = spawn_link(?MODULE, initial_copy_impl, [Job]),
report(Job#job{workers = [Pid]}).
-
initial_copy_impl(#job{source = Source, target = Targets0} = Job) ->
#shard{name = SourceName} = Source,
Targets = [{R, N} || #shard{range = R, name = N} <- Targets0],
@@ -410,18 +384,16 @@ initial_copy_impl(#job{source = Source, target = Targets0} = Job) ->
exit({error, Error})
end.
-
topoff(#job{} = Job) ->
Pid = spawn_link(?MODULE, topoff_impl, [Job]),
report(Job#job{workers = [Pid]}).
-
topoff_impl(#job{source = #shard{} = Source, target = Targets}) ->
couch_log:notice("~p topoff ~p", [?MODULE, shardsstr(Source, Targets)]),
check_source_exists(Source, topoff),
check_targets_exist(Targets, topoff),
TMap = maps:from_list([{R, T} || #shard{range = R} = T <- Targets]),
- Opts = [{batch_size, ?INTERNAL_REP_BATCH_SIZE}, {batch_count, all}],
+ Opts = [{batch_size, ?INTERNAL_REP_BATCH_SIZE}, {batch_count, all}],
case mem3_rep:go(Source, TMap, Opts) of
{ok, Count} ->
Args = [?MODULE, shardsstr(Source, Targets), Count],
@@ -433,7 +405,6 @@ topoff_impl(#job{source = #shard{} = Source, target = Targets}) ->
exit({error, Error})
end.
-
build_indices(#job{} = Job) ->
#job{
source = #shard{name = SourceName} = Source,
@@ -462,12 +433,10 @@ build_indices(#job{} = Job) ->
end
end.
-
copy_local_docs(#job{split_state = copy_local_docs} = Job) ->
Pid = spawn_link(?MODULE, copy_local_docs_impl, [Job]),
report(Job#job{workers = [Pid]}).
-
copy_local_docs_impl(#job{source = Source, target = Targets0}) ->
#shard{name = SourceName} = Source,
Targets = [{R, N} || #shard{range = R, name = N} <- Targets0],
@@ -484,18 +453,15 @@ copy_local_docs_impl(#job{source = Source, target = Targets0}) ->
exit({error, Error})
end.
-
update_shardmap(#job{} = Job) ->
Pid = spawn_link(mem3_reshard_dbdoc, update_shard_map, [Job]),
report(Job#job{workers = [Pid]}).
-
wait_source_close(#job{source = #shard{name = Name}} = Job) ->
couch_event:notify(Name, deleted),
Pid = spawn_link(?MODULE, wait_source_close_impl, [Job]),
report(Job#job{workers = [Pid]}).
-
wait_source_close_impl(#job{source = #shard{name = Name}, target = Targets}) ->
Timeout = config:get_integer("reshard", "source_close_timeout_sec", 600),
check_targets_exist(Targets, wait_source_close),
@@ -513,7 +479,6 @@ wait_source_close_impl(#job{source = #shard{name = Name}, target = Targets}) ->
ok
end.
-
wait_source_close(Db, SleepSec, UntilSec) ->
case couch_db:monitored_by(Db) -- [self()] of
[] ->
@@ -531,23 +496,25 @@ wait_source_close(Db, SleepSec, UntilSec) ->
end
end.
-
source_delete(#job{} = Job) ->
Pid = spawn_link(?MODULE, source_delete_impl, [Job]),
report(Job#job{workers = [Pid]}).
-
source_delete_impl(#job{source = #shard{name = Name}, target = Targets}) ->
check_targets_exist(Targets, source_delete),
case config:get_boolean("mem3_reshard", "delete_source", true) of
true ->
case couch_server:delete(Name, [?ADMIN_CTX]) of
ok ->
- couch_log:notice("~p : deleted source shard ~p",
- [?MODULE, Name]);
+ couch_log:notice(
+ "~p : deleted source shard ~p",
+ [?MODULE, Name]
+ );
not_found ->
- couch_log:warning("~p : source was already deleted ~p",
- [?MODULE, Name])
+ couch_log:warning(
+ "~p : source was already deleted ~p",
+ [?MODULE, Name]
+ )
end;
false ->
% Emit deleted event even when not actually deleting the files this
@@ -562,12 +529,10 @@ source_delete_impl(#job{source = #shard{name = Name}, target = Targets}) ->
TNames = [TName || #shard{name = TName} <- Targets],
lists:foreach(fun(TName) -> couch_event:notify(TName, updated) end, TNames).
-
completed(#job{} = Job) ->
couch_log:notice("~p : ~p completed, exit normal", [?MODULE, jobfmt(Job)]),
exit(normal).
-
% This is for belt and suspenders really. Call periodically to validate the
% state is one of the expected states.
-spec check_state(#job{}) -> #job{} | no_return().
@@ -579,45 +544,47 @@ check_state(#job{split_state = State} = Job) ->
erlang:error({invalid_shard_split_state, State, Job})
end.
-
create_artificial_mem3_rep_checkpoints(#job{} = Job, Seq) ->
#job{source = Source = #shard{name = SourceName}, target = Targets} = Job,
check_source_exists(Source, initial_copy),
TNames = [TN || #shard{name = TN} <- Targets],
Timestamp = list_to_binary(mem3_util:iso8601_timestamp()),
couch_util:with_db(SourceName, fun(SDb) ->
- [couch_util:with_db(TName, fun(TDb) ->
- Doc = mem3_rep_checkpoint_doc(SDb, TDb, Timestamp, Seq),
- {ok, _} = couch_db:update_doc(SDb, Doc, []),
- {ok, _} = couch_db:update_doc(TDb, Doc, []),
- ok
- end) || TName <- TNames]
+ [
+ couch_util:with_db(TName, fun(TDb) ->
+ Doc = mem3_rep_checkpoint_doc(SDb, TDb, Timestamp, Seq),
+ {ok, _} = couch_db:update_doc(SDb, Doc, []),
+ {ok, _} = couch_db:update_doc(TDb, Doc, []),
+ ok
+ end)
+ || TName <- TNames
+ ]
end),
ok.
-
mem3_rep_checkpoint_doc(SourceDb, TargetDb, Timestamp, Seq) ->
Node = atom_to_binary(node(), utf8),
- SourceUUID = couch_db:get_uuid(SourceDb),
+ SourceUUID = couch_db:get_uuid(SourceDb),
TargetUUID = couch_db:get_uuid(TargetDb),
- History = {[
- {<<"source_node">>, Node},
- {<<"source_uuid">>, SourceUUID},
- {<<"source_seq">>, Seq},
- {<<"timestamp">>, Timestamp},
- {<<"target_node">>, Node},
- {<<"target_uuid">>, TargetUUID},
- {<<"target_seq">>, Seq}
- ]},
- Body = {[
- {<<"seq">>, Seq},
- {<<"target_uuid">>, TargetUUID},
- {<<"history">>, {[{Node, [History]}]}}
- ]},
+ History =
+ {[
+ {<<"source_node">>, Node},
+ {<<"source_uuid">>, SourceUUID},
+ {<<"source_seq">>, Seq},
+ {<<"timestamp">>, Timestamp},
+ {<<"target_node">>, Node},
+ {<<"target_uuid">>, TargetUUID},
+ {<<"target_seq">>, Seq}
+ ]},
+ Body =
+ {[
+ {<<"seq">>, Seq},
+ {<<"target_uuid">>, TargetUUID},
+ {<<"history">>, {[{Node, [History]}]}}
+ ]},
Id = mem3_rep:make_local_id(SourceUUID, TargetUUID),
#doc{id = Id, body = Body}.
-
check_source_exists(#shard{name = Name}, StateName) ->
case couch_server:exists(Name) of
true ->
@@ -628,89 +595,87 @@ check_source_exists(#shard{name = Name}, StateName) ->
exit({error, missing_source})
end.
-
check_targets_exist(Targets, StateName) ->
- lists:foreach(fun(#shard{name = Name}) ->
- case couch_server:exists(Name) of
- true ->
- ok;
- false ->
- ErrMsg = "~p target ~p is unexpectedly missing in ~p",
- couch_log:error(ErrMsg, [?MODULE, Name, StateName]),
- exit({error, missing_target})
- end
- end, Targets).
-
+ lists:foreach(
+ fun(#shard{name = Name}) ->
+ case couch_server:exists(Name) of
+ true ->
+ ok;
+ false ->
+ ErrMsg = "~p target ~p is unexpectedly missing in ~p",
+ couch_log:error(ErrMsg, [?MODULE, Name, StateName]),
+ exit({error, missing_target})
+ end
+ end,
+ Targets
+ ).
-spec max_retries() -> integer().
max_retries() ->
config:get_integer("reshard", "max_retries", 1).
-
-spec retry_interval_sec() -> integer().
retry_interval_sec() ->
config:get_integer("reshard", "retry_interval_sec", 10).
-
-spec update_shard_map_timeout_sec() -> integer().
update_shard_map_timeout_sec() ->
config:get_integer("reshard", "update_shardmap_timeout_sec", 60).
-
-spec info_update(atom(), any(), [tuple()]) -> [tuple()].
info_update(Key, Val, StateInfo) ->
lists:keystore(Key, 1, StateInfo, {Key, Val}).
-
-spec info_delete(atom(), [tuple()]) -> [tuple()].
info_delete(Key, StateInfo) ->
lists:keydelete(Key, 1, StateInfo).
-
-spec shardsstr(#shard{}, #shard{} | [#shard{}]) -> string().
shardsstr(#shard{name = SourceName}, #shard{name = TargetName}) ->
lists:flatten(io_lib:format("~s -> ~s", [SourceName, TargetName]));
-
shardsstr(#shard{name = SourceName}, Targets) ->
TNames = [TN || #shard{name = TN} <- Targets],
TargetsStr = string:join([binary_to_list(T) || T <- TNames], ","),
lists:flatten(io_lib:format("~s -> ~s", [SourceName, TargetsStr])).
-
-spec reset_target(#job{}) -> #job{}.
reset_target(#job{source = Source, target = Targets} = Job) ->
- ShardNames = try
- [N || #shard{name = N} <- mem3:local_shards(mem3:dbname(Source))]
- catch
- error:database_does_not_exist ->
- []
- end,
- lists:map(fun(#shard{name = Name}) ->
- case {couch_server:exists(Name), lists:member(Name, ShardNames)} of
- {_, true} ->
- % Should never get here but if we do crash and don't continue
- LogMsg = "~p : ~p target unexpectedly found in shard map ~p",
- couch_log:error(LogMsg, [?MODULE, jobfmt(Job), Name]),
- erlang:error({target_present_in_shard_map, Name});
- {true, false} ->
- LogMsg = "~p : ~p resetting ~p target",
- couch_log:warning(LogMsg, [?MODULE, jobfmt(Job), Name]),
- couch_db_split:cleanup_target(Source#shard.name, Name);
- {false, false} ->
- ok
- end
- end, Targets),
+ ShardNames =
+ try
+ [N || #shard{name = N} <- mem3:local_shards(mem3:dbname(Source))]
+ catch
+ error:database_does_not_exist ->
+ []
+ end,
+ lists:map(
+ fun(#shard{name = Name}) ->
+ case {couch_server:exists(Name), lists:member(Name, ShardNames)} of
+ {_, true} ->
+ % Should never get here but if we do crash and don't continue
+ LogMsg = "~p : ~p target unexpectedly found in shard map ~p",
+ couch_log:error(LogMsg, [?MODULE, jobfmt(Job), Name]),
+ erlang:error({target_present_in_shard_map, Name});
+ {true, false} ->
+ LogMsg = "~p : ~p resetting ~p target",
+ couch_log:warning(LogMsg, [?MODULE, jobfmt(Job), Name]),
+ couch_db_split:cleanup_target(Source#shard.name, Name);
+ {false, false} ->
+ ok
+ end
+ end,
+ Targets
+ ),
Job.
-
-spec update_split_history(#job{}) -> #job{}.
update_split_history(#job{split_state = St, update_time = Ts} = Job) ->
Hist = Job#job.history,
- JobSt = case St of
- completed -> completed;
- failed -> failed;
- new -> new;
- stopped -> stopped;
- _ -> running
- end,
+ JobSt =
+ case St of
+ completed -> completed;
+ failed -> failed;
+ new -> new;
+ stopped -> stopped;
+ _ -> running
+ end,
Job#job{history = mem3_reshard:update_history(JobSt, St, Ts, Hist)}.
diff --git a/src/mem3/src/mem3_reshard_job_sup.erl b/src/mem3/src/mem3_reshard_job_sup.erl
index 3f1b3bfb4..e98cb5a08 100644
--- a/src/mem3/src/mem3_reshard_job_sup.erl
+++ b/src/mem3/src/mem3_reshard_job_sup.erl
@@ -22,34 +22,25 @@
init/1
]).
-
-include("mem3_reshard.hrl").
-
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
start_child(Job) ->
supervisor:start_child(?MODULE, [Job]).
-
terminate_child(Pid) ->
supervisor:terminate_child(?MODULE, Pid).
-
count_children() ->
Props = supervisor:count_children(?MODULE),
proplists:get_value(active, Props).
-
init(_Args) ->
Children = [
- {mem3_reshard_job,
- {mem3_reshard_job, start_link, []},
- temporary,
- 60000,
- worker,
- [mem3_reshard_job]}
+ {mem3_reshard_job, {mem3_reshard_job, start_link, []}, temporary, 60000, worker, [
+ mem3_reshard_job
+ ]}
],
{ok, {{simple_one_for_one, 10, 3}, Children}}.
diff --git a/src/mem3/src/mem3_reshard_store.erl b/src/mem3/src/mem3_reshard_store.erl
index c3534b374..140cc5bd7 100644
--- a/src/mem3/src/mem3_reshard_store.erl
+++ b/src/mem3/src/mem3_reshard_store.erl
@@ -12,7 +12,6 @@
-module(mem3_reshard_store).
-
-export([
init/3,
@@ -23,17 +22,16 @@
store_state/1,
load_state/2,
- delete_state/1, % for debugging
+ % for debugging
+ delete_state/1,
job_to_ejson_props/2,
state_to_ejson_props/1
]).
-
-include_lib("couch/include/couch_db.hrl").
-include("mem3_reshard.hrl").
-
-spec init(#state{}, binary(), binary()) -> #state{}.
init(#state{} = State, JobPrefix, StateDocId) ->
State#state{
@@ -41,7 +39,6 @@ init(#state{} = State, JobPrefix, StateDocId) ->
state_id = <<?LOCAL_DOC_PREFIX, StateDocId/binary>>
}.
-
-spec store_job(#state{}, #job{}) -> ok.
store_job(#state{job_prefix = Prefix}, #job{id = Id} = Job) ->
with_shards_db(fun(Db) ->
@@ -49,7 +46,6 @@ store_job(#state{job_prefix = Prefix}, #job{id = Id} = Job) ->
ok = update_doc(Db, DocId, job_to_ejson_props(Job))
end).
-
-spec load_job(#state{}, binary()) -> {ok, {[_]}} | not_found.
load_job(#state{job_prefix = Prefix}, Id) ->
with_shards_db(fun(Db) ->
@@ -61,7 +57,6 @@ load_job(#state{job_prefix = Prefix}, Id) ->
end
end).
-
-spec delete_job(#state{}, binary()) -> ok.
delete_job(#state{job_prefix = Prefix}, Id) ->
with_shards_db(fun(Db) ->
@@ -69,7 +64,6 @@ delete_job(#state{job_prefix = Prefix}, Id) ->
ok = delete_doc(Db, DocId)
end).
-
-spec get_jobs(#state{}) -> [#job{}].
get_jobs(#state{job_prefix = Prefix}) ->
with_shards_db(fun(Db) ->
@@ -87,14 +81,12 @@ get_jobs(#state{job_prefix = Prefix}) ->
lists:reverse(Jobs)
end).
-
-spec store_state(#state{}) -> ok.
store_state(#state{state_id = DocId} = State) ->
with_shards_db(fun(Db) ->
ok = update_doc(Db, DocId, state_to_ejson_props(State))
end).
-
-spec load_state(#state{}, atom()) -> #state{}.
load_state(#state{state_id = DocId} = State, Default) ->
with_shards_db(fun(Db) ->
@@ -106,25 +98,25 @@ load_state(#state{state_id = DocId} = State, Default) ->
end
end).
-
-spec delete_state(#state{}) -> ok.
delete_state(#state{state_id = DocId}) ->
with_shards_db(fun(Db) ->
ok = delete_doc(Db, DocId)
end).
-
job_to_ejson_props(#job{source = Source, target = Targets} = Job, Opts) ->
Iso8601 = proplists:get_value(iso8601, Opts),
History = history_to_ejson(Job#job.history, Iso8601),
- StartTime = case Iso8601 of
- true -> iso8601(Job#job.start_time);
- _ -> Job#job.start_time
- end,
- UpdateTime = case Iso8601 of
- true -> iso8601(Job#job.update_time);
- _ -> Job#job.update_time
- end,
+ StartTime =
+ case Iso8601 of
+ true -> iso8601(Job#job.start_time);
+ _ -> Job#job.start_time
+ end,
+ UpdateTime =
+ case Iso8601 of
+ true -> iso8601(Job#job.update_time);
+ _ -> Job#job.update_time
+ end,
[
{id, Job#job.id},
{type, Job#job.type},
@@ -139,7 +131,6 @@ job_to_ejson_props(#job{source = Source, target = Targets} = Job, Opts) ->
{history, History}
].
-
state_to_ejson_props(#state{} = State) ->
[
{state, atom_to_binary(State#state.state, utf8)},
@@ -148,7 +139,6 @@ state_to_ejson_props(#state{} = State) ->
{node, atom_to_binary(State#state.node, utf8)}
].
-
% Private API
with_shards_db(Fun) ->
@@ -164,7 +154,6 @@ with_shards_db(Fun) ->
throw(Else)
end.
-
delete_doc(Db, DocId) ->
case couch_db:open_doc(Db, DocId, []) of
{ok, #doc{revs = {_, Revs}}} ->
@@ -174,17 +163,17 @@ delete_doc(Db, DocId) ->
ok
end.
-
update_doc(Db, DocId, Body) ->
DocProps = [{<<"_id">>, DocId}] ++ Body,
Body1 = ?JSON_DECODE(?JSON_ENCODE({DocProps})),
BaseDoc = couch_doc:from_json_obj(Body1),
- Doc = case couch_db:open_doc(Db, DocId, []) of
- {ok, #doc{revs = Revs}} ->
- BaseDoc#doc{revs = Revs};
- {not_found, _} ->
- BaseDoc
- end,
+ Doc =
+ case couch_db:open_doc(Db, DocId, []) of
+ {ok, #doc{revs = Revs}} ->
+ BaseDoc#doc{revs = Revs};
+ {not_found, _} ->
+ BaseDoc
+ end,
case store_state() of
true ->
{ok, _} = couch_db:update_doc(Db, Doc, []),
@@ -195,7 +184,6 @@ update_doc(Db, DocId, Body) ->
ok
end.
-
load_doc(Db, DocId) ->
case couch_db:open_doc(Db, DocId, [ejson_body]) of
{ok, #doc{body = Body}} ->
@@ -205,11 +193,9 @@ load_doc(Db, DocId) ->
not_found
end.
-
job_to_ejson_props(#job{} = Job) ->
job_to_ejson_props(Job, []).
-
job_from_ejson({Props}) ->
Id = couch_util:get_value(<<"id">>, Props),
Type = couch_util:get_value(<<"type">>, Props),
@@ -235,7 +221,6 @@ job_from_ejson({Props}) ->
history = history_from_ejson(History)
}.
-
state_from_ejson(#state{} = State, {Props}) ->
StateVal = couch_util:get_value(<<"state">>, Props),
StateInfo = couch_util:get_value(<<"state_info">>, Props),
@@ -247,37 +232,35 @@ state_from_ejson(#state{} = State, {Props}) ->
update_time = TUpdated
}.
-
state_info_from_ejson({Props}) ->
- Props1 = [{binary_to_atom(K, utf8), couch_util:to_binary(V)}
- || {K, V} <- Props],
+ Props1 = [
+ {binary_to_atom(K, utf8), couch_util:to_binary(V)}
+ || {K, V} <- Props
+ ],
lists:sort(Props1).
-
history_to_ejson(Hist, true) when is_list(Hist) ->
[{[{timestamp, iso8601(T)}, {type, S}, {detail, D}]} || {T, S, D} <- Hist];
-
history_to_ejson(Hist, _) when is_list(Hist) ->
[{[{timestamp, T}, {type, S}, {detail, D}]} || {T, S, D} <- Hist].
-
history_from_ejson(HistoryEJson) when is_list(HistoryEJson) ->
- lists:map(fun({EventProps}) ->
- Timestamp = couch_util:get_value(<<"timestamp">>, EventProps),
- State = couch_util:get_value(<<"type">>, EventProps),
- Detail = couch_util:get_value(<<"detail">>, EventProps),
- {Timestamp, binary_to_atom(State, utf8), Detail}
- end, HistoryEJson).
-
+ lists:map(
+ fun({EventProps}) ->
+ Timestamp = couch_util:get_value(<<"timestamp">>, EventProps),
+ State = couch_util:get_value(<<"type">>, EventProps),
+ Detail = couch_util:get_value(<<"detail">>, EventProps),
+ {Timestamp, binary_to_atom(State, utf8), Detail}
+ end,
+ HistoryEJson
+ ).
state_info_to_ejson(Props) ->
{lists:sort([{K, couch_util:to_binary(V)} || {K, V} <- Props])}.
-
store_state() ->
config:get_boolean("reshard", "store_state", true).
-
iso8601(UnixSec) ->
Mega = UnixSec div 1000000,
Sec = UnixSec rem 1000000,
diff --git a/src/mem3/src/mem3_reshard_sup.erl b/src/mem3/src/mem3_reshard_sup.erl
index 6349a4041..5a28359fb 100644
--- a/src/mem3/src/mem3_reshard_sup.erl
+++ b/src/mem3/src/mem3_reshard_sup.erl
@@ -22,26 +22,15 @@
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
-
init(_Args) ->
Children = [
- {mem3_reshard_dbdoc,
- {mem3_reshard_dbdoc, start_link, []},
- permanent,
- infinity,
- worker,
- [mem3_reshard_dbdoc]},
- {mem3_reshard_job_sup,
- {mem3_reshard_job_sup, start_link, []},
- permanent,
- infinity,
- supervisor,
- [mem3_reshard_job_sup]},
- {mem3_reshard,
- {mem3_reshard, start_link, []},
- permanent,
- brutal_kill,
- worker,
- [mem3_reshard]}
+ {mem3_reshard_dbdoc, {mem3_reshard_dbdoc, start_link, []}, permanent, infinity, worker, [
+ mem3_reshard_dbdoc
+ ]},
+ {mem3_reshard_job_sup, {mem3_reshard_job_sup, start_link, []}, permanent, infinity,
+ supervisor, [mem3_reshard_job_sup]},
+ {mem3_reshard, {mem3_reshard, start_link, []}, permanent, brutal_kill, worker, [
+ mem3_reshard
+ ]}
],
{ok, {{one_for_all, 5, 5}, Children}}.
diff --git a/src/mem3/src/mem3_reshard_validate.erl b/src/mem3/src/mem3_reshard_validate.erl
index aa8df3e16..fca1617ce 100644
--- a/src/mem3/src/mem3_reshard_validate.erl
+++ b/src/mem3/src/mem3_reshard_validate.erl
@@ -20,7 +20,6 @@
-include_lib("mem3/include/mem3.hrl").
-
-spec start_args(#shard{}, any()) -> ok | {error, term()}.
start_args(Source, Split) ->
first_error([
@@ -31,8 +30,7 @@ start_args(Source, Split) ->
check_shard_map(Source)
]).
-
--spec source(#shard{}) -> ok | {error, term()}.
+-spec source(#shard{}) -> ok | {error, term()}.
source(#shard{name = Name}) ->
case couch_server:exists(Name) of
true ->
@@ -41,8 +39,7 @@ source(#shard{name = Name}) ->
{error, {source_shard_not_found, Name}}
end.
-
--spec check_shard_map(#shard{}) -> ok | {error, term()}.
+-spec check_shard_map(#shard{}) -> ok | {error, term()}.
check_shard_map(#shard{name = Name}) ->
DbName = mem3:dbname(Name),
AllShards = mem3:shards(DbName),
@@ -53,22 +50,19 @@ check_shard_map(#shard{name = Name}) ->
{error, {not_enough_shard_copies, DbName}}
end.
-
-spec targets(#shard{}, [#shard{}]) -> ok | {error, term()}.
targets(#shard{} = Source, Targets) ->
first_error([
target_ranges(Source, Targets)
]).
-
--spec check_split(any()) -> ok | {error, term()}.
+-spec check_split(any()) -> ok | {error, term()}.
check_split(Split) when is_integer(Split), Split > 1 ->
ok;
check_split(Split) ->
{error, {invalid_split_parameter, Split}}.
-
--spec check_range(#shard{}, any()) -> ok | {error, term()}.
+-spec check_range(#shard{}, any()) -> ok | {error, term()}.
check_range(#shard{range = Range = [B, E]}, Split) ->
case (E + 1 - B) >= Split of
true ->
@@ -77,33 +71,33 @@ check_range(#shard{range = Range = [B, E]}, Split) ->
{error, {shard_range_cannot_be_split, Range, Split}}
end.
-
--spec check_node(#shard{}) -> ok | {error, term()}.
+-spec check_node(#shard{}) -> ok | {error, term()}.
check_node(#shard{node = undefined}) ->
ok;
-
check_node(#shard{node = Node}) when Node =:= node() ->
ok;
-
check_node(#shard{node = Node}) ->
{error, {source_shard_node_is_not_current_node, Node}}.
-
-spec target_ranges(#shard{}, [#shard{}]) -> ok | {error, any()}.
target_ranges(#shard{range = [Begin, End]}, Targets) ->
Ranges = [R || #shard{range = R} <- Targets],
SortFun = fun([B1, _], [B2, _]) -> B1 =< B2 end,
[First | RestRanges] = lists:sort(SortFun, Ranges),
try
- TotalRange = lists:foldl(fun([B2, E2], [B1, E1]) ->
- case B2 =:= E1 + 1 of
- true ->
- ok;
- false ->
- throw({range_error, {B2, E1}})
+ TotalRange = lists:foldl(
+ fun([B2, E2], [B1, E1]) ->
+ case B2 =:= E1 + 1 of
+ true ->
+ ok;
+ false ->
+ throw({range_error, {B2, E1}})
+ end,
+ [B1, E2]
end,
- [B1, E2]
- end, First, RestRanges),
+ First,
+ RestRanges
+ ),
case [Begin, End] =:= TotalRange of
true ->
ok;
@@ -115,7 +109,6 @@ target_ranges(#shard{range = [Begin, End]}, Targets) ->
{error, {shard_range_error, Error}}
end.
-
-spec first_error([ok | {error, term()}]) -> ok | {error, term()}.
first_error(Results) ->
case [Res || Res <- Results, Res =/= ok] of
diff --git a/src/mem3/src/mem3_rpc.erl b/src/mem3/src/mem3_rpc.erl
index 9e0f42a8e..468bdee21 100644
--- a/src/mem3/src/mem3_rpc.erl
+++ b/src/mem3/src/mem3_rpc.erl
@@ -14,7 +14,6 @@
-module(mem3_rpc).
-
-export([
find_common_seq/4,
get_missing_revs/4,
@@ -43,18 +42,14 @@
save_purge_checkpoint_rpc/3,
replicate_rpc/2
-
]).
-
-include("mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(BATCH_SIZE, 1000).
-define(REXI_CALL_TIMEOUT_MSEC, 600000).
-
% "Pull" is a bit of a misnomer here, as what we're actually doing is
% issuing an RPC request and telling the remote node to push updates to
% us. This lets us reuse all of the battle-tested machinery of mem3_rpc.
@@ -64,7 +59,6 @@ pull_replication(Seed) ->
get_missing_revs(Node, DbName, IdsRevs, Options) ->
rexi_call(Node, {fabric_rpc, get_missing_revs, [DbName, IdsRevs, Options]}).
-
update_docs(Node, DbName, Docs, Options) ->
rexi_call(Node, {fabric_rpc, update_docs, [DbName, Docs, Options]}).
@@ -77,95 +71,91 @@ load_checkpoint(Node, DbName, SourceNode, SourceUUID, FilterHash) ->
Args = [DbName, SourceNode, SourceUUID, FilterHash],
rexi_call(Node, {mem3_rpc, load_checkpoint_rpc, Args}).
-
load_checkpoint(Node, DbName, SourceNode, SourceUUID) ->
Args = [DbName, SourceNode, SourceUUID],
rexi_call(Node, {mem3_rpc, load_checkpoint_rpc, Args}).
-
save_checkpoint(Node, DbName, DocId, Seq, Entry, History) ->
Args = [DbName, DocId, Seq, Entry, History],
rexi_call(Node, {mem3_rpc, save_checkpoint_rpc, Args}).
-
find_common_seq(Node, DbName, SourceUUID, SourceEpochs) ->
Args = [DbName, SourceUUID, SourceEpochs],
rexi_call(Node, {mem3_rpc, find_common_seq_rpc, Args}).
-
load_purge_infos(Node, DbName, SourceUUID, Count) ->
Args = [DbName, SourceUUID, Count],
rexi_call(Node, {mem3_rpc, load_purge_infos_rpc, Args}).
-
save_purge_checkpoint(Node, DbName, PurgeDocId, Body) ->
Args = [DbName, PurgeDocId, Body],
rexi_call(Node, {mem3_rpc, save_purge_checkpoint_rpc, Args}).
-
purge_docs(Node, DbName, PurgeInfos, Options) ->
rexi_call(Node, {fabric_rpc, purge_docs, [DbName, PurgeInfos, Options]}).
-
-replicate(Source, Target, DbName, Timeout)
- when is_atom(Source), is_atom(Target), is_binary(DbName) ->
+replicate(Source, Target, DbName, Timeout) when
+ is_atom(Source), is_atom(Target), is_binary(DbName)
+->
Args = [DbName, Target],
rexi_call(Source, {mem3_rpc, replicate_rpc, Args}, Timeout).
-
load_checkpoint_rpc(DbName, SourceNode, SourceUUID) ->
load_checkpoint_rpc(DbName, SourceNode, SourceUUID, <<>>).
-
load_checkpoint_rpc(DbName, SourceNode, SourceUUID, FilterHash) ->
erlang:put(io_priority, {internal_repl, DbName}),
case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- TargetUUID = couch_db:get_uuid(Db),
- NewId = mem3_rep:make_local_id(SourceUUID, TargetUUID, FilterHash),
- case couch_db:open_doc(Db, NewId, []) of
- {ok, Doc} ->
- rexi:reply({ok, {NewId, Doc}});
- {not_found, _} ->
- OldId = mem3_rep:make_local_id(SourceNode, node()),
- case couch_db:open_doc(Db, OldId, []) of
- {ok, Doc} ->
- rexi:reply({ok, {NewId, Doc}});
- {not_found, _} ->
- rexi:reply({ok, {NewId, #doc{id = NewId}}})
- end
- end;
- Error ->
- rexi:reply(Error)
+ {ok, Db} ->
+ TargetUUID = couch_db:get_uuid(Db),
+ NewId = mem3_rep:make_local_id(SourceUUID, TargetUUID, FilterHash),
+ case couch_db:open_doc(Db, NewId, []) of
+ {ok, Doc} ->
+ rexi:reply({ok, {NewId, Doc}});
+ {not_found, _} ->
+ OldId = mem3_rep:make_local_id(SourceNode, node()),
+ case couch_db:open_doc(Db, OldId, []) of
+ {ok, Doc} ->
+ rexi:reply({ok, {NewId, Doc}});
+ {not_found, _} ->
+ rexi:reply({ok, {NewId, #doc{id = NewId}}})
+ end
+ end;
+ Error ->
+ rexi:reply(Error)
end.
-
save_checkpoint_rpc(DbName, Id, SourceSeq, NewEntry0, History0) ->
erlang:put(io_priority, {internal_repl, DbName}),
case get_or_create_db(DbName, [?ADMIN_CTX]) of
{ok, Db} ->
- NewEntry = {[
- {<<"target_node">>, atom_to_binary(node(), utf8)},
- {<<"target_uuid">>, couch_db:get_uuid(Db)},
- {<<"target_seq">>, couch_db:get_update_seq(Db)}
- ] ++ NewEntry0},
- Body = {[
- {<<"seq">>, SourceSeq},
- {<<"target_uuid">>, couch_db:get_uuid(Db)},
- {<<"history">>, add_checkpoint(NewEntry, History0)}
- ]},
+ NewEntry = {
+ [
+ {<<"target_node">>, atom_to_binary(node(), utf8)},
+ {<<"target_uuid">>, couch_db:get_uuid(Db)},
+ {<<"target_seq">>, couch_db:get_update_seq(Db)}
+ ] ++ NewEntry0
+ },
+ Body =
+ {[
+ {<<"seq">>, SourceSeq},
+ {<<"target_uuid">>, couch_db:get_uuid(Db)},
+ {<<"history">>, add_checkpoint(NewEntry, History0)}
+ ]},
Doc = #doc{id = Id, body = Body},
- rexi:reply(try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- {ok, Body};
- Else ->
- {error, Else}
- catch
- Exception ->
- Exception;
- error:Reason ->
- {error, Reason}
- end);
+ rexi:reply(
+ try couch_db:update_doc(Db, Doc, []) of
+ {ok, _} ->
+ {ok, Body};
+ Else ->
+ {error, Else}
+ catch
+ Exception ->
+ Exception;
+ error:Reason ->
+ {error, Reason}
+ end
+ );
Error ->
rexi:reply(Error)
end.
@@ -173,17 +163,17 @@ save_checkpoint_rpc(DbName, Id, SourceSeq, NewEntry0, History0) ->
find_common_seq_rpc(DbName, SourceUUID, SourceEpochs) ->
erlang:put(io_priority, {internal_repl, DbName}),
case get_or_create_db(DbName, [?ADMIN_CTX]) of
- {ok, Db} ->
- case couch_db:get_uuid(Db) of
- SourceUUID ->
- TargetEpochs = couch_db:get_epochs(Db),
- Seq = compare_epochs(SourceEpochs, TargetEpochs),
- rexi:reply({ok, Seq});
- _Else ->
- rexi:reply({ok, 0})
- end;
- Error ->
- rexi:reply(Error)
+ {ok, Db} ->
+ case couch_db:get_uuid(Db) of
+ SourceUUID ->
+ TargetEpochs = couch_db:get_epochs(Db),
+ Seq = compare_epochs(SourceEpochs, TargetEpochs),
+ rexi:reply({ok, Seq});
+ _Else ->
+ rexi:reply({ok, 0})
+ end;
+ Error ->
+ rexi:reply(Error)
end.
pull_replication_rpc(Target) ->
@@ -192,29 +182,33 @@ pull_replication_rpc(Target) ->
Repl = fun(Db) -> {Db, mem3_rep:go(Db, Target, Opts)} end,
rexi:reply({ok, lists:map(Repl, Dbs)}).
-
load_purge_infos_rpc(DbName, SrcUUID, BatchSize) ->
erlang:put(io_priority, {internal_repl, DbName}),
case get_or_create_db(DbName, [?ADMIN_CTX]) of
{ok, Db} ->
TgtUUID = couch_db:get_uuid(Db),
PurgeDocId = mem3_rep:make_purge_id(SrcUUID, TgtUUID),
- StartSeq = case couch_db:open_doc(Db, PurgeDocId, []) of
- {ok, #doc{body = {Props}}} ->
- couch_util:get_value(<<"purge_seq">>, Props);
- {not_found, _} ->
- Oldest = couch_db:get_oldest_purge_seq(Db),
- erlang:max(0, Oldest - 1)
- end,
+ StartSeq =
+ case couch_db:open_doc(Db, PurgeDocId, []) of
+ {ok, #doc{body = {Props}}} ->
+ couch_util:get_value(<<"purge_seq">>, Props);
+ {not_found, _} ->
+ Oldest = couch_db:get_oldest_purge_seq(Db),
+ erlang:max(0, Oldest - 1)
+ end,
FoldFun = fun({PSeq, UUID, Id, Revs}, {Count, Infos, _}) ->
NewCount = Count + length(Revs),
NewInfos = [{UUID, Id, Revs} | Infos],
- Status = if NewCount < BatchSize -> ok; true -> stop end,
+ Status =
+ if
+ NewCount < BatchSize -> ok;
+ true -> stop
+ end,
{Status, {NewCount, NewInfos, PSeq}}
end,
InitAcc = {0, [], StartSeq},
{ok, {_, PurgeInfos, ThroughSeq}} =
- couch_db:fold_purge_infos(Db, StartSeq, FoldFun, InitAcc),
+ couch_db:fold_purge_infos(Db, StartSeq, FoldFun, InitAcc),
PurgeSeq = couch_db:get_purge_seq(Db),
Remaining = PurgeSeq - ThroughSeq,
rexi:reply({ok, {PurgeDocId, PurgeInfos, ThroughSeq, Remaining}});
@@ -222,32 +216,33 @@ load_purge_infos_rpc(DbName, SrcUUID, BatchSize) ->
rexi:reply(Else)
end.
-
save_purge_checkpoint_rpc(DbName, PurgeDocId, Body) ->
erlang:put(io_priority, {internal_repl, DbName}),
case get_or_create_db(DbName, [?ADMIN_CTX]) of
{ok, Db} ->
Doc = #doc{id = PurgeDocId, body = Body},
- Resp = try couch_db:update_doc(Db, Doc, []) of
- Resp0 -> Resp0
- catch T:R ->
- {T, R}
- end,
+ Resp =
+ try couch_db:update_doc(Db, Doc, []) of
+ Resp0 -> Resp0
+ catch
+ T:R ->
+ {T, R}
+ end,
rexi:reply(Resp);
Error ->
rexi:reply(Error)
end.
-
replicate_rpc(DbName, Target) ->
- rexi:reply(try
- Opts = [{batch_size, ?BATCH_SIZE}, {batch_count, all}],
- {ok, mem3_rep:go(DbName, Target, Opts)}
- catch
- Tag:Error ->
- {Tag, Error}
- end).
-
+ rexi:reply(
+ try
+ Opts = [{batch_size, ?BATCH_SIZE}, {batch_count, all}],
+ {ok, mem3_rep:go(DbName, Target, Opts)}
+ catch
+ Tag:Error ->
+ {Tag, Error}
+ end
+ ).
%% @doc Return the sequence where two files with the same UUID diverged.
compare_epochs(SourceEpochs, TargetEpochs) ->
@@ -256,7 +251,6 @@ compare_epochs(SourceEpochs, TargetEpochs) ->
lists:reverse(TargetEpochs)
).
-
compare_rev_epochs([{Node, Seq} | SourceRest], [{Node, Seq} | TargetRest]) ->
% Common history, fast-forward
compare_epochs(SourceRest, TargetRest);
@@ -270,7 +264,6 @@ compare_rev_epochs([{_, SourceSeq} | _], [{_, TargetSeq} | _]) ->
% The source was moved to a new location independently, take the minimum
erlang:min(SourceSeq, TargetSeq) - 1.
-
%% @doc This adds a new update sequence checkpoint to the replication
%% history. Checkpoints are keyed by the source node so that we
%% aren't mixing history between source shard moves.
@@ -298,7 +291,6 @@ add_checkpoint({Props}, {History}) ->
NodeRemoved = lists:keydelete(SourceNode, 1, History),
{[{SourceNode, NewSourceHistory} | NodeRemoved]}.
-
filter_history(SourceSeqThresh, TargetSeqThresh, History) ->
SourceFilter = fun({Entry}) ->
SourceSeq = couch_util:get_value(<<"source_seq">>, Entry),
@@ -311,7 +303,6 @@ filter_history(SourceSeqThresh, TargetSeqThresh, History) ->
SourceFiltered = lists:filter(SourceFilter, History),
lists:filter(TargetFilter, SourceFiltered).
-
%% @doc This function adjusts our history to maintain a
%% history of checkpoints that follow an exponentially
%% increasing age from the most recent checkpoint.
@@ -335,7 +326,7 @@ filter_history(SourceSeqThresh, TargetSeqThresh, History) ->
%% This function expects the provided history to be sorted
%% in descending order of source_seq values.
rebucket([], _NewSeq, Bucket) ->
- {Bucket+1, []};
+ {Bucket + 1, []};
rebucket([{Entry} | RestHistory], NewSeq, Bucket) ->
CurSeq = couch_util:get_value(<<"source_seq">>, Entry),
case find_bucket(NewSeq, CurSeq, Bucket) of
@@ -363,7 +354,6 @@ rebucket([{Entry} | RestHistory], NewSeq, Bucket) ->
{NextBucket, [{Entry} | NewHistory]}
end.
-
%% @doc Find the bucket id for the given sequence pair.
find_bucket(NewSeq, CurSeq, Bucket) ->
% The +1 constant in this comparison is a bit subtle. The
@@ -371,27 +361,27 @@ find_bucket(NewSeq, CurSeq, Bucket) ->
% the history is guaranteed to have a BucketId of 1. This
% also relies on never having a duplicated update
% sequence so adding 1 here guarantees a difference >= 2.
- if (NewSeq - CurSeq + 1) > (2 bsl Bucket) ->
- find_bucket(NewSeq, CurSeq, Bucket+1);
- true ->
- Bucket
+ if
+ (NewSeq - CurSeq + 1) > (2 bsl Bucket) ->
+ find_bucket(NewSeq, CurSeq, Bucket + 1);
+ true ->
+ Bucket
end.
-
rexi_call(Node, MFA) ->
rexi_call(Node, MFA, ?REXI_CALL_TIMEOUT_MSEC).
-
rexi_call(Node, MFA, Timeout) ->
Mon = rexi_monitor:start([rexi_utils:server_pid(Node)]),
Ref = rexi:cast(Node, self(), MFA, [sync]),
try
- receive {Ref, {ok, Reply}} ->
- Reply;
- {Ref, Error} ->
- erlang:error(Error);
- {rexi_DOWN, Mon, _, Reason} ->
- erlang:error({rexi_DOWN, {Node, Reason}})
+ receive
+ {Ref, {ok, Reply}} ->
+ Reply;
+ {Ref, Error} ->
+ erlang:error(Error);
+ {rexi_DOWN, Mon, _, Reason} ->
+ erlang:error({rexi_DOWN, {Node, Reason}})
after Timeout ->
erlang:error(timeout)
end
@@ -399,15 +389,12 @@ rexi_call(Node, MFA, Timeout) ->
rexi_monitor:stop(Mon)
end.
-
get_or_create_db(DbName, Options) ->
mem3_util:get_or_create_db_int(DbName, Options).
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
-define(SNODE, <<"src@localhost">>).
-define(TNODE, <<"tgt@localhost">>).
-define(SNODE_KV, {<<"source_node">>, ?SNODE}).
@@ -416,7 +403,6 @@ get_or_create_db(DbName, Options) ->
-define(TSEQ, <<"target_seq">>).
-define(ENTRY(S, T), {[?SNODE_KV, {?SSEQ, S}, ?TNODE_KV, {?TSEQ, T}]}).
-
filter_history_data() ->
[
?ENTRY(13, 15),
@@ -424,7 +410,6 @@ filter_history_data() ->
?ENTRY(2, 3)
].
-
filter_history_remove_none_test() ->
?assertEqual(filter_history(20, 20, filter_history_data()), [
?ENTRY(13, 15),
@@ -432,11 +417,9 @@ filter_history_remove_none_test() ->
?ENTRY(2, 3)
]).
-
filter_history_remove_all_test() ->
?assertEqual(filter_history(1, 1, filter_history_data()), []).
-
filter_history_remove_equal_test() ->
?assertEqual(filter_history(10, 10, filter_history_data()), [
?ENTRY(2, 3)
@@ -445,7 +428,6 @@ filter_history_remove_equal_test() ->
?ENTRY(2, 3)
]).
-
filter_history_remove_for_source_and_target_test() ->
?assertEqual(filter_history(11, 20, filter_history_data()), [
?ENTRY(10, 9),
@@ -456,129 +438,176 @@ filter_history_remove_for_source_and_target_test() ->
?ENTRY(2, 3)
]).
-
filter_history_remove_for_both_test() ->
?assertEqual(filter_history(11, 11, filter_history_data()), [
?ENTRY(10, 9),
?ENTRY(2, 3)
]).
-
filter_history_remove_for_both_again_test() ->
?assertEqual(filter_history(3, 4, filter_history_data()), [
?ENTRY(2, 3)
]).
-
add_first_checkpoint_test() ->
History = {[]},
- ?assertEqual(add_checkpoint(?ENTRY(2, 3), History), {[
- {?SNODE, [
- ?ENTRY(2, 3)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(2, 3), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}).
-
+ ).
add_first_checkpoint_to_empty_test() ->
History = {[{?SNODE, []}]},
- ?assertEqual(add_checkpoint(?ENTRY(2, 3), History), {[
- {?SNODE, [
- ?ENTRY(2, 3)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(2, 3), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}).
-
+ ).
add_second_checkpoint_test() ->
History = {[{?SNODE, [?ENTRY(2, 3)]}]},
- ?assertEqual(add_checkpoint(?ENTRY(10, 9), History), {[
- {?SNODE, [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(10, 9), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}).
-
+ ).
add_third_checkpoint_test() ->
- History = {[{?SNODE, [
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(11, 10), History), {[
- {?SNODE, [
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
+ History =
+ {[
+ {?SNODE, [
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
+ ]},
+ ?assertEqual(
+ add_checkpoint(?ENTRY(11, 10), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(11, 10),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}).
-
+ ).
add_fourth_checkpoint_test() ->
- History = {[{?SNODE, [
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(12, 13), History), {[
- {?SNODE, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
+ History =
+ {[
+ {?SNODE, [
+ ?ENTRY(11, 10),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
+ ]},
+ ?assertEqual(
+ add_checkpoint(?ENTRY(12, 13), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(12, 13),
+ ?ENTRY(11, 10),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}).
-
+ ).
add_checkpoint_with_replacement_test() ->
- History = {[{?SNODE, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
+ History =
+ {[
+ {?SNODE, [
+ ?ENTRY(12, 13),
+ ?ENTRY(11, 10),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
+ ]},
% Picking a source_seq of 16 to force 10, 11, and 12
% into the same bucket to show we drop the 11 entry.
- ?assertEqual(add_checkpoint(?ENTRY(16, 16), History), {[
- {?SNODE, [
- ?ENTRY(16, 16),
- ?ENTRY(12, 13),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(16, 16), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(16, 16),
+ ?ENTRY(12, 13),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}).
+ ).
add_checkpoint_drops_redundant_checkpoints_test() ->
% I've added comments showing the bucket ID based
% on the ?ENTRY passed to add_checkpoint
- History = {[{?SNODE, [
- ?ENTRY(15, 15), % Bucket 0
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 1
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(11, 11), % Bucket 2
- ?ENTRY(10, 10), % Bucket 2
- ?ENTRY(9, 9), % Bucket 2
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(7, 7), % Bucket 3
- ?ENTRY(6, 6), % Bucket 3
- ?ENTRY(5, 5), % Bucket 3
- ?ENTRY(4, 4), % Bucket 3
- ?ENTRY(3, 3), % Bucket 3
- ?ENTRY(2, 2), % Bucket 3
- ?ENTRY(1, 1) % Bucket 3
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(16, 16), History), {[
- {?SNODE, [
- ?ENTRY(16, 16), % Bucket 0
- ?ENTRY(15, 15), % Bucket 0
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 1
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(9, 9), % Bucket 2
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(1, 1) % Bucket 3
+ History =
+ {[
+ {?SNODE, [
+ % Bucket 0
+ ?ENTRY(15, 15),
+ % Bucket 1
+ ?ENTRY(14, 14),
+ % Bucket 1
+ ?ENTRY(13, 13),
+ % Bucket 2
+ ?ENTRY(12, 12),
+ % Bucket 2
+ ?ENTRY(11, 11),
+ % Bucket 2
+ ?ENTRY(10, 10),
+ % Bucket 2
+ ?ENTRY(9, 9),
+ % Bucket 3
+ ?ENTRY(8, 8),
+ % Bucket 3
+ ?ENTRY(7, 7),
+ % Bucket 3
+ ?ENTRY(6, 6),
+ % Bucket 3
+ ?ENTRY(5, 5),
+ % Bucket 3
+ ?ENTRY(4, 4),
+ % Bucket 3
+ ?ENTRY(3, 3),
+ % Bucket 3
+ ?ENTRY(2, 2),
+ % Bucket 3
+ ?ENTRY(1, 1)
+ ]}
+ ]},
+ ?assertEqual(
+ add_checkpoint(?ENTRY(16, 16), History),
+ {[
+ {?SNODE, [
+ % Bucket 0
+ ?ENTRY(16, 16),
+ % Bucket 0
+ ?ENTRY(15, 15),
+ % Bucket 1
+ ?ENTRY(14, 14),
+ % Bucket 1
+ ?ENTRY(13, 13),
+ % Bucket 2
+ ?ENTRY(12, 12),
+ % Bucket 2
+ ?ENTRY(9, 9),
+ % Bucket 3
+ ?ENTRY(8, 8),
+ % Bucket 3
+ ?ENTRY(1, 1)
+ ]}
]}
- ]}).
-
+ ).
add_checkpoint_show_not_always_a_drop_test() ->
% Depending on the edge conditions of buckets we
@@ -588,124 +617,181 @@ add_checkpoint_show_not_always_a_drop_test() ->
%
% I've added comments showing the bucket ID based
% on the ?ENTRY passed to add_checkpoint
- History = {[{?SNODE, [
- ?ENTRY(16, 16), % Bucket 0
- ?ENTRY(15, 15), % Bucket 1
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 2
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(9, 9), % Bucket 3
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(1, 1) % Bucket 4
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(17, 17), History), {[
- {?SNODE, [
- ?ENTRY(17, 17), % Bucket 0
- ?ENTRY(16, 16), % Bucket 0
- ?ENTRY(15, 15), % Bucket 1
- ?ENTRY(14, 14), % Bucket 1
- ?ENTRY(13, 13), % Bucket 2
- ?ENTRY(12, 12), % Bucket 2
- ?ENTRY(9, 9), % Bucket 3
- ?ENTRY(8, 8), % Bucket 3
- ?ENTRY(1, 1) % Bucket 4
+ History =
+ {[
+ {?SNODE, [
+ % Bucket 0
+ ?ENTRY(16, 16),
+ % Bucket 1
+ ?ENTRY(15, 15),
+ % Bucket 1
+ ?ENTRY(14, 14),
+ % Bucket 2
+ ?ENTRY(13, 13),
+ % Bucket 2
+ ?ENTRY(12, 12),
+ % Bucket 3
+ ?ENTRY(9, 9),
+ % Bucket 3
+ ?ENTRY(8, 8),
+ % Bucket 4
+ ?ENTRY(1, 1)
+ ]}
+ ]},
+ ?assertEqual(
+ add_checkpoint(?ENTRY(17, 17), History),
+ {[
+ {?SNODE, [
+ % Bucket 0
+ ?ENTRY(17, 17),
+ % Bucket 0
+ ?ENTRY(16, 16),
+ % Bucket 1
+ ?ENTRY(15, 15),
+ % Bucket 1
+ ?ENTRY(14, 14),
+ % Bucket 2
+ ?ENTRY(13, 13),
+ % Bucket 2
+ ?ENTRY(12, 12),
+ % Bucket 3
+ ?ENTRY(9, 9),
+ % Bucket 3
+ ?ENTRY(8, 8),
+ % Bucket 4
+ ?ENTRY(1, 1)
+ ]}
]}
- ]}).
-
+ ).
add_checkpoint_big_jump_show_lots_drop_test() ->
% I've added comments showing the bucket ID based
% on the ?ENTRY passed to add_checkpoint
- History = {[{?SNODE, [
- ?ENTRY(16, 16), % Bucket 4
- ?ENTRY(15, 15), % Bucket 4
- ?ENTRY(14, 14), % Bucket 4
- ?ENTRY(13, 13), % Bucket 4
- ?ENTRY(12, 12), % Bucket 4
- ?ENTRY(9, 9), % Bucket 4
- ?ENTRY(8, 8), % Bucket 4
- ?ENTRY(1, 1) % Bucket 4
- ]}]},
- ?assertEqual(add_checkpoint(?ENTRY(32, 32), History), {[
- {?SNODE, [
- ?ENTRY(32, 32), % Bucket 0
- ?ENTRY(16, 16), % Bucket 4
- ?ENTRY(1, 1) % Bucket 4
+ History =
+ {[
+ {?SNODE, [
+ % Bucket 4
+ ?ENTRY(16, 16),
+ % Bucket 4
+ ?ENTRY(15, 15),
+ % Bucket 4
+ ?ENTRY(14, 14),
+ % Bucket 4
+ ?ENTRY(13, 13),
+ % Bucket 4
+ ?ENTRY(12, 12),
+ % Bucket 4
+ ?ENTRY(9, 9),
+ % Bucket 4
+ ?ENTRY(8, 8),
+ % Bucket 4
+ ?ENTRY(1, 1)
+ ]}
+ ]},
+ ?assertEqual(
+ add_checkpoint(?ENTRY(32, 32), History),
+ {[
+ {?SNODE, [
+ % Bucket 0
+ ?ENTRY(32, 32),
+ % Bucket 4
+ ?ENTRY(16, 16),
+ % Bucket 4
+ ?ENTRY(1, 1)
+ ]}
]}
- ]}).
-
+ ).
add_checkpoint_show_filter_history_test() ->
- History = {[{?SNODE, [
- ?ENTRY(16, 16),
- ?ENTRY(15, 15),
- ?ENTRY(14, 14),
- ?ENTRY(13, 13),
- ?ENTRY(12, 12),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
- ]}]},
+ History =
+ {[
+ {?SNODE, [
+ ?ENTRY(16, 16),
+ ?ENTRY(15, 15),
+ ?ENTRY(14, 14),
+ ?ENTRY(13, 13),
+ ?ENTRY(12, 12),
+ ?ENTRY(9, 9),
+ ?ENTRY(8, 8),
+ ?ENTRY(1, 1)
+ ]}
+ ]},
% Drop for both
- ?assertEqual(add_checkpoint(?ENTRY(10, 10), History), {[
- {?SNODE, [
- ?ENTRY(10, 10),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(10, 10), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(10, 10),
+ ?ENTRY(9, 9),
+ ?ENTRY(8, 8),
+ ?ENTRY(1, 1)
+ ]}
]}
- ]}),
+ ),
     % Drop for source
- ?assertEqual(add_checkpoint(?ENTRY(10, 200), History), {[
- {?SNODE, [
- ?ENTRY(10, 200),
- ?ENTRY(9, 9),
- ?ENTRY(8, 8),
- ?ENTRY(1, 1)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(10, 200), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(10, 200),
+ ?ENTRY(9, 9),
+ ?ENTRY(8, 8),
+ ?ENTRY(1, 1)
+ ]}
]}
- ]}),
+ ),
% Drop for target. Obviously a source_seq of 200
     % will end up dropping the 8 entry.
- ?assertEqual(add_checkpoint(?ENTRY(200, 10), History), {[
- {?SNODE, [
- ?ENTRY(200, 10),
- ?ENTRY(9, 9),
- ?ENTRY(1, 1)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(200, 10), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(200, 10),
+ ?ENTRY(9, 9),
+ ?ENTRY(1, 1)
+ ]}
]}
- ]}).
-
+ ).
add_checkpoint_from_other_node_test() ->
- History = {[{<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
- ]}]},
- % No filtering
- ?assertEqual(add_checkpoint(?ENTRY(1, 1), History), {[
- {?SNODE, [
- ?ENTRY(1, 1)
+ History =
+ {[
+ {<<"not_the_source">>, [
+ ?ENTRY(12, 13),
+ ?ENTRY(11, 10),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
]},
- {<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
+ % No filtering
+ ?assertEqual(
+ add_checkpoint(?ENTRY(1, 1), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(1, 1)
+ ]},
+ {<<"not_the_source">>, [
+ ?ENTRY(12, 13),
+ ?ENTRY(11, 10),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}),
+ ),
% No dropping
- ?assertEqual(add_checkpoint(?ENTRY(200, 200), History), {[
- {?SNODE, [
- ?ENTRY(200, 200)
- ]},
- {<<"not_the_source">>, [
- ?ENTRY(12, 13),
- ?ENTRY(11, 10),
- ?ENTRY(10, 9),
- ?ENTRY(2, 3)
+ ?assertEqual(
+ add_checkpoint(?ENTRY(200, 200), History),
+ {[
+ {?SNODE, [
+ ?ENTRY(200, 200)
+ ]},
+ {<<"not_the_source">>, [
+ ?ENTRY(12, 13),
+ ?ENTRY(11, 10),
+ ?ENTRY(10, 9),
+ ?ENTRY(2, 3)
+ ]}
]}
- ]}).
-
+ ).
-endif.
diff --git a/src/mem3/src/mem3_seeds.erl b/src/mem3/src/mem3_seeds.erl
index f1aceb996..6d74398e7 100644
--- a/src/mem3/src/mem3_seeds.erl
+++ b/src/mem3/src/mem3_seeds.erl
@@ -32,7 +32,8 @@
ready = false,
seeds = [],
jobref = nil,
- status = [] % nested proplist keyed on node name
+ % nested proplist keyed on node name
+ status = []
}).
-define(REPLICATION_INTERVAL, 60000).
@@ -58,29 +59,36 @@ init([]) ->
InitStatus = [{Seed, {[]}} || Seed <- Seeds],
State = #st{
seeds = Seeds,
- ready = case Seeds of [] -> true; _ -> false end,
+ ready =
+ case Seeds of
+ [] -> true;
+ _ -> false
+ end,
jobref = start_replication(Seeds),
status = InitStatus
},
{ok, State}.
handle_call(get_status, _From, St) ->
- Status = {[
- {status, case St#st.ready of true -> ok; false -> seeding end},
- {seeds, {St#st.status}}
- ]},
+ Status =
+ {[
+ {status,
+ case St#st.ready of
+ true -> ok;
+ false -> seeding
+ end},
+ {seeds, {St#st.status}}
+ ]},
{reply, {ok, Status}, St}.
handle_cast(_Msg, St) ->
{noreply, St}.
-handle_info(start_replication, #st{jobref=nil} = St) ->
+handle_info(start_replication, #st{jobref = nil} = St) ->
JobRef = start_replication(St#st.seeds),
{noreply, St#st{jobref = JobRef}};
-
handle_info({'DOWN', Ref, _, Pid, Output}, #st{jobref = {Pid, Ref}} = St) ->
{noreply, update_state(St, Output)};
-
handle_info(_Msg, St) ->
{noreply, St}.
@@ -102,11 +110,12 @@ start_replication([Seed | _]) ->
update_state(State, {ok, Data}) ->
#st{seeds = [Current | Tail], status = Status} = State,
- Report = {[
- {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
- {last_replication_status, ok},
- format_data(Data)
- ]},
+ Report =
+ {[
+ {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
+ {last_replication_status, ok},
+ format_data(Data)
+ ]},
NewStatus = lists:ukeymerge(1, [{Current, Report}], Status),
Ready = is_ready(State#st.ready, Data),
case Ready of
@@ -126,16 +135,18 @@ update_state(State, {ok, Data}) ->
};
update_state(State, {_Error, _Stack}) ->
#st{seeds = [Current | Tail], status = Status} = State,
- Report = {[
- {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
- {last_replication_status, error}
- ]},
+ Report =
+ {[
+ {timestamp, list_to_binary(mem3_util:iso8601_timestamp())},
+ {last_replication_status, error}
+ ]},
NewStatus = lists:ukeymerge(1, [{Current, Report}], Status),
Seeds = Tail ++ [Current],
- if not State#st.ready ->
- erlang:send_after(1000, self(), start_replication);
- true ->
- ok
+ if
+ not State#st.ready ->
+ erlang:send_after(1000, self(), start_replication);
+ true ->
+ ok
end,
State#st{
seeds = Seeds,
@@ -149,14 +160,17 @@ is_ready(false, Data) ->
lists:all(fun({_DbName, Pending}) -> Pending =:= {ok, 0} end, Data).
format_data(Data) ->
- Formatted = lists:map(fun({DbName, Status}) ->
- case Status of
- {ok, Pending} when is_number(Pending) ->
- {DbName, Pending};
- {error, Tag} ->
- {DbName, list_to_binary(io_lib:format("~p", [Tag]))};
- _Else ->
- {DbName, unknown_error}
- end
- end, Data),
+ Formatted = lists:map(
+ fun({DbName, Status}) ->
+ case Status of
+ {ok, Pending} when is_number(Pending) ->
+ {DbName, Pending};
+ {error, Tag} ->
+ {DbName, list_to_binary(io_lib:format("~p", [Tag]))};
+ _Else ->
+ {DbName, unknown_error}
+ end
+ end,
+ Data
+ ),
{pending_updates, {Formatted}}.
diff --git a/src/mem3/src/mem3_shards.erl b/src/mem3/src/mem3_shards.erl
index fd1894abe..8bbc92411 100644
--- a/src/mem3/src/mem3_shards.erl
+++ b/src/mem3/src/mem3_shards.erl
@@ -60,17 +60,19 @@ for_db(DbName) ->
for_db(DbName, []).
for_db(DbName, Options) ->
- Shards = try ets:lookup(?SHARDS, DbName) of
- [] ->
- load_shards_from_disk(DbName);
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch error:badarg ->
- load_shards_from_disk(DbName)
- end,
+ Shards =
+ try ets:lookup(?SHARDS, DbName) of
+ [] ->
+ load_shards_from_disk(DbName);
+ Else ->
+ gen_server:cast(?MODULE, {cache_hit, DbName}),
+ Else
+ catch
+ error:badarg ->
+ load_shards_from_disk(DbName)
+ end,
case lists:member(ordered, Options) of
- true -> Shards;
+ true -> Shards;
false -> mem3_util:downcast(Shards)
end.
@@ -92,17 +94,19 @@ for_docid(DbName, DocId, Options) ->
Conditions = [{'=<', '$1', HashKey}, {'=<', HashKey, '$2'}],
ShardSpec = {ShardHead, Conditions, ['$_']},
OrderedShardSpec = {OrderedShardHead, Conditions, ['$_']},
- Shards = try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
- [] ->
- load_shards_from_disk(DbName, DocId);
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch error:badarg ->
- load_shards_from_disk(DbName, DocId)
- end,
+ Shards =
+ try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
+ [] ->
+ load_shards_from_disk(DbName, DocId);
+ Else ->
+ gen_server:cast(?MODULE, {cache_hit, DbName}),
+ Else
+ catch
+ error:badarg ->
+ load_shards_from_disk(DbName, DocId)
+ end,
case lists:member(ordered, Options) of
- true -> Shards;
+ true -> Shards;
false -> mem3_util:downcast(Shards)
end.
@@ -123,35 +127,43 @@ for_shard_range(ShardName) ->
Conditions = [{'=<', '$1', E}, {'=<', B, '$2'}],
ShardSpec = {ShardHead, Conditions, ['$_']},
OrderedShardSpec = {OrderedShardHead, Conditions, ['$_']},
- Shards = try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
- [] ->
- filter_shards_by_range([B, E], load_shards_from_disk(DbName));
- Else ->
- gen_server:cast(?MODULE, {cache_hit, DbName}),
- Else
- catch error:badarg ->
- filter_shards_by_range([B, E], load_shards_from_disk(DbName))
- end,
+ Shards =
+ try ets:select(?SHARDS, [ShardSpec, OrderedShardSpec]) of
+ [] ->
+ filter_shards_by_range([B, E], load_shards_from_disk(DbName));
+ Else ->
+ gen_server:cast(?MODULE, {cache_hit, DbName}),
+ Else
+ catch
+ error:badarg ->
+ filter_shards_by_range([B, E], load_shards_from_disk(DbName))
+ end,
mem3_util:downcast(Shards).
-
get(DbName, Node, Range) ->
- Res = lists:foldl(fun(#shard{node=N, range=R}=S, Acc) ->
- case {N, R} of
- {Node, Range} -> [S | Acc];
- _ -> Acc
- end
- end, [], for_db(DbName)),
+ Res = lists:foldl(
+ fun(#shard{node = N, range = R} = S, Acc) ->
+ case {N, R} of
+ {Node, Range} -> [S | Acc];
+ _ -> Acc
+ end
+ end,
+ [],
+ for_db(DbName)
+ ),
case Res of
[] -> {error, not_found};
[Shard] -> {ok, Shard};
- [_|_] -> {error, duplicates}
+ [_ | _] -> {error, duplicates}
end.
local(DbName) when is_list(DbName) ->
local(list_to_binary(DbName));
local(DbName) ->
- Pred = fun(#shard{node=Node}) when Node == node() -> true; (_) -> false end,
+ Pred = fun
+ (#shard{node = Node}) when Node == node() -> true;
+ (_) -> false
+ end,
lists:filter(Pred, for_db(DbName)).
fold(Fun, Acc) ->
@@ -177,11 +189,13 @@ handle_config_change("mem3", "shard_cache_size", SizeList, _, _) ->
handle_config_change("mem3", "shards_db", _DbName, _, _) ->
{ok, gen_server:call(?MODULE, shard_db_changed, infinity)};
handle_config_change("mem3", "shard_write_timeout", Timeout, _, _) ->
- Timeout = try
- list_to_integer(Timeout)
- catch _:_ ->
- 1000
- end,
+ Timeout =
+ try
+ list_to_integer(Timeout)
+ catch
+ _:_ ->
+ 1000
+ end,
{ok, gen_server:call(?MODULE, {set_write_timeout, Timeout})};
handle_config_change(_, _, _, _, _) ->
{ok, nil}.
@@ -197,7 +211,7 @@ init([]) ->
bag,
public,
named_table,
- {keypos,#shard.dbname},
+ {keypos, #shard.dbname},
{read_concurrency, true}
]),
ets:new(?DBS, [set, protected, named_table]),
@@ -216,7 +230,7 @@ init([]) ->
}}.
handle_call({set_max_size, Size}, _From, St) ->
- {reply, ok, cache_free(St#st{max_size=Size})};
+ {reply, ok, cache_free(St#st{max_size = Size})};
handle_call(shard_db_changed, _From, St) ->
exit(St#st.changes_pid, shard_db_changed),
{reply, ok, St};
@@ -237,13 +251,14 @@ handle_cast({cache_insert, DbName, Writer, UpdateSeq}, St) ->
% to think of when a _dbs db doesn't change. If it used
% `=<` it would be impossible to insert anything into
% the cache.
- NewSt = case UpdateSeq < St#st.update_seq of
- true ->
- Writer ! cancel,
- St;
- false ->
- cache_free(cache_insert(St, DbName, Writer, St#st.write_timeout))
- end,
+ NewSt =
+ case UpdateSeq < St#st.update_seq of
+ true ->
+ Writer ! cancel,
+ St;
+ false ->
+ cache_free(cache_insert(St, DbName, Writer, St#st.write_timeout))
+ end,
{noreply, NewSt};
handle_cast({cache_remove, DbName}, St) ->
couch_stats:increment_counter([mem3, shard_cache, eviction]),
@@ -258,18 +273,19 @@ handle_cast({cache_remove_change, DbName, UpdateSeq}, St) ->
handle_cast(_Msg, St) ->
{noreply, St}.
-handle_info({'DOWN', _, _, Pid, Reason}, #st{changes_pid=Pid}=St) ->
- {NewSt, Seq} = case Reason of
- {seq, EndSeq} ->
- {St, EndSeq};
- shard_db_changed ->
- {cache_clear(St), get_update_seq()};
- _ ->
- couch_log:notice("~p changes listener died ~p", [?MODULE, Reason]),
- {St, get_update_seq()}
- end,
+handle_info({'DOWN', _, _, Pid, Reason}, #st{changes_pid = Pid} = St) ->
+ {NewSt, Seq} =
+ case Reason of
+ {seq, EndSeq} ->
+ {St, EndSeq};
+ shard_db_changed ->
+ {cache_clear(St), get_update_seq()};
+ _ ->
+ couch_log:notice("~p changes listener died ~p", [?MODULE, Reason]),
+ {St, get_update_seq()}
+ end,
erlang:send_after(5000, self(), {start_listener, Seq}),
- {noreply, NewSt#st{changes_pid=undefined}};
+ {noreply, NewSt#st{changes_pid = undefined}};
handle_info({start_listener, Seq}, St) ->
{noreply, St#st{
changes_pid = start_changes_listener(Seq)
@@ -280,11 +296,11 @@ handle_info(restart_config_listener, State) ->
handle_info(_Msg, St) ->
{noreply, St}.
-terminate(_Reason, #st{changes_pid=Pid}) ->
+terminate(_Reason, #st{changes_pid = Pid}) ->
exit(Pid, kill),
ok.
-code_change(_OldVsn, #st{}=St, _Extra) ->
+code_change(_OldVsn, #st{} = St, _Extra) ->
{ok, St}.
%% internal functions
@@ -304,10 +320,10 @@ start_changes_listener(SinceSeq) ->
end),
Pid.
-fold_fun(#full_doc_info{}=FDI, Acc) ->
+fold_fun(#full_doc_info{} = FDI, Acc) ->
DI = couch_doc:to_doc_info(FDI),
fold_fun(DI, Acc);
-fold_fun(#doc_info{}=DI, {Db, UFun, UAcc}) ->
+fold_fun(#doc_info{} = DI, {Db, UFun, UAcc}) ->
case couch_db:open_doc(Db, DI, [ejson_body, conflicts]) of
{ok, Doc} ->
{Props} = Doc#doc.body,
@@ -342,27 +358,37 @@ changes_callback({stop, EndSeq}, _) ->
changes_callback({change, {Change}, _}, _) ->
DbName = couch_util:get_value(<<"id">>, Change),
Seq = couch_util:get_value(<<"seq">>, Change),
- case DbName of <<"_design/", _/binary>> -> ok; _Else ->
- case mem3_util:is_deleted(Change) of
- true ->
- gen_server:cast(?MODULE, {cache_remove_change, DbName, Seq});
- false ->
- case couch_util:get_value(doc, Change) of
- {error, Reason} ->
- couch_log:error("missing partition table for ~s: ~p",
- [DbName, Reason]);
- {Doc} ->
- Shards = mem3_util:build_ordered_shards(DbName, Doc),
- IdleTimeout = config:get_integer(
- "mem3", "writer_idle_timeout", 30000),
- Writer = spawn_shard_writer(DbName, Shards, IdleTimeout),
- ets:insert(?OPENERS, {DbName, Writer}),
- Msg = {cache_insert_change, DbName, Writer, Seq},
- gen_server:cast(?MODULE, Msg),
- [create_if_missing(mem3:name(S)) || S
- <- Shards, mem3:node(S) =:= node()]
+ case DbName of
+ <<"_design/", _/binary>> ->
+ ok;
+ _Else ->
+ case mem3_util:is_deleted(Change) of
+ true ->
+ gen_server:cast(?MODULE, {cache_remove_change, DbName, Seq});
+ false ->
+ case couch_util:get_value(doc, Change) of
+ {error, Reason} ->
+ couch_log:error(
+ "missing partition table for ~s: ~p",
+ [DbName, Reason]
+ );
+ {Doc} ->
+ Shards = mem3_util:build_ordered_shards(DbName, Doc),
+ IdleTimeout = config:get_integer(
+ "mem3", "writer_idle_timeout", 30000
+ ),
+ Writer = spawn_shard_writer(DbName, Shards, IdleTimeout),
+ ets:insert(?OPENERS, {DbName, Writer}),
+ Msg = {cache_insert_change, DbName, Writer, Seq},
+ gen_server:cast(?MODULE, Msg),
+ [
+ create_if_missing(mem3:name(S))
+ || S <-
+ Shards,
+ mem3:node(S) =:= node()
+ ]
+ end
end
- end
end,
{ok, Seq};
changes_callback(timeout, _) ->
@@ -379,28 +405,28 @@ load_shards_from_disk(DbName) when is_binary(DbName) ->
load_shards_from_db(ShardDb, DbName) ->
case couch_db:open_doc(ShardDb, DbName, [ejson_body]) of
- {ok, #doc{body = {Props}}} ->
- Seq = couch_db:get_update_seq(ShardDb),
- Shards = mem3_util:build_ordered_shards(DbName, Props),
- IdleTimeout = config:get_integer("mem3", "writer_idle_timeout", 30000),
- case maybe_spawn_shard_writer(DbName, Shards, IdleTimeout) of
- Writer when is_pid(Writer) ->
- case ets:insert_new(?OPENERS, {DbName, Writer}) of
- true ->
- Msg = {cache_insert, DbName, Writer, Seq},
- gen_server:cast(?MODULE, Msg);
- false ->
- Writer ! cancel
- end;
- ignore ->
- ok
- end,
- Shards;
- {not_found, _} ->
- erlang:error(database_does_not_exist, ?b2l(DbName))
+ {ok, #doc{body = {Props}}} ->
+ Seq = couch_db:get_update_seq(ShardDb),
+ Shards = mem3_util:build_ordered_shards(DbName, Props),
+ IdleTimeout = config:get_integer("mem3", "writer_idle_timeout", 30000),
+ case maybe_spawn_shard_writer(DbName, Shards, IdleTimeout) of
+ Writer when is_pid(Writer) ->
+ case ets:insert_new(?OPENERS, {DbName, Writer}) of
+ true ->
+ Msg = {cache_insert, DbName, Writer, Seq},
+ gen_server:cast(?MODULE, Msg);
+ false ->
+ Writer ! cancel
+ end;
+ ignore ->
+ ok
+ end,
+ Shards;
+ {not_found, _} ->
+ erlang:error(database_does_not_exist, ?b2l(DbName))
end.
-load_shards_from_disk(DbName, DocId)->
+load_shards_from_disk(DbName, DocId) ->
Shards = load_shards_from_disk(DbName),
HashKey = mem3_hash:calculate(hd(Shards), DocId),
[S || S <- Shards, in_range(S, HashKey)].
@@ -416,15 +442,17 @@ create_if_missing(ShardName) ->
false ->
Options = opts_for_db(ShardName),
case couch_server:create(ShardName, [?ADMIN_CTX] ++ Options) of
- {ok, Db} ->
- couch_db:close(Db);
- Error ->
- couch_log:error("~p tried to create ~s, got ~p",
- [?MODULE, ShardName, Error])
+ {ok, Db} ->
+ couch_db:close(Db);
+ Error ->
+ couch_log:error(
+ "~p tried to create ~s, got ~p",
+ [?MODULE, ShardName, Error]
+ )
end
end.
-cache_insert(#st{cur_size=Cur}=St, DbName, Writer, Timeout) ->
+cache_insert(#st{cur_size = Cur} = St, DbName, Writer, Timeout) ->
NewATime = couch_util:unique_monotonic_integer(),
true = ets:delete(?SHARDS, DbName),
flush_write(DbName, Writer, Timeout),
@@ -437,16 +465,16 @@ cache_insert(#st{cur_size=Cur}=St, DbName, Writer, Timeout) ->
[] ->
true = ets:insert(?ATIMES, {NewATime, DbName}),
true = ets:insert(?DBS, {DbName, NewATime}),
- St#st{cur_size=Cur + 1}
+ St#st{cur_size = Cur + 1}
end.
-cache_remove(#st{cur_size=Cur}=St, DbName) ->
+cache_remove(#st{cur_size = Cur} = St, DbName) ->
true = ets:delete(?SHARDS, DbName),
case ets:lookup(?DBS, DbName) of
[{DbName, ATime}] ->
true = ets:delete(?DBS, DbName),
true = ets:delete(?ATIMES, ATime),
- St#st{cur_size=Cur-1};
+ St#st{cur_size = Cur - 1};
[] ->
St
end.
@@ -462,13 +490,13 @@ cache_hit(DbName) ->
ok
end.
-cache_free(#st{max_size=Max, cur_size=Cur}=St) when Max =< Cur ->
+cache_free(#st{max_size = Max, cur_size = Cur} = St) when Max =< Cur ->
ATime = ets:first(?ATIMES),
[{ATime, DbName}] = ets:lookup(?ATIMES, ATime),
true = ets:delete(?ATIMES, ATime),
true = ets:delete(?DBS, DbName),
true = ets:delete(?SHARDS, DbName),
- cache_free(St#st{cur_size=Cur-1});
+ cache_free(St#st{cur_size = Cur - 1});
cache_free(St) ->
St.
@@ -476,7 +504,7 @@ cache_clear(St) ->
true = ets:delete_all_objects(?DBS),
true = ets:delete_all_objects(?SHARDS),
true = ets:delete_all_objects(?ATIMES),
- St#st{cur_size=0}.
+ St#st{cur_size = 0}.
maybe_spawn_shard_writer(DbName, Shards, IdleTimeout) ->
case ets:member(?OPENERS, DbName) of
@@ -515,13 +543,14 @@ flush_write(DbName, Writer, WriteTimeout) ->
erlang:exit({mem3_shards_write_timeout, DbName})
end.
-
-filter_shards_by_range(Range, Shards)->
- lists:filter(fun
- (#ordered_shard{range = R}) -> mem3_util:range_overlap(Range, R);
- (#shard{range = R}) -> mem3_util:range_overlap(Range, R)
- end, Shards).
-
+filter_shards_by_range(Range, Shards) ->
+ lists:filter(
+ fun
+ (#ordered_shard{range = R}) -> mem3_util:range_overlap(Range, R);
+ (#shard{range = R}) -> mem3_util:range_overlap(Range, R)
+ end,
+ Shards
+ ).
-ifdef(TEST).
@@ -530,7 +559,6 @@ filter_shards_by_range(Range, Shards)->
-define(DB, <<"eunit_db_name">>).
-define(INFINITY, 99999999).
-
mem3_shards_test_() ->
{
setup,
@@ -555,7 +583,6 @@ mem3_shards_test_() ->
}
}.
-
setup_all() ->
ets:new(?SHARDS, [bag, public, named_table, {keypos, #shard.dbname}]),
ets:new(?OPENERS, [bag, public, named_table]),
@@ -564,7 +591,6 @@ setup_all() ->
meck:expect(config, get, ["mem3", "shards_db", '_'], "_dbs"),
ok.
-
teardown_all(_) ->
meck:unload(),
ets:delete(?ATIMES),
@@ -572,18 +598,15 @@ teardown_all(_) ->
ets:delete(?OPENERS),
ets:delete(?SHARDS).
-
setup() ->
ets:delete_all_objects(?ATIMES),
ets:delete_all_objects(?DBS),
ets:delete_all_objects(?OPENERS),
ets:delete_all_objects(?SHARDS).
-
teardown(_) ->
ok.
-
t_maybe_spawn_shard_writer_already_exists() ->
?_test(begin
ets:insert(?OPENERS, {?DB, self()}),
@@ -592,7 +615,6 @@ t_maybe_spawn_shard_writer_already_exists() ->
?assertEqual(ignore, WRes)
end).
-
t_maybe_spawn_shard_writer_new() ->
?_test(begin
Shards = mock_shards(),
@@ -605,7 +627,6 @@ t_maybe_spawn_shard_writer_new() ->
?assertEqual(Shards, ets:tab2list(?SHARDS))
end).
-
t_flush_writer_exists_normal() ->
?_test(begin
Shards = mock_shards(),
@@ -614,24 +635,29 @@ t_flush_writer_exists_normal() ->
?assertEqual(Shards, ets:tab2list(?SHARDS))
end).
-
t_flush_writer_times_out() ->
?_test(begin
- WPid = spawn(fun() -> receive will_never_receive_this -> ok end end),
+ WPid = spawn(fun() ->
+ receive
+ will_never_receive_this -> ok
+ end
+ end),
Error = {mem3_shards_write_timeout, ?DB},
?assertExit(Error, flush_write(?DB, WPid, 100)),
exit(WPid, kill)
end).
-
t_flush_writer_crashes() ->
?_test(begin
- WPid = spawn(fun() -> receive write -> exit('kapow!') end end),
+ WPid = spawn(fun() ->
+ receive
+ write -> exit('kapow!')
+ end
+ end),
Error = {mem3_shards_bad_write, 'kapow!'},
?assertExit(Error, flush_write(?DB, WPid, 1000))
end).
-
t_writer_deletes_itself_when_done() ->
?_test(begin
Shards = mock_shards(),
@@ -644,14 +670,14 @@ t_writer_deletes_itself_when_done() ->
?assertEqual([], ets:tab2list(?OPENERS))
end).
-
t_writer_does_not_delete_other_writers_for_same_shard() ->
?_test(begin
Shards = mock_shards(),
WPid = spawn_link_mock_writer(?DB, Shards, ?INFINITY),
WRef = erlang:monitor(process, WPid),
ets:insert(?OPENERS, {?DB, WPid}),
- ets:insert(?OPENERS, {?DB, self()}), % should not be deleted
+ % should not be deleted
+ ets:insert(?OPENERS, {?DB, self()}),
WPid ! write,
?assertEqual(normal, wait_writer_result(WRef)),
?assertEqual(Shards, ets:tab2list(?SHARDS)),
@@ -659,21 +685,22 @@ t_writer_does_not_delete_other_writers_for_same_shard() ->
?assertEqual([{?DB, self()}], ets:tab2list(?OPENERS))
end).
-
t_spawn_writer_in_load_shards_from_db() ->
?_test(begin
meck:expect(couch_db, open_doc, 3, {ok, #doc{body = {[]}}}),
meck:expect(couch_db, get_update_seq, 1, 1),
meck:expect(mem3_util, build_ordered_shards, 2, mock_shards()),
- erlang:register(?MODULE, self()), % register to get cache_insert cast
+ % register to get cache_insert cast
+ erlang:register(?MODULE, self()),
load_shards_from_db(test_util:fake_db([{name, <<"testdb">>}]), ?DB),
meck:validate(couch_db),
meck:validate(mem3_util),
- Cast = receive
+ Cast =
+ receive
{'$gen_cast', Msg} -> Msg
after 1000 ->
timeout
- end,
+ end,
?assertMatch({cache_insert, ?DB, Pid, 1} when is_pid(Pid), Cast),
{cache_insert, _, WPid, _} = Cast,
exit(WPid, kill),
@@ -682,7 +709,6 @@ t_spawn_writer_in_load_shards_from_db() ->
meck:unload(mem3_util)
end).
-
t_cache_insert_takes_new_update() ->
?_test(begin
Shards = mock_shards(),
@@ -694,7 +720,6 @@ t_cache_insert_takes_new_update() ->
?assertEqual([], ets:tab2list(?OPENERS))
end).
-
t_cache_insert_ignores_stale_update_and_kills_worker() ->
?_test(begin
Shards = mock_shards(),
@@ -708,7 +733,6 @@ t_cache_insert_ignores_stale_update_and_kills_worker() ->
?assertEqual([], ets:tab2list(?OPENERS))
end).
-
mock_state(UpdateSeq) ->
#st{
update_seq = UpdateSeq,
@@ -716,44 +740,40 @@ mock_state(UpdateSeq) ->
write_timeout = 1000
}.
-
mock_shards() ->
[
#ordered_shard{
name = <<"testshardname">>,
node = node(),
dbname = ?DB,
- range = [0,1],
+ range = [0, 1],
order = 1
}
].
-
wait_writer_result(WRef) ->
receive
{'DOWN', WRef, _, _, Result} ->
Result
- after 1000 ->
- timeout
+ after 1000 ->
+ timeout
end.
-
spawn_link_mock_writer(Db, Shards, Timeout) ->
erlang:spawn_link(fun() -> shard_writer(Db, Shards, Timeout) end).
-
-
-mem3_shards_changes_test_() -> {
- "Test mem3_shards changes listener",
+mem3_shards_changes_test_() ->
{
- setup,
- fun test_util:start_couch/0, fun test_util:stop_couch/1,
- [
- fun should_kill_changes_listener_on_shutdown/0
- ]
- }
-}.
-
+ "Test mem3_shards changes listener",
+ {
+ setup,
+ fun test_util:start_couch/0,
+ fun test_util:stop_couch/1,
+ [
+ fun should_kill_changes_listener_on_shutdown/0
+ ]
+ }
+ }.
should_kill_changes_listener_on_shutdown() ->
{ok, Pid} = ?MODULE:start_link(),
@@ -761,7 +781,8 @@ should_kill_changes_listener_on_shutdown() ->
?assert(is_process_alive(ChangesPid)),
true = erlang:unlink(Pid),
true = test_util:stop_sync_throw(
- ChangesPid, fun() -> exit(Pid, shutdown) end, wait_timeout),
+ ChangesPid, fun() -> exit(Pid, shutdown) end, wait_timeout
+ ),
?assertNot(is_process_alive(ChangesPid)),
exit(Pid, shutdown).
diff --git a/src/mem3/src/mem3_sup.erl b/src/mem3/src/mem3_sup.erl
index 3a1a3ca5a..a2dc5ba8d 100644
--- a/src/mem3/src/mem3_sup.erl
+++ b/src/mem3/src/mem3_sup.erl
@@ -22,13 +22,14 @@ init(_Args) ->
child(mem3_events),
child(mem3_nodes),
child(mem3_seeds),
- child(mem3_sync_nodes), % Order important?
+ % Order important?
+ child(mem3_sync_nodes),
child(mem3_sync),
child(mem3_shards),
child(mem3_sync_event_listener),
child(mem3_reshard_sup)
],
- {ok, {{one_for_one,10,1}, couch_epi:register_service(mem3_epi, Children)}}.
+ {ok, {{one_for_one, 10, 1}, couch_epi:register_service(mem3_epi, Children)}}.
child(mem3_events) ->
MFA = {gen_event, start_link, [{local, mem3_events}]},
diff --git a/src/mem3/src/mem3_sync.erl b/src/mem3/src/mem3_sync.erl
index cfed6a445..3d1c18420 100644
--- a/src/mem3/src/mem3_sync.erl
+++ b/src/mem3/src/mem3_sync.erl
@@ -13,12 +13,29 @@
-module(mem3_sync).
-behaviour(gen_server).
-vsn(1).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
--export([start_link/0, get_active/0, get_queue/0, push/1, push/2,
- remove_node/1, remove_shard/1, initial_sync/1, get_backlog/0, nodes_db/0,
- shards_db/0, users_db/0, find_next_node/0]).
+-export([
+ start_link/0,
+ get_active/0,
+ get_queue/0,
+ push/1, push/2,
+ remove_node/1,
+ remove_shard/1,
+ initial_sync/1,
+ get_backlog/0,
+ nodes_db/0,
+ shards_db/0,
+ users_db/0,
+ find_next_node/0
+]).
-export([
local_dbs/0
]).
@@ -36,7 +53,7 @@
waiting = queue:new()
}).
--record(job, {name, node, count=nil, pid=nil}).
+-record(job, {name, node, count = nil, pid = nil}).
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
@@ -52,7 +69,7 @@ get_backlog() ->
push(#shard{name = Name}, Target) ->
push(Name, Target);
-push(Name, #shard{node=Node}) ->
+push(Name, #shard{node = Node}) ->
push(Name, Node);
push(Name, Node) ->
push(#job{name = Name, node = Node}).
@@ -77,137 +94,147 @@ init([]) ->
handle_call({push, Job}, From, State) ->
handle_cast({push, Job#job{pid = From}}, State);
-
handle_call(get_active, _From, State) ->
{reply, State#state.active, State};
-
handle_call(get_queue, _From, State) ->
{reply, to_list(State#state.waiting), State};
-
-handle_call(get_backlog, _From, #state{active=A, waiting=WQ} = State) ->
- CA = lists:sum([C || #job{count=C} <- A, is_integer(C)]),
- CW = lists:sum([C || #job{count=C} <- to_list(WQ), is_integer(C)]),
- {reply, CA+CW, State}.
+handle_call(get_backlog, _From, #state{active = A, waiting = WQ} = State) ->
+ CA = lists:sum([C || #job{count = C} <- A, is_integer(C)]),
+ CW = lists:sum([C || #job{count = C} <- to_list(WQ), is_integer(C)]),
+ {reply, CA + CW, State}.
handle_cast({push, DbName, Node}, State) ->
handle_cast({push, #job{name = DbName, node = Node}}, State);
-
-handle_cast({push, Job}, #state{count=Count, limit=Limit} = State)
- when Count >= Limit ->
+handle_cast({push, Job}, #state{count = Count, limit = Limit} = State) when
+ Count >= Limit
+->
{noreply, add_to_queue(State, Job)};
-
handle_cast({push, Job}, State) ->
#state{active = L, count = C} = State,
#job{name = DbName, node = Node} = Job,
case is_running(DbName, Node, L) of
- true ->
- {noreply, add_to_queue(State, Job)};
- false ->
- Pid = start_push_replication(Job),
- {noreply, State#state{active=[Job#job{pid=Pid}|L], count=C+1}}
+ true ->
+ {noreply, add_to_queue(State, Job)};
+ false ->
+ Pid = start_push_replication(Job),
+ {noreply, State#state{active = [Job#job{pid = Pid} | L], count = C + 1}}
end;
-
handle_cast({remove_node, Node}, #state{waiting = W0} = State) ->
- {Alive, Dead} = lists:partition(fun(#job{node=N}) -> N =/= Node end, to_list(W0)),
+ {Alive, Dead} = lists:partition(fun(#job{node = N}) -> N =/= Node end, to_list(W0)),
Dict = remove_entries(State#state.dict, Dead),
- [exit(Pid, die_now) || #job{node=N, pid=Pid} <- State#state.active,
- N =:= Node],
+ [
+ exit(Pid, die_now)
+ || #job{node = N, pid = Pid} <- State#state.active,
+ N =:= Node
+ ],
{noreply, State#state{dict = Dict, waiting = from_list(Alive)}};
-
handle_cast({remove_shard, Shard}, #state{waiting = W0} = State) ->
- {Alive, Dead} = lists:partition(fun(#job{name=S}) ->
- S =/= Shard end, to_list(W0)),
+ {Alive, Dead} = lists:partition(
+ fun(#job{name = S}) ->
+ S =/= Shard
+ end,
+ to_list(W0)
+ ),
Dict = remove_entries(State#state.dict, Dead),
- [exit(Pid, die_now) || #job{name=S, pid=Pid} <- State#state.active,
- S =:= Shard],
+ [
+ exit(Pid, die_now)
+ || #job{name = S, pid = Pid} <- State#state.active,
+ S =:= Shard
+ ],
{noreply, State#state{dict = Dict, waiting = from_list(Alive)}}.
handle_info({'EXIT', Active, normal}, State) ->
handle_replication_exit(State, Active);
-
handle_info({'EXIT', Active, die_now}, State) ->
% we forced this one ourselves, do not retry
handle_replication_exit(State, Active);
-
handle_info({'EXIT', Active, {{not_found, no_db_file}, _Stack}}, State) ->
% target doesn't exist, do not retry
handle_replication_exit(State, Active);
-
handle_info({'EXIT', Active, Reason}, State) ->
- NewState = case lists:keyfind(Active, #job.pid, State#state.active) of
- #job{name=OldDbName, node=OldNode} = Job ->
- couch_log:warning("~s ~s ~s ~w", [?MODULE, OldDbName, OldNode, Reason]),
- case Reason of {pending_changes, Count} ->
- maybe_resubmit(State, Job#job{pid = nil, count = Count});
- _ ->
- case mem3:db_is_current(Job#job.name) of
- true ->
- timer:apply_after(5000, ?MODULE, push, [Job#job{pid=nil}]);
- false ->
- % no need to retry (db deleted or recreated)
- ok
- end,
- State
- end;
- false -> State end,
+ NewState =
+ case lists:keyfind(Active, #job.pid, State#state.active) of
+ #job{name = OldDbName, node = OldNode} = Job ->
+ couch_log:warning("~s ~s ~s ~w", [?MODULE, OldDbName, OldNode, Reason]),
+ case Reason of
+ {pending_changes, Count} ->
+ maybe_resubmit(State, Job#job{pid = nil, count = Count});
+ _ ->
+ case mem3:db_is_current(Job#job.name) of
+ true ->
+ timer:apply_after(5000, ?MODULE, push, [Job#job{pid = nil}]);
+ false ->
+ % no need to retry (db deleted or recreated)
+ ok
+ end,
+ State
+ end;
+ false ->
+ State
+ end,
handle_replication_exit(NewState, Active);
-
handle_info(Msg, State) ->
couch_log:notice("unexpected msg at replication manager ~p", [Msg]),
{noreply, State}.
terminate(_Reason, State) ->
- [exit(Pid, shutdown) || #job{pid=Pid} <- State#state.active],
+ [exit(Pid, shutdown) || #job{pid = Pid} <- State#state.active],
ok.
code_change(_, #state{waiting = WaitingList} = State, _) when is_list(WaitingList) ->
{ok, State#state{waiting = from_list(WaitingList)}};
-
code_change(_, State, _) ->
{ok, State}.
-maybe_resubmit(State, #job{name=DbName, node=Node} = Job) ->
+maybe_resubmit(State, #job{name = DbName, node = Node} = Job) ->
case lists:member(DbName, local_dbs()) of
- true ->
- case find_next_node() of
- Node ->
- add_to_queue(State, Job);
- _ ->
- State % don't resubmit b/c we have a new replication target
- end;
- false ->
- add_to_queue(State, Job)
+ true ->
+ case find_next_node() of
+ Node ->
+ add_to_queue(State, Job);
+ _ ->
+ % don't resubmit b/c we have a new replication target
+ State
+ end;
+ false ->
+ add_to_queue(State, Job)
end.
handle_replication_exit(State, Pid) ->
- #state{active=Active, limit=Limit, dict=D, waiting=Waiting} = State,
+ #state{active = Active, limit = Limit, dict = D, waiting = Waiting} = State,
Active1 = lists:keydelete(Pid, #job.pid, Active),
case is_empty(Waiting) of
- true ->
- {noreply, State#state{active=Active1, count=length(Active1)}};
- _ ->
- Count = length(Active1),
- NewState = if Count < Limit ->
- case next_replication(Active1, Waiting, queue:new()) of
- nil -> % all waiting replications are also active
- State#state{active = Active1, count = Count};
- {#job{name=DbName, node=Node} = Job, StillWaiting} ->
- NewPid = start_push_replication(Job),
- State#state{
- active = [Job#job{pid = NewPid} | Active1],
- count = Count+1,
- dict = dict:erase({DbName,Node}, D),
- waiting = StillWaiting
- }
- end;
true ->
- State#state{active = Active1, count=Count}
- end,
- {noreply, NewState}
+ {noreply, State#state{active = Active1, count = length(Active1)}};
+ _ ->
+ Count = length(Active1),
+ NewState =
+ if
+ Count < Limit ->
+ case next_replication(Active1, Waiting, queue:new()) of
+ % all waiting replications are also active
+ nil ->
+ State#state{active = Active1, count = Count};
+ {#job{name = DbName, node = Node} = Job, StillWaiting} ->
+ NewPid = start_push_replication(Job),
+ State#state{
+ active = [Job#job{pid = NewPid} | Active1],
+ count = Count + 1,
+ dict = dict:erase({DbName, Node}, D),
+ waiting = StillWaiting
+ }
+ end;
+ true ->
+ State#state{active = Active1, count = Count}
+ end,
+ {noreply, NewState}
end.
-start_push_replication(#job{name=Name, node=Node, pid=From}) ->
- if From =/= nil -> gen_server:reply(From, ok); true -> ok end,
+start_push_replication(#job{name = Name, node = Node, pid = From}) ->
+ if
+ From =/= nil -> gen_server:reply(From, ok);
+ true -> ok
+ end,
spawn_link(fun() ->
case mem3_rep:go(Name, maybe_redirect(Node)) of
{ok, Pending} when Pending > 0 ->
@@ -217,18 +244,21 @@ start_push_replication(#job{name=Name, node=Node, pid=From}) ->
end
end).
-add_to_queue(State, #job{name=DbName, node=Node, pid=From} = Job) ->
- #state{dict=D, waiting=WQ} = State,
+add_to_queue(State, #job{name = DbName, node = Node, pid = From} = Job) ->
+ #state{dict = D, waiting = WQ} = State,
case dict:is_key({DbName, Node}, D) of
- true ->
- if From =/= nil -> gen_server:reply(From, ok); true -> ok end,
- State;
- false ->
- couch_log:debug("adding ~s -> ~p to mem3_sync queue", [DbName, Node]),
- State#state{
- dict = dict:store({DbName,Node}, ok, D),
- waiting = in(Job, WQ)
- }
+ true ->
+ if
+ From =/= nil -> gen_server:reply(From, ok);
+ true -> ok
+ end,
+ State;
+ false ->
+ couch_log:debug("adding ~s -> ~p to mem3_sync queue", [DbName, Node]),
+ State#state{
+ dict = dict:store({DbName, Node}, ok, D),
+ waiting = in(Job, WQ)
+ }
end.
sync_nodes_and_dbs() ->
@@ -247,32 +277,37 @@ initial_sync(Live) ->
initial_sync_fold(#shard{dbname = Db} = Shard, {LocalNode, Live, AccShards}) ->
case AccShards of
- [#shard{dbname = AccDb} | _] when Db =/= AccDb ->
- submit_replication_tasks(LocalNode, Live, AccShards),
- {LocalNode, Live, [Shard]};
- _ ->
- {LocalNode, Live, [Shard|AccShards]}
+ [#shard{dbname = AccDb} | _] when Db =/= AccDb ->
+ submit_replication_tasks(LocalNode, Live, AccShards),
+ {LocalNode, Live, [Shard]};
+ _ ->
+ {LocalNode, Live, [Shard | AccShards]}
end.
submit_replication_tasks(LocalNode, Live, Shards) ->
SplitFun = fun(#shard{node = Node}) -> Node =:= LocalNode end,
{Local, Remote} = lists:partition(SplitFun, Shards),
- lists:foreach(fun(#shard{name = ShardName}) ->
- [sync_push(ShardName, N) || #shard{node=N, name=Name} <- Remote,
- Name =:= ShardName, lists:member(N, Live)]
- end, Local).
+ lists:foreach(
+ fun(#shard{name = ShardName}) ->
+ [
+ sync_push(ShardName, N)
+ || #shard{node = N, name = Name} <- Remote,
+ Name =:= ShardName,
+ lists:member(N, Live)
+ ]
+ end,
+ Local
+ ).
sync_push(ShardName, N) ->
- gen_server:call(mem3_sync, {push, #job{name=ShardName, node=N}}, infinity).
-
-
+ gen_server:call(mem3_sync, {push, #job{name = ShardName, node = N}}, infinity).
find_next_node() ->
- LiveNodes = [node()|nodes()],
+ LiveNodes = [node() | nodes()],
AllNodes0 = lists:sort(mem3:nodes()),
AllNodes1 = [X || X <- AllNodes0, lists:member(X, LiveNodes)],
AllNodes = AllNodes1 ++ [hd(AllNodes1)],
- [_Self, Next| _] = lists:dropwhile(fun(N) -> N =/= node() end, AllNodes),
+ [_Self, Next | _] = lists:dropwhile(fun(N) -> N =/= node() end, AllNodes),
Next.
%% @doc Finds the next {DbName,Node} pair in the list of waiting replications
@@ -281,25 +316,29 @@ find_next_node() ->
{#job{}, queue:queue(_)} | nil.
next_replication(Active, Waiting, WaitingAndRunning) ->
case is_empty(Waiting) of
- true ->
- nil;
- false ->
- {{value, #job{name=S, node=N} = Job}, RemQ} = out(Waiting),
- case is_running(S,N,Active) of
true ->
- next_replication(Active, RemQ, in(Job, WaitingAndRunning));
+ nil;
false ->
- {Job, join(RemQ, WaitingAndRunning)}
- end
+ {{value, #job{name = S, node = N} = Job}, RemQ} = out(Waiting),
+ case is_running(S, N, Active) of
+ true ->
+ next_replication(Active, RemQ, in(Job, WaitingAndRunning));
+ false ->
+ {Job, join(RemQ, WaitingAndRunning)}
+ end
end.
is_running(DbName, Node, ActiveList) ->
- [] =/= [true || #job{name=S, node=N} <- ActiveList, S=:=DbName, N=:=Node].
+ [] =/= [true || #job{name = S, node = N} <- ActiveList, S =:= DbName, N =:= Node].
remove_entries(Dict, Entries) ->
- lists:foldl(fun(#job{name=S, node=N}, D) ->
- dict:erase({S, N}, D)
- end, Dict, Entries).
+ lists:foldl(
+ fun(#job{name = S, node = N}, D) ->
+ dict:erase({S, N}, D)
+ end,
+ Dict,
+ Entries
+ ).
local_dbs() ->
UsersDb = users_db(),
diff --git a/src/mem3/src/mem3_sync_event.erl b/src/mem3/src/mem3_sync_event.erl
index 7bca23086..ec6debb45 100644
--- a/src/mem3/src/mem3_sync_event.erl
+++ b/src/mem3/src/mem3_sync_event.erl
@@ -14,8 +14,14 @@
-behaviour(gen_event).
-vsn(1).
--export([init/1, handle_event/2, handle_call/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_event/2,
+ handle_call/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
init(_) ->
net_kernel:monitor_nodes(true),
@@ -25,11 +31,9 @@ handle_event({add_node, Node}, State) when Node =/= node() ->
net_kernel:connect_node(Node),
mem3_sync_nodes:add([Node]),
{ok, State};
-
-handle_event({remove_node, Node}, State) ->
+handle_event({remove_node, Node}, State) ->
mem3_sync:remove_node(Node),
{ok, State};
-
handle_event(_Event, State) ->
{ok, State}.
@@ -41,11 +45,9 @@ handle_info({nodeup, Node}, State) ->
Nodes = lists:filter(fun(N) -> lists:member(N, mem3:nodes()) end, Nodes0),
wait_for_rexi(Nodes, 5),
{ok, State};
-
handle_info({nodedown, Node}, State) ->
mem3_sync:remove_node(Node),
{ok, State};
-
handle_info(_Info, State) ->
{ok, State}.
@@ -75,12 +77,13 @@ wait_for_rexi(Waiting, Retries) ->
case length(Up) > 0 of
true ->
mem3_sync_nodes:add(Up);
- false -> ok
+ false ->
+ ok
end,
case length(NotUp) > 0 andalso Retries > 0 of
true ->
timer:sleep(1000),
- wait_for_rexi(NotUp, Retries-1);
+ wait_for_rexi(NotUp, Retries - 1);
false ->
ok
end.
diff --git a/src/mem3/src/mem3_sync_event_listener.erl b/src/mem3/src/mem3_sync_event_listener.erl
index 5a8d162d2..a01921f85 100644
--- a/src/mem3/src/mem3_sync_event_listener.erl
+++ b/src/mem3/src/mem3_sync_event_listener.erl
@@ -90,7 +90,7 @@ handle_event(UsersDb, updated, #state{users = UsersDb} = St) ->
maybe_push_shards(St);
handle_event(<<"shards/", _/binary>> = ShardName, updated, St) ->
Buckets = bucket_shard(ShardName, St#state.buckets),
- maybe_push_shards(St#state{buckets=Buckets});
+ maybe_push_shards(St#state{buckets = Buckets});
handle_event(<<"shards/", _:18/binary, _/binary>> = ShardName, deleted, St) ->
mem3_sync:remove_shard(ShardName),
maybe_push_shards(St);
@@ -100,11 +100,11 @@ handle_event(_DbName, _Event, St) ->
handle_cast({set_frequency, Frequency}, St) ->
#state{delay = Delay, buckets = Buckets0} = St,
Buckets1 = rebucket_shards(Delay, Frequency, Buckets0),
- maybe_push_shards(St#state{frequency=Frequency, buckets=Buckets1});
+ maybe_push_shards(St#state{frequency = Frequency, buckets = Buckets1});
handle_cast({set_delay, Delay}, St) ->
#state{frequency = Frequency, buckets = Buckets0} = St,
Buckets1 = rebucket_shards(Delay, Frequency, Buckets0),
- maybe_push_shards(St#state{delay=Delay, buckets=Buckets1});
+ maybe_push_shards(St#state{delay = Delay, buckets = Buckets1});
handle_cast(Msg, St) ->
couch_log:notice("unexpected cast to mem3_sync_event_listener: ~p", [Msg]),
maybe_push_shards(St).
@@ -134,19 +134,20 @@ set_config(Cmd, Value, Error) ->
try list_to_integer(Value) of
IntegerValue ->
couch_event_listener:cast(self(), {Cmd, IntegerValue})
- catch error:badarg ->
- couch_log:warning("~s: ~p", [Error, Value])
+ catch
+ error:badarg ->
+ couch_log:warning("~s: ~p", [Error, Value])
end.
-bucket_shard(ShardName, [B|Bs]=Buckets0) ->
+bucket_shard(ShardName, [B | Bs] = Buckets0) ->
case waiting(ShardName, Buckets0) of
true -> Buckets0;
- false -> [sets:add_element(ShardName, B)|Bs]
+ false -> [sets:add_element(ShardName, B) | Bs]
end.
waiting(_, []) ->
false;
-waiting(ShardName, [B|Bs]) ->
+waiting(ShardName, [B | Bs]) ->
case sets:is_element(ShardName, B) of
true -> true;
false -> waiting(ShardName, Bs)
@@ -158,8 +159,8 @@ rebucket_shards(Frequency, Delay, Buckets0) ->
Buckets0;
N when N < 0 ->
%% Reduce the number of buckets by merging the last N + 1 together
- {ToMerge, [B|Buckets1]} = lists:split(abs(N), Buckets0),
- [sets:union([B|ToMerge])|Buckets1];
+ {ToMerge, [B | Buckets1]} = lists:split(abs(N), Buckets0),
+ [sets:union([B | ToMerge]) | Buckets1];
M ->
%% Extend the number of buckets by M
lists:duplicate(M, sets:new()) ++ Buckets0
@@ -170,42 +171,43 @@ rebucket_shards(Frequency, Delay, Buckets0) ->
%% to maybe_push_shards/1 rather than directly. All timing coordination - i.e.,
%% calling mem3_sync:push/2 or setting a proper timeout to ensure that pending
%% messages aren't dropped in case no further messages arrive - is handled here.
-maybe_push_shards(#state{last_push=undefined} = St) ->
- {ok, St#state{last_push=os:timestamp()}, St#state.frequency};
+maybe_push_shards(#state{last_push = undefined} = St) ->
+ {ok, St#state{last_push = os:timestamp()}, St#state.frequency};
maybe_push_shards(St) ->
- #state{frequency=Frequency, last_push=LastPush, buckets=Buckets0} = St,
+ #state{frequency = Frequency, last_push = LastPush, buckets = Buckets0} = St,
Now = os:timestamp(),
Delta = timer:now_diff(Now, LastPush) div 1000,
case Delta > Frequency of
true ->
{Buckets1, [ToPush]} = lists:split(length(Buckets0) - 1, Buckets0),
- Buckets2 = [sets:new()|Buckets1],
+ Buckets2 = [sets:new() | Buckets1],
%% There's no sets:map/2!
sets:fold(
fun(ShardName, _) -> push_shard(ShardName) end,
undefined,
ToPush
),
- {ok, St#state{last_push=Now, buckets=Buckets2}, Frequency};
+ {ok, St#state{last_push = Now, buckets = Buckets2}, Frequency};
false ->
{ok, St, Frequency - Delta}
end.
push_shard(ShardName) ->
try mem3_shards:for_shard_range(ShardName) of
- Shards ->
- Live = nodes(),
- lists:foreach(
- fun(#shard{node=N}) ->
- case lists:member(N, Live) of
- true -> mem3_sync:push(ShardName, N);
- false -> ok
- end
- end,
- Shards
- )
- catch error:database_does_not_exist ->
- ok
+ Shards ->
+ Live = nodes(),
+ lists:foreach(
+ fun(#shard{node = N}) ->
+ case lists:member(N, Live) of
+ true -> mem3_sync:push(ShardName, N);
+ false -> ok
+ end
+ end,
+ Shards
+ )
+ catch
+ error:database_does_not_exist ->
+ ok
end.
subscribe_for_config() ->
@@ -320,7 +322,6 @@ should_terminate(Pid) ->
ok
end).
-
get_state(Pid) ->
Ref = make_ref(),
Pid ! {get_state, Ref, self()},
@@ -330,7 +331,6 @@ get_state(Pid) ->
timeout
end.
-
wait_state(Pid, Field, Val) when is_pid(Pid), is_integer(Field) ->
WaitFun = fun() ->
case get_state(Pid) of
@@ -342,7 +342,6 @@ wait_state(Pid, Field, Val) when is_pid(Pid), is_integer(Field) ->
end,
test_util:wait(WaitFun).
-
wait_config_subscribed(Pid) ->
WaitFun = fun() ->
Handlers = gen_event:which_handlers(config_event),
diff --git a/src/mem3/src/mem3_sync_nodes.erl b/src/mem3/src/mem3_sync_nodes.erl
index 0a4bffcd2..43ca8b756 100644
--- a/src/mem3/src/mem3_sync_nodes.erl
+++ b/src/mem3/src/mem3_sync_nodes.erl
@@ -14,7 +14,6 @@
-behaviour(gen_server).
-vsn(1).
-
-export([start_link/0]).
-export([add/1]).
@@ -23,86 +22,71 @@
-export([monitor_sync/1]).
-
-record(st, {
tid
}).
-
-record(job, {
nodes,
pid,
retry
}).
-
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
add(Nodes) ->
gen_server:cast(?MODULE, {add, Nodes}).
-
init([]) ->
{ok, #st{
tid = ets:new(?MODULE, [set, protected, {keypos, #job.nodes}])
}}.
-
terminate(_Reason, St) ->
- [exit(Pid, kill) || #job{pid=Pid} <- ets:tab2list(St#st.tid)],
+ [exit(Pid, kill) || #job{pid = Pid} <- ets:tab2list(St#st.tid)],
ok.
-
handle_call(Msg, _From, St) ->
{stop, {invalid_call, Msg}, invalid_call, St}.
-
-handle_cast({add, Nodes}, #st{tid=Tid}=St) ->
+handle_cast({add, Nodes}, #st{tid = Tid} = St) ->
case ets:lookup(Tid, Nodes) of
[] ->
Pid = start_sync(Nodes),
- ets:insert(Tid, #job{nodes=Nodes, pid=Pid, retry=false});
- [#job{retry=false}=Job] ->
- ets:insert(Tid, Job#job{retry=true});
+ ets:insert(Tid, #job{nodes = Nodes, pid = Pid, retry = false});
+ [#job{retry = false} = Job] ->
+ ets:insert(Tid, Job#job{retry = true});
_ ->
ok
end,
{noreply, St};
-
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.
-
-handle_info({'DOWN', _, _, _, {sync_done, Nodes}}, #st{tid=Tid}=St) ->
+handle_info({'DOWN', _, _, _, {sync_done, Nodes}}, #st{tid = Tid} = St) ->
case ets:lookup(Tid, Nodes) of
- [#job{retry=true}=Job] ->
+ [#job{retry = true} = Job] ->
Pid = start_sync(Nodes),
- ets:insert(Tid, Job#job{pid=Pid, retry=false});
+ ets:insert(Tid, Job#job{pid = Pid, retry = false});
_ ->
ets:delete(Tid, Nodes)
end,
{noreply, St};
-
-handle_info({'DOWN', _, _, _, {sync_error, Nodes}}, #st{tid=Tid}=St) ->
+handle_info({'DOWN', _, _, _, {sync_error, Nodes}}, #st{tid = Tid} = St) ->
Pid = start_sync(Nodes),
- ets:insert(Tid, #job{nodes=Nodes, pid=Pid, retry=false}),
+ ets:insert(Tid, #job{nodes = Nodes, pid = Pid, retry = false}),
{noreply, St};
-
handle_info(Msg, St) ->
{stop, {invalid_info, Msg}, St}.
-
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
start_sync(Nodes) ->
{Pid, _} = spawn_monitor(?MODULE, monitor_sync, [Nodes]),
Pid.
-
monitor_sync(Nodes) ->
process_flag(trap_exit, true),
Pid = spawn_link(mem3_sync, initial_sync, [Nodes]),
@@ -112,4 +96,3 @@ monitor_sync(Nodes) ->
_ ->
exit({sync_error, Nodes})
end.
-
diff --git a/src/mem3/src/mem3_sync_security.erl b/src/mem3/src/mem3_sync_security.erl
index 291e4e085..fc1726901 100644
--- a/src/mem3/src/mem3_sync_security.erl
+++ b/src/mem3/src/mem3_sync_security.erl
@@ -17,8 +17,7 @@
-include_lib("mem3/include/mem3.hrl").
-
-maybe_sync(#shard{}=Src, #shard{}=Dst) ->
+maybe_sync(#shard{} = Src, #shard{} = Dst) ->
case is_local(Src#shard.name) of
false ->
erlang:spawn(?MODULE, maybe_sync_int, [Src, Dst]);
@@ -26,7 +25,7 @@ maybe_sync(#shard{}=Src, #shard{}=Dst) ->
ok
end.
-maybe_sync_int(#shard{name=Name}=Src, Dst) ->
+maybe_sync_int(#shard{name = Name} = Src, Dst) ->
DbName = mem3:dbname(Name),
case fabric:get_all_security(DbName, [{shards, [Src, Dst]}]) of
{ok, WorkerObjs} ->
@@ -53,44 +52,55 @@ handle_existing_db(DbName) ->
try handle_db(DbName) of
_ -> ok
catch
- error:database_does_not_exist->
- couch_log:error("Db was deleted while getting security"
- " object. DbName: ~p", [DbName]),
+ error:database_does_not_exist ->
+ couch_log:error(
+ "Db was deleted while getting security"
+ " object. DbName: ~p",
+ [DbName]
+ ),
ok
end.
handle_db(DbName) ->
ShardCount = length(mem3:shards(DbName)),
case get_all_security(DbName) of
- {ok, SecObjs} ->
- case is_ok(SecObjs, ShardCount) of
- ok ->
- ok;
- {fixable, SecObj} ->
- couch_log:info("Sync security object for ~p: ~p", [DbName, SecObj]),
- case fabric:set_security(DbName, SecObj) of
- ok -> ok;
- Error ->
- couch_log:error("Error setting security object in ~p: ~p",
- [DbName, Error])
+ {ok, SecObjs} ->
+ case is_ok(SecObjs, ShardCount) of
+ ok ->
+ ok;
+ {fixable, SecObj} ->
+ couch_log:info("Sync security object for ~p: ~p", [DbName, SecObj]),
+ case fabric:set_security(DbName, SecObj) of
+ ok ->
+ ok;
+ Error ->
+ couch_log:error(
+ "Error setting security object in ~p: ~p",
+ [DbName, Error]
+ )
+ end;
+ broken ->
+ couch_log:error("Bad security object in ~p: ~p", [DbName, SecObjs])
end;
- broken ->
- couch_log:error("Bad security object in ~p: ~p", [DbName, SecObjs])
- end;
- Error ->
- couch_log:error("Error getting security objects for ~p: ~p", [
- DbName, Error])
+ Error ->
+ couch_log:error("Error getting security objects for ~p: ~p", [
+ DbName, Error
+ ])
end.
get_all_security(DbName) ->
case fabric:get_all_security(DbName) of
- {ok, SecObjs} ->
- SecObjsDict = lists:foldl(fun({_, SO}, Acc) ->
- dict:update_counter(SO, 1, Acc)
- end, dict:new(), SecObjs),
- {ok, dict:to_list(SecObjsDict)};
- Error ->
- Error
+ {ok, SecObjs} ->
+ SecObjsDict = lists:foldl(
+ fun({_, SO}, Acc) ->
+ dict:update_counter(SO, 1, Acc)
+ end,
+ dict:new(),
+ SecObjs
+ ),
+ {ok, dict:to_list(SecObjsDict)};
+ Error ->
+ Error
end.
is_ok([_], _) ->
@@ -100,7 +110,7 @@ is_ok([_, _] = SecObjs0, ShardCount) ->
% Figure out if we have a simple majority of security objects
% and if so, use that as the correct value. Otherwise we abort
% and rely on human intervention.
- {Count, SecObj} = lists:max([{C, O} || {O, C} <- SecObjs0]),
+ {Count, SecObj} = lists:max([{C, O} || {O, C} <- SecObjs0]),
case Count >= ((ShardCount div 2) + 1) of
true -> {fixable, SecObj};
false -> broken
@@ -109,9 +119,7 @@ is_ok(_, _) ->
% Anything else requires human intervention
broken.
-
is_local(<<"shards/", _/binary>>) ->
false;
is_local(_) ->
true.
-
diff --git a/src/mem3/src/mem3_util.erl b/src/mem3/src/mem3_util.erl
index 005a6b1bc..8547fc071 100644
--- a/src/mem3/src/mem3_util.erl
+++ b/src/mem3/src/mem3_util.erl
@@ -12,9 +12,21 @@
-module(mem3_util).
--export([name_shard/2, create_partition_map/5, build_shards/2,
- n_val/2, q_val/1, to_atom/1, to_integer/1, write_db_doc/1, delete_db_doc/1,
- shard_info/1, ensure_exists/1, open_db_doc/1, update_db_doc/1]).
+-export([
+ name_shard/2,
+ create_partition_map/5,
+ build_shards/2,
+ n_val/2,
+ q_val/1,
+ to_atom/1,
+ to_integer/1,
+ write_db_doc/1,
+ delete_db_doc/1,
+ shard_info/1,
+ ensure_exists/1,
+ open_db_doc/1,
+ update_db_doc/1
+]).
-export([get_or_create_db/2, get_or_create_db_int/2]).
-export([is_deleted/1, rotate_list/2]).
-export([get_shard_opts/1, get_engine_opt/1, get_props_opt/1]).
@@ -41,26 +53,32 @@
-deprecated({create_partition_map, 4, eventually}).
-deprecated({name_shard, 1, eventually}).
--define(RINGTOP, 2 bsl 31). % CRC32 space
+% CRC32 space
+-define(RINGTOP, 2 bsl 31).
-include_lib("mem3/include/mem3.hrl").
-include_lib("couch/include/couch_db.hrl").
-
name_shard(Shard) ->
name_shard(Shard, "").
-name_shard(#shard{dbname = DbName, range=Range} = Shard, Suffix) ->
+name_shard(#shard{dbname = DbName, range = Range} = Shard, Suffix) ->
Name = make_name(DbName, Range, Suffix),
Shard#shard{name = ?l2b(Name)};
-
-name_shard(#ordered_shard{dbname = DbName, range=Range} = Shard, Suffix) ->
+name_shard(#ordered_shard{dbname = DbName, range = Range} = Shard, Suffix) ->
Name = make_name(DbName, Range, Suffix),
Shard#ordered_shard{name = ?l2b(Name)}.
-make_name(DbName, [B,E], Suffix) ->
- ["shards/", couch_util:to_hex(<<B:32/integer>>), "-",
- couch_util:to_hex(<<E:32/integer>>), "/", DbName, Suffix].
+make_name(DbName, [B, E], Suffix) ->
+ [
+ "shards/",
+ couch_util:to_hex(<<B:32/integer>>),
+ "-",
+ couch_util:to_hex(<<E:32/integer>>),
+ "/",
+ DbName,
+ Suffix
+ ].
create_partition_map(DbName, N, Q, Nodes) ->
create_partition_map(DbName, N, Q, Nodes, "").
@@ -69,50 +87,55 @@ create_partition_map(DbName, N, Q, Nodes, Suffix) when Q > 0 ->
UniqueShards = make_key_ranges((?RINGTOP) div Q, 0, []),
Shards0 = lists:flatten([lists:duplicate(N, S) || S <- UniqueShards]),
Shards1 = attach_nodes(Shards0, [], Nodes, []),
- [name_shard(S#shard{dbname=DbName}, Suffix) || S <- Shards1].
+ [name_shard(S#shard{dbname = DbName}, Suffix) || S <- Shards1].
make_key_ranges(I, CurrentPos, Acc) when I > 0, CurrentPos >= ?RINGTOP ->
Acc;
make_key_ranges(Increment, Start, Acc) when Increment > 0 ->
- case Start + 2*Increment of
- X when X > ?RINGTOP ->
- End = ?RINGTOP - 1;
- _ ->
- End = Start + Increment - 1
+ case Start + 2 * Increment of
+ X when X > ?RINGTOP ->
+ End = ?RINGTOP - 1;
+ _ ->
+ End = Start + Increment - 1
end,
- make_key_ranges(Increment, End+1, [#shard{range=[Start, End]} | Acc]).
+ make_key_ranges(Increment, End + 1, [#shard{range = [Start, End]} | Acc]).
attach_nodes([], Acc, _, _) ->
lists:reverse(Acc);
attach_nodes(Shards, Acc, [], UsedNodes) ->
attach_nodes(Shards, Acc, lists:reverse(UsedNodes), []);
attach_nodes([S | Rest], Acc, [Node | Nodes], UsedNodes) ->
- attach_nodes(Rest, [S#shard{node=Node} | Acc], Nodes, [Node | UsedNodes]).
+ attach_nodes(Rest, [S#shard{node = Node} | Acc], Nodes, [Node | UsedNodes]).
open_db_doc(DocId) ->
{ok, Db} = couch_db:open(mem3_sync:shards_db(), [?ADMIN_CTX]),
- try couch_db:open_doc(Db, DocId, [ejson_body]) after couch_db:close(Db) end.
+ try
+ couch_db:open_doc(Db, DocId, [ejson_body])
+ after
+ couch_db:close(Db)
+ end.
write_db_doc(Doc) ->
write_db_doc(mem3_sync:shards_db(), Doc, true).
-write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
+write_db_doc(DbName, #doc{id = Id, body = Body} = Doc, ShouldMutate) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
try couch_db:open_doc(Db, Id, [ejson_body]) of
- {ok, #doc{body = Body}} ->
- % the doc is already in the desired state, we're done here
- ok;
- {not_found, _} when ShouldMutate ->
- try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- ok
- catch conflict ->
- % check to see if this was a replication race or a different edit
- write_db_doc(DbName, Doc, false)
- end;
- _ ->
- % the doc already exists in a different state
- conflict
+ {ok, #doc{body = Body}} ->
+ % the doc is already in the desired state, we're done here
+ ok;
+ {not_found, _} when ShouldMutate ->
+ try couch_db:update_doc(Db, Doc, []) of
+ {ok, _} ->
+ ok
+ catch
+ conflict ->
+ % check to see if this was a replication race or a different edit
+ write_db_doc(DbName, Doc, false)
+ end;
+ _ ->
+ % the doc already exists in a different state
+ conflict
after
couch_db:close(Db)
end.
@@ -120,27 +143,28 @@ write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
update_db_doc(Doc) ->
update_db_doc(mem3_sync:shards_db(), Doc, true).
-update_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) ->
+update_db_doc(DbName, #doc{id = Id, body = Body} = Doc, ShouldMutate) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
try couch_db:open_doc(Db, Id, [ejson_body]) of
- {ok, #doc{body = Body}} ->
- % the doc is already in the desired state, we're done here
- ok;
- {ok, #doc{body = Body1}} ->
- % the doc has a new body to be written
- {ok, _} = couch_db:update_doc(Db, Doc#doc{body=Body1}, []),
- ok;
- {not_found, _} when ShouldMutate ->
- try couch_db:update_doc(Db, Doc, []) of
- {ok, _} ->
- ok
- catch conflict ->
- % check to see if this was a replication race or a different edit
- update_db_doc(DbName, Doc, false)
- end;
- _ ->
- % the doc already exists in a different state
- conflict
+ {ok, #doc{body = Body}} ->
+ % the doc is already in the desired state, we're done here
+ ok;
+ {ok, #doc{body = Body1}} ->
+ % the doc has a new body to be written
+ {ok, _} = couch_db:update_doc(Db, Doc#doc{body = Body1}, []),
+ ok;
+ {not_found, _} when ShouldMutate ->
+ try couch_db:update_doc(Db, Doc, []) of
+ {ok, _} ->
+ ok
+ catch
+ conflict ->
+ % check to see if this was a replication race or a different edit
+ update_db_doc(DbName, Doc, false)
+ end;
+ _ ->
+ % the doc already exists in a different state
+ conflict
after
couch_db:close(Db)
end.
@@ -152,20 +176,21 @@ delete_db_doc(DocId) ->
delete_db_doc(DbName, DocId, ShouldMutate) ->
{ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]),
{ok, Revs} = couch_db:open_doc_revs(Db, DocId, all, []),
- try [Doc#doc{deleted=true} || {ok, #doc{deleted=false}=Doc} <- Revs] of
- [] ->
- not_found;
- Docs when ShouldMutate ->
- try couch_db:update_docs(Db, Docs, []) of
- {ok, _} ->
- ok
- catch conflict ->
- % check to see if this was a replication race or if leafs survived
- delete_db_doc(DbName, DocId, false)
- end;
- _ ->
- % we have live leafs that we aren't allowed to delete. let's bail
- conflict
+ try [Doc#doc{deleted = true} || {ok, #doc{deleted = false} = Doc} <- Revs] of
+ [] ->
+ not_found;
+ Docs when ShouldMutate ->
+ try couch_db:update_docs(Db, Docs, []) of
+ {ok, _} ->
+ ok
+ catch
+ conflict ->
+ % check to see if this was a replication race or if leafs survived
+ delete_db_doc(DbName, DocId, false)
+ end;
+ _ ->
+ % we have live leafs that we aren't allowed to delete. let's bail
+ conflict
after
couch_db:close(Db)
end.
@@ -184,44 +209,62 @@ build_ordered_shards(DbName, DocProps) ->
ByRange = build_shards_by_range(DbName, DocProps),
Symmetrical = lists:sort(ByNode) =:= lists:sort(downcast(ByRange)),
case Symmetrical of
- true -> ByRange;
+ true -> ByRange;
false -> ByNode
end.
build_shards_by_node(DbName, DocProps) ->
{ByNode} = couch_util:get_value(<<"by_node">>, DocProps, {[]}),
Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
- lists:flatmap(fun({Node, Ranges}) ->
- lists:map(fun(Range) ->
- [B,E] = string:tokens(?b2l(Range), "-"),
- Beg = httpd_util:hexlist_to_integer(B),
- End = httpd_util:hexlist_to_integer(E),
- name_shard(#shard{
- dbname = DbName,
- node = to_atom(Node),
- range = [Beg, End],
- opts = get_shard_opts(DocProps)
- }, Suffix)
- end, Ranges)
- end, ByNode).
+ lists:flatmap(
+ fun({Node, Ranges}) ->
+ lists:map(
+ fun(Range) ->
+ [B, E] = string:tokens(?b2l(Range), "-"),
+ Beg = httpd_util:hexlist_to_integer(B),
+ End = httpd_util:hexlist_to_integer(E),
+ name_shard(
+ #shard{
+ dbname = DbName,
+ node = to_atom(Node),
+ range = [Beg, End],
+ opts = get_shard_opts(DocProps)
+ },
+ Suffix
+ )
+ end,
+ Ranges
+ )
+ end,
+ ByNode
+ ).
build_shards_by_range(DbName, DocProps) ->
{ByRange} = couch_util:get_value(<<"by_range">>, DocProps, {[]}),
Suffix = couch_util:get_value(<<"shard_suffix">>, DocProps, ""),
- lists:flatmap(fun({Range, Nodes}) ->
- lists:map(fun({Node, Order}) ->
- [B,E] = string:tokens(?b2l(Range), "-"),
- Beg = httpd_util:hexlist_to_integer(B),
- End = httpd_util:hexlist_to_integer(E),
- name_shard(#ordered_shard{
- dbname = DbName,
- node = to_atom(Node),
- range = [Beg, End],
- order = Order,
- opts = get_shard_opts(DocProps)
- }, Suffix)
- end, lists:zip(Nodes, lists:seq(1, length(Nodes))))
- end, ByRange).
+ lists:flatmap(
+ fun({Range, Nodes}) ->
+ lists:map(
+ fun({Node, Order}) ->
+ [B, E] = string:tokens(?b2l(Range), "-"),
+ Beg = httpd_util:hexlist_to_integer(B),
+ End = httpd_util:hexlist_to_integer(E),
+ name_shard(
+ #ordered_shard{
+ dbname = DbName,
+ node = to_atom(Node),
+ range = [Beg, End],
+ order = Order,
+ opts = get_shard_opts(DocProps)
+ },
+ Suffix
+ )
+ end,
+ lists:zip(Nodes, lists:seq(1, length(Nodes)))
+ )
+ end,
+ ByRange
+ ).
to_atom(Node) when is_binary(Node) ->
list_to_atom(binary_to_list(Node));
@@ -256,15 +299,12 @@ get_props_opt(DocProps) ->
db_props_from_json([]) ->
[];
-
db_props_from_json([{<<"partitioned">>, Value} | Rest]) ->
[{partitioned, Value} | db_props_from_json(Rest)];
-
db_props_from_json([{<<"hash">>, [MBin, FBin, A]} | Rest]) ->
M = binary_to_existing_atom(MBin, utf8),
F = binary_to_existing_atom(FBin, utf8),
[{hash, [M, F, A]} | db_props_from_json(Rest)];
-
db_props_from_json([{K, V} | Rest]) ->
[{K, V} | db_props_from_json(Rest)].
@@ -288,28 +328,29 @@ q_val(_) ->
throw({error, invalid_q_value}).
shard_info(DbName) ->
- [{n, mem3:n(DbName)},
- {q, length(mem3:shards(DbName)) div mem3:n(DbName)}].
+ [
+ {n, mem3:n(DbName)},
+ {q, length(mem3:shards(DbName)) div mem3:n(DbName)}
+ ].
ensure_exists(DbName) when is_list(DbName) ->
ensure_exists(list_to_binary(DbName));
ensure_exists(DbName) ->
Options = [nologifmissing, sys_db, {create_if_missing, true}, ?ADMIN_CTX],
case couch_db:open(DbName, Options) of
- {ok, Db} ->
- {ok, Db};
- file_exists ->
- couch_db:open(DbName, [sys_db, ?ADMIN_CTX])
+ {ok, Db} ->
+ {ok, Db};
+ file_exists ->
+ couch_db:open(DbName, [sys_db, ?ADMIN_CTX])
end.
-
is_deleted(Change) ->
case couch_util:get_value(<<"deleted">>, Change) of
- undefined ->
- % keep backwards compatibility for a while
- couch_util:get_value(deleted, Change, false);
- Else ->
- Else
+ undefined ->
+ % keep backwards compatibility for a while
+ couch_util:get_value(deleted, Change, false);
+ Else ->
+ Else
end.
rotate_list(_Key, []) ->
@@ -320,44 +361,41 @@ rotate_list(Key, List) ->
{H, T} = lists:split(erlang:crc32(Key) rem length(List), List),
T ++ H.
-downcast(#shard{}=S) ->
+downcast(#shard{} = S) ->
S;
-downcast(#ordered_shard{}=S) ->
+downcast(#ordered_shard{} = S) ->
#shard{
- name = S#ordered_shard.name,
- node = S#ordered_shard.node,
- dbname = S#ordered_shard.dbname,
- range = S#ordered_shard.range,
- ref = S#ordered_shard.ref,
- opts = S#ordered_shard.opts
- };
+ name = S#ordered_shard.name,
+ node = S#ordered_shard.node,
+ dbname = S#ordered_shard.dbname,
+ range = S#ordered_shard.range,
+ ref = S#ordered_shard.ref,
+ opts = S#ordered_shard.opts
+ };
downcast(Shards) when is_list(Shards) ->
[downcast(Shard) || Shard <- Shards].
iso8601_timestamp() ->
- {_,_,Micro} = Now = os:timestamp(),
- {{Year,Month,Date},{Hour,Minute,Second}} = calendar:now_to_datetime(Now),
+ {_, _, Micro} = Now = os:timestamp(),
+ {{Year, Month, Date}, {Hour, Minute, Second}} = calendar:now_to_datetime(Now),
Format = "~4.10.0B-~2.10.0B-~2.10.0BT~2.10.0B:~2.10.0B:~2.10.0B.~6.10.0BZ",
io_lib:format(Format, [Year, Month, Date, Hour, Minute, Second, Micro]).
-
live_nodes() ->
LiveNodes = [node() | nodes()],
Mem3Nodes = lists:sort(mem3:nodes()),
[N || N <- Mem3Nodes, lists:member(N, LiveNodes)].
-
% Replicate "dbs" db to all nodes. Basically push the changes to all the live
% mem3:nodes(). Returns only after all current changes have been replicated,
% which could be a while.
%
replicate_dbs_to_all_nodes(Timeout) ->
DbName = mem3_sync:shards_db(),
- Targets= mem3_util:live_nodes() -- [node()],
- Res = [start_replication(node(), T, DbName, Timeout) || T <- Targets],
+ Targets = mem3_util:live_nodes() -- [node()],
+ Res = [start_replication(node(), T, DbName, Timeout) || T <- Targets],
collect_replication_results(Res, Timeout).
-
% Replicate "dbs" db from all nodes to this node. Basically make an rpc call
% to all the nodes an have them push their changes to this node. Then monitor
% them until they are all done.
@@ -368,7 +406,6 @@ replicate_dbs_from_all_nodes(Timeout) ->
Res = [start_replication(S, node(), DbName, Timeout) || S <- Sources],
collect_replication_results(Res, Timeout).
-
% Spawn and monitor a single replication of a database to a target node.
% Returns {ok, PidRef}. This function could be called locally or remotely from
% mem3_rpc, for instance when replicating other nodes' data to this node.
@@ -383,7 +420,6 @@ start_replication(Source, Target, DbName, Timeout) ->
end
end).
-
collect_replication_results(Replications, Timeout) ->
Res = [collect_replication_result(R, Timeout) || R <- Replications],
case [R || R <- Res, R =/= ok] of
@@ -393,7 +429,6 @@ collect_replication_results(Replications, Timeout) ->
{error, Errors}
end.
-
collect_replication_result({Pid, Ref}, Timeout) when is_pid(Pid) ->
receive
{'DOWN', Ref, _, _, Res} ->
@@ -403,11 +438,9 @@ collect_replication_result({Pid, Ref}, Timeout) when is_pid(Pid) ->
exit(Pid, kill),
{error, {timeout, Timeout, node(Pid)}}
end;
-
collect_replication_result(Error, _) ->
{error, Error}.
-
% Consider these cases:
%
% A-------B
@@ -423,64 +456,77 @@ collect_replication_result(Error, _) ->
% X-Y because X !=< B
%
range_overlap([A, B], [X, Y]) when
- is_integer(A), is_integer(B),
- is_integer(X), is_integer(Y),
- A =< B, X =< Y ->
+ is_integer(A),
+ is_integer(B),
+ is_integer(X),
+ is_integer(Y),
+ A =< B,
+ X =< Y
+->
A =< Y andalso X =< B.
-
non_overlapping_shards(Shards) ->
- {Start, End} = lists:foldl(fun(Shard, {Min, Max}) ->
- [B, E] = mem3:range(Shard),
- {min(B, Min), max(E, Max)}
- end, {0, ?RING_END}, Shards),
+ {Start, End} = lists:foldl(
+ fun(Shard, {Min, Max}) ->
+ [B, E] = mem3:range(Shard),
+ {min(B, Min), max(E, Max)}
+ end,
+ {0, ?RING_END},
+ Shards
+ ),
non_overlapping_shards(Shards, Start, End).
-
non_overlapping_shards([], _, _) ->
[];
-
non_overlapping_shards(Shards, Start, End) ->
- Ranges = lists:map(fun(Shard) ->
- [B, E] = mem3:range(Shard),
- {B, E}
- end, Shards),
+ Ranges = lists:map(
+ fun(Shard) ->
+ [B, E] = mem3:range(Shard),
+ {B, E}
+ end,
+ Shards
+ ),
Ring = get_ring(Ranges, fun sort_ranges_fun/2, Start, End),
- lists:filter(fun(Shard) ->
- [B, E] = mem3:range(Shard),
- lists:member({B, E}, Ring)
- end, Shards).
-
+ lists:filter(
+ fun(Shard) ->
+ [B, E] = mem3:range(Shard),
+ lists:member({B, E}, Ring)
+ end,
+ Shards
+ ).
% Given a list of shards, return the maximum number of copies
% across all the ranges. If the ring is incomplete it will return 0.
% If there it is an n = 1 database, it should return 1, etc.
calculate_max_n(Shards) ->
- Ranges = lists:map(fun(Shard) ->
- [B, E] = mem3:range(Shard),
- {B, E}
- end, Shards),
+ Ranges = lists:map(
+ fun(Shard) ->
+ [B, E] = mem3:range(Shard),
+ {B, E}
+ end,
+ Shards
+ ),
calculate_max_n(Ranges, get_ring(Ranges), 0).
-
calculate_max_n(_Ranges, [], N) ->
N;
-
calculate_max_n(Ranges, Ring, N) ->
NewRanges = Ranges -- Ring,
calculate_max_n(NewRanges, get_ring(NewRanges), N + 1).
-
get_ring(Ranges) ->
get_ring(Ranges, fun sort_ranges_fun/2, 0, ?RING_END).
-
get_ring(Ranges, SortFun) when is_function(SortFun, 2) ->
get_ring(Ranges, SortFun, 0, ?RING_END).
-
-get_ring(Ranges, Start, End) when is_integer(Start), is_integer(End),
- Start >= 0, End >= 0, Start =< End ->
+get_ring(Ranges, Start, End) when
+ is_integer(Start),
+ is_integer(End),
+ Start >= 0,
+ End >= 0,
+ Start =< End
+->
get_ring(Ranges, fun sort_ranges_fun/2, Start, End).
% Build a ring out of a list of possibly overlapping ranges. If a ring cannot
@@ -493,19 +539,22 @@ get_ring(Ranges, Start, End) when is_integer(Start), is_integer(End),
%
get_ring([], _SortFun, _Start, _End) ->
[];
-get_ring(Ranges, SortFun, Start, End) when is_function(SortFun, 2),
- is_integer(Start), is_integer(End),
- Start >= 0, End >= 0, Start =< End ->
+get_ring(Ranges, SortFun, Start, End) when
+ is_function(SortFun, 2),
+ is_integer(Start),
+ is_integer(End),
+ Start >= 0,
+ End >= 0,
+ Start =< End
+->
Sorted = lists:usort(SortFun, Ranges),
case get_subring_int(Start, End, Sorted) of
fail -> [];
Ring -> Ring
end.
-
get_subring_int(_, _, []) ->
fail;
-
get_subring_int(Start, EndMax, [{Start, End} = Range | Tail]) ->
case End =:= EndMax of
true ->
@@ -518,33 +567,28 @@ get_subring_int(Start, EndMax, [{Start, End} = Range | Tail]) ->
[Range | Acc]
end
end;
-
get_subring_int(Start1, _, [{Start2, _} | _]) when Start2 > Start1 ->
% Found a gap, this attempt is done
fail;
-
get_subring_int(Start1, EndMax, [{Start2, _} | Rest]) when Start2 < Start1 ->
% We've overlapped the head, skip the shard
get_subring_int(Start1, EndMax, Rest).
-
% Sort ranges by starting point, then sort so that
% the longest range comes first
sort_ranges_fun({B, E1}, {B, E2}) ->
E2 =< E1;
-
sort_ranges_fun({B1, _}, {B2, _}) ->
B1 =< B2.
-
add_db_config_options(DbName, Options) ->
- DbOpts = case mem3:dbname(DbName) of
- DbName -> [];
- MDbName -> mem3_shards:opts_for_db(MDbName)
- end,
+ DbOpts =
+ case mem3:dbname(DbName) of
+ DbName -> [];
+ MDbName -> mem3_shards:opts_for_db(MDbName)
+ end,
merge_opts(DbOpts, Options).
-
get_or_create_db(DbName, Options) ->
case couch_db:open(DbName, Options) of
{ok, _} = OkDb ->
@@ -554,14 +598,14 @@ get_or_create_db(DbName, Options) ->
Options1 = [{create_if_missing, true} | Options],
Options2 = add_db_config_options(DbName, Options1),
couch_db:open(DbName, Options2)
- catch error:database_does_not_exist ->
- throw({error, missing_target})
+ catch
+ error:database_does_not_exist ->
+ throw({error, missing_target})
end;
Else ->
Else
end.
-
get_or_create_db_int(DbName, Options) ->
case couch_db:open_int(DbName, Options) of
{ok, _} = OkDb ->
@@ -571,35 +615,40 @@ get_or_create_db_int(DbName, Options) ->
Options1 = [{create_if_missing, true} | Options],
Options2 = add_db_config_options(DbName, Options1),
couch_db:open_int(DbName, Options2)
- catch error:database_does_not_exist ->
- throw({error, missing_target})
+ catch
+ error:database_does_not_exist ->
+ throw({error, missing_target})
end;
Else ->
Else
end.
-
%% merge two proplists, atom options only valid in Old
merge_opts(New, Old) ->
- lists:foldl(fun({Key, Val}, Acc) ->
- lists:keystore(Key, 1, Acc, {Key, Val})
- end, Old, New).
-
+ lists:foldl(
+ fun({Key, Val}, Acc) ->
+ lists:keystore(Key, 1, Acc, {Key, Val})
+ end,
+ Old,
+ New
+ ).
get_shard_props(ShardName) ->
case couch_db:open_int(ShardName, []) of
{ok, Db} ->
- Props = case couch_db_engine:get_props(Db) of
- undefined -> [];
- Else -> Else
- end,
+ Props =
+ case couch_db_engine:get_props(Db) of
+ undefined -> [];
+ Else -> Else
+ end,
%% We don't normally store the default engine name
- EngineProps = case couch_db_engine:get_engine(Db) of
- couch_bt_engine ->
- [];
- EngineName ->
- [{engine, EngineName}]
- end,
+ EngineProps =
+ case couch_db_engine:get_engine(Db) of
+ couch_bt_engine ->
+ [];
+ EngineName ->
+ [{engine, EngineName}]
+ end,
[{props, Props} | EngineProps];
{not_found, _} ->
not_found;
@@ -607,92 +656,98 @@ get_shard_props(ShardName) ->
Else
end.
-
find_dirty_shards() ->
- mem3_shards:fold(fun(#shard{node=Node, name=Name, opts=Opts}=Shard, Acc) ->
- case Opts of
- [] ->
- Acc;
- [{props, []}] ->
- Acc;
- _ ->
- Props = rpc:call(Node, ?MODULE, get_shard_props, [Name]),
- case Props =:= Opts of
- true ->
- Acc;
- false ->
- [{Shard, Props} | Acc]
- end
- end
- end, []).
-
+ mem3_shards:fold(
+ fun(#shard{node = Node, name = Name, opts = Opts} = Shard, Acc) ->
+ case Opts of
+ [] ->
+ Acc;
+ [{props, []}] ->
+ Acc;
+ _ ->
+ Props = rpc:call(Node, ?MODULE, get_shard_props, [Name]),
+ case Props =:= Opts of
+ true ->
+ Acc;
+ false ->
+ [{Shard, Props} | Acc]
+ end
+ end
+ end,
+ []
+ ).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
range_overlap_test_() ->
- [?_assertEqual(Res, range_overlap(R1, R2)) || {R1, R2, Res} <- [
- {[2, 6], [1, 3], true},
- {[2, 6], [3, 4], true},
- {[2, 6], [4, 8], true},
- {[2, 6], [1, 9], true},
- {[2, 6], [1, 2], true},
- {[2, 6], [6, 7], true},
- {[2, 6], [0, 1], false},
- {[2, 6], [7, 9], false}
- ]].
-
+ [
+ ?_assertEqual(Res, range_overlap(R1, R2))
+ || {R1, R2, Res} <- [
+ {[2, 6], [1, 3], true},
+ {[2, 6], [3, 4], true},
+ {[2, 6], [4, 8], true},
+ {[2, 6], [1, 9], true},
+ {[2, 6], [1, 2], true},
+ {[2, 6], [6, 7], true},
+ {[2, 6], [0, 1], false},
+ {[2, 6], [7, 9], false}
+ ]
+ ].
non_overlapping_shards_test() ->
- [?_assertEqual(Res, non_overlapping_shards(Shards)) || {Shards, Res} <- [
- {
- [shard(0, ?RING_END)],
- [shard(0, ?RING_END)]
- },
- {
- [shard(0, 1)],
- [shard(0, 1)]
- },
- {
- [shard(0, 1), shard(0, 1)],
- [shard(0, 1)]
- },
- {
- [shard(0, 1), shard(3, 4)],
- []
- },
- {
- [shard(0, 1), shard(1, 2), shard(2, 3)],
- [shard(0, 1), shard(2, 3)]
- },
- {
- [shard(1, 2), shard(0, 1)],
- [shard(0, 1), shard(1, 2)]
- },
- {
- [shard(0, 1), shard(0, 2), shard(2, 5), shard(3, 5)],
- [shard(0, 2), shard(2, 5)]
- },
- {
- [shard(0, 2), shard(4, 5), shard(1, 3)],
- []
- }
-
- ]].
-
+ [
+ ?_assertEqual(Res, non_overlapping_shards(Shards))
+ || {Shards, Res} <- [
+ {
+ [shard(0, ?RING_END)],
+ [shard(0, ?RING_END)]
+ },
+ {
+ [shard(0, 1)],
+ [shard(0, 1)]
+ },
+ {
+ [shard(0, 1), shard(0, 1)],
+ [shard(0, 1)]
+ },
+ {
+ [shard(0, 1), shard(3, 4)],
+ []
+ },
+ {
+ [shard(0, 1), shard(1, 2), shard(2, 3)],
+ [shard(0, 1), shard(2, 3)]
+ },
+ {
+ [shard(1, 2), shard(0, 1)],
+ [shard(0, 1), shard(1, 2)]
+ },
+ {
+ [shard(0, 1), shard(0, 2), shard(2, 5), shard(3, 5)],
+ [shard(0, 2), shard(2, 5)]
+ },
+ {
+ [shard(0, 2), shard(4, 5), shard(1, 3)],
+ []
+ }
+ ]
+ ].
calculate_max_n_test_() ->
- [?_assertEqual(Res, calculate_max_n(Shards)) || {Res, Shards} <- [
- {0, []},
- {0, [shard(1, ?RING_END)]},
- {1, [shard(0, ?RING_END)]},
- {1, [shard(0, ?RING_END), shard(1, ?RING_END)]},
- {2, [shard(0, ?RING_END), shard(0, ?RING_END)]},
- {2, [shard(0, 1), shard(2, ?RING_END), shard(0, ?RING_END)]},
- {0, [shard(0, 3), shard(5, ?RING_END), shard(1, ?RING_END)]}
- ]].
-
+ [
+ ?_assertEqual(Res, calculate_max_n(Shards))
+ || {Res, Shards} <- [
+ {0, []},
+ {0, [shard(1, ?RING_END)]},
+ {1, [shard(0, ?RING_END)]},
+ {1, [shard(0, ?RING_END), shard(1, ?RING_END)]},
+ {2, [shard(0, ?RING_END), shard(0, ?RING_END)]},
+ {2, [shard(0, 1), shard(2, ?RING_END), shard(0, ?RING_END)]},
+ {0, [shard(0, 3), shard(5, ?RING_END), shard(1, ?RING_END)]}
+ ]
+ ].
shard(Begin, End) ->
#shard{range = [Begin, End]}.
diff --git a/src/mem3/test/eunit/mem3_bdu_test.erl b/src/mem3/test/eunit/mem3_bdu_test.erl
index ad047f6e9..849295691 100644
--- a/src/mem3/test/eunit/mem3_bdu_test.erl
+++ b/src/mem3/test/eunit/mem3_bdu_test.erl
@@ -12,11 +12,9 @@
-module(mem3_bdu_test).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-
-define(TDEF_FE(Name), fun(Arg) -> {atom_to_list(Name), ?_test(Name(Arg))} end).
-define(USER, "mem3_bdu_test_admin").
@@ -24,10 +22,9 @@
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
-define(JSON, {"Content-Type", "application/json"}).
-
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Db = ?tempdb(),
Port = mochiweb_socket_server:get(chttpd, port),
@@ -35,29 +32,27 @@ setup() ->
ShardsDb = "_node/_local/" ++ config:get("mem3", "shards_db", "_dbs"),
{Url, Db, ShardsDb}.
-
teardown({Url, Db, _}) ->
sync_delete_db(Url, Db),
- ok = config:delete("admins", ?USER, _Persist=false).
-
+ ok = config:delete("admins", ?USER, _Persist = false).
start_couch() ->
test_util:start_couch([mem3, chttpd]).
-
stop_couch(Ctx) ->
test_util:stop_couch(Ctx).
-
mem3_bdu_shard_doc_test_() ->
{
"mem3 bdu shard doc tests",
{
setup,
- fun start_couch/0, fun stop_couch/1,
+ fun start_couch/0,
+ fun stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
?TDEF_FE(t_can_insert_shard_map_doc),
?TDEF_FE(t_missing_by_node_section),
@@ -77,7 +72,6 @@ mem3_bdu_shard_doc_test_() ->
}
}.
-
t_can_insert_shard_map_doc({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -91,7 +85,6 @@ t_can_insert_shard_map_doc({Top, Db, ShardsDb}) ->
?assertEqual(201, Code),
?assertMatch(#{<<"ok">> := true}, Res).
-
t_missing_by_node_section({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -101,7 +94,6 @@ t_missing_by_node_section({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_by_node_not_a_map({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -112,7 +104,6 @@ t_by_node_not_a_map({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_missing_by_range_section({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -122,7 +113,6 @@ t_missing_by_range_section({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_by_range_not_a_map({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -133,7 +123,6 @@ t_by_range_not_a_map({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_missing_range_in_by_range({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -144,7 +133,6 @@ t_missing_range_in_by_range({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_missing_node_in_by_range_node_list({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -155,7 +143,6 @@ t_missing_node_in_by_range_node_list({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_missing_node_in_by_node({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -166,7 +153,6 @@ t_missing_node_in_by_node({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_missing_range_in_by_node_range_list({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -177,7 +163,6 @@ t_missing_range_in_by_node_range_list({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_by_node_val_not_array({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -188,7 +173,6 @@ t_by_node_val_not_array({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_by_range_val_not_array({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -199,7 +183,6 @@ t_by_range_val_not_array({Top, Db, ShardsDb}) ->
},
?assertMatch({403, _}, req(post, Top ++ ShardsDb, ShardMap)).
-
t_design_docs_are_not_validated({Top, _, ShardsDb}) ->
Suffix = integer_to_list(erlang:system_time() + rand:uniform(1000)),
DDocId = list_to_binary("_design/ddoc_bdu_test-" ++ Suffix),
@@ -214,7 +197,6 @@ t_design_docs_are_not_validated({Top, _, ShardsDb}) ->
},
?assertMatch({200, _}, req(post, Top ++ ShardsDb, Deleted)).
-
t_replicated_changes_not_validated({Top, Db, ShardsDb}) ->
Node = atom_to_binary(node(), utf8),
Range = <<"00000000-ffffffff">>,
@@ -244,7 +226,6 @@ t_replicated_changes_not_validated({Top, Db, ShardsDb}) ->
},
?assertMatch({200, _}, req(post, Top ++ ShardsDb, Deleted)).
-
delete_db(Top, Db) when is_binary(Db) ->
Url = Top ++ binary_to_list(Db),
case test_request:get(Url, [?AUTH]) of
@@ -255,7 +236,6 @@ delete_db(Top, Db) when is_binary(Db) ->
ok
end.
-
sync_delete_db(Top, Db) when is_binary(Db) ->
delete_db(Top, Db),
try
@@ -268,15 +248,12 @@ sync_delete_db(Top, Db) when is_binary(Db) ->
ok
end.
-
req(Method, Url, #{} = Body) ->
req(Method, Url, jiffy:encode(Body));
-
req(Method, Url, Body) ->
Headers = [?JSON, ?AUTH],
{ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
{Code, jiffy:decode(Res, [return_maps])}.
-
suffix() ->
integer_to_list(erlang:system_time(second)).
diff --git a/src/mem3/test/eunit/mem3_cluster_test.erl b/src/mem3/test/eunit/mem3_cluster_test.erl
index 4610d64bd..d1a0fcd38 100644
--- a/src/mem3/test/eunit/mem3_cluster_test.erl
+++ b/src/mem3/test/eunit/mem3_cluster_test.erl
@@ -21,7 +21,6 @@
cluster_stable/1
]).
-
% Mem3 cluster callbacks
cluster_unstable(Server) ->
@@ -32,7 +31,6 @@ cluster_stable(Server) ->
Server ! cluster_stable,
Server.
-
mem3_cluster_test_test_() ->
{
foreach,
@@ -43,81 +41,79 @@ mem3_cluster_test_test_() ->
t_cluster_unstable_delivered_on_nodeup(),
t_cluster_unstable_delivered_on_nodedown(),
t_wait_period_is_reset_after_last_change()
- ]
+ ]
}.
-
t_cluster_stable_during_startup_period() ->
- ?_test(begin
+ ?_test(begin
{ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
register(?MODULE, Pid),
receive
cluster_stable ->
?assert(true)
- after 1500 ->
- throw(timeout)
+ after 1500 ->
+ throw(timeout)
end,
unlink(Pid),
exit(Pid, kill)
end).
-
t_cluster_unstable_delivered_on_nodeup() ->
- ?_test(begin
+ ?_test(begin
{ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
register(?MODULE, Pid),
Pid ! {nodeup, node()},
receive
cluster_unstable ->
?assert(true)
- after 1000 ->
- throw(timeout)
+ after 1000 ->
+ throw(timeout)
end,
unlink(Pid),
exit(Pid, kill)
end).
-
t_cluster_unstable_delivered_on_nodedown() ->
- ?_test(begin
+ ?_test(begin
{ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 2),
register(?MODULE, Pid),
Pid ! {nodedown, node()},
receive
cluster_unstable ->
?assert(true)
- after 1000 ->
- throw(timeout)
+ after 1000 ->
+ throw(timeout)
end,
unlink(Pid),
exit(Pid, kill)
end).
-
t_wait_period_is_reset_after_last_change() ->
- ?_test(begin
+ ?_test(begin
{ok, Pid} = mem3_cluster:start_link(?MODULE, self(), 1, 1),
register(?MODULE, Pid),
timer:sleep(800),
- Pid ! {nodeup, node()}, % after 800 sec send a nodeup
+ % after 800 sec send a nodeup
+ Pid ! {nodeup, node()},
receive
cluster_stable ->
?assert(false)
- after 400 ->
- ?assert(true) % stability check should have been reset
+ after 400 ->
+ % stability check should have been reset
+ ?assert(true)
end,
timer:sleep(1000),
receive
cluster_stable ->
?assert(true)
- after 0 ->
- ?assert(false) % cluster_stable arrives after enough quiet time
+ after 0 ->
+ % cluster_stable arrives after enough quiet time
+ ?assert(false)
end,
unlink(Pid),
exit(Pid, kill)
end).
-
% Test helper functions
setup() ->
diff --git a/src/mem3/test/eunit/mem3_hash_test.erl b/src/mem3/test/eunit/mem3_hash_test.erl
index 7a40c5366..beeb0ac63 100644
--- a/src/mem3/test/eunit/mem3_hash_test.erl
+++ b/src/mem3/test/eunit/mem3_hash_test.erl
@@ -15,9 +15,9 @@
-include_lib("eunit/include/eunit.hrl").
hash_test() ->
- ?assertEqual(1624516141,mem3_hash:crc32(0)),
- ?assertEqual(3816901808,mem3_hash:crc32("0")),
- ?assertEqual(3523407757,mem3_hash:crc32(<<0>>)),
- ?assertEqual(4108050209,mem3_hash:crc32(<<"0">>)),
- ?assertEqual(3094724072,mem3_hash:crc32(zero)),
+ ?assertEqual(1624516141, mem3_hash:crc32(0)),
+ ?assertEqual(3816901808, mem3_hash:crc32("0")),
+ ?assertEqual(3523407757, mem3_hash:crc32(<<0>>)),
+ ?assertEqual(4108050209, mem3_hash:crc32(<<"0">>)),
+ ?assertEqual(3094724072, mem3_hash:crc32(zero)),
ok.
diff --git a/src/mem3/test/eunit/mem3_rep_test.erl b/src/mem3/test/eunit/mem3_rep_test.erl
index 4a46e7b93..31a6d9b77 100644
--- a/src/mem3/test/eunit/mem3_rep_test.erl
+++ b/src/mem3/test/eunit/mem3_rep_test.erl
@@ -12,15 +12,14 @@
-module(mem3_rep_test).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("couch_mrview/include/couch_mrview.hrl").
-include_lib("mem3/include/mem3.hrl").
-
-define(ID, <<"_id">>).
--define(TIMEOUT, 60). % seconds
+% seconds
+-define(TIMEOUT, 60).
setup() ->
{AllSrc, AllTgt} = {?tempdb(), ?tempdb()},
@@ -32,28 +31,26 @@ setup() ->
create_db(PartTgt, [{q, 2}, {n, 1}, {props, PartProps}]),
#{allsrc => AllSrc, alltgt => AllTgt, partsrc => PartSrc, parttgt => PartTgt}.
-
teardown(#{} = Dbs) ->
maps:map(fun(_, Db) -> delete_db(Db) end, Dbs).
-
start_couch() ->
test_util:start_couch([mem3, fabric]).
-
stop_couch(Ctx) ->
test_util:stop_couch(Ctx).
-
mem3_reshard_db_test_() ->
{
"mem3 rep db tests",
{
setup,
- fun start_couch/0, fun stop_couch/1,
+ fun start_couch/0,
+ fun stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun replicate_basics/1,
fun replicate_small_batches/1,
@@ -64,150 +61,148 @@ mem3_reshard_db_test_() ->
}
}.
-
replicate_basics(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{docs => 10, delete => [5, 9]},
+ add_test_docs(AllSrc, DocSpec),
+ SDocs = get_all_docs(AllSrc),
+
+ [Src] = lists:sort(mem3:local_shards(AllSrc)),
+ [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
+ #shard{range = R1} = Tgt1,
+ #shard{range = R2} = Tgt2,
+ TMap = #{R1 => Tgt1, R2 => Tgt2},
+ Opts = [{batch_size, 1000}, {batch_count, all}],
+ ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
+
+ ?assertEqual(SDocs, get_all_docs(AllTgt))
+ end)}.
replicate_small_batches(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 2}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{docs => 10, delete => [5, 9]},
+ add_test_docs(AllSrc, DocSpec),
+ SDocs = get_all_docs(AllSrc),
+
+ [Src] = lists:sort(mem3:local_shards(AllSrc)),
+ [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
+ #shard{range = R1} = Tgt1,
+ #shard{range = R2} = Tgt2,
+ TMap = #{R1 => Tgt1, R2 => Tgt2},
+ Opts = [{batch_size, 2}, {batch_count, all}],
+ ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
+
+ ?assertEqual(SDocs, get_all_docs(AllTgt))
+ end)}.
replicate_low_batch_count(#{allsrc := AllSrc, alltgt := AllTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9]},
- add_test_docs(AllSrc, DocSpec),
- SDocs = get_all_docs(AllSrc),
-
- [Src] = lists:sort(mem3:local_shards(AllSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{docs => 10, delete => [5, 9]},
+ add_test_docs(AllSrc, DocSpec),
+ SDocs = get_all_docs(AllSrc),
- Opts1 = [{batch_size, 2}, {batch_count, 1}],
- ?assertMatch({ok, 8}, mem3_rep:go(Src, TMap, Opts1)),
+ [Src] = lists:sort(mem3:local_shards(AllSrc)),
+ [Tgt1, Tgt2] = lists:sort(mem3:local_shards(AllTgt)),
+ #shard{range = R1} = Tgt1,
+ #shard{range = R2} = Tgt2,
+ TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts2 = [{batch_size, 1}, {batch_count, 2}],
- ?assertMatch({ok, 6}, mem3_rep:go(Src, TMap, Opts2)),
+ Opts1 = [{batch_size, 2}, {batch_count, 1}],
+ ?assertMatch({ok, 8}, mem3_rep:go(Src, TMap, Opts1)),
- Opts3 = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)),
+ Opts2 = [{batch_size, 1}, {batch_count, 2}],
+ ?assertMatch({ok, 6}, mem3_rep:go(Src, TMap, Opts2)),
- ?assertEqual(SDocs, get_all_docs(AllTgt))
- end)}.
+ Opts3 = [{batch_size, 1000}, {batch_count, all}],
+ ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts3)),
+ ?assertEqual(SDocs, get_all_docs(AllTgt))
+ end)}.
replicate_with_partitions(#{partsrc := PartSrc, parttgt := PartTgt}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{
- pdocs => #{
- <<"PX">> => 15,
- <<"PY">> => 19
- }
- },
- add_test_docs(PartSrc, DocSpec),
- SDocs = get_all_docs(PartSrc),
- PXSrc = get_partition_info(PartSrc, <<"PX">>),
- PYSrc = get_partition_info(PartSrc, <<"PY">>),
-
- [Src] = lists:sort(mem3:local_shards(PartSrc)),
- [Tgt1, Tgt2] = lists:sort(mem3:local_shards(PartTgt)),
- #shard{range = R1} = Tgt1,
- #shard{range = R2} = Tgt2,
- TMap = #{R1 => Tgt1, R2 => Tgt2},
- Opts = [{batch_size, 1000}, {batch_count, all}],
- ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
-
- ?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)),
- ?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)),
- ?assertEqual(SDocs, get_all_docs(PartTgt))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{
+ pdocs => #{
+ <<"PX">> => 15,
+ <<"PY">> => 19
+ }
+ },
+ add_test_docs(PartSrc, DocSpec),
+ SDocs = get_all_docs(PartSrc),
+ PXSrc = get_partition_info(PartSrc, <<"PX">>),
+ PYSrc = get_partition_info(PartSrc, <<"PY">>),
+
+ [Src] = lists:sort(mem3:local_shards(PartSrc)),
+ [Tgt1, Tgt2] = lists:sort(mem3:local_shards(PartTgt)),
+ #shard{range = R1} = Tgt1,
+ #shard{range = R2} = Tgt2,
+ TMap = #{R1 => Tgt1, R2 => Tgt2},
+ Opts = [{batch_size, 1000}, {batch_count, all}],
+ ?assertMatch({ok, 0}, mem3_rep:go(Src, TMap, Opts)),
+
+ ?assertEqual(PXSrc, get_partition_info(PartTgt, <<"PX">>)),
+ ?assertEqual(PYSrc, get_partition_info(PartTgt, <<"PY">>)),
+ ?assertEqual(SDocs, get_all_docs(PartTgt))
+ end)}.
get_partition_info(DbName, Partition) ->
with_proc(fun() ->
{ok, PInfo} = fabric:get_partition_info(DbName, Partition),
- maps:with([
- <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
- ], to_map(PInfo))
+ maps:with(
+ [
+ <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
+ ],
+ to_map(PInfo)
+ )
end).
-
get_all_docs(DbName) ->
get_all_docs(DbName, #mrargs{}).
-
get_all_docs(DbName, #mrargs{} = QArgs0) ->
GL = erlang:group_leader(),
- with_proc(fun() ->
- Cb = fun
- ({row, Props}, Acc) ->
- Doc = to_map(couch_util:get_value(doc, Props)),
- #{?ID := Id} = Doc,
- {ok, Acc#{Id => Doc}};
- ({meta, _}, Acc) -> {ok, Acc};
- (complete, Acc) -> {ok, Acc}
+ with_proc(
+ fun() ->
+ Cb = fun
+ ({row, Props}, Acc) ->
+ Doc = to_map(couch_util:get_value(doc, Props)),
+ #{?ID := Id} = Doc,
+ {ok, Acc#{Id => Doc}};
+ ({meta, _}, Acc) ->
+ {ok, Acc};
+ (complete, Acc) ->
+ {ok, Acc}
+ end,
+ QArgs = QArgs0#mrargs{include_docs = true},
+ {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
+ Docs
end,
- QArgs = QArgs0#mrargs{include_docs = true},
- {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
- Docs
- end, GL).
-
+ GL
+ ).
to_map([_ | _] = Props) ->
to_map({Props});
-
to_map({[_ | _]} = EJson) ->
jiffy:decode(jiffy:encode(EJson), [return_maps]).
-
create_db(DbName, Opts) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
delete_db(DbName) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
with_proc(Fun) ->
with_proc(Fun, undefined, 30000).
-
with_proc(Fun, GroupLeader) ->
with_proc(Fun, GroupLeader, 30000).
-
with_proc(Fun, GroupLeader, Timeout) ->
{Pid, Ref} = spawn_monitor(fun() ->
case GroupLeader of
@@ -227,21 +222,23 @@ with_proc(Fun, GroupLeader, Timeout) ->
error({with_proc_timeout, Fun, Timeout})
end.
-
add_test_docs(DbName, #{} = DocSpec) ->
- Docs = docs(maps:get(docs, DocSpec, []))
- ++ pdocs(maps:get(pdocs, DocSpec, #{})),
+ Docs =
+ docs(maps:get(docs, DocSpec, [])) ++
+ pdocs(maps:get(pdocs, DocSpec, #{})),
Res = update_docs(DbName, Docs),
- Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end, lists:zip(Docs, Res)),
+ Docs1 = lists:map(
+ fun({Doc, {ok, {RevPos, Rev}}}) ->
+ Doc#doc{revs = {RevPos, [Rev]}}
+ end,
+ lists:zip(Docs, Res)
+ ),
case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
[] -> ok;
[_ | _] = Deleted -> update_docs(DbName, Deleted)
end,
ok.
-
update_docs(DbName, Docs) ->
with_proc(fun() ->
case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
@@ -250,29 +247,32 @@ update_docs(DbName, Docs) ->
end
end).
-
delete_docs([S, E], Docs) when E >= S ->
ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end, Docs);
+ lists:filtermap(
+ fun(#doc{id = Id} = Doc) ->
+ case lists:member(Id, ToDelete) of
+ true -> {true, Doc#doc{deleted = true}};
+ false -> false
+ end
+ end,
+ Docs
+ );
delete_docs(_, _) ->
[].
-
pdocs(#{} = PMap) ->
- maps:fold(fun(Part, DocSpec, DocsAcc) ->
- docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
- end, [], PMap).
-
+ maps:fold(
+ fun(Part, DocSpec, DocsAcc) ->
+ docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
+ end,
+ [],
+ PMap
+ ).
docs(DocSpec) ->
docs(DocSpec, <<"">>).
-
docs(N, Prefix) when is_integer(N), N > 0 ->
docs([0, N - 1], Prefix);
docs([S, E], Prefix) when E >= S ->
@@ -280,12 +280,10 @@ docs([S, E], Prefix) when E >= S ->
docs(_, _) ->
[].
-
doc(Pref, Id) ->
Body = bodyprops(),
doc(Pref, Id, Body, 42).
-
doc(Pref, Id, BodyProps, AttSize) ->
#doc{
id = doc_id(Pref, Id),
@@ -293,29 +291,28 @@ doc(Pref, Id, BodyProps, AttSize) ->
atts = atts(AttSize)
}.
-
doc_id(Pref, Id) ->
IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
<<Pref/binary, IdBin/binary>>.
-
bodyprops() ->
[
- {<<"g">>, {[
- {<<"type">>, <<"Polygon">>},
- {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
- ]}}
+ {<<"g">>,
+ {[
+ {<<"type">>, <<"Polygon">>},
+ {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
+ ]}}
].
-
atts(0) ->
[];
-
atts(Size) when is_integer(Size), Size >= 1 ->
- Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])].
+ Data = <<<<"x">> || _ <- lists:seq(1, Size)>>,
+ [
+ couch_att:new([
+ {name, <<"att">>},
+ {type, <<"app/binary">>},
+ {att_len, Size},
+ {data, Data}
+ ])
+ ].
diff --git a/src/mem3/test/eunit/mem3_reshard_api_test.erl b/src/mem3/test/eunit/mem3_reshard_api_test.erl
index c4df24ad3..6e4107a5c 100644
--- a/src/mem3/test/eunit/mem3_reshard_api_test.erl
+++ b/src/mem3/test/eunit/mem3_reshard_api_test.erl
@@ -12,12 +12,10 @@
-module(mem3_reshard_api_test).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/src/mem3_reshard.hrl").
-
-define(USER, "mem3_reshard_api_test_admin").
-define(PASS, "pass").
-define(AUTH, {basic_auth, {?USER, ?PASS}}).
@@ -27,12 +25,12 @@
-define(STATE, "_reshard/state").
-define(ID, <<"id">>).
-define(OK, <<"ok">>).
--define(TIMEOUT, 60). % seconds
-
+% seconds
+-define(TIMEOUT, 60).
setup() ->
Hashed = couch_passwords:hash_admin_password(?PASS),
- ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false),
+ ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist = false),
Addr = config:get("chttpd", "bind_address", "127.0.0.1"),
Port = mochiweb_socket_server:get(chttpd, port),
Url = lists:concat(["http://", Addr, ":", Port, "/"]),
@@ -42,37 +40,35 @@ setup() ->
create_db(Url, Db3, "?q=2&n=1"),
{Url, {Db1, Db2, Db3}}.
-
teardown({Url, {Db1, Db2, Db3}}) ->
mem3_reshard:reset_state(),
application:unset_env(mem3, reshard_disabled),
delete_db(Url, Db1),
delete_db(Url, Db2),
delete_db(Url, Db3),
- ok = config:delete("reshard", "max_jobs", _Persist=false),
- ok = config:delete("reshard", "require_node_param", _Persist=false),
- ok = config:delete("reshard", "require_range_param", _Persist=false),
- ok = config:delete("admins", ?USER, _Persist=false),
+ ok = config:delete("reshard", "max_jobs", _Persist = false),
+ ok = config:delete("reshard", "require_node_param", _Persist = false),
+ ok = config:delete("reshard", "require_range_param", _Persist = false),
+ ok = config:delete("admins", ?USER, _Persist = false),
meck:unload().
-
start_couch() ->
test_util:start_couch([mem3, chttpd]).
-
stop_couch(Ctx) ->
test_util:stop_couch(Ctx).
-
mem3_reshard_api_test_() ->
{
"mem3 shard split api tests",
{
setup,
- fun start_couch/0, fun stop_couch/1,
+ fun start_couch/0,
+ fun stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun basics/1,
fun create_job_basic/1,
@@ -109,632 +105,768 @@ mem3_reshard_api_test_() ->
}
}.
-
basics({Top, _}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % GET /_reshard
- ?assertMatch({200, #{
- <<"state">> := <<"running">>,
- <<"state_reason">> := null,
- <<"completed">> := 0,
- <<"failed">> := 0,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 0
- }}, req(get, Top ++ ?RESHARD)),
-
- % GET _reshard/state
- ?assertMatch({200, #{<<"state">> := <<"running">>}},
- req(get, Top ++ ?STATE)),
-
- % GET _reshard/jobs
- ?assertMatch({200, #{
- <<"jobs">> := [],
- <<"offset">> := 0,
- <<"total_rows">> := 0
- }}, req(get, Top ++ ?JOBS)),
-
- % Some invalid paths and methods
- ?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")),
- ?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})),
- ?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope}))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ % GET /_reshard
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"running">>,
+ <<"state_reason">> := null,
+ <<"completed">> := 0,
+ <<"failed">> := 0,
+ <<"running">> := 0,
+ <<"stopped">> := 0,
+ <<"total">> := 0
+ }},
+ req(get, Top ++ ?RESHARD)
+ ),
+
+ % GET _reshard/state
+ ?assertMatch(
+ {200, #{<<"state">> := <<"running">>}},
+ req(get, Top ++ ?STATE)
+ ),
+
+ % GET _reshard/jobs
+ ?assertMatch(
+ {200, #{
+ <<"jobs">> := [],
+ <<"offset">> := 0,
+ <<"total_rows">> := 0
+ }},
+ req(get, Top ++ ?JOBS)
+ ),
+
+ % Some invalid paths and methods
+ ?assertMatch({404, _}, req(get, Top ++ ?RESHARD ++ "/invalidpath")),
+ ?assertMatch({405, _}, req(put, Top ++ ?RESHARD, #{dont => thinkso})),
+ ?assertMatch({405, _}, req(post, Top ++ ?RESHARD, #{nope => nope}))
+ end)}.
create_job_basic({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % POST /_reshard/jobs
- {C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
- ?assertEqual(201, C1),
- ?assertMatch([#{?OK := true, ?ID := J, <<"shard">> := S}]
- when is_binary(J) andalso is_binary(S), R1),
- [#{?ID := Id, <<"shard">> := Shard}] = R1,
-
- % GET /_reshard/jobs
- ?assertMatch({200, #{
- <<"jobs">> := [#{?ID := Id, <<"type">> := <<"split">>}],
- <<"offset">> := 0,
- <<"total_rows">> := 1
- }}, req(get, Top ++ ?JOBS)),
-
- % GET /_reshard/job/$jobid
- {C2, R2} = req(get, Top ++ ?JOBS ++ ?b2l(Id)),
- ?assertEqual(200, C2),
- ThisNode = atom_to_binary(node(), utf8),
- ?assertMatch(#{?ID := Id}, R2),
- ?assertMatch(#{<<"type">> := <<"split">>}, R2),
- ?assertMatch(#{<<"source">> := Shard}, R2),
- ?assertMatch(#{<<"history">> := History} when length(History) > 1, R2),
- ?assertMatch(#{<<"node">> := ThisNode}, R2),
- ?assertMatch(#{<<"split_state">> := SSt} when is_binary(SSt), R2),
- ?assertMatch(#{<<"job_state">> := JSt} when is_binary(JSt), R2),
- ?assertMatch(#{<<"state_info">> := #{}}, R2),
- ?assertMatch(#{<<"target">> := Target} when length(Target) == 2, R2),
-
- % GET /_reshard/job/$jobid/state
- ?assertMatch({200, #{<<"state">> := S, <<"reason">> := R}}
- when is_binary(S) andalso (is_binary(R) orelse R =:= null),
- req(get, Top ++ ?JOBS ++ ?b2l(Id) ++ "/state")),
-
- % GET /_reshard
- ?assertMatch({200, #{<<"state">> := <<"running">>, <<"total">> := 1}},
- req(get, Top ++ ?RESHARD)),
-
- % DELETE /_reshard/jobs/$jobid
- ?assertMatch({200, #{?OK := true}},
- req(delete, Top ++ ?JOBS ++ ?b2l(Id))),
-
- % GET _reshard/jobs
- ?assertMatch({200, #{<<"jobs">> := [], <<"total_rows">> := 0}},
- req(get, Top ++ ?JOBS)),
-
- % GET /_reshard/job/$jobid should be a 404
- ?assertMatch({404, #{}}, req(get, Top ++ ?JOBS ++ ?b2l(Id))),
-
- % DELETE /_reshard/jobs/$jobid should be a 404 as well
- ?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ % POST /_reshard/jobs
+ {C1, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
+ ?assertEqual(201, C1),
+ ?assertMatch(
+ [#{?OK := true, ?ID := J, <<"shard">> := S}] when
+ is_binary(J) andalso is_binary(S),
+ R1
+ ),
+ [#{?ID := Id, <<"shard">> := Shard}] = R1,
+
+ % GET /_reshard/jobs
+ ?assertMatch(
+ {200, #{
+ <<"jobs">> := [#{?ID := Id, <<"type">> := <<"split">>}],
+ <<"offset">> := 0,
+ <<"total_rows">> := 1
+ }},
+ req(get, Top ++ ?JOBS)
+ ),
+
+ % GET /_reshard/job/$jobid
+ {C2, R2} = req(get, Top ++ ?JOBS ++ ?b2l(Id)),
+ ?assertEqual(200, C2),
+ ThisNode = atom_to_binary(node(), utf8),
+ ?assertMatch(#{?ID := Id}, R2),
+ ?assertMatch(#{<<"type">> := <<"split">>}, R2),
+ ?assertMatch(#{<<"source">> := Shard}, R2),
+ ?assertMatch(#{<<"history">> := History} when length(History) > 1, R2),
+ ?assertMatch(#{<<"node">> := ThisNode}, R2),
+ ?assertMatch(#{<<"split_state">> := SSt} when is_binary(SSt), R2),
+ ?assertMatch(#{<<"job_state">> := JSt} when is_binary(JSt), R2),
+ ?assertMatch(#{<<"state_info">> := #{}}, R2),
+ ?assertMatch(#{<<"target">> := Target} when length(Target) == 2, R2),
+
+ % GET /_reshard/job/$jobid/state
+ ?assertMatch(
+ {200, #{<<"state">> := S, <<"reason">> := R}} when
+ is_binary(S) andalso (is_binary(R) orelse R =:= null),
+ req(get, Top ++ ?JOBS ++ ?b2l(Id) ++ "/state")
+ ),
+
+ % GET /_reshard
+ ?assertMatch(
+ {200, #{<<"state">> := <<"running">>, <<"total">> := 1}},
+ req(get, Top ++ ?RESHARD)
+ ),
+
+ % DELETE /_reshard/jobs/$jobid
+ ?assertMatch(
+ {200, #{?OK := true}},
+ req(delete, Top ++ ?JOBS ++ ?b2l(Id))
+ ),
+
+ % GET _reshard/jobs
+ ?assertMatch(
+ {200, #{<<"jobs">> := [], <<"total_rows">> := 0}},
+ req(get, Top ++ ?JOBS)
+ ),
+
+ % GET /_reshard/job/$jobid should be a 404
+ ?assertMatch({404, #{}}, req(get, Top ++ ?JOBS ++ ?b2l(Id))),
+
+ % DELETE /_reshard/jobs/$jobid should be a 404 as well
+ ?assertMatch({404, #{}}, req(delete, Top ++ ?JOBS ++ ?b2l(Id)))
+ end)}.
create_two_jobs({Top, {Db1, Db2, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- ?assertMatch({201, [#{?OK := true}]},
- req(post, Jobs, #{type => split, db => Db1})),
- ?assertMatch({201, [#{?OK := true}]},
- req(post, Jobs, #{type => split, db => Db2})),
-
- ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)),
-
- ?assertMatch({200, #{
- <<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}],
- <<"offset">> := 0,
- <<"total_rows">> := 2
- }} when Id1 =/= Id2, req(get, Jobs)),
-
- {200, #{<<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}]}} = req(get, Jobs),
-
- {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id1)),
- ?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)),
- {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)),
- ?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
+
+ ?assertMatch(
+ {201, [#{?OK := true}]},
+ req(post, Jobs, #{type => split, db => Db1})
+ ),
+ ?assertMatch(
+ {201, [#{?OK := true}]},
+ req(post, Jobs, #{type => split, db => Db2})
+ ),
+
+ ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD)),
+
+ ?assertMatch(
+ {200, #{
+ <<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}],
+ <<"offset">> := 0,
+ <<"total_rows">> := 2
+ }} when Id1 =/= Id2,
+ req(get, Jobs)
+ ),
+
+ {200, #{<<"jobs">> := [#{?ID := Id1}, #{?ID := Id2}]}} = req(get, Jobs),
+
+ {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id1)),
+ ?assertMatch({200, #{<<"total">> := 1}}, req(get, Top ++ ?RESHARD)),
+ {200, #{?OK := true}} = req(delete, Jobs ++ ?b2l(Id2)),
+ ?assertMatch({200, #{<<"total">> := 0}}, req(get, Top ++ ?RESHARD))
+ end)}.
create_multiple_jobs_from_one_post({Top, {_, _, Db3}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
- {C1, R1} = req(post, Jobs, #{type => split, db => Db3}),
- ?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}),
- ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
+ {C1, R1} = req(post, Jobs, #{type => split, db => Db3}),
+ ?assertMatch({201, [#{?OK := true}, #{?OK := true}]}, {C1, R1}),
+ ?assertMatch({200, #{<<"total">> := 2}}, req(get, Top ++ ?RESHARD))
+ end)}.
start_stop_cluster_basic({Top, _}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Url = Top ++ ?STATE,
-
- ?assertMatch({200, #{
- <<"state">> := <<"running">>,
- <<"reason">> := null
- }}, req(get, Url)),
-
- ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"reason">> := R
- }} when is_binary(R), req(get, Url)),
-
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
-
- % Make sure the reason shows in the state GET request
- Reason = <<"somereason">>,
- ?assertMatch({200, _}, req(put, Url, #{state => stopped,
- reason => Reason})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>,
- <<"reason">> := Reason}}, req(get, Url)),
-
- % Top level summary also shows the reason
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"state_reason">> := Reason
- }}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Url = Top ++ ?STATE,
+
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"running">>,
+ <<"reason">> := null
+ }},
+ req(get, Url)
+ ),
+
+ ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"stopped">>,
+ <<"reason">> := R
+ }} when is_binary(R),
+ req(get, Url)
+ ),
+
+ ?assertMatch({200, _}, req(put, Url, #{state => running})),
+
+ % Make sure the reason shows in the state GET request
+ Reason = <<"somereason">>,
+ ?assertMatch(
+ {200, _},
+ req(put, Url, #{
+ state => stopped,
+ reason => Reason
+ })
+ ),
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"stopped">>,
+ <<"reason">> := Reason
+ }},
+ req(get, Url)
+ ),
+
+ % Top level summary also shows the reason
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"stopped">>,
+ <<"state_reason">> := Reason
+ }},
+ req(get, Top ++ ?RESHARD)
+ ),
+ ?assertMatch({200, _}, req(put, Url, #{state => running})),
+ ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, Url))
+ end)}.
test_disabled({Top, _}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- application:set_env(mem3, reshard_disabled, true),
- ?assertMatch({501, _}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- application:unset_env(mem3, reshard_disabled),
- ?assertMatch({200, _}, req(get, Top ++ ?RESHARD)),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running}))
- end)}.
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ application:set_env(mem3, reshard_disabled, true),
+ ?assertMatch({501, _}, req(get, Top ++ ?RESHARD)),
+ ?assertMatch({501, _}, req(put, Top ++ ?STATE, #{state => running})),
+ application:unset_env(mem3, reshard_disabled),
+ ?assertMatch({200, _}, req(get, Top ++ ?RESHARD)),
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running}))
+ end)}.
start_stop_cluster_with_a_job({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Url = Top ++ ?STATE,
-
- ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, Url)),
-
- % Can add jobs with global state stopped, they just won't be running
- {201, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
- ?assertMatch([#{?OK := true}], R1),
- [#{?ID := Id1}] = R1,
- {200, J1} = req(get, Top ++ ?JOBS ++ ?b2l(Id1)),
- ?assertMatch(#{?ID := Id1, <<"job_state">> := <<"stopped">>}, J1),
- % Check summary stats
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"running">> := 0,
- <<"stopped">> := 1,
- <<"total">> := 1
- }}, req(get, Top ++ ?RESHARD)),
-
- % Can delete the job when stopped
- {200, #{?OK := true}} = req(delete, Top ++ ?JOBS ++ ?b2l(Id1)),
- ?assertMatch({200, #{
- <<"state">> := <<"stopped">>,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 0
- }}, req(get, Top ++ ?RESHARD)),
-
- % Add same job again
- {201, [#{?ID := Id2}]} = req(post, Top ++ ?JOBS, #{type => split,
- db => Db1}),
- ?assertMatch({200, #{?ID := Id2, <<"job_state">> := <<"stopped">>}},
- req(get, Top ++ ?JOBS ++ ?b2l(Id2))),
-
- % Job should start after resharding is started on the cluster
- ?assertMatch({200, _}, req(put, Url, #{state => running})),
- ?assertMatch({200, #{?ID := Id2, <<"job_state">> := JSt}}
- when JSt =/= <<"stopped">>, req(get, Top ++ ?JOBS ++ ?b2l(Id2)))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Url = Top ++ ?STATE,
+
+ ?assertMatch({200, _}, req(put, Url, #{state => stopped})),
+ ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, Url)),
+
+ % Can add jobs with global state stopped, they just won't be running
+ {201, R1} = req(post, Top ++ ?JOBS, #{type => split, db => Db1}),
+ ?assertMatch([#{?OK := true}], R1),
+ [#{?ID := Id1}] = R1,
+ {200, J1} = req(get, Top ++ ?JOBS ++ ?b2l(Id1)),
+ ?assertMatch(#{?ID := Id1, <<"job_state">> := <<"stopped">>}, J1),
+ % Check summary stats
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"stopped">>,
+ <<"running">> := 0,
+ <<"stopped">> := 1,
+ <<"total">> := 1
+ }},
+ req(get, Top ++ ?RESHARD)
+ ),
+
+ % Can delete the job when stopped
+ {200, #{?OK := true}} = req(delete, Top ++ ?JOBS ++ ?b2l(Id1)),
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"stopped">>,
+ <<"running">> := 0,
+ <<"stopped">> := 0,
+ <<"total">> := 0
+ }},
+ req(get, Top ++ ?RESHARD)
+ ),
+
+ % Add same job again
+ {201, [#{?ID := Id2}]} = req(post, Top ++ ?JOBS, #{
+ type => split,
+ db => Db1
+ }),
+ ?assertMatch(
+ {200, #{?ID := Id2, <<"job_state">> := <<"stopped">>}},
+ req(get, Top ++ ?JOBS ++ ?b2l(Id2))
+ ),
+
+ % Job should start after resharding is started on the cluster
+ ?assertMatch({200, _}, req(put, Url, #{state => running})),
+ ?assertMatch(
+ {200, #{?ID := Id2, <<"job_state">> := JSt}} when
+ JSt =/= <<"stopped">>,
+ req(get, Top ++ ?JOBS ++ ?b2l(Id2))
+ )
+ end)}.
individual_job_start_stop({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- intercept_state(topoff1),
-
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- StUrl = JobUrl ++ "/state",
-
- % Wait for the the job to start running and intercept it in topoff1 state
- receive {JobPid, topoff1} -> ok end,
- % Tell the intercept to never finish checkpointing so job is left hanging
- % forever in running state
- JobPid ! cancel,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- {200, _} = req(put, StUrl, #{state => stopped}),
- wait_state(StUrl, <<"stopped">>),
-
- % Stop/start resharding globally and job should still stay stopped
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % Start the job again
- ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
- % Wait for the the job to start running and intercept it in topoff1 state
- receive {JobPid2, topoff1} -> ok end,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
- % Let it continue running and it should complete eventually
- JobPid2 ! continue,
- wait_state(StUrl, <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ intercept_state(topoff1),
+
+ Body = #{type => split, db => Db1},
+ {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+
+ JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
+ StUrl = JobUrl ++ "/state",
+
+            % Wait for the job to start running and intercept it in topoff1 state
+ receive
+ {JobPid, topoff1} -> ok
+ end,
+ % Tell the intercept to never finish checkpointing so job is left hanging
+ % forever in running state
+ JobPid ! cancel,
+ ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+
+ {200, _} = req(put, StUrl, #{state => stopped}),
+ wait_state(StUrl, <<"stopped">>),
+
+ % Stop/start resharding globally and job should still stay stopped
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+ ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
+
+ % Start the job again
+ ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
+            % Wait for the job to start running and intercept it in topoff1 state
+ receive
+ {JobPid2, topoff1} -> ok
+ end,
+ ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+ % Let it continue running and it should complete eventually
+ JobPid2 ! continue,
+ wait_state(StUrl, <<"completed">>)
+ end)}.
individual_job_stop_when_cluster_stopped({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- intercept_state(topoff1),
-
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
-
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- StUrl = JobUrl ++ "/state",
-
- % Wait for the the job to start running and intercept in topoff1
- receive {JobPid, topoff1} -> ok end,
- % Tell the intercept to never finish checkpointing so job is left
- % hanging forever in running state
- JobPid ! cancel,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- % Stop resharding globally
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- wait_state(StUrl, <<"stopped">>),
-
- % Stop the job specifically
- {200, _} = req(put, StUrl, #{state => stopped}),
- % Job stays stopped
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % Set cluster to running again
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
-
- % The job should stay stopped
- ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
-
- % It should be possible to resume job and it should complete
- ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
-
- % Wait for the the job to start running and intercept in topoff1 state
- receive {JobPid2, topoff1} -> ok end,
- ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
-
- % Let it continue running and it should complete eventually
- JobPid2 ! continue,
- wait_state(StUrl, <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ intercept_state(topoff1),
+
+ Body = #{type => split, db => Db1},
+ {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+
+ JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
+ StUrl = JobUrl ++ "/state",
+
+            % Wait for the job to start running and intercept in topoff1
+ receive
+ {JobPid, topoff1} -> ok
+ end,
+ % Tell the intercept to never finish checkpointing so job is left
+ % hanging forever in running state
+ JobPid ! cancel,
+ ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+
+ % Stop resharding globally
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+ wait_state(StUrl, <<"stopped">>),
+
+ % Stop the job specifically
+ {200, _} = req(put, StUrl, #{state => stopped}),
+ % Job stays stopped
+ ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
+
+ % Set cluster to running again
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+
+ % The job should stay stopped
+ ?assertMatch({200, #{<<"state">> := <<"stopped">>}}, req(get, StUrl)),
+
+ % It should be possible to resume job and it should complete
+ ?assertMatch({200, _}, req(put, StUrl, #{state => running})),
+
+            % Wait for the job to start running and intercept in topoff1 state
+ receive
+ {JobPid2, topoff1} -> ok
+ end,
+ ?assertMatch({200, #{<<"state">> := <<"running">>}}, req(get, StUrl)),
+
+ % Let it continue running and it should complete eventually
+ JobPid2 ! continue,
+ wait_state(StUrl, <<"completed">>)
+ end)}.
create_job_with_invalid_arguments({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- % Nothing in the body
- ?assertMatch({400, _}, req(post, Jobs, #{})),
-
- % Missing type
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1})),
-
- % Have type but no db and no shard
- ?assertMatch({400, _}, req(post, Jobs, #{type => split})),
-
- % Have type and db but db is invalid
- ?assertMatch({400, _}, req(post, Jobs, #{db => <<"baddb">>,
- type => split})),
-
- % Have type and shard but shard is not an existing database
- ?assertMatch({404, _}, req(post, Jobs, #{type => split,
- shard => <<"shards/80000000-ffffffff/baddb.1549492084">>})),
-
- % Bad range values, too large, different types, inverted
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1, range => 42,
- type => split})),
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1,
- range => <<"x">>, type => split})),
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1,
- range => <<"ffffffff-80000000">>, type => split})),
- ?assertMatch({400, _}, req(post, Jobs, #{db => Db1,
- range => <<"00000000-fffffffff">>, type => split})),
-
- % Can't have both db and shard
- ?assertMatch({400, _}, req(post, Jobs, #{type => split, db => Db1,
- shard => <<"blah">>}))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
+
+ % Nothing in the body
+ ?assertMatch({400, _}, req(post, Jobs, #{})),
+
+ % Missing type
+ ?assertMatch({400, _}, req(post, Jobs, #{db => Db1})),
+
+ % Have type but no db and no shard
+ ?assertMatch({400, _}, req(post, Jobs, #{type => split})),
+
+ % Have type and db but db is invalid
+ ?assertMatch(
+ {400, _},
+ req(post, Jobs, #{
+ db => <<"baddb">>,
+ type => split
+ })
+ ),
+
+ % Have type and shard but shard is not an existing database
+ ?assertMatch(
+ {404, _},
+ req(post, Jobs, #{
+ type => split,
+ shard => <<"shards/80000000-ffffffff/baddb.1549492084">>
+ })
+ ),
+
+ % Bad range values, too large, different types, inverted
+ ?assertMatch(
+ {400, _},
+ req(post, Jobs, #{
+ db => Db1,
+ range => 42,
+ type => split
+ })
+ ),
+ ?assertMatch(
+ {400, _},
+ req(post, Jobs, #{
+ db => Db1,
+ range => <<"x">>,
+ type => split
+ })
+ ),
+ ?assertMatch(
+ {400, _},
+ req(post, Jobs, #{
+ db => Db1,
+ range => <<"ffffffff-80000000">>,
+ type => split
+ })
+ ),
+ ?assertMatch(
+ {400, _},
+ req(post, Jobs, #{
+ db => Db1,
+ range => <<"00000000-fffffffff">>,
+ type => split
+ })
+ ),
+
+ % Can't have both db and shard
+ ?assertMatch(
+ {400, _},
+ req(post, Jobs, #{
+ type => split,
+ db => Db1,
+ shard => <<"blah">>
+ })
+ )
+ end)}.
create_job_with_db({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
- Body1 = #{type => split, db => Db1},
-
- % Node with db
- N = atom_to_binary(node(), utf8),
- {C1, R1} = req(post, Jobs, Body1#{node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- wait_to_complete_then_cleanup(Top, R1),
-
- % Range and db
- {C2, R2} = req(post, Jobs, Body1#{range => <<"00000000-7fffffff">>}),
- ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
- wait_to_complete_then_cleanup(Top, R2),
-
- % Node, range and db
- Range = <<"80000000-ffffffff">>,
- {C3, R3} = req(post, Jobs, Body1#{range => Range, node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
- wait_to_complete_then_cleanup(Top, R3),
-
- ?assertMatch([
- [16#00000000, 16#3fffffff],
- [16#40000000, 16#7fffffff],
- [16#80000000, 16#bfffffff],
- [16#c0000000, 16#ffffffff]
- ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))])
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
+ Body1 = #{type => split, db => Db1},
+
+ % Node with db
+ N = atom_to_binary(node(), utf8),
+ {C1, R1} = req(post, Jobs, Body1#{node => N}),
+ ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
+ wait_to_complete_then_cleanup(Top, R1),
+
+ % Range and db
+ {C2, R2} = req(post, Jobs, Body1#{range => <<"00000000-7fffffff">>}),
+ ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
+ wait_to_complete_then_cleanup(Top, R2),
+
+ % Node, range and db
+ Range = <<"80000000-ffffffff">>,
+ {C3, R3} = req(post, Jobs, Body1#{range => Range, node => N}),
+ ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
+ wait_to_complete_then_cleanup(Top, R3),
+
+ ?assertMatch(
+ [
+ [16#00000000, 16#3fffffff],
+ [16#40000000, 16#7fffffff],
+ [16#80000000, 16#bfffffff],
+ [16#c0000000, 16#ffffffff]
+ ],
+ [mem3:range(S) || S <- lists:sort(mem3:shards(Db1))]
+ )
+ end)}.
create_job_with_shard_name({Top, {_, _, Db3}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
- [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))],
-
- % Shard only
- {C1, R1} = req(post, Jobs, #{type => split, shard => S1}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- wait_to_complete_then_cleanup(Top, R1),
-
- % Shard with a node
- N = atom_to_binary(node(), utf8),
- {C2, R2} = req(post, Jobs, #{type => split, shard => S2, node => N}),
- ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
- wait_to_complete_then_cleanup(Top, R2),
-
- ?assertMatch([
- [16#00000000, 16#3fffffff],
- [16#40000000, 16#7fffffff],
- [16#80000000, 16#bfffffff],
- [16#c0000000, 16#ffffffff]
- ], [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))])
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
+ [S1, S2] = [mem3:name(S) || S <- lists:sort(mem3:shards(Db3))],
+
+ % Shard only
+ {C1, R1} = req(post, Jobs, #{type => split, shard => S1}),
+ ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
+ wait_to_complete_then_cleanup(Top, R1),
+
+ % Shard with a node
+ N = atom_to_binary(node(), utf8),
+ {C2, R2} = req(post, Jobs, #{type => split, shard => S2, node => N}),
+ ?assertMatch({201, [#{?OK := true}]}, {C2, R2}),
+ wait_to_complete_then_cleanup(Top, R2),
+
+ ?assertMatch(
+ [
+ [16#00000000, 16#3fffffff],
+ [16#40000000, 16#7fffffff],
+ [16#80000000, 16#bfffffff],
+ [16#c0000000, 16#ffffffff]
+ ],
+ [mem3:range(S) || S <- lists:sort(mem3:shards(Db3))]
+ )
+ end)}.
completed_job_handling({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- % Run job to completion
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
- [#{?ID := Id}] = R1,
- wait_to_complete(Top, R1),
-
- % Check top level stats
- ?assertMatch({200, #{
- <<"state">> := <<"running">>,
- <<"state_reason">> := null,
- <<"completed">> := 1,
- <<"failed">> := 0,
- <<"running">> := 0,
- <<"stopped">> := 0,
- <<"total">> := 1
- }}, req(get, Top ++ ?RESHARD)),
-
- % Job state itself
- JobUrl = Jobs ++ ?b2l(Id),
- ?assertMatch({200, #{
- <<"split_state">> := <<"completed">>,
- <<"job_state">> := <<"completed">>
- }}, req(get, JobUrl)),
-
- % Job's state endpoint
- StUrl = Jobs ++ ?b2l(Id) ++ "/state",
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Try to stop it and it should stay completed
- {200, _} = req(put, StUrl, #{state => stopped}),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Try to resume it and it should stay completed
- {200, _} = req(put, StUrl, #{state => running}),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Stop resharding globally and job should still stay completed
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- % Start resharding and job stays completed
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
- ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
-
- ?assertMatch({200, #{?OK := true}}, req(delete, JobUrl))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
+
+ % Run job to completion
+ {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
+ ?assertMatch({201, [#{?OK := true}]}, {C1, R1}),
+ [#{?ID := Id}] = R1,
+ wait_to_complete(Top, R1),
+
+ % Check top level stats
+ ?assertMatch(
+ {200, #{
+ <<"state">> := <<"running">>,
+ <<"state_reason">> := null,
+ <<"completed">> := 1,
+ <<"failed">> := 0,
+ <<"running">> := 0,
+ <<"stopped">> := 0,
+ <<"total">> := 1
+ }},
+ req(get, Top ++ ?RESHARD)
+ ),
+
+ % Job state itself
+ JobUrl = Jobs ++ ?b2l(Id),
+ ?assertMatch(
+ {200, #{
+ <<"split_state">> := <<"completed">>,
+ <<"job_state">> := <<"completed">>
+ }},
+ req(get, JobUrl)
+ ),
+
+ % Job's state endpoint
+ StUrl = Jobs ++ ?b2l(Id) ++ "/state",
+ ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
+
+ % Try to stop it and it should stay completed
+ {200, _} = req(put, StUrl, #{state => stopped}),
+ ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
+
+ % Try to resume it and it should stay completed
+ {200, _} = req(put, StUrl, #{state => running}),
+ ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
+
+ % Stop resharding globally and job should still stay completed
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+ ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
+
+ % Start resharding and job stays completed
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+ ?assertMatch({200, #{<<"state">> := <<"completed">>}}, req(get, StUrl)),
+
+ ?assertMatch({200, #{?OK := true}}, req(delete, JobUrl))
+ end)}.
handle_db_deletion_in_topoff1({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, topoff1),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = delete_source_in_state(Top, Db1, topoff1),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
+ end)}.
handle_db_deletion_in_initial_copy({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, initial_copy),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = delete_source_in_state(Top, Db1, initial_copy),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
+ end)}.
handle_db_deletion_in_copy_local_docs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, copy_local_docs),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = delete_source_in_state(Top, Db1, copy_local_docs),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
+ end)}.
handle_db_deletion_in_build_indices({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, build_indices),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = delete_source_in_state(Top, Db1, build_indices),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
+ end)}.
handle_db_deletion_in_update_shard_map({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, update_shardmap),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = delete_source_in_state(Top, Db1, update_shardmap),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
+ end)}.
handle_db_deletion_in_wait_source_close({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = delete_source_in_state(Top, Db1, wait_source_close),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = delete_source_in_state(Top, Db1, wait_source_close),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"failed">>)
+ end)}.
recover_in_topoff1({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, topoff1),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, topoff1),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
recover_in_initial_copy({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, initial_copy),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, initial_copy),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
recover_in_copy_local_docs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, copy_local_docs),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, copy_local_docs),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
recover_in_build_indices({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, build_indices),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, build_indices),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
recover_in_update_shard_map({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, update_shardmap),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, update_shardmap),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
recover_in_wait_source_close({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, wait_source_close),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, wait_source_close),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
recover_in_topoff3({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, topoff3),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, topoff3),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
recover_in_source_delete({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- JobId = recover_in_state(Top, Db1, source_delete),
- wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ JobId = recover_in_state(Top, Db1, source_delete),
+ wait_state(Top ++ ?JOBS ++ ?b2l(JobId) ++ "/state", <<"completed">>)
+ end)}.
check_max_jobs({Top, {Db1, Db2, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- config:set("reshard", "max_jobs", "0", _Persist=false),
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]}, {C1, R1}),
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
- config:set("reshard", "max_jobs", "1", _Persist=false),
- {201, R2} = req(post, Jobs, #{type => split, db => Db1}),
- wait_to_complete(Top, R2),
+ config:set("reshard", "max_jobs", "0", _Persist = false),
+ {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
+ ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]}, {C1, R1}),
- % Stop clustering so jobs are not started anymore and ensure max jobs
- % is enforced even if jobs are stopped
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
+ config:set("reshard", "max_jobs", "1", _Persist = false),
+ {201, R2} = req(post, Jobs, #{type => split, db => Db1}),
+ wait_to_complete(Top, R2),
- {C3, R3} = req(post, Jobs, #{type => split, db => Db2}),
- ?assertMatch({500, [#{<<"error">> := <<"max_jobs_exceeded">>}]},
- {C3, R3}),
+ % Stop clustering so jobs are not started anymore and ensure max jobs
+ % is enforced even if jobs are stopped
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => stopped})),
- % Allow the job to be created by raising max_jobs
- config:set("reshard", "max_jobs", "2", _Persist=false),
+ {C3, R3} = req(post, Jobs, #{type => split, db => Db2}),
+ ?assertMatch(
+ {500, [#{<<"error">> := <<"max_jobs_exceeded">>}]},
+ {C3, R3}
+ ),
- {C4, R4} = req(post, Jobs, #{type => split, db => Db2}),
- ?assertEqual(201, C4),
+ % Allow the job to be created by raising max_jobs
+ config:set("reshard", "max_jobs", "2", _Persist = false),
- % Lower max_jobs after job is created but it's not running
- config:set("reshard", "max_jobs", "1", _Persist=false),
+ {C4, R4} = req(post, Jobs, #{type => split, db => Db2}),
+ ?assertEqual(201, C4),
- % Start resharding again
- ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+ % Lower max_jobs after job is created but it's not running
+ config:set("reshard", "max_jobs", "1", _Persist = false),
- % Jobs that have been created already are not removed if max jobs is lowered
- % so make sure the job completes
- wait_to_complete(Top, R4)
- end)}.
+ % Start resharding again
+ ?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
+ % Jobs that have been created already are not removed if max jobs is lowered
+ % so make sure the job completes
+ wait_to_complete(Top, R4)
+ end)}.
check_node_and_range_required_params({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Jobs = Top ++ ?JOBS,
-
- Node = atom_to_binary(node(), utf8),
- Range = <<"00000000-ffffffff">>,
-
- config:set("reshard", "require_node_param", "true", _Persist=false),
- {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
- NodeRequiredErr = <<"`node` prameter is required">>,
- ?assertEqual({400, #{<<"error">> => <<"bad_request">>,
- <<"reason">> => NodeRequiredErr}}, {C1, R1}),
-
- config:set("reshard", "require_range_param", "true", _Persist=false),
- {C2, R2} = req(post, Jobs, #{type => split, db => Db1, node => Node}),
- RangeRequiredErr = <<"`range` prameter is required">>,
- ?assertEqual({400, #{<<"error">> => <<"bad_request">>,
- <<"reason">> => RangeRequiredErr}}, {C2, R2}),
-
- Body = #{type => split, db => Db1, range => Range, node => Node},
- {C3, R3} = req(post, Jobs, Body),
- ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
- wait_to_complete_then_cleanup(Top, R3)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Jobs = Top ++ ?JOBS,
+
+ Node = atom_to_binary(node(), utf8),
+ Range = <<"00000000-ffffffff">>,
+
+ config:set("reshard", "require_node_param", "true", _Persist = false),
+ {C1, R1} = req(post, Jobs, #{type => split, db => Db1}),
+ NodeRequiredErr = <<"`node` prameter is required">>,
+ ?assertEqual(
+ {400, #{
+ <<"error">> => <<"bad_request">>,
+ <<"reason">> => NodeRequiredErr
+ }},
+ {C1, R1}
+ ),
+
+ config:set("reshard", "require_range_param", "true", _Persist = false),
+ {C2, R2} = req(post, Jobs, #{type => split, db => Db1, node => Node}),
+ RangeRequiredErr = <<"`range` prameter is required">>,
+ ?assertEqual(
+ {400, #{
+ <<"error">> => <<"bad_request">>,
+ <<"reason">> => RangeRequiredErr
+ }},
+ {C2, R2}
+ ),
+
+ Body = #{type => split, db => Db1, range => Range, node => Node},
+ {C3, R3} = req(post, Jobs, Body),
+ ?assertMatch({201, [#{?OK := true}]}, {C3, R3}),
+ wait_to_complete_then_cleanup(Top, R3)
+ end)}.
cleanup_completed_jobs({Top, {Db1, _, _}}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- Body = #{type => split, db => Db1},
- {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
- wait_state(JobUrl ++ "/state", <<"completed">>),
- delete_db(Top, Db1),
- wait_for_http_code(JobUrl, 404)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ Body = #{type => split, db => Db1},
+ {201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
+ JobUrl = Top ++ ?JOBS ++ ?b2l(Id),
+ wait_state(JobUrl ++ "/state", <<"completed">>),
+ delete_db(Top, Db1),
+ wait_for_http_code(JobUrl, 404)
+ end)}.
% Test help functions
wait_to_complete_then_cleanup(Top, Jobs) ->
JobsUrl = Top ++ ?JOBS,
- lists:foreach(fun(#{?ID := Id}) ->
- wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>),
- {200, _} = req(delete, JobsUrl ++ ?b2l(Id))
- end, Jobs).
-
+ lists:foreach(
+ fun(#{?ID := Id}) ->
+ wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>),
+ {200, _} = req(delete, JobsUrl ++ ?b2l(Id))
+ end,
+ Jobs
+ ).
wait_to_complete(Top, Jobs) ->
JobsUrl = Top ++ ?JOBS,
- lists:foreach(fun(#{?ID := Id}) ->
- wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>)
- end, Jobs).
-
+ lists:foreach(
+ fun(#{?ID := Id}) ->
+ wait_state(JobsUrl ++ ?b2l(Id) ++ "/state", <<"completed">>)
+ end,
+ Jobs
+ ).
intercept_state(State) ->
TestPid = self(),
@@ -752,46 +884,57 @@ intercept_state(State) ->
end
end).
-
cancel_intercept() ->
meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
meck:passthrough([Job])
end).
-
wait_state(Url, State) ->
- test_util:wait(fun() ->
+ test_util:wait(
+ fun() ->
case req(get, Url) of
- {200, #{<<"state">> := State}} -> ok;
- {200, #{}} -> timer:sleep(100), wait
+ {200, #{<<"state">> := State}} ->
+ ok;
+ {200, #{}} ->
+ timer:sleep(100),
+ wait
end
- end, 30000).
-
+ end,
+ 30000
+ ).
wait_for_http_code(Url, Code) when is_integer(Code) ->
- test_util:wait(fun() ->
+ test_util:wait(
+ fun() ->
case req(get, Url) of
- {Code, _} -> ok;
- {_, _} -> timer:sleep(100), wait
+ {Code, _} ->
+ ok;
+ {_, _} ->
+ timer:sleep(100),
+ wait
end
- end, 30000).
-
+ end,
+ 30000
+ ).
delete_source_in_state(Top, Db, State) when is_atom(State), is_binary(Db) ->
intercept_state(State),
Body = #{type => split, db => Db},
{201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- receive {JobPid, State} -> ok end,
+ receive
+ {JobPid, State} -> ok
+ end,
sync_delete_db(Top, Db),
JobPid ! continue,
Id.
-
recover_in_state(Top, Db, State) when is_atom(State) ->
intercept_state(State),
Body = #{type => split, db => Db},
{201, [#{?ID := Id}]} = req(post, Top ++ ?JOBS, Body),
- receive {JobPid, State} -> ok end,
+ receive
+ {JobPid, State} -> ok
+ end,
% Job is now stuck in running we prevented it from executing
% the given state
JobPid ! cancel,
@@ -801,13 +944,11 @@ recover_in_state(Top, Db, State) when is_atom(State) ->
?assertMatch({200, _}, req(put, Top ++ ?STATE, #{state => running})),
Id.
-
create_db(Top, Db, QArgs) when is_binary(Db) ->
Url = Top ++ binary_to_list(Db) ++ QArgs,
{ok, Status, _, _} = test_request:put(Url, [?JSON, ?AUTH], "{}"),
?assert(Status =:= 201 orelse Status =:= 202).
-
delete_db(Top, Db) when is_binary(Db) ->
Url = Top ++ binary_to_list(Db),
case test_request:get(Url, [?AUTH]) of
@@ -818,7 +959,6 @@ delete_db(Top, Db) when is_binary(Db) ->
ok
end.
-
sync_delete_db(Top, Db) when is_binary(Db) ->
delete_db(Top, Db),
try
@@ -831,16 +971,13 @@ sync_delete_db(Top, Db) when is_binary(Db) ->
ok
end.
-
req(Method, Url) ->
Headers = [?AUTH],
{ok, Code, _, Res} = test_request:request(Method, Url, Headers),
{Code, jiffy:decode(Res, [return_maps])}.
-
req(Method, Url, #{} = Body) ->
req(Method, Url, jiffy:encode(Body));
-
req(Method, Url, Body) ->
Headers = [?JSON, ?AUTH],
{ok, Code, _, Res} = test_request:request(Method, Url, Headers, Body),
diff --git a/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
index 4b9e2a34a..b9cafd75c 100644
--- a/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
+++ b/src/mem3/test/eunit/mem3_reshard_changes_feed_test.erl
@@ -12,51 +12,47 @@
-module(mem3_reshard_changes_feed_test).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/src/mem3_reshard.hrl").
--define(TIMEOUT, 60). % seconds
-
--define(assertChanges(Expected, Received),
- begin
- ((fun() ->
- ExpectedIDs = lists:sort([I || #{id := I} <- Expected]),
- ReceivedIDs = lists:sort([I || #{id := I} <- Received]),
- ?assertEqual(ExpectedIDs, ReceivedIDs)
- end)())
- end).
+% seconds
+-define(TIMEOUT, 60).
+-define(assertChanges(Expected, Received), begin
+ ((fun() ->
+ ExpectedIDs = lists:sort([I || #{id := I} <- Expected]),
+ ReceivedIDs = lists:sort([I || #{id := I} <- Received]),
+ ?assertEqual(ExpectedIDs, ReceivedIDs)
+ end)())
+end).
setup() ->
Db1 = ?tempdb(),
create_db(Db1, [{q, 1}, {n, 1}]),
#{db1 => Db1}.
-
teardown(#{} = Dbs) ->
mem3_reshard:reset_state(),
maps:map(fun(_, Db) -> delete_db(Db) end, Dbs).
-
start_couch() ->
test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
stop_couch(Ctx) ->
test_util:stop_couch(Ctx).
-
mem3_reshard_changes_feed_test_() ->
{
"mem3 shard split changes feed tests",
{
setup,
- fun start_couch/0, fun stop_couch/1,
+ fun start_couch/0,
+ fun stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun normal_feed_should_work_after_split/1,
fun continuous_feed_should_work_during_split/1
@@ -65,177 +61,193 @@ mem3_reshard_changes_feed_test_() ->
}
}.
-
normal_feed_should_work_after_split(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{
- docs => [1, 10],
- delete => [5, 6]
- },
- add_test_docs(Db, DocSpec),
-
- % gather pre-shard changes
- BaseArgs = #changes_args{feed = "normal", dir = fwd, since = 0},
- {ok, OldChanges, OldEndSeq} = get_changes_feed(Db, BaseArgs),
-
- % Split the shard
- split_and_wait(Db),
-
- % verify changes list consistent for all the old seqs
- lists:foldl(fun(#{seq := Seq} = C, ExpectedChanges) ->
- Args = BaseArgs#changes_args{since = Seq},
- {ok, Changes, _EndSeq} = get_changes_feed(Db, Args),
- ?assertChanges(ExpectedChanges, Changes),
- [C | ExpectedChanges]
- end, [], OldChanges),
-
- % confirm that old LastSeq respected
- Args1 = BaseArgs#changes_args{since = OldEndSeq},
- {ok, Changes1, EndSeq1} = get_changes_feed(Db, Args1),
- ?assertChanges([], Changes1),
-
- % confirm that new LastSeq also respected
- Args2 = BaseArgs#changes_args{since = EndSeq1},
- {ok, Changes2, EndSeq2} = get_changes_feed(Db, Args2),
- ?assertChanges([], Changes2),
- ?assertEqual(EndSeq2, EndSeq1),
-
- % confirm we didn't lost any changes and have consistent last seq
- {ok, Changes3, EndSeq3} = get_changes_feed(Db, BaseArgs),
- ?assertChanges(OldChanges, Changes3),
-
- % add some docs
- add_test_docs(Db, #{docs => [11, 15]}),
- Args4 = BaseArgs#changes_args{since = EndSeq3},
- {ok, Changes4, EndSeq4} = get_changes_feed(Db, Args4),
- AddedChanges = [#{id => ID} || #doc{id = ID} <- docs([11, 15])],
- ?assertChanges(AddedChanges, Changes4),
-
- % confirm include_docs and deleted works
- Args5 = BaseArgs#changes_args{include_docs = true},
- {ok, Changes5, EndSeq5} = get_changes_feed(Db, Args5),
- ?assertEqual(EndSeq4, EndSeq5),
- [SampleChange] = [C || #{id := ID} = C <- Changes5, ID == <<"00005">>],
- ?assertMatch(#{deleted := true}, SampleChange),
- ?assertMatch(#{doc := {Body}} when is_list(Body), SampleChange),
-
- % update and delete some pre and post split docs
- AllDocs = [couch_doc:from_json_obj(Doc) || #{doc := Doc} <- Changes5],
- UpdateDocs = lists:filtermap(fun
- (#doc{id = <<"00002">>}) -> true;
- (#doc{id = <<"00012">>}) -> true;
- (#doc{id = <<"00004">>} = Doc) -> {true, Doc#doc{deleted = true}};
- (#doc{id = <<"00014">>} = Doc) -> {true, Doc#doc{deleted = true}};
- (_) -> false
- end, AllDocs),
- update_docs(Db, UpdateDocs),
-
- Args6 = BaseArgs#changes_args{since = EndSeq5},
- {ok, Changes6, EndSeq6} = get_changes_feed(Db, Args6),
- UpdatedChanges = [#{id => ID} || #doc{id = ID} <- UpdateDocs],
- ?assertChanges(UpdatedChanges, Changes6),
- [#{seq := Seq6} | _] = Changes6,
- ?assertEqual(EndSeq6, Seq6),
-
- Args7 = Args6#changes_args{dir = rev, limit = 4},
- {ok, Changes7, EndSeq7} = get_changes_feed(Db, Args7),
- ?assertEqual(4, length(Changes7)),
- [#{seq := Seq7} | _] = Changes7,
- ?assertEqual(EndSeq7, Seq7)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{
+ docs => [1, 10],
+ delete => [5, 6]
+ },
+ add_test_docs(Db, DocSpec),
+
+ % gather pre-split changes
+ BaseArgs = #changes_args{feed = "normal", dir = fwd, since = 0},
+ {ok, OldChanges, OldEndSeq} = get_changes_feed(Db, BaseArgs),
+
+ % Split the shard
+ split_and_wait(Db),
+
+ % verify changes list consistent for all the old seqs
+ lists:foldl(
+ fun(#{seq := Seq} = C, ExpectedChanges) ->
+ Args = BaseArgs#changes_args{since = Seq},
+ {ok, Changes, _EndSeq} = get_changes_feed(Db, Args),
+ ?assertChanges(ExpectedChanges, Changes),
+ [C | ExpectedChanges]
+ end,
+ [],
+ OldChanges
+ ),
+
+ % confirm that old LastSeq respected
+ Args1 = BaseArgs#changes_args{since = OldEndSeq},
+ {ok, Changes1, EndSeq1} = get_changes_feed(Db, Args1),
+ ?assertChanges([], Changes1),
+
+ % confirm that new LastSeq also respected
+ Args2 = BaseArgs#changes_args{since = EndSeq1},
+ {ok, Changes2, EndSeq2} = get_changes_feed(Db, Args2),
+ ?assertChanges([], Changes2),
+ ?assertEqual(EndSeq2, EndSeq1),
+
+ % confirm we didn't lose any changes and have a consistent last seq
+ {ok, Changes3, EndSeq3} = get_changes_feed(Db, BaseArgs),
+ ?assertChanges(OldChanges, Changes3),
+
+ % add some docs
+ add_test_docs(Db, #{docs => [11, 15]}),
+ Args4 = BaseArgs#changes_args{since = EndSeq3},
+ {ok, Changes4, EndSeq4} = get_changes_feed(Db, Args4),
+ AddedChanges = [#{id => ID} || #doc{id = ID} <- docs([11, 15])],
+ ?assertChanges(AddedChanges, Changes4),
+
+ % confirm include_docs and deleted works
+ Args5 = BaseArgs#changes_args{include_docs = true},
+ {ok, Changes5, EndSeq5} = get_changes_feed(Db, Args5),
+ ?assertEqual(EndSeq4, EndSeq5),
+ [SampleChange] = [C || #{id := ID} = C <- Changes5, ID == <<"00005">>],
+ ?assertMatch(#{deleted := true}, SampleChange),
+ ?assertMatch(#{doc := {Body}} when is_list(Body), SampleChange),
+
+ % update and delete some pre and post split docs
+ AllDocs = [couch_doc:from_json_obj(Doc) || #{doc := Doc} <- Changes5],
+ UpdateDocs = lists:filtermap(
+ fun
+ (#doc{id = <<"00002">>}) -> true;
+ (#doc{id = <<"00012">>}) -> true;
+ (#doc{id = <<"00004">>} = Doc) -> {true, Doc#doc{deleted = true}};
+ (#doc{id = <<"00014">>} = Doc) -> {true, Doc#doc{deleted = true}};
+ (_) -> false
+ end,
+ AllDocs
+ ),
+ update_docs(Db, UpdateDocs),
+
+ Args6 = BaseArgs#changes_args{since = EndSeq5},
+ {ok, Changes6, EndSeq6} = get_changes_feed(Db, Args6),
+ UpdatedChanges = [#{id => ID} || #doc{id = ID} <- UpdateDocs],
+ ?assertChanges(UpdatedChanges, Changes6),
+ [#{seq := Seq6} | _] = Changes6,
+ ?assertEqual(EndSeq6, Seq6),
+
+ Args7 = Args6#changes_args{dir = rev, limit = 4},
+ {ok, Changes7, EndSeq7} = get_changes_feed(Db, Args7),
+ ?assertEqual(4, length(Changes7)),
+ [#{seq := Seq7} | _] = Changes7,
+ ?assertEqual(EndSeq7, Seq7)
+ end)}.
continuous_feed_should_work_during_split(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- {UpdaterPid, UpdaterRef} = spawn_monitor(fun() ->
- Updater = fun U({State, I}) ->
- receive
- {get_state, {Pid, Ref}} ->
- Pid ! {state, Ref, {State, I}},
- U({State, I});
- add ->
- DocSpec = #{docs => [I, I]},
- add_test_docs(Db, DocSpec),
- U({State, I + 1});
- split ->
- spawn_monitor(fun() -> split_and_wait(Db) end),
- U({"in_process", I});
- stop ->
- receive {'DOWN', _, process, _, _} -> ok end,
- ok
- end
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ {UpdaterPid, UpdaterRef} = spawn_monitor(fun() ->
+ Updater = fun U({State, I}) ->
+ receive
+ {get_state, {Pid, Ref}} ->
+ Pid ! {state, Ref, {State, I}},
+ U({State, I});
+ add ->
+ DocSpec = #{docs => [I, I]},
+ add_test_docs(Db, DocSpec),
+ U({State, I + 1});
+ split ->
+ spawn_monitor(fun() -> split_and_wait(Db) end),
+ U({"in_process", I});
+ stop ->
+ receive
+ {'DOWN', _, process, _, _} -> ok
+ end,
+ ok
+ end
+ end,
+ Updater({"before", 1})
+ end),
+
+ Callback = fun
+ (start, Acc) ->
+ {ok, Acc};
+ (waiting_for_updates, Acc) ->
+ Ref = make_ref(),
+ UpdaterPid ! {get_state, {self(), Ref}},
+ receive
+ {state, Ref, {State, _}} -> ok
+ end,
+ case {State, length(Acc)} of
+ {"before", N} when N < 5 ->
+ UpdaterPid ! add,
+ {ok, Acc};
+ {"before", _} ->
+ UpdaterPid ! split,
+ {ok, Acc};
+ {"in_process", N} when N < 10 ->
+ UpdaterPid ! add,
+ {ok, Acc};
+ {"in_process", _} ->
+ {ok, Acc}
+ end;
+ (timeout, Acc) ->
+ {ok, Acc};
+ ({change, {Change}}, Acc) ->
+ CM = maps:from_list(Change),
+ {ok, [CM | Acc]};
+ ({stop, EndSeq, _Pending}, Acc) ->
+ % Notice updater is still running
+ {stop, EndSeq, Acc}
end,
- Updater({"before", 1})
- end),
-
- Callback = fun
- (start, Acc) ->
- {ok, Acc};
- (waiting_for_updates, Acc) ->
- Ref = make_ref(),
- UpdaterPid ! {get_state, {self(), Ref}},
- receive {state, Ref, {State, _}} -> ok end,
- case {State, length(Acc)} of
- {"before", N} when N < 5 ->
- UpdaterPid ! add,
- {ok, Acc};
- {"before", _} ->
- UpdaterPid ! split,
- {ok, Acc};
- {"in_process", N} when N < 10 ->
- UpdaterPid ! add,
- {ok, Acc};
- {"in_process", _} ->
- {ok, Acc}
- end;
- (timeout, Acc) ->
- {ok, Acc};
- ({change, {Change}}, Acc) ->
- CM = maps:from_list(Change),
- {ok, [CM | Acc]};
- ({stop, EndSeq, _Pending}, Acc) ->
- % Notice updater is still running
- {stop, EndSeq, Acc}
- end,
- BaseArgs = #changes_args{
- feed = "continuous",
- heartbeat = 100,
- timeout = 1000
- },
- StopResult = get_changes_feed(Db, BaseArgs, Callback),
-
- % Changes feed stopped when source shard was deleted
- ?assertMatch({stop, _, _}, StopResult),
- {stop, StopEndSeq, StopChanges} = StopResult,
-
- % Add 5 extra docs to the db right after changes feed was stopped
- [UpdaterPid ! add || _ <- lists:seq(1, 5)],
-
- % The number of documents that the updater had added
- Ref = make_ref(),
- UpdaterPid ! {get_state, {self(), Ref}},
- DocCount = receive {state, Ref, {_, I}} -> I - 1 end,
-
- UpdaterPid ! stop,
- receive
- {'DOWN', UpdaterRef, process, UpdaterPid, normal} ->
- ok;
- {'DOWN', UpdaterRef, process, UpdaterPid, Error} ->
- erlang:error({test_context_failed, [
- {module, ?MODULE},
- {line, ?LINE},
- {value, Error},
- {reason, "Updater died"}]})
- end,
+ BaseArgs = #changes_args{
+ feed = "continuous",
+ heartbeat = 100,
+ timeout = 1000
+ },
+ StopResult = get_changes_feed(Db, BaseArgs, Callback),
+
+ % Changes feed stopped when source shard was deleted
+ ?assertMatch({stop, _, _}, StopResult),
+ {stop, StopEndSeq, StopChanges} = StopResult,
- AfterArgs = #changes_args{feed = "normal", since = StopEndSeq},
- {ok, AfterChanges, _} = get_changes_feed(Db, AfterArgs),
- DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges],
- ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)],
- ?assertEqual(ExpectedDocIDs, lists:usort(DocIDs))
- end)}.
+ % Add 5 extra docs to the db right after changes feed was stopped
+ [UpdaterPid ! add || _ <- lists:seq(1, 5)],
+
+ % The number of documents that the updater had added
+ Ref = make_ref(),
+ UpdaterPid ! {get_state, {self(), Ref}},
+ DocCount =
+ receive
+ {state, Ref, {_, I}} -> I - 1
+ end,
+
+ UpdaterPid ! stop,
+ receive
+ {'DOWN', UpdaterRef, process, UpdaterPid, normal} ->
+ ok;
+ {'DOWN', UpdaterRef, process, UpdaterPid, Error} ->
+ erlang:error(
+ {test_context_failed, [
+ {module, ?MODULE},
+ {line, ?LINE},
+ {value, Error},
+ {reason, "Updater died"}
+ ]}
+ )
+ end,
+ AfterArgs = #changes_args{feed = "normal", since = StopEndSeq},
+ {ok, AfterChanges, _} = get_changes_feed(Db, AfterArgs),
+ DocIDs = [Id || #{id := Id} <- StopChanges ++ AfterChanges],
+ ExpectedDocIDs = [doc_id(<<>>, N) || N <- lists:seq(1, DocCount)],
+ ?assertEqual(ExpectedDocIDs, lists:usort(DocIDs))
+ end)}.
split_and_wait(Db) ->
[#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
@@ -244,30 +256,34 @@ split_and_wait(Db) ->
ResultShards = lists:sort(mem3:local_shards(Db)),
?assertEqual(2, length(ResultShards)).
-
wait_state(JobId, State) ->
- test_util:wait(fun() ->
- case mem3_reshard:job(JobId) of
- {ok, {Props}} ->
- case couch_util:get_value(job_state, Props) of
- State -> ok;
- _ -> timer:sleep(100), wait
- end;
- {error, not_found} -> timer:sleep(100), wait
- end
- end, 30000).
-
+ test_util:wait(
+ fun() ->
+ case mem3_reshard:job(JobId) of
+ {ok, {Props}} ->
+ case couch_util:get_value(job_state, Props) of
+ State ->
+ ok;
+ _ ->
+ timer:sleep(100),
+ wait
+ end;
+ {error, not_found} ->
+ timer:sleep(100),
+ wait
+ end
+ end,
+ 30000
+ ).
get_changes_feed(Db, Args) ->
get_changes_feed(Db, Args, fun changes_callback/2).
-
get_changes_feed(Db, Args, Callback) ->
with_proc(fun() ->
fabric:changes(Db, Callback, [], Args)
end).
-
changes_callback(start, Acc) ->
{ok, Acc};
changes_callback({change, {Change}}, Acc) ->
@@ -276,28 +292,22 @@ changes_callback({change, {Change}}, Acc) ->
changes_callback({stop, EndSeq, _Pending}, Acc) ->
{ok, Acc, EndSeq}.
-
%% common helpers from here
-
create_db(DbName, Opts) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
delete_db(DbName) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
with_proc(Fun) ->
with_proc(Fun, undefined, 30000).
-
with_proc(Fun, GroupLeader) ->
with_proc(Fun, GroupLeader, 30000).
-
with_proc(Fun, GroupLeader, Timeout) ->
{Pid, Ref} = spawn_monitor(fun() ->
case GroupLeader of
@@ -317,20 +327,21 @@ with_proc(Fun, GroupLeader, Timeout) ->
error({with_proc_timeout, Fun, Timeout})
end.
-
add_test_docs(DbName, #{} = DocSpec) ->
Docs = docs(maps:get(docs, DocSpec, [])),
Res = update_docs(DbName, Docs),
- Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end, lists:zip(Docs, Res)),
+ Docs1 = lists:map(
+ fun({Doc, {ok, {RevPos, Rev}}}) ->
+ Doc#doc{revs = {RevPos, [Rev]}}
+ end,
+ lists:zip(Docs, Res)
+ ),
case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
[] -> ok;
[_ | _] = Deleted -> update_docs(DbName, Deleted)
end,
ok.
-
update_docs(DbName, Docs) ->
with_proc(fun() ->
case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
@@ -339,30 +350,29 @@ update_docs(DbName, Docs) ->
end
end).
-
delete_docs([S, E], Docs) when E >= S ->
ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end, Docs);
+ lists:filtermap(
+ fun(#doc{id = Id} = Doc) ->
+ case lists:member(Id, ToDelete) of
+ true -> {true, Doc#doc{deleted = true}};
+ false -> false
+ end
+ end,
+ Docs
+ );
delete_docs(_, _) ->
[].
-
docs([S, E]) when E >= S ->
[doc(<<"">>, I) || I <- lists:seq(S, E)];
docs(_) ->
[].
-
doc(Pref, Id) ->
Body = [{<<"a">>, <<"b">>}],
doc(Pref, Id, Body, 42).
-
doc(Pref, Id, BodyProps, AttSize) ->
#doc{
id = doc_id(Pref, Id),
@@ -370,20 +380,19 @@ doc(Pref, Id, BodyProps, AttSize) ->
atts = atts(AttSize)
}.
-
doc_id(Pref, Id) ->
IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
<<Pref/binary, IdBin/binary>>.
-
atts(0) ->
[];
-
atts(Size) when is_integer(Size), Size >= 1 ->
- Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])].
+ Data = <<<<"x">> || _ <- lists:seq(1, Size)>>,
+ [
+ couch_att:new([
+ {name, <<"att">>},
+ {type, <<"app/binary">>},
+ {att_len, Size},
+ {data, Data}
+ ])
+ ].
diff --git a/src/mem3/test/eunit/mem3_reshard_test.erl b/src/mem3/test/eunit/mem3_reshard_test.erl
index 65f2b4bb0..1929242bb 100644
--- a/src/mem3/test/eunit/mem3_reshard_test.erl
+++ b/src/mem3/test/eunit/mem3_reshard_test.erl
@@ -12,57 +12,57 @@
-module(mem3_reshard_test).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/src/mem3_reshard.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function
+% for all_docs function
+-include_lib("couch_mrview/include/couch_mrview.hrl").
-define(ID, <<"_id">>).
-define(TIMEOUT, 60).
setup() ->
HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- case HaveDreyfus of false -> ok; true ->
- mock_dreyfus_indices()
+ case HaveDreyfus of
+ false -> ok;
+ true -> mock_dreyfus_indices()
end,
HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
- case HaveHastings of false -> ok; true ->
- mock_hastings_indices()
+ case HaveHastings of
+ false -> ok;
+ true -> mock_hastings_indices()
end,
{Db1, Db2} = {?tempdb(), ?tempdb()},
create_db(Db1, [{q, 1}, {n, 1}]),
PartProps = [{partitioned, true}, {hash, [couch_partition, hash, []]}],
create_db(Db2, [{q, 1}, {n, 1}, {props, PartProps}]),
- config:set("reshard", "retry_interval_sec", "0", _Persist=false),
+ config:set("reshard", "retry_interval_sec", "0", _Persist = false),
#{db1 => Db1, db2 => Db2}.
-
teardown(#{} = Dbs) ->
mem3_reshard:reset_state(),
maps:map(fun(_, Db) -> delete_db(Db) end, Dbs),
- config:delete("reshard", "retry_interval_sec", _Persist=false),
+ config:delete("reshard", "retry_interval_sec", _Persist = false),
meck:unload().
-
start_couch() ->
test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
stop_couch(Ctx) ->
test_util:stop_couch(Ctx).
-
mem3_reshard_db_test_() ->
{
"mem3 shard split db tests",
{
setup,
- fun start_couch/0, fun stop_couch/1,
+ fun start_couch/0,
+ fun stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun split_one_shard/1,
fun split_shard_with_lots_of_purges/1,
@@ -80,187 +80,198 @@ mem3_reshard_db_test_() ->
}
}.
-
% This is a basic test to check that shard splitting preserves documents, and
% db meta props like revs limits and security.
split_one_shard(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1},
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- % Finally compare that the documents are still there after the split
- ?assertEqual(Docs0, Docs1),
-
- % Don't forget about the local docs, but don't include internal checkpoints
- % as some of those are munged and transformed during the split
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{docs => 10, delete => [5, 9], mrview => 1, local => 1},
+ add_test_docs(Db, DocSpec),
+
+ % Save documents before the split
+ Docs0 = get_all_docs(Db),
+ Local0 = get_local_docs(Db),
+
+ % Set some custom metadata properties
+ set_revs_limit(Db, 942),
+ set_purge_infos_limit(Db, 943),
+ SecObj = {[{<<"foo">>, <<"bar">>}]},
+ set_security(Db, SecObj),
+
+ % DbInfo is saved after setting metadata bits
+ % as those could bump the update sequence
+ DbInfo0 = get_db_info(Db),
+
+ % Split the one shard
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ wait_state(JobId, completed),
+
+ % Perform some basic checks that the shard was split
+ Shards1 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(2, length(Shards1)),
+ [#shard{range = R1}, #shard{range = R2}] = Shards1,
+ ?assertEqual([16#00000000, 16#7fffffff], R1),
+ ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+ % Check metadata bits after the split
+ ?assertEqual(942, get_revs_limit(Db)),
+ ?assertEqual(943, get_purge_infos_limit(Db)),
+ ?assertEqual(SecObj, get_security(Db)),
+
+ DbInfo1 = get_db_info(Db),
+ Docs1 = get_all_docs(Db),
+ Local1 = get_local_docs(Db),
+
+ % When comparing db infos, ignore update sequences; they won't be the
+ % same since there are more shards involved after the split
+ ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+ % Update seq prefix number is a sum of all shard update sequences
+ #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
+ #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
+ ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
+
+ % Finally compare that the documents are still there after the split
+ ?assertEqual(Docs0, Docs1),
+
+ % Don't forget about the local docs, but don't include internal checkpoints
+ % as some of those are munged and transformed during the split
+ ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
+ end)}.
% Test to check that shard with high number of purges can be split
split_shard_with_lots_of_purges(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- % Set a low purge infos limit; we are planning to overrun it
- set_purge_infos_limit(Db, 10),
-
- % Add docs 1..20 and purge them
- add_test_docs(Db, #{docs => [1, 20]}),
- IdRevs = maps:fold(fun(Id, #{<<"_rev">> := Rev}, Acc) ->
- [{Id, [Rev]} | Acc]
- end, [], get_all_docs(Db)),
- ?assertMatch({ok, _}, purge_docs(Db, IdRevs)),
-
- % Compact to trim the purge sequence
- ok = compact(Db),
-
- % Add some extra docs, these won't be purged
- add_test_docs(Db, #{docs => [21, 30]}),
- Docs0 = get_all_docs(Db),
-
- % Save db info before splitting
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(10, get_purge_infos_limit(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Finally compare that the documents are still there after the split
- ?assertEqual(Docs0, Docs1)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ % Set a low purge infos limit; we are planning to overrun it
+ set_purge_infos_limit(Db, 10),
+
+ % Add docs 1..20 and purge them
+ add_test_docs(Db, #{docs => [1, 20]}),
+ IdRevs = maps:fold(
+ fun(Id, #{<<"_rev">> := Rev}, Acc) ->
+ [{Id, [Rev]} | Acc]
+ end,
+ [],
+ get_all_docs(Db)
+ ),
+ ?assertMatch({ok, _}, purge_docs(Db, IdRevs)),
+
+ % Compact to trim the purge sequence
+ ok = compact(Db),
+
+ % Add some extra docs, these won't be purged
+ add_test_docs(Db, #{docs => [21, 30]}),
+ Docs0 = get_all_docs(Db),
+
+ % Save db info before splitting
+ DbInfo0 = get_db_info(Db),
+
+ % Split the one shard
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ wait_state(JobId, completed),
+
+ % Perform some basic checks that the shard was split
+ Shards1 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(2, length(Shards1)),
+ [#shard{range = R1}, #shard{range = R2}] = Shards1,
+ ?assertEqual([16#00000000, 16#7fffffff], R1),
+ ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+ % Check metadata bits after the split
+ ?assertEqual(10, get_purge_infos_limit(Db)),
+
+ DbInfo1 = get_db_info(Db),
+ Docs1 = get_all_docs(Db),
+
+ % When comparing db infos, ignore update sequences; they won't be the
+ % same since there are more shards involved after the split
+ ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+ % Finally compare that the documents are still there after the split
+ ?assertEqual(Docs0, Docs1)
+ end)}.
% This test checks that documents added while the shard is being split are not
% lost. The topoff1 state happens before indices are built.
update_docs_before_topoff1(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- add_test_docs(Db, #{docs => 10}),
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ add_test_docs(Db, #{docs => 10}),
- intercept_state(topoff1),
+ intercept_state(topoff1),
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
- receive {JobPid, topoff1} -> ok end,
- add_test_docs(Db, #{docs => [10, 19], local => 1}),
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
- DbInfo0 = get_db_info(Db),
- JobPid ! continue,
-
- wait_state(JobId, completed),
+ receive
+ {JobPid, topoff1} -> ok
+ end,
+ add_test_docs(Db, #{docs => [10, 19], local => 1}),
+ Docs0 = get_all_docs(Db),
+ Local0 = get_local_docs(Db),
+ DbInfo0 = get_db_info(Db),
+ JobPid ! continue,
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
+ wait_state(JobId, completed),
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
+ % Perform some basic checks that the shard was split
+ Shards1 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(2, length(Shards1)),
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+ DbInfo1 = get_db_info(Db),
+ Docs1 = get_all_docs(Db),
+ Local1 = get_local_docs(Db),
- % Update sequence after initial copy with 10 docs would be 10 on each
- % target shard (to match the source) and the total update sequence
- % would have been 20. But then 10 more docs were added (3 might have
- % ended up on one target and 7 on another) so the final update sequence
- % would then be 20 + 10 = 30.
- ?assertMatch(#{<<"update_seq">> := 30}, update_seq_to_num(DbInfo1)),
+ ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
- ?assertEqual(Docs0, Docs1),
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
+ % Update sequence after initial copy with 10 docs would be 10 on each
+ % target shard (to match the source) and the total update sequence
+ % would have been 20. But then 10 more docs were added (3 might have
+ % ended up on one target and 7 on another) so the final update sequence
+ % would then be 20 + 10 = 30.
+ ?assertMatch(#{<<"update_seq">> := 30}, update_seq_to_num(DbInfo1)),
+ ?assertEqual(Docs0, Docs1),
+ ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
+ end)}.
% This tests that indices are built during shard splitting.
indices_are_built(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
-
- add_test_docs(Db, #{docs => 10, mrview => 2, search => 2, geo => 2}),
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- MRViewGroupInfo = get_group_info(Db, <<"_design/mrview00000">>),
- ?assertMatch(#{<<"update_seq">> := 32}, MRViewGroupInfo),
-
- HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
- case HaveDreyfus of false -> ok; true ->
- % 4 because there are 2 indices and 2 target shards
- ?assertEqual(4, meck:num_calls(dreyfus_index, await, 2))
- end,
-
- HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
- case HaveHastings of false -> ok; true ->
- % 4 because there are 2 indices and 2 target shards
- ?assertEqual(4, meck:num_calls(hastings_index, await, 2))
- end
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
+ HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
+
+ add_test_docs(Db, #{docs => 10, mrview => 2, search => 2, geo => 2}),
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ wait_state(JobId, completed),
+ Shards1 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(2, length(Shards1)),
+ MRViewGroupInfo = get_group_info(Db, <<"_design/mrview00000">>),
+ ?assertMatch(#{<<"update_seq">> := 32}, MRViewGroupInfo),
+
+ HaveDreyfus = code:lib_dir(dreyfus) /= {error, bad_name},
+ case HaveDreyfus of
+ false ->
+ ok;
+ true ->
+ % 4 because there are 2 indices and 2 target shards
+ ?assertEqual(4, meck:num_calls(dreyfus_index, await, 2))
+ end,
+
+ HaveHastings = code:lib_dir(hastings) /= {error, bad_name},
+ case HaveHastings of
+ false ->
+ ok;
+ true ->
+ % 4 because there are 2 indices and 2 target shards
+ ?assertEqual(4, meck:num_calls(hastings_index, await, 2))
+ end
+ end)}.
mock_dreyfus_indices() ->
meck:expect(dreyfus_index, design_doc_to_indexes, fun(Doc) ->
@@ -275,7 +286,6 @@ mock_dreyfus_indices() ->
meck:expect(dreyfus_index_manager, get_index, fun(_, _) -> {ok, pid} end),
meck:expect(dreyfus_index, await, fun(_, _) -> ok end).
-
mock_hastings_indices() ->
meck:expect(hastings_index, design_doc_to_indexes, fun(Doc) ->
#doc{body = {BodyProps}} = Doc,
@@ -291,465 +301,484 @@ mock_hastings_indices() ->
% Split partitioned database
split_partitioned_db(#{db2 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{
- pdocs => #{
- <<"PX">> => 5,
- <<"PY">> => 5
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{
+ pdocs => #{
+ <<"PX">> => 5,
+ <<"PY">> => 5
+ },
+ mrview => 1,
+ local => 1
},
- mrview => 1,
- local => 1
- },
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
- PX0 = get_partition_info(Db, <<"PX">>),
- PY0 = get_partition_info(Db, <<"PY">>),
-
- % Split the one shard
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- % Finally compare that documents are still there after the split
- ?assertEqual(Docs0, Docs1),
-
- ?assertEqual(PX0, get_partition_info(Db, <<"PX">>)),
- ?assertEqual(PY0, get_partition_info(Db, <<"PY">>)),
-
- % Don't forget about the local docs, but don't include internal checkpoints
- % as some of those are munged and transformed during the split
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
- end)}.
-
+ add_test_docs(Db, DocSpec),
+
+ % Save documents before the split
+ Docs0 = get_all_docs(Db),
+ Local0 = get_local_docs(Db),
+
+ % Set some custom metadata properties
+ set_revs_limit(Db, 942),
+ set_purge_infos_limit(Db, 943),
+ SecObj = {[{<<"foo">>, <<"bar">>}]},
+ set_security(Db, SecObj),
+
+ % DbInfo is saved after setting metadata bits
+ % as those could bump the update sequence
+ DbInfo0 = get_db_info(Db),
+ PX0 = get_partition_info(Db, <<"PX">>),
+ PY0 = get_partition_info(Db, <<"PY">>),
+
+ % Split the one shard
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ wait_state(JobId, completed),
+
+ % Perform some basic checks that the shard was split
+ Shards1 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(2, length(Shards1)),
+ [#shard{range = R1}, #shard{range = R2}] = Shards1,
+ ?assertEqual([16#00000000, 16#7fffffff], R1),
+ ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+ % Check metadata bits after the split
+ ?assertEqual(942, get_revs_limit(Db)),
+ ?assertEqual(943, get_purge_infos_limit(Db)),
+ ?assertEqual(SecObj, get_security(Db)),
+
+ DbInfo1 = get_db_info(Db),
+ Docs1 = get_all_docs(Db),
+ Local1 = get_local_docs(Db),
+
+ % When comparing db infos, ignore update sequences; they won't be the
+ % same since there are more shards involved after the split
+ ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+ % Update seq prefix number is a sum of all shard update sequences
+ #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
+ #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
+ ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
+
+ % Finally compare that documents are still there after the split
+ ?assertEqual(Docs0, Docs1),
+
+ ?assertEqual(PX0, get_partition_info(Db, <<"PX">>)),
+ ?assertEqual(PY0, get_partition_info(Db, <<"PY">>)),
+
+ % Don't forget about the local docs, but don't include internal checkpoints
+ % as some of those are munged and transformed during the split
+ ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1))
+ end)}.
% Make sure a shard can be split again after it was split once. This checks that
% too many got added to some range, such that on the next split they'd fail to fit
% into any of the new target ranges.
split_twice(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100},
- add_test_docs(Db, DocSpec),
-
- % Save documents before the split
- Docs0 = get_all_docs(Db),
- Local0 = get_local_docs(Db),
-
- % Set some custom metadata properties
- set_revs_limit(Db, 942),
- set_purge_infos_limit(Db, 943),
- SecObj = {[{<<"foo">>, <<"bar">>}]},
- set_security(Db, SecObj),
-
- % DbInfo is saved after setting metadata bits
- % as those could bump the update sequence
- DbInfo0 = get_db_info(Db),
-
- % Split the one shard
- [#shard{name=Shard1}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId1} = mem3_reshard:start_split_job(Shard1),
- wait_state(JobId1, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- % Check metadata bits after the split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo1 = get_db_info(Db),
- Docs1 = get_all_docs(Db),
- Local1 = get_local_docs(Db),
-
- % When comparing db infos, ignore update sequences; they won't be the
- % same since there are more shards involved after the split
- ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
-
- % Update seq prefix number is a sum of all shard update sequences
- #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
- #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
- ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
-
- ?assertEqual(Docs0, Docs1),
- ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)),
-
- % Split the first range again
- [#shard{name=Shard2}, _] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId2} = mem3_reshard:start_split_job(Shard2),
- wait_state(JobId2, completed),
-
- Shards2 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(3, length(Shards2)),
- [R3, R4, R5] = [R || #shard{range = R} <- Shards2],
- ?assertEqual([16#00000000, 16#3fffffff], R3),
- ?assertEqual([16#40000000, 16#7fffffff], R4),
- ?assertEqual([16#80000000, 16#ffffffff], R5),
-
- % Check metadata bits after the second split
- ?assertEqual(942, get_revs_limit(Db)),
- ?assertEqual(943, get_purge_infos_limit(Db)),
- ?assertEqual(SecObj, get_security(Db)),
-
- DbInfo2 = get_db_info(Db),
- Docs2 = get_all_docs(Db),
- Local2 = get_local_docs(Db),
-
- ?assertEqual(without_seqs(DbInfo1), without_seqs(DbInfo2)),
- % Update seq prefix number is a sum of all shard update sequences
- % But only 1 shard out of 2 was split
- #{<<"update_seq">> := UpdateSeq2} = update_seq_to_num(DbInfo2),
- ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2),
- ?assertEqual(Docs1, Docs2),
- ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ DocSpec = #{docs => 100, delete => [80, 99], mrview => 2, local => 100},
+ add_test_docs(Db, DocSpec),
+
+ % Save documents before the split
+ Docs0 = get_all_docs(Db),
+ Local0 = get_local_docs(Db),
+
+ % Set some custom metadata properties
+ set_revs_limit(Db, 942),
+ set_purge_infos_limit(Db, 943),
+ SecObj = {[{<<"foo">>, <<"bar">>}]},
+ set_security(Db, SecObj),
+
+ % DbInfo is saved after setting metadata bits
+ % as those could bump the update sequence
+ DbInfo0 = get_db_info(Db),
+
+ % Split the one shard
+ [#shard{name = Shard1}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId1} = mem3_reshard:start_split_job(Shard1),
+ wait_state(JobId1, completed),
+
+ % Perform some basic checks that the shard was split
+ Shards1 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(2, length(Shards1)),
+ [#shard{range = R1}, #shard{range = R2}] = Shards1,
+ ?assertEqual([16#00000000, 16#7fffffff], R1),
+ ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+ % Check metadata bits after the split
+ ?assertEqual(942, get_revs_limit(Db)),
+ ?assertEqual(943, get_purge_infos_limit(Db)),
+ ?assertEqual(SecObj, get_security(Db)),
+
+ DbInfo1 = get_db_info(Db),
+ Docs1 = get_all_docs(Db),
+ Local1 = get_local_docs(Db),
+
+ % When comparing db infos, ignore update sequences; they won't be the
+ % same since there are more shards involved after the split
+ ?assertEqual(without_seqs(DbInfo0), without_seqs(DbInfo1)),
+
+ % Update seq prefix number is a sum of all shard update sequences
+ #{<<"update_seq">> := UpdateSeq0} = update_seq_to_num(DbInfo0),
+ #{<<"update_seq">> := UpdateSeq1} = update_seq_to_num(DbInfo1),
+ ?assertEqual(UpdateSeq0 * 2, UpdateSeq1),
+
+ ?assertEqual(Docs0, Docs1),
+ ?assertEqual(without_meta_locals(Local0), without_meta_locals(Local1)),
+
+ % Split the first range again
+ [#shard{name = Shard2}, _] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId2} = mem3_reshard:start_split_job(Shard2),
+ wait_state(JobId2, completed),
+
+ Shards2 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(3, length(Shards2)),
+ [R3, R4, R5] = [R || #shard{range = R} <- Shards2],
+ ?assertEqual([16#00000000, 16#3fffffff], R3),
+ ?assertEqual([16#40000000, 16#7fffffff], R4),
+ ?assertEqual([16#80000000, 16#ffffffff], R5),
+
+ % Check metadata bits after the second split
+ ?assertEqual(942, get_revs_limit(Db)),
+ ?assertEqual(943, get_purge_infos_limit(Db)),
+ ?assertEqual(SecObj, get_security(Db)),
+
+ DbInfo2 = get_db_info(Db),
+ Docs2 = get_all_docs(Db),
+ Local2 = get_local_docs(Db),
+
+ ?assertEqual(without_seqs(DbInfo1), without_seqs(DbInfo2)),
+ % Update seq prefix number is a sum of all shard update sequences
+ % But only 1 shard out of 2 was split
+ #{<<"update_seq">> := UpdateSeq2} = update_seq_to_num(DbInfo2),
+ ?assertEqual(trunc(UpdateSeq1 * 1.5), UpdateSeq2),
+ ?assertEqual(Docs1, Docs2),
+ ?assertEqual(without_meta_locals(Local1), without_meta_locals(Local2))
+ end)}.
couch_events_are_emitted(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- couch_event:register_all(self()),
-
- % Split the one shard
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- wait_state(JobId, completed),
-
- % Perform some basic checks that the shard was split
- Shards1 = lists:sort(mem3:local_shards(Db)),
- ?assertEqual(2, length(Shards1)),
- [#shard{range = R1}, #shard{range = R2}] = Shards1,
- ?assertEqual([16#00000000, 16#7fffffff], R1),
- ?assertEqual([16#80000000, 16#ffffffff], R2),
-
- Flush = fun F(Events) ->
- receive
- {'$couch_event', DbName, Event} when Event =:= deleted
- orelse Event =:= updated ->
- case binary:match(DbName, Db) of
- nomatch -> F(Events);
- {_, _} -> F([Event | Events])
- end
- after 0 ->
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ couch_event:register_all(self()),
+
+ % Split the one shard
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ wait_state(JobId, completed),
+
+ % Perform some basic checks that the shard was split
+ Shards1 = lists:sort(mem3:local_shards(Db)),
+ ?assertEqual(2, length(Shards1)),
+ [#shard{range = R1}, #shard{range = R2}] = Shards1,
+ ?assertEqual([16#00000000, 16#7fffffff], R1),
+ ?assertEqual([16#80000000, 16#ffffffff], R2),
+
+ Flush = fun F(Events) ->
+ receive
+ {'$couch_event', DbName, Event} when
+ Event =:= deleted orelse
+ Event =:= updated
+ ->
+ case binary:match(DbName, Db) of
+ nomatch -> F(Events);
+ {_, _} -> F([Event | Events])
+ end
+ after 0 ->
lists:reverse(Events)
- end
- end,
- Events = Flush([]),
- StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events),
- ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted),
- couch_event:unregister(self())
- end)}.
-
+ end
+ end,
+ Events = Flush([]),
+ StartAtDeleted = lists:dropwhile(fun(E) -> E =/= deleted end, Events),
+ ?assertMatch([deleted, deleted, updated, updated | _], StartAtDeleted),
+ couch_event:unregister(self())
+ end)}.
retries_work(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- meck:expect(couch_db_split, split, fun(_, _, _) ->
- error(kapow)
- end),
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ meck:expect(couch_db_split, split, fun(_, _, _) ->
+ error(kapow)
+ end),
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
-
- wait_state(JobId, failed),
- ?assertEqual(3, meck:num_calls(couch_db_split, split, 3))
- end)}.
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ wait_state(JobId, failed),
+ ?assertEqual(3, meck:num_calls(couch_db_split, split, 3))
+ end)}.
target_reset_in_initial_copy(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
- Job = #job{
- source = Src,
- target = [#shard{name= <<"t1">>}, #shard{name = <<"t2">>}],
- job_state = running,
- split_state = initial_copy
- },
- meck:expect(couch_db_split, cleanup_target, 2, ok),
- meck:expect(couch_server, exists, fun
- (<<"t1">>) -> true;
- (<<"t2">>) -> true;
- (DbName) -> meck:passthrough([DbName])
- end),
- JobPid = spawn(fun() -> mem3_reshard_job:initial_copy_impl(Job) end),
- meck:wait(2, couch_db_split, cleanup_target, ['_', '_'], 5000),
- exit(JobPid, kill),
- ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [#shard{} = Src] = lists:sort(mem3:local_shards(Db)),
+ Job = #job{
+ source = Src,
+ target = [#shard{name = <<"t1">>}, #shard{name = <<"t2">>}],
+ job_state = running,
+ split_state = initial_copy
+ },
+ meck:expect(couch_db_split, cleanup_target, 2, ok),
+ meck:expect(couch_server, exists, fun
+ (<<"t1">>) -> true;
+ (<<"t2">>) -> true;
+ (DbName) -> meck:passthrough([DbName])
+ end),
+ JobPid = spawn(fun() -> mem3_reshard_job:initial_copy_impl(Job) end),
+ meck:wait(2, couch_db_split, cleanup_target, ['_', '_'], 5000),
+ exit(JobPid, kill),
+ ?assertEqual(2, meck:num_calls(couch_db_split, cleanup_target, 2))
+ end)}.
split_an_incomplete_shard_map(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- meck:expect(mem3_util, calculate_max_n, 1, 0),
- ?assertMatch({error, {not_enough_shard_copies, _}},
- mem3_reshard:start_split_job(Shard))
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ meck:expect(mem3_util, calculate_max_n, 1, 0),
+ ?assertMatch(
+ {error, {not_enough_shard_copies, _}},
+ mem3_reshard:start_split_job(Shard)
+ )
+ end)}.
% Opening a target db in the initial copy phase will throw an error
target_shards_are_locked(#{db1 := Db}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- add_test_docs(Db, #{docs => 10}),
-
- % Make the job stop right when it is about to copy the docs
- TestPid = self(),
- meck:new(couch_db, [passthrough]),
- meck:expect(couch_db, start_link, fun(Engine, TName, FilePath, Opts) ->
- TestPid ! {start_link, self(), TName},
- receive
- continue ->
- meck:passthrough([Engine, TName, FilePath, Opts])
- end
- end),
-
- [#shard{name=Shard}] = lists:sort(mem3:local_shards(Db)),
- {ok, JobId} = mem3_reshard:start_split_job(Shard),
- {Target0, JobPid} = receive
- {start_link, Pid, TName} -> {TName, Pid}
- end,
- ?assertEqual({error, {locked, <<"shard splitting">>}},
- couch_db:open_int(Target0, [])),
-
- % Send two continues for two targets
- JobPid ! continue,
- JobPid ! continue,
-
- wait_state(JobId, completed)
- end)}.
-
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ add_test_docs(Db, #{docs => 10}),
+
+ % Make the job stop right when it is about to copy the docs
+ TestPid = self(),
+ meck:new(couch_db, [passthrough]),
+ meck:expect(couch_db, start_link, fun(Engine, TName, FilePath, Opts) ->
+ TestPid ! {start_link, self(), TName},
+ receive
+ continue ->
+ meck:passthrough([Engine, TName, FilePath, Opts])
+ end
+ end),
+
+ [#shard{name = Shard}] = lists:sort(mem3:local_shards(Db)),
+ {ok, JobId} = mem3_reshard:start_split_job(Shard),
+ {Target0, JobPid} =
+ receive
+ {start_link, Pid, TName} -> {TName, Pid}
+ end,
+ ?assertEqual(
+ {error, {locked, <<"shard splitting">>}},
+ couch_db:open_int(Target0, [])
+ ),
+
+ % Send two continues for two targets
+ JobPid ! continue,
+ JobPid ! continue,
+
+ wait_state(JobId, completed)
+ end)}.
intercept_state(State) ->
TestPid = self(),
meck:new(mem3_reshard_job, [passthrough]),
meck:expect(mem3_reshard_job, checkpoint_done, fun(Job) ->
- case Job#job.split_state of
- State ->
- TestPid ! {self(), State},
- receive
- continue -> meck:passthrough([Job]);
- cancel -> ok
- end;
- _ ->
- meck:passthrough([Job])
- end
- end).
-
+ case Job#job.split_state of
+ State ->
+ TestPid ! {self(), State},
+ receive
+ continue -> meck:passthrough([Job]);
+ cancel -> ok
+ end;
+ _ ->
+ meck:passthrough([Job])
+ end
+ end).
wait_state(JobId, State) ->
- test_util:wait(fun() ->
+ test_util:wait(
+ fun() ->
case mem3_reshard:job(JobId) of
{ok, {Props}} ->
case couch_util:get_value(job_state, Props) of
- State -> ok;
- _ -> timer:sleep(100), wait
+ State ->
+ ok;
+ _ ->
+ timer:sleep(100),
+ wait
end;
- {error, not_found} -> timer:sleep(100), wait
+ {error, not_found} ->
+ timer:sleep(100),
+ wait
end
- end, 30000).
-
+ end,
+ 30000
+ ).
set_revs_limit(DbName, Limit) ->
with_proc(fun() -> fabric:set_revs_limit(DbName, Limit, [?ADMIN_CTX]) end).
-
get_revs_limit(DbName) ->
with_proc(fun() -> fabric:get_revs_limit(DbName) end).
-
get_purge_infos_limit(DbName) ->
with_proc(fun() -> fabric:get_purge_infos_limit(DbName) end).
-
set_purge_infos_limit(DbName, Limit) ->
with_proc(fun() ->
fabric:set_purge_infos_limit(DbName, Limit, [?ADMIN_CTX])
end).
-
purge_docs(DbName, DocIdRevs) ->
with_proc(fun() ->
fabric:purge_docs(DbName, DocIdRevs, [])
end).
-
compact(DbName) ->
InitFileSize = get_db_file_size(DbName),
ok = with_proc(fun() -> fabric:compact(DbName) end),
- test_util:wait(fun() ->
- case {compact_running(DbName), get_db_file_size(DbName)} of
- {true, _} -> wait;
- {false, FileSize} when FileSize == InitFileSize -> wait;
- {false, FileSize} when FileSize < InitFileSize -> ok
- end
- end, 5000, 200).
-
+ test_util:wait(
+ fun() ->
+ case {compact_running(DbName), get_db_file_size(DbName)} of
+ {true, _} -> wait;
+ {false, FileSize} when FileSize == InitFileSize -> wait;
+ {false, FileSize} when FileSize < InitFileSize -> ok
+ end
+ end,
+ 5000,
+ 200
+ ).
compact_running(DbName) ->
{ok, DbInfo} = with_proc(fun() -> fabric:get_db_info(DbName) end),
#{<<"compact_running">> := CompactRunning} = to_map(DbInfo),
CompactRunning.
-
get_db_file_size(DbName) ->
{ok, DbInfo} = with_proc(fun() -> fabric:get_db_info(DbName) end),
#{<<"sizes">> := #{<<"file">> := FileSize}} = to_map(DbInfo),
FileSize.
-
set_security(DbName, SecObj) ->
with_proc(fun() -> fabric:set_security(DbName, SecObj) end).
-
get_security(DbName) ->
with_proc(fun() -> fabric:get_security(DbName, [?ADMIN_CTX]) end).
-
get_db_info(DbName) ->
with_proc(fun() ->
{ok, Info} = fabric:get_db_info(DbName),
- maps:with([
- <<"db_name">>, <<"doc_count">>, <<"props">>, <<"doc_del_count">>,
- <<"update_seq">>, <<"purge_seq">>, <<"disk_format_version">>
- ], to_map(Info))
+ maps:with(
+ [
+ <<"db_name">>,
+ <<"doc_count">>,
+ <<"props">>,
+ <<"doc_del_count">>,
+ <<"update_seq">>,
+ <<"purge_seq">>,
+ <<"disk_format_version">>
+ ],
+ to_map(Info)
+ )
end).
-
get_group_info(DbName, DesignId) ->
with_proc(fun() ->
{ok, GInfo} = fabric:get_view_group_info(DbName, DesignId),
- maps:with([
- <<"language">>, <<"purge_seq">>, <<"signature">>, <<"update_seq">>
- ], to_map(GInfo))
+ maps:with(
+ [
+ <<"language">>, <<"purge_seq">>, <<"signature">>, <<"update_seq">>
+ ],
+ to_map(GInfo)
+ )
end).
-
get_partition_info(DbName, Partition) ->
with_proc(fun() ->
{ok, PInfo} = fabric:get_partition_info(DbName, Partition),
- maps:with([
- <<"db_name">>, <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
- ], to_map(PInfo))
+ maps:with(
+ [
+ <<"db_name">>, <<"doc_count">>, <<"doc_del_count">>, <<"partition">>
+ ],
+ to_map(PInfo)
+ )
end).
-
get_all_docs(DbName) ->
get_all_docs(DbName, #mrargs{}).
-
get_all_docs(DbName, #mrargs{} = QArgs0) ->
GL = erlang:group_leader(),
- with_proc(fun() ->
- Cb = fun
- ({row, Props}, Acc) ->
- Doc = to_map(couch_util:get_value(doc, Props)),
- #{?ID := Id} = Doc,
- {ok, Acc#{Id => Doc}};
- ({meta, _}, Acc) -> {ok, Acc};
- (complete, Acc) -> {ok, Acc}
+ with_proc(
+ fun() ->
+ Cb = fun
+ ({row, Props}, Acc) ->
+ Doc = to_map(couch_util:get_value(doc, Props)),
+ #{?ID := Id} = Doc,
+ {ok, Acc#{Id => Doc}};
+ ({meta, _}, Acc) ->
+ {ok, Acc};
+ (complete, Acc) ->
+ {ok, Acc}
+ end,
+ QArgs = QArgs0#mrargs{include_docs = true},
+ {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
+ Docs
end,
- QArgs = QArgs0#mrargs{include_docs = true},
- {ok, Docs} = fabric:all_docs(DbName, Cb, #{}, QArgs),
- Docs
- end, GL).
-
+ GL
+ ).
get_local_docs(DbName) ->
LocalNS = {namespace, <<"_local">>},
- maps:map(fun(_, Doc) ->
- maps:without([<<"_rev">>], Doc)
- end, get_all_docs(DbName, #mrargs{extra = [LocalNS]})).
-
+ maps:map(
+ fun(_, Doc) ->
+ maps:without([<<"_rev">>], Doc)
+ end,
+ get_all_docs(DbName, #mrargs{extra = [LocalNS]})
+ ).
without_seqs(#{} = InfoMap) ->
maps:without([<<"update_seq">>, <<"purge_seq">>], InfoMap).
-
without_meta_locals(#{} = Local) ->
- maps:filter(fun
- (<<"_local/purge-mrview-", _/binary>>, _) -> false;
- (<<"_local/shard-sync-", _/binary>>, _) -> false;
- (_, _) -> true
- end, Local).
-
+ maps:filter(
+ fun
+ (<<"_local/purge-mrview-", _/binary>>, _) -> false;
+ (<<"_local/shard-sync-", _/binary>>, _) -> false;
+ (_, _) -> true
+ end,
+ Local
+ ).
update_seq_to_num(#{} = InfoMap) ->
- maps:map(fun
- (<<"update_seq">>, Seq) -> seq_to_num(Seq);
- (<<"purge_seq">>, PSeq) -> seq_to_num(PSeq);
- (_, V) -> V
- end, InfoMap).
-
+ maps:map(
+ fun
+ (<<"update_seq">>, Seq) -> seq_to_num(Seq);
+ (<<"purge_seq">>, PSeq) -> seq_to_num(PSeq);
+ (_, V) -> V
+ end,
+ InfoMap
+ ).
seq_to_num(Seq) ->
[SeqNum, _] = binary:split(Seq, <<"-">>),
binary_to_integer(SeqNum).
-
to_map([_ | _] = Props) ->
to_map({Props});
-
to_map({[_ | _]} = EJson) ->
- jiffy:decode(jiffy:encode(EJson), [return_maps]).
-
+ jiffy:decode(jiffy:encode(EJson), [return_maps]).
create_db(DbName, Opts) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
delete_db(DbName) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
with_proc(Fun) ->
with_proc(Fun, undefined, 30000).
-
with_proc(Fun, GroupLeader) ->
with_proc(Fun, GroupLeader, 30000).
-
with_proc(Fun, GroupLeader, Timeout) ->
{Pid, Ref} = spawn_monitor(fun() ->
case GroupLeader of
@@ -767,27 +796,29 @@ with_proc(Fun, GroupLeader, Timeout) ->
erlang:demonitor(Ref, [flush]),
exit(Pid, kill),
error({with_proc_timeout, Fun, Timeout})
- end.
-
+ end.
add_test_docs(DbName, #{} = DocSpec) ->
- Docs = docs(maps:get(docs, DocSpec, []))
- ++ pdocs(maps:get(pdocs, DocSpec, #{}))
- ++ ddocs(mrview, maps:get(mrview, DocSpec, []))
- ++ ddocs(search, maps:get(search, DocSpec, []))
- ++ ddocs(geo, maps:get(geo, DocSpec, []))
- ++ ldocs(maps:get(local, DocSpec, [])),
+ Docs =
+ docs(maps:get(docs, DocSpec, [])) ++
+ pdocs(maps:get(pdocs, DocSpec, #{})) ++
+ ddocs(mrview, maps:get(mrview, DocSpec, [])) ++
+ ddocs(search, maps:get(search, DocSpec, [])) ++
+ ddocs(geo, maps:get(geo, DocSpec, [])) ++
+ ldocs(maps:get(local, DocSpec, [])),
Res = update_docs(DbName, Docs),
- Docs1 = lists:map(fun({Doc, {ok, {RevPos, Rev}}}) ->
- Doc#doc{revs = {RevPos, [Rev]}}
- end, lists:zip(Docs, Res)),
+ Docs1 = lists:map(
+ fun({Doc, {ok, {RevPos, Rev}}}) ->
+ Doc#doc{revs = {RevPos, [Rev]}}
+ end,
+ lists:zip(Docs, Res)
+ ),
case delete_docs(maps:get(delete, DocSpec, []), Docs1) of
[] -> ok;
[_ | _] = Deleted -> update_docs(DbName, Deleted)
end,
ok.
-
update_docs(DbName, Docs) ->
with_proc(fun() ->
case fabric:update_docs(DbName, Docs, [?ADMIN_CTX]) of
@@ -796,29 +827,32 @@ update_docs(DbName, Docs) ->
end
end).
-
delete_docs([S, E], Docs) when E >= S ->
ToDelete = [doc_id(<<"">>, I) || I <- lists:seq(S, E)],
- lists:filtermap(fun(#doc{id = Id} = Doc) ->
- case lists:member(Id, ToDelete) of
- true -> {true, Doc#doc{deleted = true}};
- false -> false
- end
- end, Docs);
+ lists:filtermap(
+ fun(#doc{id = Id} = Doc) ->
+ case lists:member(Id, ToDelete) of
+ true -> {true, Doc#doc{deleted = true}};
+ false -> false
+ end
+ end,
+ Docs
+ );
delete_docs(_, _) ->
[].
-
pdocs(#{} = PMap) ->
- maps:fold(fun(Part, DocSpec, DocsAcc) ->
- docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
- end, [], PMap).
-
+ maps:fold(
+ fun(Part, DocSpec, DocsAcc) ->
+ docs(DocSpec, <<Part/binary, ":">>) ++ DocsAcc
+ end,
+ [],
+ PMap
+ ).
docs(DocSpec) ->
docs(DocSpec, <<"">>).
-
docs(N, Prefix) when is_integer(N), N > 0 ->
docs([0, N - 1], Prefix);
docs([S, E], Prefix) when E >= S ->
@@ -835,7 +869,6 @@ ddocs(Type, [S, E]) when E >= S ->
ddocs(_, _) ->
[].
-
ldocs(N) when is_integer(N), N > 0 ->
ldocs([0, N - 1]);
ldocs([S, E]) when E >= S ->
@@ -843,13 +876,10 @@ ldocs([S, E]) when E >= S ->
ldocs(_) ->
[].
-
-
doc(Pref, Id) ->
Body = bodyprops(),
doc(Pref, Id, Body, 42).
-
doc(Pref, Id, BodyProps, AttSize) ->
#doc{
id = doc_id(Pref, Id),
@@ -857,58 +887,60 @@ doc(Pref, Id, BodyProps, AttSize) ->
atts = atts(AttSize)
}.
-
doc_id(Pref, Id) ->
IdBin = iolist_to_binary(io_lib:format("~5..0B", [Id])),
<<Pref/binary, IdBin/binary>>.
-
ddprop(mrview) ->
[
- {<<"views">>, {[
- {<<"v1">>, {[
- {<<"map">>, <<"function(d){emit(d);}">>}
- ]}}
- ]}}
+ {<<"views">>,
+ {[
+ {<<"v1">>,
+ {[
+ {<<"map">>, <<"function(d){emit(d);}">>}
+ ]}}
+ ]}}
];
-
ddprop(geo) ->
[
- {<<"st_indexes">>, {[
- {<<"area">>, {[
- {<<"analyzer">>, <<"standard">>},
- {<<"index">>, <<"function(d){if(d.g){st_index(d.g)}}">> }
+ {<<"st_indexes">>,
+ {[
+ {<<"area">>,
+ {[
+ {<<"analyzer">>, <<"standard">>},
+ {<<"index">>, <<"function(d){if(d.g){st_index(d.g)}}">>}
+ ]}}
]}}
- ]}}
];
-
ddprop(search) ->
[
- {<<"indexes">>, {[
- {<<"types">>, {[
- {<<"index">>, <<"function(d){if(d.g){st_index(d.g.type)}}">>}
+ {<<"indexes">>,
+ {[
+ {<<"types">>,
+ {[
+ {<<"index">>, <<"function(d){if(d.g){st_index(d.g.type)}}">>}
+ ]}}
]}}
- ]}}
- ].
-
+ ].
bodyprops() ->
[
- {<<"g">>, {[
- {<<"type">>, <<"Polygon">>},
- {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
- ]}}
- ].
-
+ {<<"g">>,
+ {[
+ {<<"type">>, <<"Polygon">>},
+ {<<"coordinates">>, [[[-71.0, 48.4], [-70.0, 48.4], [-71.0, 48.4]]]}
+ ]}}
+ ].
atts(0) ->
[];
-
atts(Size) when is_integer(Size), Size >= 1 ->
- Data = << <<"x">> || _ <- lists:seq(1, Size) >>,
- [couch_att:new([
- {name, <<"att">>},
- {type, <<"app/binary">>},
- {att_len, Size},
- {data, Data}
- ])].
+ Data = <<<<"x">> || _ <- lists:seq(1, Size)>>,
+ [
+ couch_att:new([
+ {name, <<"att">>},
+ {type, <<"app/binary">>},
+ {att_len, Size},
+ {data, Data}
+ ])
+ ].
diff --git a/src/mem3/test/eunit/mem3_ring_prop_tests.erl b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
index 51d8f10bf..3e2310b21 100644
--- a/src/mem3/test/eunit/mem3_ring_prop_tests.erl
+++ b/src/mem3/test/eunit/mem3_ring_prop_tests.erl
@@ -12,30 +12,32 @@
-module(mem3_ring_prop_tests).
-
-ifdef(WITH_PROPER).
-include_lib("couch/include/couch_eunit_proper.hrl").
-
property_test_() ->
?EUNIT_QUICKCHECK(60).
-
% Properties
prop_get_ring_with_connected_intervals() ->
- ?FORALL({Start, End}, oneof(ranges()),
- ?FORALL(Intervals, g_connected_intervals(Start, End),
+ ?FORALL(
+ {Start, End},
+ oneof(ranges()),
+ ?FORALL(
+ Intervals,
+ g_connected_intervals(Start, End),
mem3_util:get_ring(Intervals, Start, End) =:= lists:sort(Intervals)
)
).
-
prop_get_ring_connected_plus_random_intervals() ->
- ?FORALL({Intervals, Extra}, {g_connected_intervals(1, 100),
- g_random_intervals(1, 100)},
- ?IMPLIES(sets:is_disjoint(endpoints(Intervals), endpoints(Extra)),
+ ?FORALL(
+ {Intervals, Extra},
+ {g_connected_intervals(1, 100), g_random_intervals(1, 100)},
+ ?IMPLIES(
+ sets:is_disjoint(endpoints(Intervals), endpoints(Extra)),
begin
AllInts = Intervals ++ Extra,
Ring = mem3_util:get_ring(AllInts, 1, 100),
@@ -44,10 +46,13 @@ prop_get_ring_connected_plus_random_intervals() ->
)
).
-
prop_get_ring_connected_with_sub_intervals() ->
- ?FORALL(Intervals, g_connected_intervals(1, 100),
- ?FORALL(SubIntervals, g_subintervals(Intervals),
+ ?FORALL(
+ Intervals,
+ g_connected_intervals(1, 100),
+ ?FORALL(
+ SubIntervals,
+ g_subintervals(Intervals),
begin
AllInts = Intervals ++ SubIntervals,
Ring = mem3_util:get_ring(AllInts, 1, 100),
@@ -56,94 +61,98 @@ prop_get_ring_connected_with_sub_intervals() ->
)
).
-
prop_get_ring_with_disconnected_intervals() ->
- ?FORALL({Start, End}, oneof(ranges()),
- ?FORALL(Intervals, g_disconnected_intervals(Start, End),
+ ?FORALL(
+ {Start, End},
+ oneof(ranges()),
+ ?FORALL(
+ Intervals,
+ g_disconnected_intervals(Start, End),
mem3_util:get_ring(Intervals, Start, End) =:= []
)
).
-
% Generators
ranges() ->
[{1, 10}, {0, 2 bsl 31 - 1}, {2 bsl 31 - 10, 2 bsl 31 - 1}].
-
g_connected_intervals(Begin, End) ->
?SIZED(Size, g_connected_intervals(Begin, End, 5 * Size)).
-
g_connected_intervals(Begin, End, Split) when Begin =< End ->
- ?LET(N, choose(0, Split),
- begin
- if
- N == 0 ->
- [{Begin, End}];
- N > 0 ->
- Ns = lists:seq(1, N - 1),
- Bs = lists:usort([rand_range(Begin, End) || _ <- Ns]),
- Es = [B - 1 || B <- Bs],
- shuffle(lists:zip([Begin] ++ Bs, Es ++ [End]))
+ ?LET(
+ N,
+ choose(0, Split),
+ begin
+ if
+ N == 0 ->
+ [{Begin, End}];
+ N > 0 ->
+ Ns = lists:seq(1, N - 1),
+ Bs = lists:usort([rand_range(Begin, End) || _ <- Ns]),
+ Es = [B - 1 || B <- Bs],
+ shuffle(lists:zip([Begin] ++ Bs, Es ++ [End]))
+ end
end
- end).
-
+ ).
g_non_trivial_connected_intervals(Begin, End, Split) ->
- ?SUCHTHAT(Connected, g_connected_intervals(Begin, End, Split),
- length(Connected) > 1).
-
+ ?SUCHTHAT(
+ Connected,
+ g_connected_intervals(Begin, End, Split),
+ length(Connected) > 1
+ ).
g_disconnected_intervals(Begin, End) ->
?SIZED(Size, g_disconnected_intervals(Begin, End, Size)).
-
g_disconnected_intervals(Begin, End, Split) when Begin =< End ->
- ?LET(Connected, g_non_trivial_connected_intervals(Begin, End, Split),
- begin
- I = rand:uniform(length(Connected)) - 1,
- {Before, [_ | After]} = lists:split(I, Connected),
- Before ++ After
- end).
-
+ ?LET(
+ Connected,
+ g_non_trivial_connected_intervals(Begin, End, Split),
+ begin
+ I = rand:uniform(length(Connected)) - 1,
+ {Before, [_ | After]} = lists:split(I, Connected),
+ Before ++ After
+ end
+ ).
g_subintervals(Intervals) ->
lists:foldl(fun(R, Acc) -> split_interval(R) ++ Acc end, [], Intervals).
-
split_interval({B, E}) when E - B >= 2 ->
- E1 = rand_range(B, E) - 1,
- B1 = E1 + 1,
- [{B, E1}, {B1, E}];
-
+ E1 = rand_range(B, E) - 1,
+ B1 = E1 + 1,
+ [{B, E1}, {B1, E}];
split_interval(_Range) ->
[].
-
g_random_intervals(Start, End) ->
- ?LET(N, choose(1, 10),
- begin
- [begin
- B = rand_range(Start, End),
- E = rand_range(B, End),
- {B, E}
- end || _ <- lists:seq(1, N)]
- end).
-
+ ?LET(
+ N,
+ choose(1, 10),
+ begin
+ [
+ begin
+ B = rand_range(Start, End),
+ E = rand_range(B, End),
+ {B, E}
+ end
+ || _ <- lists:seq(1, N)
+ ]
+ end
+ ).
rand_range(B, B) ->
B;
-
rand_range(B, E) ->
B + rand:uniform(E - B).
-
shuffle(L) ->
Tagged = [{rand:uniform(), X} || X <- L],
[X || {_, X} <- lists:sort(Tagged)].
-
endpoints(Ranges) ->
{Begins, Ends} = lists:unzip(Ranges),
sets:from_list(Begins ++ Ends).
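As a reading aid for the properties above: `mem3_util:get_ring/3` is expected to return the sorted intervals when they tile the whole `{Start, End}` range without gaps, and `[]` otherwise. A minimal standalone sketch of that "connected" condition, with a made-up module name and no PropEr dependency:
```
-module(ring_sketch).
-export([connected/3]).

%% True when the sorted intervals cover Start..End with each range starting
%% exactly one past the previous range's end.
connected(Intervals, Start, End) ->
    check(lists:sort(Intervals), Start, End).

check([{Start, End}], Start, End) ->
    true;
check([{Start, E} | Rest], Start, End) when E < End ->
    check(Rest, E + 1, End);
check(_, _, _) ->
    false.
```
`ring_sketch:connected([{1, 3}, {4, 10}], 1, 10)` is `true`, while `[{1, 3}, {5, 10}]` leaves a gap and yields `false`, mirroring why the disconnected-intervals property expects `[]`.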
diff --git a/src/mem3/test/eunit/mem3_seeds_test.erl b/src/mem3/test/eunit/mem3_seeds_test.erl
index 1c1e2fed6..2c9c1d383 100644
--- a/src/mem3/test/eunit/mem3_seeds_test.erl
+++ b/src/mem3/test/eunit/mem3_seeds_test.erl
@@ -61,8 +61,10 @@ check_nodelist() ->
check_local_dbs() ->
LocalDbs = mem3_sync:local_dbs(),
{ok, _} = couch_server:create(<<"_users">>, []),
- ?assertEqual(lists:append(LocalDbs, [<<"_users">>]),
- mem3_sync:local_dbs()).
+ ?assertEqual(
+ lists:append(LocalDbs, [<<"_users">>]),
+ mem3_sync:local_dbs()
+ ).
cleanup() ->
application:stop(mem3),
diff --git a/src/mem3/test/eunit/mem3_shards_test.erl b/src/mem3/test/eunit/mem3_shards_test.erl
index 9c9bbb402..6d2766fa2 100644
--- a/src/mem3/test/eunit/mem3_shards_test.erl
+++ b/src/mem3/test/eunit/mem3_shards_test.erl
@@ -12,11 +12,11 @@
-module(mem3_shards_test).
-
-include_lib("couch/include/couch_eunit.hrl").
-include_lib("couch/include/couch_db.hrl").
-include_lib("mem3/src/mem3_reshard.hrl").
--include_lib("couch_mrview/include/couch_mrview.hrl"). % for all_docs function
+% for all_docs function
+-include_lib("couch_mrview/include/couch_mrview.hrl").
-define(ID, <<"_id">>).
-define(TIMEOUT, 60).
@@ -28,28 +28,26 @@ setup() ->
{ok, DbDoc} = mem3_util:open_db_doc(DbName),
#{dbname => DbName, dbdoc => DbDoc}.
-
teardown(#{dbname := DbName}) ->
delete_db(DbName).
-
start_couch() ->
test_util:start_couch(?CONFIG_CHAIN, [mem3, fabric]).
-
stop_couch(Ctx) ->
test_util:stop_couch(Ctx).
-
mem3_shards_db_create_props_test_() ->
{
"mem3 shards partition query database properties tests",
{
setup,
- fun start_couch/0, fun stop_couch/1,
+ fun start_couch/0,
+ fun stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun partitioned_shards_recreated_properly/1
]
@@ -57,57 +55,61 @@ mem3_shards_db_create_props_test_() ->
}
}.
-
% This asserts that when the mem3_shards's changes listener on the shards db
% encounters a db doc update for a db that has a missing shard on the local
% instance, the shard creation logic will properly propagate the db's config
% properties.
% SEE: apache/couchdb#3631
partitioned_shards_recreated_properly(#{dbname := DbName, dbdoc := DbDoc}) ->
- {timeout, ?TIMEOUT, ?_test(begin
- #doc{body = {Body0}} = DbDoc,
- Body1 = [{<<"foo">>, <<"bar">>} | Body0],
- Shards = [Shard|_] = lists:sort(mem3:shards(DbName)),
- ShardName = Shard#shard.name,
- ?assert(is_partitioned(Shards)),
- ok = with_proc(fun() -> couch_server:delete(ShardName, []) end),
- ?assertThrow({not_found, no_db_file}, is_partitioned(Shard)),
- ok = mem3_util:update_db_doc(DbDoc#doc{body = {Body1}}),
- Shards = [Shard|_] = test_util:wait_value(fun() ->
- lists:sort(mem3:shards(DbName))
- end, Shards),
- ?assertEqual(true, test_util:wait_value(fun() ->
- catch is_partitioned(Shard)
- end, true))
- end)}.
-
-
-is_partitioned([#shard{}|_]=Shards) ->
+ {timeout, ?TIMEOUT,
+ ?_test(begin
+ #doc{body = {Body0}} = DbDoc,
+ Body1 = [{<<"foo">>, <<"bar">>} | Body0],
+ Shards = [Shard | _] = lists:sort(mem3:shards(DbName)),
+ ShardName = Shard#shard.name,
+ ?assert(is_partitioned(Shards)),
+ ok = with_proc(fun() -> couch_server:delete(ShardName, []) end),
+ ?assertThrow({not_found, no_db_file}, is_partitioned(Shard)),
+ ok = mem3_util:update_db_doc(DbDoc#doc{body = {Body1}}),
+ Shards =
+ [Shard | _] = test_util:wait_value(
+ fun() ->
+ lists:sort(mem3:shards(DbName))
+ end,
+ Shards
+ ),
+ ?assertEqual(
+ true,
+ test_util:wait_value(
+ fun() ->
+ catch is_partitioned(Shard)
+ end,
+ true
+ )
+ )
+ end)}.
+
+is_partitioned([#shard{} | _] = Shards) ->
lists:all(fun is_partitioned/1, Shards);
-is_partitioned(#shard{name=Name}) ->
+is_partitioned(#shard{name = Name}) ->
couch_util:with_db(Name, fun couch_db:is_partitioned/1);
is_partitioned(Db) ->
couch_db:is_partitioned(Db).
-
create_db(DbName, Opts) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:create_db(DbName, Opts) end, GL).
-
delete_db(DbName) ->
GL = erlang:group_leader(),
with_proc(fun() -> fabric:delete_db(DbName, [?ADMIN_CTX]) end, GL).
-
with_proc(Fun) ->
with_proc(Fun, undefined, 30000).
-
with_proc(Fun, GroupLeader) ->
with_proc(Fun, GroupLeader, 30000).
-
with_proc(Fun, GroupLeader, Timeout) ->
{Pid, Ref} = spawn_monitor(fun() ->
case GroupLeader of
@@ -125,5 +127,4 @@ with_proc(Fun, GroupLeader, Timeout) ->
erlang:demonitor(Ref, [flush]),
exit(Pid, kill),
error({with_proc_timeout, Fun, Timeout})
- end.
-
+ end.
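The test above polls with `test_util:wait_value/2` until the recreated shard becomes visible. A rough sketch of that retry pattern, with invented timeout and interval constants (the real helper lives in CouchDB's test_util and may differ):
```
-module(wait_sketch).
-export([wait_value/2]).

-define(TIMEOUT_MSEC, 5000).
-define(DELAY_MSEC, 50).

%% Re-evaluate Fun until it returns Expected or the time budget runs out.
wait_value(Fun, Expected) ->
    wait_value(Fun, Expected, ?TIMEOUT_MSEC).

wait_value(_Fun, _Expected, Remaining) when Remaining =< 0 ->
    timeout;
wait_value(Fun, Expected, Remaining) ->
    case Fun() of
        Expected ->
            Expected;
        _Other ->
            timer:sleep(?DELAY_MSEC),
            wait_value(Fun, Expected, Remaining - ?DELAY_MSEC)
    end.
```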
diff --git a/src/mem3/test/eunit/mem3_sync_security_test.erl b/src/mem3/test/eunit/mem3_sync_security_test.erl
index e67a72017..7f4b7b699 100644
--- a/src/mem3/test/eunit/mem3_sync_security_test.erl
+++ b/src/mem3/test/eunit/mem3_sync_security_test.erl
@@ -17,17 +17,20 @@
-include("mem3.hrl").
-include_lib("eunit/include/eunit.hrl").
--define(TIMEOUT, 5). % seconds
+% seconds
+-define(TIMEOUT, 5).
go_test_() ->
{
"security property sync test",
{
setup,
- fun start_couch/0, fun stop_couch/1,
+ fun start_couch/0,
+ fun stop_couch/1,
{
foreach,
- fun setup/0, fun teardown/1,
+ fun setup/0,
+ fun teardown/1,
[
fun sync_security_ok/1
]
diff --git a/src/mem3/test/eunit/mem3_util_test.erl b/src/mem3/test/eunit/mem3_util_test.erl
index a8103bfd0..9cab89f67 100644
--- a/src/mem3/test/eunit/mem3_util_test.erl
+++ b/src/mem3/test/eunit/mem3_util_test.erl
@@ -19,19 +19,20 @@ name_shard_test() ->
Shard1 = #shard{},
?assertError(function_clause, mem3_util:name_shard(Shard1, ".1234")),
- Shard2 = #shard{dbname = <<"testdb">>, range = [0,100]},
- #shard{name=Name2} = mem3_util:name_shard(Shard2, ".1234"),
+ Shard2 = #shard{dbname = <<"testdb">>, range = [0, 100]},
+ #shard{name = Name2} = mem3_util:name_shard(Shard2, ".1234"),
?assertEqual(<<"shards/00000000-00000064/testdb.1234">>, Name2),
ok.
create_partition_map_test() ->
- {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a,b,c,d]},
+ {DbName1, N1, Q1, Nodes1} = {<<"testdb1">>, 3, 4, [a, b, c, d]},
Map1 = mem3_util:create_partition_map(DbName1, N1, Q1, Nodes1),
?assertEqual(12, length(Map1)),
- {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a,b,c,d]},
- [#shard{name=Name2,node=Node2}] = Map2 =
+ {DbName2, N2, Q2, Nodes2} = {<<"testdb2">>, 1, 1, [a, b, c, d]},
+ [#shard{name = Name2, node = Node2}] =
+ Map2 =
mem3_util:create_partition_map(DbName2, N2, Q2, Nodes2, ".1234"),
?assertEqual(1, length(Map2)),
?assertEqual(<<"shards/00000000-ffffffff/testdb2.1234">>, Name2),
@@ -40,76 +41,97 @@ create_partition_map_test() ->
build_shards_test() ->
DocProps1 =
- [{<<"changelog">>,
- [[<<"add">>,<<"00000000-1fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"20000000-3fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"40000000-5fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"60000000-7fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"80000000-9fffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"a0000000-bfffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"c0000000-dfffffff">>,
- <<"bigcouch@node.local">>],
- [<<"add">>,<<"e0000000-ffffffff">>,
- <<"bigcouch@node.local">>]]},
- {<<"by_node">>,
- {[{<<"bigcouch@node.local">>,
- [<<"00000000-1fffffff">>,<<"20000000-3fffffff">>,
- <<"40000000-5fffffff">>,<<"60000000-7fffffff">>,
- <<"80000000-9fffffff">>,<<"a0000000-bfffffff">>,
- <<"c0000000-dfffffff">>,<<"e0000000-ffffffff">>]}]}},
- {<<"by_range">>,
- {[{<<"00000000-1fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"20000000-3fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"40000000-5fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"60000000-7fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"80000000-9fffffff">>,[<<"bigcouch@node.local">>]},
- {<<"a0000000-bfffffff">>,[<<"bigcouch@node.local">>]},
- {<<"c0000000-dfffffff">>,[<<"bigcouch@node.local">>]},
- {<<"e0000000-ffffffff">>,[<<"bigcouch@node.local">>]}]}}],
+ [
+ {<<"changelog">>, [
+ [
+ <<"add">>,
+ <<"00000000-1fffffff">>,
+ <<"bigcouch@node.local">>
+ ],
+ [
+ <<"add">>,
+ <<"20000000-3fffffff">>,
+ <<"bigcouch@node.local">>
+ ],
+ [
+ <<"add">>,
+ <<"40000000-5fffffff">>,
+ <<"bigcouch@node.local">>
+ ],
+ [
+ <<"add">>,
+ <<"60000000-7fffffff">>,
+ <<"bigcouch@node.local">>
+ ],
+ [
+ <<"add">>,
+ <<"80000000-9fffffff">>,
+ <<"bigcouch@node.local">>
+ ],
+ [
+ <<"add">>,
+ <<"a0000000-bfffffff">>,
+ <<"bigcouch@node.local">>
+ ],
+ [
+ <<"add">>,
+ <<"c0000000-dfffffff">>,
+ <<"bigcouch@node.local">>
+ ],
+ [
+ <<"add">>,
+ <<"e0000000-ffffffff">>,
+ <<"bigcouch@node.local">>
+ ]
+ ]},
+ {<<"by_node">>,
+ {[
+ {<<"bigcouch@node.local">>, [
+ <<"00000000-1fffffff">>,
+ <<"20000000-3fffffff">>,
+ <<"40000000-5fffffff">>,
+ <<"60000000-7fffffff">>,
+ <<"80000000-9fffffff">>,
+ <<"a0000000-bfffffff">>,
+ <<"c0000000-dfffffff">>,
+ <<"e0000000-ffffffff">>
+ ]}
+ ]}},
+ {<<"by_range">>,
+ {[
+ {<<"00000000-1fffffff">>, [<<"bigcouch@node.local">>]},
+ {<<"20000000-3fffffff">>, [<<"bigcouch@node.local">>]},
+ {<<"40000000-5fffffff">>, [<<"bigcouch@node.local">>]},
+ {<<"60000000-7fffffff">>, [<<"bigcouch@node.local">>]},
+ {<<"80000000-9fffffff">>, [<<"bigcouch@node.local">>]},
+ {<<"a0000000-bfffffff">>, [<<"bigcouch@node.local">>]},
+ {<<"c0000000-dfffffff">>, [<<"bigcouch@node.local">>]},
+ {<<"e0000000-ffffffff">>, [<<"bigcouch@node.local">>]}
+ ]}}
+ ],
Shards1 = mem3_util:build_shards(<<"testdb1">>, DocProps1),
ExpectedShards1 =
- [{shard,<<"shards/00000000-1fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [0,536870911],
- undefined,[]},
- {shard,<<"shards/20000000-3fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [536870912,1073741823],
- undefined,[]},
- {shard,<<"shards/40000000-5fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [1073741824,1610612735],
- undefined,[]},
- {shard,<<"shards/60000000-7fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [1610612736,2147483647],
- undefined,[]},
- {shard,<<"shards/80000000-9fffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [2147483648,2684354559],
- undefined,[]},
- {shard,<<"shards/a0000000-bfffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [2684354560,3221225471],
- undefined,[]},
- {shard,<<"shards/c0000000-dfffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [3221225472,3758096383],
- undefined,[]},
- {shard,<<"shards/e0000000-ffffffff/testdb1">>,
- 'bigcouch@node.local',<<"testdb1">>,
- [3758096384,4294967295],
- undefined,[]}],
+ [
+ {shard, <<"shards/00000000-1fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [0, 536870911], undefined, []},
+ {shard, <<"shards/20000000-3fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [536870912, 1073741823], undefined, []},
+ {shard, <<"shards/40000000-5fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [1073741824, 1610612735], undefined, []},
+ {shard, <<"shards/60000000-7fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [1610612736, 2147483647], undefined, []},
+ {shard, <<"shards/80000000-9fffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [2147483648, 2684354559], undefined, []},
+ {shard, <<"shards/a0000000-bfffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [2684354560, 3221225471], undefined, []},
+ {shard, <<"shards/c0000000-dfffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [3221225472, 3758096383], undefined, []},
+ {shard, <<"shards/e0000000-ffffffff/testdb1">>, 'bigcouch@node.local', <<"testdb1">>,
+ [3758096384, 4294967295], undefined, []}
+ ],
?assertEqual(ExpectedShards1, Shards1),
ok.
-
%% n_val tests
nval_test_() ->
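The expected shard names above follow a `shards/<begin>-<end>/<dbname><suffix>` pattern with the range rendered as zero-padded lowercase hex. A sketch of that naming scheme as inferred from the test expectations (the real implementation is `mem3_util:name_shard/2`; the module here is hypothetical):
```
-module(shard_name_sketch).
-export([name/3]).

%% Render a shard range like [0, 536870911] as "00000000-1fffffff" and
%% assemble the shard path.
name(DbName, [B, E], Suffix) ->
    Range = io_lib:format("~8.16.0b-~8.16.0b", [B, E]),
    iolist_to_binary(["shards/", Range, "/", DbName, Suffix]).
```
`shard_name_sketch:name(<<"testdb1">>, [0, 536870911], "")` gives `<<"shards/00000000-1fffffff/testdb1">>`, matching the first expected shard above.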
diff --git a/src/rexi/src/rexi.erl b/src/rexi/src/rexi.erl
index 170503b7c..fb1763e51 100644
--- a/src/rexi/src/rexi.erl
+++ b/src/rexi/src/rexi.erl
@@ -30,8 +30,8 @@ stop() ->
application:stop(rexi).
restart() ->
- stop(), start().
-
+ stop(),
+ start().
%% @equiv cast(Node, self(), MFA)
-spec cast(node(), {atom(), atom(), list()}) -> reference().
@@ -86,22 +86,34 @@ kill_all(NodeRefs) when is_list(NodeRefs) ->
%% configure the cluster to send kill_all messages.
case config:get_boolean("rexi", "use_kill_all", false) of
true ->
- PerNodeMap = lists:foldl(fun({Node, Ref}, Acc) ->
- maps:update_with(Node, fun(Refs) ->
- [Ref | Refs]
- end, [Ref], Acc)
- end, #{}, NodeRefs),
- maps:map(fun(Node, Refs) ->
- ServerPid = rexi_utils:server_pid(Node),
- rexi_utils:send(ServerPid, cast_msg({kill_all, Refs}))
- end, PerNodeMap);
+ PerNodeMap = lists:foldl(
+ fun({Node, Ref}, Acc) ->
+ maps:update_with(
+ Node,
+ fun(Refs) ->
+ [Ref | Refs]
+ end,
+ [Ref],
+ Acc
+ )
+ end,
+ #{},
+ NodeRefs
+ ),
+ maps:map(
+ fun(Node, Refs) ->
+ ServerPid = rexi_utils:server_pid(Node),
+ rexi_utils:send(ServerPid, cast_msg({kill_all, Refs}))
+ end,
+ PerNodeMap
+ );
false ->
lists:foreach(fun({Node, Ref}) -> kill(Node, Ref) end, NodeRefs)
end,
ok.
%% @equiv async_server_call(Server, self(), Request)
--spec async_server_call(pid() | {atom(),node()}, any()) -> reference().
+-spec async_server_call(pid() | {atom(), node()}, any()) -> reference().
async_server_call(Server, Request) ->
async_server_call(Server, self(), Request).
@@ -110,17 +122,17 @@ async_server_call(Server, Request) ->
%% function acts more like cast() than call() in that the server process
%% is not monitored. Clients who want to know if the server is alive should
%% monitor it themselves before calling this function.
--spec async_server_call(pid() | {atom(),node()}, pid(), any()) -> reference().
+-spec async_server_call(pid() | {atom(), node()}, pid(), any()) -> reference().
async_server_call(Server, Caller, Request) ->
Ref = make_ref(),
- rexi_utils:send(Server, {'$gen_call', {Caller,Ref}, Request}),
+ rexi_utils:send(Server, {'$gen_call', {Caller, Ref}, Request}),
Ref.
%% @doc convenience function to reply to the original rexi Caller.
-spec reply(any()) -> any().
reply(Reply) ->
{Caller, Ref} = get(rexi_from),
- erlang:send(Caller, {Ref,Reply}).
+ erlang:send(Caller, {Ref, Reply}).
%% @equiv sync_reply(Reply, 300000)
sync_reply(Reply) ->
@@ -133,9 +145,10 @@ sync_reply(Reply) ->
sync_reply(Reply, Timeout) ->
{Caller, Ref} = get(rexi_from),
Tag = make_ref(),
- erlang:send(Caller, {Ref, {self(),Tag}, Reply}),
- receive {Tag, Response} ->
- Response
+ erlang:send(Caller, {Ref, {self(), Tag}, Reply}),
+ receive
+ {Tag, Response} ->
+ Response
after Timeout ->
timeout
end.
@@ -174,7 +187,7 @@ stream_init(Timeout) ->
%% sending messages. The `From` should be the value provided by
%% the worker in the rexi_STREAM_INIT message.
-spec stream_start({pid(), any()}) -> ok.
-stream_start({Pid, _Tag}=From) when is_pid(Pid) ->
+stream_start({Pid, _Tag} = From) when is_pid(Pid) ->
gen_server:reply(From, rexi_STREAM_START).
%% @doc Cancel a worker stream
@@ -184,7 +197,7 @@ stream_start({Pid, _Tag}=From) when is_pid(Pid) ->
%% The `From` should be the value provided by the worker in the
%% rexi_STREAM_INIT message.
-spec stream_cancel({pid(), any()}) -> ok.
-stream_cancel({Pid, _Tag}=From) when is_pid(Pid) ->
+stream_cancel({Pid, _Tag} = From) when is_pid(Pid) ->
gen_server:reply(From, rexi_STREAM_CANCEL).
%% @equiv stream(Msg, 100, 300000)
@@ -202,13 +215,14 @@ stream(Msg, Limit) ->
stream(Msg, Limit, Timeout) ->
try maybe_wait(Limit, Timeout) of
{ok, Count} ->
- put(rexi_unacked, Count+1),
+ put(rexi_unacked, Count + 1),
{Caller, Ref} = get(rexi_from),
erlang:send(Caller, {Ref, self(), Msg}),
ok
- catch throw:timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, stream]),
- exit(timeout)
+ catch
+ throw:timeout ->
+ couch_stats:increment_counter([rexi, streams, timeout, stream]),
+ exit(timeout)
end.
%% @equiv stream2(Msg, 5, 300000)
@@ -230,13 +244,14 @@ stream2(Msg, Limit, Timeout) ->
maybe_init_stream(Timeout),
try maybe_wait(Limit, Timeout) of
{ok, Count} ->
- put(rexi_unacked, Count+1),
+ put(rexi_unacked, Count + 1),
{Caller, Ref} = get(rexi_from),
erlang:send(Caller, {Ref, self(), Msg}),
ok
- catch throw:timeout ->
- couch_stats:increment_counter([rexi, streams, timeout, stream]),
- exit(timeout)
+ catch
+ throw:timeout ->
+ couch_stats:increment_counter([rexi, streams, timeout, stream]),
+ exit(timeout)
end.
%% @equiv stream_last(Msg, 300000)
@@ -259,14 +274,12 @@ stream_ack(Client) ->
stream_ack(Client, N) ->
erlang:send(Client, {rexi_ack, N}).
-
%% Sends a ping message to the coordinator. This is for long running
%% operations on a node that could exceed the rexi timeout
-ping() ->
+ping() ->
{Caller, _} = get(rexi_from),
erlang:send(Caller, {rexi, '$rexi_ping'}).
-
%% internal functions %%
cast_msg(Msg) -> {'$gen_cast', Msg}.
@@ -304,7 +317,7 @@ maybe_wait(Limit, Timeout) ->
wait_for_ack(Count, Timeout) ->
receive
- {rexi_ack, N} -> drain_acks(Count-N)
+ {rexi_ack, N} -> drain_acks(Count - N)
after Timeout ->
couch_stats:increment_counter([rexi, streams, timeout, wait_for_ack]),
throw(timeout)
@@ -314,7 +327,7 @@ drain_acks(Count) when Count < 0 ->
erlang:error(mismatched_rexi_ack);
drain_acks(Count) ->
receive
- {rexi_ack, N} -> drain_acks(Count-N)
+ {rexi_ack, N} -> drain_acks(Count - N)
after 0 ->
{ok, Count}
end.
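The `kill_all` hunk above groups `{Node, Ref}` pairs into a per-node map so that only one `kill_all` message is sent to each node. The grouping step in isolation, as a small sketch with invented names:
```
-module(groupby_sketch).
-export([per_node/1]).

%% Fold {Node, Ref} pairs into #{Node => [Ref]}.
per_node(NodeRefs) ->
    lists:foldl(
        fun({Node, Ref}, Acc) ->
            maps:update_with(Node, fun(Refs) -> [Ref | Refs] end, [Ref], Acc)
        end,
        #{},
        NodeRefs
    ).
```
For example, `groupby_sketch:per_node([{n1, r1}, {n2, r2}, {n1, r3}])` returns `#{n1 => [r3, r1], n2 => [r2]}`.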
diff --git a/src/rexi/src/rexi_app.erl b/src/rexi/src/rexi_app.erl
index 0f1e892b5..61e7886e1 100644
--- a/src/rexi/src/rexi_app.erl
+++ b/src/rexi/src/rexi_app.erl
@@ -14,7 +14,6 @@
-behaviour(application).
-export([start/2, stop/1]).
-
start(_Type, StartArgs) ->
rexi_sup:start_link(StartArgs).
diff --git a/src/rexi/src/rexi_buffer.erl b/src/rexi/src/rexi_buffer.erl
index d16dc8ba3..7f0079f03 100644
--- a/src/rexi/src/rexi_buffer.erl
+++ b/src/rexi/src/rexi_buffer.erl
@@ -15,10 +15,16 @@
-vsn(1).
% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
--export ([
+-export([
send/2,
start_link/1
]).
@@ -37,7 +43,6 @@ send(Dest, Msg) ->
Server = list_to_atom(lists:concat([rexi_buffer, "_", get_node(Dest)])),
gen_server:cast(Server, {deliver, Dest, Msg}).
-
init(_) ->
%% TODO Leverage os_mon to discover available memory in the system
Max = list_to_integer(config:get("rexi", "buffer_count", "2000")),
@@ -45,7 +50,6 @@ init(_) ->
handle_call(erase_buffer, _From, State) ->
{reply, ok, State#state{buffer = queue:new(), count = 0}, 0};
-
handle_call(get_buffered_count, _From, State) ->
{reply, State#state.count, State, 0}.
@@ -53,19 +57,19 @@ handle_cast({deliver, Dest, Msg}, #state{buffer = Q, count = C} = State) ->
couch_stats:increment_counter([rexi, buffered]),
Q2 = queue:in({Dest, Msg}, Q),
case should_drop(State) of
- true ->
- couch_stats:increment_counter([rexi, dropped]),
+ true ->
+ couch_stats:increment_counter([rexi, dropped]),
{noreply, State#state{buffer = queue:drop(Q2)}, 0};
- false ->
- {noreply, State#state{buffer = Q2, count = C+1}, 0}
+ false ->
+ {noreply, State#state{buffer = Q2, count = C + 1}, 0}
end.
-handle_info(timeout, #state{sender = nil, buffer = {[],[]}, count = 0}=State) ->
+handle_info(timeout, #state{sender = nil, buffer = {[], []}, count = 0} = State) ->
{noreply, State};
handle_info(timeout, #state{sender = nil, count = C} = State) when C > 0 ->
#state{buffer = Q, count = C} = State,
{{value, {Dest, Msg}}, Q2} = queue:out_r(Q),
- NewState = State#state{buffer = Q2, count = C-1},
+ NewState = State#state{buffer = Q2, count = C - 1},
case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
ok when C =:= 1 ->
% We just sent the last queued messsage, we'll use this opportunity
@@ -82,7 +86,6 @@ handle_info(timeout, #state{sender = nil, count = C} = State) when C > 0 ->
handle_info(timeout, State) ->
% Waiting on a sender to return
{noreply, State};
-
handle_info({'DOWN', Ref, _, Pid, _}, #state{sender = {Pid, Ref}} = State) ->
{noreply, State#state{sender = nil}, 0}.
@@ -91,7 +94,7 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, {state, Buffer, Sender, Count}, _Extra) ->
Max = list_to_integer(config:get("rexi", "buffer_count", "2000")),
- {ok, #state{buffer=Buffer, sender=Sender, count=Count, max_count=Max}};
+ {ok, #state{buffer = Buffer, sender = Sender, count = Count, max_count = Max}};
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
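For orientation, rexi_buffer as reformatted above is a bounded queue: new messages are enqueued at the rear, delivery takes from the rear (newest first via `queue:out_r/1`), and when the buffer is over capacity the oldest entry at the front is dropped. A self-contained sketch of that policy (the real module tracks the count in its state rather than calling `queue:len/1`):
```
-module(bounded_buffer_sketch).
-export([in/3, out/1]).

%% Insert Msg, dropping the oldest entry if the queue is already at Max.
in(Msg, Q, Max) ->
    Q2 = queue:in(Msg, Q),
    case queue:len(Q2) > Max of
        true -> queue:drop(Q2);
        false -> Q2
    end.

%% Take the most recently queued message, if any.
out(Q) ->
    case queue:out_r(Q) of
        {{value, Msg}, Q2} -> {Msg, Q2};
        {empty, Q2} -> {empty, Q2}
    end.
```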
diff --git a/src/rexi/src/rexi_monitor.erl b/src/rexi/src/rexi_monitor.erl
index f90ec5160..7fe66db71 100644
--- a/src/rexi/src/rexi_monitor.erl
+++ b/src/rexi/src/rexi_monitor.erl
@@ -14,16 +14,17 @@
-export([start/1, stop/1]).
-export([wait_monitors/1]).
-
%% @doc spawn_links a process which monitors the supplied list of items and
%% returns the process ID. If a monitored process exits, the caller will
%% receive a {rexi_DOWN, MonitoringPid, DeadPid, Reason} message.
--spec start([pid() | atom() | {atom(),node()}]) -> pid().
+-spec start([pid() | atom() | {atom(), node()}]) -> pid().
start(Procs) ->
Parent = self(),
Nodes = [node() | nodes()],
- {Mon, Skip} = lists:partition(fun(P) -> should_monitor(P, Nodes) end,
- Procs),
+ {Mon, Skip} = lists:partition(
+ fun(P) -> should_monitor(P, Nodes) end,
+ Procs
+ ),
spawn_link(fun() ->
[notify_parent(Parent, P, noconnect) || P <- Skip],
[erlang:monitor(process, P) || P <- Mon],
@@ -50,16 +51,17 @@ should_monitor({_, Node}, Nodes) ->
wait_monitors(Parent) ->
receive
- {'DOWN', _, process, Pid, Reason} ->
- notify_parent(Parent, Pid, Reason),
- ?MODULE:wait_monitors(Parent);
- {Parent, shutdown} ->
- ok
+ {'DOWN', _, process, Pid, Reason} ->
+ notify_parent(Parent, Pid, Reason),
+ ?MODULE:wait_monitors(Parent);
+ {Parent, shutdown} ->
+ ok
end.
flush_down_messages() ->
- receive {rexi_DOWN, _, _, _} ->
- flush_down_messages()
+ receive
+ {rexi_DOWN, _, _, _} ->
+ flush_down_messages()
after 0 ->
ok
end.
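The @doc comment above describes the monitor-and-forward pattern: a helper process monitors each pid and forwards exits to the caller as `{rexi_DOWN, MonitoringPid, DeadPid, Reason}`. A minimal sketch of that shape (names other than the rexi_DOWN tuple are illustrative):
```
-module(monitor_sketch).
-export([start/1]).

%% Spawn a linked process that monitors Pids and forwards DOWN messages
%% to the caller until told to shut down.
start(Pids) ->
    Parent = self(),
    spawn_link(fun() ->
        [erlang:monitor(process, P) || P <- Pids],
        loop(Parent)
    end).

loop(Parent) ->
    receive
        {'DOWN', _Ref, process, Pid, Reason} ->
            Parent ! {rexi_DOWN, self(), Pid, Reason},
            loop(Parent);
        {Parent, shutdown} ->
            ok
    end.
```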
diff --git a/src/rexi/src/rexi_server.erl b/src/rexi/src/rexi_server.erl
index 4cd9ce66e..47c128d7b 100644
--- a/src/rexi/src/rexi_server.erl
+++ b/src/rexi/src/rexi_server.erl
@@ -13,8 +13,14 @@
-module(rexi_server).
-behaviour(gen_server).
-vsn(1).
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
- code_change/3]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ terminate/2,
+ code_change/3
+]).
-export([start_link/1, init_p/2, init_p/3]).
@@ -24,10 +30,10 @@
-include_lib("rexi/include/rexi.hrl").
-record(job, {
- client::reference(),
- worker::reference(),
- client_pid::pid(),
- worker_pid::pid()
+ client :: reference(),
+ worker :: reference(),
+ client_pid :: pid(),
+ worker_pid :: pid()
}).
-record(st, {
@@ -47,30 +53,27 @@ init([]) ->
handle_call(get_errors, _From, #st{errors = Errors} = St) ->
{reply, {ok, lists:reverse(queue:to_list(Errors))}, St};
-
handle_call(get_last_error, _From, #st{errors = Errors} = St) ->
try
{reply, {ok, queue:get_r(Errors)}, St}
- catch error:empty ->
- {reply, {error, empty}, St}
+ catch
+ error:empty ->
+ {reply, {error, empty}, St}
end;
-
-handle_call({set_error_limit, N}, _From, #st{error_count=Len, errors=Q} = St) ->
- if N < Len ->
- {NewQ, _} = queue:split(N, Q);
- true ->
- NewQ = Q
+handle_call({set_error_limit, N}, _From, #st{error_count = Len, errors = Q} = St) ->
+ if
+ N < Len ->
+ {NewQ, _} = queue:split(N, Q);
+ true ->
+ NewQ = Q
end,
NewLen = queue:len(NewQ),
- {reply, ok, St#st{error_limit=N, error_count=NewLen, errors=NewQ}};
-
+ {reply, ok, St#st{error_limit = N, error_count = NewLen, errors = NewQ}};
handle_call(_Request, _From, St) ->
{reply, ignored, St}.
-
handle_cast({doit, From, MFA}, St) ->
handle_cast({doit, From, undefined, MFA}, St);
-
handle_cast({doit, {ClientPid, ClientRef} = From, Nonce, MFA}, State) ->
{LocalPid, Ref} = spawn_monitor(?MODULE, init_p, [From, MFA, Nonce]),
Job = #job{
@@ -80,60 +83,61 @@ handle_cast({doit, {ClientPid, ClientRef} = From, Nonce, MFA}, State) ->
worker_pid = LocalPid
},
{noreply, add_job(Job, State)};
-
-
handle_cast({kill, FromRef}, St) ->
kill_worker(FromRef, St),
{noreply, St};
-
handle_cast({kill_all, FromRefs}, St) ->
lists:foreach(fun(FromRef) -> kill_worker(FromRef, St) end, FromRefs),
{noreply, St};
-
handle_cast(_, St) ->
couch_log:notice("rexi_server ignored_cast", []),
{noreply, St}.
-handle_info({'DOWN', Ref, process, _, normal}, #st{workers=Workers} = St) ->
+handle_info({'DOWN', Ref, process, _, normal}, #st{workers = Workers} = St) ->
case find_worker(Ref, Workers) of
- #job{} = Job ->
- {noreply, remove_job(Job, St)};
- false ->
- {noreply, St}
+ #job{} = Job ->
+ {noreply, remove_job(Job, St)};
+ false ->
+ {noreply, St}
end;
-
-handle_info({'DOWN', Ref, process, Pid, Error}, #st{workers=Workers} = St) ->
+handle_info({'DOWN', Ref, process, Pid, Error}, #st{workers = Workers} = St) ->
case find_worker(Ref, Workers) of
- #job{worker_pid=Pid, worker=Ref, client_pid=CPid, client=CRef} =Job ->
- case Error of #error{reason = {_Class, Reason}, stack = Stack} ->
- notify_caller({CPid, CRef}, {Reason, Stack}),
- St1 = save_error(Error, St),
- {noreply, remove_job(Job, St1)};
- _ ->
- notify_caller({CPid, CRef}, Error),
- {noreply, remove_job(Job, St)}
- end;
- false ->
- {noreply, St}
+ #job{worker_pid = Pid, worker = Ref, client_pid = CPid, client = CRef} = Job ->
+ case Error of
+ #error{reason = {_Class, Reason}, stack = Stack} ->
+ notify_caller({CPid, CRef}, {Reason, Stack}),
+ St1 = save_error(Error, St),
+ {noreply, remove_job(Job, St1)};
+ _ ->
+ notify_caller({CPid, CRef}, Error),
+ {noreply, remove_job(Job, St)}
+ end;
+ false ->
+ {noreply, St}
end;
-
handle_info(_Info, St) ->
{noreply, St}.
terminate(_Reason, St) ->
- ets:foldl(fun(#job{worker_pid=Pid},_) -> exit(Pid,kill) end, nil,
- St#st.workers),
+ ets:foldl(
+ fun(#job{worker_pid = Pid}, _) -> exit(Pid, kill) end,
+ nil,
+ St#st.workers
+ ),
ok.
-code_change(_OldVsn, #st{}=State, _Extra) ->
+code_change(_OldVsn, #st{} = State, _Extra) ->
{ok, State}.
init_p(From, MFA) ->
init_p(From, MFA, undefined).
%% @doc initializes a process started by rexi_server.
--spec init_p({pid(), reference()}, {atom(), atom(), list()},
- string() | undefined) -> any().
+-spec init_p(
+ {pid(), reference()},
+ {atom(), atom(), list()},
+ string() | undefined
+) -> any().
init_p(From, {M,F,A}, Nonce) ->
put(rexi_from, From),
put('$initial_call', {M,F,length(A)}),
@@ -158,13 +162,19 @@ init_p(From, {M,F,A}, Nonce) ->
save_error(_E, #st{error_limit = 0} = St) ->
St;
-save_error(E, #st{errors=Q, error_limit=L, error_count=C} = St) when C >= L ->
+save_error(E, #st{errors = Q, error_limit = L, error_count = C} = St) when C >= L ->
St#st{errors = queue:in(E, queue:drop(Q))};
-save_error(E, #st{errors=Q, error_count=C} = St) ->
- St#st{errors = queue:in(E, Q), error_count = C+1}.
+save_error(E, #st{errors = Q, error_count = C} = St) ->
+ St#st{errors = queue:in(E, Q), error_count = C + 1}.
clean_stack(S) ->
- lists:map(fun({M,F,A}) when is_list(A) -> {M,F,length(A)}; (X) -> X end, S).
+ lists:map(
+ fun
+ ({M, F, A}) when is_list(A) -> {M, F, length(A)};
+ (X) -> X
+ end,
+ S
+ ).
add_job(Job, #st{workers = Workers, clients = Clients} = State) ->
ets:insert(Workers, Job),
@@ -177,19 +187,21 @@ remove_job(Job, #st{workers = Workers, clients = Clients} = State) ->
State.
find_worker(Ref, Tab) ->
- case ets:lookup(Tab, Ref) of [] -> false; [Worker] -> Worker end.
+ case ets:lookup(Tab, Ref) of
+ [] -> false;
+ [Worker] -> Worker
+ end.
notify_caller({Caller, Ref}, Reason) ->
rexi_utils:send(Caller, {Ref, {rexi_EXIT, Reason}}).
-
kill_worker(FromRef, #st{clients = Clients} = St) ->
case find_worker(FromRef, Clients) of
- #job{worker = KeyRef, worker_pid = Pid} = Job ->
- erlang:demonitor(KeyRef),
- exit(Pid, kill),
- remove_job(Job, St),
- ok;
- false ->
- ok
+ #job{worker = KeyRef, worker_pid = Pid} = Job ->
+ erlang:demonitor(KeyRef),
+ exit(Pid, kill),
+ remove_job(Job, St),
+ ok;
+ false ->
+ ok
end.
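The `clean_stack/1` hunk above replaces argument lists in a stacktrace with their arity before the error is stored, which keeps logged errors small. The same idea as a standalone sketch (module name invented):
```
-module(stack_sketch).
-export([clean/1]).

%% Replace {M, F, Args} frames with {M, F, Arity}; leave other frames as-is.
clean(Stack) ->
    lists:map(
        fun
            ({M, F, A}) when is_list(A) -> {M, F, length(A)};
            (Frame) -> Frame
        end,
        Stack
    ).
```
`stack_sketch:clean([{mymod, myfun, [1, 2, 3]}])` returns `[{mymod, myfun, 3}]`.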
diff --git a/src/rexi/src/rexi_server_mon.erl b/src/rexi/src/rexi_server_mon.erl
index cfe1144ce..9057807e6 100644
--- a/src/rexi/src/rexi_server_mon.erl
+++ b/src/rexi/src/rexi_server_mon.erl
@@ -1,5 +1,5 @@
% Copyright 2010-2013 Cloudant
-%
+%
% Licensed under the Apache License, Version 2.0 (the "License"); you may not
% use this file except in compliance with the License. You may obtain a copy of
% the License at
@@ -17,13 +17,11 @@
-behaviour(mem3_cluster).
-vsn(1).
-
-export([
start_link/1,
status/0
]).
-
-export([
init/1,
terminate/2,
@@ -34,23 +32,19 @@
]).
-export([
- cluster_stable/1,
- cluster_unstable/1
+ cluster_stable/1,
+ cluster_unstable/1
]).
-
-define(CLUSTER_STABILITY_PERIOD_SEC, 15).
-
start_link(ChildMod) ->
Name = list_to_atom(lists:concat([ChildMod, "_mon"])),
gen_server:start_link({local, Name}, ?MODULE, ChildMod, []).
-
status() ->
gen_server:call(?MODULE, status).
-
% Mem3 cluster callbacks
cluster_unstable(Server) ->
@@ -62,21 +56,22 @@ cluster_stable(Server) ->
gen_server:cast(Server, cluster_stable),
Server.
-
% gen_server callbacks
init(ChildMod) ->
- {ok, _Mem3Cluster} = mem3_cluster:start_link(?MODULE, self(),
- ?CLUSTER_STABILITY_PERIOD_SEC, ?CLUSTER_STABILITY_PERIOD_SEC),
+ {ok, _Mem3Cluster} = mem3_cluster:start_link(
+ ?MODULE,
+ self(),
+ ?CLUSTER_STABILITY_PERIOD_SEC,
+ ?CLUSTER_STABILITY_PERIOD_SEC
+ ),
start_servers(ChildMod),
couch_log:notice("~s : started servers", [ChildMod]),
{ok, ChildMod}.
-
terminate(_Reason, _St) ->
ok.
-
handle_call(status, _From, ChildMod) ->
case missing_servers(ChildMod) of
[] ->
@@ -84,7 +79,6 @@ handle_call(status, _From, ChildMod) ->
Missing ->
{reply, {waiting, length(Missing)}, ChildMod}
end;
-
handle_call(Msg, _From, St) ->
couch_log:notice("~s ignored_call ~w", [?MODULE, Msg]),
{reply, ignored, St}.
@@ -96,7 +90,6 @@ handle_cast(cluster_unstable, ChildMod) ->
couch_log:notice("~s : cluster unstable", [ChildMod]),
start_servers(ChildMod),
{noreply, ChildMod};
-
% When cluster is stable, start any servers for new nodes and stop servers for
% the ones that disconnected.
handle_cast(cluster_stable, ChildMod) ->
@@ -104,51 +97,48 @@ handle_cast(cluster_stable, ChildMod) ->
start_servers(ChildMod),
stop_servers(ChildMod),
{noreply, ChildMod};
-
handle_cast(Msg, St) ->
couch_log:notice("~s ignored_cast ~w", [?MODULE, Msg]),
{noreply, St}.
-
handle_info(Msg, St) ->
couch_log:notice("~s ignored_info ~w", [?MODULE, Msg]),
{noreply, St}.
-
code_change(_OldVsn, nil, _Extra) ->
{ok, rexi_server};
code_change(_OldVsn, St, _Extra) ->
{ok, St}.
-
start_servers(ChildMod) ->
- lists:foreach(fun(Id) ->
- {ok, _} = start_server(ChildMod, Id)
- end, missing_servers(ChildMod)).
+ lists:foreach(
+ fun(Id) ->
+ {ok, _} = start_server(ChildMod, Id)
+ end,
+ missing_servers(ChildMod)
+ ).
stop_servers(ChildMod) ->
- lists:foreach(fun(Id) ->
- ok = stop_server(ChildMod, Id)
- end, extra_servers(ChildMod)).
-
+ lists:foreach(
+ fun(Id) ->
+ ok = stop_server(ChildMod, Id)
+ end,
+ extra_servers(ChildMod)
+ ).
server_ids(ChildMod) ->
Nodes = [node() | nodes()],
[list_to_atom(lists:concat([ChildMod, "_", Node])) || Node <- Nodes].
-
running_servers(ChildMod) ->
[Id || {Id, _, _, _} <- supervisor:which_children(sup_module(ChildMod))].
-
missing_servers(ChildMod) ->
server_ids(ChildMod) -- running_servers(ChildMod).
-
extra_servers(ChildMod) ->
running_servers(ChildMod) -- server_ids(ChildMod).
-
start_server(ChildMod, ChildId) ->
ChildSpec = {
ChildId,
@@ -165,12 +155,10 @@ start_server(ChildMod, ChildId) ->
erlang:error(Else)
end.
-
stop_server(ChildMod, ChildId) ->
SupMod = sup_module(ChildMod),
ok = supervisor:terminate_child(SupMod, ChildId),
ok = supervisor:delete_child(SupMod, ChildId).
-
sup_module(ChildMod) ->
list_to_atom(lists:concat([ChildMod, "_sup"])).
diff --git a/src/rexi/src/rexi_server_sup.erl b/src/rexi/src/rexi_server_sup.erl
index 29c6ad60c..53497197f 100644
--- a/src/rexi/src/rexi_server_sup.erl
+++ b/src/rexi/src/rexi_server_sup.erl
@@ -15,15 +15,12 @@
-module(rexi_server_sup).
-behaviour(supervisor).
-
-export([init/1]).
-export([start_link/1]).
-
start_link(Name) ->
supervisor:start_link({local, Name}, ?MODULE, []).
-
init([]) ->
{ok, {{one_for_one, 1, 1}, []}}.
diff --git a/src/rexi/src/rexi_sup.erl b/src/rexi/src/rexi_sup.erl
index 3d9aa2a16..3bea0ed15 100644
--- a/src/rexi/src/rexi_sup.erl
+++ b/src/rexi/src/rexi_sup.erl
@@ -17,48 +17,49 @@
-export([init/1]).
start_link(Args) ->
- supervisor:start_link({local,?MODULE}, ?MODULE, Args).
+ supervisor:start_link({local, ?MODULE}, ?MODULE, Args).
init([]) ->
- {ok, {{rest_for_one, 3, 10}, [
- {
- rexi_server,
- {rexi_server, start_link, [rexi_server]},
- permanent,
- 100,
- worker,
- [rexi_server]
- },
- {
- rexi_server_sup,
- {rexi_server_sup, start_link, [rexi_server_sup]},
- permanent,
- 100,
- supervisor,
- [rexi_server_sup]
- },
- {
- rexi_server_mon,
- {rexi_server_mon, start_link, [rexi_server]},
- permanent,
- 100,
- worker,
- [rexi_server_mon]
- },
- {
- rexi_buffer_sup,
- {rexi_server_sup, start_link, [rexi_buffer_sup]},
- permanent,
- 100,
- supervisor,
- [rexi_server_sup]
- },
- {
- rexi_buffer_mon,
- {rexi_server_mon, start_link, [rexi_buffer]},
- permanent,
- 100,
- worker,
- [rexi_server_mon]
- }
- ]}}.
+ {ok,
+ {{rest_for_one, 3, 10}, [
+ {
+ rexi_server,
+ {rexi_server, start_link, [rexi_server]},
+ permanent,
+ 100,
+ worker,
+ [rexi_server]
+ },
+ {
+ rexi_server_sup,
+ {rexi_server_sup, start_link, [rexi_server_sup]},
+ permanent,
+ 100,
+ supervisor,
+ [rexi_server_sup]
+ },
+ {
+ rexi_server_mon,
+ {rexi_server_mon, start_link, [rexi_server]},
+ permanent,
+ 100,
+ worker,
+ [rexi_server_mon]
+ },
+ {
+ rexi_buffer_sup,
+ {rexi_server_sup, start_link, [rexi_buffer_sup]},
+ permanent,
+ 100,
+ supervisor,
+ [rexi_server_sup]
+ },
+ {
+ rexi_buffer_mon,
+ {rexi_server_mon, start_link, [rexi_buffer]},
+ permanent,
+ 100,
+ worker,
+ [rexi_server_mon]
+ }
+ ]}}.
diff --git a/src/rexi/src/rexi_utils.erl b/src/rexi/src/rexi_utils.erl
index 960318418..d59c5ea0f 100644
--- a/src/rexi/src/rexi_utils.erl
+++ b/src/rexi/src/rexi_utils.erl
@@ -17,10 +17,10 @@
%% @doc Return a rexi_server id for the given node.
server_id(Node) ->
case config:get_boolean("rexi", "server_per_node", true) of
- true ->
- list_to_atom("rexi_server_" ++ atom_to_list(Node));
- _ ->
- rexi_server
+ true ->
+ list_to_atom("rexi_server_" ++ atom_to_list(Node));
+ _ ->
+ rexi_server
end.
%% @doc Return a {server_id(node()), Node} Pid name for the given Node.
@@ -30,11 +30,11 @@ server_pid(Node) ->
%% @doc send a message as quickly as possible
send(Dest, Msg) ->
case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
- ok ->
- ok;
- _ ->
- % treat nosuspend and noconnect the same
- rexi_buffer:send(Dest, Msg)
+ ok ->
+ ok;
+ _ ->
+ % treat nosuspend and noconnect the same
+ rexi_buffer:send(Dest, Msg)
end.
%% @doc set up the receive loop with an overall timeout
@@ -53,53 +53,53 @@ recv(Refs, Keypos, Fun, Acc0, GlobalTimeout, PerMsgTO) ->
process_mailbox(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
case process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) of
- {ok, Acc} ->
- process_mailbox(RefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
- {new_refs, NewRefList, Acc} ->
- process_mailbox(NewRefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
- {stop, Acc} ->
- {ok, Acc};
- Error ->
- Error
+ {ok, Acc} ->
+ process_mailbox(RefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
+ {new_refs, NewRefList, Acc} ->
+ process_mailbox(NewRefList, Keypos, Fun, Acc, TimeoutRef, PerMsgTO);
+ {stop, Acc} ->
+ {ok, Acc};
+ Error ->
+ Error
end.
process_message(RefList, Keypos, Fun, Acc0, TimeoutRef, PerMsgTO) ->
receive
- {timeout, TimeoutRef} ->
- {timeout, Acc0};
- {rexi, Ref, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
+ {timeout, TimeoutRef} ->
+ {timeout, Acc0};
+ {rexi, Ref, Msg} ->
+ case lists:keyfind(Ref, Keypos, RefList) of
+ false ->
+ {ok, Acc0};
+ Worker ->
+ Fun(Msg, Worker, Acc0)
+ end;
+ {rexi, Ref, From, Msg} ->
+ case lists:keyfind(Ref, Keypos, RefList) of
+ false ->
+ {ok, Acc0};
+ Worker ->
+ Fun(Msg, {Worker, From}, Acc0)
+ end;
+ {rexi, '$rexi_ping'} ->
{ok, Acc0};
- Worker ->
- Fun(Msg, Worker, Acc0)
- end;
- {rexi, Ref, From, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, {Worker, From}, Acc0)
- end;
- {rexi, '$rexi_ping'} ->
- {ok, Acc0};
- {Ref, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- % this was some non-matching message which we will ignore
- {ok, Acc0};
- Worker ->
- Fun(Msg, Worker, Acc0)
- end;
- {Ref, From, Msg} ->
- case lists:keyfind(Ref, Keypos, RefList) of
- false ->
- {ok, Acc0};
- Worker ->
- Fun(Msg, {Worker, From}, Acc0)
- end;
- {rexi_DOWN, _, _, _} = Msg ->
- Fun(Msg, nil, Acc0)
+ {Ref, Msg} ->
+ case lists:keyfind(Ref, Keypos, RefList) of
+ false ->
+ % this was some non-matching message which we will ignore
+ {ok, Acc0};
+ Worker ->
+ Fun(Msg, Worker, Acc0)
+ end;
+ {Ref, From, Msg} ->
+ case lists:keyfind(Ref, Keypos, RefList) of
+ false ->
+ {ok, Acc0};
+ Worker ->
+ Fun(Msg, {Worker, From}, Acc0)
+ end;
+ {rexi_DOWN, _, _, _} = Msg ->
+ Fun(Msg, nil, Acc0)
after PerMsgTO ->
{timeout, Acc0}
end.
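The `send/2` hunk above tries a non-blocking send first and falls back to buffering when the destination node is busy or disconnected. A sketch of that fallback with the buffer call passed in as a fun so the example stands alone (in the real module it is `rexi_buffer:send/2`):
```
-module(send_sketch).
-export([send/3]).

%% Non-blocking send; hand the message to BufferSend when it cannot be
%% delivered immediately (nosuspend and noconnect are treated the same).
send(Dest, Msg, BufferSend) when is_function(BufferSend, 2) ->
    case erlang:send(Dest, Msg, [noconnect, nosuspend]) of
        ok ->
            ok;
        _NoConnectOrNoSuspend ->
            BufferSend(Dest, Msg)
    end.
```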
diff --git a/src/setup/src/setup.erl b/src/setup/src/setup.erl
index 56773e4da..1757a43e7 100644
--- a/src/setup/src/setup.erl
+++ b/src/setup/src/setup.erl
@@ -18,11 +18,10 @@
-include_lib("../couch/include/couch_db.hrl").
-
require_admins(undefined, {undefined, undefined}) ->
% no admin in CouchDB, no admin in request
throw({error, "Cluster setup requires admin account to be configured"});
-require_admins(_,_) ->
+require_admins(_, _) ->
ok.
require_node_count(undefined) ->
@@ -34,8 +33,14 @@ error_local_bind_address() ->
throw({error, "Cluster setup requires a remote bind_address (not 127.0.0.1 nor ::1)"}).
error_invalid_bind_address(InvalidBindAddress) ->
- throw({error, io:format("Setup requires a valid IP bind_address. " ++
- "~p is invalid.", [InvalidBindAddress])}).
+ throw(
+ {error,
+ io:format(
+ "Setup requires a valid IP bind_address. " ++
+ "~p is invalid.",
+ [InvalidBindAddress]
+ )}
+ ).
require_remote_bind_address(OldBindAddress, NewBindAddress) ->
case {OldBindAddress, NewBindAddress} of
@@ -60,8 +65,8 @@ is_cluster_enabled() ->
Admins = config:get("admins"),
case {BindAddress, Admins} of
{"127.0.0.1", _} -> false;
- {_,[]} -> false;
- {_,_} -> true
+ {_, []} -> false;
+ {_, _} -> true
end.
is_single_node_enabled(Dbs) ->
@@ -71,23 +76,21 @@ is_single_node_enabled(Dbs) ->
case {Admins, HasDbs} of
{[], _} -> false;
{_, false} -> false;
- {_,_} -> true
+ {_, _} -> true
end.
cluster_system_dbs() ->
["_users", "_replicator"].
-
has_cluster_system_dbs([]) ->
true;
-has_cluster_system_dbs([Db|Dbs]) ->
+has_cluster_system_dbs([Db | Dbs]) ->
case catch fabric:get_db_info(Db) of
{ok, _} -> has_cluster_system_dbs(Dbs);
_ -> false
end.
enable_cluster(Options) ->
-
case couch_util:get_value(remote_node, Options, undefined) of
undefined ->
enable_cluster_int(Options, is_cluster_enabled());
@@ -114,17 +117,19 @@ enable_cluster_http(Options) ->
AdminUsername = couch_util:get_value(username, Options),
AdminPasswordHash = config:get("admins", binary_to_list(AdminUsername)),
- Body = ?JSON_ENCODE({[
- {<<"action">>, <<"enable_cluster">>},
- {<<"username">>, AdminUsername},
- {<<"password_hash">>, ?l2b(AdminPasswordHash)},
- {<<"bind_address">>, couch_util:get_value(bind_address, Options)},
- {<<"port">>, couch_util:get_value(port, Options)},
- {<<"node_count">>, couch_util:get_value(node_count, Options)}
- ]}),
+ Body = ?JSON_ENCODE(
+ {[
+ {<<"action">>, <<"enable_cluster">>},
+ {<<"username">>, AdminUsername},
+ {<<"password_hash">>, ?l2b(AdminPasswordHash)},
+ {<<"bind_address">>, couch_util:get_value(bind_address, Options)},
+ {<<"port">>, couch_util:get_value(port, Options)},
+ {<<"node_count">>, couch_util:get_value(node_count, Options)}
+ ]}
+ ),
Headers = [
- {"Content-Type","application/json"}
+ {"Content-Type", "application/json"}
],
RemoteNode = couch_util:get_value(remote_node, Options),
@@ -142,19 +147,18 @@ enable_cluster_http(Options) ->
enable_cluster_int(_Options, true) ->
{error, cluster_enabled};
enable_cluster_int(Options, false) ->
-
% if no admin in config and no admin in req -> error
CurrentAdmins = config:get("admins"),
NewCredentials = {
proplists:get_value(username, Options),
case proplists:get_value(password_hash, Options) of
- undefined -> proplists:get_value(password, Options);
- Pw -> Pw
+ undefined -> proplists:get_value(password, Options);
+ Pw -> Pw
end
},
ok = require_admins(CurrentAdmins, NewCredentials),
% if bind_address == 127.0.0.1 and no bind_address in req -> error
- CurrentBindAddress = config:get("chttpd","bind_address"),
+ CurrentBindAddress = config:get("chttpd", "bind_address"),
NewBindAddress = proplists:get_value(bind_address, Options),
ok = require_remote_bind_address(CurrentBindAddress, NewBindAddress),
NodeCount = couch_util:get_value(node_count, Options),
@@ -196,7 +200,6 @@ setup_node(NewCredentials, NewBindAddress, NodeCount, Port) ->
config:set_integer("chttpd", "port", Port)
end.
-
finish_cluster(Options) ->
% ensure that uuid is set
couch_server:get_uuid(),
@@ -208,7 +211,6 @@ finish_cluster(Options) ->
Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()),
finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)).
-
wait_connected() ->
Nodes = other_nodes(),
Result = test_util:wait(fun() ->
@@ -225,30 +227,31 @@ wait_connected() ->
ok
end.
-
other_nodes() ->
mem3:nodes() -- [node()].
-
disconnected(Nodes) ->
- lists:filter(fun(Node) ->
- case net_adm:ping(Node) of
- pong -> false;
- pang -> true
- end
- end, Nodes).
-
+ lists:filter(
+ fun(Node) ->
+ case net_adm:ping(Node) of
+ pong -> false;
+ pang -> true
+ end
+ end,
+ Nodes
+ ).
sync_admins() ->
- ok = lists:foreach(fun({User, Pass}) ->
- sync_admin(User, Pass)
- end, config:get("admins")).
-
+ ok = lists:foreach(
+ fun({User, Pass}) ->
+ sync_admin(User, Pass)
+ end,
+ config:get("admins")
+ ).
sync_admin(User, Pass) ->
sync_config("admins", User, Pass).
-
sync_uuid() ->
Uuid = config:get("couchdb", "uuid"),
sync_config("couchdb", "uuid", Uuid).
@@ -257,26 +260,33 @@ sync_auth_secret() ->
Secret = config:get("chttpd_auth", "secret"),
sync_config("chttpd_auth", "secret", Secret).
-
sync_config(Section, Key, Value) ->
- {Results, Errors} = rpc:multicall(other_nodes(), config, set,
- [Section, Key, Value]),
+ {Results, Errors} = rpc:multicall(
+ other_nodes(),
+ config,
+ set,
+ [Section, Key, Value]
+ ),
case validate_multicall(Results, Errors) of
ok ->
ok;
error ->
- couch_log:error("~p sync_admin results ~p errors ~p",
- [?MODULE, Results, Errors]),
+ couch_log:error(
+ "~p sync_admin results ~p errors ~p",
+ [?MODULE, Results, Errors]
+ ),
Reason = "Cluster setup unable to sync admin passwords",
throw({setup_error, Reason})
end.
-
validate_multicall(Results, Errors) ->
- AllOk = lists:all(fun
- (ok) -> true;
- (_) -> false
- end, Results),
+ AllOk = lists:all(
+ fun
+ (ok) -> true;
+ (_) -> false
+ end,
+ Results
+ ),
case AllOk andalso Errors == [] of
true ->
ok;
@@ -284,21 +294,19 @@ validate_multicall(Results, Errors) ->
error
end.
-
finish_cluster_int(_Dbs, true) ->
{error, cluster_finished};
finish_cluster_int(Dbs, false) ->
lists:foreach(fun fabric:create_db/1, Dbs).
-
enable_single_node(Options) ->
% if no admin in config and no admin in req -> error
CurrentAdmins = config:get("admins"),
NewCredentials = {
proplists:get_value(username, Options),
case proplists:get_value(password_hash, Options) of
- undefined -> proplists:get_value(password, Options);
- Pw -> Pw
+ undefined -> proplists:get_value(password, Options);
+ Pw -> Pw
end
},
ok = require_admins(CurrentAdmins, NewCredentials),
@@ -311,7 +319,6 @@ enable_single_node(Options) ->
finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)),
couch_log:debug("Enable Single Node: ~p~n", [Options]).
-
add_node(Options) ->
add_node_int(Options, is_cluster_enabled()).
@@ -329,13 +336,15 @@ add_node_int(Options, true) ->
}}
],
- Body = ?JSON_ENCODE({[
- {<<"action">>, <<"receive_cookie">>},
- {<<"cookie">>, atom_to_binary(ErlangCookie, utf8)}
- ]}),
+ Body = ?JSON_ENCODE(
+ {[
+ {<<"action">>, <<"receive_cookie">>},
+ {<<"cookie">>, atom_to_binary(ErlangCookie, utf8)}
+ ]}
+ ),
Headers = [
- {"Content-Type","application/json"}
+ {"Content-Type", "application/json"}
],
Host = proplists:get_value(host, Options),
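The `sync_config`/`validate_multicall` hunks above push one setting to every other node and only treat the sync as successful when each node returned `ok` and none were unreachable. A compact sketch of that check (the `config:set/3` call assumes the config application from this tree is running on those nodes; the module name is made up):
```
-module(multicall_sketch).
-export([set_everywhere/4]).

%% Set Section/Key to Value on all Nodes; fail if any node errored or was down.
set_everywhere(Nodes, Section, Key, Value) ->
    {Results, BadNodes} = rpc:multicall(Nodes, config, set, [Section, Key, Value]),
    case lists:all(fun(R) -> R =:= ok end, Results) andalso BadNodes =:= [] of
        true -> ok;
        false -> {error, {Results, BadNodes}}
    end.
```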
diff --git a/src/setup/src/setup_epi.erl b/src/setup/src/setup_epi.erl
index c3f2636f0..df717dbc3 100644
--- a/src/setup/src/setup_epi.erl
+++ b/src/setup/src/setup_epi.erl
@@ -10,7 +10,6 @@
% License for the specific language governing permissions and limitations under
% the License.
-
-module(setup_epi).
-behaviour(couch_epi_plugin).
@@ -30,7 +29,7 @@ app() ->
providers() ->
[
- {chttpd_handlers, setup_httpd_handlers}
+ {chttpd_handlers, setup_httpd_handlers}
].
services() ->
diff --git a/src/setup/src/setup_httpd.erl b/src/setup/src/setup_httpd.erl
index 6d17186fb..418a72845 100644
--- a/src/setup/src/setup_httpd.erl
+++ b/src/setup/src/setup_httpd.erl
@@ -15,19 +15,19 @@
-export([handle_setup_req/1]).
-handle_setup_req(#httpd{method='POST'}=Req) ->
+handle_setup_req(#httpd{method = 'POST'} = Req) ->
ok = chttpd:verify_is_server_admin(Req),
couch_httpd:validate_ctype(Req, "application/json"),
Setup = get_body(Req),
couch_log:notice("Setup: ~p~n", [remove_sensitive(Setup)]),
Action = binary_to_list(couch_util:get_value(<<"action">>, Setup, <<"missing">>)),
case handle_action(Action, Setup) of
- ok ->
- chttpd:send_json(Req, 201, {[{ok, true}]});
- {error, Message} ->
- couch_httpd:send_error(Req, 400, <<"bad_request">>, Message)
+ ok ->
+ chttpd:send_json(Req, 201, {[{ok, true}]});
+ {error, Message} ->
+ couch_httpd:send_error(Req, 400, <<"bad_request">>, Message)
end;
-handle_setup_req(#httpd{method='GET'}=Req) ->
+handle_setup_req(#httpd{method = 'GET'} = Req) ->
ok = chttpd:verify_is_server_admin(Req),
Dbs = chttpd:qs_json_value(Req, "ensure_dbs_exist", setup:cluster_system_dbs()),
couch_log:notice("Dbs: ~p~n", [Dbs]),
@@ -58,10 +58,9 @@ handle_setup_req(#httpd{method='GET'}=Req) ->
end
end
end;
-handle_setup_req(#httpd{}=Req) ->
+handle_setup_req(#httpd{} = Req) ->
chttpd:send_method_not_allowed(Req, "GET,POST").
-
get_options(Options, Setup) ->
ExtractValues = fun({Tag, Option}, OptionsAcc) ->
case couch_util:get_value(Option, Setup) of
@@ -72,30 +71,35 @@ get_options(Options, Setup) ->
lists:foldl(ExtractValues, [], Options).
handle_action("enable_cluster", Setup) ->
- Options = get_options([
- {username, <<"username">>},
- {password, <<"password">>},
- {password_hash, <<"password_hash">>},
- {bind_address, <<"bind_address">>},
- {port, <<"port">>},
- {remote_node, <<"remote_node">>},
- {remote_current_user, <<"remote_current_user">>},
- {remote_current_password, <<"remote_current_password">>},
- {node_count, <<"node_count">>}
- ], Setup),
+ Options = get_options(
+ [
+ {username, <<"username">>},
+ {password, <<"password">>},
+ {password_hash, <<"password_hash">>},
+ {bind_address, <<"bind_address">>},
+ {port, <<"port">>},
+ {remote_node, <<"remote_node">>},
+ {remote_current_user, <<"remote_current_user">>},
+ {remote_current_password, <<"remote_current_password">>},
+ {node_count, <<"node_count">>}
+ ],
+ Setup
+ ),
case setup:enable_cluster(Options) of
{error, cluster_enabled} ->
{error, <<"Cluster is already enabled">>};
- _ -> ok
+ _ ->
+ ok
end;
-
-
handle_action("finish_cluster", Setup) ->
couch_log:notice("finish_cluster: ~p~n", [remove_sensitive(Setup)]),
- Options = get_options([
- {ensure_dbs_exist, <<"ensure_dbs_exist">>}
- ], Setup),
+ Options = get_options(
+ [
+ {ensure_dbs_exist, <<"ensure_dbs_exist">>}
+ ],
+ Setup
+ ),
case setup:finish_cluster(Options) of
{error, cluster_finished} ->
{error, <<"Cluster is already finished">>};
@@ -103,18 +107,20 @@ handle_action("finish_cluster", Setup) ->
couch_log:notice("finish_cluster: ~p~n", [Else]),
ok
end;
-
handle_action("enable_single_node", Setup) ->
couch_log:notice("enable_single_node: ~p~n", [remove_sensitive(Setup)]),
- Options = get_options([
- {ensure_dbs_exist, <<"ensure_dbs_exist">>},
- {username, <<"username">>},
- {password, <<"password">>},
- {password_hash, <<"password_hash">>},
- {bind_address, <<"bind_address">>},
- {port, <<"port">>}
- ], Setup),
+ Options = get_options(
+ [
+ {ensure_dbs_exist, <<"ensure_dbs_exist">>},
+ {username, <<"username">>},
+ {password, <<"password">>},
+ {password_hash, <<"password_hash">>},
+ {bind_address, <<"bind_address">>},
+ {port, <<"port">>}
+ ],
+ Setup
+ ),
case setup:enable_single_node(Options) of
{error, cluster_finished} ->
{error, <<"Cluster is already finished">>};
@@ -122,18 +128,19 @@ handle_action("enable_single_node", Setup) ->
couch_log:notice("Else: ~p~n", [Else]),
ok
end;
-
-
handle_action("add_node", Setup) ->
couch_log:notice("add_node: ~p~n", [remove_sensitive(Setup)]),
- Options = get_options([
- {username, <<"username">>},
- {password, <<"password">>},
- {host, <<"host">>},
- {port, <<"port">>},
- {name, <<"name">>}
- ], Setup),
+ Options = get_options(
+ [
+ {username, <<"username">>},
+ {password, <<"password">>},
+ {host, <<"host">>},
+ {port, <<"port">>},
+ {name, <<"name">>}
+ ],
+ Setup
+ ),
case setup:add_node(Options) of
{error, cluster_not_enabled} ->
{error, <<"Cluster is not enabled.">>};
@@ -143,35 +150,36 @@ handle_action("add_node", Setup) ->
{error, <<"Add node failed. Invalid admin credentials,">>};
{error, Message} ->
{error, Message};
- _ -> ok
+ _ ->
+ ok
end;
-
handle_action("remove_node", Setup) ->
couch_log:notice("remove_node: ~p~n", [remove_sensitive(Setup)]);
-
handle_action("receive_cookie", Setup) ->
couch_log:notice("receive_cookie: ~p~n", [remove_sensitive(Setup)]),
- Options = get_options([
- {cookie, <<"cookie">>}
- ], Setup),
+ Options = get_options(
+ [
+ {cookie, <<"cookie">>}
+ ],
+ Setup
+ ),
case setup:receive_cookie(Options) of
{error, Error} ->
{error, Error};
- _ -> ok
+ _ ->
+ ok
end;
-
handle_action(_, _) ->
couch_log:notice("invalid_action: ~n", []),
{error, <<"Invalid Action'">>}.
-
get_body(Req) ->
case catch couch_httpd:json_body_obj(Req) of
- {Body} ->
- Body;
- Else ->
- couch_log:notice("Body Fail: ~p~n", [Else]),
- couch_httpd:send_error(Req, 400, <<"bad_request">>, <<"Missing JSON body'">>)
+ {Body} ->
+ Body;
+ Else ->
+ couch_log:notice("Body Fail: ~p~n", [Else]),
+ couch_httpd:send_error(Req, 400, <<"bad_request">>, <<"Missing JSON body'">>)
end.
remove_sensitive(KVList) ->
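The `get_options/2` calls reformatted above map `{Tag, JsonKey}` pairs onto whatever keys are actually present in the decoded request body. A self-contained sketch of that extraction, using `proplists:get_value/2` in place of `couch_util:get_value/2` so it has no CouchDB dependency:
```
-module(options_sketch).
-export([get_options/2]).

%% Keep only the spec entries whose JSON key appears in Body.
get_options(Spec, Body) ->
    lists:foldl(
        fun({Tag, Key}, Acc) ->
            case proplists:get_value(Key, Body) of
                undefined -> Acc;
                Value -> [{Tag, Value} | Acc]
            end
        end,
        [],
        Spec
    ).
```
For example, `options_sketch:get_options([{username, <<"username">>}], [{<<"username">>, <<"adm">>}])` returns `[{username, <<"adm">>}]`.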
diff --git a/src/setup/src/setup_sup.erl b/src/setup/src/setup_sup.erl
index 4670a0a59..e80ad8ef0 100644
--- a/src/setup/src/setup_sup.erl
+++ b/src/setup/src/setup_sup.erl
@@ -35,8 +35,8 @@ start_link() ->
%% ===================================================================
init([]) ->
- case config:get_boolean("couchdb", "single_node", false) of
- true ->
+ case config:get_boolean("couchdb", "single_node", false) of
+ true ->
setup:finish_cluster([]);
false ->
ok
diff --git a/src/smoosh/src/smoosh.erl b/src/smoosh/src/smoosh.erl
index 676e7faad..950500ffa 100644
--- a/src/smoosh/src/smoosh.erl
+++ b/src/smoosh/src/smoosh.erl
@@ -37,33 +37,48 @@ status() ->
smoosh_server:status().
enqueue_all_dbs() ->
- fold_local_shards(fun(#shard{name=Name}, _Acc) ->
- sync_enqueue(Name) end, ok).
+ fold_local_shards(
+ fun(#shard{name = Name}, _Acc) ->
+ sync_enqueue(Name)
+ end,
+ ok
+ ).
enqueue_all_dbs(Timeout) ->
- fold_local_shards(fun(#shard{name=Name}, _Acc) ->
- sync_enqueue(Name, Timeout) end, ok).
+ fold_local_shards(
+ fun(#shard{name = Name}, _Acc) ->
+ sync_enqueue(Name, Timeout)
+ end,
+ ok
+ ).
enqueue_all_views() ->
- fold_local_shards(fun(#shard{name=Name}, _Acc) ->
- catch enqueue_views(Name) end, ok).
+ fold_local_shards(
+ fun(#shard{name = Name}, _Acc) ->
+ catch enqueue_views(Name)
+ end,
+ ok
+ ).
fold_local_shards(Fun, Acc0) ->
- mem3:fold_shards(fun(Shard, Acc1) ->
- case node() == Shard#shard.node of
- true ->
- Fun(Shard, Acc1);
- false ->
- Acc1
- end
- end, Acc0).
+ mem3:fold_shards(
+ fun(Shard, Acc1) ->
+ case node() == Shard#shard.node of
+ true ->
+ Fun(Shard, Acc1);
+ false ->
+ Acc1
+ end
+ end,
+ Acc0
+ ).
enqueue_views(ShardName) ->
DbName = mem3:dbname(ShardName),
{ok, DDocs} = fabric:design_docs(DbName),
[sync_enqueue({ShardName, id(DDoc)}) || DDoc <- DDocs].
-id(#doc{id=Id}) ->
+id(#doc{id = Id}) ->
Id;
id({Props}) ->
couch_util:get_value(<<"_id">>, Props).
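`fold_local_shards/2` above applies its fun only to shards hosted on the local node. A sketch of that filter over a plain list, with a local stand-in for mem3's `#shard{}` record and invented names:
```
-module(local_fold_sketch).
-export([fold_local/3]).

-record(shard, {name, node}).

%% Apply Fun only to shards whose node matches node(), threading Acc through.
fold_local(Fun, Acc0, Shards) ->
    lists:foldl(
        fun(#shard{node = Node} = Shard, Acc) ->
            case Node =:= node() of
                true -> Fun(Shard, Acc);
                false -> Acc
            end
        end,
        Acc0,
        Shards
    ).
```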
diff --git a/src/smoosh/src/smoosh_channel.erl b/src/smoosh/src/smoosh_channel.erl
index 2bc98be9d..06849ac3a 100644
--- a/src/smoosh/src/smoosh_channel.erl
+++ b/src/smoosh/src/smoosh_channel.erl
@@ -20,17 +20,23 @@
-export([enqueue/3, last_updated/2, flush/1]).
% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ terminate/2
+]).
% records.
-record(state, {
- active=[],
+ active = [],
name,
- waiting=smoosh_priority_queue:new(),
- paused=true,
- starting=[]
+ waiting = smoosh_priority_queue:new(),
+ paused = true,
+ starting = []
}).
% public functions.
@@ -64,41 +70,40 @@ flush(ServerRef) ->
init(Name) ->
schedule_unpause(),
erlang:send_after(60 * 1000, self(), check_window),
- {ok, #state{name=Name}}.
+ {ok, #state{name = Name}}.
handle_call({last_updated, Object}, _From, State0) ->
{ok, State} = code_change(nil, State0, nil),
LastUpdated = smoosh_priority_queue:last_updated(Object, State#state.waiting),
{reply, LastUpdated, State};
-
handle_call(suspend, _From, State0) ->
{ok, State} = code_change(nil, State0, nil),
#state{active = Active} = State,
- [catch erlang:suspend_process(Pid, [unless_suspending])
- || {_,Pid} <- Active],
- {reply, ok, State#state{paused=true}};
-
+ [
+ catch erlang:suspend_process(Pid, [unless_suspending])
+ || {_, Pid} <- Active
+ ],
+ {reply, ok, State#state{paused = true}};
handle_call(resume, _From, State0) ->
{ok, State} = code_change(nil, State0, nil),
#state{active = Active} = State,
- [catch erlang:resume_process(Pid) || {_,Pid} <- Active],
- {reply, ok, State#state{paused=false}};
-
+ [catch erlang:resume_process(Pid) || {_, Pid} <- Active],
+ {reply, ok, State#state{paused = false}};
handle_call(status, _From, State0) ->
{ok, State} = code_change(nil, State0, nil),
- {reply, {ok, [
- {active, length(State#state.active)},
- {starting, length(State#state.starting)},
- {waiting, smoosh_priority_queue:info(State#state.waiting)}
- ]}, State};
-
+ {reply,
+ {ok, [
+ {active, length(State#state.active)},
+ {starting, length(State#state.starting)},
+ {waiting, smoosh_priority_queue:info(State#state.waiting)}
+ ]},
+ State};
handle_call(close, _From, State0) ->
{ok, State} = code_change(nil, State0, nil),
{stop, normal, ok, State};
-
handle_call(flush, _From, State0) ->
{ok, State} = code_change(nil, State0, nil),
- {reply, ok, State#state{waiting=smoosh_priority_queue:new()}}.
+ {reply, ok, State#state{waiting = smoosh_priority_queue:new()}}.
handle_cast({enqueue, _Object, 0}, State0) ->
{ok, State} = code_change(nil, State0, nil),
@@ -109,206 +114,247 @@ handle_cast({enqueue, Object, Priority}, State0) ->
% We accept noproc here due to possibly having monitored a restarted compaction
% pid after it finished.
-handle_info({'DOWN', Ref, _, Job, Reason}, State0) when Reason == normal;
- Reason == noproc ->
+handle_info({'DOWN', Ref, _, Job, Reason}, State0) when
+ Reason == normal;
+ Reason == noproc
+->
{ok, State} = code_change(nil, State0, nil),
- #state{active=Active, starting=Starting} = State,
- {noreply, maybe_start_compaction(
- State#state{active=lists:keydelete(Job, 2, Active),
- starting=lists:keydelete(Ref, 1, Starting)})};
-
+ #state{active = Active, starting = Starting} = State,
+ {noreply,
+ maybe_start_compaction(
+ State#state{
+ active = lists:keydelete(Job, 2, Active),
+ starting = lists:keydelete(Ref, 1, Starting)
+ }
+ )};
handle_info({'DOWN', Ref, _, Job, Reason}, State0) ->
{ok, State} = code_change(nil, State0, nil),
- #state{active=Active0, starting=Starting0} = State,
+ #state{active = Active0, starting = Starting0} = State,
case lists:keytake(Job, 2, Active0) of
{value, {Key, _Pid}, Active1} ->
- State1 = maybe_remonitor_cpid(State#state{active=Active1}, Key,
- Reason),
+ State1 = maybe_remonitor_cpid(
+ State#state{active = Active1},
+ Key,
+ Reason
+ ),
{noreply, maybe_start_compaction(State1)};
false ->
case lists:keytake(Ref, 1, Starting0) of
{value, {_, Key}, Starting1} ->
couch_log:warning("failed to start compaction of ~p: ~p", [
- smoosh_utils:stringify(Key), Reason]),
+ smoosh_utils:stringify(Key), Reason
+ ]),
{ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [Key]),
- {noreply, maybe_start_compaction(State#state{starting=Starting1})};
+ {noreply, maybe_start_compaction(State#state{starting = Starting1})};
false ->
{noreply, State}
end
end;
-
handle_info({Ref, {ok, Pid}}, State0) when is_reference(Ref) ->
{ok, State} = code_change(nil, State0, nil),
case lists:keytake(Ref, 1, State#state.starting) of
{value, {_, Key}, Starting1} ->
- couch_log:notice("~s: Started compaction for ~s",
- [State#state.name, smoosh_utils:stringify(Key)]),
+ couch_log:notice(
+ "~s: Started compaction for ~s",
+ [State#state.name, smoosh_utils:stringify(Key)]
+ ),
erlang:monitor(process, Pid),
erlang:demonitor(Ref, [flush]),
- {noreply, State#state{active=[{Key, Pid}|State#state.active],
- starting=Starting1}};
+ {noreply, State#state{
+ active = [{Key, Pid} | State#state.active],
+ starting = Starting1
+ }};
false ->
{noreply, State}
end;
-
handle_info(check_window, State0) ->
{ok, State} = code_change(nil, State0, nil),
#state{paused = Paused, name = Name} = State,
StrictWindow = smoosh_utils:get(Name, "strict_window", "false"),
- FinalState = case {not Paused, smoosh_utils:in_allowed_window(Name)} of
- {false, false} ->
- % already in desired state
- State;
- {true, true} ->
- % already in desired state
- State;
- {false, true} ->
- % resume is always safe even if we did not previously suspend
- {reply, ok, NewState} = handle_call(resume, nil, State),
- NewState;
- {true, false} ->
- if StrictWindow =:= "true" ->
- {reply, ok, NewState} = handle_call(suspend, nil, State),
+ FinalState =
+ case {not Paused, smoosh_utils:in_allowed_window(Name)} of
+ {false, false} ->
+ % already in desired state
+ State;
+ {true, true} ->
+ % already in desired state
+ State;
+ {false, true} ->
+ % resume is always safe even if we did not previously suspend
+ {reply, ok, NewState} = handle_call(resume, nil, State),
NewState;
- true ->
- State#state{paused=true}
- end
- end,
+ {true, false} ->
+ if
+ StrictWindow =:= "true" ->
+ {reply, ok, NewState} = handle_call(suspend, nil, State),
+ NewState;
+ true ->
+ State#state{paused = true}
+ end
+ end,
erlang:send_after(60 * 1000, self(), check_window),
{noreply, FinalState};
-
handle_info(pause, State0) ->
{ok, State} = code_change(nil, State0, nil),
- {noreply, State#state{paused=true}};
+ {noreply, State#state{paused = true}};
handle_info(unpause, State0) ->
{ok, State} = code_change(nil, State0, nil),
- {noreply, maybe_start_compaction(State#state{paused=false})}.
+ {noreply, maybe_start_compaction(State#state{paused = false})}.
terminate(_Reason, _State) ->
ok.
-code_change(_OldVsn, #state{}=State, _Extra) ->
+code_change(_OldVsn, #state{} = State, _Extra) ->
{ok, State}.
% private functions.
add_to_queue(Key, Priority, State) ->
- #state{active=Active,waiting=Q} = State,
+ #state{active = Active, waiting = Q} = State,
case lists:keymember(Key, 1, Active) of
- true ->
- State;
- false ->
- Capacity = list_to_integer(smoosh_utils:get(State#state.name, "capacity", "9999")),
- couch_log:notice(
- "~s: adding ~p to internal compactor queue with priority ~p",
- [State#state.name, Key, Priority]),
- State#state{
- waiting=smoosh_priority_queue:in(Key, Priority, Priority, Capacity, Q)
- }
+ true ->
+ State;
+ false ->
+ Capacity = list_to_integer(smoosh_utils:get(State#state.name, "capacity", "9999")),
+ couch_log:notice(
+ "~s: adding ~p to internal compactor queue with priority ~p",
+ [State#state.name, Key, Priority]
+ ),
+ State#state{
+ waiting = smoosh_priority_queue:in(Key, Priority, Priority, Capacity, Q)
+ }
end.
-maybe_start_compaction(#state{paused=true}=State) ->
+maybe_start_compaction(#state{paused = true} = State) ->
State;
maybe_start_compaction(State) ->
- Concurrency = list_to_integer(smoosh_utils:get(State#state.name,
- "concurrency", "1")),
- if length(State#state.active) + length(State#state.starting) < Concurrency ->
- case smoosh_priority_queue:out(State#state.waiting) of
- false ->
- State;
- {Key, Priority, Q} ->
- try
- State2 = case start_compact(State, Key) of
+ Concurrency = list_to_integer(
+ smoosh_utils:get(
+ State#state.name,
+ "concurrency",
+ "1"
+ )
+ ),
+ if
+ length(State#state.active) + length(State#state.starting) < Concurrency ->
+ case smoosh_priority_queue:out(State#state.waiting) of
false ->
State;
- State1 ->
- couch_log:notice(
- "~s: Starting compaction for ~s (priority ~p)",
- [State#state.name, smoosh_utils:stringify(Key), Priority]),
- State1
- end,
- maybe_start_compaction(State2#state{waiting=Q})
- catch Class:Exception ->
- couch_log:notice("~s: ~p ~p for ~s",
- [State#state.name, Class, Exception,
- smoosh_utils:stringify(Key)]),
- maybe_start_compaction(State#state{waiting=Q})
- end
- end;
- true ->
- State
+ {Key, Priority, Q} ->
+ try
+ State2 =
+ case start_compact(State, Key) of
+ false ->
+ State;
+ State1 ->
+ couch_log:notice(
+ "~s: Starting compaction for ~s (priority ~p)",
+ [State#state.name, smoosh_utils:stringify(Key), Priority]
+ ),
+ State1
+ end,
+ maybe_start_compaction(State2#state{waiting = Q})
+ catch
+ Class:Exception ->
+ couch_log:notice(
+ "~s: ~p ~p for ~s",
+ [
+ State#state.name,
+ Class,
+ Exception,
+ smoosh_utils:stringify(Key)
+ ]
+ ),
+ maybe_start_compaction(State#state{waiting = Q})
+ end
+ end;
+ true ->
+ State
end.
start_compact(State, {schema, DbName, GroupId}) ->
case smoosh_utils:ignore_db({DbName, GroupId}) of
false ->
- {ok, Pid} = couch_md_index_manager:get_group_pid(DbName,
- GroupId),
+ {ok, Pid} = couch_md_index_manager:get_group_pid(
+ DbName,
+ GroupId
+ ),
Ref = erlang:monitor(process, Pid),
Pid ! {'$gen_call', {self(), Ref}, compact},
- State#state{starting=[{Ref, {schema, DbName,
- GroupId}} | State#state.starting]};
+ State#state{starting = [{Ref, {schema, DbName, GroupId}} | State#state.starting]};
_ ->
false
end;
-
start_compact(State, DbName) when is_list(DbName) ->
start_compact(State, ?l2b(DbName));
start_compact(State, DbName) when is_binary(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
- try start_compact(State, Db) after couch_db:close(Db) end;
-start_compact(State, {Shard,GroupId}) ->
+ try
+ start_compact(State, Db)
+ after
+ couch_db:close(Db)
+ end;
+start_compact(State, {Shard, GroupId}) ->
case smoosh_utils:ignore_db({Shard, GroupId}) of
- false ->
- DbName = mem3:dbname(Shard),
- {ok, Pid} = couch_index_server:get_index(
- couch_mrview_index, Shard, GroupId),
- spawn(fun() -> cleanup_index_files(DbName, Shard) end),
- Ref = erlang:monitor(process, Pid),
- Pid ! {'$gen_call', {self(), Ref}, compact},
- State#state{starting=[{Ref, {Shard, GroupId}}|State#state.starting]};
- _ ->
- false
+ false ->
+ DbName = mem3:dbname(Shard),
+ {ok, Pid} = couch_index_server:get_index(
+ couch_mrview_index, Shard, GroupId
+ ),
+ spawn(fun() -> cleanup_index_files(DbName, Shard) end),
+ Ref = erlang:monitor(process, Pid),
+ Pid ! {'$gen_call', {self(), Ref}, compact},
+ State#state{starting = [{Ref, {Shard, GroupId}} | State#state.starting]};
+ _ ->
+ false
end;
start_compact(State, Db) ->
case smoosh_utils:ignore_db(Db) of
- false ->
- DbPid = couch_db:get_pid(Db),
- Key = couch_db:name(Db),
- case couch_db:get_compactor_pid(Db) of
- nil ->
- Ref = erlang:monitor(process, DbPid),
- DbPid ! {'$gen_call', {self(), Ref}, start_compact},
- State#state{starting=[{Ref, Key}|State#state.starting]};
- % Compaction is already running, so monitor existing compaction pid.
- CPid ->
- couch_log:notice("Db ~s continuing compaction",
- [smoosh_utils:stringify(Key)]),
- erlang:monitor(process, CPid),
- State#state{active=[{Key, CPid}|State#state.active]}
- end;
- _ ->
- false
+ false ->
+ DbPid = couch_db:get_pid(Db),
+ Key = couch_db:name(Db),
+ case couch_db:get_compactor_pid(Db) of
+ nil ->
+ Ref = erlang:monitor(process, DbPid),
+ DbPid ! {'$gen_call', {self(), Ref}, start_compact},
+ State#state{starting = [{Ref, Key} | State#state.starting]};
+ % Compaction is already running, so monitor existing compaction pid.
+ CPid ->
+ couch_log:notice(
+ "Db ~s continuing compaction",
+ [smoosh_utils:stringify(Key)]
+ ),
+ erlang:monitor(process, CPid),
+ State#state{active = [{Key, CPid} | State#state.active]}
+ end;
+ _ ->
+ false
end.
maybe_remonitor_cpid(State, DbName, Reason) when is_binary(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
case couch_db:get_compactor_pid_sync(Db) of
nil ->
- couch_log:warning("exit for compaction of ~p: ~p",
- [smoosh_utils:stringify(DbName), Reason]),
+ couch_log:warning(
+ "exit for compaction of ~p: ~p",
+ [smoosh_utils:stringify(DbName), Reason]
+ ),
{ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [DbName]),
State;
CPid ->
- couch_log:notice("~s compaction already running. Re-monitor Pid ~p",
- [smoosh_utils:stringify(DbName), CPid]),
+ couch_log:notice(
+ "~s compaction already running. Re-monitor Pid ~p",
+ [smoosh_utils:stringify(DbName), CPid]
+ ),
erlang:monitor(process, CPid),
- State#state{active=[{DbName, CPid}|State#state.active]}
+ State#state{active = [{DbName, CPid} | State#state.active]}
end;
% not a database compaction, so ignore the pid check
maybe_remonitor_cpid(State, Key, Reason) ->
- couch_log:warning("exit for compaction of ~p: ~p",
- [smoosh_utils:stringify(Key), Reason]),
+ couch_log:warning(
+ "exit for compaction of ~p: ~p",
+ [smoosh_utils:stringify(Key), Reason]
+ ),
{ok, _} = timer:apply_after(5000, smoosh_server, enqueue, [Key]),
State.
@@ -318,8 +364,8 @@ schedule_unpause() ->
cleanup_index_files(DbName, _Shard) ->
case config:get("smoosh", "cleanup_index_files", "false") of
- "true" ->
- fabric:cleanup_index_files(DbName);
- _ ->
- ok
+ "true" ->
+ fabric:cleanup_index_files(DbName);
+ _ ->
+ ok
end.
diff --git a/src/smoosh/src/smoosh_priority_queue.erl b/src/smoosh/src/smoosh_priority_queue.erl
index 6376103d9..b6f4b6dd8 100644
--- a/src/smoosh/src/smoosh_priority_queue.erl
+++ b/src/smoosh/src/smoosh_priority_queue.erl
@@ -15,14 +15,14 @@
-export([new/0, last_updated/2, is_key/2, in/4, in/5, out/1, size/1, info/1]).
-record(priority_queue, {
- dict=dict:new(),
- tree=gb_trees:empty()
+ dict = dict:new(),
+ tree = gb_trees:empty()
}).
new() ->
#priority_queue{}.
-last_updated(Key, #priority_queue{dict=Dict}) ->
+last_updated(Key, #priority_queue{dict = Dict}) ->
case dict:find(Key, Dict) of
{ok, {_Priority, {LastUpdatedMTime, _MInt}}} ->
LastUpdatedMTime;
@@ -30,48 +30,51 @@ last_updated(Key, #priority_queue{dict=Dict}) ->
false
end.
-is_key(Key, #priority_queue{dict=Dict}) ->
+is_key(Key, #priority_queue{dict = Dict}) ->
dict:is_key(Key, Dict).
in(Key, Value, Priority, Q) ->
in(Key, Value, Priority, infinity, Q).
-in(Key, Value, Priority, Capacity, #priority_queue{dict=Dict, tree=Tree}) ->
- Tree1 = case dict:find(Key, Dict) of
- {ok, TreeKey} ->
- gb_trees:delete_any(TreeKey, Tree);
- error ->
- Tree
- end,
+in(Key, Value, Priority, Capacity, #priority_queue{dict = Dict, tree = Tree}) ->
+ Tree1 =
+ case dict:find(Key, Dict) of
+ {ok, TreeKey} ->
+ gb_trees:delete_any(TreeKey, Tree);
+ error ->
+ Tree
+ end,
Now = {erlang:monotonic_time(), erlang:unique_integer([monotonic])},
TreeKey1 = {Priority, Now},
Tree2 = gb_trees:enter(TreeKey1, {Key, Value}, Tree1),
Dict1 = dict:store(Key, TreeKey1, Dict),
- truncate(Capacity, #priority_queue{dict=Dict1, tree=Tree2}).
+ truncate(Capacity, #priority_queue{dict = Dict1, tree = Tree2}).
-out(#priority_queue{dict=Dict,tree=Tree}) ->
+out(#priority_queue{dict = Dict, tree = Tree}) ->
case gb_trees:is_empty(Tree) of
- true ->
- false;
- false ->
- {_, {Key, Value}, Tree1} = gb_trees:take_largest(Tree),
- Dict1 = dict:erase(Key, Dict),
- {Key, Value, #priority_queue{dict=Dict1, tree=Tree1}}
+ true ->
+ false;
+ false ->
+ {_, {Key, Value}, Tree1} = gb_trees:take_largest(Tree),
+ Dict1 = dict:erase(Key, Dict),
+ {Key, Value, #priority_queue{dict = Dict1, tree = Tree1}}
end.
-size(#priority_queue{tree=Tree}) ->
+size(#priority_queue{tree = Tree}) ->
gb_trees:size(Tree).
-info(#priority_queue{tree=Tree}=Q) ->
- [{size, ?MODULE:size(Q)}|
- case gb_trees:is_empty(Tree) of
- true ->
- [];
- false ->
- {Min, _, _} = gb_trees:take_smallest(Tree),
- {Max, _, _} = gb_trees:take_largest(Tree),
- [{min, Min}, {max, Max}]
- end].
+info(#priority_queue{tree = Tree} = Q) ->
+ [
+ {size, ?MODULE:size(Q)}
+ | case gb_trees:is_empty(Tree) of
+ true ->
+ [];
+ false ->
+ {Min, _, _} = gb_trees:take_smallest(Tree),
+ {Max, _, _} = gb_trees:take_largest(Tree),
+ [{min, Min}, {max, Max}]
+ end
+ ].
truncate(infinity, Q) ->
Q;
@@ -80,7 +83,7 @@ truncate(Capacity, Q) when Capacity > 0 ->
truncate(Capacity, Size, Q) when Size =< Capacity ->
Q;
-truncate(Capacity, Size, #priority_queue{dict=Dict, tree=Tree}) when Size > 0 ->
+truncate(Capacity, Size, #priority_queue{dict = Dict, tree = Tree}) when Size > 0 ->
{_, {Key, _}, Tree1} = gb_trees:take_smallest(Tree),
- Q1 = #priority_queue{dict=dict:erase(Key, Dict), tree=Tree1},
+ Q1 = #priority_queue{dict = dict:erase(Key, Dict), tree = Tree1},
truncate(Capacity, ?MODULE:size(Q1), Q1).
diff --git a/src/smoosh/src/smoosh_server.erl b/src/smoosh/src/smoosh_server.erl
index 6aab19183..0526625ff 100644
--- a/src/smoosh/src/smoosh_server.erl
+++ b/src/smoosh/src/smoosh_server.erl
@@ -31,8 +31,14 @@
-define(SECONDS_PER_MINUTE, 60).
% gen_server api.
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2]).
+-export([
+ init/1,
+ handle_call/3,
+ handle_cast/2,
+ handle_info/2,
+ code_change/3,
+ terminate/2
+]).
% config_listener api
-export([handle_config_change/5, handle_config_terminate/3]).
@@ -49,12 +55,12 @@
% private records.
-record(state, {
- db_channels=[],
- view_channels=[],
- schema_channels=[],
+ db_channels = [],
+ view_channels = [],
+ schema_channels = [],
tab,
event_listener,
- waiting=dict:new()
+ waiting = dict:new()
}).
-record(channel, {
@@ -107,19 +113,27 @@ init([]) ->
ok = config:listen_for_changes(?MODULE, nil),
{ok, Pid} = start_event_listener(),
DbChannels = smoosh_utils:split(
- config:get("smoosh", "db_channels", "upgrade_dbs,ratio_dbs,slack_dbs")),
+ config:get("smoosh", "db_channels", "upgrade_dbs,ratio_dbs,slack_dbs")
+ ),
ViewChannels = smoosh_utils:split(
- config:get("smoosh", "view_channels", "upgrade_views,ratio_views,slack_views")),
- SchemaChannels = smoosh_utils:split(config:get("smoosh",
- "schema_channels", "ratio_schemas,slack_schemas")),
+ config:get("smoosh", "view_channels", "upgrade_views,ratio_views,slack_views")
+ ),
+ SchemaChannels = smoosh_utils:split(
+ config:get(
+ "smoosh",
+ "schema_channels",
+ "ratio_schemas,slack_schemas"
+ )
+ ),
Tab = ets:new(channels, [{keypos, #channel.name}]),
- {ok, create_missing_channels(#state{
- db_channels=DbChannels,
- view_channels=ViewChannels,
- schema_channels=SchemaChannels,
- event_listener=Pid,
- tab=Tab
- })}.
+ {ok,
+ create_missing_channels(#state{
+ db_channels = DbChannels,
+ view_channels = ViewChannels,
+ schema_channels = SchemaChannels,
+ event_listener = Pid,
+ tab = Tab
+ })}.
handle_config_change("smoosh", "db_channels", L, _, _) ->
{ok, gen_server:cast(?MODULE, {new_db_channels, smoosh_utils:split(L)})};
@@ -133,105 +147,123 @@ handle_config_change(_, _, _, _, _) ->
handle_config_terminate(_Server, stop, _State) ->
ok;
handle_config_terminate(_Server, _Reason, _State) ->
- erlang:send_after(?RELISTEN_DELAY,
- whereis(?MODULE), restart_config_listener).
+ erlang:send_after(
+ ?RELISTEN_DELAY,
+ whereis(?MODULE),
+ restart_config_listener
+ ).
handle_call(status, _From, State) ->
Acc = ets:foldl(fun get_channel_status/2, [], State#state.tab),
{reply, {ok, Acc}, State};
-
handle_call({enqueue, Object}, _From, State) ->
{noreply, NewState} = handle_cast({enqueue, Object}, State),
{reply, ok, NewState};
-
handle_call(suspend, _From, State) ->
- ets:foldl(fun(#channel{name=Name, pid=P}, _) ->
- couch_log:notice("Suspending ~p", [Name]),
- smoosh_channel:suspend(P) end, 0,
- State#state.tab),
+ ets:foldl(
+ fun(#channel{name = Name, pid = P}, _) ->
+ couch_log:notice("Suspending ~p", [Name]),
+ smoosh_channel:suspend(P)
+ end,
+ 0,
+ State#state.tab
+ ),
{reply, ok, State};
-
handle_call(resume, _From, State) ->
- ets:foldl(fun(#channel{name=Name, pid=P}, _) ->
- couch_log:notice("Resuming ~p", [Name]),
- smoosh_channel:resume(P) end, 0,
- State#state.tab),
+ ets:foldl(
+ fun(#channel{name = Name, pid = P}, _) ->
+ couch_log:notice("Resuming ~p", [Name]),
+ smoosh_channel:resume(P)
+ end,
+ 0,
+ State#state.tab
+ ),
{reply, ok, State}.
handle_cast({new_db_channels, Channels}, State) ->
- [smoosh_channel:close(channel_pid(State#state.tab, C)) ||
- C <- State#state.db_channels -- Channels],
- {noreply, create_missing_channels(State#state{db_channels=Channels})};
-
+ [
+ smoosh_channel:close(channel_pid(State#state.tab, C))
+ || C <- State#state.db_channels -- Channels
+ ],
+ {noreply, create_missing_channels(State#state{db_channels = Channels})};
handle_cast({new_view_channels, Channels}, State) ->
- [smoosh_channel:close(channel_pid(State#state.tab, C)) ||
- C <- State#state.view_channels -- Channels],
- {noreply, create_missing_channels(State#state{view_channels=Channels})};
-
+ [
+ smoosh_channel:close(channel_pid(State#state.tab, C))
+ || C <- State#state.view_channels -- Channels
+ ],
+ {noreply, create_missing_channels(State#state{view_channels = Channels})};
handle_cast({new_schema_channels, Channels}, State) ->
- [smoosh_channel:close(channel_pid(State#state.tab, C)) ||
- C <- State#state.schema_channels -- Channels],
- {noreply, create_missing_channels(State#state{view_channels=Channels})};
-
+ [
+ smoosh_channel:close(channel_pid(State#state.tab, C))
+ || C <- State#state.schema_channels -- Channels
+ ],
+ {noreply, create_missing_channels(State#state{view_channels = Channels})};
handle_cast({enqueue, Object}, State) ->
- #state{waiting=Waiting}=State,
+ #state{waiting = Waiting} = State,
case dict:is_key(Object, Waiting) of
true ->
{noreply, State};
false ->
{_Pid, Ref} = spawn_monitor(?MODULE, enqueue_request, [State, Object]),
- {noreply, State#state{waiting=dict:store(Object, Ref, Waiting)}}
+ {noreply, State#state{waiting = dict:store(Object, Ref, Waiting)}}
end.
-handle_info({'EXIT', Pid, Reason}, #state{event_listener=Pid}=State) ->
- couch_log:notice("update notifier died ~p", [Reason]),
- {ok, Pid1} = start_event_listener(),
- {noreply, State#state{event_listener=Pid1}};
+handle_info({'EXIT', Pid, Reason}, #state{event_listener = Pid} = State) ->
+ couch_log:notice("update notifier died ~p", [Reason]),
+ {ok, Pid1} = start_event_listener(),
+ {noreply, State#state{event_listener = Pid1}};
handle_info({'EXIT', Pid, Reason}, State) ->
couch_log:notice("~p ~p died ~p", [?MODULE, Pid, Reason]),
- case ets:match_object(State#state.tab, #channel{pid=Pid, _='_'}) of
- [#channel{name=Name}] ->
- ets:delete(State#state.tab, Name);
- _ ->
- ok
+ case ets:match_object(State#state.tab, #channel{pid = Pid, _ = '_'}) of
+ [#channel{name = Name}] ->
+ ets:delete(State#state.tab, Name);
+ _ ->
+ ok
end,
{noreply, create_missing_channels(State)};
-
handle_info({'DOWN', Ref, _, _, _}, State) ->
- Waiting = dict:filter(fun(_Key, Value) -> Value =/= Ref end,
- State#state.waiting),
- {noreply, State#state{waiting=Waiting}};
-
+ Waiting = dict:filter(
+ fun(_Key, Value) -> Value =/= Ref end,
+ State#state.waiting
+ ),
+ {noreply, State#state{waiting = Waiting}};
handle_info(restart_config_listener, State) ->
ok = config:listen_for_changes(?MODULE, nil),
{noreply, State};
-
handle_info(_Msg, State) ->
{noreply, State}.
terminate(_Reason, State) ->
- ets:foldl(fun(#channel{pid=P}, _) -> smoosh_channel:close(P) end, 0,
- State#state.tab),
+ ets:foldl(
+ fun(#channel{pid = P}, _) -> smoosh_channel:close(P) end,
+ 0,
+ State#state.tab
+ ),
ok.
-code_change(_OldVsn, {state, DbChannels, ViewChannels, Tab,
- EventListener, Waiting}, _Extra) ->
- {ok, #state{db_channels=DbChannels, view_channels=ViewChannels,
- schema_channels=[], tab=Tab, event_listener = EventListener,
- waiting=Waiting}};
+code_change(_OldVsn, {state, DbChannels, ViewChannels, Tab, EventListener, Waiting}, _Extra) ->
+ {ok, #state{
+ db_channels = DbChannels,
+ view_channels = ViewChannels,
+ schema_channels = [],
+ tab = Tab,
+ event_listener = EventListener,
+ waiting = Waiting
+ }};
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
% private functions.
-get_channel_status(#channel{name=Name, pid=P}, Acc0) when is_pid(P) ->
+get_channel_status(#channel{name = Name, pid = P}, Acc0) when is_pid(P) ->
try gen_server:call(P, status) of
- {ok, Status} ->
- [{Name, Status} | Acc0];
- _ ->
- Acc0
- catch _:_ ->
- Acc0
+ {ok, Status} ->
+ [{Name, Status} | Acc0];
+ _ ->
+ Acc0
+ catch
+ _:_ ->
+ Acc0
end;
get_channel_status(_, Acc0) ->
Acc0.
@@ -254,41 +286,42 @@ enqueue_request(State, Object) ->
smoosh_utils:stringify(Object), Stack])
end.
-find_channel(#state{}=State, {schema, DbName, GroupId}) ->
+find_channel(#state{} = State, {schema, DbName, GroupId}) ->
find_channel(State#state.tab, State#state.schema_channels, {schema, DbName, GroupId});
-find_channel(#state{}=State, {Shard, GroupId}) ->
+find_channel(#state{} = State, {Shard, GroupId}) ->
find_channel(State#state.tab, State#state.view_channels, {Shard, GroupId});
-find_channel(#state{}=State, DbName) ->
+find_channel(#state{} = State, DbName) ->
find_channel(State#state.tab, State#state.db_channels, DbName).
find_channel(_Tab, [], _Object) ->
false;
-find_channel(Tab, [Channel|Rest], Object) ->
+find_channel(Tab, [Channel | Rest], Object) ->
Pid = channel_pid(Tab, Channel),
LastUpdated = smoosh_channel:last_updated(Pid, Object),
- StalenessInSec = config:get_integer("smoosh", "staleness", 5)
- * ?SECONDS_PER_MINUTE,
+ StalenessInSec =
+ config:get_integer("smoosh", "staleness", 5) *
+ ?SECONDS_PER_MINUTE,
Staleness = erlang:convert_time_unit(StalenessInSec, seconds, native),
Now = erlang:monotonic_time(),
case LastUpdated =:= false orelse Now - LastUpdated > Staleness of
- true ->
- case smoosh_utils:ignore_db(Object) of
true ->
- find_channel(Tab, Rest, Object);
- _ ->
- case get_priority(Channel, Object) of
- 0 ->
- find_channel(Tab, Rest, Object);
- Priority ->
- {ok, Pid, Priority}
- end
- end;
- false ->
- find_channel(Tab, Rest, Object)
+ case smoosh_utils:ignore_db(Object) of
+ true ->
+ find_channel(Tab, Rest, Object);
+ _ ->
+ case get_priority(Channel, Object) of
+ 0 ->
+ find_channel(Tab, Rest, Object);
+ Priority ->
+ {ok, Pid, Priority}
+ end
+ end;
+ false ->
+ find_channel(Tab, Rest, Object)
end.
channel_pid(Tab, Channel) ->
- [#channel{pid=Pid}] = ets:lookup(Tab, Channel),
+ [#channel{pid = Pid}] = ets:lookup(Tab, Channel),
Pid.
create_missing_channels(State) ->
@@ -299,11 +332,11 @@ create_missing_channels(State) ->
create_missing_channels(_Tab, []) ->
ok;
-create_missing_channels(Tab, [Channel|Rest]) ->
+create_missing_channels(Tab, [Channel | Rest]) ->
case ets:lookup(Tab, Channel) of
[] ->
{ok, Pid} = smoosh_channel:start_link(Channel),
- true = ets:insert(Tab, [#channel{name=Channel, pid=Pid}]);
+ true = ets:insert(Tab, [#channel{name = Channel, pid = Pid}]);
_ ->
ok
end,
@@ -311,44 +344,50 @@ create_missing_channels(Tab, [Channel|Rest]) ->
get_priority(Channel, {Shard, GroupId}) ->
case couch_index_server:get_index(couch_mrview_index, Shard, GroupId) of
- {ok, Pid} ->
- try
- {ok, ViewInfo} = couch_index:get_info(Pid),
- {SizeInfo} = couch_util:get_value(sizes, ViewInfo),
- DiskSize = couch_util:get_value(file, SizeInfo),
- ActiveSize = couch_util:get_value(active, SizeInfo),
- NeedsUpgrade = needs_upgrade(ViewInfo),
- get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade)
- catch
- exit:{timeout, _} ->
- 0
- end;
- {not_found, _Reason} ->
- 0;
- {error, Reason} ->
- couch_log:warning("Failed to get group_pid for ~p ~p ~p: ~p",
- [Channel, Shard, GroupId, Reason]),
- 0
+ {ok, Pid} ->
+ try
+ {ok, ViewInfo} = couch_index:get_info(Pid),
+ {SizeInfo} = couch_util:get_value(sizes, ViewInfo),
+ DiskSize = couch_util:get_value(file, SizeInfo),
+ ActiveSize = couch_util:get_value(active, SizeInfo),
+ NeedsUpgrade = needs_upgrade(ViewInfo),
+ get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade)
+ catch
+ exit:{timeout, _} ->
+ 0
+ end;
+ {not_found, _Reason} ->
+ 0;
+ {error, Reason} ->
+ couch_log:warning(
+ "Failed to get group_pid for ~p ~p ~p: ~p",
+ [Channel, Shard, GroupId, Reason]
+ ),
+ 0
end;
-
get_priority(Channel, {schema, DbName, DDocId}) ->
case couch_md_index_manager:get_group_pid(DbName, DDocId) of
- {ok, Pid} ->
- {ok, SchemaInfo} = couch_md_index:get_info(Pid),
- DiskSize = couch_util:get_value(disk_size, SchemaInfo),
- DataSize = couch_util:get_value(data_size, SchemaInfo),
- get_priority(Channel, DiskSize, DataSize, false);
- {error, Reason} ->
- couch_log:warning("Failed to get group_pid for ~p ~p ~p: ~p",
- [Channel, DbName, DDocId, Reason]),
- 0
+ {ok, Pid} ->
+ {ok, SchemaInfo} = couch_md_index:get_info(Pid),
+ DiskSize = couch_util:get_value(disk_size, SchemaInfo),
+ DataSize = couch_util:get_value(data_size, SchemaInfo),
+ get_priority(Channel, DiskSize, DataSize, false);
+ {error, Reason} ->
+ couch_log:warning(
+ "Failed to get group_pid for ~p ~p ~p: ~p",
+ [Channel, DbName, DDocId, Reason]
+ ),
+ 0
end;
-
get_priority(Channel, DbName) when is_list(DbName) ->
get_priority(Channel, ?l2b(DbName));
get_priority(Channel, DbName) when is_binary(DbName) ->
{ok, Db} = couch_db:open_int(DbName, []),
- try get_priority(Channel, Db) after couch_db:close(Db) end;
+ try
+ get_priority(Channel, Db)
+ after
+ couch_db:close(Db)
+ end;
get_priority(Channel, Db) ->
{ok, DocInfo} = couch_db:get_db_info(Db),
{SizeInfo} = couch_util:get_value(sizes, DocInfo),
@@ -356,7 +395,7 @@ get_priority(Channel, Db) ->
ActiveSize = couch_util:get_value(active, SizeInfo),
NeedsUpgrade = needs_upgrade(DocInfo),
case db_changed(Channel, DocInfo) of
- true -> get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade);
+ true -> get_priority(Channel, DiskSize, ActiveSize, NeedsUpgrade);
false -> 0
end.
@@ -364,30 +403,35 @@ get_priority(Channel, DiskSize, DataSize, NeedsUpgrade) ->
Priority = get_priority(Channel),
MinSize = to_number(Channel, "min_size", "1048576"),
MaxSize = to_number(Channel, "max_size", "infinity"),
- DefaultMinPriority = case Priority of "slack" -> "536870912"; _ -> "2.0" end,
+ DefaultMinPriority =
+ case Priority of
+ "slack" -> "536870912";
+ _ -> "2.0"
+ end,
MinPriority = to_number(Channel, "min_priority", DefaultMinPriority),
MaxPriority = to_number(Channel, "max_priority", "infinity"),
- if Priority =:= "upgrade", NeedsUpgrade ->
+ if
+ Priority =:= "upgrade", NeedsUpgrade ->
1;
- DiskSize =< MinSize ->
+ DiskSize =< MinSize ->
0;
- DiskSize > MaxSize ->
+ DiskSize > MaxSize ->
0;
- DataSize =:= 0 ->
+ DataSize =:= 0 ->
MinPriority;
- Priority =:= "ratio", DiskSize/DataSize =< MinPriority ->
+ Priority =:= "ratio", DiskSize / DataSize =< MinPriority ->
0;
- Priority =:= "ratio", DiskSize/DataSize > MaxPriority ->
+ Priority =:= "ratio", DiskSize / DataSize > MaxPriority ->
0;
- Priority =:= "ratio" ->
- DiskSize/DataSize;
- Priority =:= "slack", DiskSize-DataSize =< MinPriority ->
+ Priority =:= "ratio" ->
+ DiskSize / DataSize;
+ Priority =:= "slack", DiskSize - DataSize =< MinPriority ->
0;
- Priority =:= "slack", DiskSize-DataSize > MaxPriority ->
+ Priority =:= "slack", DiskSize - DataSize > MaxPriority ->
0;
- Priority =:= "slack" ->
- DiskSize-DataSize;
- true ->
+ Priority =:= "slack" ->
+ DiskSize - DataSize;
+ true ->
0
end.
@@ -397,19 +441,22 @@ db_changed(Channel, Info) ->
true;
CompactedSeq ->
MinChanges = list_to_integer(
- smoosh_utils:get(Channel, "min_changes", "0")),
+ smoosh_utils:get(Channel, "min_changes", "0")
+ ),
UpdateSeq = couch_util:get_value(update_seq, Info),
UpdateSeq - CompactedSeq >= MinChanges
end.
to_number(Channel, Name, Default) ->
case smoosh_utils:get(Channel, Name, Default) of
- "infinity" -> infinity;
+ "infinity" ->
+ infinity;
Value ->
try
list_to_float(Value)
- catch error:badarg ->
- list_to_integer(Value)
+ catch
+ error:badarg ->
+ list_to_integer(Value)
end
end.
@@ -441,11 +488,9 @@ needs_upgrade(Props) ->
false
end.
-
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-
setup_all() ->
Ctx = test_util:start_couch([couch_log]),
meck:new([config, couch_index, couch_index_server], [passthrough]),
@@ -477,7 +522,7 @@ config_change_test_() ->
fun t_restart_config_listener/1
]
}
-}.
+ }.
get_priority_test_() ->
{
diff --git a/src/smoosh/src/smoosh_sup.erl b/src/smoosh/src/smoosh_sup.erl
index 158498cd5..abd55a2eb 100644
--- a/src/smoosh/src/smoosh_sup.erl
+++ b/src/smoosh/src/smoosh_sup.erl
@@ -35,4 +35,4 @@ start_link() ->
%% ===================================================================
init([]) ->
- {ok, { {one_for_one, 5, 10}, [?CHILD(smoosh_server, worker)]} }.
+ {ok, {{one_for_one, 5, 10}, [?CHILD(smoosh_server, worker)]}}.
diff --git a/src/smoosh/src/smoosh_utils.erl b/src/smoosh/src/smoosh_utils.erl
index fcd0fcd6f..882d3ec84 100644
--- a/src/smoosh/src/smoosh_utils.erl
+++ b/src/smoosh/src/smoosh_utils.erl
@@ -20,14 +20,15 @@
group_pid({Shard, GroupId}) ->
case couch_view_group:open_db_group(Shard, GroupId) of
- {ok, Group} ->
- try
- gen_server:call(couch_view, {get_group_server, Shard, Group})
- catch _:Error ->
- {error, Error}
- end;
- Else ->
- Else
+ {ok, Group} ->
+ try
+ gen_server:call(couch_view, {get_group_server, Shard, Group})
+ catch
+ _:Error ->
+ {error, Error}
+ end;
+ Else ->
+ Else
end.
get(Channel, Key) ->
@@ -37,7 +38,7 @@ get(Channel, Key, Default) ->
config:get("smoosh." ++ Channel, Key, Default).
split(CSV) ->
- re:split(CSV, "\\s*,\\s*", [{return,list}, trim]).
+ re:split(CSV, "\\s*,\\s*", [{return, list}, trim]).
stringify({DbName, GroupId}) ->
io_lib:format("~s ~s", [DbName, GroupId]);
@@ -48,14 +49,14 @@ stringify(DbName) ->
ignore_db({DbName, _GroupName}) ->
ignore_db(DbName);
-ignore_db(DbName) when is_binary(DbName)->
+ignore_db(DbName) when is_binary(DbName) ->
ignore_db(?b2l(DbName));
ignore_db(DbName) when is_list(DbName) ->
case config:get("smoosh.ignore", DbName, false) of
- "true" ->
- true;
- _ ->
- false
+ "true" ->
+ true;
+ _ ->
+ false
end;
ignore_db(Db) ->
ignore_db(couch_db:name(Db)).
@@ -68,13 +69,12 @@ in_allowed_window(Channel) ->
in_allowed_window(From, To) ->
{_, {HH, MM, _}} = calendar:universal_time(),
case From < To of
- true ->
- ({HH, MM} >= From) andalso ({HH, MM} < To);
- false ->
- ({HH, MM} >= From) orelse ({HH, MM} < To)
+ true ->
+ ({HH, MM} >= From) andalso ({HH, MM} < To);
+ false ->
+ ({HH, MM} >= From) orelse ({HH, MM} < To)
end.
-
parse_time(undefined, Default) ->
Default;
parse_time(String, Default) ->
@@ -82,9 +82,10 @@ parse_time(String, Default) ->
[HH, MM] ->
try
{list_to_integer(HH), list_to_integer(MM)}
- catch error:badarg ->
- couch_log:error("Malformed compaction schedule configuration: ~s", [String]),
- Default
+ catch
+ error:badarg ->
+ couch_log:error("Malformed compaction schedule configuration: ~s", [String]),
+ Default
end;
_Else ->
couch_log:error("Malformed compaction schedule configuration: ~s", [String]),
diff --git a/src/weatherreport/src/weatherreport.erl b/src/weatherreport/src/weatherreport.erl
index fd4b1e450..8a46b4a87 100644
--- a/src/weatherreport/src/weatherreport.erl
+++ b/src/weatherreport/src/weatherreport.erl
@@ -56,20 +56,23 @@
-export([main/1]).
-define(OPTS, [
- {etc, $c, "etc", string, "Path to the CouchDB configuration directory"},
- {level, $d, "level", {atom, notice}, "Minimum message severity level (default: notice)"},
- {expert, $e, "expert", undefined, "Perform more detailed diagnostics" },
- {usage, $h, "help", undefined, "Display help/usage" },
- {list, $l, "list", undefined, "Describe available diagnostic tasks" },
- {all_nodes, $a, "all-nodes", undefined, "Run weatherreport on all cluster nodes" },
- {timeout, $t, "timeout", integer, "Timeout value (in ms) for each diagnostic check" }
- ]).
+ {etc, $c, "etc", string, "Path to the CouchDB configuration directory"},
+ {level, $d, "level", {atom, notice}, "Minimum message severity level (default: notice)"},
+ {expert, $e, "expert", undefined, "Perform more detailed diagnostics"},
+ {usage, $h, "help", undefined, "Display help/usage"},
+ {list, $l, "list", undefined, "Describe available diagnostic tasks"},
+ {all_nodes, $a, "all-nodes", undefined, "Run weatherreport on all cluster nodes"},
+ {timeout, $t, "timeout", integer, "Timeout value (in ms) for each diagnostic check"}
+]).
--define(USAGE_OPTS, [ O || O <- ?OPTS,
- element(5,O) =/= undefined]).
+-define(USAGE_OPTS, [
+ O
+ || O <- ?OPTS,
+ element(5, O) =/= undefined
+]).
%% @doc The main entry point for the weatherreport escript.
--spec main(CommandLineArguments::[string()]) -> any().
+-spec main(CommandLineArguments :: [string()]) -> any().
main(Args) ->
application:load(weatherreport),
@@ -86,15 +89,22 @@ main(Args) ->
end.
list_checks() ->
- Descriptions = [ {weatherreport_util:short_name(Mod), Mod:description()} ||
- Mod <- weatherreport_check:modules() ],
+ Descriptions = [
+ {weatherreport_util:short_name(Mod), Mod:description()}
+ || Mod <- weatherreport_check:modules()
+ ],
io:format("Available diagnostic checks:~n~n"),
- lists:foreach(fun({Mod, Desc}) ->
- io:format(" ~.20s ~s~n", [Mod, Desc])
- end, lists:sort(Descriptions)).
+ lists:foreach(
+ fun({Mod, Desc}) ->
+ io:format(" ~.20s ~s~n", [Mod, Desc])
+ end,
+ lists:sort(Descriptions)
+ ).
usage() ->
- weatherreport_getopt:usage(?USAGE_OPTS, "weatherreport ", "[check_name ...]", [{"check_name", "A specific check to run"}]).
+ weatherreport_getopt:usage(?USAGE_OPTS, "weatherreport ", "[check_name ...]", [
+ {"check_name", "A specific check to run"}
+ ]).
run(InputChecks) ->
case weatherreport_config:prepare() of
@@ -104,47 +114,58 @@ run(InputChecks) ->
_ ->
ok
end,
- Checks = case InputChecks of
- [] ->
- weatherreport_check:modules();
- _ ->
- ShortNames = [{weatherreport_util:short_name(Mod), Mod} || Mod <- weatherreport_check:modules() ],
- element(1, lists:foldr(fun validate_checks/2, {[], ShortNames}, InputChecks))
- end,
- Messages = case application:get_env(weatherreport, all_nodes) of
- {ok, true} ->
- weatherreport_runner:run(Checks, all);
- _ ->
- weatherreport_runner:run(Checks)
- end,
- case Messages of
- [] ->
- io:format("No diagnostic messages to report.~n"),
- halt(0);
- _ ->
- %% Print the most critical messages first
- FilteredMessages = lists:filter(fun({_,Level,_,_}) ->
- weatherreport_log:should_log(Level)
- end, Messages),
- SortedMessages = lists:sort(fun({_, ALevel, _, _}, {_, BLevel, _, _}) ->
- weatherreport_log:level(ALevel) =< weatherreport_log:level(BLevel)
- end, FilteredMessages),
- case SortedMessages of
+ Checks =
+ case InputChecks of
[] ->
- io:format("No diagnostic messages to report.~n"),
- halt(0);
+ weatherreport_check:modules();
_ ->
- lists:foreach(fun weatherreport_check:print/1, SortedMessages),
- weatherreport_util:flush_stdout(),
- halt(1)
+ ShortNames = [
+ {weatherreport_util:short_name(Mod), Mod}
+ || Mod <- weatherreport_check:modules()
+ ],
+ element(1, lists:foldr(fun validate_checks/2, {[], ShortNames}, InputChecks))
end,
- halt(1)
+ Messages =
+ case application:get_env(weatherreport, all_nodes) of
+ {ok, true} ->
+ weatherreport_runner:run(Checks, all);
+ _ ->
+ weatherreport_runner:run(Checks)
+ end,
+ case Messages of
+ [] ->
+ io:format("No diagnostic messages to report.~n"),
+ halt(0);
+ _ ->
+ %% Print the most critical messages first
+ FilteredMessages = lists:filter(
+ fun({_, Level, _, _}) ->
+ weatherreport_log:should_log(Level)
+ end,
+ Messages
+ ),
+ SortedMessages = lists:sort(
+ fun({_, ALevel, _, _}, {_, BLevel, _, _}) ->
+ weatherreport_log:level(ALevel) =< weatherreport_log:level(BLevel)
+ end,
+ FilteredMessages
+ ),
+ case SortedMessages of
+ [] ->
+ io:format("No diagnostic messages to report.~n"),
+ halt(0);
+ _ ->
+ lists:foreach(fun weatherreport_check:print/1, SortedMessages),
+ weatherreport_util:flush_stdout(),
+ halt(1)
+ end,
+ halt(1)
end.
validate_checks(Check, {Mods, SNames}) ->
case lists:keyfind(Check, 1, SNames) of
{Check, Mod} ->
- {[Mod|Mods], lists:delete({Check, Mod}, SNames)};
+ {[Mod | Mods], lists:delete({Check, Mod}, SNames)};
_ ->
io:format("Unknown check '~s' specified, skipping.~n", [Check]),
{Mods, SNames}
@@ -155,10 +176,10 @@ process_opts(Opts) ->
process_opts([], Result) ->
Result;
-process_opts([H|T], Result) ->
+process_opts([H | T], Result) ->
process_opts(T, process_option(H, Result)).
-process_option({etc,Path}, Result) ->
+process_option({etc, Path}, Result) ->
application:set_env(weatherreport, etc, filename:absname(Path)),
Result;
process_option({level, Level}, Result) ->
@@ -173,7 +194,8 @@ process_option(expert, Result) ->
process_option(all_nodes, Result) ->
application:set_env(weatherreport, all_nodes, true),
Result;
-process_option(list, usage) -> %% Help should have precedence over listing checks
+%% Help should have precedence over listing checks
+process_option(list, usage) ->
usage;
process_option(list, _) ->
list;
diff --git a/src/weatherreport/src/weatherreport_check.erl b/src/weatherreport/src/weatherreport_check.erl
index ffac2da0a..65ce1a416 100644
--- a/src/weatherreport/src/weatherreport_check.erl
+++ b/src/weatherreport/src/weatherreport_check.erl
@@ -58,27 +58,31 @@
-module(weatherreport_check).
-export([behaviour_info/1]).
--export([check/2,
- modules/0,
- print/1]).
+-export([
+ check/2,
+ modules/0,
+ print/1
+]).
%% @doc The behaviour definition for diagnostic modules.
-spec behaviour_info(atom()) -> 'undefined' | [{atom(), arity()}].
behaviour_info(callbacks) ->
- [{description, 0},
- {valid, 0},
- {check, 1},
- {format, 1}];
+ [
+ {description, 0},
+ {valid, 0},
+ {check, 1},
+ {format, 1}
+ ];
behaviour_info(_) ->
undefined.
%% @doc Runs the diagnostic in the given module, if it is valid. Returns a
%% list of messages that will be printed later using print/1.
--spec check(Module::module(), list()) -> [{atom(), module(), term()}].
+-spec check(Module :: module(), list()) -> [{atom(), module(), term()}].
check(Module, Opts) ->
case Module:valid() of
true ->
- [ {Level, Module, Message} || {Level, Message} <- Module:check(Opts) ];
+ [{Level, Module, Message} || {Level, Message} <- Module:check(Opts)];
_ ->
[]
end.
@@ -88,16 +92,18 @@ check(Module, Opts) ->
-spec modules() -> [module()].
modules() ->
{ok, Mods} = application:get_key(weatherreport, modules),
- [ M || M <- Mods,
- Attr <- M:module_info(attributes),
- {behaviour, [?MODULE]} =:= Attr orelse {behavior, [?MODULE]} =:= Attr ].
-
+ [
+ M
+ || M <- Mods,
+ Attr <- M:module_info(attributes),
+ {behaviour, [?MODULE]} =:= Attr orelse {behavior, [?MODULE]} =:= Attr
+ ].
%% @doc Formats and prints the given message. The diagnostic
%% module's format/1 function will be called to provide a
%% human-readable message. It should return an iolist() or a 2-tuple
%% consisting of a format string and a list of terms.
--spec print({Node::atom(), Level::atom(), Module::module(), Data::term()}) -> ok.
+-spec print({Node :: atom(), Level :: atom(), Module :: module(), Data :: term()}) -> ok.
print({Node, Level, Mod, Data}) ->
case Mod:format(Data) of
{Format, Terms} ->
diff --git a/src/weatherreport/src/weatherreport_check_custodian.erl b/src/weatherreport/src/weatherreport_check_custodian.erl
index 36a6a4c8b..924d1c94f 100644
--- a/src/weatherreport/src/weatherreport_check_custodian.erl
+++ b/src/weatherreport/src/weatherreport_check_custodian.erl
@@ -33,10 +33,12 @@
-module(weatherreport_check_custodian).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-include_lib("eunit/include/eunit.hrl").
diff --git a/src/weatherreport/src/weatherreport_check_disk.erl b/src/weatherreport/src/weatherreport_check_disk.erl
index 187db86b7..5361ae632 100644
--- a/src/weatherreport/src/weatherreport_check_disk.erl
+++ b/src/weatherreport/src/weatherreport_check_disk.erl
@@ -38,18 +38,22 @@
-define(TEST_FILE, "weatherreport.tmp").
%% A dependent chain of permissions checking functions.
--define(CHECKPERMFUNS, [fun check_is_dir/1,
- fun check_is_writeable/1,
- fun check_is_readable/1,
- fun check_is_file_readable/1,
- fun check_atime/1]).
+-define(CHECKPERMFUNS, [
+ fun check_is_dir/1,
+ fun check_is_writeable/1,
+ fun check_is_readable/1,
+ fun check_is_file_readable/1,
+ fun check_atime/1
+]).
-include_lib("kernel/include/file.hrl").
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-spec description() -> string().
description() ->
@@ -63,29 +67,41 @@ valid() ->
check(_Opts) ->
DataDirs = weatherreport_config:data_directories(),
%% Add additional disk checks in the function below
- lists:flatmap(fun(Dir) ->
- check_directory_permissions(Dir)
- end,
- DataDirs).
+ lists:flatmap(
+ fun(Dir) ->
+ check_directory_permissions(Dir)
+ end,
+ DataDirs
+ ).
-spec format(term()) -> {io:format(), [term()]}.
format({disk_full, DataDir}) ->
- {"Disk containing data directory ~s is full! "
- "Please check that it is set to the correct location and that there are not "
- "other files using up space intended for Riak.", [DataDir]};
+ {
+ "Disk containing data directory ~s is full! "
+ "Please check that it is set to the correct location and that there are not "
+ "other files using up space intended for Riak.",
+ [DataDir]
+ };
format({no_data_dir, DataDir}) ->
{"Data directory ~s does not exist. Please create it.", [DataDir]};
format({no_write, DataDir}) ->
User = weatherreport_config:user(),
- {"No write access to data directory ~s. Please make it writeable by the '~s' user.", [DataDir, User]};
+ {"No write access to data directory ~s. Please make it writeable by the '~s' user.", [
+ DataDir, User
+ ]};
format({no_read, DataDir}) ->
User = weatherreport_config:user(),
- {"No read access to data directory ~s. Please make it readable by the '~s' user.", [DataDir, User]};
+ {"No read access to data directory ~s. Please make it readable by the '~s' user.", [
+ DataDir, User
+ ]};
format({write_check, File}) ->
{"Write-test file ~s is a directory! Please remove it so this test can continue.", [File]};
format({atime, Dir}) ->
- {"Data directory ~s is not mounted with 'noatime'. "
- "Please remount its disk with the 'noatime' flag to improve performance.", [Dir]}.
+ {
+ "Data directory ~s is not mounted with 'noatime'. "
+ "Please remount its disk with the 'noatime' flag to improve performance.",
+ [Dir]
+ }.
%%% Private functions
@@ -96,12 +112,12 @@ check_directory_permissions(Directory) ->
%% returning the first non-ok result.
check_directory(_, []) ->
[];
-check_directory(Directory, [Check|Checks]) ->
+check_directory(Directory, [Check | Checks]) ->
case Check(Directory) of
ok ->
check_directory(Directory, Checks);
Message ->
- [ Message ]
+ [Message]
end.
%% Check if the path is actually a directory
@@ -130,13 +146,17 @@ check_is_writeable(Directory) ->
%% Check if the directory is readable
check_is_readable(Directory) ->
case file:read_file_info(Directory) of
- {ok, #file_info{access=Access}} when Access == read orelse
- Access == read_write ->
+ {ok, #file_info{access = Access}} when
+ Access == read orelse
+ Access == read_write
+ ->
ok;
{error, eacces} ->
{error, {no_read, Directory}};
- {error, Error} when Error == enoent orelse
- Error == enotdir ->
+ {error, Error} when
+ Error == enoent orelse
+ Error == enotdir
+ ->
{error, {no_data_dir, Directory}};
_ ->
{error, {no_read, Directory}}
@@ -146,12 +166,15 @@ check_is_readable(Directory) ->
check_is_file_readable(Directory) ->
File = filename:join([Directory, ?TEST_FILE]),
case file:read_file(File) of
- {error, Error} when Error == eacces orelse
- Error == enotdir ->
+ {error, Error} when
+ Error == eacces orelse
+ Error == enotdir
+ ->
{error, {no_read, Directory}};
{error, enoent} ->
{error, {write_check, File}};
- _ -> ok
+ _ ->
+ ok
end.
%% Check if the directory is mounted with 'noatime'
diff --git a/src/weatherreport/src/weatherreport_check_internal_replication.erl b/src/weatherreport/src/weatherreport_check_internal_replication.erl
index 7cfdea09e..5dc0bfa64 100644
--- a/src/weatherreport/src/weatherreport_check_internal_replication.erl
+++ b/src/weatherreport/src/weatherreport_check_internal_replication.erl
@@ -21,15 +21,17 @@
%% -------------------------------------------------------------------
%% @doc Diagnostic that checks the current size of the mem3_sync
-%% backlog. The size is printed as an info message if under a defined
+%% backlog. The size is printed as an info message if under a defined
%% threshold, or as a warning if above the threshold.
-module(weatherreport_check_internal_replication).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-define(THRESHOLD, 1000000).
@@ -49,8 +51,8 @@ total_to_level(_Total) ->
-spec check(list()) -> [{atom(), term()}].
check(_Opts) ->
- Backlog = mem3_sync:get_backlog(),
- [{total_to_level(Backlog), Backlog}].
+ Backlog = mem3_sync:get_backlog(),
+ [{total_to_level(Backlog), Backlog}].
-spec format(term()) -> {io:format(), [term()]}.
format(Backlog) ->
diff --git a/src/weatherreport/src/weatherreport_check_ioq.erl b/src/weatherreport/src/weatherreport_check_ioq.erl
index 2c25964ef..a0e0b0e60 100644
--- a/src/weatherreport/src/weatherreport_check_ioq.erl
+++ b/src/weatherreport/src/weatherreport_check_ioq.erl
@@ -26,10 +26,12 @@
-module(weatherreport_check_ioq).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-define(THRESHOLD, 500).
@@ -67,9 +69,13 @@ check(Opts) ->
true ->
case ioq:get_queue_lengths() of
Queues when is_map(Queues) ->
- Total = maps:fold(fun(_Key, Val, Acc) ->
- Val + Acc
- end, 0, Queues),
+ Total = maps:fold(
+ fun(_Key, Val, Acc) ->
+ Val + Acc
+ end,
+ 0,
+ Queues
+ ),
[{total_to_level(Total), {ioq_requests, Total, Queues}}];
Error ->
[{warning, {ioq_requests_unknown, Error}}]
diff --git a/src/weatherreport/src/weatherreport_check_mem3_sync.erl b/src/weatherreport/src/weatherreport_check_mem3_sync.erl
index 8dfe41c02..cabca5d50 100644
--- a/src/weatherreport/src/weatherreport_check_mem3_sync.erl
+++ b/src/weatherreport/src/weatherreport_check_mem3_sync.erl
@@ -26,10 +26,12 @@
-module(weatherreport_check_mem3_sync).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-spec description() -> string().
description() ->
diff --git a/src/weatherreport/src/weatherreport_check_membership.erl b/src/weatherreport/src/weatherreport_check_membership.erl
index 1ba104116..8fff33c10 100644
--- a/src/weatherreport/src/weatherreport_check_membership.erl
+++ b/src/weatherreport/src/weatherreport_check_membership.erl
@@ -34,10 +34,12 @@
-module(weatherreport_check_membership).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-include_lib("eunit/include/eunit.hrl").
@@ -62,4 +64,5 @@ check(_Opts) ->
-spec format(term()) -> {io:format(), [term()]}.
format({not_ring_member, Nodename}) ->
- {"Local node ~w is not a member of the cluster. Please check that the -name setting in vm.args is correct.", [Nodename]}.
+ {"Local node ~w is not a member of the cluster. Please check that the -name setting in vm.args is correct.",
+ [Nodename]}.
diff --git a/src/weatherreport/src/weatherreport_check_memory_use.erl b/src/weatherreport/src/weatherreport_check_memory_use.erl
index 5e49ab0d8..04c021381 100644
--- a/src/weatherreport/src/weatherreport_check_memory_use.erl
+++ b/src/weatherreport/src/weatherreport_check_memory_use.erl
@@ -34,10 +34,12 @@
-module(weatherreport_check_memory_use).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-spec description() -> string().
description() ->
@@ -51,7 +53,7 @@ valid() ->
check(_Opts) ->
Pid = weatherreport_node:pid(),
Output = weatherreport_util:run_command("ps -o pmem,rss -p " ++ Pid),
- [_,_,Percent, RealSize| _] = string:tokens(Output, "/n \n"),
+ [_, _, Percent, RealSize | _] = string:tokens(Output, "/n \n"),
Messages = [{info, {process_usage, Percent, RealSize}}],
case weatherreport_util:binary_to_float(list_to_binary(Percent)) >= 90 of
false ->
diff --git a/src/weatherreport/src/weatherreport_check_message_queues.erl b/src/weatherreport/src/weatherreport_check_message_queues.erl
index ae99ff4dc..e55e9eb52 100644
--- a/src/weatherreport/src/weatherreport_check_message_queues.erl
+++ b/src/weatherreport/src/weatherreport_check_message_queues.erl
@@ -26,10 +26,12 @@
-module(weatherreport_check_message_queues).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-define(THRESHOLD, 1000).
@@ -46,7 +48,8 @@ check(Opts) ->
weatherreport_util:check_proc_count(
message_queue_len,
?THRESHOLD,
- Opts).
+ Opts
+ ).
-spec format(term()) -> {io:format(), [term()]}.
format({high, {Pid, MBoxSize, Info, Pinfo}}) ->
diff --git a/src/weatherreport/src/weatherreport_check_node_stats.erl b/src/weatherreport/src/weatherreport_check_node_stats.erl
index 27b77cefd..6c3353dc6 100644
--- a/src/weatherreport/src/weatherreport_check_node_stats.erl
+++ b/src/weatherreport/src/weatherreport_check_node_stats.erl
@@ -26,10 +26,12 @@
-module(weatherreport_check_node_stats).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-define(SAMPLES, 10).
-define(T_RUN_QUEUE, 40).
@@ -57,9 +59,9 @@ mean_to_message({Statistic, Mean}) ->
-spec check(list()) -> [{atom(), term()}].
check(_Opts) ->
- SumOfStats = recon:node_stats(?SAMPLES, 100, fun sum_absolute_stats/2, []),
- MeanStats = [{K, erlang:round(V / ?SAMPLES)} || {K, V} <- SumOfStats],
- lists:map(fun mean_to_message/1, MeanStats).
+ SumOfStats = recon:node_stats(?SAMPLES, 100, fun sum_absolute_stats/2, []),
+ MeanStats = [{K, erlang:round(V / ?SAMPLES)} || {K, V} <- SumOfStats],
+ lists:map(fun mean_to_message/1, MeanStats).
-spec format(term()) -> {io:format(), [term()]}.
format({Statistic, Value}) ->
diff --git a/src/weatherreport/src/weatherreport_check_nodes_connected.erl b/src/weatherreport/src/weatherreport_check_nodes_connected.erl
index 7b47de8aa..389054209 100644
--- a/src/weatherreport/src/weatherreport_check_nodes_connected.erl
+++ b/src/weatherreport/src/weatherreport_check_nodes_connected.erl
@@ -32,10 +32,12 @@
-module(weatherreport_check_nodes_connected).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-spec description() -> string().
description() ->
@@ -50,9 +52,12 @@ check(_Opts) ->
NodeName = node(),
ConnectedNodes = [NodeName | erlang:nodes()],
Members = mem3:nodes(),
- [{warning, {node_disconnected, N}} || N <- Members,
- N =/= NodeName,
- lists:member(N, ConnectedNodes) == false].
+ [
+ {warning, {node_disconnected, N}}
+ || N <- Members,
+ N =/= NodeName,
+ lists:member(N, ConnectedNodes) == false
+ ].
-spec format(term()) -> {io:format(), [term()]}.
format({node_disconnected, Node}) ->
diff --git a/src/weatherreport/src/weatherreport_check_process_calls.erl b/src/weatherreport/src/weatherreport_check_process_calls.erl
index a12fa61e4..b6a228aeb 100644
--- a/src/weatherreport/src/weatherreport_check_process_calls.erl
+++ b/src/weatherreport/src/weatherreport_check_process_calls.erl
@@ -25,10 +25,12 @@
-module(weatherreport_check_process_calls).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-define(THRESHOLD, 1000).
@@ -56,18 +58,22 @@ fold_processes([{Count, undefined} | T], Acc, Lim, CallType, Opts) ->
fold_processes(T, [Message | Acc], Lim - 1, CallType, Opts);
fold_processes([{Count, {M, F, A}} | T], Acc, Lim, CallType, Opts) ->
Level = total_to_level(Count),
- Message = case proplists:get_value(expert, Opts) of
- true ->
- PidFun = list_to_atom("find_by_" ++ CallType ++ "_call"),
- Pids = erlang:apply(recon, PidFun, [M, F]),
- Pinfos = lists:map(fun(Pid) ->
- Pinfo = recon:info(Pid),
- {Pid, Pinfo}
- end, lists:sublist(Pids, 10)),
- {Level, {process_count, {CallType, Count, M, F, A, Pinfos}}};
- _ ->
- {Level, {process_count, {CallType, Count, M, F, A}}}
- end,
+ Message =
+ case proplists:get_value(expert, Opts) of
+ true ->
+ PidFun = list_to_atom("find_by_" ++ CallType ++ "_call"),
+ Pids = erlang:apply(recon, PidFun, [M, F]),
+ Pinfos = lists:map(
+ fun(Pid) ->
+ Pinfo = recon:info(Pid),
+ {Pid, Pinfo}
+ end,
+ lists:sublist(Pids, 10)
+ ),
+ {Level, {process_count, {CallType, Count, M, F, A, Pinfos}}};
+ _ ->
+ {Level, {process_count, {CallType, Count, M, F, A}}}
+ end,
fold_processes(T, [Message | Acc], Lim - 1, CallType, Opts).
-spec check(list()) -> [{atom(), term()}].
@@ -81,13 +87,15 @@ check(Opts) ->
Opts
),
FirstCallCounts = show_first_call_counts(),
- lists:reverse(fold_processes(
- FirstCallCounts,
- CurrentCallMessages,
- 10,
- "first",
- Opts
- )).
+ lists:reverse(
+ fold_processes(
+ FirstCallCounts,
+ CurrentCallMessages,
+ 10,
+ "first",
+ Opts
+ )
+ ).
-spec format(term()) -> {io:format(), [term()]}.
format({process_count, {CallType, Count, undefined}}) ->
@@ -97,57 +105,64 @@ format({process_count, {CallType, Count, M, F, A}}) ->
format({process_count, {CallType, Count, M, F, A, Pinfos}}) ->
{"~w processes with ~s call ~w:~w/~w ~w", [Count, CallType, M, F, A, Pinfos]}.
-
%% @doc Show the list of first calls sorted by the number of
%% processes that had that initial call.
-spec show_first_call_counts() -> [{Count, {Module, Function, Arity}}] when
- Count :: pos_integer(),
- Module :: atom(),
- Function :: atom(),
- Arity :: non_neg_integer().
+ Count :: pos_integer(),
+ Module :: atom(),
+ Function :: atom(),
+ Arity :: non_neg_integer().
show_first_call_counts() ->
- Res = lists:foldl(fun(Pid, Acc) ->
- dict:update_counter(first_call(Pid), 1, Acc)
- end, dict:new(), processes()),
+ Res = lists:foldl(
+ fun(Pid, Acc) ->
+ dict:update_counter(first_call(Pid), 1, Acc)
+ end,
+ dict:new(),
+ processes()
+ ),
Rev = [{Count, Call} || {Call, Count} <- dict:to_list(Res)],
lists:reverse(lists:sort(Rev)).
-
%% @doc Show the list of current calls sorted by the number of
%% processes that had that current call.
-spec show_current_call_counts() -> [{Count, {Module, Function, Arity}}] when
- Count :: pos_integer(),
- Module :: atom(),
- Function :: atom(),
- Arity :: non_neg_integer().
+ Count :: pos_integer(),
+ Module :: atom(),
+ Function :: atom(),
+ Arity :: non_neg_integer().
show_current_call_counts() ->
- Res = lists:foldl(fun(Pid, Acc) ->
- case process_info(Pid, current_function) of
- {current_function, Call} ->
- dict:update_counter(Call, 1, Acc);
- undefined ->
- Acc
- end
- end, dict:new(), processes()),
+ Res = lists:foldl(
+ fun(Pid, Acc) ->
+ case process_info(Pid, current_function) of
+ {current_function, Call} ->
+ dict:update_counter(Call, 1, Acc);
+ undefined ->
+ Acc
+ end
+ end,
+ dict:new(),
+ processes()
+ ),
Rev = [{Count, Call} || {Call, Count} <- dict:to_list(Res)],
lists:reverse(lists:sort(Rev)).
-
%% @doc Find the first function call for a Pid taking into account cases
%% where '$initial_call' is set in the process dictionary.
-spec first_call(Pid) -> {Module, Function, Arity} when
- Pid :: pid(),
- Module :: atom(),
- Function :: atom(),
- Arity :: non_neg_integer().
+ Pid :: pid(),
+ Module :: atom(),
+ Function :: atom(),
+ Arity :: non_neg_integer().
first_call(Pid) ->
- IC = case process_info(Pid, initial_call) of
- {initial_call, IC0} -> IC0;
- undefined -> undefined
- end,
- Dict = case process_info(Pid, dictionary) of
- {dictionary, Dict0} -> Dict0;
- undefined -> []
- end,
+ IC =
+ case process_info(Pid, initial_call) of
+ {initial_call, IC0} -> IC0;
+ undefined -> undefined
+ end,
+ Dict =
+ case process_info(Pid, dictionary) of
+ {dictionary, Dict0} -> Dict0;
+ undefined -> []
+ end,
MaybeCall = proplists:get_value('$initial_call', Dict, IC),
proplists:get_value(initial_call, Dict, MaybeCall).
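For orientation, a minimal sketch (illustrative only, not part of the patch) of the fallback that first_call/1 above implements: proc_lib-spawned processes report a generic initial_call, so the real entry point has to be read from the '$initial_call' key in the process dictionary when it is present.

```
%% Illustrative sketch only; mirrors the '$initial_call' fallback in first_call/1.
{initial_call, IC} = process_info(self(), initial_call),
{dictionary, Dict} = process_info(self(), dictionary),
%% Prefer the '$initial_call' recorded by proc_lib, fall back to initial_call.
RealCall = proplists:get_value('$initial_call', Dict, IC).
```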
diff --git a/src/weatherreport/src/weatherreport_check_process_memory.erl b/src/weatherreport/src/weatherreport_check_process_memory.erl
index 2f766cdfe..4c7b2c76f 100644
--- a/src/weatherreport/src/weatherreport_check_process_memory.erl
+++ b/src/weatherreport/src/weatherreport_check_process_memory.erl
@@ -26,10 +26,12 @@
-module(weatherreport_check_process_memory).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-define(THRESHOLD, 104857600).
@@ -46,7 +48,8 @@ check(Opts) ->
weatherreport_util:check_proc_count(
memory,
?THRESHOLD,
- Opts).
+ Opts
+ ).
-spec format(term()) -> {io:format(), [term()]}.
format({high, {Pid, Memory, Info, Pinfo}}) ->
diff --git a/src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl b/src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl
index a7c46c979..86bb1f9c7 100644
--- a/src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl
+++ b/src/weatherreport/src/weatherreport_check_safe_to_rebuild.erl
@@ -25,10 +25,12 @@
-module(weatherreport_check_safe_to_rebuild).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-spec description() -> string().
description() ->
@@ -42,15 +44,17 @@ valid() ->
%% that no shard would end up with N<Threshold when the node is offline
-spec safe_to_rebuild(atom(), integer()) -> [list()].
safe_to_rebuild(Node, RawThreshold) ->
- Threshold = case config:get("couchdb", "maintenance_mode") of
- "true" ->
- RawThreshold - 1;
- _ ->
- RawThreshold
- end,
+ Threshold =
+ case config:get("couchdb", "maintenance_mode") of
+ "true" ->
+ RawThreshold - 1;
+ _ ->
+ RawThreshold
+ end,
BelowThreshold = fun
({_, _, {_, C}}) when C =< Threshold -> true;
- (_) -> false end,
+ (_) -> false
+ end,
ToKV = fun({Db, Range, Status}) -> {[Db, Range], Status} end,
ShardsInDanger = dict:from_list(
@@ -77,7 +81,8 @@ safe_to_rebuild(Node, RawThreshold) ->
_ ->
Acc
end;
- _ -> Acc
+ _ ->
+ Acc
end
end,
[]
diff --git a/src/weatherreport/src/weatherreport_check_search.erl b/src/weatherreport/src/weatherreport_check_search.erl
index c04c86d63..b7986db2b 100644
--- a/src/weatherreport/src/weatherreport_check_search.erl
+++ b/src/weatherreport/src/weatherreport_check_search.erl
@@ -27,10 +27,12 @@
-module(weatherreport_check_search).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-spec description() -> string().
description() ->
diff --git a/src/weatherreport/src/weatherreport_check_tcp_queues.erl b/src/weatherreport/src/weatherreport_check_tcp_queues.erl
index 8e161487f..cc502031b 100644
--- a/src/weatherreport/src/weatherreport_check_tcp_queues.erl
+++ b/src/weatherreport/src/weatherreport_check_tcp_queues.erl
@@ -26,10 +26,12 @@
-module(weatherreport_check_tcp_queues).
-behaviour(weatherreport_check).
--export([description/0,
- valid/0,
- check/1,
- format/1]).
+-export([
+ description/0,
+ valid/0,
+ check/1,
+ format/1
+]).
-define(THRESHOLD, 1000000).
@@ -55,13 +57,14 @@ sum_queues(Netstats) ->
sum_queues([], Acc) ->
Acc;
sum_queues([Row | Rest], {SumRecvQ, SumSendQ}) ->
- {RecvQ, SendQ} = case string:tokens(Row, " ") of
- [[$t, $c, $p | _] | _]=Cols ->
- {Rq, Sq} = {lists:nth(2, Cols), lists:nth(3, Cols)},
- {list_to_integer(Rq), list_to_integer(Sq)};
- _ ->
- {0, 0}
- end,
+ {RecvQ, SendQ} =
+ case string:tokens(Row, " ") of
+ [[$t, $c, $p | _] | _] = Cols ->
+ {Rq, Sq} = {lists:nth(2, Cols), lists:nth(3, Cols)},
+ {list_to_integer(Rq), list_to_integer(Sq)};
+ _ ->
+ {0, 0}
+ end,
sum_queues(Rest, {RecvQ + SumRecvQ, SendQ + SumSendQ}).
%% @doc Converts the sum of queue lengths to a log message at the appropriate
@@ -77,7 +80,7 @@ check(_Opts) ->
Netstats = weatherreport_util:run_command("netstat"),
{SumRecvQ, SumSendQ} = sum_queues(Netstats),
[sum_to_message(SumRecvQ, "recv_q"), sum_to_message(SumSendQ, "send_q")].
-
+
-spec format(term()) -> {io:format(), [term()]}.
format({recv_q_high, QLen}) ->
{"Total TCP Recv-Q is HIGH: ~w", [QLen]};
diff --git a/src/weatherreport/src/weatherreport_config.erl b/src/weatherreport/src/weatherreport_config.erl
index 0538365be..6cf9fd533 100644
--- a/src/weatherreport/src/weatherreport_config.erl
+++ b/src/weatherreport/src/weatherreport_config.erl
@@ -34,14 +34,16 @@
-module(weatherreport_config).
--export([prepare/0,
- data_directories/0,
- get_vm_env/1,
- etc_dir/0,
- timeout/0,
- node_name/0,
- cookie/0,
- user/0]).
+-export([
+ prepare/0,
+ data_directories/0,
+ get_vm_env/1,
+ etc_dir/0,
+ timeout/0,
+ node_name/0,
+ cookie/0,
+ user/0
+]).
%% @doc Prepares appropriate configuration so the weatherreport script
%% can run. This is called by the weatherreport module and you do
@@ -52,7 +54,7 @@ prepare() ->
prepare([]) ->
ok;
-prepare([Fun|T]) ->
+prepare([Fun | T]) ->
case Fun() of
{error, Reason} ->
{error, Reason};
@@ -62,9 +64,9 @@ prepare([Fun|T]) ->
%% @doc Determines where CouchDB is configured to store data. Returns a
%% list of paths to directories defined by storage backends.
--spec data_directories() -> [ file:filename() ].
+-spec data_directories() -> [file:filename()].
data_directories() ->
- [config:get("couchdb","view_index_dir"), config:get("couchdb","database_dir")].
+ [config:get("couchdb", "view_index_dir"), config:get("couchdb", "database_dir")].
%% @doc Get an -env flag out of the vm.args file.
-spec get_vm_env(string()) -> string() | undefined.
@@ -110,7 +112,7 @@ etc_dir() ->
%% @doc The local node name. Includes whether the node uses short
%% or long nodenames for distributed Erlang.
--spec node_name() -> {shortnames | longnames, Name::string()}.
+-spec node_name() -> {shortnames | longnames, Name :: string()}.
node_name() ->
case application:get_env(weatherreport, node_name) of
undefined ->
@@ -140,13 +142,15 @@ load_app_config() ->
weatherreport_log:log(node(), debug, "Local node config: ~p~n", [config:all()]).
load_vm_args() ->
- VmArgs = case init:get_argument(vm_args) of
- {ok, [[X]]} -> X;
- _ ->
- %% This is a backup. If for some reason -vm_args isn't specified
- %% then assume it lives in the same dir as app.config
- filename:absname("./vm.args", ?MODULE:etc_dir())
- end,
+ VmArgs =
+ case init:get_argument(vm_args) of
+ {ok, [[X]]} ->
+ X;
+ _ ->
+ %% This is a backup. If for some reason -vm_args isn't specified
+ %% then assume it lives in the same dir as app.config
+ filename:absname("./vm.args", ?MODULE:etc_dir())
+ end,
case file:read_file(VmArgs) of
{error, Reason} ->
@@ -159,32 +163,32 @@ load_vm_args(Bin) when is_binary(Bin) ->
load_vm_args(re:split(Bin, "\s*\r?\n\s*", [{return, list}, trim]));
load_vm_args([]) ->
ok;
-load_vm_args([[$#|_]|T]) ->
+load_vm_args([[$# | _] | T]) ->
load_vm_args(T);
-load_vm_args([""|T]) ->
+load_vm_args(["" | T]) ->
load_vm_args(T);
-load_vm_args(["-sname " ++ NodeName|T]) ->
+load_vm_args(["-sname " ++ NodeName | T]) ->
application:set_env(weatherreport, node_name, {shortnames, string:strip(NodeName)}),
load_vm_args(T);
-load_vm_args(["-name " ++ NodeName|T]) ->
+load_vm_args(["-name " ++ NodeName | T]) ->
application:set_env(weatherreport, node_name, {longnames, string:strip(NodeName)}),
load_vm_args(T);
-load_vm_args(["-setcookie " ++ Cookie|T]) ->
+load_vm_args(["-setcookie " ++ Cookie | T]) ->
application:set_env(weatherreport, cookie, string:strip(Cookie)),
load_vm_args(T);
-load_vm_args(["-env " ++ Env|T]) ->
+load_vm_args(["-env " ++ Env | T]) ->
[Key, Value] = re:split(Env, "\s+", [{return, list}, trim]),
add_or_insert_env(vm_env, {Key, Value}),
load_vm_args(T);
-load_vm_args([[$+|EmuFlags]|T]) ->
- [Flag|Rest] = re:split(EmuFlags, "\s+", [{return,list}, trim]),
- add_or_insert_env(emu_flags, {[$+|Flag], Rest}),
+load_vm_args([[$+ | EmuFlags] | T]) ->
+ [Flag | Rest] = re:split(EmuFlags, "\s+", [{return, list}, trim]),
+ add_or_insert_env(emu_flags, {[$+ | Flag], Rest}),
load_vm_args(T);
-load_vm_args([[$-|InitFlags]|T]) ->
- [Flag|Rest] = re:split(InitFlags, "\s+", [{return,list}, trim]),
- add_or_insert_env(init_flags, {[$-|Flag], Rest}),
+load_vm_args([[$- | InitFlags] | T]) ->
+ [Flag | Rest] = re:split(InitFlags, "\s+", [{return, list}, trim]),
+ add_or_insert_env(init_flags, {[$- | Flag], Rest}),
load_vm_args(T);
-load_vm_args([Line|_]) ->
+load_vm_args([Line | _]) ->
{error, io_lib:format("Erroneous line in vm.args: ~s", [Line])}.
add_or_insert_env(Key, Value) ->
@@ -192,5 +196,5 @@ add_or_insert_env(Key, Value) ->
undefined ->
application:set_env(weatherreport, Key, [Value]);
{ok, List} ->
- application:set_env(weatherreport, Key, [Value|List])
+ application:set_env(weatherreport, Key, [Value | List])
end.
diff --git a/src/weatherreport/src/weatherreport_getopt.erl b/src/weatherreport/src/weatherreport_getopt.erl
index cbee63cd6..736112630 100644
--- a/src/weatherreport/src/weatherreport_getopt.erl
+++ b/src/weatherreport/src/weatherreport_getopt.erl
@@ -13,13 +13,15 @@
-export([parse/2, usage/2, usage/3, usage/4]).
--export_type([arg_type/0,
- arg_value/0,
- arg_spec/0,
- simple_option/0,
- compound_option/0,
- option/0,
- option_spec/0]).
+-export_type([
+ arg_type/0,
+ arg_value/0,
+ arg_spec/0,
+ simple_option/0,
+ compound_option/0,
+ option/0,
+ option_spec/0
+]).
-define(TAB_LENGTH, 8).
%% Indentation of the help messages in number of tabs.
@@ -34,7 +36,6 @@
-define(IS_OPT_SPEC(Opt), (tuple_size(Opt) =:= ?OPT_HELP)).
-
%% Atom indicating the data type that an argument can be converted to.
-type arg_type() :: 'atom' | 'binary' | 'boolean' | 'float' | 'integer' | 'string'.
%% Data type that an argument can be converted to.
@@ -47,16 +48,15 @@
-type option() :: simple_option() | compound_option().
%% Command line option specification.
-type option_spec() :: {
- Name :: atom(),
- Short :: char() | undefined,
- Long :: string() | undefined,
- ArgSpec :: arg_spec(),
- Help :: string() | undefined
- }.
+ Name :: atom(),
+ Short :: char() | undefined,
+ Long :: string() | undefined,
+ ArgSpec :: arg_spec(),
+ Help :: string() | undefined
+}.
%% Output streams
-type output_stream() :: 'standard_io' | 'standard_error'.
-
%% @doc Parse the command line options and arguments returning a list of tuples
%% and/or atoms using the Erlang convention for sending options to a
%% function.
@@ -64,19 +64,19 @@
{ok, {[option()], [string()]}} | {error, {Reason :: atom(), Data :: any()}}.
parse(OptSpecList, CmdLine) ->
try
- Args = if
- is_integer(hd(CmdLine)) ->
- string:tokens(CmdLine, " \t\n");
- true ->
- CmdLine
- end,
+ Args =
+ if
+ is_integer(hd(CmdLine)) ->
+ string:tokens(CmdLine, " \t\n");
+ true ->
+ CmdLine
+ end,
parse(OptSpecList, [], [], 0, Args)
catch
- throw: {error, {_Reason, _Data}} = Error ->
+ throw:{error, {_Reason, _Data}} = Error ->
Error
end.
-
-spec parse([option_spec()], [option()], [string()], integer(), [string()]) ->
{ok, {[option()], [string()]}}.
%% Process the option terminator.
@@ -102,27 +102,28 @@ parse(OptSpecList, OptAcc, ArgAcc, _ArgPos, []) ->
%% not present but had default arguments in the specification.
{ok, {lists:reverse(append_default_options(OptSpecList, OptAcc)), lists:reverse(ArgAcc)}}.
-
%% @doc Parse a long option, add it to the option accumulator and continue
%% parsing the rest of the arguments recursively.
%% A long option can have the following syntax:
%% --foo Single option 'foo', no argument
%% --foo=bar Single option 'foo', argument "bar"
%% --foo bar Single option 'foo', argument "bar"
--spec parse_long_option([option_spec()], [option()], [string()], integer(), [string()], string(), string()) ->
- {ok, {[option()], [string()]}}.
+-spec parse_long_option(
+ [option_spec()], [option()], [string()], integer(), [string()], string(), string()
+) ->
+ {ok, {[option()], [string()]}}.
parse_long_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptArg) ->
case split_assigned_arg(OptArg) of
{Long, Arg} ->
%% Get option that has its argument within the same string
%% separated by an equal ('=') character (e.g. "--port=1000").
- parse_long_option_assigned_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, Long, Arg);
-
+ parse_long_option_assigned_arg(
+ OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, Long, Arg
+ );
Long ->
case lists:keyfind(Long, ?OPT_LONG, OptSpecList) of
{Name, _Short, Long, undefined, _Help} ->
parse(OptSpecList, [Name | OptAcc], ArgAcc, ArgPos, Args);
-
{_Name, _Short, Long, _ArgSpec, _Help} = OptSpec ->
%% The option argument string is empty, but the option requires
%% an argument, so we look into the next string in the list.
@@ -133,13 +134,20 @@ parse_long_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptArg) ->
end
end.
-
%% @doc Parse an option where the argument is 'assigned' in the same string using
%% the '=' character, add it to the option accumulator and continue parsing the
%% rest of the arguments recursively. This syntax is only valid for long options.
--spec parse_long_option_assigned_arg([option_spec()], [option()], [string()], integer(),
- [string()], string(), string(), string()) ->
- {ok, {[option()], [string()]}}.
+-spec parse_long_option_assigned_arg(
+ [option_spec()],
+ [option()],
+ [string()],
+ integer(),
+ [string()],
+ string(),
+ string(),
+ string()
+) ->
+ {ok, {[option()], [string()]}}.
parse_long_option_assigned_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, Long, Arg) ->
case lists:keyfind(Long, ?OPT_LONG, OptSpecList) of
{_Name, _Short, Long, ArgSpec, _Help} = OptSpec ->
@@ -147,13 +155,18 @@ parse_long_option_assigned_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr
undefined ->
throw({error, {invalid_option_arg, OptStr}});
_ ->
- parse(OptSpecList, add_option_with_assigned_arg(OptSpec, Arg, OptAcc), ArgAcc, ArgPos, Args)
+ parse(
+ OptSpecList,
+ add_option_with_assigned_arg(OptSpec, Arg, OptAcc),
+ ArgAcc,
+ ArgPos,
+ Args
+ )
end;
false ->
throw({error, {invalid_option, OptStr}})
end.
-
%% @doc Split an option string that may contain an option with its argument
%% separated by an equal ('=') character (e.g. "port=1000").
-spec split_assigned_arg(string()) -> {Name :: string(), Arg :: string()} | string().
@@ -167,11 +180,12 @@ split_assigned_arg(OptStr, [Char | Tail], Acc) ->
split_assigned_arg(OptStr, [], _Acc) ->
OptStr.
-
%% @doc Retrieve the argument for an option from the next string in the list of
%% command-line parameters or set the value of the argument from the argument
%% specification (for boolean and integer arguments), if possible.
-parse_long_option_next_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, {Name, _Short, _Long, ArgSpec, _Help} = OptSpec) ->
+parse_long_option_next_arg(
+ OptSpecList, OptAcc, ArgAcc, ArgPos, Args, {Name, _Short, _Long, ArgSpec, _Help} = OptSpec
+) ->
ArgSpecType = arg_spec_type(ArgSpec),
case Args =:= [] orelse is_implicit_arg(ArgSpecType, hd(Args)) of
true ->
@@ -179,14 +193,15 @@ parse_long_option_next_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, {Name, _Sh
false ->
[Arg | Tail] = Args,
try
- parse(OptSpecList, [{Name, to_type(ArgSpecType, Arg)} | OptAcc], ArgAcc, ArgPos, Tail)
+ parse(
+ OptSpecList, [{Name, to_type(ArgSpecType, Arg)} | OptAcc], ArgAcc, ArgPos, Tail
+ )
catch
error:_ ->
throw({error, {invalid_option_arg, {Name, Arg}}})
end
end.
-
%% @doc Parse a short option, add it to the option accumulator and continue
%% parsing the rest of the arguments recursively.
%% A short option can have the following syntax:
@@ -196,7 +211,9 @@ parse_long_option_next_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, {Name, _Sh
%% -abc Multiple options: 'a'; 'b'; 'c'
%% -bcafoo Multiple options: 'b'; 'c'; 'a' with argument "foo"
%% -aaa Multiple repetitions of option 'a' (only valid for options with integer arguments)
--spec parse_short_option([option_spec()], [option()], [string()], integer(), [string()], string(), string()) ->
+-spec parse_short_option(
+ [option_spec()], [option()], [string()], integer(), [string()], string(), string()
+) ->
{ok, {[option()], [string()]}}.
parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptArg) ->
parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, first, OptArg).
@@ -204,45 +221,68 @@ parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptArg) ->
parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptStr, OptPos, [Short | Arg]) ->
case lists:keyfind(Short, ?OPT_SHORT, OptSpecList) of
{Name, Short, _Long, undefined, _Help} ->
- parse_short_option(OptSpecList, [Name | OptAcc], ArgAcc, ArgPos, Args, OptStr, first, Arg);
-
+ parse_short_option(
+ OptSpecList, [Name | OptAcc], ArgAcc, ArgPos, Args, OptStr, first, Arg
+ );
{_Name, Short, _Long, ArgSpec, _Help} = OptSpec ->
%% The option has a specification, so it requires an argument.
case Arg of
[] ->
%% The option argument string is empty, but the option requires
%% an argument, so we look into the next string in the list.
- parse_short_option_next_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptSpec, OptPos);
-
+ parse_short_option_next_arg(
+ OptSpecList, OptAcc, ArgAcc, ArgPos, Args, OptSpec, OptPos
+ );
_ ->
case is_valid_arg(ArgSpec, Arg) of
true ->
- parse(OptSpecList, add_option_with_arg(OptSpec, Arg, OptAcc), ArgAcc, ArgPos, Args);
+ parse(
+ OptSpecList,
+ add_option_with_arg(OptSpec, Arg, OptAcc),
+ ArgAcc,
+ ArgPos,
+ Args
+ );
_ ->
- NewOptAcc = case OptPos of
- first -> add_option_with_implicit_arg(OptSpec, OptAcc);
- _ -> add_option_with_implicit_incrementable_arg(OptSpec, OptAcc)
- end,
- parse_short_option(OptSpecList, NewOptAcc, ArgAcc, ArgPos, Args, OptStr, next, Arg)
+ NewOptAcc =
+ case OptPos of
+ first -> add_option_with_implicit_arg(OptSpec, OptAcc);
+ _ -> add_option_with_implicit_incrementable_arg(OptSpec, OptAcc)
+ end,
+ parse_short_option(
+ OptSpecList, NewOptAcc, ArgAcc, ArgPos, Args, OptStr, next, Arg
+ )
end
end;
-
false ->
throw({error, {invalid_option, OptStr}})
end;
parse_short_option(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, _OptStr, _OptPos, []) ->
parse(OptSpecList, OptAcc, ArgAcc, ArgPos, Args).
-
%% @doc Retrieve the argument for an option from the next string in the list of
%% command-line parameters or set the value of the argument from the argument
%% specification (for boolean and integer arguments), if possible.
-parse_short_option_next_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, {Name, _Short, _Long, ArgSpec, _Help} = OptSpec, OptPos) ->
+parse_short_option_next_arg(
+ OptSpecList,
+ OptAcc,
+ ArgAcc,
+ ArgPos,
+ Args,
+ {Name, _Short, _Long, ArgSpec, _Help} = OptSpec,
+ OptPos
+) ->
case Args =:= [] orelse is_implicit_arg(ArgSpec, hd(Args)) of
true when OptPos =:= first ->
parse(OptSpecList, add_option_with_implicit_arg(OptSpec, OptAcc), ArgAcc, ArgPos, Args);
true ->
- parse(OptSpecList, add_option_with_implicit_incrementable_arg(OptSpec, OptAcc), ArgAcc, ArgPos, Args);
+ parse(
+ OptSpecList,
+ add_option_with_implicit_incrementable_arg(OptSpec, OptAcc),
+ ArgAcc,
+ ArgPos,
+ Args
+ );
false ->
[Arg | Tail] = Args,
try
@@ -253,12 +293,11 @@ parse_short_option_next_arg(OptSpecList, OptAcc, ArgAcc, ArgPos, Args, {Name, _S
end
end.
-
%% @doc Find the option for the discrete argument in position specified in the
%% Pos argument.
-spec find_non_option_arg([option_spec()], integer()) -> {value, option_spec()} | false.
find_non_option_arg([{_Name, undefined, undefined, _ArgSpec, _Help} = OptSpec | _Tail], 0) ->
- {value, OptSpec};
+ {value, OptSpec};
find_non_option_arg([{_Name, undefined, undefined, _ArgSpec, _Help} | Tail], Pos) ->
find_non_option_arg(Tail, Pos - 1);
find_non_option_arg([_Head | Tail], Pos) ->
@@ -266,25 +305,25 @@ find_non_option_arg([_Head | Tail], Pos) ->
find_non_option_arg([], _Pos) ->
false.
-
%% @doc Append options that were not present in the command line arguments with
%% their default arguments.
-spec append_default_options([option_spec()], [option()]) -> [option()].
append_default_options([{Name, _Short, _Long, {_Type, DefaultArg}, _Help} | Tail], OptAcc) ->
- append_default_options(Tail,
- case lists:keymember(Name, 1, OptAcc) of
- false ->
- [{Name, DefaultArg} | OptAcc];
- _ ->
- OptAcc
- end);
+ append_default_options(
+ Tail,
+ case lists:keymember(Name, 1, OptAcc) of
+ false ->
+ [{Name, DefaultArg} | OptAcc];
+ _ ->
+ OptAcc
+ end
+ );
%% For options with no default argument.
append_default_options([_Head | Tail], OptAcc) ->
append_default_options(Tail, OptAcc);
append_default_options([], OptAcc) ->
OptAcc.
-
%% @doc Add an option with argument converting it to the data type indicated by the
%% argument specification.
-spec add_option_with_arg(option_spec(), string(), [option()]) -> [option()].
@@ -301,7 +340,6 @@ add_option_with_arg({Name, _Short, _Long, ArgSpec, _Help} = OptSpec, Arg, OptAcc
add_option_with_implicit_arg(OptSpec, OptAcc)
end.
-
%% @doc Add an option with argument that was part of an assignment expression
%% (e.g. "--verbose=3") converting it to the data type indicated by the
%% argument specification.
@@ -314,7 +352,6 @@ add_option_with_assigned_arg({Name, _Short, _Long, ArgSpec, _Help}, Arg, OptAcc)
throw({error, {invalid_option_arg, {Name, Arg}}})
end.
-
%% @doc Add an option that required an argument but did not have one. Some data
%% types (boolean, integer) allow implicit or assumed arguments.
-spec add_option_with_implicit_arg(option_spec(), [option()]) -> [option()].
@@ -333,9 +370,9 @@ add_option_with_implicit_arg({Name, _Short, _Long, ArgSpec, _Help}, OptAcc) ->
throw({error, {missing_option_arg, Name}})
end.
-
%% @doc Add an option with an implicit or assumed argument.
--spec add_option_with_implicit_incrementable_arg(option_spec() | arg_spec(), [option()]) -> [option()].
+-spec add_option_with_implicit_incrementable_arg(option_spec() | arg_spec(), [option()]) ->
+ [option()].
add_option_with_implicit_incrementable_arg({Name, _Short, _Long, ArgSpec, _Help}, OptAcc) ->
case arg_spec_type(ArgSpec) of
boolean ->
@@ -357,7 +394,6 @@ add_option_with_implicit_incrementable_arg({Name, _Short, _Long, ArgSpec, _Help}
throw({error, {missing_option_arg, Name}})
end.
-
%% @doc Retrieve the data type from an argument specification.
-spec arg_spec_type(arg_spec()) -> arg_type() | undefined.
arg_spec_type({Type, _DefaultArg}) ->
@@ -365,7 +401,6 @@ arg_spec_type({Type, _DefaultArg}) ->
arg_spec_type(Type) when is_atom(Type) ->
Type.
-
%% @doc Convert an argument string to its corresponding data type.
-spec to_type(arg_spec() | arg_type(), string()) -> arg_value().
to_type({Type, _DefaultArg}, Arg) ->
@@ -394,22 +429,19 @@ to_type(boolean, Arg) ->
to_type(_Type, Arg) ->
Arg.
-
-spec is_arg_true(string()) -> boolean().
is_arg_true(Arg) ->
(Arg =:= "true") orelse (Arg =:= "t") orelse
- (Arg =:= "yes") orelse (Arg =:= "y") orelse
- (Arg =:= "on") orelse (Arg =:= "enabled") orelse
- (Arg =:= "1").
-
+ (Arg =:= "yes") orelse (Arg =:= "y") orelse
+ (Arg =:= "on") orelse (Arg =:= "enabled") orelse
+ (Arg =:= "1").
-spec is_arg_false(string()) -> boolean().
is_arg_false(Arg) ->
(Arg =:= "false") orelse (Arg =:= "f") orelse
- (Arg =:= "no") orelse (Arg =:= "n") orelse
- (Arg =:= "off") orelse (Arg =:= "disabled") orelse
- (Arg =:= "0").
-
+ (Arg =:= "no") orelse (Arg =:= "n") orelse
+ (Arg =:= "off") orelse (Arg =:= "disabled") orelse
+ (Arg =:= "0").
-spec is_valid_arg(arg_spec(), nonempty_string()) -> boolean().
is_valid_arg({Type, _DefaultArg}, Arg) ->
@@ -423,7 +455,6 @@ is_valid_arg(float, Arg) ->
is_valid_arg(_Type, _Arg) ->
true.
-
-spec is_implicit_arg(arg_spec(), nonempty_string()) -> boolean().
is_implicit_arg({Type, _DefaultArg}, Arg) ->
is_implicit_arg(Type, Arg);
@@ -434,20 +465,17 @@ is_implicit_arg(integer, Arg) ->
is_implicit_arg(_Type, _Arg) ->
false.
-
-spec is_boolean_arg(string()) -> boolean().
is_boolean_arg(Arg) ->
LowerArg = string:to_lower(Arg),
is_arg_true(LowerArg) orelse is_arg_false(LowerArg).
-
-spec is_integer_arg(string()) -> boolean().
is_integer_arg("-" ++ Tail) ->
is_non_neg_integer_arg(Tail);
is_integer_arg(Arg) ->
is_non_neg_integer_arg(Arg).
-
-spec is_non_neg_integer_arg(string()) -> boolean().
is_non_neg_integer_arg([Head | Tail]) when Head >= $0, Head =< $9 ->
is_non_neg_integer_arg(Tail);
@@ -456,7 +484,6 @@ is_non_neg_integer_arg([_Head | _Tail]) ->
is_non_neg_integer_arg([]) ->
true.
-
-spec is_non_neg_float_arg(string()) -> boolean().
is_non_neg_float_arg([Head | Tail]) when (Head >= $0 andalso Head =< $9) orelse Head =:= $. ->
is_non_neg_float_arg(Tail);
@@ -465,41 +492,43 @@ is_non_neg_float_arg([_Head | _Tail]) ->
is_non_neg_float_arg([]) ->
true.
-
%% @doc Show a message on standard_error indicating the command line options and
%% arguments that are supported by the program.
-spec usage([option_spec()], string()) -> ok.
usage(OptSpecList, ProgramName) ->
- usage(OptSpecList, ProgramName, standard_error).
-
+ usage(OptSpecList, ProgramName, standard_error).
%% @doc Show a message on standard_error or standard_io indicating the command line options and
%% arguments that are supported by the program.
-spec usage([option_spec()], string(), output_stream() | string()) -> ok.
usage(OptSpecList, ProgramName, OutputStream) when is_atom(OutputStream) ->
- io:format(OutputStream, "Usage: ~s~s~n~n~s~n",
- [ProgramName, usage_cmd_line(OptSpecList), usage_options(OptSpecList)]);
+ io:format(
+ OutputStream,
+ "Usage: ~s~s~n~n~s~n",
+ [ProgramName, usage_cmd_line(OptSpecList), usage_options(OptSpecList)]
+ );
%% @doc Show a message on standard_error indicating the command line options and
%% arguments that are supported by the program. The CmdLineTail argument
%% is a string that is added to the end of the usage command line.
usage(OptSpecList, ProgramName, CmdLineTail) ->
- usage(OptSpecList, ProgramName, CmdLineTail, standard_error).
-
+ usage(OptSpecList, ProgramName, CmdLineTail, standard_error).
%% @doc Show a message on standard_error or standard_io indicating the command line options and
%% arguments that are supported by the program. The CmdLineTail argument
%% is a string that is added to the end of the usage command line.
-spec usage([option_spec()], string(), string(), output_stream() | [{string(), string()}]) -> ok.
usage(OptSpecList, ProgramName, CmdLineTail, OutputStream) when is_atom(OutputStream) ->
- io:format(OutputStream, "Usage: ~s~s ~s~n~n~s~n",
- [ProgramName, usage_cmd_line(OptSpecList), CmdLineTail, usage_options(OptSpecList)]);
+ io:format(
+ OutputStream,
+ "Usage: ~s~s ~s~n~n~s~n",
+ [ProgramName, usage_cmd_line(OptSpecList), CmdLineTail, usage_options(OptSpecList)]
+ );
%% @doc Show a message on standard_error indicating the command line options and
%% arguments that are supported by the program. The CmdLineTail and OptionsTail
%% arguments are a string that is added to the end of the usage command line
%% and a list of tuples that are added to the end of the options' help lines.
usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail) ->
- usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail, standard_error).
-
+ usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail, standard_error).
%% @doc Show a message on standard_error or standard_io indicating the command line options and
%% arguments that are supported by the program. The CmdLineTail and OptionsTail
@@ -508,13 +537,22 @@ usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail) ->
-spec usage([option_spec()], string(), string(), [{string(), string()}], output_stream()) -> ok.
usage(OptSpecList, ProgramName, CmdLineTail, OptionsTail, OutputStream) ->
UsageOptions = lists:foldl(
- fun ({Prefix, Help}, Acc) ->
- add_option_help(Prefix, Help, Acc)
- end, usage_options_reverse(OptSpecList, []), OptionsTail),
- io:format(OutputStream, "Usage: ~s~s ~s~n~n~s~n",
- [ProgramName, usage_cmd_line(OptSpecList), CmdLineTail,
- lists:flatten(lists:reverse(UsageOptions))]).
-
+ fun({Prefix, Help}, Acc) ->
+ add_option_help(Prefix, Help, Acc)
+ end,
+ usage_options_reverse(OptSpecList, []),
+ OptionsTail
+ ),
+ io:format(
+ OutputStream,
+ "Usage: ~s~s ~s~n~n~s~n",
+ [
+ ProgramName,
+ usage_cmd_line(OptSpecList),
+ CmdLineTail,
+ lists:flatten(lists:reverse(UsageOptions))
+ ]
+ ).
%% @doc Return a string with the syntax for the command line options and
%% arguments.
@@ -553,7 +591,6 @@ usage_cmd_line([{Name, Short, Long, ArgSpec, _Help} | Tail], Acc) ->
usage_cmd_line([], Acc) ->
lists:flatten(lists:reverse(Acc)).
-
%% @doc Return a string with the help message for each of the options and
%% arguments.
-spec usage_options([option_spec()]) -> string().
@@ -586,7 +623,6 @@ usage_options_reverse([{Name, Short, Long, _ArgSpec, Help} | Tail], Acc) ->
usage_options_reverse([], Acc) ->
Acc.
-
%% @doc Add the help message corresponding to an option specification to a list
%% with the correct indentation.
-spec add_option_help(Prefix :: string(), Help :: string(), Acc :: string()) -> string().
@@ -605,8 +641,6 @@ add_option_help(Prefix, Help, Acc) when is_list(Help), Help =/= [] ->
add_option_help(_Opt, _Prefix, Acc) ->
Acc.
-
-
%% @doc Return the smallest integral value not less than the argument.
-spec ceiling(float()) -> integer().
ceiling(X) ->
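As a usage sketch for the option syntax documented above (hypothetical option names, not part of the patch), relying only on parse/2 and the {Name, Short, Long, ArgSpec, Help} spec shape:

```
%% Illustrative only: "-e" picks up its implicit boolean argument,
%% "--level=3" takes the '=' assigned-argument path, and "nodes" is
%% returned as a plain (non-option) argument.
OptSpecs = [
    {expert, $e, "expert", {boolean, false}, "Include expert-level details"},
    {level,  $l, "level",  {integer, 1},     "Verbosity level"}
],
{ok, {Opts, Args}} = weatherreport_getopt:parse(OptSpecs, "-e --level=3 nodes").
```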
diff --git a/src/weatherreport/src/weatherreport_log.erl b/src/weatherreport/src/weatherreport_log.erl
index 29547da6f..7a511aa1c 100644
--- a/src/weatherreport/src/weatherreport_log.erl
+++ b/src/weatherreport/src/weatherreport_log.erl
@@ -20,25 +20,33 @@
should_log/1
]).
-
-level(debug) -> 7;
-level(info) -> 6;
-level(notice) -> 5;
-level(warn) -> 4;
-level(warning) -> 4;
-level(err) -> 3;
-level(error) -> 3;
-level(crit) -> 2;
-level(alert) -> 1;
-level(emerg) -> 0;
-level(panic) -> 0;
-
+level(debug) ->
+ 7;
+level(info) ->
+ 6;
+level(notice) ->
+ 5;
+level(warn) ->
+ 4;
+level(warning) ->
+ 4;
+level(err) ->
+ 3;
+level(error) ->
+ 3;
+level(crit) ->
+ 2;
+level(alert) ->
+ 1;
+level(emerg) ->
+ 0;
+level(panic) ->
+ 0;
level(I) when is_integer(I), I >= 0, I =< 7 ->
I;
level(_BadLevel) ->
3.
-
log(Node, Level, Format, Terms) ->
case should_log(Level) of
true ->
@@ -59,10 +67,11 @@ log(Node, Level, String) ->
end.
should_log(Level) ->
- AppLevel = case application:get_env(weatherreport, log_level) of
- undefined -> info;
- {ok, L0} -> L0
- end,
+ AppLevel =
+ case application:get_env(weatherreport, log_level) of
+ undefined -> info;
+ {ok, L0} -> L0
+ end,
level(AppLevel) >= level(Level).
get_prefix(Node, Level) ->
diff --git a/src/weatherreport/src/weatherreport_node.erl b/src/weatherreport/src/weatherreport_node.erl
index bb5cef007..81d868c39 100644
--- a/src/weatherreport/src/weatherreport_node.erl
+++ b/src/weatherreport/src/weatherreport_node.erl
@@ -31,21 +31,22 @@
%% node or other members of the cluster.
-module(weatherreport_node).
--export([can_connect/0,
- can_connect_all/0,
- pid/0,
- local_command/2,
- local_command/3,
- local_command/4,
- multicall/5,
- nodename/0
- ]).
+-export([
+ can_connect/0,
+ can_connect_all/0,
+ pid/0,
+ local_command/2,
+ local_command/3,
+ local_command/4,
+ multicall/5,
+ nodename/0
+]).
%% @doc Calls the given 0-arity module and function on the local
%% node and returns the result of that call.
%% @equiv local_command(Module, Function, [])
%% @see can_connect/0.
--spec local_command(Module::atom(), Function::atom()) -> term().
+-spec local_command(Module :: atom(), Function :: atom()) -> term().
local_command(Module, Function) ->
local_command(Module, Function, []).
@@ -53,7 +54,7 @@ local_command(Module, Function) ->
%% on the local node and returns the result of that call.
%% @equiv local_command(Module, Function, Args, 5000)
%% @see can_connect/0
--spec local_command(Module::atom(), Function::atom(), Args::[term()]) -> term().
+-spec local_command(Module :: atom(), Function :: atom(), Args :: [term()]) -> term().
local_command(Module, Function, Args) ->
local_command(Module, Function, Args, weatherreport_config:timeout()).
@@ -63,7 +64,8 @@ local_command(Module, Function, Args) ->
%% timeout.
%% @equiv rpc:call(NodeName, Module, Function, Args, Timeout)
%% @see can_connect/0
--spec local_command(Module::atom(), Function::atom(), Args::[term()], Timeout::integer()) -> term().
+-spec local_command(Module :: atom(), Function :: atom(), Args :: [term()], Timeout :: integer()) ->
+ term().
local_command(Module, Function, Args, Timeout) ->
case is_cluster_node() of
true ->
@@ -86,7 +88,9 @@ local_command(Module, Function, Args, Timeout) ->
%% @doc Call rpc:multicall/5 from the local cluster node rather than the
%% escript.
--spec multicall([node()], Module::atom(), Function::atom(), Args::[term()], Timeout::integer()) -> term().
+-spec multicall(
+ [node()], Module :: atom(), Function :: atom(), Args :: [term()], Timeout :: integer()
+) -> term().
multicall(Nodes, Module, Function, Args, Timeout) ->
case local_command(rpc, multicall, [Nodes, Module, Function, Args, Timeout]) of
{badrpc, Reason} ->
@@ -108,7 +112,8 @@ pid() ->
-spec can_connect() -> true | false.
can_connect() ->
case is_connected() or is_cluster_node() of
- true -> true;
+ true ->
+ true;
false ->
weatherreport_log:log(
node(),
@@ -127,16 +132,18 @@ can_connect_all() ->
[] -> true;
_ -> false
end;
- false -> false
+ false ->
+ false
end.
nodename() ->
- Name = case weatherreport_config:node_name() of
- undefined ->
- atom_to_list(node());
- {_, NodeName} ->
- NodeName
- end,
+ Name =
+ case weatherreport_config:node_name() of
+ undefined ->
+ atom_to_list(node());
+ {_, NodeName} ->
+ NodeName
+ end,
case string:tokens(Name, "@") of
[_Node, _Host] ->
list_to_atom(Name);
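For context, local_command/3 above is how the checks reach the running cluster node from the escript; a hedged sketch of the same call shape that weatherreport_runner:run/2 uses below:

```
%% Illustrative only; runs mem3:nodes() on the local cluster node rather
%% than inside the escript, with weatherreport_config:timeout() applied.
ClusterNodes = weatherreport_node:local_command(mem3, nodes, []).
```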
diff --git a/src/weatherreport/src/weatherreport_runner.erl b/src/weatherreport/src/weatherreport_runner.erl
index e67940ace..77518d690 100644
--- a/src/weatherreport/src/weatherreport_runner.erl
+++ b/src/weatherreport/src/weatherreport_runner.erl
@@ -29,13 +29,13 @@
-export([run/1, run/2, format/1]).
%% @doc Run the supplied list of checks on the local node
--spec run([Module::atom()]) -> [tuple()].
+-spec run([Module :: atom()]) -> [tuple()].
run(Checks) ->
weatherreport_node:can_connect(),
run(Checks, [weatherreport_node:nodename()]).
%% @doc Run the supplied list of checks on the supplied list of cluster nodes
--spec run([Module::atom()], [node()] | all) -> [tuple()].
+-spec run([Module :: atom()], [node()] | all) -> [tuple()].
run(Checks, all) ->
weatherreport_node:can_connect(),
case weatherreport_node:local_command(mem3, nodes, []) of
@@ -46,27 +46,33 @@ run(Checks, all) ->
end;
run(Checks, Nodes) ->
CheckOpts = get_check_options(),
- lists:flatten(lists:foldl(fun(Mod, Acc) ->
- {Resps, BadNodes} = weatherreport_node:multicall(
- Nodes,
- erlang,
- apply,
- [fun() -> {node(), weatherreport_check:check(Mod, CheckOpts)} end, []],
- weatherreport_config:timeout()
- ),
- TransformFailedCheck = fun(Node) ->
- {node(), crit, weatherreport_runner, {check_failed, Mod, Node}}
- end,
- FailedChecks = [TransformFailedCheck(Node) || Node <- BadNodes],
- TransformResponse = fun
- ({badrpc, Error}) ->
- [{node(), crit, weatherreport_runner, {badrpc, Mod, Error}}];
- ({Node, Messages}) ->
- [{Node, Lvl, Module, Msg} || {Lvl, Module, Msg} <- Messages]
- end,
- Responses = [TransformResponse(Resp) || Resp <- Resps],
- [Responses ++ FailedChecks | Acc]
- end, [], Checks)).
+ lists:flatten(
+ lists:foldl(
+ fun(Mod, Acc) ->
+ {Resps, BadNodes} = weatherreport_node:multicall(
+ Nodes,
+ erlang,
+ apply,
+ [fun() -> {node(), weatherreport_check:check(Mod, CheckOpts)} end, []],
+ weatherreport_config:timeout()
+ ),
+ TransformFailedCheck = fun(Node) ->
+ {node(), crit, weatherreport_runner, {check_failed, Mod, Node}}
+ end,
+ FailedChecks = [TransformFailedCheck(Node) || Node <- BadNodes],
+ TransformResponse = fun
+ ({badrpc, Error}) ->
+ [{node(), crit, weatherreport_runner, {badrpc, Mod, Error}}];
+ ({Node, Messages}) ->
+ [{Node, Lvl, Module, Msg} || {Lvl, Module, Msg} <- Messages]
+ end,
+ Responses = [TransformResponse(Resp) || Resp <- Resps],
+ [Responses ++ FailedChecks | Acc]
+ end,
+ [],
+ Checks
+ )
+ ).
%% @doc Part of the weatherreport_check behaviour. This means that any messages
%% returned by this module can be handled via the existing message reporting
@@ -80,10 +86,11 @@ format({badrpc, Check, Error}) ->
%% Private functions
get_check_options() ->
- Expert = case application:get_env(weatherreport, expert) of
- {ok, true} ->
- true;
- _ ->
- false
- end,
+ Expert =
+ case application:get_env(weatherreport, expert) of
+ {ok, true} ->
+ true;
+ _ ->
+ false
+ end,
[{expert, Expert}].
diff --git a/src/weatherreport/src/weatherreport_util.erl b/src/weatherreport/src/weatherreport_util.erl
index 450475d42..ef42505e9 100644
--- a/src/weatherreport/src/weatherreport_util.erl
+++ b/src/weatherreport/src/weatherreport_util.erl
@@ -28,11 +28,13 @@
%% @doc Utility functions for weatherreport.
%% @end
-module(weatherreport_util).
--export([short_name/1,
- run_command/1,
- binary_to_float/1,
- flush_stdout/0,
- check_proc_count/3]).
+-export([
+ short_name/1,
+ run_command/1,
+ binary_to_float/1,
+ flush_stdout/0,
+ check_proc_count/3
+]).
%% @doc Converts a check module name into a short name that can be
%% used to refer to a check on the command line. For example,
@@ -44,7 +46,7 @@ short_name(Mod) when is_atom(Mod) ->
%% @doc Runs a shell command and returns the output. stderr is
%% redirected to stdout so its output will be included.
--spec run_command(Command::iodata()) -> StdOut::iodata().
+-spec run_command(Command :: iodata()) -> StdOut :: iodata().
run_command(Command) ->
weatherreport_log:log(
node(),
@@ -52,7 +54,7 @@ run_command(Command) ->
"Running shell command: ~s",
[Command]
),
- Port = erlang:open_port({spawn,Command},[exit_status, stderr_to_stdout]),
+ Port = erlang:open_port({spawn, Command}, [exit_status, stderr_to_stdout]),
do_read(Port, []).
do_read(Port, Acc) ->
@@ -65,14 +67,14 @@ do_read(Port, Acc) ->
[StdOut]
),
do_read(Port, Acc ++ StdOut);
- {Port, {exit_status, _}} ->
+ {Port, {exit_status, _}} ->
%%port_close(Port),
Acc;
- Other ->
+ Other ->
io:format("~w", [Other]),
do_read(Port, Acc)
end.
-
+
%% @doc Converts a binary containing a text representation of a float
%% into a float type.
-spec binary_to_float(binary()) -> float().
@@ -95,17 +97,19 @@ check_proc_count(Key, Threshold, Opts) ->
procs_to_messages([], _Threshold, Acc, _Opts) ->
Acc;
procs_to_messages([{Pid, Value, Info} | T], Threshold, Acc, Opts) ->
- Level = case Value > Threshold of
- true -> warning;
- _ -> info
- end,
- Message = case {Level, proplists:get_value(expert, Opts)} of
- {warning, true} ->
- Pinfo = recon:info(Pid),
- {warning, {high, {Pid, Value, Info, Pinfo}}};
- {warning, _} ->
- {warning, {high, {Pid, Value, Info}}};
- {info, _} ->
- {info, {ok, {Pid, Value, Info}}}
- end,
+ Level =
+ case Value > Threshold of
+ true -> warning;
+ _ -> info
+ end,
+ Message =
+ case {Level, proplists:get_value(expert, Opts)} of
+ {warning, true} ->
+ Pinfo = recon:info(Pid),
+ {warning, {high, {Pid, Value, Info, Pinfo}}};
+ {warning, _} ->
+ {warning, {high, {Pid, Value, Info}}};
+ {info, _} ->
+ {info, {ok, {Pid, Value, Info}}}
+ end,
procs_to_messages(T, Threshold, [Message | Acc], Opts).
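Finally, a minimal sketch of how check_proc_count/3 above is called, mirroring weatherreport_check_process_memory:check/1 earlier in this patch and the option list built by weatherreport_runner:get_check_options/0:

```
%% Illustrative only; returns {info, {ok, ...}} and {warning, {high, ...}}
%% tuples as assembled by procs_to_messages/4 above.
Messages = weatherreport_util:check_proc_count(memory, 104857600, [{expert, false}]).
```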